code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
import pandas as pd
import re
import string
from nltk.corpus import stopwords
def brand_preprocess(row, trim_len=2):
""" This function creates a brand name column by parsing out the product column of data. It trims the words based on trim length param to choose appropriate brand name.
Args:
row ([pd.Series]): Dataframe row
trim_len (int, optional): Length by which product name has to be trimmed. Defaults to 2.
Returns:
[str]: brand name corresponding to a product.
"""
assert isinstance(
row, pd.Series
), "Check whether the function is called over Series"
if pd.isna(row["product"]) or pd.isna(row["product"]):
return pd.NA
# Remove punctuations from product name
regexPunctuation = re.compile("[%s]" % re.escape(string.punctuation))
cleanProduct = regexPunctuation.sub("", row["product"])
nameList = [
_.upper()
for _ in cleanProduct.lower().split(" ")
if _ not in stopwords.words("english")
]
if len(nameList) == 0:
return ""
# for certain categories use trim length to select brand name.
if row["category"] in [
"Nuts/Edible Seed",
"Vit/Min/Prot/Unconv Diet(Human/Animal)",
]:
return (
" ".join(nameList)
if len(nameList) < trim_len
else " ".join(nameList[:trim_len])
)
return nameList[0]
def age_preprocess(row):
"""This function converts age reports to a single unit : year(s)
since Data has age reported in multiple units like month(s),day(s)
Args:
row ([pd.Series]): A row of the entire Dataframe
Returns:
[float]: value of patient_age converted to years unit
"""
assert isinstance(
row, pd.Series
), "Check whether the function is called over Series"
age_conv = {
"month(s)": 1 / 12,
"year(s)": 1,
"day(s)": 1 / 365,
"Decade(s)": 10,
"week(s)": 1 / 52,
}
unit = row["age_units"]
age = row["patient_age"]
if pd.isna(age) or pd.isna(unit):
return -1
else:
return row["patient_age"] * round(age_conv[unit], 4)
def strip_str(x):
if isinstance(x, str):
x = x.strip()
return x
@click.command()
@click.argument("input_dirpath", type=click.Path(exists=True))
@click.argument("output_dirpath", type=click.Path())
def main(
input_dirpath="../../data/raw/", output_dirpath="../../data/processed",
):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
outPath = Path(output_dirpath)
inPath = Path(input_dirpath)
logger = logging.getLogger(__name__)
logger.info("Creating clean unified data from raw files")
aggReports = None
for p in list(inPath.glob("*.csv")):
curr_df = pd.read_csv(p, encoding="unicode_escape")
column_map = {x: x.lower().replace(" ", "_") for x in curr_df.columns}
curr_df = curr_df.rename(columns=column_map)
curr_df = curr_df.rename(
columns={"meddra_preferred_terms": "medra_preferred_terms"}
)
curr_df = curr_df.applymap(strip_str)
aggReports = curr_df if aggReports is None else pd.concat([aggReports, curr_df])
aggReports = aggReports.rename(columns={"description": "category"})
aggReports["caers_created_date"] = pd.to_datetime(aggReports.caers_created_date)
aggReports.reset_index(drop=True, inplace=True)
aggReports.to_csv(outPath / "clean_data.csv")
logger.info("Processing and enriching data")
# Create brand-enriched column.
logger.info("Making brand name column from clean data")
aggReports["brand"] = aggReports.apply(brand_preprocess, axis=1)
# Pre-processing Age column.
logger.info("Converting age to a common unit year(s)")
aggReports["patient_age"] = aggReports.apply(age_preprocess, axis=1)
aggReports = aggReports.drop(columns=["age_units"])
aggReports.to_csv(outPath / "processed_data.csv")
# Create exploded outcome-wise cleaned data.
logger.info("Making outcomes exploded data set from clean brand-name data")
aggReports.outcomes = aggReports.outcomes.apply(
lambda x: [y.strip() for y in x.split(",") if y != []]
)
expl_aggReports = aggReports.explode("outcomes")
expl_aggReports = expl_aggReports.reset_index(drop=True)
expl_aggReports.to_csv(outPath / "exploded_data.csv")
# Create time-stamp processed & exploded data.
aggReports_time = aggReports.drop_duplicates(
["report_id", "patient_age", "category", "sex"], ignore_index=True
)
aggReports_time["year"] = aggReports_time["caers_created_date"].apply(
lambda x: x.year
)
aggReports_time = aggReports_time.rename(
columns={"caers_created_date": "time_stamp"}
)
aggReports_time.to_csv(outPath / "clean_data_time.csv")
expl_aggReports_time = aggReports_time.explode("outcomes")
expl_aggReports_time["outcomes"] = expl_aggReports_time["outcomes"].str.strip()
expl_aggReports_time.loc[
expl_aggReports_time["outcomes"] == "", "outcomes"
] = "Not Specified"
expl_aggReports_time = expl_aggReports_time.reset_index(drop=True)
expl_aggReports_time.to_csv(outPath / "exploded_data_time.csv")
logger.info("Data cleaning and pre-processing done!")
if __name__ == "__main__":
log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
| [
"logging.getLogger",
"logging.basicConfig",
"re.escape",
"nltk.corpus.stopwords.words",
"pandas.read_csv",
"pathlib.Path",
"click.Path",
"pandas.isna",
"click.command",
"pandas.concat",
"pandas.to_datetime"
] | [((2338, 2353), 'click.command', 'click.command', ([], {}), '()\n', (2351, 2353), False, 'import click\n'), ((2721, 2741), 'pathlib.Path', 'Path', (['output_dirpath'], {}), '(output_dirpath)\n', (2725, 2741), False, 'from pathlib import Path\n'), ((2755, 2774), 'pathlib.Path', 'Path', (['input_dirpath'], {}), '(input_dirpath)\n', (2759, 2774), False, 'from pathlib import Path\n'), ((2789, 2816), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2806, 2816), False, 'import logging\n'), ((3502, 3547), 'pandas.to_datetime', 'pd.to_datetime', (['aggReports.caers_created_date'], {}), '(aggReports.caers_created_date)\n', (3516, 3547), True, 'import pandas as pd\n'), ((5582, 5637), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (5601, 5637), False, 'import logging\n'), ((706, 729), 'pandas.isna', 'pd.isna', (["row['product']"], {}), "(row['product'])\n", (713, 729), True, 'import pandas as pd\n'), ((733, 756), 'pandas.isna', 'pd.isna', (["row['product']"], {}), "(row['product'])\n", (740, 756), True, 'import pandas as pd\n'), ((2133, 2145), 'pandas.isna', 'pd.isna', (['age'], {}), '(age)\n', (2140, 2145), True, 'import pandas as pd\n'), ((2149, 2162), 'pandas.isna', 'pd.isna', (['unit'], {}), '(unit)\n', (2156, 2162), True, 'import pandas as pd\n'), ((2963, 3004), 'pandas.read_csv', 'pd.read_csv', (['p'], {'encoding': '"""unicode_escape"""'}), "(p, encoding='unicode_escape')\n", (2974, 3004), True, 'import pandas as pd\n'), ((2392, 2415), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (2402, 2415), False, 'import click\n'), ((2456, 2468), 'click.Path', 'click.Path', ([], {}), '()\n', (2466, 2468), False, 'import click\n'), ((866, 895), 're.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (875, 895), False, 'import re\n'), ((3357, 3389), 'pandas.concat', 'pd.concat', (['[aggReports, 
curr_df]'], {}), '([aggReports, curr_df])\n', (3366, 3389), True, 'import pandas as pd\n'), ((1062, 1088), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1077, 1088), False, 'from nltk.corpus import stopwords\n')] |
import os
import re
import hyperparams as hp
from data_load import DataLoad
from tqdm import tqdm
import numpy as np
import pandas as pd
import tensorflow as tf
def load_ckpt_paths(model_name='cdmf'):
# get ckpt
ckpt_path = '../model_ckpt/compare/{}/'.format(model_name)
fpaths = []
with open(ckpt_path+'checkpoint', 'r', encoding='utf-8') as f_ckpt :
for line in f_ckpt.readlines()[1:]:
fname = re.sub(r'\"', '', line.split(':')[-1]).strip()
fpath = os.path.join(ckpt_path, fname)
fpaths.append(fpath)
return fpaths
if __name__ == '__main__':
data = DataLoad(data_path=hp.DATA_PATH,
fnames=hp.FNAMES,
forced_seq_len=hp.FORCED_SEQ_LEN,
vocab_size=hp.VOCAB_SIZE,
paly_times=hp.PLAY_TIMES,
num_main_actors=hp.NUM_MAIN_ACTORS,
batch_size=hp.BATCH_SIZE,
num_epochs=hp.NUM_EPOCHS,
noise_rate=hp.NOISE_RATE)
# CDMF
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
sess = tf.Session(config=session_conf)
with sess.as_default():
for fpath in load_ckpt_paths('cdmf'):
saver = tf.train.import_meta_graph(fpath+'.meta')
saver.restore(sess, fpath)
# Get the placeholders from the graph by name
m_oids = graph.get_tensor_by_name('movie_order_ids:0')
info = graph.get_tensor_by_name('info:0')
actors = graph.get_tensor_by_name('actors:0')
descriptions = graph.get_tensor_by_name('descriptions:0')
u_oids = graph.get_tensor_by_name('user_order_ids:0')
r_seq = graph.get_tensor_by_name('rating_sequence:0')
dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
# Tensors we want to evaluate
mse_op = graph.get_tensor_by_name('mse/mse_op:0')
# load evalset
eval_iter = data.load_data('eval')
mse, count = 0.0, 0
for (sub_X_user, sub_X_movie), sub_Y in tqdm(eval_iter):
# unpack
sub_u_oids, sub_bu_seq = sub_X_user
sub_m_oids, sub_info, sub_actors, sub_des, sub_bm_seq = sub_X_movie
sub_r_seq = sub_Y
dev_feed_dict = {
m_oids: sub_m_oids,
info: sub_info,
actors: sub_actors,
descriptions: sub_des,
u_oids: sub_u_oids,
r_seq: sub_r_seq,
dropout_keep_prob: hp.DROPOUT_KEEP_PROB}
sub_mse = sess.run(mse_op, feed_dict=dev_feed_dict)
mse += sub_mse
count += 1
rmse = np.sqrt(mse / count)
print('cdmf | rmse:{}'.format(rmse))
# ConvMF
tf.reset_default_graph()
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False)
sess = tf.Session(config=session_conf)
with sess.as_default():
for fpath in load_ckpt_paths('convmf'):
saver = tf.train.import_meta_graph(fpath+'.meta')
saver.restore(sess, fpath)
# Get the placeholders from the graph by name
m_oids = graph.get_tensor_by_name('movie_order_ids:0')
descriptions = graph.get_tensor_by_name('descriptions:0')
u_oids = graph.get_tensor_by_name('user_order_ids:0')
r_seq = graph.get_tensor_by_name('rating_sequence:0')
dropout_keep_prob = graph.get_tensor_by_name("dropout_keep_prob:0")
# Tensors we want to evaluate
mse_op = graph.get_tensor_by_name('mse/mse_op:0')
# load evalset
eval_iter = data.load_data('eval')
mse, count = 0.0, 0
for (sub_X_user, sub_X_movie), sub_Y in tqdm(eval_iter):
# unpack
sub_u_oids, sub_bu_seq = sub_X_user
sub_m_oids, sub_info, sub_actors, sub_des, sub_bm_seq = sub_X_movie
sub_r_seq = sub_Y
dev_feed_dict = {
m_oids: sub_m_oids,
descriptions: sub_des,
u_oids: sub_u_oids,
r_seq: sub_r_seq,
dropout_keep_prob: hp.DROPOUT_KEEP_PROB}
sub_mse = sess.run(mse_op, feed_dict=dev_feed_dict)
mse += sub_mse
count += 1
rmse = np.sqrt(mse / count)
print('convmf | rmse:{}'.format(rmse)) | [
"tensorflow.Graph",
"tensorflow.reset_default_graph",
"numpy.sqrt",
"tensorflow.Session",
"tqdm.tqdm",
"os.path.join",
"tensorflow.train.import_meta_graph",
"data_load.DataLoad",
"tensorflow.ConfigProto"
] | [((623, 886), 'data_load.DataLoad', 'DataLoad', ([], {'data_path': 'hp.DATA_PATH', 'fnames': 'hp.FNAMES', 'forced_seq_len': 'hp.FORCED_SEQ_LEN', 'vocab_size': 'hp.VOCAB_SIZE', 'paly_times': 'hp.PLAY_TIMES', 'num_main_actors': 'hp.NUM_MAIN_ACTORS', 'batch_size': 'hp.BATCH_SIZE', 'num_epochs': 'hp.NUM_EPOCHS', 'noise_rate': 'hp.NOISE_RATE'}), '(data_path=hp.DATA_PATH, fnames=hp.FNAMES, forced_seq_len=hp.\n FORCED_SEQ_LEN, vocab_size=hp.VOCAB_SIZE, paly_times=hp.PLAY_TIMES,\n num_main_actors=hp.NUM_MAIN_ACTORS, batch_size=hp.BATCH_SIZE,\n num_epochs=hp.NUM_EPOCHS, noise_rate=hp.NOISE_RATE)\n', (631, 886), False, 'from data_load import DataLoad\n'), ((1058, 1068), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1066, 1068), True, 'import tensorflow as tf\n'), ((3153, 3177), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3175, 3177), True, 'import tensorflow as tf\n'), ((3198, 3208), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3206, 3208), True, 'import tensorflow as tf\n'), ((1121, 1190), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(allow_soft_placement=True, log_device_placement=False)\n', (1135, 1190), True, 'import tensorflow as tf\n'), ((1231, 1262), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (1241, 1262), True, 'import tensorflow as tf\n'), ((3261, 3330), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)'}), '(allow_soft_placement=True, log_device_placement=False)\n', (3275, 3330), True, 'import tensorflow as tf\n'), ((3371, 3402), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (3381, 3402), True, 'import tensorflow as tf\n'), ((502, 532), 'os.path.join', 'os.path.join', (['ckpt_path', 'fname'], {}), '(ckpt_path, fname)\n', (514, 532), False, 'import os\n'), ((1382, 1425), 
'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(fpath + '.meta')"], {}), "(fpath + '.meta')\n", (1408, 1425), True, 'import tensorflow as tf\n'), ((2307, 2322), 'tqdm.tqdm', 'tqdm', (['eval_iter'], {}), '(eval_iter)\n', (2311, 2322), False, 'from tqdm import tqdm\n'), ((3061, 3081), 'numpy.sqrt', 'np.sqrt', (['(mse / count)'], {}), '(mse / count)\n', (3068, 3081), True, 'import numpy as np\n'), ((3511, 3554), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(fpath + '.meta')"], {}), "(fpath + '.meta')\n", (3537, 3554), True, 'import tensorflow as tf\n'), ((4316, 4331), 'tqdm.tqdm', 'tqdm', (['eval_iter'], {}), '(eval_iter)\n', (4320, 4331), False, 'from tqdm import tqdm\n'), ((4986, 5006), 'numpy.sqrt', 'np.sqrt', (['(mse / count)'], {}), '(mse / count)\n', (4993, 5006), True, 'import numpy as np\n')] |
# Generated by Django 2.1 on 2018-08-14 09:42
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('substitute_finder', '0002_category'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('code', models.CharField(max_length=300, primary_key=True, serialize=False, verbose_name='identifiant')),
('product_name', models.CharField(max_length=300, verbose_name='nom du produit')),
('generic_name', models.CharField(max_length=1000, verbose_name='description')),
('url', models.URLField(max_length=1000, verbose_name='url OpenFoodFacts')),
('stores', models.CharField(max_length=300, verbose_name='vendeur')),
('nutrition_grade_fr', models.CharField(max_length=1, verbose_name='score nutritionnel')),
('last_updated', models.DateTimeField(auto_now=True, verbose_name='dernière mise à jour')),
('categories', models.ManyToManyField(to='substitute_finder.Category', verbose_name='categories')),
('users', models.ManyToManyField(related_name='favorite', to=settings.AUTH_USER_MODEL, verbose_name='utilisateurs')),
],
options={
'verbose_name': 'Produit',
'verbose_name_plural': 'Produits',
},
),
]
| [
"django.db.models.URLField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.DateTimeField"
] | [((364, 463), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""identifiant"""'}), "(max_length=300, primary_key=True, serialize=False,\n verbose_name='identifiant')\n", (380, 463), False, 'from django.db import migrations, models\n'), ((495, 558), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'verbose_name': '"""nom du produit"""'}), "(max_length=300, verbose_name='nom du produit')\n", (511, 558), False, 'from django.db import migrations, models\n'), ((594, 655), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)', 'verbose_name': '"""description"""'}), "(max_length=1000, verbose_name='description')\n", (610, 655), False, 'from django.db import migrations, models\n'), ((682, 748), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(1000)', 'verbose_name': '"""url OpenFoodFacts"""'}), "(max_length=1000, verbose_name='url OpenFoodFacts')\n", (697, 748), False, 'from django.db import migrations, models\n'), ((778, 834), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)', 'verbose_name': '"""vendeur"""'}), "(max_length=300, verbose_name='vendeur')\n", (794, 834), False, 'from django.db import migrations, models\n'), ((876, 941), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'verbose_name': '"""score nutritionnel"""'}), "(max_length=1, verbose_name='score nutritionnel')\n", (892, 941), False, 'from django.db import migrations, models\n'), ((977, 1049), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""dernière mise à jour"""'}), "(auto_now=True, verbose_name='dernière mise à jour')\n", (997, 1049), False, 'from django.db import migrations, models\n'), ((1083, 1170), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""substitute_finder.Category"""', 
'verbose_name': '"""categories"""'}), "(to='substitute_finder.Category', verbose_name=\n 'categories')\n", (1105, 1170), False, 'from django.db import migrations, models\n'), ((1194, 1303), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""favorite"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""utilisateurs"""'}), "(related_name='favorite', to=settings.AUTH_USER_MODEL,\n verbose_name='utilisateurs')\n", (1216, 1303), False, 'from django.db import migrations, models\n')] |
# coding=utf-8
# Author: <NAME> <<EMAIL>>
import numpy as np
import re
class KeelAttribute:
"""
A class that represent an attribute of keel dataset format.
"""
TYPE_REAL, TYPE_INTEGER, TYPE_NOMINAL = ("real", "integer", "nominal")
def __init__(self, attribute_name, attribute_type, attribute_range, attribute_builder):
self.name = attribute_name
self.type = attribute_type
self.range = attribute_range
self.builder = attribute_builder
class KeelDataSet:
"""
A class that represent the keel dataset format.
"""
UNKNOWN = '?'
def __init__(self, relation_name, attributes, data, inputs=None, outputs=None):
self.name = relation_name
self.attributes = attributes
self.data = data
self.inputs = inputs
self.outputs = outputs
self.shape = len(data[0]), len(data)
self.ir = self.__imbalance_ratio()
def __get_data(self, attributes):
return [self.data[self.attributes.index(a)] for a in attributes]
def __imbalance_ratio(self):
"""Compute the imbalance ratio of the dataset
"""
labels = self.__get_data(self.outputs)
labels = np.concatenate(labels)
_, count_classes = np.unique(labels, return_counts=True)
max_count = np.max(count_classes)
min_count = np.min(count_classes)
return round((max_count / min_count), 2)
def get_data(self):
"""Returns (data, target) of the dataset.
"""
inputs = self.__get_data(self.inputs)
outputs = self.__get_data(self.outputs)
return np.transpose(inputs), np.concatenate(outputs)
def __str__(self):
row_format = "{:<31}" * 5
labels = self.__get_data(self.outputs)
labels = np.concatenate(labels)
classes = np.unique(labels)
# metadata = f"{self.name}:\tAttributes: {self.shape[1]}\tSamples: {self.shape[0]}\tClasses: {classes.shape[0]}\tImbalance Ratio: {self.ir}"
return row_format.format(f"{self.name} ", *[f"Attributes: {self.shape[1]}", f"Samples: {self.shape[0]}", f"Classes: {classes.shape[0]}", f"IR: {self.ir}"])
def __get_header(self):
"""Get the header of a keel dataset format.
"""
header = f"@relation {self.name}\n"
attributes = []
for attr in self.attributes:
attr_type = "real" if attr.type == KeelAttribute.TYPE_REAL else "integer" if attr.type == KeelAttribute.TYPE_INTEGER else ''
if len(attr_type) > 0:
attributes.append(f"@attribute {attr.name} {attr_type} [{attr.range[0]}, {attr.range[1]}]")
else:
attributes.append("@attribute " + attr.name + " {" + (", ").join(list(attr.range)) + "}")
header += "\n".join(attributes)
header += "\n"
header += f"@inputs {(', ').join([attr.name for attr in self.inputs])}\n"
header += f"@outputs {(', ').join([attr.name for attr in self.outputs])}\n"
header += "@data\n"
return header
def save(self, path):
"""Export the data on keel dataset format.
Parameters
----------
path : str
The filepath to save the dataset.
"""
with open(path, 'w') as f:
# Write header of database
f.write(self.__get_header())
# Write data of database
data = list(map(list, zip(*self.data)))
data = '\n'.join(map(', '.join, map(lambda x: map(str, x), data)))
f.write(data)
def load_keel_file(path):
"""Load a keel dataset format.
Parameters
----------
path : str
The filepath of the keel dataset format.
Returns
-------
keel_dataset: KeelDataset
The keel dataset format loaded.
"""
handle = open(path)
try:
line = handle.readline().strip()
header_parts = line.split()
if header_parts[0] != "@relation" or len(header_parts) != 2:
raise SyntaxError("This is not a valid keel database.")
# Get database name
relation_name = header_parts[1]
# Get attributes
line = handle.readline().strip()
attrs = []
lkp = {}
while line.startswith("@attribute"):
# Get attribute name
attr_name = line.split(" ")[1]
# Get attribute type
match = re.findall(r"\s([a-z]+)\s{0,1}\[", line)
if len(match) > 0:
attr_type = match[0]
else:
attr_type = "nominal"
# Get values range
if attr_type != "nominal":
match = re.findall(r"\[(.*?)\]", line)
attr_builder = float if attr_type == "real" else int
attr_range = tuple(map(attr_builder, match[0].split(",")))
else:
match = re.findall(r"\{(.*?)\}", line)
attr_builder = str
attr_range = tuple(match[0].replace(" ", "").split(","))
keel_attribute = KeelAttribute(attr_name, attr_type, attr_range, attr_builder)
attrs.append(keel_attribute)
lkp[attr_name] = keel_attribute
line = handle.readline().strip()
# Get inputs
if not line.startswith("@input"):
raise SyntaxError("Expected @input or @inputs. " + line)
inputs_parts = line.split(maxsplit=1)
inputs_name = inputs_parts[1].replace(" ", "").split(",")
inputs = [lkp[name] for name in inputs_name]
# Get output
line = handle.readline().strip()
if not line.startswith("@output"):
raise SyntaxError("Expected @outputs or @outputs. " + line)
output_parts = line.split(maxsplit=1)
output_name = output_parts[1].replace(" ", "").split(",")
outputs = [lkp[name] for name in output_name]
# Get data
line = handle.readline().strip()
if line != "@data":
raise SyntaxError("Expected @data.")
data = [[] for _ in range(len(attrs))]
for data_line in handle:
if data_line:
data_values = data_line.strip().replace(" ", "").split(',')
for lst, value, attr in zip(data, data_values, attrs):
v = value
v = v if v == KeelDataSet.UNKNOWN else attr.builder(v)
lst.append(v)
return KeelDataSet(relation_name, attrs, data, inputs, outputs)
finally:
if path:
handle.close()
| [
"numpy.unique",
"numpy.max",
"numpy.concatenate",
"numpy.min",
"re.findall",
"numpy.transpose"
] | [((1202, 1224), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (1216, 1224), True, 'import numpy as np\n'), ((1253, 1290), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (1262, 1290), True, 'import numpy as np\n'), ((1320, 1341), 'numpy.max', 'np.max', (['count_classes'], {}), '(count_classes)\n', (1326, 1341), True, 'import numpy as np\n'), ((1362, 1383), 'numpy.min', 'np.min', (['count_classes'], {}), '(count_classes)\n', (1368, 1383), True, 'import numpy as np\n'), ((1800, 1822), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (1814, 1822), True, 'import numpy as np\n'), ((1842, 1859), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1851, 1859), True, 'import numpy as np\n'), ((1631, 1651), 'numpy.transpose', 'np.transpose', (['inputs'], {}), '(inputs)\n', (1643, 1651), True, 'import numpy as np\n'), ((1653, 1676), 'numpy.concatenate', 'np.concatenate', (['outputs'], {}), '(outputs)\n', (1667, 1676), True, 'import numpy as np\n'), ((4458, 4500), 're.findall', 're.findall', (['"""\\\\s([a-z]+)\\\\s{0,1}\\\\["""', 'line'], {}), "('\\\\s([a-z]+)\\\\s{0,1}\\\\[', line)\n", (4468, 4500), False, 'import re\n'), ((4718, 4749), 're.findall', 're.findall', (['"""\\\\[(.*?)\\\\]"""', 'line'], {}), "('\\\\[(.*?)\\\\]', line)\n", (4728, 4749), False, 'import re\n'), ((4935, 4966), 're.findall', 're.findall', (['"""\\\\{(.*?)\\\\}"""', 'line'], {}), "('\\\\{(.*?)\\\\}', line)\n", (4945, 4966), False, 'import re\n')] |
#!/usr/bin/env python3
import os
import re
import sys
import urllib.request
# api_filename = "projects.md"
api_filename = "groups.md"
url = "https://gitlab.com/gitlab-org/gitlab-ce/raw/master/doc/api/" + api_filename
doc_dir = "doc_tmp"
if not os.path.exists(doc_dir):
os.makedirs(doc_dir)
filename, headers = urllib.request.urlretrieve(url)
with open(filename, 'r') as f:
markdown = f.read()
# print("markdown:", markdown)
urllib.request.urlcleanup()
# Strip out all `json` code blocks included in the file.
p = re.compile("```json.*?```", re.MULTILINE | re.DOTALL)
markdown_wo_json = re.sub(p, "", markdown)
GET_block = "GET /"
p_GET_block = re.compile("```\n(%s.*?)\n```" % GET_block, re.MULTILINE | re.DOTALL)
p_GET_variable = re.compile("(:[^/]*)")
sectionsList = re.sub("[^#]#", "TOSPLIT#", markdown_wo_json).split("TOSPLIT")
for section in sectionsList:
if GET_block in section:
lines = section.splitlines()
title = lines[0].replace("#", "").strip()
# print("title:", title)
# section = re.sub(p_GET_block, "```\n```")
m = p_GET_block.search(section)
GET_command = m.group(1)
GET_variables = p_GET_variable.findall(GET_command)
# Sort the variables in decreasing order of _length_. The reason is that a replace of a shorter
# variable might catch a longer one and corrupt the final result.
GET_variables.sort(key = lambda s: -len(s))
# Replace occurrences of the found variables with upper case, removing the ":"
new_GET_command = GET_command
for GET_variable in GET_variables:
new_GET_command = new_GET_command.replace(GET_variable, GET_variable.replace(":", "").upper())
# section = section.replace(GET_command, new_GET_command)
lines = [line.replace(GET_command, new_GET_command) for line in lines]
# print("title:", title)
filename = api_filename.replace(".md", "") + "-GET-" + title.replace(" ", "-").lower() + ".md"
print("filename:", filename)
full_filename = os.path.join(doc_dir, filename)
with open(full_filename, "w") as f:
f.write("//! %s\n" % title)
f.write("//!\n")
f.write("//! # %s\n" % title)
for line in lines[1:]:
f.write("//! %s\n" % line)
| [
"os.path.exists",
"os.makedirs",
"re.compile",
"os.path.join",
"re.sub"
] | [((530, 583), 're.compile', 're.compile', (['"""```json.*?```"""', '(re.MULTILINE | re.DOTALL)'], {}), "('```json.*?```', re.MULTILINE | re.DOTALL)\n", (540, 583), False, 'import re\n'), ((603, 626), 're.sub', 're.sub', (['p', '""""""', 'markdown'], {}), "(p, '', markdown)\n", (609, 626), False, 'import re\n'), ((663, 732), 're.compile', 're.compile', (["('```\\n(%s.*?)\\n```' % GET_block)", '(re.MULTILINE | re.DOTALL)'], {}), "('```\\n(%s.*?)\\n```' % GET_block, re.MULTILINE | re.DOTALL)\n", (673, 732), False, 'import re\n'), ((750, 772), 're.compile', 're.compile', (['"""(:[^/]*)"""'], {}), "('(:[^/]*)')\n", (760, 772), False, 'import re\n'), ((249, 272), 'os.path.exists', 'os.path.exists', (['doc_dir'], {}), '(doc_dir)\n', (263, 272), False, 'import os\n'), ((278, 298), 'os.makedirs', 'os.makedirs', (['doc_dir'], {}), '(doc_dir)\n', (289, 298), False, 'import os\n'), ((790, 835), 're.sub', 're.sub', (['"""[^#]#"""', '"""TOSPLIT#"""', 'markdown_wo_json'], {}), "('[^#]#', 'TOSPLIT#', markdown_wo_json)\n", (796, 835), False, 'import re\n'), ((2068, 2099), 'os.path.join', 'os.path.join', (['doc_dir', 'filename'], {}), '(doc_dir, filename)\n', (2080, 2099), False, 'import os\n')] |
import os
import pandas as pd
import pytest
import yaml
import wandb
run = wandb.init(project='RP_NVIDIA_Machine_Learning',
job_type='data_validation')
@pytest.fixture(scope='session')
def data():
config_path = os.path.join(os.pardir, 'configs')
with open(os.path.join(config_path, 'dataval_config.yaml'), 'r') as file:
config_name = yaml.safe_load(file)
data_artifact = config_name['parameters']['artifact_name']
if data_artifact is None:
pytest.fail('missing --data_artifact argument')
data_path = run.use_artifact(data_artifact).file()
df = pd.read_csv(data_path)
return df
| [
"pandas.read_csv",
"os.path.join",
"wandb.init",
"pytest.fail",
"yaml.safe_load",
"pytest.fixture"
] | [((78, 154), 'wandb.init', 'wandb.init', ([], {'project': '"""RP_NVIDIA_Machine_Learning"""', 'job_type': '"""data_validation"""'}), "(project='RP_NVIDIA_Machine_Learning', job_type='data_validation')\n", (88, 154), False, 'import wandb\n'), ((175, 206), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (189, 206), False, 'import pytest\n'), ((238, 272), 'os.path.join', 'os.path.join', (['os.pardir', '"""configs"""'], {}), "(os.pardir, 'configs')\n", (250, 272), False, 'import os\n'), ((611, 633), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (622, 633), True, 'import pandas as pd\n'), ((374, 394), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (388, 394), False, 'import yaml\n'), ((498, 545), 'pytest.fail', 'pytest.fail', (['"""missing --data_artifact argument"""'], {}), "('missing --data_artifact argument')\n", (509, 545), False, 'import pytest\n'), ((288, 336), 'os.path.join', 'os.path.join', (['config_path', '"""dataval_config.yaml"""'], {}), "(config_path, 'dataval_config.yaml')\n", (300, 336), False, 'import os\n')] |
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns(
'popcorn_gallery.tutorials.views',
url(r'^(?P<slug>[\w-]+)/$', 'object_detail', name='object_detail'),
url(r'^$', 'object_list', name='object_list'),
)
| [
"django.conf.urls.defaults.url"
] | [((120, 186), 'django.conf.urls.defaults.url', 'url', (['"""^(?P<slug>[\\\\w-]+)/$"""', '"""object_detail"""'], {'name': '"""object_detail"""'}), "('^(?P<slug>[\\\\w-]+)/$', 'object_detail', name='object_detail')\n", (123, 186), False, 'from django.conf.urls.defaults import patterns, url\n'), ((192, 236), 'django.conf.urls.defaults.url', 'url', (['"""^$"""', '"""object_list"""'], {'name': '"""object_list"""'}), "('^$', 'object_list', name='object_list')\n", (195, 236), False, 'from django.conf.urls.defaults import patterns, url\n')] |
"""
This test module has tests relating to kelvin model validations.
All functions in /calculations/models_kelvin.py are tested here.
The purposes are:
- testing the meniscus shape determination function
- testing the output of the kelvin equations
- testing that the "function getter" is performing as expected.
The kelvin functions are tested against pre-calculated values
at several points.
"""
import numpy
import pytest
import pygaps.characterisation.models_kelvin as km
import pygaps.utilities.exceptions as pgEx
@pytest.mark.characterisation
class TestKelvinModels():
    """Test the kelvin models."""

    @pytest.mark.parametrize(
        'branch, pore, geometry', [
            ('ads', 'slit', 'hemicylindrical'),
            ('ads', 'cylinder', 'cylindrical'),
            ('ads', 'sphere', 'hemispherical'),
            ('des', 'slit', 'hemicylindrical'),
            ('des', 'cylinder', 'hemispherical'),
            ('des', 'sphere', 'hemispherical'),
        ]
    )
    def test_meniscus_geometry(self, branch, pore, geometry):
        """Each (isotherm branch, pore shape) pair maps to a known meniscus geometry."""
        assert km.get_meniscus_geometry(branch, pore) == geometry

    @pytest.mark.parametrize(
        'model, pressure', [
            (km._KELVIN_MODELS['Kelvin'], [0.1, 0.4, 0.9]),
        ]
    )
    @pytest.mark.parametrize(
        'geometry, c_radius', [
            ('cylindrical', [0.208, 0.522, 4.539]),
            ('hemispherical', [0.415, 1.044, 9.078]),
            ('hemicylindrical', [0.831, 2.090, 18.180]),
        ]
    )
    def test_kelvin_model(
        self, model, geometry, pressure, c_radius, basic_adsorbate
    ):
        """Test each model against pre-calculated values for N2 at 77K.

        Fix: the parametrized ``pressure`` values are now used directly; the
        previous hard-coded local reassignment shadowed them and made the
        parametrization ineffective.
        """
        temperature = 77.355  # K, nitrogen adsorption conditions
        for value, expected in zip(pressure, c_radius):
            radius = model(
                value, geometry, temperature,
                basic_adsorbate.liquid_density(temperature),
                basic_adsorbate.molar_mass(),
                basic_adsorbate.surface_tension(temperature)
            )
            # pre-calculated radii are expected to match to ~1%
            assert numpy.isclose(radius, expected, 0.01, 0.01)

    def test_kelvin_kjs_model(self, basic_adsorbate):
        """Test Kelvin KJS model against pre-calculated values for N2 at 77K."""
        temperature = 77.355
        pressure = [0.1, 0.4, 0.9]
        c_radius = [0.715, 1.344, 9.378]
        model = km._KELVIN_MODELS['Kelvin-KJS']
        geometry = 'cylindrical'
        for value, expected in zip(pressure, c_radius):
            radius = model(
                value, geometry, temperature,
                basic_adsorbate.liquid_density(temperature),
                basic_adsorbate.molar_mass(),
                basic_adsorbate.surface_tension(temperature)
            )
            assert numpy.isclose(radius, expected, 0.01, 0.01)
        # The KJS-corrected model only accepts cylindrical geometry: any other
        # geometry must raise a ParameterError.
        geometry = 'hemispherical'
        with pytest.raises(pgEx.ParameterError):
            radius = model(
                value, geometry, temperature,
                basic_adsorbate.liquid_density(temperature),
                basic_adsorbate.molar_mass(),
                basic_adsorbate.surface_tension(temperature)
            )

    def test_get_kelvin_error(self):
        """When the model requested is not found we raise."""
        with pytest.raises(pgEx.ParameterError):
            km.get_kelvin_model('bad_model')

    def test_get_kelvin_callable(self):
        """When we pass a function and dict, we receive a partial back."""
        def call_this(addendum):
            return 'called' + addendum

        ret = km.get_kelvin_model(call_this, addendum='add')
        assert ret() == 'calledadd'
| [
"pygaps.characterisation.models_kelvin.get_meniscus_geometry",
"numpy.isclose",
"pytest.mark.parametrize",
"pytest.raises",
"pygaps.characterisation.models_kelvin.get_kelvin_model"
] | [((633, 914), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""branch, pore, geometry"""', "[('ads', 'slit', 'hemicylindrical'), ('ads', 'cylinder', 'cylindrical'), (\n 'ads', 'sphere', 'hemispherical'), ('des', 'slit', 'hemicylindrical'),\n ('des', 'cylinder', 'hemispherical'), ('des', 'sphere', 'hemispherical')]"], {}), "('branch, pore, geometry', [('ads', 'slit',\n 'hemicylindrical'), ('ads', 'cylinder', 'cylindrical'), ('ads',\n 'sphere', 'hemispherical'), ('des', 'slit', 'hemicylindrical'), ('des',\n 'cylinder', 'hemispherical'), ('des', 'sphere', 'hemispherical')])\n", (656, 914), False, 'import pytest\n'), ((1185, 1282), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model, pressure"""', "[(km._KELVIN_MODELS['Kelvin'], [0.1, 0.4, 0.9])]"], {}), "('model, pressure', [(km._KELVIN_MODELS['Kelvin'], [\n 0.1, 0.4, 0.9])])\n", (1208, 1282), False, 'import pytest\n'), ((1320, 1502), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""geometry, c_radius"""', "[('cylindrical', [0.208, 0.522, 4.539]), ('hemispherical', [0.415, 1.044, \n 9.078]), ('hemicylindrical', [0.831, 2.09, 18.18])]"], {}), "('geometry, c_radius', [('cylindrical', [0.208, \n 0.522, 4.539]), ('hemispherical', [0.415, 1.044, 9.078]), (\n 'hemicylindrical', [0.831, 2.09, 18.18])])\n", (1343, 1502), False, 'import pytest\n'), ((3655, 3701), 'pygaps.characterisation.models_kelvin.get_kelvin_model', 'km.get_kelvin_model', (['call_this'], {'addendum': '"""add"""'}), "(call_this, addendum='add')\n", (3674, 3701), True, 'import pygaps.characterisation.models_kelvin as km\n'), ((1128, 1166), 'pygaps.characterisation.models_kelvin.get_meniscus_geometry', 'km.get_meniscus_geometry', (['branch', 'pore'], {}), '(branch, pore)\n', (1152, 1166), True, 'import pygaps.characterisation.models_kelvin as km\n'), ((2121, 2171), 'numpy.isclose', 'numpy.isclose', (['radius', 'c_radius[index]', '(0.01)', '(0.01)'], {}), '(radius, c_radius[index], 0.01, 0.01)\n', (2134, 2171), False, 
'import numpy\n'), ((2819, 2869), 'numpy.isclose', 'numpy.isclose', (['radius', 'c_radius[index]', '(0.01)', '(0.01)'], {}), '(radius, c_radius[index], 0.01, 0.01)\n', (2832, 2869), False, 'import numpy\n'), ((2966, 3000), 'pytest.raises', 'pytest.raises', (['pgEx.ParameterError'], {}), '(pgEx.ParameterError)\n', (2979, 3000), False, 'import pytest\n'), ((3371, 3405), 'pytest.raises', 'pytest.raises', (['pgEx.ParameterError'], {}), '(pgEx.ParameterError)\n', (3384, 3405), False, 'import pytest\n'), ((3419, 3451), 'pygaps.characterisation.models_kelvin.get_kelvin_model', 'km.get_kelvin_model', (['"""bad_model"""'], {}), "('bad_model')\n", (3438, 3451), True, 'import pygaps.characterisation.models_kelvin as km\n')] |
import os
from datetime import datetime
import sys
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from jet_bridge_base import configuration
from jet_bridge.configuration import JetBridgeConfiguration
# Register this process's configuration object before the remaining
# jet_bridge_base imports below are executed.
conf = JetBridgeConfiguration()
configuration.set_configuration(conf)
from jet_bridge_base.commands.check_token import check_token_command
from jet_bridge_base.db import database_connect
from jet_bridge_base.logger import logger
from jet_bridge import settings, VERSION
from jet_bridge.settings import missing_options, required_options_without_default
def main():
    """Entry point: parse args, validate configuration, and start the HTTP server."""
    # CLI args may also be supplied via the ARGS environment variable
    # (space-separated), which takes precedence over sys.argv.
    args = sys.argv[1:]
    if 'ARGS' in os.environ:
        args = os.environ['ARGS'].split(' ')
    logger.info(datetime.now().strftime('%B %d, %Y - %H:%M:%S %Z'))
    logger.info('Jet Bridge version {}'.format(VERSION))
    # Run the interactive config wizard when explicitly requested, or when
    # none of the required options have been provided at all.
    if (len(args) >= 1 and args[0] == 'config') or missing_options == required_options_without_default:
        from jet_bridge.utils.create_config import create_config
        create_config(missing_options == required_options_without_default)
        return
    elif len(missing_options) and len(missing_options) < len(required_options_without_default):
        # Some (but not all) required options are missing: report and abort.
        logger.info('Required options are not specified: {}'.format(', '.join(missing_options)))
        return
    # Build a human-friendly URL for log messages (0.0.0.0 is not browsable).
    address = 'localhost' if settings.ADDRESS == '0.0.0.0' else settings.ADDRESS
    url = 'http://{}:{}/'.format(address, settings.PORT)
    api_url = '{}api/'.format(url)
    if len(args) >= 1:
        if args[0] == 'check_token':
            # Stand-alone token check: verify and exit without starting a server.
            check_token_command(api_url)
            return
    database_connect()
    # Imported here so the database connection exists before the app is built.
    from jet_bridge.app import make_app
    app = make_app()
    server = HTTPServer(app)
    server.bind(settings.PORT, settings.ADDRESS)
    # Multiple worker processes are only used outside DEBUG mode.
    server.start(settings.WORKERS if not settings.DEBUG else 1)
    if settings.WORKERS > 1 and settings.DEBUG:
        logger.warning('Multiple workers are not supported in DEBUG mode')
    logger.info('Starting server at {}'.format(url))
    if settings.DEBUG:
        logger.warning('Server is running in DEBUG mode')
    logger.info('Quit the server with CONTROL-C')
    # NOTE(review): the token check also runs unconditionally here, before the
    # IO loop starts -- presumably to report token status at startup; confirm.
    check_token_command(api_url)
    IOLoop.current().start()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"jet_bridge_base.logger.logger.warning",
"jet_bridge_base.logger.logger.info",
"jet_bridge.utils.create_config.create_config",
"tornado.ioloop.IOLoop.current",
"jet_bridge.configuration.JetBridgeConfiguration",
"jet_bridge_base.db.database_connect",
"datetime.datetime.now",
"jet_bridge_base.commands.c... | [((239, 263), 'jet_bridge.configuration.JetBridgeConfiguration', 'JetBridgeConfiguration', ([], {}), '()\n', (261, 263), False, 'from jet_bridge.configuration import JetBridgeConfiguration\n'), ((264, 301), 'jet_bridge_base.configuration.set_configuration', 'configuration.set_configuration', (['conf'], {}), '(conf)\n', (295, 301), False, 'from jet_bridge_base import configuration\n'), ((1593, 1611), 'jet_bridge_base.db.database_connect', 'database_connect', ([], {}), '()\n', (1609, 1611), False, 'from jet_bridge_base.db import database_connect\n'), ((1664, 1674), 'jet_bridge.app.make_app', 'make_app', ([], {}), '()\n', (1672, 1674), False, 'from jet_bridge.app import make_app\n'), ((1688, 1703), 'tornado.httpserver.HTTPServer', 'HTTPServer', (['app'], {}), '(app)\n', (1698, 1703), False, 'from tornado.httpserver import HTTPServer\n'), ((2082, 2127), 'jet_bridge_base.logger.logger.info', 'logger.info', (['"""Quit the server with CONTROL-C"""'], {}), "('Quit the server with CONTROL-C')\n", (2093, 2127), False, 'from jet_bridge_base.logger import logger\n'), ((2133, 2161), 'jet_bridge_base.commands.check_token.check_token_command', 'check_token_command', (['api_url'], {}), '(api_url)\n', (2152, 2161), False, 'from jet_bridge_base.commands.check_token import check_token_command\n'), ((1003, 1069), 'jet_bridge.utils.create_config.create_config', 'create_config', (['(missing_options == required_options_without_default)'], {}), '(missing_options == required_options_without_default)\n', (1016, 1069), False, 'from jet_bridge.utils.create_config import create_config\n'), ((1874, 1940), 'jet_bridge_base.logger.logger.warning', 'logger.warning', (['"""Multiple workers are not supported in DEBUG mode"""'], {}), "('Multiple workers are not supported in DEBUG mode')\n", (1888, 1940), False, 'from jet_bridge_base.logger import logger\n'), ((2027, 2076), 'jet_bridge_base.logger.logger.warning', 'logger.warning', (['"""Server is running in DEBUG 
mode"""'], {}), "('Server is running in DEBUG mode')\n", (2041, 2076), False, 'from jet_bridge_base.logger import logger\n'), ((1540, 1568), 'jet_bridge_base.commands.check_token.check_token_command', 'check_token_command', (['api_url'], {}), '(api_url)\n', (1559, 1568), False, 'from jet_bridge_base.commands.check_token import check_token_command\n'), ((2167, 2183), 'tornado.ioloop.IOLoop.current', 'IOLoop.current', ([], {}), '()\n', (2181, 2183), False, 'from tornado.ioloop import IOLoop\n'), ((716, 730), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (728, 730), False, 'from datetime import datetime\n')] |
from distutils.spawn import find_executable
from os import path
import click
from .settings import (
BASE_DEVELOPMENT_REQUIREMENTS_FILENAME,
BASE_REQUIREMENTS_FILENAME,
DEVELOPMENT_REQUIREMENTS_FILENAME,
REQUIREMENTS_FILENAME,
)
from .util import print_and_run
def _ensure_pip_tools_installed():
    # Install pip-tools on demand: `pip-sync` on the PATH means it is present.
    if find_executable('pip-sync'):
        return
    click.echo('Installing pip-tools')
    print_and_run(('pip', 'install', 'pip-tools'))
# Top-level click command group; subcommands register themselves via
# @cli.command(). (No docstring on purpose -- click would show it as help text.)
@click.group()
def cli():
    pass
@cli.command()
@click.option('--dev', is_flag=True, default=False)
def install(dev):
    # Sync the current environment to the pinned requirements; --dev selects
    # the development pin set instead of the production one.
    _ensure_pip_tools_installed()
    if dev:
        target = DEVELOPMENT_REQUIREMENTS_FILENAME
    else:
        target = REQUIREMENTS_FILENAME
    print_and_run(('pip-sync', target))
    click.echo('Requirements setup complete!')
@cli.command()
def update():
    # Regenerate the pinned requirements file from the base (unpinned) spec.
    _ensure_pip_tools_installed()
    print_and_run((
        'pip-compile',
        '-q',
        f'--output-file={path.relpath(REQUIREMENTS_FILENAME)}',
        path.relpath(BASE_REQUIREMENTS_FILENAME),
    ))
    # Fixed typo in the user-facing message ("Requiremnts" -> "Requirements").
    click.echo(f'Requirements file updated: {path.relpath(REQUIREMENTS_FILENAME)}')
@cli.command()
def update_dev():
    # Regenerate the pinned development requirements from the base dev spec.
    _ensure_pip_tools_installed()
    print_and_run((
        'pip-compile',
        '-q',
        f'--output-file={path.relpath(DEVELOPMENT_REQUIREMENTS_FILENAME)}',
        path.relpath(BASE_DEVELOPMENT_REQUIREMENTS_FILENAME),
    ))
    # Report the relative path, for consistency with the `update` command.
    click.echo(f'Development requirements file updated: {path.relpath(DEVELOPMENT_REQUIREMENTS_FILENAME)}')
# Script entry point.
if __name__ == '__main__':
    cli()
| [
"distutils.spawn.find_executable",
"click.group",
"click.option",
"click.echo",
"os.path.relpath"
] | [((457, 470), 'click.group', 'click.group', ([], {}), '()\n', (468, 470), False, 'import click\n'), ((509, 559), 'click.option', 'click.option', (['"""--dev"""'], {'is_flag': '(True)', 'default': '(False)'}), "('--dev', is_flag=True, default=False)\n", (521, 559), False, 'import click\n'), ((794, 836), 'click.echo', 'click.echo', (['"""Requirements setup complete!"""'], {}), "('Requirements setup complete!')\n", (804, 836), False, 'import click\n'), ((1442, 1541), 'click.echo', 'click.echo', (['f"""Development requirements file updated: {DEVELOPMENT_REQUIREMENTS_FILENAME}"""'], {}), "(\n f'Development requirements file updated: {DEVELOPMENT_REQUIREMENTS_FILENAME}'\n )\n", (1452, 1541), False, 'import click\n'), ((327, 354), 'distutils.spawn.find_executable', 'find_executable', (['"""pip-sync"""'], {}), "('pip-sync')\n", (342, 354), False, 'from distutils.spawn import find_executable\n'), ((364, 398), 'click.echo', 'click.echo', (['"""Installing pip-tools"""'], {}), "('Installing pip-tools')\n", (374, 398), False, 'import click\n'), ((1032, 1072), 'os.path.relpath', 'path.relpath', (['BASE_REQUIREMENTS_FILENAME'], {}), '(BASE_REQUIREMENTS_FILENAME)\n', (1044, 1072), False, 'from os import path\n'), ((1376, 1428), 'os.path.relpath', 'path.relpath', (['BASE_DEVELOPMENT_REQUIREMENTS_FILENAME'], {}), '(BASE_DEVELOPMENT_REQUIREMENTS_FILENAME)\n', (1388, 1428), False, 'from os import path\n'), ((1126, 1161), 'os.path.relpath', 'path.relpath', (['REQUIREMENTS_FILENAME'], {}), '(REQUIREMENTS_FILENAME)\n', (1138, 1161), False, 'from os import path\n'), ((985, 1020), 'os.path.relpath', 'path.relpath', (['REQUIREMENTS_FILENAME'], {}), '(REQUIREMENTS_FILENAME)\n', (997, 1020), False, 'from os import path\n'), ((1317, 1364), 'os.path.relpath', 'path.relpath', (['DEVELOPMENT_REQUIREMENTS_FILENAME'], {}), '(DEVELOPMENT_REQUIREMENTS_FILENAME)\n', (1329, 1364), False, 'from os import path\n')] |
# Copyright © 2020 Interplanetary Database Association e.V.,
# Planetmint and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from typing import List
from planetmint_driver import Planetmint
class Hosts:
    """Hold one Planetmint driver connection per host listed in a text file.

    The input file is expected to contain one hostname/URL per line; blank
    lines and surrounding whitespace are ignored.
    """

    def __init__(self, filepath):
        # Instance attributes (previously shared, mutable class attributes --
        # separate Hosts objects would alias each other's state).
        self.hostnames = []
        self.connections = []
        self.set_hostnames(filepath=filepath)
        self.set_connections()

    def set_hostnames(self, filepath) -> None:
        """Read hostnames from *filepath*, one per line, stripped of whitespace."""
        with open(filepath) as f:
            # readlines() keeps trailing newlines, which would end up embedded
            # in the connection URLs -- strip them and drop blank lines.
            self.hostnames = [line.strip() for line in f if line.strip()]

    def set_connections(self) -> None:
        """Create a Planetmint driver connection for every known hostname."""
        self.connections = [Planetmint(h) for h in self.hostnames]

    def get_connection(self, index=0) -> "Planetmint":
        """Return the connection at *index* (defaults to the first host)."""
        return self.connections[index]

    def get_transactions(self, tx_id) -> List:
        """Retrieve the transaction *tx_id* from every host."""
        return [conn.transactions.retrieve(tx_id) for conn in self.connections]

    def assert_transaction(self, tx_id) -> None:
        """Assert that all hosts agree on the content of transaction *tx_id*."""
        txs = self.get_transactions(tx_id)
        for tx in txs:
            # All hosts must return the same transaction payload.
            assert txs[0] == tx, \
                'Cannot find transaction {}'.format(tx_id)
| [
"planetmint_driver.Planetmint"
] | [((649, 662), 'planetmint_driver.Planetmint', 'Planetmint', (['h'], {}), '(h)\n', (659, 662), False, 'from planetmint_driver import Planetmint\n')] |
from flask import Blueprint, flash, redirect, render_template, request, url_for
from werkzeug.security import check_password_hash, generate_password_hash
from flask_login import login_required, login_user, logout_user
from logbook.models import User, db
from peewee import fn
# Blueprint grouping all authentication routes (login/signup/logout).
auth = Blueprint("auth", __name__)
@auth.route("/login")
def login():
    # Render the login form (GET).
    return render_template("login.html")
@auth.route("/login", methods=["POST"])
def login_post():
    # Authenticate the submitted credentials and start a session.
    # Default missing form fields to "" so absent keys fail authentication
    # gracefully instead of raising AttributeError/TypeError.
    username = request.form.get("username", "")
    password = request.form.get("password", "")
    # The "remember" checkbox is only present in the form data when ticked.
    remember = bool(request.form.get("remember"))
    # Username comparison is case-insensitive.
    user = User.get_or_none(fn.Lower(User.username) == username.lower())
    # inform the user if the username/password is wrong -- a deliberately
    # vague message so we do not reveal whether the username exists
    if user is None or not check_password_hash(user.password, password):
        flash("Please check your login details and try again.")
        return redirect(url_for("logbook.index_next_pages"))
    login_user(user, remember=remember)
    return redirect(url_for("logbook.index_next_pages"))
@auth.route("/signup")
def signup():
    # Render the signup form (GET).
    return render_template("signup.html")
@auth.route("/signup", methods=["POST"])
def signup_post():
    # Process the signup form: reject duplicate usernames, otherwise create
    # the account and send the user to the login page.
    username = request.form.get("username").lower()
    password = request.form.get("password")
    # A non-None result means the username is already taken (case-insensitive).
    existing = User.get_or_none(fn.Lower(User.username) == username)
    if existing is not None:
        flash("Username already exists")
        return redirect(url_for("auth.signup"))
    # Store only the salted hash of the password, never the plaintext.
    hashed = generate_password_hash(password, method="sha256")
    new_user = User(username=username, password=hashed)
    new_user.save()
    return redirect(url_for("auth.login"))
@auth.route("/logout", methods=["POST"])
@login_required
def logout():
    # End the current session and send the user back to the login page.
    logout_user()
    return redirect(url_for("auth.login"))
| [
"flask.render_template",
"flask.flash",
"flask_login.login_user",
"flask_login.logout_user",
"flask.request.form.get",
"flask.url_for",
"peewee.fn.Lower",
"werkzeug.security.generate_password_hash",
"flask.Blueprint",
"werkzeug.security.check_password_hash"
] | [((292, 319), 'flask.Blueprint', 'Blueprint', (['"""auth"""', '__name__'], {}), "('auth', __name__)\n", (301, 319), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((373, 402), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (388, 402), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((483, 511), 'flask.request.form.get', 'request.form.get', (['"""username"""'], {}), "('username')\n", (499, 511), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((528, 556), 'flask.request.form.get', 'request.form.get', (['"""password"""'], {}), "('password')\n", (544, 556), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((964, 999), 'flask_login.login_user', 'login_user', (['user'], {'remember': 'remember'}), '(user, remember=remember)\n', (974, 999), False, 'from flask_login import login_required, login_user, logout_user\n'), ((1113, 1143), 'flask.render_template', 'render_template', (['"""signup.html"""'], {}), "('signup.html')\n", (1128, 1143), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1281, 1309), 'flask.request.form.get', 'request.form.get', (['"""password"""'], {}), "('password')\n", (1297, 1309), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1945, 1958), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (1956, 1958), False, 'from flask_login import login_required, login_user, logout_user\n'), ((581, 609), 'flask.request.form.get', 'request.form.get', (['"""remember"""'], {}), "('remember')\n", (597, 609), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((839, 894), 'flask.flash', 'flash', (['"""Please check your login details and try again."""'], {}), "('Please check your login details and try 
again.')\n", (844, 894), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1021, 1056), 'flask.url_for', 'url_for', (['"""logbook.index_next_pages"""'], {}), "('logbook.index_next_pages')\n", (1028, 1056), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1500, 1532), 'flask.flash', 'flash', (['"""Username already exists"""'], {}), "('Username already exists')\n", (1505, 1532), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1839, 1860), 'flask.url_for', 'url_for', (['"""auth.login"""'], {}), "('auth.login')\n", (1846, 1860), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1980, 2001), 'flask.url_for', 'url_for', (['"""auth.login"""'], {}), "('auth.login')\n", (1987, 2001), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((652, 675), 'peewee.fn.Lower', 'fn.Lower', (['User.username'], {}), '(User.username)\n', (660, 675), False, 'from peewee import fn\n'), ((784, 828), 'werkzeug.security.check_password_hash', 'check_password_hash', (['user.password', 'password'], {}), '(user.password, password)\n', (803, 828), False, 'from werkzeug.security import check_password_hash, generate_password_hash\n'), ((920, 955), 'flask.url_for', 'url_for', (['"""logbook.index_next_pages"""'], {}), "('logbook.index_next_pages')\n", (927, 955), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1228, 1256), 'flask.request.form.get', 'request.form.get', (['"""username"""'], {}), "('username')\n", (1244, 1256), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1351, 1374), 'peewee.fn.Lower', 'fn.Lower', (['User.username'], {}), '(User.username)\n', (1359, 1374), False, 'from peewee import fn\n'), ((1558, 1580), 'flask.url_for', 'url_for', (['"""auth.signup"""'], {}), 
"('auth.signup')\n", (1565, 1580), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1738, 1787), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['password'], {'method': '"""sha256"""'}), "(password, method='sha256')\n", (1760, 1787), False, 'from werkzeug.security import check_password_hash, generate_password_hash\n')] |
import aiohttp
from aiohttp import ClientConnectionError, ClientResponseError
from .models import CoinsResponse, SimplePriceResponse
from .configs import Config
from typing import List, Dict, Union
class APIHandler:
    """Thin async HTTP client for the coin endpoints described by Config."""

    def __init__(self):
        self._config: Config = Config()

    async def get_supported_coins(self) -> List[CoinsResponse]:
        """Fetch the list of supported coins and map it to CoinsResponse models.

        Re-raises whatever exception the HTTP call or JSON decoding produced,
        after logging its type.
        """
        uri: str = self._config.coins_list_uri
        async with aiohttp.ClientSession() as session:
            async with session.get(uri) as resp:
                try:
                    res: List[Dict[str, str]] = await resp.json()
                except Exception as e:
                    # ClientConnectionError/ClientResponseError are Exception
                    # subclasses, so one broad clause covers the old tuple.
                    print(f'Exception from API: {type(e).__name__}')
                    raise
                return [
                    CoinsResponse(
                        id=coin.get('id', ''),
                        symbol=coin.get('symbol', ''),
                        name=coin.get('name', ''),
                    )
                    for coin in res
                ]

    async def get_simple_price(self, currencies: List[str]) -> Union[List[SimplePriceResponse], None]:
        """Fetch prices for *currencies*; return None if the payload is not a dict."""
        uri: str = self._config.simple_price_uri(currencies)
        async with aiohttp.ClientSession() as session:
            async with session.get(uri) as resp:
                try:
                    res: Union[Dict, None] = await resp.json()
                    # Guard against a None payload before probing for an
                    # API-level error message.
                    if res is not None and res.get('error', None) is not None:
                        raise Exception(res['error'])
                except Exception as e:
                    print(f'Exception from API: {type(e).__name__}')
                    raise
                if not isinstance(res, dict):
                    return None
                responses: List[SimplePriceResponse] = []
                for coin_id, quotes in res.items():
                    # Each value maps a single quote currency to its price.
                    quote = list(quotes.keys())[0]
                    responses.append(
                        SimplePriceResponse(id=coin_id, quote=quote, price=quotes[quote])
                    )
                return responses
| [
"aiohttp.ClientSession"
] | [((413, 436), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (434, 436), False, 'import aiohttp\n'), ((1469, 1492), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1490, 1492), False, 'import aiohttp\n')] |
from argparse import ArgumentParser
from pathlib import Path
from tensorflow import keras
# Define this script's flags
parser = ArgumentParser()
parser.add_argument('--lr', type=float, default=1e-3)  # learning rate for Adam
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--max_epochs', type=int, default=5)
parser.add_argument('--data_dir', type=str, default="./data/")  # where mnist.npz is cached
args = parser.parse_args()
# Make sure data_dir is absolute + create it if it doesn't exist
data_dir = Path(args.data_dir).absolute()
data_dir.mkdir(parents=True, exist_ok=True)
# Download and/or load data from disk
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data(data_dir / 'mnist.npz')
# Standardize X's to be between 0.0-1.0 instead of 0-255
x_train, x_test = x_train.astype("float32") / 255, x_test.astype("float32") / 255
# Build Model: a small MLP -- flatten the 28x28 input, one hidden layer,
# softmax over the 10 digit classes
model = keras.models.Sequential(
    [
        keras.layers.Flatten(input_shape=(28, 28, 1)),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(10, activation='softmax'),
    ]
)
# Compile: labels are integers, hence the "sparse" loss/metric variants
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=args.lr),
    loss="sparse_categorical_crossentropy",
    metrics=["sparse_categorical_accuracy"],
)
# Train: hold out 10% of the training set for validation; log to TensorBoard
history = model.fit(
    x_train,
    y_train,
    batch_size=args.batch_size,
    epochs=args.max_epochs,
    validation_split=0.1,
    callbacks=[keras.callbacks.TensorBoard(log_dir='./lightning_logs/keras')],
)
# Evaluate on the held-out test set
model.evaluate(x_test, y_test)
| [
"tensorflow.keras.callbacks.TensorBoard",
"argparse.ArgumentParser",
"pathlib.Path",
"tensorflow.keras.datasets.mnist.load_data",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Flatten"
] | [((130, 146), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (144, 146), False, 'from argparse import ArgumentParser\n'), ((636, 690), 'tensorflow.keras.datasets.mnist.load_data', 'keras.datasets.mnist.load_data', (["(data_dir / 'mnist.npz')"], {}), "(data_dir / 'mnist.npz')\n", (666, 690), False, 'from tensorflow import keras\n'), ((483, 502), 'pathlib.Path', 'Path', (['args.data_dir'], {}), '(args.data_dir)\n', (487, 502), False, 'from pathlib import Path\n'), ((893, 938), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {'input_shape': '(28, 28, 1)'}), '(input_shape=(28, 28, 1))\n', (913, 938), False, 'from tensorflow import keras\n'), ((948, 990), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (966, 990), False, 'from tensorflow import keras\n'), ((1000, 1044), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (1018, 1044), False, 'from tensorflow import keras\n'), ((1094, 1138), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'args.lr'}), '(learning_rate=args.lr)\n', (1115, 1138), False, 'from tensorflow import keras\n'), ((1388, 1449), 'tensorflow.keras.callbacks.TensorBoard', 'keras.callbacks.TensorBoard', ([], {'log_dir': '"""./lightning_logs/keras"""'}), "(log_dir='./lightning_logs/keras')\n", (1415, 1449), False, 'from tensorflow import keras\n')] |
import pygame as pg
from pygame.time import Clock
from src.drawer import Drawer
from src.game import Game
from src.utils.config import Config
from src.utils.score import ScoresList
from src.utils.sfx import SfxHolder
from src.utils.text import Text
from src.utils.util import Util, user_quit
class Loop:
    """Run the game's event/update/draw loops: the main loop and the game-over screen."""

    def __init__(self, util: Util, cfg: Config, sfx: SfxHolder, txt: Text,
                 drawer: Drawer):
        # Collaborators are injected so the loop itself stays free of setup logic.
        self.util = util
        self.cfg = cfg
        self.sfx = sfx
        self.txt = txt
        self.drawer = drawer
        self.clock = Clock()  # pygame clock used for frame timing / dt

    def main(self, screen, game: Game) -> bool:
        """Run the main loop; True when the game ended, False when the user quit."""
        # Dump first tick to ignore past
        self.clock.tick(self.cfg.frames_per_second)
        while True:
            # Get change in time (ms since the previous tick, capped at target FPS)
            dt = self.clock.tick(self.cfg.frames_per_second)
            # Loop over events (quit, key down, key up)
            for event in pg.event.get():
                if user_quit(event):
                    return False
                elif event.type == pg.KEYDOWN:
                    if event.key in self.cfg.all_keys:
                        game.press_key(event.key)
                elif event.type == pg.KEYUP:
                    if event.key in self.cfg.all_keys:
                        game.release_key(event.key)
            # Move and draw game (with possible paused screen and fps)
            if not game.paused:
                game.move(dt)
            if not game.game_over:
                self.drawer.draw_game(screen, game, dt)
                if game.paused:
                    self.drawer.draw_paused_overlay(screen)
                if self.cfg.draw_fps:
                    self.drawer.draw_fps(screen, self.clock.get_fps())
            # Update display
            pg.display.update()
            # Break if game no longer running
            if game.game_over:
                return True

    def game_over(self, screen, game: Game, scores: ScoresList) -> bool:
        """Show the fade-in game-over screen; True to restart, False to quit.

        Scores can be saved at most once per game-over screen.
        """
        score_saved = False  # not saved yet
        self.sfx.game_over.play()  # play audio
        i = 0  # fade-in step counter (0..255)
        while True:
            # Fade-in game over screen
            if i < 256:
                # NOTE(review): events consumed by this dummy get are dropped
                # before the handler loop below runs -- confirm intended.
                pg.event.get()  # dummy get
                self.drawer.draw_game(screen, game, 0)  # draw game
                self.drawer.draw_game_over_overlay(
                    screen, i, score_saved)  # fade-in game over screen
                self.clock.tick(60)  # slow-down the fade-in
                # Refresh screen
                pg.display.flip()
                i += 1
            # Check for quit or restart events
            for event in pg.event.get():
                if user_quit(event):
                    return False
                elif event.type == pg.MOUSEBUTTONDOWN:
                    if self.txt.restart_rect.collidepoint(*event.pos):
                        return True
                    elif not score_saved and \
                            self.txt.save_score_rect.collidepoint(*event.pos):
                        # Persist all of this game's scores exactly once.
                        for score in game.get_scores():
                            scores.add_score(score)
                        scores.write()
                        score_saved = True
| [
"pygame.event.get",
"pygame.display.flip",
"src.utils.util.user_quit",
"pygame.time.Clock",
"pygame.display.update"
] | [((560, 567), 'pygame.time.Clock', 'Clock', ([], {}), '()\n', (565, 567), False, 'from pygame.time import Clock\n'), ((907, 921), 'pygame.event.get', 'pg.event.get', ([], {}), '()\n', (919, 921), True, 'import pygame as pg\n'), ((1749, 1768), 'pygame.display.update', 'pg.display.update', ([], {}), '()\n', (1766, 1768), True, 'import pygame as pg\n'), ((2601, 2615), 'pygame.event.get', 'pg.event.get', ([], {}), '()\n', (2613, 2615), True, 'import pygame as pg\n'), ((942, 958), 'src.utils.util.user_quit', 'user_quit', (['event'], {}), '(event)\n', (951, 958), False, 'from src.utils.util import Util, user_quit\n'), ((2156, 2170), 'pygame.event.get', 'pg.event.get', ([], {}), '()\n', (2168, 2170), True, 'import pygame as pg\n'), ((2487, 2504), 'pygame.display.flip', 'pg.display.flip', ([], {}), '()\n', (2502, 2504), True, 'import pygame as pg\n'), ((2636, 2652), 'src.utils.util.user_quit', 'user_quit', (['event'], {}), '(event)\n', (2645, 2652), False, 'from src.utils.util import Util, user_quit\n')] |
#!/usr/bin/env python
"""A third example to get started with tinypyki.
Toying with mass certificate generation.
"""
import os
import tinypyki as tiny
# Build the demo PKI: one root CA plus a batch of end-entity ("user") nodes.
print("Creating a pki instance named \"mass-pki\"")
demo_pki = tiny.PKI("mass-pki")

print("Create the \"root-ca\"")
ca_node = tiny.Node(nid="root-ca", pathlen=1, san="email=<EMAIL>")

print("Create 10 sub nodes")
leaf_nodes = []
for i in range(10):
    leaf_nodes.append(
        tiny.Node(
            nid="target-{0}".format(i),
            issuer="root-ca",
            ntype="u",
            san="ip=192.168.0.{0}, dns=hexample.com".format((175 + i) % 256),
        )
    )

print("Insert the root-ca then all nodes in the pki")
tiny.do.insert(ca_node, demo_pki)
for leaf in leaf_nodes:
    tiny.change.subj(leaf, cn=leaf.nid + "-dummy-hexample")
    tiny.do.insert(leaf, demo_pki)

print("Create everything, including p12 bundles")
tiny.do.everything(demo_pki, pkcs12=True)

print("Observe the pki changes")
tiny.show(demo_pki)

# Uncomment this if you wish to see the contents of all the files
# print("Showing the contents of all files")
# for node in demo_pki.nodes.values():
#     tiny.show(node.key_path)
#     tiny.show(node.csr_path)
#     tiny.show(node.cert_path)
#     tiny.show(node.crl_path)

print("Revoking every other certificate")
for node in demo_pki.nodes.values():
    # Valid reasons: "unspecified", "keycompromise", "cacompromise",
    # "affiliationchanged", "superseded", "cessationofoperation",
    # "certificatehold", "removefromcrl"
    if node.nid.startswith("target") and int(node.nid.split("-")[-1]) % 2 == 0:
        tiny.do.revoke(node, reason="keycompromise")

print("Observe the crl changes of the root-ca")
tiny.show(demo_pki.nodes["root-ca"].crl_path)

print("Create the verification environment")
tiny.do.verifyenv(demo_pki, create=True)

print("Verify every file related to root-ca")
tiny.do.verify(demo_pki.nodes["root-ca"])
# You can verify specific elements, by specifying "key", "csr", "cert", "crl" or "pkcs12"
# tiny.do.verify(demo_pki.nodes["root-ca"], "key")
# You can verify the whole pki as follows
# tiny.do.verify_all(demo_pki)

print("Destroy the verification environment")
tiny.do.verifyenv(demo_pki, create=False)

# Uncomment this if you wish to delete the files
# print("Cleaning up the work directory")
# tiny.do.clean(demo_pki)
| [
"tinypyki.do.insert",
"tinypyki.change.subj",
"tinypyki.do.verify",
"tinypyki.PKI",
"tinypyki.do.verifyenv",
"tinypyki.do.everything",
"tinypyki.do.revoke",
"tinypyki.Node",
"tinypyki.show"
] | [((213, 233), 'tinypyki.PKI', 'tiny.PKI', (['"""mass-pki"""'], {}), "('mass-pki')\n", (221, 233), True, 'import tinypyki as tiny\n'), ((278, 334), 'tinypyki.Node', 'tiny.Node', ([], {'nid': '"""root-ca"""', 'pathlen': '(1)', 'san': '"""email=<EMAIL>"""'}), "(nid='root-ca', pathlen=1, san='email=<EMAIL>')\n", (287, 334), True, 'import tinypyki as tiny\n'), ((589, 617), 'tinypyki.do.insert', 'tiny.do.insert', (['root_ca', 'pki'], {}), '(root_ca, pki)\n', (603, 617), True, 'import tinypyki as tiny\n'), ((780, 816), 'tinypyki.do.everything', 'tiny.do.everything', (['pki'], {'pkcs12': '(True)'}), '(pki, pkcs12=True)\n', (798, 816), True, 'import tinypyki as tiny\n'), ((853, 867), 'tinypyki.show', 'tiny.show', (['pki'], {}), '(pki)\n', (862, 867), True, 'import tinypyki as tiny\n'), ((1577, 1617), 'tinypyki.show', 'tiny.show', (["pki.nodes['root-ca'].crl_path"], {}), "(pki.nodes['root-ca'].crl_path)\n", (1586, 1617), True, 'import tinypyki as tiny\n'), ((1664, 1699), 'tinypyki.do.verifyenv', 'tiny.do.verifyenv', (['pki'], {'create': '(True)'}), '(pki, create=True)\n', (1681, 1699), True, 'import tinypyki as tiny\n'), ((1747, 1783), 'tinypyki.do.verify', 'tiny.do.verify', (["pki.nodes['root-ca']"], {}), "(pki.nodes['root-ca'])\n", (1761, 1783), True, 'import tinypyki as tiny\n'), ((2035, 2071), 'tinypyki.do.verifyenv', 'tiny.do.verifyenv', (['pki'], {'create': '(False)'}), '(pki, create=False)\n', (2052, 2071), True, 'import tinypyki as tiny\n'), ((643, 698), 'tinypyki.change.subj', 'tiny.change.subj', (['node'], {'cn': "(node.nid + '-dummy-hexample')"}), "(node, cn=node.nid + '-dummy-hexample')\n", (659, 698), True, 'import tinypyki as tiny\n'), ((703, 728), 'tinypyki.do.insert', 'tiny.do.insert', (['node', 'pki'], {}), '(node, pki)\n', (717, 728), True, 'import tinypyki as tiny\n'), ((1483, 1527), 'tinypyki.do.revoke', 'tiny.do.revoke', (['node'], {'reason': '"""keycompromise"""'}), "(node, reason='keycompromise')\n", (1497, 1527), True, 'import tinypyki as 
tiny\n')] |
from django.contrib import admin
from django.contrib.gis import admin as geo_model_admin
from leaflet.admin import LeafletGeoAdmin
from .models import Forecasts, Dam, Species
# Forecast Model
class ForecastsAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist for forecasts.
    list_display = ('dam', 'species', 'forecast_range')
admin.site.register(Forecasts, ForecastsAdmin)
# Species Model
class SpeciesAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist for species.
    list_display = ('name', 'reference_name')
admin.site.register(Species, SpeciesAdmin)
# Dam Model - requires GeoAdmin privileges (uses LeafletGeoAdmin for the map widget)
class DamAdmin(LeafletGeoAdmin):
    # Columns shown in the admin changelist for dams.
    list_display = ('name', 'abbr', 'location')
admin.site.register(Dam, DamAdmin)
| [
"django.contrib.admin.site.register"
] | [((288, 334), 'django.contrib.admin.site.register', 'admin.site.register', (['Forecasts', 'ForecastsAdmin'], {}), '(Forecasts, ForecastsAdmin)\n', (307, 334), False, 'from django.contrib import admin\n'), ((435, 477), 'django.contrib.admin.site.register', 'admin.site.register', (['Species', 'SpeciesAdmin'], {}), '(Species, SpeciesAdmin)\n', (454, 477), False, 'from django.contrib import admin\n'), ((602, 636), 'django.contrib.admin.site.register', 'admin.site.register', (['Dam', 'DamAdmin'], {}), '(Dam, DamAdmin)\n', (621, 636), False, 'from django.contrib import admin\n')] |
# -*- coding: UTF-8 -*-
# vim: set expandtab sw=4 ts=4 sts=4:
#
# phpMyAdmin web site
#
# Copyright (C) 2008 - 2016 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import json
import urllib2
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.core.urlresolvers import reverse
from django.db import models
from django.conf import settings
from django.utils import timezone
import os.path
from data.themes import CSSMAP
from markupfield.fields import MarkupField
from pmaweb.cdn import purge_cdn, purge_all_cdn
# Naming of versions.
# Maps a substring of the version string to a human-readable description.
# Order matters: the first entry whose key occurs in the version wins
# (see Release.get_version_suffix), so 'rc1'..'rc4' must precede 'rc'.
VERSION_INFO = (
    ('alpha1', ' First alpha version.'),
    ('alpha2', ' Second alpha version.'),
    ('alpha3', ' Third alpha version.'),
    ('alpha4', ' Fourth alpha version.'),
    ('beta1', ' First beta version.'),
    ('beta2', ' Second beta version.'),
    ('beta3', ' Third beta version.'),
    ('beta4', ' Fourth beta version.'),
    ('beta', ' Beta version.'),
    ('rc1', ' First release candidate.'),
    ('rc2', ' Second release candidate.'),
    ('rc3', ' Third release candidate.'),
    ('rc4', ' Fourth release candidate.'),
    ('rc', ' Release candidate.'),
)
# Docker Hub build-trigger endpoint; {0} is filled with DOCKERHUB_TOKEN.
DOCKER_TRIGGER = \
    'https://registry.hub.docker.com/u/phpmyadmin/phpmyadmin/trigger/{0}/'
def get_current_releases():
    """
    Return the newest stable release for every listed branch.

    For each branch in ``settings.LISTED_BRANCHES`` (e.g. ``'4.9'``), look
    up all stable releases whose encoded version number falls inside that
    branch's range and keep the newest one (default ordering is newest
    first). Branches without any stable release are skipped.
    """
    branch_span = 1000000  # width of one minor-version branch in version_num units
    current = []
    for branch in settings.LISTED_BRANCHES:
        lower = Release.parse_version(branch)
        stable = Release.objects.filter(
            version_num__gte=lower,
            version_num__lt=lower + branch_span,
            stable=True,
        )
        if stable.exists():
            current.append(stable[0])
    return current
class Release(models.Model):
    """
    A phpMyAdmin release or snapshot.

    ``version_num`` is a sortable integer encoding of ``version`` computed
    by :meth:`parse_version`; it (and the ``stable`` flag) is refreshed on
    every :meth:`save`.
    """
    version = models.CharField(max_length=50, unique=True)
    version_num = models.IntegerField(default=0, unique=True)
    release_notes = MarkupField(default_markup_type='markdown')
    stable = models.BooleanField(default=False, db_index=True)
    snapshot = models.BooleanField(default=False, db_index=True)
    date = models.DateTimeField(db_index=True, default=timezone.now)
    # Runtime-only flag used by the post_save signal handler to make sure
    # the CDN is purged at most once per instance.
    purged = False
    # Supported PHP/MySQL version constraints keyed by the 'X.Y' prefix of
    # the release version. Replaces two long duplicated if/elif chains;
    # unknown prefixes yield None, matching the previous implicit behaviour.
    PHP_VERSION_CONSTRAINTS = {
        '5.1': '>=7.1,<8.0',
        '5.0': '>=7.1,<8.0',
        '4.9': '>=5.5,<8.0',
        '4.8': '>=5.5,<7.3',
        '4.7': '>=5.5,<7.3',
        '4.6': '>=5.5,<7.2',
        '4.5': '>=5.5,<7.1',
        '4.4': '>=5.3,<7.1',
        '4.3': '>=5.3,<7.0',
        '4.2': '>=5.3,<7.0',
        '4.1': '>=5.3,<7.0',
        '4.0': '>=5.2,<5.3',
    }
    MYSQL_VERSION_CONSTRAINTS = {
        '5.1': '>=5.5',
        '5.0': '>=5.5',
        '4.9': '>=5.5',
        '4.8': '>=5.5',
        '4.7': '>=5.5',
        '4.6': '>=5.5',
        '4.5': '>=5.5',
        '4.4': '>=5.5',
        '4.3': '>=5.5',
        '4.2': '>=5.5',
        '4.1': '>=5.5',
        '4.0': '>=5.0',
    }
    class Meta(object):
        ordering = ['-version_num']
    def __unicode__(self):
        return self.version
    def get_absolute_url(self):
        # Snapshots share the generic downloads page; real releases have
        # their own release page.
        if self.snapshot:
            return reverse('downloads')
        return reverse('release', kwargs={'version': self.version})
    def simpledownload(self):
        """
        Return the preferred download for this release.

        Prefers the all-languages zip archive, falls back to the first
        available download, or None when the release has no files at all.
        """
        try:
            return self.download_set.get(
                filename__endswith='-all-languages.zip'
            )
        except Download.DoesNotExist:
            try:
                return self.download_set.all()[0]
            except IndexError:
                return None
    @staticmethod
    def parse_version(version):
        """
        Encode a version string as a single sortable integer.

        Layout (decimal digits): major * 1e8 + minor * 1e6 + patch * 1e4
        + subpatch * 1e2 + suffix, where the suffix encodes pre-release
        status (alphaN -> N, betaN -> 10+N, rcN -> 50+N, final -> 99).
        Snapshots ('X.Y+snapshot') only carry major and minor.

        Raises ValueError for an unrecognized pre-release suffix.
        """
        if '+' in version:
            # Snapshots, eg. 4.7+snapshot
            parts = [int(x) for x in version.split('+')[0].split('.')]
            assert len(parts) == 2
            return (
                100000000 * parts[0] +
                1000000 * parts[1]
            )
        if '-' in version:
            version, suffix = version.split('-')
            if suffix.startswith('alpha'):
                suffix_num = int(suffix[5:])
            elif suffix.startswith('beta'):
                suffix_num = 10 + int(suffix[4:])
            elif suffix.startswith('rc'):
                suffix_num = 50 + int(suffix[2:])
            else:
                raise ValueError(version)
        else:
            # Final releases sort above any pre-release of the same version.
            suffix_num = 99
        parts = [int(x) for x in version.split('.')]
        # Pad to exactly four components (major, minor, patch, subpatch).
        if len(parts) == 2:
            parts.append(0)
        if len(parts) == 3:
            parts.append(0)
        assert len(parts) == 4
        return (
            100000000 * parts[0] +
            1000000 * parts[1] +
            10000 * parts[2] +
            100 * parts[3] +
            suffix_num
        )
    def save(self, *args, **kwargs):
        """Keep version_num and stable in sync with version before saving."""
        self.version_num = self.parse_version(self.version)
        # Suffix number 99 marks a final (stable) release.
        self.stable = self.version_num % 100 == 99
        super(Release, self).save(*args, **kwargs)
    def get_version_suffix(self):
        """
        Return the human-readable suffix for this version, or '' for a
        final release (first matching VERSION_INFO entry wins).
        """
        for match, result in VERSION_INFO:
            if self.version.find(match) != -1:
                return result
        return ''
    def get_php_versions(self):
        """Return the supported PHP version constraint, or None if unknown."""
        return self.PHP_VERSION_CONSTRAINTS.get(self.version[:3])
    def get_mysql_versions(self):
        """Return the supported MySQL version constraint, or None if unknown."""
        return self.MYSQL_VERSION_CONSTRAINTS.get(self.version[:3])
    def get_version_info(self):
        '''
        Returns description to the phpMyAdmin version.
        '''
        text = ''
        if self.version[:2] == '0.':
            text = 'Historical release.'
        elif self.version[:2] == '1.':
            text = 'Historical release.'
        elif self.version[:2] == '2.':
            text = 'Version compatible with PHP 4+ and MySQL 3+.'
        elif self.version[:2] == '3.':
            text = (
                'Frames version not requiring Javascript. ' +
                'Requires PHP 5.2 and MySQL 5. ' +
                'Supported for security fixes only, until Jan 1, 2014.'
            )
        elif self.version[:3] == '5.1':
            text = (
                'Future version compatible with PHP 7.1 and newer and MySQL 5.5 and newer. '
            )
        elif self.version[:3] == '5.0':
            text = (
                'Current version compatible with PHP 7.1 and newer and MySQL 5.5 and newer. '
            )
        elif self.version[:3] == '4.9':
            text = (
                'Older version compatible with PHP 5.5 to 7.4 and MySQL 5.5 and newer. ' +
                'Currently supported for security fixes only. '
            )
        elif self.version[:3] == '4.8':
            text = (
                'Older version compatible with PHP 5.5 to 7.2 and MySQL 5.5 and newer. ' +
                'Was supported until June 4, 2019.'
            )
        # Early 4.7 builds predate the PHP 7.2 compatibility work, hence the
        # explicit list checked before the generic '4.7' branch.
        elif self.version in ('4.7.0', '4.7.1', '4.7.2', '4.7.3', '4.7.0-rc1', '4.7.0-beta1'):
            text = (
                'Older version compatible with PHP 5.5 to 7.1 and MySQL 5.5 and newer. ' +
                'Was supported until April 7, 2018.'
            )
        elif self.version[:3] == '4.7':
            text = (
                'Older version compatible with PHP 5.5 to 7.2 and MySQL 5.5 and newer. ' +
                'Was supported until April 7, 2018.'
            )
        elif self.version[:3] == '4.6':
            text = (
                'Older version compatible with PHP 5.5 to 7.1 and MySQL 5.5 and newer. ' +
                'Was supported until April 1, 2017.'
            )
        elif self.version[:3] == '4.5':
            text = (
                'Older version compatible with PHP 5.5 to 7.0 and MySQL 5.5. ' +
                'Was supported until April 1, 2016.'
            )
        elif self.version[:3] == '4.4':
            text = (
                'Older version compatible with PHP 5.3.7 to 7.0 and MySQL 5.5. ' +
                'Was supported until October 1, 2016.'
            )
        elif self.version[:3] == '4.3':
            text = (
                'Older version compatible with PHP 5.3 and MySQL 5.5. ' +
                'Was supported until October 1, 2015.'
            )
        elif self.version[:3] == '4.2':
            text = (
                'Older version compatible with PHP 5.3 and MySQL 5.5. ' +
                'Was supported until July 1, 2015.'
            )
        elif self.version[:3] == '4.1':
            text = (
                'Older version compatible with PHP 5.3 and MySQL 5.5. ' +
                'Was supported until January 1, 2015.'
            )
        elif self.version[:3] == '4.0':
            text = (
                'Older version compatible with PHP 5.2 and MySQL 5. ' +
                'Does not support PHP 5.5 or newer. ' +
                'Was supported until April 1, 2017.'
            )
        text += self.get_version_suffix()
        return text
    def get_downloads(self):
        """Lists downloads, making all-languages.zip first"""
        dlset = self.download_set
        return (
            list(dlset.filter(filename__endswith='all-languages.zip')) +
            list(dlset.exclude(filename__endswith='all-languages.zip'))
        )
class Download(models.Model):
    """A single downloadable file (archive) belonging to a Release."""
    release = models.ForeignKey(Release)
    filename = models.CharField(max_length=50)
    size = models.IntegerField(default=0)  # size in bytes
    sha1 = models.CharField(max_length=40)
    sha256 = models.CharField(max_length=64)
    signed = models.BooleanField(default=False)  # whether a .asc signature exists
    class Meta(object):
        ordering = ['-release__version_num', 'filename']
        unique_together = ['release', 'filename']
    def __unicode__(self):
        # The "name" of a download is its server-relative path; snapshots
        # live under /snapshots/, regular releases under /phpMyAdmin/<version>/.
        if self.release.snapshot:
            return '/snapshots/{0}'.format(
                self.filename
            )
        return '/phpMyAdmin/{0}/{1}'.format(
            self.release.version,
            self.filename
        )
    @property
    def size_k(self):
        # File size in KiB (integer division under Python 2).
        return self.size / 1024
    @property
    def size_m(self):
        # File size in MiB (integer division under Python 2).
        return self.size / (1024 * 1024)
    def get_filesystem_path(self):
        """Return the absolute path of this file on the local filesystem."""
        return os.path.join(
            settings.FILES_PATH,
            'phpMyAdmin',
            self.release.version,
            self.filename
        )
    def get_absolute_url(self):
        """Return the canonical download URL on files.phpmyadmin.net."""
        return 'https://files.phpmyadmin.net{0}'.format(
            self.__unicode__()
        )
    def get_signed_url(self):
        """Return the URL of the GPG signature, or '' if the file is unsigned."""
        if not self.signed:
            return ''
        return 'https://files.phpmyadmin.net{0}.asc'.format(
            self.__unicode__()
        )
    def get_checksum_url(self):
        """Return the URL of the SHA-256 checksum file."""
        return 'https://files.phpmyadmin.net{0}.sha256'.format(
            self.__unicode__()
        )
    def get_alternate_url(self):
        """Return the mirror URL on the CDN77 edge."""
        return 'https://1126968067.rsc.cdn77.org{0}'.format(
            self.__unicode__()
        )
    @property
    def archive(self):
        # Last filename extension, e.g. 'zip', 'gz', '7z'.
        return self.filename.rsplit('.', 1)[-1]
    @property
    def composer_type(self):
        # Composer distinguishes only 'zip' and 'tar' distribution types.
        ext = self.filename.rsplit('.', 1)[-1]
        if ext == 'zip':
            return 'zip'
        else:
            return 'tar'
    @property
    def get_stable_url(self):
        """
        Return the "latest" redirect URL for this download's variant.

        Handles double extensions (e.g. '.tar.gz') by peeling one more
        suffix when the last one is not 'zip'/'7z'; the variant is the
        third dash-separated part of the filename
        (phpMyAdmin-<version>-<variant>).
        """
        filename, ext = self.filename.rsplit('.', 1)
        if ext not in ('zip', '7z'):
            filename, ext2 = filename.rsplit('.', 1)
            ext = '{0}.{1}'.format(ext2, ext)
        variant = filename.split('-', 2)[2]
        return reverse(
            'latest-download',
            kwargs={
                'flavor': variant,
                'extension': '.' + ext,
            }
        )
    @property
    def get_stable_filename(self):
        # Last path component of the "latest" URL.
        return self.get_stable_url.rsplit('/', 1)[1]
    @property
    def is_featured(self):
        # The all-languages zip is the featured (default) download.
        return self.filename.endswith('all-languages.zip')
class Theme(models.Model):
    """A downloadable phpMyAdmin theme hosted on files.phpmyadmin.net."""
    name = models.CharField(max_length=50)  # machine name (directory name)
    display_name = models.CharField(max_length=50)
    version = models.CharField(max_length=50)
    filename = models.CharField(max_length=100, unique=True)
    supported_versions = models.CharField(max_length=50)  # key into CSSMAP
    description = models.TextField()
    author = models.CharField(max_length=200)
    url = models.URLField(blank=True)
    size = models.IntegerField(default=0)  # size in bytes
    sha1 = models.CharField(max_length=40)
    sha256 = models.CharField(max_length=64)
    signed = models.BooleanField(default=False)  # whether a .asc signature exists
    date = models.DateTimeField(db_index=True, default=timezone.now)
    show = models.BooleanField(default=True)  # listed on the themes page?
    class Meta(object):
        ordering = ['name', 'version']
    def __unicode__(self):
        return u'{0} {1}'.format(self.display_name, self.version)
    @property
    def imgname(self):
        # Relative path of the theme screenshot in the static files.
        return 'images/themes/{0}.png'.format(self.name)
    def get_absolute_url(self):
        """Return the download URL of the theme archive."""
        return 'https://files.phpmyadmin.net/themes/{0}/{1}/{2}'.format(
            self.name,
            self.version,
            self.filename,
        )
    def get_signed_url(self):
        """Return the URL of the GPG signature, or '' if unsigned."""
        if not self.signed:
            return ''
        return 'https://files.phpmyadmin.net/themes/{0}/{1}/{2}.asc'.format(
            self.name,
            self.version,
            self.filename,
        )
    def get_filesystem_path(self):
        """Return the absolute path of the archive on the local filesystem."""
        return os.path.join(
            settings.FILES_PATH,
            'themes',
            self.name,
            self.version,
            self.filename
        )
    @property
    def get_css(self):
        # CSS class used to render the supported-versions badge.
        return CSSMAP[self.supported_versions]
def dockerhub_trigger(tag):
    """
    Trigger a Docker Hub automated build for the given tag.

    Does nothing when DOCKERHUB_TOKEN is not configured. Sends a JSON
    POST to the Docker Hub trigger endpoint and drains the response.
    """
    if settings.DOCKERHUB_TOKEN is None:
        return
    request = urllib2.Request(
        DOCKER_TRIGGER.format(settings.DOCKERHUB_TOKEN),
        json.dumps({'docker_tag': tag}),
        {'Content-Type': 'application/json'}
    )
    handle = urllib2.urlopen(request)
    # Make sure the HTTP response is always closed (the original leaked it).
    try:
        handle.read()
    finally:
        handle.close()
@receiver(post_save, sender=Release)
def purge_release(sender, instance, **kwargs):
    """
    Invalidate CDN caches after a Release is saved.

    Purges every page that embeds release information (home page, download
    listings, machine-readable version dumps, the release's own page) and
    finally flushes the whole CDN, since all pages carry a download link.
    Guarded by ``instance.purged`` so one save cycle purges only once.
    """
    if instance.purged:
        return
    instance.purged = True
    purge_cdn(
        # Pages with _littleboxes.html
        reverse('home'),
        reverse('news'),
        # Download lists
        reverse('files'),
        reverse('feed-files'),
        reverse('downloads'),
        # Version dumps
        '/downloads/list.txt',
        '/home_page/version.txt',
        '/home_page/version.js',
        '/home_page/version.json',
        '/downloads/phpMyAdmin-latest-all-languages.tar.bz2',
        '/downloads/phpMyAdmin-latest-all-languages.tar.gz',
        '/downloads/phpMyAdmin-latest-all-languages.tar.xz',
        '/downloads/phpMyAdmin-latest-all-languages.zip',
        '/downloads/phpMyAdmin-latest-english.tar.bz2',
        '/downloads/phpMyAdmin-latest-english.tar.gz',
        '/downloads/phpMyAdmin-latest-english.tar.xz',
        '/downloads/phpMyAdmin-latest-english.zip',
        '/downloads/phpMyAdmin-latest-source.tar.xz',
        reverse('doap'),
        reverse('pad'),
        # This release
        instance.get_absolute_url(),
    )
    # Purge all pages as every page contains download link
    purge_all_cdn()
@receiver(post_save, sender=Download)
def purge_download(sender, instance, **kwargs):
    """Saving a Download purges the caches of its parent Release."""
    purge_release(sender, instance.release)
@receiver(post_save, sender=Theme)
def purge_theme(sender, instance, **kwargs):
    """Saving a Theme purges the CDN cache of the themes listing page."""
    purge_cdn(reverse('themes'))
| [
"markupfield.fields.MarkupField",
"urllib2.urlopen",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"json.dumps",
"pmaweb.cdn.purge_all_cdn",
"django.db.models.DateTimeField",
"django.db.models.BooleanField",
"django.core.urlresolvers.reverse",
"dja... | [((15141, 15176), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Release'}), '(post_save, sender=Release)\n', (15149, 15176), False, 'from django.dispatch import receiver\n'), ((16374, 16410), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Download'}), '(post_save, sender=Download)\n', (16382, 16410), False, 'from django.dispatch import receiver\n'), ((16506, 16539), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'Theme'}), '(post_save, sender=Theme)\n', (16514, 16539), False, 'from django.dispatch import receiver\n'), ((2443, 2487), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'unique': '(True)'}), '(max_length=50, unique=True)\n', (2459, 2487), False, 'from django.db import models\n'), ((2506, 2549), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)', 'unique': '(True)'}), '(default=0, unique=True)\n', (2525, 2549), False, 'from django.db import models\n'), ((2570, 2613), 'markupfield.fields.MarkupField', 'MarkupField', ([], {'default_markup_type': '"""markdown"""'}), "(default_markup_type='markdown')\n", (2581, 2613), False, 'from markupfield.fields import MarkupField\n'), ((2627, 2676), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'db_index': '(True)'}), '(default=False, db_index=True)\n', (2646, 2676), False, 'from django.db import models\n'), ((2692, 2741), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'db_index': '(True)'}), '(default=False, db_index=True)\n', (2711, 2741), False, 'from django.db import models\n'), ((2753, 2810), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'db_index': '(True)', 'default': 'timezone.now'}), '(db_index=True, default=timezone.now)\n', (2773, 2810), False, 'from django.db import models\n'), ((10636, 10662), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Release'], {}), '(Release)\n', (10653, 
10662), False, 'from django.db import models\n'), ((10678, 10709), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (10694, 10709), False, 'from django.db import models\n'), ((10721, 10751), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (10740, 10751), False, 'from django.db import models\n'), ((10763, 10794), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (10779, 10794), False, 'from django.db import models\n'), ((10808, 10839), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (10824, 10839), False, 'from django.db import models\n'), ((10853, 10887), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (10872, 10887), False, 'from django.db import models\n'), ((13177, 13208), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (13193, 13208), False, 'from django.db import models\n'), ((13228, 13259), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (13244, 13259), False, 'from django.db import models\n'), ((13274, 13305), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (13290, 13305), False, 'from django.db import models\n'), ((13321, 13366), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (13337, 13366), False, 'from django.db import models\n'), ((13392, 13423), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (13408, 13423), False, 'from django.db import models\n'), ((13442, 13460), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (13458, 13460), False, 'from django.db import models\n'), 
((13474, 13506), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (13490, 13506), False, 'from django.db import models\n'), ((13517, 13544), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)'}), '(blank=True)\n', (13532, 13544), False, 'from django.db import models\n'), ((13556, 13586), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (13575, 13586), False, 'from django.db import models\n'), ((13598, 13629), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (13614, 13629), False, 'from django.db import models\n'), ((13643, 13674), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (13659, 13674), False, 'from django.db import models\n'), ((13688, 13722), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (13707, 13722), False, 'from django.db import models\n'), ((13734, 13791), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'db_index': '(True)', 'default': 'timezone.now'}), '(db_index=True, default=timezone.now)\n', (13754, 13791), False, 'from django.db import models\n'), ((13803, 13836), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (13822, 13836), False, 'from django.db import models\n'), ((15095, 15119), 'urllib2.urlopen', 'urllib2.urlopen', (['request'], {}), '(request)\n', (15110, 15119), False, 'import urllib2\n'), ((16355, 16370), 'pmaweb.cdn.purge_all_cdn', 'purge_all_cdn', ([], {}), '()\n', (16368, 16370), False, 'from pmaweb.cdn import purge_cdn, purge_all_cdn\n'), ((3062, 3114), 'django.core.urlresolvers.reverse', 'reverse', (['"""release"""'], {'kwargs': "{'version': self.version}"}), "('release', kwargs={'version': self.version})\n", (3069, 3114), False, 'from django.core.urlresolvers import 
reverse\n'), ((12773, 12851), 'django.core.urlresolvers.reverse', 'reverse', (['"""latest-download"""'], {'kwargs': "{'flavor': variant, 'extension': '.' + ext}"}), "('latest-download', kwargs={'flavor': variant, 'extension': '.' + ext})\n", (12780, 12851), False, 'from django.core.urlresolvers import reverse\n'), ((14998, 15029), 'json.dumps', 'json.dumps', (["{'docker_tag': tag}"], {}), "({'docker_tag': tag})\n", (15008, 15029), False, 'import json\n'), ((15352, 15367), 'django.core.urlresolvers.reverse', 'reverse', (['"""home"""'], {}), "('home')\n", (15359, 15367), False, 'from django.core.urlresolvers import reverse\n'), ((15377, 15392), 'django.core.urlresolvers.reverse', 'reverse', (['"""news"""'], {}), "('news')\n", (15384, 15392), False, 'from django.core.urlresolvers import reverse\n'), ((15427, 15443), 'django.core.urlresolvers.reverse', 'reverse', (['"""files"""'], {}), "('files')\n", (15434, 15443), False, 'from django.core.urlresolvers import reverse\n'), ((15453, 15474), 'django.core.urlresolvers.reverse', 'reverse', (['"""feed-files"""'], {}), "('feed-files')\n", (15460, 15474), False, 'from django.core.urlresolvers import reverse\n'), ((15484, 15504), 'django.core.urlresolvers.reverse', 'reverse', (['"""downloads"""'], {}), "('downloads')\n", (15491, 15504), False, 'from django.core.urlresolvers import reverse\n'), ((16185, 16200), 'django.core.urlresolvers.reverse', 'reverse', (['"""doap"""'], {}), "('doap')\n", (16192, 16200), False, 'from django.core.urlresolvers import reverse\n'), ((16210, 16224), 'django.core.urlresolvers.reverse', 'reverse', (['"""pad"""'], {}), "('pad')\n", (16217, 16224), False, 'from django.core.urlresolvers import reverse\n'), ((16599, 16616), 'django.core.urlresolvers.reverse', 'reverse', (['"""themes"""'], {}), "('themes')\n", (16606, 16616), False, 'from django.core.urlresolvers import reverse\n'), ((3026, 3046), 'django.core.urlresolvers.reverse', 'reverse', (['"""downloads"""'], {}), "('downloads')\n", (3033, 3046), 
False, 'from django.core.urlresolvers import reverse\n')] |
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib import admin
# Register all installed apps' ModelAdmin classes with the admin site.
admin.autodiscover()
from account.openid_consumer import PinaxConsumer
# Custom 500 handler provided by Pinax.
handler500 = "pinax.views.server_error"
# Open signup uses the plain signup view; otherwise signup requires a code.
if settings.ACCOUNT_OPEN_SIGNUP:
    signup_view = "account.views.signup"
else:
    signup_view = "signup_codes.views.signup"
# Main URL configuration (legacy Django patterns() style with string views).
urlpatterns = patterns("",
    url(r"^$", direct_to_template, {
        "template": "homepage.html",
    }, name="home"),
    url(r"^admin/invite_user/$", "signup_codes.views.admin_invite_user", name="admin_invite_user"),
    url(r"^account/signup/$", signup_view, name="acct_signup"),
    (r"^about/", include("about.urls")),
    (r"^account/", include("account.urls")),
    (r"^openid/(.*)", PinaxConsumer()),
    (r"^profiles/", include("basic_profiles.urls")),
    (r"^notices/", include("notification.urls")),
    (r"^announcements/", include("announcements.urls")),
    (r"^tagging_utils/", include("tagging_utils.urls")),
    (r"^comments/", include("threadedcomments.urls")),
    (r"^attachments/", include("attachments.urls")),
    (r"^groups/", include("basic_groups.urls")),
    (r"^tribes/", include("tribes.urls")),
    (r"^projects/", include("projects.urls")),
    (r"^flag/", include("flag.urls")),
    (r"^admin/", include(admin.site.urls)),
)
from tagging.models import TaggedItem
from projects.models import Project
from tasks.models import Task
from topics.models import Topic
from wiki.models import Article as WikiArticle
# Configuration for tagging_ext: each entry maps a section title to a
# query callable returning the objects of that model carrying a given tag.
tagged_models = (
    dict(title="Projects",
        query=lambda tag: TaggedItem.objects.get_by_model(Project, tag),
    ),
    dict(title="Topics",
        query=lambda tag: TaggedItem.objects.get_by_model(Topic, tag),
    ),
    dict(title="Project Tasks",
        query=lambda tag: TaggedItem.objects.get_by_model(Task, tag),
    ),
    dict(title="Wiki Articles",
        query=lambda tag: TaggedItem.objects.get_by_model(WikiArticle, tag),
    ),
)
tagging_ext_kwargs = {
    'tagged_models':tagged_models,
}
# Tag browsing URLs (per-model listing, per-tag listing, tag index).
urlpatterns += patterns('',
    url(r'^tags/(?P<tag>.+)/(?P<model>.+)$', 'tagging_ext.views.tag_by_model',
        kwargs=tagging_ext_kwargs, name='tagging_ext_tag_by_model'),
    url(r'^tags/(?P<tag>.+)/$', 'tagging_ext.views.tag',
        kwargs=tagging_ext_kwargs, name='tagging_ext_tag'),
    url(r'^tags/$', 'tagging_ext.views.index', name='tagging_ext_index'),
)
# Serve static files through Django only in development setups.
if settings.SERVE_MEDIA:
    urlpatterns += patterns("",
        (r"", include("staticfiles.urls")),
    )
| [
"tagging.models.TaggedItem.objects.get_by_model",
"account.openid_consumer.PinaxConsumer",
"django.contrib.admin.autodiscover"
] | [((166, 186), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (184, 186), False, 'from django.contrib import admin\n'), ((815, 830), 'account.openid_consumer.PinaxConsumer', 'PinaxConsumer', ([], {}), '()\n', (828, 830), False, 'from account.openid_consumer import PinaxConsumer\n'), ((1653, 1698), 'tagging.models.TaggedItem.objects.get_by_model', 'TaggedItem.objects.get_by_model', (['Project', 'tag'], {}), '(Project, tag)\n', (1684, 1698), False, 'from tagging.models import TaggedItem\n'), ((1758, 1801), 'tagging.models.TaggedItem.objects.get_by_model', 'TaggedItem.objects.get_by_model', (['Topic', 'tag'], {}), '(Topic, tag)\n', (1789, 1801), False, 'from tagging.models import TaggedItem\n'), ((1868, 1910), 'tagging.models.TaggedItem.objects.get_by_model', 'TaggedItem.objects.get_by_model', (['Task', 'tag'], {}), '(Task, tag)\n', (1899, 1910), False, 'from tagging.models import TaggedItem\n'), ((1977, 2026), 'tagging.models.TaggedItem.objects.get_by_model', 'TaggedItem.objects.get_by_model', (['WikiArticle', 'tag'], {}), '(WikiArticle, tag)\n', (2008, 2026), False, 'from tagging.models import TaggedItem\n')] |
import os
import sys
import pytest
import numpy as np
import pandas as pd
from scipy.stats import ks_2samp
sys.path.append("zarnitsa/")
from zarnitsa.stats import DataAugmenterExternally
N_TO_CHECK = 500
SIG = 0.5
@pytest.fixture
def dae():
    # Fresh augmenter instance for each test.
    return DataAugmenterExternally()
@pytest.fixture
def normal_data():
    # Reference sample: N_TO_CHECK draws from Normal(0, SIG * 3) as float64.
    return pd.Series(np.random.normal(0, SIG * 3, size=N_TO_CHECK), dtype="float64")
def test_augment_column_permute(dae, normal_data):
    """
    Augmented normal sample should match the reference normal distribution.
    """
    # NOTE(review): the test name says "permute" but aug_type is "normal";
    # presumably a copy-paste leftover -- confirm and consider renaming.
    normal_data_aug = dae.augment_distrib_random(
        aug_type="normal", size=N_TO_CHECK, loc=0, scale=SIG * 3
    )
    # Two-sample Kolmogorov-Smirnov test: both samples should come from
    # the same distribution, so the p-value must not be tiny.
    assert ks_2samp(normal_data, normal_data_aug).pvalue > 0.01, "KS criteria"
| [
"numpy.random.normal",
"zarnitsa.stats.DataAugmenterExternally",
"sys.path.append",
"scipy.stats.ks_2samp"
] | [((109, 137), 'sys.path.append', 'sys.path.append', (['"""zarnitsa/"""'], {}), "('zarnitsa/')\n", (124, 137), False, 'import sys\n'), ((259, 284), 'zarnitsa.stats.DataAugmenterExternally', 'DataAugmenterExternally', ([], {}), '()\n', (282, 284), False, 'from zarnitsa.stats import DataAugmenterExternally\n'), ((343, 388), 'numpy.random.normal', 'np.random.normal', (['(0)', '(SIG * 3)'], {'size': 'N_TO_CHECK'}), '(0, SIG * 3, size=N_TO_CHECK)\n', (359, 388), True, 'import numpy as np\n'), ((652, 690), 'scipy.stats.ks_2samp', 'ks_2samp', (['normal_data', 'normal_data_aug'], {}), '(normal_data, normal_data_aug)\n', (660, 690), False, 'from scipy.stats import ks_2samp\n')] |
from .Wavefunction import Wavefunction
import numpy as np
from scipy.fft import ifft2, fft2
import numba
CACHE_OPTIMIZATIONS = True
class Collision():
    # Wavefunction that implements the wilson line (the "target").
    targetWavefunction = None # Implements wilson line
    # Wavefunction that doesn't (have to) implement the wilson line.
    incidentWavefunction = None # Doesn't (have to) implement wilson line
    # Lazily computed, cached quantities (filled in by the same-named methods).
    _omega = None
    _omegaFFT = None
    _particlesProduced = None
    _particlesProducedDeriv = None
    _momentaMagSquared = None
    _momentaComponents = None
    _thetaInFourierSpace = None
    _momentaBins = None
    _fourierHarmonics = None # This will be initialized as an empty dict to store harmonics (see __init__)
    # Flags recording whether each cached quantity has been computed yet,
    # so repeated accessor calls skip the expensive recomputation.
    _omegaExists = False
    _omegaFFTExists = False
    _momentaComponentsExist = False
    _particlesProducedExists = False
    _particlesProducedDerivExists = False
    _momentaBinsExists = False
def __init__(self, wavefunction1: Wavefunction, wavefunction2: Wavefunction):
r"""
Initialize a collision with two wavefunctions, presumably a nucleus and a proton. One must implement
the wilson line, though the order of the arguments does not matter.
In the case that both wavefunctions implement the wilson line, the first (wavefunction1) will be used as such.
In the case that neither implement the wilson line, an exception will be raised.
Parameters
----------
wavefunction1 : Wavefunction (or child)
The first wavefunction
wavefunction2 : Wavefunction (or child)
The second wavefunction
"""
# Make sure that at least one has a wilson line
wilsonLineExists1 = callable(getattr(wavefunction1, "wilsonLine", None))
wilsonLineExists2 = callable(getattr(wavefunction2, "wilsonLine", None))
if not wilsonLineExists1 and not wilsonLineExists2:
raise Exception("Neither of the wavefunctions passed to Collision(Wavefunction, Wavefunction) implement the wilsonLine() method; at least one is required to.")
if wilsonLineExists1 and not wilsonLineExists2:
self.targetWavefunction = wavefunction1
self.incidentWavefunction = wavefunction2
elif wilsonLineExists2 and not wilsonLineExists1:
self.targetWavefunction = wavefunction2
self.incidentWavefunction = wavefunction1
else:
self.targetWavefunction = wavefunction1
self.incidentWavefunction = wavefunction2
# Make sure that both use the same number of colors
if self.targetWavefunction.gluonDOF != self.incidentWavefunction.gluonDOF:
raise Exception(f"Wavefunctions implement different gluon degrees of freedom (number of color charges): {self.incidentWavefunction.gluonDOF} vs. {self.targetWavefunction.gluonDOF}")
# Probably some other checks that need to be done to make sure the two wavefunctions are compatable, but this is fine for now
# Carry over some variables so we don't have to call through the wavefunctions so much
self.N = self.targetWavefunction.N
self.length = self.targetWavefunction.length
self.gluonDOF = self.targetWavefunction.gluonDOF
self.delta = self.targetWavefunction.delta
self.delta2 = self.targetWavefunction.delta2
#print(self.targetWavefunction)
#print(self.incidentWavefunction)
# Variables to do with binning the momenta later on
self.binSize = 4*np.pi/self.length
self.kMax = 2/self.delta
self.numBins = int(self.kMax/self.binSize)
# This has to be initialized as an empty dict within the constructor
# because otherwise it can retain information across separate objects
# (no idea how, but this fixes it)
self._fourierHarmonics = {}
    def omega(self, forceCalculate=False, verbose=0):
        r"""
        Calculate the field omega at each point on the lattice.
        If the field already exists, it is simply returned and no calculation is done.
        Parameters
        ----------
        forceCalculate : bool (default=False)
            If the quantity has previously been calculated, the calculation will not be done
            again unless this argument is set to True.
        verbose : int (default=0)
            How much output should be printed as calculations are done. Options are 0, 1, or 2.
        Returns
        -------
        omega : array(N, N, 2, 2, `colorCharges`**2 - 1)
        """
        # Return the cached field unless a recalculation is forced.
        if self._omegaExists and not forceCalculate:
            return self._omega
        # Make sure the prerequisite quantities have been computed first.
        self.incidentWavefunction.gaugeField(verbose=verbose)
        self.targetWavefunction.adjointWilsonLine(verbose=verbose)
        if verbose > 0:
            print(f'Calculating {type(self).__name__} omega' + '.'*10, end='')
        # Delegate the numerical work to the (numba-optimized) helper.
        self._omega = _calculateOmegaOpt(self.N, self.gluonDOF, self.delta, self.incidentWavefunction.gaugeField(), self.targetWavefunction.adjointWilsonLine())
        self._omegaExists = True
        if verbose > 0:
            print('finished!')
        return self._omega
def omegaFFT(self, forceCalculate=False, verbose=0):
    r"""
    Return the Fourier transform of the field omega on the lattice.

    The transform is cached; pass forceCalculate=True to recompute it.

    Parameters
    ----------
    forceCalculate : bool (default=False)
        Redo the calculation even if a cached result is available.
    verbose : int (default=0)
        How much output should be printed as calculations are done.
        Options are 0, 1, or 2.

    Returns
    -------
    omegaFFT : array(N, N, 2, 2, `colorCharges`**2 - 1)
    """
    if not forceCalculate and self._omegaFFTExists:
        return self._omegaFFT

    # omega has to exist before it can be transformed.
    self.omega(verbose=verbose)

    if verbose > 0:
        print(f'Calculating {type(self).__name__} omega fourier transform' + '.'*10, end='')

    # scipy offers no "no normalization" mode, so we ask for the opposite
    # of the operation we perform ('backward' for fft) and then apply the
    # explicit delta^2 factor ourselves.  (scipy's default mode changed at
    # some point, hence the explicit norm= argument.)
    transformed = fft2(self._omega, axes=(0, 1), norm='backward')
    self._omegaFFT = self.delta2 * transformed
    self._omegaFFTExists = True

    if verbose > 0:
        print('finished!')

    return self._omegaFFT
def momentaBins(self, forceCalculate=False, verbose=0):
    r"""
    Return the lower edges of the momentum bins in which produced
    particles are counted, derived from the lattice dimensions:

    - \( k_{max} = 2 / \Delta \)
    - \( w_k = 4 \pi / L \)

    The bins are cached; pass forceCalculate=True to rebuild them.

    Parameters
    ----------
    forceCalculate : bool (default=False)
        Redo the calculation even if a cached result is available.
    verbose : int (default=0)
        How much output should be printed as calculations are done.
        Options are 0, 1, or 2.

    Returns
    -------
    momentaBins : array(numBins = L / (delta 2 pi))
    """
    if not forceCalculate and self._momentaBinsExists:
        return self._momentaBins

    if verbose > 0:
        print(f'Calculating {type(self).__name__} momentum bins' + '.'*10, end='')

    # Evenly spaced edges: 0, binSize, 2*binSize, ...
    binEdges = []
    for binIndex in range(self.numBins):
        binEdges.append(binIndex * self.binSize)
    self._momentaBins = binEdges
    self._momentaBinsExists = True

    if verbose > 0:
        print('finished!')

    return self._momentaBins
def momentaComponents(self, forceCalculate=False, verbose=0):
    r"""
    Return the momentum components at every lattice site:
    $$ (k_x, k_y) = \frac{2}{\Delta} \left( \sin\left( \frac{\pi i}{N} \right), \sin\left( \frac{\pi j}{N} \right) \right) $$
    where \(i\) and \(j\) index the \(x\) and \(y\) directions in real space.

    Cached after the first call; pass forceCalculate=True to redo it.

    Parameters
    ----------
    forceCalculate : bool (default=False)
        Redo the calculation even if a cached result is available.
    verbose : int (default=0)
        How much output should be printed as calculations are done.
        Options are 0, 1, or 2.

    Returns
    -------
    momentaComponents : array(N, N, 2)
    """
    if not forceCalculate and self._momentaComponentsExist:
        return self._momentaComponents

    if verbose > 0:
        print(f'Calculating {type(self).__name__} momentum components' + '.'*10, end='')

    components, theta = _calculateMomentaOpt(self.N, self.delta)
    self._momentaComponents = components
    self._thetaInFourierSpace = theta
    # |k|^2 comes essentially for free here, so cache it at the same time.
    self._momentaMagSquared = np.linalg.norm(components, axis=2)**2
    self._momentaComponentsExist = True

    if verbose > 0:
        print('finished!')

    return self._momentaComponents
def momentaMagnitudeSquared(self, forceCalculate=False, verbose=0):
    r"""
    Return \(|k|^2\) at every lattice site, where
    $$ |k| = \sqrt{k_x^2 + k_y^2} $$
    $$ (k_x, k_y) = \frac{2}{\Delta} \left( \sin\left( \frac{\pi i}{N} \right), \sin\left( \frac{\pi j}{N} \right) \right) $$
    with \(i\) and \(j\) indexing the \(x\) and \(y\) directions in real space.

    Cached (together with the components) after the first call; pass
    forceCalculate=True to redo it.

    Parameters
    ----------
    forceCalculate : bool (default=False)
        Redo the calculation even if a cached result is available.
    verbose : int (default=0)
        How much output should be printed as calculations are done.
        Options are 0, 1, or 2.

    Returns
    -------
    momentaMagSquared : array(N, N)
    """
    # The components and the magnitude are always computed together, so
    # the components' existence flag also covers the magnitude.
    if not forceCalculate and self._momentaComponentsExist:
        return self._momentaMagSquared

    if verbose > 0:
        print(f'Calculating {type(self).__name__} momenta magnitude squared' + '.'*10, end='')

    components, theta = _calculateMomentaOpt(self.N, self.delta)
    self._momentaComponents = components
    self._thetaInFourierSpace = theta
    self._momentaMagSquared = np.linalg.norm(components, axis=2)**2
    self._momentaComponentsExist = True

    if verbose > 0:
        print('finished!')

    return self._momentaMagSquared
def particlesProducedDeriv(self, forceCalculate=False, verbose=0):
    r"""
    Return the particle-production derivative \( \frac{d^2 N}{d^2 k} \)
    at every point on the momentum lattice.

    Cached after the first call; pass forceCalculate=True to redo it.

    Parameters
    ----------
    forceCalculate : bool (default=False)
        Redo the calculation even if a cached result is available.
    verbose : int (default=0)
        How much output should be printed as calculations are done.
        Options are 0, 1, or 2.

    Returns
    -------
    particlesProducedDeriv : array(N, N)
    """
    if not forceCalculate and self._particlesProducedDerivExists:
        return self._particlesProducedDeriv

    # Dependencies: the omega transform plus the momentum-grid quantities.
    self.omegaFFT(verbose=verbose)
    # This call also fills in _thetaInFourierSpace and _momentaComponents.
    self.momentaMagnitudeSquared(verbose=verbose)

    if verbose > 0:
        print(f'Calculating {type(self).__name__} derivative of particles produced' + '.'*10, end='')

    self._particlesProducedDeriv = _calculateParticlesProducedDerivOpt(
        self.N, self.gluonDOF, self._momentaMagSquared, self._omegaFFT)
    self._particlesProducedDerivExists = True

    if verbose > 0:
        print('finished!')

    return self._particlesProducedDeriv
def particlesProduced(self, forceCalculate=False, verbose=0):
    r"""
    Return the number of particles produced \(N(|k|)\) as a function of
    momentum.  This is exactly the zeroth fourier harmonic, so the work is
    delegated to cgc.Collision.fourierHarmonic().

    Binning follows cgc.Collision.momentaBins(), against which the result
    is most likely plotted.

    Parameters
    ----------
    forceCalculate : bool (default=False)
        Redo the calculation even if a cached result is available.
    verbose : int (default=0)
        How much output should be printed as calculations are done.
        Options are 0, 1, or 2.

    Returns
    -------
    particlesProduced : array(numBins = L / (delta 2 pi))
    """
    # The zeroth harmonic is strictly real, so store only the real part
    # back into the harmonic cache before returning it.
    zerothHarmonic = self.fourierHarmonic(0, forceCalculate, verbose)
    self._fourierHarmonics[0] = np.real(zerothHarmonic)
    return self._fourierHarmonics[0]
def fourierHarmonic(self, harmonic: int, forceCalculate=False, verbose=0):
    r"""
    Calculate the fourier harmonic of the particle production as:
    $$ v_n = \frac{ \sum_{(i,j)\in [k, k+ \Delta k]} |k| \frac{d^2 N}{d^2 k} e^{i n \theta }} { \sum_{(i,j)\in [k, k+ \Delta k]} |k| } $$
    If the calculation has already been done, the result is simply returned
    and is not repeated.

    Parameters
    ----------
    harmonic : int
        The fourier harmonic to calculate. All odd harmonics should be zero,
        and the zeroth harmonic will be equal to
        cgc.Collision.particlesProduced()
    forceCalculate : bool (default=False)
        If the quantity has previously been calculated, the calculation will
        not be done again unless this argument is set to True.
    verbose : int (default=0)
        How much output should be printed as calculations are done.
        Options are 0, 1, or 2.

    Returns
    -------
    particlesProduced : array(numBins = L / (delta 2 pi))
    """
    # First, see if we have already calculated this harmonic
    if harmonic in self._fourierHarmonics.keys() and not forceCalculate:
        return self._fourierHarmonics[harmonic]

    # Make sure _momentaMagSquared, _thetaInFourierSpace and
    # _particlesProducedDeriv all exist before we start binning.
    self.particlesProducedDeriv(verbose=verbose)

    if verbose > 0:
        print(f'Calculating {type(self).__name__} fourier harmonic: {harmonic}' + '.'*10, end='')

    # Drop all of our arrays into long 1D structure, since we will want to bin them
    vectorizedParticleDerivs = np.reshape(self._particlesProducedDeriv, [self.N*self.N])
    vectorizedTheta = np.reshape(self._thetaInFourierSpace, [self.N*self.N])
    vectorizedMomentaMag = np.reshape(np.sqrt(self._momentaMagSquared), [self.N*self.N])

    # The number of particles that are produced in each bin
    # These bins are actually just thin rings in momentum space
    self._fourierHarmonics[harmonic] = np.zeros(self.numBins, dtype='complex')

    # The bin sizes/bounds are calculated for elsewhere
    self.momentaBins()

    # Ideally, these rings should only have a thickness dk (infinitesimal)
    # but since we have a discrete lattice, we weight the particles by their
    # momentum (which may slightly vary) and then properly normalize.
    # For each bin:
    #   1. Sum over |k| * dN/d^2k * exp(i * harmonic * theta)
    #   2. Sum over |k|
    #   3. Divide 1./2.
    for i in range(self.numBins):
        # Compute the ring-membership mask once per bin; the original code
        # evaluated this identical O(N^2) comparison three times per bin.
        # Note the use of element-wise (or bitwise) and, "&".
        inRing = (vectorizedMomentaMag < self.binSize*(i+1)) & (vectorizedMomentaMag > self.binSize*i)

        particleDerivsInRing = vectorizedParticleDerivs[inRing]
        momentaMagInRing = vectorizedMomentaMag[inRing]
        thetaInRing = vectorizedTheta[inRing]

        # Note that multiplication is done element-wise by default
        numeratorSum = np.sum(particleDerivsInRing * momentaMagInRing * np.exp(1.j * harmonic * thetaInRing))
        denominatorSum = np.sum(momentaMagInRing)
        # NOTE(review): an empty ring gives denominatorSum == 0 and a nan
        # entry (numpy warns rather than raising) -- confirm empty bins
        # cannot occur for the lattice sizes in use.
        self._fourierHarmonics[harmonic][i] = numeratorSum / denominatorSum

    if verbose > 0:
        print('finished!')
    return self._fourierHarmonics[harmonic]
# Using custom functions within other jitted functions can cause some issues,
# so we define the signatures explicitly for these two functions.
@numba.jit((numba.float64[:,:], numba.int64, numba.int64, numba.int64, numba.float64), nopython=True, cache=CACHE_OPTIMIZATIONS)
def _x_deriv(matrix, i, j, N, delta):
    # Central difference along the second index with periodic boundaries:
    # (j+1) wraps explicitly via modulo, j-1 wraps via negative indexing.
    forward = matrix[i, (j+1) % N]
    backward = matrix[i, j-1]
    return (forward - backward) / (2 * delta)
@numba.jit((numba.float64[:,:], numba.int64, numba.int64, numba.int64, numba.float64), nopython=True, cache=CACHE_OPTIMIZATIONS)
def _y_deriv(matrix, i, j, N, delta):
    # Central difference along the first index with periodic boundaries:
    # (i+1) wraps explicitly via modulo, i-1 wraps via negative indexing.
    forward = matrix[(i+1) % N, j]
    backward = matrix[i-1, j]
    return (forward - backward) / (2 * delta)
# Because of the same issue described above, we can't cache this function
# This function gives a warning because numba only experimentally supports
# treating functions as objects (the list derivs).
@numba.jit(nopython=True)
def _calculateOmegaOpt(N, gluonDOF, delta, incidentGaugeField, targetAdjointWilsonLine):
    """
    Calculate the field omega at each point on the lattice.
    If the field already exists, it is simply returned and no calculation is done.

    Parameters
    ----------
    N : int
        Size of the lattice.
    gluonDOF : int
        Number of gluon degrees of freedom ((color charges)^2 - 1).
    delta : double
        Lattice spacing, used in the finite-difference derivatives.
    incidentGaugeField : array(N, N, gluonDOF)
        Gauge field of the incident wavefunction (indexed [:,:,m] below).
    targetAdjointWilsonLine : array(N, N, gluonDOF, gluonDOF)
        Adjoint Wilson line of the target (indexed [:,:,k,m] below).

    Returns
    -------
    numpy.array : shape=(N, N, 2, 2, `colorCharges`**2 - 1)
    """
    # 2,2 is for the 2 dimensions, x and y
    omega = np.zeros((N, N, 2, 2, gluonDOF), dtype='complex') # 2 is for two dimensions, x and y

    # Finite-difference operators; derivs[0] differentiates along x,
    # derivs[1] along y (numba only experimentally supports this
    # functions-as-objects pattern, hence no cache= on this jit).
    derivs = [_x_deriv, _y_deriv]

    for i in range(N):
        for j in range(N):
            for k in range(gluonDOF):
                for l in range(2): # 2 is number of dimensions
                    for n in range(2): # 2 is number of dimensions
                        # Contract the derivative of the incident field with the
                        # derivative of the target Wilson line over the color index m.
                        omega[i,j,l,n,k] = np.sum(np.array([derivs[l](incidentGaugeField[:,:,m], i, j, N, delta) * derivs[n](targetAdjointWilsonLine[:,:,k,m], i, j, N, delta) for m in range(gluonDOF)]))

    return omega
@numba.jit(nopython=True, cache=CACHE_OPTIMIZATIONS)
def _calculateMomentaOpt(N, delta):
    """
    Optimized (via numba) computation of the Fourier-space position
    (momentum) of every lattice point.

    Parameters
    ----------
    N : int
        Size of the lattice
    delta : double
        Spacing between each point

    Returns
    -------
    (momentaComponents, theta)
        momentaComponents : array(N, N, 2)
            x and y components of the momentum at each point
        theta : array(N, N)
            Relationship between the x and y components at each point,
            i.e. atan2(k_y, k_x)
    """
    momentaComponents = np.zeros((N, N, 2))
    theta = np.zeros((N, N))

    for i in range(N):
        for j in range(N):
            # Lattice momenta of the form k = 2/a sin(k' a / 2) (argument
            # simplified), with the sign factor flipping the momentum's sign
            # halfway across the lattice.
            kx = 2/delta * np.sin(np.pi*i/N) * np.sign(np.sin(2*np.pi*i/N))
            ky = 2/delta * np.sin(np.pi*j/N) * np.sign(np.sin(2*np.pi*j/N))
            momentaComponents[i,j,0] = kx
            momentaComponents[i,j,1] = ky
            theta[i,j] = np.arctan2(ky, kx)

    return momentaComponents, theta
@numba.jit(nopython=True, cache=CACHE_OPTIMIZATIONS)
def _calculateParticlesProducedDerivOpt(N, gluonDOF, momentaMagSquared, omegaFFT):
    """
    Optimized (via numba) function to calculate dN/d^2k

    Parameters
    ----------
    N : int
        The system size
    gluonDOF : int
        The number of gluon degrees of freedom ((possible color charges)^2 - 1)
    momentaMagSquared : array(N, N)
        The magnitude of the momentum at each point, likely calculated (in part) with _calculateMomentaOpt()
    omegaFFT : array(N, N, 2, 2, gluonDOF)
        Previously calculated omega array.  (Indexed below as
        omegaFFT[y, x, i, j, a]; an earlier version of this docstring
        listed the axes in a different order.)

    Returns
    -------
    particleProduction : array(N, N)
        The number of particles produced at each point on the momentum lattice
    """
    # Where we will calculate dN/d^2k
    particleProduction = np.zeros((N,N))

    # 2D Levi-Civita symbol
    LCS = np.array([[0,1],[-1,0]])

    # 2D Kronecker delta
    KDF = np.array([[1,0],[0,1]])

    # Note that unlike in the rest of the code, i and j *do not* refer to the
    # spacial indices here: x and y do (too many indices... :/ )
    for y in range(N):
        for x in range(N):
            # To prevent any divide by zero errors (the k = 0 point)
            if momentaMagSquared[y,x] == 0:
                continue

            # All of these 2s are for our two dimensions, x and y
            for i in range(2):
                for j in range(2):
                    for l in range(2):
                        for m in range(2):
                            for a in range(gluonDOF):
                                # Accumulate the (real part of the) contraction of
                                # omega with its conjugate, weighted by 1/|k|^2.
                                particleProduction[y,x] += np.real(2/(2*np.pi)**3 / momentaMagSquared[y,x] * (
                                    (KDF[i,j]*KDF[l,m] + LCS[i,j]*LCS[l,m])) * (
                                    omegaFFT[y,x,i,j,a] * np.conj(omegaFFT[y,x,l,m,a])))

    return particleProduction
| [
"numpy.reshape",
"scipy.fft.fft2",
"numpy.sqrt",
"numpy.conj",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numba.jit",
"numpy.sum",
"numpy.arctan2",
"numpy.linalg.norm",
"numpy.sin"
] | [((17641, 17773), 'numba.jit', 'numba.jit', (['(numba.float64[:, :], numba.int64, numba.int64, numba.int64, numba.float64)'], {'nopython': '(True)', 'cache': 'CACHE_OPTIMIZATIONS'}), '((numba.float64[:, :], numba.int64, numba.int64, numba.int64,\n numba.float64), nopython=True, cache=CACHE_OPTIMIZATIONS)\n', (17650, 17773), False, 'import numba\n'), ((17870, 18002), 'numba.jit', 'numba.jit', (['(numba.float64[:, :], numba.int64, numba.int64, numba.int64, numba.float64)'], {'nopython': '(True)', 'cache': 'CACHE_OPTIMIZATIONS'}), '((numba.float64[:, :], numba.int64, numba.int64, numba.int64,\n numba.float64), nopython=True, cache=CACHE_OPTIMIZATIONS)\n', (17879, 18002), False, 'import numba\n'), ((18299, 18323), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (18308, 18323), False, 'import numba\n'), ((19278, 19329), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'cache': 'CACHE_OPTIMIZATIONS'}), '(nopython=True, cache=CACHE_OPTIMIZATIONS)\n', (19287, 19329), False, 'import numba\n'), ((20446, 20497), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'cache': 'CACHE_OPTIMIZATIONS'}), '(nopython=True, cache=CACHE_OPTIMIZATIONS)\n', (20455, 20497), False, 'import numba\n'), ((18715, 18764), 'numpy.zeros', 'np.zeros', (['(N, N, 2, 2, gluonDOF)'], {'dtype': '"""complex"""'}), "((N, N, 2, 2, gluonDOF), dtype='complex')\n", (18723, 18764), True, 'import numpy as np\n'), ((19898, 19917), 'numpy.zeros', 'np.zeros', (['(N, N, 2)'], {}), '((N, N, 2))\n', (19906, 19917), True, 'import numpy as np\n'), ((19930, 19946), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19938, 19946), True, 'import numpy as np\n'), ((21259, 21275), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (21267, 21275), True, 'import numpy as np\n'), ((21316, 21343), 'numpy.array', 'np.array', (['[[0, 1], [-1, 0]]'], {}), '([[0, 1], [-1, 0]])\n', (21324, 21343), True, 'import numpy as np\n'), ((21378, 21404), 'numpy.array', 'np.array', 
(['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (21386, 21404), True, 'import numpy as np\n'), ((15481, 15540), 'numpy.reshape', 'np.reshape', (['self._particlesProducedDeriv', '[self.N * self.N]'], {}), '(self._particlesProducedDeriv, [self.N * self.N])\n', (15491, 15540), True, 'import numpy as np\n'), ((15565, 15621), 'numpy.reshape', 'np.reshape', (['self._thetaInFourierSpace', '[self.N * self.N]'], {}), '(self._thetaInFourierSpace, [self.N * self.N])\n', (15575, 15621), True, 'import numpy as np\n'), ((15896, 15935), 'numpy.zeros', 'np.zeros', (['self.numBins'], {'dtype': '"""complex"""'}), "(self.numBins, dtype='complex')\n", (15904, 15935), True, 'import numpy as np\n'), ((6384, 6431), 'scipy.fft.fft2', 'fft2', (['self._omega'], {'axes': '(0, 1)', 'norm': '"""backward"""'}), "(self._omega, axes=(0, 1), norm='backward')\n", (6388, 6431), False, 'from scipy.fft import ifft2, fft2\n'), ((9129, 9176), 'numpy.linalg.norm', 'np.linalg.norm', (['self._momentaComponents'], {'axis': '(2)'}), '(self._momentaComponents, axis=2)\n', (9143, 9176), True, 'import numpy as np\n'), ((10691, 10738), 'numpy.linalg.norm', 'np.linalg.norm', (['self._momentaComponents'], {'axis': '(2)'}), '(self._momentaComponents, axis=2)\n', (10705, 10738), True, 'import numpy as np\n'), ((15662, 15694), 'numpy.sqrt', 'np.sqrt', (['self._momentaMagSquared'], {}), '(self._momentaMagSquared)\n', (15669, 15694), True, 'import numpy as np\n'), ((17282, 17306), 'numpy.sum', 'np.sum', (['momentaMagInRing'], {}), '(momentaMagInRing)\n', (17288, 17306), True, 'import numpy as np\n'), ((20344, 20410), 'numpy.arctan2', 'np.arctan2', (['momentaComponents[i, j, 1]', 'momentaComponents[i, j, 0]'], {}), '(momentaComponents[i, j, 1], momentaComponents[i, j, 0])\n', (20354, 20410), True, 'import numpy as np\n'), ((17203, 17240), 'numpy.exp', 'np.exp', (['(1.0j * harmonic * thetaInRing)'], {}), '(1.0j * harmonic * thetaInRing)\n', (17209, 17240), True, 'import numpy as np\n'), ((20209, 20230), 'numpy.sin', 
'np.sin', (['(np.pi * i / N)'], {}), '(np.pi * i / N)\n', (20215, 20230), True, 'import numpy as np\n'), ((20237, 20262), 'numpy.sin', 'np.sin', (['(2 * np.pi * i / N)'], {}), '(2 * np.pi * i / N)\n', (20243, 20262), True, 'import numpy as np\n'), ((20269, 20290), 'numpy.sin', 'np.sin', (['(np.pi * j / N)'], {}), '(np.pi * j / N)\n', (20275, 20290), True, 'import numpy as np\n'), ((20297, 20322), 'numpy.sin', 'np.sin', (['(2 * np.pi * j / N)'], {}), '(2 * np.pi * j / N)\n', (20303, 20322), True, 'import numpy as np\n'), ((22252, 22284), 'numpy.conj', 'np.conj', (['omegaFFT[y, x, l, m, a]'], {}), '(omegaFFT[y, x, l, m, a])\n', (22259, 22284), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
import model
if __name__ == "__main__":
    # Build the per-level configuration objects defined by the model module.
    print("Making level configs...")
    level_configs = model.default_level_configs()
    print("Making filter variables...")
    # NOTE(review): tf.get_default_graph() is TensorFlow 1.x-style API, so
    # this script presumably targets TF1 (or tf.compat.v1) -- confirm.
    filters = model.make_filters(tf.get_default_graph(), level_configs)
    print("Done")
| [
"model.default_level_configs",
"tensorflow.get_default_graph"
] | [((144, 173), 'model.default_level_configs', 'model.default_level_configs', ([], {}), '()\n', (171, 173), False, 'import model\n'), ((248, 270), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (268, 270), True, 'import tensorflow as tf\n')] |
# Schedule a Datadog downtime for the staging environment.
import time

from datadog import initialize, api

# NOTE: placeholder credentials -- replace with real keys before use.
options = {
    'api_key': 'api_key',
    'app_key': 'app_key'
}

initialize(**options)

# Schedule downtime starting now (epoch seconds).
# Fix: the original called time.time() without importing time, which
# raised NameError at runtime.
api.Downtime.create(scope='env:staging', start=int(time.time()))
| [
"datadog.initialize"
] | [((103, 124), 'datadog.initialize', 'initialize', ([], {}), '(**options)\n', (113, 124), False, 'from datadog import initialize, api\n')] |
import contextlib
import re
from typing import NamedTuple, Optional
import discord
from redbot.core.commands import BadArgument, Context, MemberConverter
# Shared converter instance reused by MemberOrID.convert below.
_discord_member_converter_instance = MemberConverter()
# A raw Discord snowflake ID: 15-21 decimal digits.
_id_regex = re.compile(r"([0-9]{15,21})$")
# A member mention: <@ID> or <@!ID> (the "!" marks a nickname mention).
_mention_regex = re.compile(r"<@!?([0-9]{15,21})>$")
class MemberOrID(NamedTuple):
    """A resolved guild member (when available) together with a Discord ID."""

    member: Optional[discord.Member]
    id: int

    @classmethod
    async def convert(cls, ctx: Context, argument: str):
        """Resolve ``argument`` to a member, falling back to a bare ID/mention."""
        # First try the full member converter (handles names, mentions, ...).
        try:
            resolved = await _discord_member_converter_instance.convert(ctx, argument)
        except Exception:
            pass
        else:
            return cls(resolved, resolved.id)

        # Fall back to parsing a raw snowflake ID or a mention string.
        for pattern in (_id_regex, _mention_regex):
            match = pattern.match(argument)
            if match:
                return cls(None, int(match.group(1)))

        raise BadArgument()
| [
"redbot.core.commands.BadArgument",
"redbot.core.commands.MemberConverter",
"contextlib.suppress",
"re.compile"
] | [((193, 210), 'redbot.core.commands.MemberConverter', 'MemberConverter', ([], {}), '()\n', (208, 210), False, 'from redbot.core.commands import BadArgument, Context, MemberConverter\n'), ((223, 252), 're.compile', 're.compile', (['"""([0-9]{15,21})$"""'], {}), "('([0-9]{15,21})$')\n", (233, 252), False, 'import re\n'), ((271, 305), 're.compile', 're.compile', (['"""<@!?([0-9]{15,21})>$"""'], {}), "('<@!?([0-9]{15,21})>$')\n", (281, 305), False, 'import re\n'), ((781, 794), 'redbot.core.commands.BadArgument', 'BadArgument', ([], {}), '()\n', (792, 794), False, 'from redbot.core.commands import BadArgument, Context, MemberConverter\n'), ((477, 507), 'contextlib.suppress', 'contextlib.suppress', (['Exception'], {}), '(Exception)\n', (496, 507), False, 'import contextlib\n')] |
from django.contrib import admin
from face_api.models import KnowledgeDatabase
from face_api.models import ImageUploads
# Register the models so they can be managed through the Django admin site.
admin.site.register(KnowledgeDatabase)
admin.site.register(ImageUploads)
| [
"django.contrib.admin.site.register"
] | [((150, 188), 'django.contrib.admin.site.register', 'admin.site.register', (['KnowledgeDatabase'], {}), '(KnowledgeDatabase)\n', (169, 188), False, 'from django.contrib import admin\n'), ((189, 222), 'django.contrib.admin.site.register', 'admin.site.register', (['ImageUploads'], {}), '(ImageUploads)\n', (208, 222), False, 'from django.contrib import admin\n')] |
from dydx3.constants import SYNTHETIC_ASSET_MAP, SYNTHETIC_ASSET_ID_MAP, ASSET_RESOLUTION, COLLATERAL_ASSET
class TestConstants():
    """Structural sanity checks on the dydx3 market/asset constant maps."""

    def test_constants_have_regular_structure(self):
        # Every market key must be "<BASE>-USD" where BASE is the mapped asset.
        for market, asset in SYNTHETIC_ASSET_MAP.items():
            base_token, quote_token = market.split('-')
            assert base_token == asset
            assert quote_token == 'USD'

        # Asset IDs must be keyed in the same order as the asset map's values.
        assert list(SYNTHETIC_ASSET_MAP.values()) == list(SYNTHETIC_ASSET_ID_MAP.keys())

        # Every non-collateral asset with a resolution must be a synthetic asset.
        non_collateral = [a for a in ASSET_RESOLUTION.keys() if a != COLLATERAL_ASSET]
        assert non_collateral == list(SYNTHETIC_ASSET_MAP.values())
| [
"dydx3.constants.SYNTHETIC_ASSET_MAP.values",
"dydx3.constants.SYNTHETIC_ASSET_MAP.items",
"dydx3.constants.ASSET_RESOLUTION.keys",
"dydx3.constants.SYNTHETIC_ASSET_ID_MAP.keys"
] | [((215, 242), 'dydx3.constants.SYNTHETIC_ASSET_MAP.items', 'SYNTHETIC_ASSET_MAP.items', ([], {}), '()\n', (240, 242), False, 'from dydx3.constants import SYNTHETIC_ASSET_MAP, SYNTHETIC_ASSET_ID_MAP, ASSET_RESOLUTION, COLLATERAL_ASSET\n'), ((482, 510), 'dydx3.constants.SYNTHETIC_ASSET_MAP.values', 'SYNTHETIC_ASSET_MAP.values', ([], {}), '()\n', (508, 510), False, 'from dydx3.constants import SYNTHETIC_ASSET_MAP, SYNTHETIC_ASSET_ID_MAP, ASSET_RESOLUTION, COLLATERAL_ASSET\n'), ((520, 549), 'dydx3.constants.SYNTHETIC_ASSET_ID_MAP.keys', 'SYNTHETIC_ASSET_ID_MAP.keys', ([], {}), '()\n', (547, 549), False, 'from dydx3.constants import SYNTHETIC_ASSET_MAP, SYNTHETIC_ASSET_ID_MAP, ASSET_RESOLUTION, COLLATERAL_ASSET\n'), ((581, 604), 'dydx3.constants.ASSET_RESOLUTION.keys', 'ASSET_RESOLUTION.keys', ([], {}), '()\n', (602, 604), False, 'from dydx3.constants import SYNTHETIC_ASSET_MAP, SYNTHETIC_ASSET_ID_MAP, ASSET_RESOLUTION, COLLATERAL_ASSET\n'), ((661, 689), 'dydx3.constants.SYNTHETIC_ASSET_MAP.values', 'SYNTHETIC_ASSET_MAP.values', ([], {}), '()\n', (687, 689), False, 'from dydx3.constants import SYNTHETIC_ASSET_MAP, SYNTHETIC_ASSET_ID_MAP, ASSET_RESOLUTION, COLLATERAL_ASSET\n')] |
#INVASION COMMANDS:
# !invasions // !atinvasions <reward> // !rminvasions
import discord
from discord.ext import commands
import asyncio
from src import sess
class Invasions(commands.Cog):
    """Cog providing Warframe invasion listings and per-user reward alerts.

    Commands:
        !invasions            -- post an embed of all active invasions
        !atinvasions <reward> -- DM the user when invasions with <reward> change
        !rminvasions          -- stop receiving alerts
    """

    def __init__(self, bot):
        self.bot = bot
        # user -> [tracked reward string, list of previously alerted invasions]
        self.alert_dict = {}
        # Guard so only one polling loop is ever started (see on_ready).
        self._checker_running = False

    @staticmethod
    def _group_active_invasions(inv):
        """Group uncompleted invasions by description/type.

        Returns e.g. {'GrineerOffensive': [{mission}, {mission}], ...}.
        Completed invasions are skipped.
        """
        inv_dict = {}
        for i in inv:
            if not i['completed']:
                if i['desc'] not in inv_dict:
                    inv_dict[i['desc']] = []
                inv_dict[i['desc']].append(i)
        return inv_dict

    @staticmethod
    def _invasion_line(v):
        """Format one invasion as 'Node:\\nAttackers [reward] vs Defenders [reward]\\n'."""
        node = v['node']
        atk_reward = v['attackerReward']['asString'] or 'N/A'
        def_reward = v['defenderReward']['asString'] or 'N/A'
        attackers = v['attackingFaction']
        defenders = v['defendingFaction']
        return node + ': \n' + attackers + f' [{atk_reward}]' + ' vs ' + defenders + f' [{def_reward}]\n'

    @commands.Cog.listener()
    async def on_ready(self):
        print('Invasions Online')
        # on_ready fires again after every reconnect; the original started a
        # new infinite loop each time, which caused duplicate alerts. Only
        # ever run one polling loop.
        if self._checker_running:
            return
        self._checker_running = True
        # Periodically check for invasions matching tracked rewards.
        while True:
            await asyncio.gather(self.check_invasions(50))

    @commands.command()
    async def invasions(self, ctx):
        """Post an embed listing all currently active invasions."""
        inv = await sess.request('invasions')
        if inv == 0:
            print("Could not retrieve data.")
            return
        embed = discord.Embed(title="Invasions")
        # One embed field per invasion type, listing every node of that type.
        inv_dict = self._group_active_invasions(inv)
        for key, li in inv_dict.items():
            info = ''
            for v in li:
                info += self._invasion_line(v)
            embed.add_field(name=f'{key}', value=f'{info}', inline=False)
        await ctx.send(embed=embed)

    @commands.command()
    async def atinvasions(self, ctx, *, reward=''):
        """Subscribe the invoking user to DM alerts for a specific reward."""
        try:
            if not reward:
                await ctx.send(ctx.message.author.mention + ' Enter an invasion reward to be alerted for.')
            else:
                # Reset any previous subscription/history for this user.
                self.alert_dict[ctx.message.author] = [reward, []]
                await ctx.message.author.send(
                    f' You will now be alerted for invasions with a {reward.title()} reward.'
                    ' To stop being alerted, use command "!rminvasions"')
        except ValueError:
            await ctx.message.author.send('Enter an invasion reward to be alerted for.')

    @commands.command()
    async def rminvasions(self, ctx):
        """Unsubscribe the invoking user from invasion alerts."""
        try:
            self.alert_dict.pop(ctx.message.author)
            await ctx.message.author.send('You are no longer being alerted for invasions.')
        except KeyError:
            await ctx.message.author.send('You are currently not being alerted.')

    async def check_invasions(self, delay):
        """Sleep `delay` seconds, then DM each subscriber whose tracked
        reward's invasion list has changed since the last check.

        Called repeatedly from the loop started in on_ready.
        """
        await asyncio.sleep(delay)
        inv = await sess.request('invasions')
        if inv == 0:
            print("Could not retrieve data.")
            return
        embed = discord.Embed(title="Invasions")
        inv_dict = self._group_active_invasions(inv)

        # Rebuild each user's matching-invasion list and compare against the
        # one we last alerted them with.
        for user in self.alert_dict.keys():
            embed.clear_fields()
            user_inv = []
            tracked = self.alert_dict[user][0].lower()
            for key, li in inv_dict.items():
                info = ''
                for v in li:
                    # NOTE(review): assumes 'asString' is always a string
                    # (possibly empty) -- a None reward would raise here.
                    if tracked in v['attackerReward']['asString'].lower() \
                            or tracked in v['defenderReward']['asString'].lower():
                        user_inv.append(v)
                        info += self._invasion_line(v)
                if info != '':
                    embed.add_field(name=f'{key}', value=f'{info}', inline=False)

            # Alert when the list changed: different length, or some node at
            # the same position differs.
            if len(self.alert_dict[user][1]) != len(user_inv):
                self.alert_dict[user][1] = user_inv.copy()
                await user.send(f'Invasions with {self.alert_dict[user][0].title()} reward has been updated!',
                                embed=embed)
            else:
                for i in range(len(self.alert_dict[user][1])):
                    if self.alert_dict[user][1][i]['node'] != user_inv[i]['node']:
                        self.alert_dict[user][1] = user_inv.copy()
                        await user.send(f'Invasions with {self.alert_dict[user][0].title()} reward has been updated!',
                                        embed=embed)
                        # One notification is enough; stop comparing once sent.
                        break
def setup(bot):
    """Extension entry point used by discord.py's extension loader."""
    cog = Invasions(bot)
    bot.add_cog(cog)
| [
"discord.ext.commands.Cog.listener",
"src.sess.request",
"asyncio.sleep",
"discord.Embed",
"discord.ext.commands.command"
] | [((333, 356), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (354, 356), False, 'from discord.ext import commands\n'), ((535, 553), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (551, 553), False, 'from discord.ext import commands\n'), ((1969, 1987), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1985, 1987), False, 'from discord.ext import commands\n'), ((2705, 2723), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (2721, 2723), False, 'from discord.ext import commands\n'), ((739, 771), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Invasions"""'}), "(title='Invasions')\n", (752, 771), False, 'import discord\n'), ((3402, 3434), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Invasions"""'}), "(title='Invasions')\n", (3415, 3434), False, 'import discord\n'), ((610, 635), 'src.sess.request', 'sess.request', (['"""invasions"""'], {}), "('invasions')\n", (622, 635), False, 'from src import sess\n'), ((3232, 3252), 'asyncio.sleep', 'asyncio.sleep', (['delay'], {}), '(delay)\n', (3245, 3252), False, 'import asyncio\n'), ((3273, 3298), 'src.sess.request', 'sess.request', (['"""invasions"""'], {}), "('invasions')\n", (3285, 3298), False, 'from src import sess\n')] |
import sqlalchemy as sa
from .meta import Base
class Person(Base):
    """ORM model for a single person record."""

    __tablename__ = "person"

    # Surrogate primary key.
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String)
    date_of_birth = sa.Column(sa.Date)
    # NOTE(review): units for height/weight are not recorded here -- confirm
    # against the code that writes these columns.
    height = sa.Column(sa.Integer)
    weight = sa.Column(sa.Numeric)


# Public API of this module.
__all__ = [
    "Person",
]
| [
"sqlalchemy.Column"
] | [((109, 148), 'sqlalchemy.Column', 'sa.Column', (['sa.Integer'], {'primary_key': '(True)'}), '(sa.Integer, primary_key=True)\n', (118, 148), True, 'import sqlalchemy as sa\n'), ((160, 180), 'sqlalchemy.Column', 'sa.Column', (['sa.String'], {}), '(sa.String)\n', (169, 180), True, 'import sqlalchemy as sa\n'), ((201, 219), 'sqlalchemy.Column', 'sa.Column', (['sa.Date'], {}), '(sa.Date)\n', (210, 219), True, 'import sqlalchemy as sa\n'), ((233, 254), 'sqlalchemy.Column', 'sa.Column', (['sa.Integer'], {}), '(sa.Integer)\n', (242, 254), True, 'import sqlalchemy as sa\n'), ((268, 289), 'sqlalchemy.Column', 'sa.Column', (['sa.Numeric'], {}), '(sa.Numeric)\n', (277, 289), True, 'import sqlalchemy as sa\n')] |
import sys
sys.path.append("../../configs")
# ../../configs must be on sys.path so `path` (EXP_PATH) can be imported.
from path import EXP_PATH
import numpy as np

# Learning-rate schedule presets, keyed by decay type -> batch size ->
# preset name ('a*'/'b*'). Key names suggest TF-style schedules:
# 'stair' -> exponential staircase decay, 'piecewise' -> piecewise-constant
# decay — confirm against the training code that consumes them.
DECAY_PARAMS_DICT =\
{
    'stair' :
        {
        128 :{
            'a1': {'initial_lr' : 1e-5, 'decay_steps' : 50000, 'decay_rate' : 0.3},
            'a2' : {'initial_lr' : 3e-4, 'decay_steps' : 50000, 'decay_rate' : 0.3},
            'a3' : {'initial_lr' : 1e-3, 'decay_steps' : 50000, 'decay_rate' : 0.3},
            'a4' : {'initial_lr' : 3e-3, 'decay_steps' : 50000, 'decay_rate' : 0.3},
            'a5' : {'initial_lr' : 1e-2, 'decay_steps' : 50000, 'decay_rate' : 0.3}
             }
        },
    'piecewise' :
        {
        128 : {
            'a1' : {'boundaries' : [10000, 20000], 'values' : [1e-4, 3e-5, 1e-5]},
            'a2' : {'boundaries' : [10000, 20000], 'values' : [3e-4, 1e-4, 3e-5]},
            'a3' : {'boundaries' : [10000, 20000], 'values' : [1e-3, 3e-4, 1e-4]},
            'a4' : {'boundaries' : [10000, 20000], 'values' : [3e-3, 1e-3, 3e-4]},
            'a5' : {'boundaries' : [10000, 20000], 'values' : [1e-2, 3e-3, 1e-3]},
            'b1' : {'boundaries' : [20000, 35000], 'values' : [1e-4, 3e-5, 1e-5]},
            'b2' : {'boundaries' : [20000, 35000], 'values' : [3e-4, 1e-4, 3e-5]},
            'b3' : {'boundaries' : [20000, 35000], 'values' : [1e-3, 3e-4, 1e-4]},
            'b4' : {'boundaries' : [20000, 35000], 'values' : [3e-3, 1e-3, 3e-4]},
            'b5' : {'boundaries' : [20000, 35000], 'values' : [1e-2, 3e-3, 1e-3]}
              }
        }
}

ACTIVATE_K_SET = np.arange(1, 5)  # k = 1..4 (arange upper bound is exclusive)
K_SET = [1,4,16]
RESULT_DIR = EXP_PATH+"cifar_exps/"  # all experiment output goes under here
#========================PARAM============================#
DATASET= 'cifar'
GPU_ID = 0
BATCH_SIZE = 128
EPOCH = 300
NSCLASS = 16
# model
EMBED_M= 64
CONV_NAME = 'conv1'
# metric loss
LOSS_TYPE = 'triplet'
MARGIN_ALPHA = 0.3
LAMBDA = 0.003 # regularization for npair
# learning
DECAY_TYPE = 'stair'
DECAY_PARAM_TYPE = 'a3'
| [
"sys.path.append",
"numpy.arange"
] | [((11, 43), 'sys.path.append', 'sys.path.append', (['"""../../configs"""'], {}), "('../../configs')\n", (26, 43), False, 'import sys\n'), ((1616, 1631), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (1625, 1631), True, 'import numpy as np\n')] |
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Contains WithAction mixin.
A mixin for processing actions on an object in the scope of put request .
"""
from collections import namedtuple, defaultdict
import werkzeug.exceptions as wzg_exceptions
from ggrc import db
from ggrc.login import get_current_user
from ggrc.models.comment import Comment
from ggrc.models.document import Document
from ggrc.models.evidence import Evidence
from ggrc.models.snapshot import Snapshot
from ggrc.models.exceptions import ValidationError
from ggrc.models.reflection import ApiAttributes
from ggrc.models.reflection import Attribute
from ggrc.models.relationship import Relationship
from ggrc.rbac import permissions
class WithAction(object):
  """Mixin for add/remove map/unmap actions processing.

  Mixed-in objects accept an ``actions`` payload on PUT requests of the
  form ``{"actions": {"add_related": [...], "remove_related": [...]}}``.
  Each action entry is dispatched to a handler class named
  ``<Type>Action`` that must be reachable as an attribute of the
  mixed-in object (e.g. ``DocumentAction``).
  """

  _api_attrs = ApiAttributes(
      Attribute("actions", create=False, update=True, read=False)
  )

  # Operations are always processed in this fixed order.
  _operation_order = [
      "add_related",
      "remove_related",
  ]

  # Object types allowed in an action payload, mapped to their model class.
  _object_map = {
      "Document": Document,
      "Evidence": Evidence,
      "Comment": Comment,
      "Snapshot": Snapshot,
  }

  _actions = None
  _added = None  # collect added objects for signals sending
  _deleted = None  # collect deleted objects for signals sending
  _relationships_map = None  # (type, id) -> Relationship lookup cache

  def actions(self, value):
    """Save actions for further processing.

    Acts as the setter for the ``actions`` API attribute declared above.
    """
    if value:
      self._actions = value.get("actions")

  def _validate_actions(self):
    """Validate operation types.

    Raises ValueError listing every unknown operation key at once.
    """
    invalid_actions = ",".join(set(self._actions) - set(self._operation_order))
    if invalid_actions:
      raise ValueError("Invalid actions found: {}".format(invalid_actions))

  def _build_relationships_map(self):
    """Build relationships map.

    Indexes existing relationships by the related object's (type, id) so
    remove operations can find the Relationship row to delete.
    """
    self._relationships_map = {
        (rel.destination_type, rel.destination_id): rel
        for rel in self.related_destinations
    }
    self._relationships_map.update({
        (rel.source_type, rel.source_id): rel
        for rel in self.related_sources
    })

  def _process_operation(self, operation):
    """Process operation actions.

    Dispatches every action of *operation* to its ``<Type>Action`` handler
    and accumulates the returned added/deleted objects.
    """
    for action in self._actions[operation]:
      # get object class
      obj_type = action.get("type")
      if not obj_type:
        raise ValidationError('type is not defined')
      obj_class = self._object_map.get(obj_type)
      if not obj_class:
        raise ValueError('Invalid action type: {type}'.format(type=obj_type))
      # get handler class
      action_type = '{type}Action'.format(type=obj_type)
      action_class = getattr(self, action_type, None)
      if not action_class:
        raise ValueError('Invalid action type: {type}'.format(type=obj_type))
      # process action
      # pylint: disable=not-callable
      added, deleted = getattr(action_class(), operation)(self, action)
      # collect added/deleted objects
      self._added.extend(added)
      self._deleted.extend(deleted)

  def process_actions(self):
    """Process actions.

    Returns:
      A pair ``(added, deleted)``: ``added`` maps model classes to lists
      of created/mapped objects, ``deleted`` is a list of removed
      Relationship rows — both intended for downstream signal sending.
    """
    if not self._actions:
      return {}, []
    self._validate_actions()

    self._added = []
    self._deleted = []
    for operation in self._operation_order:
      if operation not in self._actions:
        continue
      if not self._actions[operation]:
        continue
      # Rebuild the map per operation: earlier operations may have
      # added/removed relationships.
      self._build_relationships_map()
      self._process_operation(operation)

    # collect added/deleted objects for signals sending
    added = defaultdict(list)
    for obj in self._added:
      added[obj.__class__].append(obj)

    return added, self._deleted
class BaseAction(object):
  """Base action.

  Implements the generic ``add_related``/``remove_related`` operations;
  subclasses override hooks such as ``_create`` for per-type behaviour.
  """

  AddRelated = namedtuple("AddRelated", ["id", "type"])
  MapRelated = namedtuple("MapRelated", ["id", "type"])
  RemoveRelated = namedtuple("RemoveRelated", ["id", "type"])

  def add_related(self, parent, _action):
    """Add/map object to parent.

    An action carrying an ``id`` maps an existing object; otherwise a new
    object is created via the subclass ``_create`` hook.

    Returns:
      ``(added, deleted)`` — ``deleted`` is always empty here.
    """
    added = []
    if _action.get("id"):
      action = self._validate(_action, self.MapRelated)
      obj = self._get(action)
    else:
      action = self._validate(_action, self.AddRelated)
      obj = self._create(parent, action)
      added.append(obj)
    # Imported locally — presumably to avoid a circular import; confirm.
    from ggrc.models.hooks.common import check_mapping_permissions
    check_mapping_permissions(parent, obj)
    rel = Relationship(source=parent,
                       destination=obj,
                       context=parent.context)
    added.append(rel)
    return added, []

  @staticmethod
  def _validate(_action, ntuple):
    """Coerce the raw action dict into *ntuple*, reporting missing keys."""
    try:
      return ntuple(**_action)
    except TypeError:
      # According to documentation _fields is not private property
      # but public, '_' added to prevent conflicts with tuple field names
      # pylint: disable=protected-access
      missing_fields = set(ntuple._fields) - set(_action)
      raise ValidationError(
          "Fields {} are missing for action: {!r}".format(
              ", ".join(missing_fields), _action
          )
      )

  # pylint: disable=unused-argument,no-self-use
  def _create(self, parent, action):
    """Creation hook; subclasses that support creation must override."""
    raise ValidationError("Can't create {type} object".format(
        type=action.type))

  def _get(self, action):
    """Get object specified in action"""
    if not action.id:
      raise ValueError("id is not defined")
    # pylint: disable=protected-access
    obj_class = WithAction._object_map[action.type]
    obj = obj_class.query.get(action.id)
    if not obj:
      raise ValueError(
          'Object not found: {type} {id}'.format(type=action.type,
                                                 id=action.id))
    return obj

  def remove_related(self, parent, _action):
    """Remove relationship"""
    action = self._validate(_action, self.RemoveRelated)
    deleted = []
    obj = self._get(action)
    # Relationship rows were cached by WithAction._build_relationships_map.
    # pylint: disable=protected-access
    rel = parent._relationships_map.get((obj.type, obj.id))
    if rel:
      db.session.delete(rel)
      deleted.append(rel)
    return [], deleted

  def _check_related_permissions(self, obj):
    """Check permissions before deleting related Evidence or Document"""
    if not permissions.is_allowed_delete(
        obj.type, obj.id, obj.context_id) \
       and not permissions.has_conditions("delete", obj.type):
      raise wzg_exceptions.Forbidden()
    if not permissions.is_allowed_delete_for(obj):
      raise wzg_exceptions.Forbidden()
class DocumentAction(BaseAction):
  """Document action"""

  AddRelated = namedtuple("AddRelated", ["id",
                                        "type",
                                        "kind",
                                        "link",
                                        "title"])

  @staticmethod
  def _validate_parent(parent):
    """Validate that the parent object is Documentable."""
    from ggrc.models.object_document import Documentable
    if not isinstance(parent, Documentable):
      raise ValueError('Type "{}" is not Documentable.'.format(parent.type))

  def _create(self, parent, action):
    """Build a new Document in the parent's context."""
    self._validate_parent(parent)
    obj = Document(link=action.link,
                   title=action.title,
                   kind=action.kind,
                   context=parent.context)
    return obj

  def remove_related(self, parent, _action):
    """Remove relationship.

    Same as BaseAction.remove_related, but enforces delete permissions on
    the related Document first.
    """
    action = self._validate(_action, self.RemoveRelated)
    deleted = []
    obj = self._get(action)
    # pylint: disable=protected-access
    rel = parent._relationships_map.get((obj.type, obj.id))
    self._check_related_permissions(obj)
    if rel:
      db.session.delete(rel)
      deleted.append(rel)
    return [], deleted
class EvidenceAction(BaseAction):
  """Evidence action"""

  AddRelatedTuple = namedtuple("AddRelated", ["id",
                                            "type",
                                            "kind",
                                            "link",
                                            "title",
                                            "source_gdrive_id"])

  def add_related_wrapper(self, id, type, kind, link,
                          title, source_gdrive_id=''):
    """Used to add 'default' value to the named tuple

    In case of Evidence.FILE source_gdrive_id is mandatory
    """
    return self.AddRelatedTuple(id, type, kind, link,
                                title, source_gdrive_id)

  # Bind the wrapper as this type's AddRelated "constructor" and copy the
  # namedtuple's _fields onto the function so BaseAction._validate can
  # still report missing fields for it.
  AddRelated = add_related_wrapper
  AddRelated._fields = AddRelatedTuple._fields

  def _create(self, parent, action):
    """Build a new Evidence in the parent's context."""
    obj = Evidence(link=action.link,
                   title=action.title,
                   kind=action.kind,
                   source_gdrive_id=action.source_gdrive_id,
                   context=parent.context)
    return obj

  def remove_related(self, parent, _action):
    """Remove relationship.

    Checks delete permissions, unmaps the Evidence, and marks it
    DEPRECATED even when no relationship row was found.
    """
    action = self._validate(_action, self.RemoveRelated)
    deleted = []
    obj = self._get(action)
    # pylint: disable=protected-access
    rel = parent._relationships_map.get((obj.type, obj.id))
    self._check_related_permissions(obj)
    if rel:
      db.session.delete(rel)
      deleted.append(rel)
    obj.status = Evidence.DEPRECATED
    return [], deleted
class CommentAction(BaseAction):
  """Handler for add/map actions on Comment objects."""

  AddRelated = namedtuple(
      "AddRelated",
      ["id", "type", "description", "custom_attribute_definition_id"],
  )

  def _create(self, parent, action):
    """Build a new Comment in the parent's context.

    The comment records which assignee roles the acting user holds on the
    parent, and optionally links to a custom attribute definition.
    """
    user = get_current_user()
    roles = parent.assignees.get(user, [])
    assignee_type = ",".join(roles) or None

    kwargs = {
        "description": action.description,
        "assignee_type": assignee_type,
        "context": parent.context,
    }
    cad_id = action.custom_attribute_definition_id
    if cad_id:
      kwargs["custom_attribute_definition_id"] = cad_id
    return Comment(**kwargs)
class SnapshotAction(BaseAction):
"""Snapshot action"""
| [
"collections.namedtuple",
"ggrc.models.document.Document",
"ggrc.login.get_current_user",
"ggrc.rbac.permissions.is_allowed_delete_for",
"ggrc.db.session.delete",
"werkzeug.exceptions.Forbidden",
"ggrc.models.evidence.Evidence",
"ggrc.models.exceptions.ValidationError",
"ggrc.rbac.permissions.has_co... | [((895, 954), 'ggrc.models.reflection.Attribute', 'Attribute', (['"""actions"""'], {'create': '(False)', 'update': '(True)', 'read': '(False)'}), "('actions', create=False, update=True, read=False)\n", (904, 954), False, 'from ggrc.models.reflection import Attribute\n'), ((3437, 3454), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3448, 3454), False, 'from collections import namedtuple, defaultdict\n'), ((3624, 3664), 'collections.namedtuple', 'namedtuple', (['"""AddRelated"""', "['id', 'type']"], {}), "('AddRelated', ['id', 'type'])\n", (3634, 3664), False, 'from collections import namedtuple, defaultdict\n'), ((3682, 3722), 'collections.namedtuple', 'namedtuple', (['"""MapRelated"""', "['id', 'type']"], {}), "('MapRelated', ['id', 'type'])\n", (3692, 3722), False, 'from collections import namedtuple, defaultdict\n'), ((3743, 3786), 'collections.namedtuple', 'namedtuple', (['"""RemoveRelated"""', "['id', 'type']"], {}), "('RemoveRelated', ['id', 'type'])\n", (3753, 3786), False, 'from collections import namedtuple, defaultdict\n'), ((6518, 6583), 'collections.namedtuple', 'namedtuple', (['"""AddRelated"""', "['id', 'type', 'kind', 'link', 'title']"], {}), "('AddRelated', ['id', 'type', 'kind', 'link', 'title'])\n", (6528, 6583), False, 'from collections import namedtuple, defaultdict\n'), ((7819, 7908), 'collections.namedtuple', 'namedtuple', (['"""AddRelated"""', "['id', 'type', 'kind', 'link', 'title', 'source_gdrive_id']"], {}), "('AddRelated', ['id', 'type', 'kind', 'link', 'title',\n 'source_gdrive_id'])\n", (7829, 7908), False, 'from collections import namedtuple, defaultdict\n'), ((9425, 9518), 'collections.namedtuple', 'namedtuple', (['"""AddRelated"""', "['id', 'type', 'description', 'custom_attribute_definition_id']"], {}), "('AddRelated', ['id', 'type', 'description',\n 'custom_attribute_definition_id'])\n", (9435, 9518), False, 'from collections import namedtuple, defaultdict\n'), ((4219, 
4257), 'ggrc.models.hooks.common.check_mapping_permissions', 'check_mapping_permissions', (['parent', 'obj'], {}), '(parent, obj)\n', (4244, 4257), False, 'from ggrc.models.hooks.common import check_mapping_permissions\n'), ((4271, 4339), 'ggrc.models.relationship.Relationship', 'Relationship', ([], {'source': 'parent', 'destination': 'obj', 'context': 'parent.context'}), '(source=parent, destination=obj, context=parent.context)\n', (4283, 4339), False, 'from ggrc.models.relationship import Relationship\n'), ((7132, 7225), 'ggrc.models.document.Document', 'Document', ([], {'link': 'action.link', 'title': 'action.title', 'kind': 'action.kind', 'context': 'parent.context'}), '(link=action.link, title=action.title, kind=action.kind, context=\n parent.context)\n', (7140, 7225), False, 'from ggrc.models.document import Document\n'), ((8641, 8775), 'ggrc.models.evidence.Evidence', 'Evidence', ([], {'link': 'action.link', 'title': 'action.title', 'kind': 'action.kind', 'source_gdrive_id': 'action.source_gdrive_id', 'context': 'parent.context'}), '(link=action.link, title=action.title, kind=action.kind,\n source_gdrive_id=action.source_gdrive_id, context=parent.context)\n', (8649, 8775), False, 'from ggrc.models.evidence import Evidence\n'), ((9731, 9749), 'ggrc.login.get_current_user', 'get_current_user', ([], {}), '()\n', (9747, 9749), False, 'from ggrc.login import get_current_user\n'), ((2278, 2316), 'ggrc.models.exceptions.ValidationError', 'ValidationError', (['"""type is not defined"""'], {}), "('type is not defined')\n", (2293, 2316), False, 'from ggrc.models.exceptions import ValidationError\n'), ((5948, 5970), 'ggrc.db.session.delete', 'db.session.delete', (['rel'], {}), '(rel)\n', (5965, 5970), False, 'from ggrc import db\n'), ((6316, 6342), 'werkzeug.exceptions.Forbidden', 'wzg_exceptions.Forbidden', ([], {}), '()\n', (6340, 6342), True, 'import werkzeug.exceptions as wzg_exceptions\n'), ((6356, 6394), 'ggrc.rbac.permissions.is_allowed_delete_for', 
'permissions.is_allowed_delete_for', (['obj'], {}), '(obj)\n', (6389, 6394), False, 'from ggrc.rbac import permissions\n'), ((6410, 6436), 'werkzeug.exceptions.Forbidden', 'wzg_exceptions.Forbidden', ([], {}), '()\n', (6434, 6436), True, 'import werkzeug.exceptions as wzg_exceptions\n'), ((7657, 7679), 'ggrc.db.session.delete', 'db.session.delete', (['rel'], {}), '(rel)\n', (7674, 7679), False, 'from ggrc import db\n'), ((9229, 9251), 'ggrc.db.session.delete', 'db.session.delete', (['rel'], {}), '(rel)\n', (9246, 9251), False, 'from ggrc import db\n'), ((9977, 10073), 'ggrc.models.comment.Comment', 'Comment', ([], {'description': 'action.description', 'assignee_type': 'assignee_type', 'context': 'parent.context'}), '(description=action.description, assignee_type=assignee_type,\n context=parent.context)\n', (9984, 10073), False, 'from ggrc.models.comment import Comment\n'), ((10140, 10276), 'ggrc.models.comment.Comment', 'Comment', ([], {'description': 'action.description', 'custom_attribute_definition_id': 'cad_id', 'assignee_type': 'assignee_type', 'context': 'parent.context'}), '(description=action.description, custom_attribute_definition_id=\n cad_id, assignee_type=assignee_type, context=parent.context)\n', (10147, 10276), False, 'from ggrc.models.comment import Comment\n'), ((6160, 6223), 'ggrc.rbac.permissions.is_allowed_delete', 'permissions.is_allowed_delete', (['obj.type', 'obj.id', 'obj.context_id'], {}), '(obj.type, obj.id, obj.context_id)\n', (6189, 6223), False, 'from ggrc.rbac import permissions\n'), ((6254, 6300), 'ggrc.rbac.permissions.has_conditions', 'permissions.has_conditions', (['"""delete"""', 'obj.type'], {}), "('delete', obj.type)\n", (6280, 6300), False, 'from ggrc.rbac import permissions\n')] |
import torch
class GAN_discriminator(torch.nn.Module):
    """Discriminator network for a (conditional) GAN.

    ``H`` is a list of 12 layer-size hyperparameters, e.g.
        GAN:  [5, 256, 128, 128, 5, 1, 64, 128, 256, 256, 4096, 1]
        CGAN: [8, 256, 128, 64, 8, 9, 64, 128, 256, 256, 4096, 1]

    ``forward`` optionally runs an auxiliary input ``x`` through four
    upsample/conv/batch-norm stages, concatenates the result with
    ``scene`` on the channel axis, and scores the stack with four strided
    convolutions followed by a dense sigmoid head.
    """

    def __init__(self, H):
        """Build all layers.

        Args:
            H: list of 12 ints — channel counts for the upsampling tower
               (H[0]..H[4]) and downsampling tower (H[5]..H[9]), the
               flattened feature size H[10], and the output size H[11].
        """
        super(GAN_discriminator, self).__init__()

        # --- upsampling tower (used only when forward() receives x) ---
        # Heights grow by 4 * 4 * 2 * 2 = 64x; widths are unchanged.
        self.upsample0 = torch.nn.ConvTranspose2d(H[0], H[0], (4, 1), stride=(4, 1))
        self.convolution0 = torch.nn.Conv2d(H[0], H[1], (5, 3), padding=(2, 1))
        self.batchNorm0 = torch.nn.BatchNorm2d(H[1])

        self.upsample1 = torch.nn.ConvTranspose2d(H[1], H[1], (4, 1), stride=(4, 1))
        self.convolution1 = torch.nn.Conv2d(H[1], H[2], (5, 3), padding=(2, 1))
        self.batchNorm1 = torch.nn.BatchNorm2d(H[2])

        self.upsample2 = torch.nn.ConvTranspose2d(H[2], H[2], (2, 1), stride=(2, 1))
        self.convolution2 = torch.nn.Conv2d(H[2], H[3], (3, 3), padding=(1, 1))
        self.batchNorm2 = torch.nn.BatchNorm2d(H[3])

        self.upsample3 = torch.nn.ConvTranspose2d(H[3], H[3], (2, 1), stride=(2, 1))
        self.convolution3 = torch.nn.Conv2d(H[3], H[4], (3, 3), padding=(1, 1))
        self.batchNorm3 = torch.nn.BatchNorm2d(H[4])

        # --- downsampling tower: each conv halves height and width ---
        self.convolution5 = torch.nn.Conv2d(H[5], H[6], (3, 3), stride=(2, 2), padding=(1, 1))
        self.convolution6 = torch.nn.Conv2d(H[6], H[7], (3, 3), stride=(2, 2), padding=(1, 1))
        self.convolution7 = torch.nn.Conv2d(H[7], H[8], (3, 3), stride=(2, 2), padding=(1, 1))
        self.convolution8 = torch.nn.Conv2d(H[8], H[9], (3, 3), stride=(2, 2), padding=(1, 1))

        # dense sigmoid head over the flattened conv features
        self.dense9 = torch.nn.Linear(H[10], H[11])
        self.sigmoid9 = torch.nn.Sigmoid()

    def forward(self, x, scene):
        """Score ``scene``, optionally conditioned on ``x``.

        Args:
            x: optional (N, H[0], h, w) tensor; upsampled 64x in height and
               concatenated with ``scene``. Pass None for the plain-GAN
               configuration where only ``scene`` is scored.
            scene: (N, C, h', w) tensor; channel count must satisfy
               H[5] == C (+ H[4] when x is given).

        Returns:
            (N, H[11]) tensor of sigmoid scores in (0, 1).
        """
        # BUG FIX: the original tested ``x != None``. Identity comparison is
        # the correct way to check for a missing input; ``!=`` invokes the
        # tensor's __ne__ and only works by accident of PyTorch's
        # None special-casing.
        if x is not None:
            h = self.batchNorm0(
                torch.nn.functional.leaky_relu(self.convolution0(self.upsample0(x)), 0.2))
            h = self.batchNorm1(
                torch.nn.functional.leaky_relu(self.convolution1(self.upsample1(h)), 0.2))
            h = self.batchNorm2(
                torch.nn.functional.leaky_relu(self.convolution2(self.upsample2(h)), 0.2))
            h = self.batchNorm3(
                torch.nn.functional.leaky_relu(self.convolution3(self.upsample3(h)), 0.2))
            h_conc = torch.cat((h, scene), 1)
        else:
            h_conc = scene

        h = torch.nn.functional.leaky_relu(self.convolution5(h_conc), 0.2)
        h = torch.nn.functional.leaky_relu(self.convolution6(h), 0.2)
        h = torch.nn.functional.leaky_relu(self.convolution7(h), 0.2)
        h = torch.nn.functional.leaky_relu(self.convolution8(h), 0.2)

        h = h.view(len(scene), -1)
        return self.sigmoid9(self.dense9(h))
return h_out
| [
"torch.nn.BatchNorm2d",
"torch.nn.Sigmoid",
"torch.nn.functional.leaky_relu",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.ConvTranspose2d",
"torch.cat"
] | [((339, 398), 'torch.nn.ConvTranspose2d', 'torch.nn.ConvTranspose2d', (['H[0]', 'H[0]', '(4, 1)'], {'stride': '(4, 1)'}), '(H[0], H[0], (4, 1), stride=(4, 1))\n', (363, 398), False, 'import torch\n'), ((423, 474), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['H[0]', 'H[1]', '(5, 3)'], {'padding': '(2, 1)'}), '(H[0], H[1], (5, 3), padding=(2, 1))\n', (438, 474), False, 'import torch\n'), ((510, 536), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['H[1]'], {}), '(H[1])\n', (530, 536), False, 'import torch\n'), ((563, 622), 'torch.nn.ConvTranspose2d', 'torch.nn.ConvTranspose2d', (['H[1]', 'H[1]', '(4, 1)'], {'stride': '(4, 1)'}), '(H[1], H[1], (4, 1), stride=(4, 1))\n', (587, 622), False, 'import torch\n'), ((651, 702), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['H[1]', 'H[2]', '(5, 3)'], {'padding': '(2, 1)'}), '(H[1], H[2], (5, 3), padding=(2, 1))\n', (666, 702), False, 'import torch\n'), ((744, 770), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['H[2]'], {}), '(H[2])\n', (764, 770), False, 'import torch\n'), ((797, 856), 'torch.nn.ConvTranspose2d', 'torch.nn.ConvTranspose2d', (['H[2]', 'H[2]', '(2, 1)'], {'stride': '(2, 1)'}), '(H[2], H[2], (2, 1), stride=(2, 1))\n', (821, 856), False, 'import torch\n'), ((885, 936), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['H[2]', 'H[3]', '(3, 3)'], {'padding': '(1, 1)'}), '(H[2], H[3], (3, 3), padding=(1, 1))\n', (900, 936), False, 'import torch\n'), ((978, 1004), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['H[3]'], {}), '(H[3])\n', (998, 1004), False, 'import torch\n'), ((1031, 1090), 'torch.nn.ConvTranspose2d', 'torch.nn.ConvTranspose2d', (['H[3]', 'H[3]', '(2, 1)'], {'stride': '(2, 1)'}), '(H[3], H[3], (2, 1), stride=(2, 1))\n', (1055, 1090), False, 'import torch\n'), ((1119, 1170), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['H[3]', 'H[4]', '(3, 3)'], {'padding': '(1, 1)'}), '(H[3], H[4], (3, 3), padding=(1, 1))\n', (1134, 1170), False, 'import torch\n'), ((1212, 1238), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', 
(['H[4]'], {}), '(H[4])\n', (1232, 1238), False, 'import torch\n'), ((1309, 1375), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['H[5]', 'H[6]', '(3, 3)'], {'stride': '(2, 2)', 'padding': '(1, 1)'}), '(H[5], H[6], (3, 3), stride=(2, 2), padding=(1, 1))\n', (1324, 1375), False, 'import torch\n'), ((1412, 1478), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['H[6]', 'H[7]', '(3, 3)'], {'stride': '(2, 2)', 'padding': '(1, 1)'}), '(H[6], H[7], (3, 3), stride=(2, 2), padding=(1, 1))\n', (1427, 1478), False, 'import torch\n'), ((1521, 1587), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['H[7]', 'H[8]', '(3, 3)'], {'stride': '(2, 2)', 'padding': '(1, 1)'}), '(H[7], H[8], (3, 3), stride=(2, 2), padding=(1, 1))\n', (1536, 1587), False, 'import torch\n'), ((1630, 1696), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['H[8]', 'H[9]', '(3, 3)'], {'stride': '(2, 2)', 'padding': '(1, 1)'}), '(H[8], H[9], (3, 3), stride=(2, 2), padding=(1, 1))\n', (1645, 1696), False, 'import torch\n'), ((1748, 1777), 'torch.nn.Linear', 'torch.nn.Linear', (['H[10]', 'H[11]'], {}), '(H[10], H[11])\n', (1763, 1777), False, 'import torch\n'), ((1801, 1819), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (1817, 1819), False, 'import torch\n'), ((2939, 2983), 'torch.nn.functional.leaky_relu', 'torch.nn.functional.leaky_relu', (['h_conv5', '(0.2)'], {}), '(h_conv5, 0.2)\n', (2969, 2983), False, 'import torch\n'), ((3048, 3092), 'torch.nn.functional.leaky_relu', 'torch.nn.functional.leaky_relu', (['h_conv6', '(0.2)'], {}), '(h_conv6, 0.2)\n', (3078, 3092), False, 'import torch\n'), ((3157, 3201), 'torch.nn.functional.leaky_relu', 'torch.nn.functional.leaky_relu', (['h_conv7', '(0.2)'], {}), '(h_conv7, 0.2)\n', (3187, 3201), False, 'import torch\n'), ((3266, 3310), 'torch.nn.functional.leaky_relu', 'torch.nn.functional.leaky_relu', (['h_conv8', '(0.2)'], {}), '(h_conv8, 0.2)\n', (3296, 3310), False, 'import torch\n'), ((2012, 2056), 'torch.nn.functional.leaky_relu', 'torch.nn.functional.leaky_relu', (['h_conv0', 
'(0.2)'], {}), '(h_conv0, 0.2)\n', (2042, 2056), False, 'import torch\n'), ((2231, 2275), 'torch.nn.functional.leaky_relu', 'torch.nn.functional.leaky_relu', (['h_conv1', '(0.2)'], {}), '(h_conv1, 0.2)\n', (2261, 2275), False, 'import torch\n'), ((2451, 2495), 'torch.nn.functional.leaky_relu', 'torch.nn.functional.leaky_relu', (['h_conv2', '(0.2)'], {}), '(h_conv2, 0.2)\n', (2481, 2495), False, 'import torch\n'), ((2671, 2715), 'torch.nn.functional.leaky_relu', 'torch.nn.functional.leaky_relu', (['h_conv3', '(0.2)'], {}), '(h_conv3, 0.2)\n', (2701, 2715), False, 'import torch\n'), ((2805, 2836), 'torch.cat', 'torch.cat', (['(h_batch3, scene)', '(1)'], {}), '((h_batch3, scene), 1)\n', (2814, 2836), False, 'import torch\n')] |
""" test gpath
isort:skip_file
"""
import os
import sys
import unittest
try:
from unittest import mock
except ImportError:
import mock
# Make the package sources importable when the tests are run straight from
# a checkout: <repo>/src relative to this test file's grandparent dir.
SRC = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "src")
if SRC not in sys.path:
    sys.path.insert(0, SRC)

from ciopath.gpath import Path

# Replace the stdlib glob module with the test double from mocks.glob so
# any globbing done by Path sees canned results instead of the real FS.
sys.modules["glob"] = __import__("mocks.glob", fromlist=["dummy"])
class BadInputTest(unittest.TestCase):
    """The Path constructor rejects unusable input."""

    def test_empty_input(self):
        """An empty string is not a valid path."""
        self.assertRaises(ValueError, Path, "")
class RootPath(unittest.TestCase):
    """Bare filesystem roots render in both slash conventions."""

    def test_root_path(self):
        root = Path("/")
        self.assertEqual(root.fslash(), "/")
        self.assertEqual(root.bslash(), "\\")

    def test_drive_letter_root_path(self):
        root = Path("C:\\")
        self.assertEqual(root.fslash(), "C:/")
        self.assertEqual(root.bslash(), "C:\\")
class SpecifyDriveLetterUse(unittest.TestCase):
    """with_drive=False strips the drive letter from rendered output."""

    def test_remove_from_path(self):
        path = Path("C:\\a\\b\\c")
        self.assertEqual(path.fslash(with_drive=False), "/a/b/c")
        self.assertEqual(path.bslash(with_drive=False), "\\a\\b\\c")

    def test_remove_from_root_path(self):
        path = Path("C:\\")
        self.assertEqual(path.fslash(with_drive=False), "/")
        self.assertEqual(path.bslash(with_drive=False), "\\")
class AbsPosixPathTest(unittest.TestCase):
    """Both slash renderings of an absolute POSIX path."""

    def setUp(self):
        self.path = Path("/a/b/c")

    def test_fslash_out(self):
        self.assertEqual(self.path.fslash(), "/a/b/c")

    def test_win_path_out(self):
        self.assertEqual(self.path.bslash(), "\\a\\b\\c")
class AbsWindowsPathTest(unittest.TestCase):
    """Rendering of an absolute Windows path with a drive letter."""

    def setUp(self):
        self.path = Path("C:\\a\\b\\c")

    def test_fslash_out(self):
        self.assertEqual(self.path.fslash(), "C:/a/b/c")

    def test_win_path_out(self):
        self.assertEqual(self.path.bslash(), "C:\\a\\b\\c")

    # consider just testing on both platforms
    def test_os_path_out(self):
        for os_name, expected in (("posix", "C:/a/b/c"), ("nt", "C:\\a\\b\\c")):
            with mock.patch("os.name", os_name):
                self.assertEqual(self.path.os_path(), expected)
class PathStringTest(unittest.TestCase):
    """str(Path(s)) reproduces the original input verbatim."""

    def _assert_round_trip(self, input_file):
        # Helper: building a Path and stringifying it must be lossless.
        self.assertEqual(str(Path(input_file)), input_file)

    def test_path_emits_string_posix(self):
        self._assert_round_trip("/path/to/thefile.jpg")

    def test_path_emits_string_with_drive(self):
        self._assert_round_trip("C:/path/to/thefile.jpg")

    def test_path_emits_string_relative(self):
        self._assert_round_trip("path/to/thefile.jpg")
class WindowsMixedPathTest(unittest.TestCase):
    """Inputs mixing back- and forward slashes normalize cleanly."""

    def test_abs_in_fslash_out(self):
        self.assertEqual(Path("\\a\\b\\c/d/e").fslash(), "/a/b/c/d/e")

    def test_abs_in_bslash_out(self):
        self.assertEqual(Path("\\a\\b\\c/d/e").bslash(), "\\a\\b\\c\\d\\e")

    def test_letter_abs_in_fslash_out(self):
        self.assertEqual(Path("C:\\a\\b\\c/d/e").fslash(), "C:/a/b/c/d/e")

    def test_letter_abs_in_bslash_out(self):
        self.assertEqual(Path("C:\\a\\b\\c/d/e").bslash(), "C:\\a\\b\\c\\d\\e")
class MiscPathTest(unittest.TestCase):
    """Runs of repeated separators collapse to single ones."""

    def test_many_to_single_backslashes_bslash_out(self):
        path = Path("C:\\\\a\\b///c")
        self.assertEqual(path.bslash(), "C:\\a\\b\\c")
class PathExpansionTest(unittest.TestCase):
    """Expansion of ``~`` and ``$VAR`` tokens in path input.

    The environment is patched per-test so the assertions are hermetic.
    """

    def setUp(self):
        self.env = {
            "HOME": "/users/joebloggs",
            "SHOT": "/metropolis/shot01",
            "DEPT": "texturing",
        }

    def test_posix_tilde_input(self):
        with mock.patch.dict("os.environ", self.env):
            self.p = Path("~/a/b/c")
            self.assertEqual(self.p.fslash(), "/users/joebloggs/a/b/c")

    def test_posix_var_input(self):
        with mock.patch.dict("os.environ", self.env):
            self.p = Path("$SHOT/a/b/c")
            self.assertEqual(self.p.fslash(), "/metropolis/shot01/a/b/c")

    def test_posix_two_var_input(self):
        with mock.patch.dict("os.environ", self.env):
            self.p = Path("$SHOT/a/b/$DEPT/c")
            self.assertEqual(self.p.fslash(), "/metropolis/shot01/a/b/texturing/c")

    def test_windows_var_input(self):
        with mock.patch.dict("os.environ", self.env):
            self.p = Path("$HOME\\a\\b\\c")
            self.assertEqual(self.p.bslash(), "\\users\\joebloggs\\a\\b\\c")
            self.assertEqual(self.p.fslash(), "/users/joebloggs/a/b/c")

    def test_tilde_no_expand(self):
        with mock.patch.dict("os.environ", self.env):
            self.p = Path("~/a/b/c", no_expand=True)
            self.assertEqual(self.p.fslash(), "~/a/b/c")

    def test_posix_var_no_expand(self):
        with mock.patch.dict("os.environ", self.env):
            self.p = Path("$SHOT/a/b/c", no_expand=True)
            self.assertEqual(self.p.fslash(), "$SHOT/a/b/c")

    # BUG FIX: the following two methods were missing the "test_" prefix,
    # so the unittest runner never collected or executed them.
    def test_no_expand_variable_considered_relative(self):
        with mock.patch.dict("os.environ", self.env):
            self.p = Path("$SHOT/a/b/c", no_expand=True)
            self.assertTrue(self.p.relative)
            self.assertFalse(self.p.absolute)

    def test_expanded_variable_considered_absolute(self):
        with mock.patch.dict("os.environ", self.env):
            self.p = Path("$SHOT/a/b/c", no_expand=False)
            self.assertFalse(self.p.relative)
            self.assertTrue(self.p.absolute)
class PathContextExpansionTest(unittest.TestCase):
    """Token expansion driven by an explicit ``context`` dict.

    Context values take precedence over the environment; tokens with no
    context or environment value are left untouched.
    """

    def setUp(self):
        self.env = {
            "HOME": "/users/joebloggs",
            "SHOT": "/metropolis/shot01",
            "DEPT": "texturing",
        }
        self.context = {
            "HOME": "/users/janedoe",
            "FOO": "fooval",
            "BAR_FLY1_": "bar_fly1_val",
            "ROOT_DIR": "/some/root",
        }

    def _expand(self, template):
        # Helper: build a Path with the test context and render it.
        return Path(template, context=self.context).fslash()

    def test_path_replaces_context(self):
        self.assertEqual(self._expand("$ROOT_DIR/thefile.jpg"),
                         "/some/root/thefile.jpg")

    def test_path_replaces_multiple_context(self):
        self.assertEqual(self._expand("$ROOT_DIR/$BAR_FLY1_/thefile.jpg"),
                         "/some/root/bar_fly1_val/thefile.jpg")

    def test_path_context_overrides_env(self):
        self.assertEqual(self._expand("$HOME/thefile.jpg"),
                         "/users/janedoe/thefile.jpg")

    def test_path_leave_unknown_variable_in_tact(self):
        self.assertEqual(self._expand("$ROOT_DIR/$BAR_FLY1_/$FOO/thefile.$F.jpg"),
                         "/some/root/bar_fly1_val/fooval/thefile.$F.jpg")

    def test_path_replaces_context_braces(self):
        self.assertEqual(self._expand("${ROOT_DIR}/thefile.jpg"),
                         "/some/root/thefile.jpg")

    def test_path_replaces_multiple_context_braces(self):
        self.assertEqual(self._expand("${ROOT_DIR}/${BAR_FLY1_}/thefile.jpg"),
                         "/some/root/bar_fly1_val/thefile.jpg")

    def test_path_context_overrides_env_braces(self):
        self.assertEqual(self._expand("${HOME}/thefile.jpg"),
                         "/users/janedoe/thefile.jpg")

    def test_path_leave_unknown_variable_in_tact_braces(self):
        self.assertEqual(
            self._expand("${ROOT_DIR}/${BAR_FLY1_}/${FOO}/thefile.$F.jpg"),
            "/some/root/bar_fly1_val/fooval/thefile.$F.jpg")
class PathLengthTest(unittest.TestCase):
    """len() counts characters of the rendered path; .depth counts components."""

    def test_len_with_drive_letter(self):
        self.assertEqual(len(Path("C:\\aaa\\bbb/c")), 12)

    def test_len_with_no_drive_letter(self):
        self.assertEqual(len(Path("\\aaa\\bbb/c")), 10)

    def test_depth_with_drive_letter(self):
        self.assertEqual(Path("C:\\aaa\\bbb/c").depth, 3)

    def test_depth_with_no_drive_letter(self):
        self.assertEqual(Path("\\aaa\\bbb/c").depth, 3)

    def test_depth_with_literal_rel_path(self):
        self.assertEqual(Path("aaa\\bbb/c").depth, 3)
class AbsolutePathCollapseDotsTest(unittest.TestCase):
    """'.' and '..' components of absolute paths are resolved away."""

    def _fslash(self, raw):
        # Helper: construct and render in one step.
        return Path(raw).fslash()

    def test_path_collapses_single_dot(self):
        self.assertEqual(self._fslash("/a/b/./c"), "/a/b/c")

    def test_path_collapses_double_dot(self):
        self.assertEqual(self._fslash("/a/b/../c"), "/a/c")

    def test_path_collapses_many_single_dots(self):
        self.assertEqual(self._fslash("/a/b/./c/././d"), "/a/b/c/d")

    def test_path_collapses_many_consecutive_double_dots(self):
        self.assertEqual(self._fslash("/a/b/c/../../d"), "/a/d")

    def test_path_collapses_many_non_consecutive_double_dots(self):
        self.assertEqual(self._fslash("/a/b/c/../../d/../e/f/../g"), "/a/e/g")

    def test_path_collapses_many_non_consecutive_mixed_dots(self):
        path = Path("/a/./b/c/../.././d/../././e/f/../g/./")
        self.assertEqual(path.fslash(), "/a/e/g")
        self.assertEqual(path.depth, 3)

    def test_path_collapses_to_root(self):
        path = Path("/a/b/../../")
        self.assertEqual(path.fslash(), "/")
        self.assertEqual(path.depth, 0)

    def test_raise_when_collapse_too_many_dots(self):
        # Climbing above the root of an absolute path is an error.
        self.assertRaises(ValueError, Path, "/a/b/../../../")
class RelativePathCollapseDotsTest(unittest.TestCase):
    """'.' and '..' resolution for relative paths.

    Leading '..' components that cannot be collapsed are preserved.
    """

    def test_resolve_relative_several_dots(self):
        p = Path("./a/b/../../../c/d")
        self.assertEqual(p.fslash(), "../c/d")
        self.assertEqual(p.all_components, ["..", "c", "d"])
        self.assertEqual(p.depth, 3)

    # BUG FIX: this class previously defined two methods both named
    # test_resolve_leading_relative_dots; the second definition shadowed
    # the first, so the single "../c/d" case was never executed. The
    # second method is renamed below so both cases run.
    def test_resolve_leading_relative_dots(self):
        p = Path("../c/d")
        self.assertEqual(p.fslash(), "../c/d")

    def test_resolve_many_leading_relative_dots(self):
        p = Path("../../../c/d")
        self.assertEqual(p.fslash(), "../../../c/d")

    def test_resolve_only_relative_dots(self):
        p = Path("../../../")
        self.assertEqual(p.fslash(), "../../../")

    def test_collapse_contained_components(self):
        p = Path("../../../a/b/../../../")
        self.assertEqual(p.fslash(), "../../../../")

    def test_remove_trailing_dot(self):
        p = Path("../../.././")
        self.assertEqual(p.fslash(), "../../../")

    def test_cwd(self):
        p = Path(".")
        self.assertEqual(p.fslash(), "./")

    def test_down_up_cwd(self):
        p = Path("a/..")
        self.assertEqual(p.fslash(), "./")

    def test_up_down_sibling(self):
        p = Path("../a")
        self.assertEqual(p.fslash(), "../a")

    def test_up_down_sibling_bslash(self):
        p = Path("../a")
        self.assertEqual(p.bslash(), "..\\a")
class PathComponentsTest(unittest.TestCase):
    """Tail extraction and suffix matching on Path objects."""

    def test_path_gets_tail(self):
        path = Path("/a/b/c")
        self.assertEqual(path.tail, "c")

    def test_path_gets_none_when_no_tail(self):
        path = Path("/")
        self.assertEqual(path.tail, None)

    def test_path_ends_with(self):
        path = Path("/a/b/cdef")
        self.assertTrue(path.endswith("ef"))

    def test_path_not_ends_with(self):
        path = Path("/a/b/cdef")
        self.assertFalse(path.endswith("eg"))
class RelativePathTest(unittest.TestCase):
    """A relative input is accepted without raising."""

    def test_rel_path_does_not_raise(self):
        path = Path("a/b/c")
        self.assertEqual(path.fslash(), "a/b/c")
class EqualityTests(unittest.TestCase):
    """``==`` / ``!=`` semantics between Path instances."""

    def test_paths_equal(self):
        first = Path("a/b/c")
        second = Path("a/b/c")
        self.assertTrue(first == second)

    def test_same_object_equal(self):
        first = Path("a/b/c")
        self.assertTrue(first == first)

    def test_different_paths_equal_false(self):
        first = Path("a/b/c")
        second = Path("a/b/d")
        self.assertFalse(first == second)

    def test_paths_not_equal(self):
        first = Path("a/b/c")
        second = Path("a/b/d")
        self.assertTrue(first != second)
class InitializeWithComponentsTests(unittest.TestCase):
    """Construction from a list of components instead of a single string."""

    def test_initialize_with_lettered_components(self):
        path = Path(["C:", "a", "b", "c"])
        self.assertEqual(path.fslash(with_drive=True), "C:/a/b/c")

    def test_initialize_with_backslash_unc_components(self):
        path = Path(["\\", "a", "b", "c"])
        self.assertEqual(path.fslash(with_drive=True), "//a/b/c")

    def test_initialize_with_fwslash_unc_components(self):
        path = Path(["/", "a", "b", "c"])
        self.assertEqual(path.fslash(with_drive=True), "//a/b/c")

    def test_initialize_with_unc_components(self):
        path = Path(["/", "a", "b", "c"])
        self.assertEqual(path.bslash(with_drive=True), "\\\\a\\b\\c")

    def test_initialize_with_relative_components(self):
        path = Path(["a", "b", "c"])
        self.assertEqual(path.bslash(with_drive=True), "a\\b\\c")

    def test_initialize_with_relative_components_is_relative(self):
        path = Path(["a", "b", "c"])
        self.assertTrue(path.relative)
        self.assertFalse(path.absolute)
class GetComponentsTests(unittest.TestCase):
    """``all_components`` for plain, drive-lettered and UNC paths."""

    def test_get_all_components(self):
        path = Path("/a/b/c")
        self.assertEqual(path.all_components, ["a", "b", "c"])

    def test_get_all_components_with_drive(self):
        path = Path("C:/a/b/c")
        self.assertEqual(path.all_components, ["C:", "a", "b", "c"])

    def test_get_all_components_with_unc_fwslash(self):
        path = Path("//a/b/c")
        self.assertEqual(path.all_components, ["/", "a", "b", "c"])

    def test_get_all_components_with_unc_backslash(self):
        path = Path("\\\\a\\b\\c")
        self.assertEqual(path.all_components, ["/", "a", "b", "c"])
class UNCTests(unittest.TestCase):
    """UNC path detection and rendering (double-separator prefixed paths)."""

    def test_unc_root_with_drive(self):
        path = Path("\\\\a\\b\\c")
        self.assertEqual(path.fslash(with_drive=True), "//a/b/c")

    def test_unc_is_absolute(self):
        path = Path("\\\\a\\b\\c")
        self.assertTrue(path.absolute)

    def test_unc_root_without_drive(self):
        path = Path("\\\\a\\b\\c")
        self.assertEqual(path.fslash(with_drive=False), "/a/b/c")

    def test_unc_root_with_forward(self):
        path = Path("//a/b/c")
        self.assertEqual(path.fslash(with_drive=True), "//a/b/c")

    def test_is_unc(self):
        path = Path("\\\\a\\b\\c")
        self.assertTrue(path.is_unc)
        path = Path("//a/b/c")
        self.assertTrue(path.is_unc)

    def test_posix_abs_is_not_unc(self):
        path = Path(["/a/b/c"])
        self.assertFalse(path.is_unc)

    def test_relative_is_not_unc(self):
        path = Path(["a/b/c"])
        self.assertFalse(path.is_unc)

    def test_drive_letter_is_not_unc(self):
        path = Path("C:\\aaa\\bbb\\c")
        self.assertFalse(path.is_unc)
if __name__ == "__main__":
    # Discover and run every unittest.TestCase defined in this module.
    unittest.main()
| [
"ciopath.gpath.Path",
"sys.path.insert",
"mock.patch",
"mock.patch.dict",
"unittest.main",
"os.path.abspath"
] | [((266, 289), 'sys.path.insert', 'sys.path.insert', (['(0)', 'SRC'], {}), '(0, SRC)\n', (281, 289), False, 'import sys\n'), ((14859, 14874), 'unittest.main', 'unittest.main', ([], {}), '()\n', (14872, 14874), False, 'import unittest\n'), ((621, 630), 'ciopath.gpath.Path', 'Path', (['"""/"""'], {}), "('/')\n", (625, 630), False, 'from ciopath.gpath import Path\n'), ((787, 799), 'ciopath.gpath.Path', 'Path', (['"""C:\\\\"""'], {}), "('C:\\\\')\n", (791, 799), False, 'from ciopath.gpath import Path\n'), ((1003, 1022), 'ciopath.gpath.Path', 'Path', (['"""C:\\\\a\\\\b\\\\c"""'], {}), "('C:\\\\a\\\\b\\\\c')\n", (1007, 1022), False, 'from ciopath.gpath import Path\n'), ((1222, 1234), 'ciopath.gpath.Path', 'Path', (['"""C:\\\\"""'], {}), "('C:\\\\')\n", (1226, 1234), False, 'from ciopath.gpath import Path\n'), ((1445, 1459), 'ciopath.gpath.Path', 'Path', (['"""/a/b/c"""'], {}), "('/a/b/c')\n", (1449, 1459), False, 'from ciopath.gpath import Path\n'), ((1718, 1737), 'ciopath.gpath.Path', 'Path', (['"""C:\\\\a\\\\b\\\\c"""'], {}), "('C:\\\\a\\\\b\\\\c')\n", (1722, 1737), False, 'from ciopath.gpath import Path\n'), ((2345, 2361), 'ciopath.gpath.Path', 'Path', (['input_file'], {}), '(input_file)\n', (2349, 2361), False, 'from ciopath.gpath import Path\n'), ((2515, 2531), 'ciopath.gpath.Path', 'Path', (['input_file'], {}), '(input_file)\n', (2519, 2531), False, 'from ciopath.gpath import Path\n'), ((2680, 2696), 'ciopath.gpath.Path', 'Path', (['input_file'], {}), '(input_file)\n', (2684, 2696), False, 'from ciopath.gpath import Path\n'), ((2846, 2867), 'ciopath.gpath.Path', 'Path', (['"""\\\\a\\\\b\\\\c/d/e"""'], {}), "('\\\\a\\\\b\\\\c/d/e')\n", (2850, 2867), False, 'from ciopath.gpath import Path\n'), ((2980, 3001), 'ciopath.gpath.Path', 'Path', (['"""\\\\a\\\\b\\\\c/d/e"""'], {}), "('\\\\a\\\\b\\\\c/d/e')\n", (2984, 3001), False, 'from ciopath.gpath import Path\n'), ((3126, 3149), 'ciopath.gpath.Path', 'Path', (['"""C:\\\\a\\\\b\\\\c/d/e"""'], {}), 
"('C:\\\\a\\\\b\\\\c/d/e')\n", (3130, 3149), False, 'from ciopath.gpath import Path\n'), ((3271, 3294), 'ciopath.gpath.Path', 'Path', (['"""C:\\\\a\\\\b\\\\c/d/e"""'], {}), "('C:\\\\a\\\\b\\\\c/d/e')\n", (3275, 3294), False, 'from ciopath.gpath import Path\n'), ((3474, 3496), 'ciopath.gpath.Path', 'Path', (['"""C:\\\\\\\\a\\\\b///c"""'], {}), "('C:\\\\\\\\a\\\\b///c')\n", (3478, 3496), False, 'from ciopath.gpath import Path\n'), ((6078, 6129), 'ciopath.gpath.Path', 'Path', (['"""$ROOT_DIR/thefile.jpg"""'], {'context': 'self.context'}), "('$ROOT_DIR/thefile.jpg', context=self.context)\n", (6082, 6129), False, 'from ciopath.gpath import Path\n'), ((6267, 6329), 'ciopath.gpath.Path', 'Path', (['"""$ROOT_DIR/$BAR_FLY1_/thefile.jpg"""'], {'context': 'self.context'}), "('$ROOT_DIR/$BAR_FLY1_/thefile.jpg', context=self.context)\n", (6271, 6329), False, 'from ciopath.gpath import Path\n'), ((6476, 6523), 'ciopath.gpath.Path', 'Path', (['"""$HOME/thefile.jpg"""'], {'context': 'self.context'}), "('$HOME/thefile.jpg', context=self.context)\n", (6480, 6523), False, 'from ciopath.gpath import Path\n'), ((6670, 6740), 'ciopath.gpath.Path', 'Path', (['"""$ROOT_DIR/$BAR_FLY1_/$FOO/thefile.$F.jpg"""'], {'context': 'self.context'}), "('$ROOT_DIR/$BAR_FLY1_/$FOO/thefile.$F.jpg', context=self.context)\n", (6674, 6740), False, 'from ciopath.gpath import Path\n'), ((6899, 6952), 'ciopath.gpath.Path', 'Path', (['"""${ROOT_DIR}/thefile.jpg"""'], {'context': 'self.context'}), "('${ROOT_DIR}/thefile.jpg', context=self.context)\n", (6903, 6952), False, 'from ciopath.gpath import Path\n'), ((7097, 7163), 'ciopath.gpath.Path', 'Path', (['"""${ROOT_DIR}/${BAR_FLY1_}/thefile.jpg"""'], {'context': 'self.context'}), "('${ROOT_DIR}/${BAR_FLY1_}/thefile.jpg', context=self.context)\n", (7101, 7163), False, 'from ciopath.gpath import Path\n'), ((7317, 7366), 'ciopath.gpath.Path', 'Path', (['"""${HOME}/thefile.jpg"""'], {'context': 'self.context'}), "('${HOME}/thefile.jpg', context=self.context)\n", 
(7321, 7366), False, 'from ciopath.gpath import Path\n'), ((7520, 7596), 'ciopath.gpath.Path', 'Path', (['"""${ROOT_DIR}/${BAR_FLY1_}/${FOO}/thefile.$F.jpg"""'], {'context': 'self.context'}), "('${ROOT_DIR}/${BAR_FLY1_}/${FOO}/thefile.$F.jpg', context=self.context)\n", (7524, 7596), False, 'from ciopath.gpath import Path\n'), ((7790, 7812), 'ciopath.gpath.Path', 'Path', (['"""C:\\\\aaa\\\\bbb/c"""'], {}), "('C:\\\\aaa\\\\bbb/c')\n", (7794, 7812), False, 'from ciopath.gpath import Path\n'), ((7918, 7938), 'ciopath.gpath.Path', 'Path', (['"""\\\\aaa\\\\bbb/c"""'], {}), "('\\\\aaa\\\\bbb/c')\n", (7922, 7938), False, 'from ciopath.gpath import Path\n'), ((8043, 8065), 'ciopath.gpath.Path', 'Path', (['"""C:\\\\aaa\\\\bbb/c"""'], {}), "('C:\\\\aaa\\\\bbb/c')\n", (8047, 8065), False, 'from ciopath.gpath import Path\n'), ((8173, 8193), 'ciopath.gpath.Path', 'Path', (['"""\\\\aaa\\\\bbb/c"""'], {}), "('\\\\aaa\\\\bbb/c')\n", (8177, 8193), False, 'from ciopath.gpath import Path\n'), ((8302, 8320), 'ciopath.gpath.Path', 'Path', (['"""aaa\\\\bbb/c"""'], {}), "('aaa\\\\bbb/c')\n", (8306, 8320), False, 'from ciopath.gpath import Path\n'), ((8478, 8494), 'ciopath.gpath.Path', 'Path', (['"""/a/b/./c"""'], {}), "('/a/b/./c')\n", (8482, 8494), False, 'from ciopath.gpath import Path\n'), ((8601, 8618), 'ciopath.gpath.Path', 'Path', (['"""/a/b/../c"""'], {}), "('/a/b/../c')\n", (8605, 8618), False, 'from ciopath.gpath import Path\n'), ((8729, 8751), 'ciopath.gpath.Path', 'Path', (['"""/a/b/./c/././d"""'], {}), "('/a/b/./c/././d')\n", (8733, 8751), False, 'from ciopath.gpath import Path\n'), ((8878, 8900), 'ciopath.gpath.Path', 'Path', (['"""/a/b/c/../../d"""'], {}), "('/a/b/c/../../d')\n", (8882, 8900), False, 'from ciopath.gpath import Path\n'), ((9027, 9061), 'ciopath.gpath.Path', 'Path', (['"""/a/b/c/../../d/../e/f/../g"""'], {}), "('/a/b/c/../../d/../e/f/../g')\n", (9031, 9061), False, 'from ciopath.gpath import Path\n'), ((9189, 9234), 'ciopath.gpath.Path', 'Path', 
(['"""/a/./b/c/../.././d/../././e/f/../g/./"""'], {}), "('/a/./b/c/../.././d/../././e/f/../g/./')\n", (9193, 9234), False, 'from ciopath.gpath import Path\n'), ((9375, 9394), 'ciopath.gpath.Path', 'Path', (['"""/a/b/../../"""'], {}), "('/a/b/../../')\n", (9379, 9394), False, 'from ciopath.gpath import Path\n'), ((9727, 9753), 'ciopath.gpath.Path', 'Path', (['"""./a/b/../../../c/d"""'], {}), "('./a/b/../../../c/d')\n", (9731, 9753), False, 'from ciopath.gpath import Path\n'), ((9962, 9976), 'ciopath.gpath.Path', 'Path', (['"""../c/d"""'], {}), "('../c/d')\n", (9966, 9976), False, 'from ciopath.gpath import Path\n'), ((10087, 10107), 'ciopath.gpath.Path', 'Path', (['"""../../../c/d"""'], {}), "('../../../c/d')\n", (10091, 10107), False, 'from ciopath.gpath import Path\n'), ((10221, 10238), 'ciopath.gpath.Path', 'Path', (['"""../../../"""'], {}), "('../../../')\n", (10225, 10238), False, 'from ciopath.gpath import Path\n'), ((10352, 10382), 'ciopath.gpath.Path', 'Path', (['"""../../../a/b/../../../"""'], {}), "('../../../a/b/../../../')\n", (10356, 10382), False, 'from ciopath.gpath import Path\n'), ((10489, 10508), 'ciopath.gpath.Path', 'Path', (['"""../../.././"""'], {}), "('../../.././')\n", (10493, 10508), False, 'from ciopath.gpath import Path\n'), ((10596, 10605), 'ciopath.gpath.Path', 'Path', (['"""."""'], {}), "('.')\n", (10600, 10605), False, 'from ciopath.gpath import Path\n'), ((10694, 10706), 'ciopath.gpath.Path', 'Path', (['"""a/.."""'], {}), "('a/..')\n", (10698, 10706), False, 'from ciopath.gpath import Path\n'), ((10799, 10811), 'ciopath.gpath.Path', 'Path', (['"""../a"""'], {}), "('../a')\n", (10803, 10811), False, 'from ciopath.gpath import Path\n'), ((10913, 10925), 'ciopath.gpath.Path', 'Path', (['"""../a"""'], {}), "('../a')\n", (10917, 10925), False, 'from ciopath.gpath import Path\n'), ((11066, 11080), 'ciopath.gpath.Path', 'Path', (['"""/a/b/c"""'], {}), "('/a/b/c')\n", (11070, 11080), False, 'from ciopath.gpath import Path\n'), ((11180, 
11189), 'ciopath.gpath.Path', 'Path', (['"""/"""'], {}), "('/')\n", (11184, 11189), False, 'from ciopath.gpath import Path\n'), ((11277, 11294), 'ciopath.gpath.Path', 'Path', (['"""/a/b/cdef"""'], {}), "('/a/b/cdef')\n", (11281, 11294), False, 'from ciopath.gpath import Path\n'), ((11389, 11406), 'ciopath.gpath.Path', 'Path', (['"""/a/b/cdef"""'], {}), "('/a/b/cdef')\n", (11393, 11406), False, 'from ciopath.gpath import Path\n'), ((11551, 11564), 'ciopath.gpath.Path', 'Path', (['"""a/b/c"""'], {}), "('a/b/c')\n", (11555, 11564), False, 'from ciopath.gpath import Path\n'), ((11698, 11711), 'ciopath.gpath.Path', 'Path', (['"""a/b/c"""'], {}), "('a/b/c')\n", (11702, 11711), False, 'from ciopath.gpath import Path\n'), ((11725, 11738), 'ciopath.gpath.Path', 'Path', (['"""a/b/c"""'], {}), "('a/b/c')\n", (11729, 11738), False, 'from ciopath.gpath import Path\n'), ((11825, 11838), 'ciopath.gpath.Path', 'Path', (['"""a/b/c"""'], {}), "('a/b/c')\n", (11829, 11838), False, 'from ciopath.gpath import Path\n'), ((11935, 11948), 'ciopath.gpath.Path', 'Path', (['"""a/b/c"""'], {}), "('a/b/c')\n", (11939, 11948), False, 'from ciopath.gpath import Path\n'), ((11962, 11975), 'ciopath.gpath.Path', 'Path', (['"""a/b/d"""'], {}), "('a/b/d')\n", (11966, 11975), False, 'from ciopath.gpath import Path\n'), ((12061, 12074), 'ciopath.gpath.Path', 'Path', (['"""a/b/c"""'], {}), "('a/b/c')\n", (12065, 12074), False, 'from ciopath.gpath import Path\n'), ((12088, 12101), 'ciopath.gpath.Path', 'Path', (['"""a/b/d"""'], {}), "('a/b/d')\n", (12092, 12101), False, 'from ciopath.gpath import Path\n'), ((12262, 12289), 'ciopath.gpath.Path', 'Path', (["['C:', 'a', 'b', 'c']"], {}), "(['C:', 'a', 'b', 'c'])\n", (12266, 12289), False, 'from ciopath.gpath import Path\n'), ((12428, 12455), 'ciopath.gpath.Path', 'Path', (["['\\\\', 'a', 'b', 'c']"], {}), "(['\\\\', 'a', 'b', 'c'])\n", (12432, 12455), False, 'from ciopath.gpath import Path\n'), ((12591, 12617), 'ciopath.gpath.Path', 'Path', (["['/', 'a', 
'b', 'c']"], {}), "(['/', 'a', 'b', 'c'])\n", (12595, 12617), False, 'from ciopath.gpath import Path\n'), ((12745, 12771), 'ciopath.gpath.Path', 'Path', (["['/', 'a', 'b', 'c']"], {}), "(['/', 'a', 'b', 'c'])\n", (12749, 12771), False, 'from ciopath.gpath import Path\n'), ((12908, 12929), 'ciopath.gpath.Path', 'Path', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (12912, 12929), False, 'from ciopath.gpath import Path\n'), ((13074, 13095), 'ciopath.gpath.Path', 'Path', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (13078, 13095), False, 'from ciopath.gpath import Path\n'), ((13267, 13281), 'ciopath.gpath.Path', 'Path', (['"""/a/b/c"""'], {}), "('/a/b/c')\n", (13271, 13281), False, 'from ciopath.gpath import Path\n'), ((13405, 13421), 'ciopath.gpath.Path', 'Path', (['"""C:/a/b/c"""'], {}), "('C:/a/b/c')\n", (13409, 13421), False, 'from ciopath.gpath import Path\n'), ((13557, 13572), 'ciopath.gpath.Path', 'Path', (['"""//a/b/c"""'], {}), "('//a/b/c')\n", (13561, 13572), False, 'from ciopath.gpath import Path\n'), ((13709, 13728), 'ciopath.gpath.Path', 'Path', (['"""\\\\\\\\a\\\\b\\\\c"""'], {}), "('\\\\\\\\a\\\\b\\\\c')\n", (13713, 13728), False, 'from ciopath.gpath import Path\n'), ((13883, 13902), 'ciopath.gpath.Path', 'Path', (['"""\\\\\\\\a\\\\b\\\\c"""'], {}), "('\\\\\\\\a\\\\b\\\\c')\n", (13887, 13902), False, 'from ciopath.gpath import Path\n'), ((14015, 14034), 'ciopath.gpath.Path', 'Path', (['"""\\\\\\\\a\\\\b\\\\c"""'], {}), "('\\\\\\\\a\\\\b\\\\c')\n", (14019, 14034), False, 'from ciopath.gpath import Path\n'), ((14127, 14146), 'ciopath.gpath.Path', 'Path', (['"""\\\\\\\\a\\\\b\\\\c"""'], {}), "('\\\\\\\\a\\\\b\\\\c')\n", (14131, 14146), False, 'from ciopath.gpath import Path\n'), ((14265, 14280), 'ciopath.gpath.Path', 'Path', (['"""//a/b/c"""'], {}), "('//a/b/c')\n", (14269, 14280), False, 'from ciopath.gpath import Path\n'), ((14384, 14403), 'ciopath.gpath.Path', 'Path', (['"""\\\\\\\\a\\\\b\\\\c"""'], {}), "('\\\\\\\\a\\\\b\\\\c')\n", (14388, 
14403), False, 'from ciopath.gpath import Path\n'), ((14450, 14465), 'ciopath.gpath.Path', 'Path', (['"""//a/b/c"""'], {}), "('//a/b/c')\n", (14454, 14465), False, 'from ciopath.gpath import Path\n'), ((14554, 14570), 'ciopath.gpath.Path', 'Path', (["['/a/b/c']"], {}), "(['/a/b/c'])\n", (14558, 14570), False, 'from ciopath.gpath import Path\n'), ((14659, 14674), 'ciopath.gpath.Path', 'Path', (["['a/b/c']"], {}), "(['a/b/c'])\n", (14663, 14674), False, 'from ciopath.gpath import Path\n'), ((14767, 14790), 'ciopath.gpath.Path', 'Path', (['"""C:\\\\aaa\\\\bbb\\\\c"""'], {}), "('C:\\\\aaa\\\\bbb\\\\c')\n", (14771, 14790), False, 'from ciopath.gpath import Path\n'), ((202, 227), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (217, 227), False, 'import os\n'), ((528, 536), 'ciopath.gpath.Path', 'Path', (['""""""'], {}), "('')\n", (532, 536), False, 'from ciopath.gpath import Path\n'), ((2007, 2037), 'mock.patch', 'mock.patch', (['"""os.name"""', '"""posix"""'], {}), "('os.name', 'posix')\n", (2017, 2037), False, 'import mock\n'), ((2111, 2138), 'mock.patch', 'mock.patch', (['"""os.name"""', '"""nt"""'], {}), "('os.name', 'nt')\n", (2121, 2138), False, 'import mock\n'), ((3819, 3858), 'mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', 'self.env'], {}), "('os.environ', self.env)\n", (3834, 3858), False, 'import mock\n'), ((3881, 3896), 'ciopath.gpath.Path', 'Path', (['"""~/a/b/c"""'], {}), "('~/a/b/c')\n", (3885, 3896), False, 'from ciopath.gpath import Path\n'), ((4019, 4058), 'mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', 'self.env'], {}), "('os.environ', self.env)\n", (4034, 4058), False, 'import mock\n'), ((4081, 4100), 'ciopath.gpath.Path', 'Path', (['"""$SHOT/a/b/c"""'], {}), "('$SHOT/a/b/c')\n", (4085, 4100), False, 'from ciopath.gpath import Path\n'), ((4229, 4268), 'mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', 'self.env'], {}), "('os.environ', self.env)\n", (4244, 4268), False, 'import mock\n'), 
((4291, 4316), 'ciopath.gpath.Path', 'Path', (['"""$SHOT/a/b/$DEPT/c"""'], {}), "('$SHOT/a/b/$DEPT/c')\n", (4295, 4316), False, 'from ciopath.gpath import Path\n'), ((4453, 4492), 'mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', 'self.env'], {}), "('os.environ', self.env)\n", (4468, 4492), False, 'import mock\n'), ((4515, 4537), 'ciopath.gpath.Path', 'Path', (['"""$HOME\\\\a\\\\b\\\\c"""'], {}), "('$HOME\\\\a\\\\b\\\\c')\n", (4519, 4537), False, 'from ciopath.gpath import Path\n'), ((4737, 4776), 'mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', 'self.env'], {}), "('os.environ', self.env)\n", (4752, 4776), False, 'import mock\n'), ((4799, 4830), 'ciopath.gpath.Path', 'Path', (['"""~/a/b/c"""'], {'no_expand': '(True)'}), "('~/a/b/c', no_expand=True)\n", (4803, 4830), False, 'from ciopath.gpath import Path\n'), ((4942, 4981), 'mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', 'self.env'], {}), "('os.environ', self.env)\n", (4957, 4981), False, 'import mock\n'), ((5004, 5039), 'ciopath.gpath.Path', 'Path', (['"""$SHOT/a/b/c"""'], {'no_expand': '(True)'}), "('$SHOT/a/b/c', no_expand=True)\n", (5008, 5039), False, 'from ciopath.gpath import Path\n'), ((5169, 5208), 'mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', 'self.env'], {}), "('os.environ', self.env)\n", (5184, 5208), False, 'import mock\n'), ((5231, 5266), 'ciopath.gpath.Path', 'Path', (['"""$SHOT/a/b/c"""'], {'no_expand': '(True)'}), "('$SHOT/a/b/c', no_expand=True)\n", (5235, 5266), False, 'from ciopath.gpath import Path\n'), ((5425, 5464), 'mock.patch.dict', 'mock.patch.dict', (['"""os.environ"""', 'self.env'], {}), "('os.environ', self.env)\n", (5440, 5464), False, 'import mock\n'), ((5487, 5523), 'ciopath.gpath.Path', 'Path', (['"""$SHOT/a/b/c"""'], {'no_expand': '(False)'}), "('$SHOT/a/b/c', no_expand=False)\n", (5491, 5523), False, 'from ciopath.gpath import Path\n'), ((9585, 9607), 'ciopath.gpath.Path', 'Path', (['"""/a/b/../../../"""'], {}), 
"('/a/b/../../../')\n", (9589, 9607), False, 'from ciopath.gpath import Path\n')] |
# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Callable
from model_compression_toolkit.core import common
from model_compression_toolkit.core.common import Logger
from model_compression_toolkit.core.common.constants import PYTORCH
from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig
from model_compression_toolkit.core.common.target_platform import TargetPlatformCapabilities
from model_compression_toolkit.core.common.mixed_precision.kpi import KPI
from model_compression_toolkit.core.common.framework_info import FrameworkInfo
from model_compression_toolkit import CoreConfig
from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
MixedPrecisionQuantizationConfigV2
from model_compression_toolkit.core.common.post_training_quantization import post_training_quantization
import importlib
if importlib.util.find_spec("torch") is not None:
    from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
    from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
    from model_compression_toolkit.core.pytorch.constants import DEFAULT_TP_MODEL
    from torch.nn import Module
    from model_compression_toolkit import get_target_platform_capabilities
    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)

    def pytorch_gradient_post_training_quantization_experimental(in_module: Module,
                                                                 representative_data_gen: Callable,
                                                                 target_kpi: KPI = None,
                                                                 core_config: CoreConfig = None,
                                                                 fw_info: FrameworkInfo = DEFAULT_PYTORCH_INFO,
                                                                 gptq_config: GradientPTQConfig = None,
                                                                 target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYTORCH_TPC):
        """
        Quantize a trained Pytorch module using post-training quantization.

        By default, the module is quantized using symmetric, power-of-two
        thresholds as defined in the default TargetPlatformCapabilities. The
        module is first optimized (e.g. BatchNormalization folding), statistics
        are collected per layer with the representative dataset, and thresholds
        are derived from them. If ``gptq_config`` is passed, the quantized
        weights are further optimized with gradient-based post-training
        quantization that minimizes the loss between float and quantized
        module outputs.

        Args:
            in_module (Module): Pytorch module to quantize.
            representative_data_gen (Callable): Dataset generator used for calibration.
            target_kpi (KPI): KPI object to limit the mixed-precision search.
            core_config (CoreConfig): Quantization configuration, including mixed
                precision parameters. If None, a fresh default CoreConfig is created.
            fw_info (FrameworkInfo): Framework-specific quantization information.
            gptq_config (GradientPTQConfig): Configuration for gptq (e.g. optimizer).
            target_platform_capabilities (TargetPlatformCapabilities): Target platform
                to optimize the PyTorch model for.

        Returns:
            A quantized module and information the user may need to handle it.

        Examples:
            >>> import torchvision.models.mobilenet_v2 as models
            >>> module = models.mobilenet_v2()
            >>> import numpy as np
            >>> def repr_datagen(): return [np.random.random((1,224,224,3))]
            >>> import model_compression_toolkit as mct
            >>> quantized_module, quantization_info = mct.pytorch_post_training_quantization(module, repr_datagen)
        """
        # FIX: the default was ``core_config=CoreConfig()`` — a mutable default
        # evaluated once at import time and shared by every call, so one call's
        # mutations leaked into the next. Build a fresh config per call instead.
        if core_config is None:
            core_config = CoreConfig()
        if core_config.mixed_precision_enable:
            if not isinstance(core_config.mixed_precision_config, MixedPrecisionQuantizationConfigV2):
                common.Logger.error("Given quantization config to mixed-precision facade is not of type "
                                    "MixedPrecisionQuantizationConfigV2. Please use pytorch_post_training_quantization API,"
                                    "or pass a valid mixed precision configuration.")
            common.Logger.info("Using experimental mixed-precision quantization. "
                               "If you encounter an issue please file a bug.")
        return post_training_quantization(in_module,
                                          representative_data_gen,
                                          core_config,
                                          fw_info,
                                          PytorchImplementation(),
                                          target_platform_capabilities,
                                          gptq_config,
                                          target_kpi=target_kpi)
else:
    # If torch is not installed,
    # we raise an exception when trying to use these functions.
    def pytorch_gradient_post_training_quantization_experimental(*args, **kwargs):
        """Stub that fails loudly when torch is not installed."""
        Logger.critical('Installing Pytorch is mandatory '
                        'when using pytorch_gradient_post_training_quantization_experimental. '
                        'Could not find the torch package.')
| [
"model_compression_toolkit.core.pytorch.pytorch_implementation.PytorchImplementation",
"model_compression_toolkit.get_target_platform_capabilities",
"model_compression_toolkit.core.common.Logger.error",
"importlib.util.find_spec",
"model_compression_toolkit.core.common.Logger.critical",
"model_compression... | [((1546, 1579), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""torch"""'], {}), "('torch')\n", (1570, 1579), False, 'import importlib\n'), ((2008, 2067), 'model_compression_toolkit.get_target_platform_capabilities', 'get_target_platform_capabilities', (['PYTORCH', 'DEFAULT_TP_MODEL'], {}), '(PYTORCH, DEFAULT_TP_MODEL)\n', (2040, 2067), False, 'from model_compression_toolkit import get_target_platform_capabilities\n'), ((2434, 2446), 'model_compression_toolkit.CoreConfig', 'CoreConfig', ([], {}), '()\n', (2444, 2446), False, 'from model_compression_toolkit import CoreConfig\n'), ((6976, 7139), 'model_compression_toolkit.core.common.Logger.critical', 'Logger.critical', (['"""Installing Pytorch is mandatory when using pytorch_gradient_post_training_quantization_experimental. Could not find the torch package."""'], {}), "(\n 'Installing Pytorch is mandatory when using pytorch_gradient_post_training_quantization_experimental. Could not find the torch package.'\n )\n", (6991, 7139), False, 'from model_compression_toolkit.core.common import Logger\n'), ((6144, 6269), 'model_compression_toolkit.core.common.Logger.info', 'common.Logger.info', (['"""Using experimental mixed-precision quantization. If you encounter an issue please file a bug."""'], {}), "(\n 'Using experimental mixed-precision quantization. If you encounter an issue please file a bug.'\n )\n", (6162, 6269), False, 'from model_compression_toolkit.core import common\n'), ((6563, 6586), 'model_compression_toolkit.core.pytorch.pytorch_implementation.PytorchImplementation', 'PytorchImplementation', ([], {}), '()\n', (6584, 6586), False, 'from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation\n'), ((5830, 6062), 'model_compression_toolkit.core.common.Logger.error', 'common.Logger.error', (['"""Given quantization config to mixed-precision facade is not of type MixedPrecisionQuantizationConfigV2. 
Please use pytorch_post_training_quantization API,or pass a valid mixed precision configuration."""'], {}), "(\n 'Given quantization config to mixed-precision facade is not of type MixedPrecisionQuantizationConfigV2. Please use pytorch_post_training_quantization API,or pass a valid mixed precision configuration.'\n )\n", (5849, 6062), False, 'from model_compression_toolkit.core import common\n')] |
import dgl
import torch as th
import numpy as np
import itertools
import time
from collections import *
# Bundles a batched DGL graph with its bookkeeping tensors (node/edge ids,
# token counts, per-layer edge ids) so it can be passed around as one value.
Graph = namedtuple(
    "Graph",
    "g src tgt tgt_y nids eids nid_arr n_nodes n_edges n_tokens layer_eids",
)
# Graph pools for relative-position attention (ngram style) are built below.
def dedupe_tuples(tups):
    """Return *tups* with order-insensitive duplicates removed.

    Each pair is normalised so the smaller element comes first, then
    duplicates are collapsed through a set, so ``(a, b)`` and ``(b, a)``
    count as the same edge.

    Args:
        tups: iterable of 2-tuples with mutually comparable elements.

    Returns:
        list: unique normalised pairs, in arbitrary (set) order.

    Raises:
        ValueError: if an element of ``tups`` is not a pair.
    """
    try:
        return list({(a, b) if a < b else (b, a) for a, b in tups})
    except ValueError as err:
        # FIX: previously re-raised a bare ``Exception(tups)``, discarding the
        # exception chain. Keep the type specific and chain the original
        # unpacking error for debuggability.
        raise ValueError("expected an iterable of pairs, got: {!r}".format(tups)) from err
def get_src_dst_deps(src_deps, order=1):
    """Expand dependency edges up to the requested order.

    Order 1 simply deduplicates the input edges. Higher orders derive new
    edges between nodes that share an endpoint (siblings / grandparents),
    recurse with ``order - 1``, and drop any edge already present in the
    input.

    Args:
        src_deps: a (src, dst) pair or a list of such pairs.
        order: how many hops to expand (1 = the edges themselves).

    Returns:
        list: unique derived edges, in arbitrary (set) order.
    """
    if not isinstance(src_deps, list):
        src_deps = [src_deps]
    if order == 1:
        return list(set(src_deps))
    derived = []
    for left, right in src_deps:
        # Pair this edge with every other edge that shares an endpoint.
        for other_left, other_right in src_deps:
            if other_right == right and left != other_left:
                derived.append((left, other_left))
            elif other_left == left and right != other_right:
                derived.append((right, other_right))
            elif right == other_left and left != other_right:
                derived.append((left, other_right))
    higher = set(get_src_dst_deps(derived, order=order - 1))
    return list(higher - set(src_deps))
class GraphPool:
"Create a graph pool in advance to accelerate graph building phase in Transformer."
    def __init__(self, n=50, m=50):
        '''
        Pre-build one DGLGraph per (src_length, tgt_length) pair so that
        batching at train/inference time only needs cheap pool lookups.

        args:
            n: maximum length of input sequence.
            m: maximum length of output sequence.
        '''
        print('start creating graph pool...')
        tic = time.time()
        self.n, self.m = n, m
        # g_pool[i][j] is the graph for src length i+1 and tgt length j+1.
        g_pool = [[dgl.DGLGraph() for _ in range(m)] for _ in range(n)]
        # Per-(i, j) edge counts for each group: 'ee' encoder->encoder,
        # 'ed' encoder->decoder, 'dd' decoder->decoder. Edge ids elsewhere
        # rely on edges being added in exactly this group order.
        num_edges = {
            'ee': np.zeros((n, n)).astype(int),
            'ed': np.zeros((n, m)).astype(int),
            'dd': np.zeros((m, m)).astype(int)
        }
        for i, j in itertools.product(range(n), range(m)):
            src_length = i + 1
            tgt_length = j + 1
            # Nodes [0, src_length) are encoder nodes; the rest are decoder nodes.
            g_pool[i][j].add_nodes(src_length + tgt_length)
            enc_nodes = th.arange(src_length, dtype=th.long)
            dec_nodes = th.arange(tgt_length, dtype=th.long) + src_length
            # enc -> enc: fully connected among encoder nodes.
            us = enc_nodes.unsqueeze(-1).repeat(1, src_length).view(-1)
            vs = enc_nodes.repeat(src_length)
            g_pool[i][j].add_edges(us, vs)
            num_edges['ee'][i][j] = len(us)
            # enc -> dec: every encoder node connects to every decoder node.
            us = enc_nodes.unsqueeze(-1).repeat(1, tgt_length).view(-1)
            vs = dec_nodes.repeat(src_length)
            g_pool[i][j].add_edges(us, vs)
            num_edges['ed'][i][j] = len(us)
            # dec -> dec: causal (upper-triangular) connectivity so each target
            # position only attends to itself and earlier positions.
            indices = th.triu(th.ones(tgt_length, tgt_length)) == 1
            us = dec_nodes.unsqueeze(-1).repeat(1, tgt_length)[indices]
            vs = dec_nodes.unsqueeze(0).repeat(tgt_length, 1)[indices]
            g_pool[i][j].add_edges(us, vs)
            num_edges['dd'][i][j] = len(us)
        print('successfully created graph pool, time: {0:0.3f}s'.format(time.time() - tic))
        self.g_pool = g_pool
        self.num_edges = num_edges
def beam(self, src_buf, start_sym, max_len, k, device='cpu', src_deps=None):
'''
Return a batched graph for beam search during inference of Transformer.
args:
src_buf: a list of input sequence
start_sym: the index of start-of-sequence symbol
max_len: maximum length for decoding
k: beam size
device: 'cpu' or 'cuda:*'
'''
if src_deps is None:
src_deps = list()
g_list = []
src_lens = [len(_) for _ in src_buf]
tgt_lens = [max_len] * len(src_buf)
num_edges = {'ee': [], 'ed': [], 'dd': []}
for src_len, tgt_len in zip(src_lens, tgt_lens):
i, j = src_len - 1, tgt_len - 1
for _ in range(k):
g_list.append(self.g_pool[i][j])
for key in ['ee', 'ed', 'dd']:
num_edges[key].append(int(self.num_edges[key][i][j]))
g = dgl.batch(g_list)
src, tgt = [], []
src_pos, tgt_pos = [], []
enc_ids, dec_ids = [], []
layer_eids = {
'dep': [[], []]
}
e2e_eids, e2d_eids, d2d_eids = [], [], []
n_nodes, n_edges, n_tokens = 0, 0, 0
for src_sample, src_dep, n, n_ee, n_ed, n_dd in zip(src_buf, src_deps, src_lens, num_edges['ee'], num_edges['ed'], num_edges['dd']):
for _ in range(k):
src.append(th.tensor(src_sample, dtype=th.long, device=device))
src_pos.append(th.arange(n, dtype=th.long, device=device))
enc_ids.append(th.arange(n_nodes, n_nodes + n, dtype=th.long, device=device))
n_nodes += n
e2e_eids.append(th.arange(n_edges, n_edges + n_ee, dtype=th.long, device=device))
# Copy the ids of edges that correspond to a given node and its previous N nodes
# We are using arange here. This will not work. Instead we need to select edges that
# correspond to previous positions. This information is present in graph pool
# For each edge, we need to figure out source_node_id and target_node_id.
if src_dep:
for i in range(0, 2):
for src_node_id, dst_node_id in dedupe_tuples(get_src_dst_deps(src_dep, i + 1)):
layer_eids['dep'][i].append(n_edges + src_node_id * n + dst_node_id)
layer_eids['dep'][i].append(n_edges + dst_node_id * n + src_node_id)
n_edges += n_ee
tgt_seq = th.zeros(max_len, dtype=th.long, device=device)
tgt_seq[0] = start_sym
tgt.append(tgt_seq)
tgt_pos.append(th.arange(max_len, dtype=th.long, device=device))
dec_ids.append(th.arange(n_nodes, n_nodes + max_len, dtype=th.long, device=device))
n_nodes += max_len
e2d_eids.append(th.arange(n_edges, n_edges + n_ed, dtype=th.long, device=device))
n_edges += n_ed
d2d_eids.append(th.arange(n_edges, n_edges + n_dd, dtype=th.long, device=device))
n_edges += n_dd
g.set_n_initializer(dgl.init.zero_initializer)
g.set_e_initializer(dgl.init.zero_initializer)
return Graph(g=g,
src=(th.cat(src), th.cat(src_pos)),
tgt=(th.cat(tgt), th.cat(tgt_pos)),
tgt_y=None,
nids = {'enc': th.cat(enc_ids), 'dec': th.cat(dec_ids)},
eids = {'ee': th.cat(e2e_eids), 'ed': th.cat(e2d_eids), 'dd': th.cat(d2d_eids)},
nid_arr = {'enc': enc_ids, 'dec': dec_ids},
n_nodes=n_nodes,
n_edges=n_edges,
layer_eids={
'dep': [
th.tensor(layer_eids['dep'][i]) for i in range(0, len(layer_eids['dep']))
]
},
n_tokens=n_tokens)
def __call__(self, src_buf, tgt_buf, device='cpu', src_deps=None):
'''
Return a batched graph for the training phase of Transformer.
args:
src_buf: a set of input sequence arrays.
tgt_buf: a set of output sequence arrays.
device: 'cpu' or 'cuda:*'
src_deps: list, optional
Dependency parses of the source in the form of src_node_id -> dst_node_id.
where src is the child and dst is the parent. i.e a child node attends on its
syntactic parent in a dependency parse
'''
if src_deps is None:
src_deps = list()
g_list = []
src_lens = [len(_) for _ in src_buf]
tgt_lens = [len(_) - 1 for _ in tgt_buf]
num_edges = {'ee': [], 'ed': [], 'dd': []}
# We are running over source and target pairs here
for src_len, tgt_len in zip(src_lens, tgt_lens):
i, j = src_len - 1, tgt_len - 1
g_list.append(self.g_pool[i][j])
for key in ['ee', 'ed', 'dd']:
num_edges[key].append(int(self.num_edges[key][i][j]))
g = dgl.batch(g_list)
src, tgt, tgt_y = [], [], []
src_pos, tgt_pos = [], []
enc_ids, dec_ids = [], []
e2e_eids, d2d_eids, e2d_eids = [], [], []
layer_eids = {
'dep': [[], []]
}
n_nodes, n_edges, n_tokens = 0, 0, 0
for src_sample, tgt_sample, src_dep, n, m, n_ee, n_ed, n_dd in zip(src_buf, tgt_buf, src_deps, src_lens, tgt_lens, num_edges['ee'], num_edges['ed'], num_edges['dd']):
src.append(th.tensor(src_sample, dtype=th.long, device=device))
tgt.append(th.tensor(tgt_sample[:-1], dtype=th.long, device=device))
tgt_y.append(th.tensor(tgt_sample[1:], dtype=th.long, device=device))
src_pos.append(th.arange(n, dtype=th.long, device=device))
tgt_pos.append(th.arange(m, dtype=th.long, device=device))
enc_ids.append(th.arange(n_nodes, n_nodes + n, dtype=th.long, device=device))
n_nodes += n
dec_ids.append(th.arange(n_nodes, n_nodes + m, dtype=th.long, device=device))
n_nodes += m
e2e_eids.append(th.arange(n_edges, n_edges + n_ee, dtype=th.long, device=device))
# Copy the ids of edges that correspond to a given node and its previous N nodes
# We are using arange here. This will not work. Instead we need to select edges that
# correspond to previous positions. This information is present in graph pool
# For each edge, we need to figure out source_node_id and target_node_id.
if src_dep:
for i in range(0, 2):
for src_node_id, dst_node_id in dedupe_tuples(get_src_dst_deps(src_dep, i + 1)):
layer_eids['dep'][i].append(n_edges + src_node_id * n + dst_node_id)
layer_eids['dep'][i].append(n_edges + dst_node_id * n + src_node_id)
n_edges += n_ee
e2d_eids.append(th.arange(n_edges, n_edges + n_ed, dtype=th.long, device=device))
n_edges += n_ed
d2d_eids.append(th.arange(n_edges, n_edges + n_dd, dtype=th.long, device=device))
n_edges += n_dd
n_tokens += m
g.set_n_initializer(dgl.init.zero_initializer)
g.set_e_initializer(dgl.init.zero_initializer)
return Graph(g=g,
src=(th.cat(src), th.cat(src_pos)),
tgt=(th.cat(tgt), th.cat(tgt_pos)),
tgt_y=th.cat(tgt_y),
nids = {'enc': th.cat(enc_ids), 'dec': th.cat(dec_ids)},
eids = {'ee': th.cat(e2e_eids), 'ed': th.cat(e2d_eids), 'dd': th.cat(d2d_eids)},
nid_arr = {'enc': enc_ids, 'dec': dec_ids},
n_nodes=n_nodes,
layer_eids={
'dep': [
th.tensor(layer_eids['dep'][i]) for i in range(0, len(layer_eids['dep']))
]
},
n_edges=n_edges,
n_tokens=n_tokens)
| [
"dgl.batch",
"torch.tensor",
"torch.cat",
"numpy.zeros",
"dgl.DGLGraph",
"time.time",
"torch.zeros",
"torch.arange",
"torch.ones"
] | [((1669, 1680), 'time.time', 'time.time', ([], {}), '()\n', (1678, 1680), False, 'import time\n'), ((4163, 4180), 'dgl.batch', 'dgl.batch', (['g_list'], {}), '(g_list)\n', (4172, 4180), False, 'import dgl\n'), ((8419, 8436), 'dgl.batch', 'dgl.batch', (['g_list'], {}), '(g_list)\n', (8428, 8436), False, 'import dgl\n'), ((2164, 2200), 'torch.arange', 'th.arange', (['src_length'], {'dtype': 'th.long'}), '(src_length, dtype=th.long)\n', (2173, 2200), True, 'import torch as th\n'), ((1730, 1744), 'dgl.DGLGraph', 'dgl.DGLGraph', ([], {}), '()\n', (1742, 1744), False, 'import dgl\n'), ((2225, 2261), 'torch.arange', 'th.arange', (['tgt_length'], {'dtype': 'th.long'}), '(tgt_length, dtype=th.long)\n', (2234, 2261), True, 'import torch as th\n'), ((5790, 5837), 'torch.zeros', 'th.zeros', (['max_len'], {'dtype': 'th.long', 'device': 'device'}), '(max_len, dtype=th.long, device=device)\n', (5798, 5837), True, 'import torch as th\n'), ((8896, 8947), 'torch.tensor', 'th.tensor', (['src_sample'], {'dtype': 'th.long', 'device': 'device'}), '(src_sample, dtype=th.long, device=device)\n', (8905, 8947), True, 'import torch as th\n'), ((8972, 9028), 'torch.tensor', 'th.tensor', (['tgt_sample[:-1]'], {'dtype': 'th.long', 'device': 'device'}), '(tgt_sample[:-1], dtype=th.long, device=device)\n', (8981, 9028), True, 'import torch as th\n'), ((9055, 9110), 'torch.tensor', 'th.tensor', (['tgt_sample[1:]'], {'dtype': 'th.long', 'device': 'device'}), '(tgt_sample[1:], dtype=th.long, device=device)\n', (9064, 9110), True, 'import torch as th\n'), ((9139, 9181), 'torch.arange', 'th.arange', (['n'], {'dtype': 'th.long', 'device': 'device'}), '(n, dtype=th.long, device=device)\n', (9148, 9181), True, 'import torch as th\n'), ((9210, 9252), 'torch.arange', 'th.arange', (['m'], {'dtype': 'th.long', 'device': 'device'}), '(m, dtype=th.long, device=device)\n', (9219, 9252), True, 'import torch as th\n'), ((9281, 9342), 'torch.arange', 'th.arange', (['n_nodes', '(n_nodes + n)'], {'dtype': 
'th.long', 'device': 'device'}), '(n_nodes, n_nodes + n, dtype=th.long, device=device)\n', (9290, 9342), True, 'import torch as th\n'), ((9396, 9457), 'torch.arange', 'th.arange', (['n_nodes', '(n_nodes + m)'], {'dtype': 'th.long', 'device': 'device'}), '(n_nodes, n_nodes + m, dtype=th.long, device=device)\n', (9405, 9457), True, 'import torch as th\n'), ((9513, 9577), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_ee)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_ee, dtype=th.long, device=device)\n', (9522, 9577), True, 'import torch as th\n'), ((10352, 10416), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_ed)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_ed, dtype=th.long, device=device)\n', (10361, 10416), True, 'import torch as th\n'), ((10474, 10538), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_dd)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_dd, dtype=th.long, device=device)\n', (10483, 10538), True, 'import torch as th\n'), ((10874, 10887), 'torch.cat', 'th.cat', (['tgt_y'], {}), '(tgt_y)\n', (10880, 10887), True, 'import torch as th\n'), ((1823, 1839), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1831, 1839), True, 'import numpy as np\n'), ((1871, 1887), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (1879, 1887), True, 'import numpy as np\n'), ((1919, 1935), 'numpy.zeros', 'np.zeros', (['(m, m)'], {}), '((m, m))\n', (1927, 1935), True, 'import numpy as np\n'), ((2792, 2823), 'torch.ones', 'th.ones', (['tgt_length', 'tgt_length'], {}), '(tgt_length, tgt_length)\n', (2799, 2823), True, 'import torch as th\n'), ((3133, 3144), 'time.time', 'time.time', ([], {}), '()\n', (3142, 3144), False, 'import time\n'), ((4630, 4681), 'torch.tensor', 'th.tensor', (['src_sample'], {'dtype': 'th.long', 'device': 'device'}), '(src_sample, dtype=th.long, device=device)\n', (4639, 4681), True, 'import torch as th\n'), ((4714, 4756), 'torch.arange', 
'th.arange', (['n'], {'dtype': 'th.long', 'device': 'device'}), '(n, dtype=th.long, device=device)\n', (4723, 4756), True, 'import torch as th\n'), ((4789, 4850), 'torch.arange', 'th.arange', (['n_nodes', '(n_nodes + n)'], {'dtype': 'th.long', 'device': 'device'}), '(n_nodes, n_nodes + n, dtype=th.long, device=device)\n', (4798, 4850), True, 'import torch as th\n'), ((4913, 4977), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_ee)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_ee, dtype=th.long, device=device)\n', (4922, 4977), True, 'import torch as th\n'), ((5944, 5992), 'torch.arange', 'th.arange', (['max_len'], {'dtype': 'th.long', 'device': 'device'}), '(max_len, dtype=th.long, device=device)\n', (5953, 5992), True, 'import torch as th\n'), ((6026, 6093), 'torch.arange', 'th.arange', (['n_nodes', '(n_nodes + max_len)'], {'dtype': 'th.long', 'device': 'device'}), '(n_nodes, n_nodes + max_len, dtype=th.long, device=device)\n', (6035, 6093), True, 'import torch as th\n'), ((6163, 6227), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_ed)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_ed, dtype=th.long, device=device)\n', (6172, 6227), True, 'import torch as th\n'), ((6293, 6357), 'torch.arange', 'th.arange', (['n_edges', '(n_edges + n_dd)'], {'dtype': 'th.long', 'device': 'device'}), '(n_edges, n_edges + n_dd, dtype=th.long, device=device)\n', (6302, 6357), True, 'import torch as th\n'), ((6555, 6566), 'torch.cat', 'th.cat', (['src'], {}), '(src)\n', (6561, 6566), True, 'import torch as th\n'), ((6568, 6583), 'torch.cat', 'th.cat', (['src_pos'], {}), '(src_pos)\n', (6574, 6583), True, 'import torch as th\n'), ((6612, 6623), 'torch.cat', 'th.cat', (['tgt'], {}), '(tgt)\n', (6618, 6623), True, 'import torch as th\n'), ((6625, 6640), 'torch.cat', 'th.cat', (['tgt_pos'], {}), '(tgt_pos)\n', (6631, 6640), True, 'import torch as th\n'), ((6712, 6727), 'torch.cat', 'th.cat', (['enc_ids'], {}), '(enc_ids)\n', 
(6718, 6727), True, 'import torch as th\n'), ((6736, 6751), 'torch.cat', 'th.cat', (['dec_ids'], {}), '(dec_ids)\n', (6742, 6751), True, 'import torch as th\n'), ((6789, 6805), 'torch.cat', 'th.cat', (['e2e_eids'], {}), '(e2e_eids)\n', (6795, 6805), True, 'import torch as th\n'), ((6813, 6829), 'torch.cat', 'th.cat', (['e2d_eids'], {}), '(e2d_eids)\n', (6819, 6829), True, 'import torch as th\n'), ((6837, 6853), 'torch.cat', 'th.cat', (['d2d_eids'], {}), '(d2d_eids)\n', (6843, 6853), True, 'import torch as th\n'), ((10759, 10770), 'torch.cat', 'th.cat', (['src'], {}), '(src)\n', (10765, 10770), True, 'import torch as th\n'), ((10772, 10787), 'torch.cat', 'th.cat', (['src_pos'], {}), '(src_pos)\n', (10778, 10787), True, 'import torch as th\n'), ((10816, 10827), 'torch.cat', 'th.cat', (['tgt'], {}), '(tgt)\n', (10822, 10827), True, 'import torch as th\n'), ((10829, 10844), 'torch.cat', 'th.cat', (['tgt_pos'], {}), '(tgt_pos)\n', (10835, 10844), True, 'import torch as th\n'), ((10925, 10940), 'torch.cat', 'th.cat', (['enc_ids'], {}), '(enc_ids)\n', (10931, 10940), True, 'import torch as th\n'), ((10949, 10964), 'torch.cat', 'th.cat', (['dec_ids'], {}), '(dec_ids)\n', (10955, 10964), True, 'import torch as th\n'), ((11002, 11018), 'torch.cat', 'th.cat', (['e2e_eids'], {}), '(e2e_eids)\n', (11008, 11018), True, 'import torch as th\n'), ((11026, 11042), 'torch.cat', 'th.cat', (['e2d_eids'], {}), '(e2d_eids)\n', (11032, 11042), True, 'import torch as th\n'), ((11050, 11066), 'torch.cat', 'th.cat', (['d2d_eids'], {}), '(d2d_eids)\n', (11056, 11066), True, 'import torch as th\n'), ((7094, 7125), 'torch.tensor', 'th.tensor', (["layer_eids['dep'][i]"], {}), "(layer_eids['dep'][i])\n", (7103, 7125), True, 'import torch as th\n'), ((11269, 11300), 'torch.tensor', 'th.tensor', (["layer_eids['dep'][i]"], {}), "(layer_eids['dep'][i])\n", (11278, 11300), True, 'import torch as th\n')] |
# pylint: disable=not-context-manager
from unittest.mock import ANY, Mock
from starlette.testclient import TestClient
from ariadne.asgi import (
GQL_CONNECTION_ACK,
GQL_CONNECTION_INIT,
GQL_DATA,
GQL_ERROR,
GQL_START,
GraphQL,
)
from ariadne.types import Extension
def test_custom_context_value_is_passed_to_resolvers(schema):
    """A static context_value configured on the app reaches resolvers."""
    client = TestClient(GraphQL(schema, context_value={"test": "TEST-CONTEXT"}))
    result = client.post("/", json={"query": "{ testContext }"}).json()
    assert result == {"data": {"testContext": "TEST-CONTEXT"}}
def test_custom_context_value_function_is_set_and_called_by_app(schema):
    """A callable context_value is invoked when a query is executed."""
    context_factory = Mock(return_value=True)
    client = TestClient(GraphQL(schema, context_value=context_factory))
    client.post("/", json={"query": "{ status }"})
    context_factory.assert_called_once()
def test_custom_context_value_function_result_is_passed_to_resolvers(schema):
    """Whatever the context_value callable returns is what resolvers see."""
    context_factory = Mock(return_value={"test": "TEST-CONTEXT"})
    client = TestClient(GraphQL(schema, context_value=context_factory))
    result = client.post("/", json={"query": "{ testContext }"}).json()
    assert result == {"data": {"testContext": "TEST-CONTEXT"}}
def test_async_context_value_function_result_is_awaited_before_passing_to_resolvers(
    schema,
):
    """An async context_value callable is awaited and its result is used."""
    async def make_context(*_):
        return {"test": "TEST-ASYNC-CONTEXT"}
    client = TestClient(GraphQL(schema, context_value=make_context))
    result = client.post("/", json={"query": "{ testContext }"}).json()
    assert result == {"data": {"testContext": "TEST-ASYNC-CONTEXT"}}
def test_custom_root_value_is_passed_to_query_resolvers(schema):
    """A static root_value configured on the app reaches query resolvers."""
    client = TestClient(GraphQL(schema, root_value={"test": "TEST-ROOT"}))
    result = client.post("/", json={"query": "{ testRoot }"}).json()
    assert result == {"data": {"testRoot": "TEST-ROOT"}}
def test_custom_root_value_is_passed_to_subscription_resolvers(schema):
    """A static root_value configured on the app reaches subscription resolvers."""
    client = TestClient(GraphQL(schema, root_value={"test": "TEST-ROOT"}))
    with client.websocket_connect("/", "graphql-ws") as websocket:
        websocket.send_json({"type": GQL_CONNECTION_INIT})
        start_message = {
            "type": GQL_START,
            "id": "test1",
            "payload": {"query": "subscription { testRoot }"},
        }
        websocket.send_json(start_message)
        assert websocket.receive_json()["type"] == GQL_CONNECTION_ACK
        data_message = websocket.receive_json()
        assert data_message["type"] == GQL_DATA
        assert data_message["payload"] == {"data": {"testRoot": "TEST-ROOT"}}
def test_custom_root_value_function_is_called_by_query(schema):
    """A callable root_value is invoked when a query is executed."""
    root_factory = Mock(return_value=True)
    client = TestClient(GraphQL(schema, root_value=root_factory))
    client.post("/", json={"query": "{ status }"})
    root_factory.assert_called_once()
def test_custom_root_value_function_is_called_by_subscription(schema):
    """A callable root_value is invoked when a subscription starts."""
    root_factory = Mock(return_value=True)
    client = TestClient(GraphQL(schema, root_value=root_factory))
    with client.websocket_connect("/", "graphql-ws") as websocket:
        websocket.send_json({"type": GQL_CONNECTION_INIT})
        start_message = {
            "type": GQL_START,
            "id": "test1",
            "payload": {"query": "subscription { ping }"},
        }
        websocket.send_json(start_message)
        assert websocket.receive_json()["type"] == GQL_CONNECTION_ACK
        assert websocket.receive_json()["type"] == GQL_DATA
        root_factory.assert_called_once()
def test_custom_root_value_function_is_called_with_context_value(schema):
    """The root_value callable receives the context value as its first argument."""
    root_factory = Mock(return_value=True)
    app = GraphQL(
        schema, context_value={"test": "TEST-CONTEXT"}, root_value=root_factory
    )
    TestClient(app).post("/", json={"query": "{ status }"})
    root_factory.assert_called_once_with({"test": "TEST-CONTEXT"}, ANY)
def test_custom_validation_rule_is_called_by_query_validation(schema, validation_rule):
    """A validation rule supplied in a list runs during query validation."""
    client = TestClient(GraphQL(schema, validation_rules=[validation_rule]))
    client.post("/", json={"query": "{ status }"})
    validation_rule.assert_called_once()
def test_custom_validation_rules_function_is_set_and_called_on_query_execution(
    schema, validation_rule
):
    """A callable producing validation rules is invoked and its rules run."""
    rules_factory = Mock(return_value=[validation_rule])
    client = TestClient(GraphQL(schema, validation_rules=rules_factory))
    client.post("/", json={"query": "{ status }"})
    rules_factory.assert_called_once()
    validation_rule.assert_called_once()
def test_custom_validation_rules_function_is_called_with_context_value(
    schema, validation_rule
):
    """The validation-rules callable receives context plus two more arguments."""
    rules_factory = Mock(return_value=[validation_rule])
    app = GraphQL(
        schema,
        context_value={"test": "TEST-CONTEXT"},
        validation_rules=rules_factory,
    )
    TestClient(app).post("/", json={"query": "{ status }"})
    rules_factory.assert_called_once_with({"test": "TEST-CONTEXT"}, ANY, ANY)
def execute_failing_query(app):
    """Post a query that is known to raise an error against *app*."""
    TestClient(app).post("/", json={"query": "{ error }"})
def test_default_logger_is_used_to_log_error_if_custom_is_not_set(schema, mocker):
    """Without a custom logger, errors go to the "ariadne" logger."""
    logging_mock = mocker.patch("ariadne.logger.logging")
    execute_failing_query(GraphQL(schema))
    logging_mock.getLogger.assert_called_once_with("ariadne")
def test_custom_logger_is_used_to_log_query_error(schema, mocker):
    """Query errors are logged via the logger name configured on the app."""
    logging_mock = mocker.patch("ariadne.logger.logging")
    execute_failing_query(GraphQL(schema, logger="custom"))
    logging_mock.getLogger.assert_called_once_with("custom")
def test_custom_logger_is_used_to_log_subscription_source_error(schema, mocker):
    """Errors raised by a subscription source are logged to the custom logger."""
    logging_mock = mocker.patch("ariadne.logger.logging")
    client = TestClient(GraphQL(schema, logger="custom"))
    with client.websocket_connect("/", "graphql-ws") as websocket:
        websocket.send_json({"type": GQL_CONNECTION_INIT})
        start_message = {
            "type": GQL_START,
            "id": "test1",
            "payload": {"query": "subscription { sourceError }"},
        }
        websocket.send_json(start_message)
        assert websocket.receive_json()["type"] == GQL_CONNECTION_ACK
        assert websocket.receive_json()["type"] == GQL_DATA
        logging_mock.getLogger.assert_called_once_with("custom")
def test_custom_logger_is_used_to_log_subscription_resolver_error(schema, mocker):
    """Errors raised by a subscription resolver are logged to the custom logger."""
    logging_mock = mocker.patch("ariadne.logger.logging")
    client = TestClient(GraphQL(schema, logger="custom"))
    with client.websocket_connect("/", "graphql-ws") as websocket:
        websocket.send_json({"type": GQL_CONNECTION_INIT})
        start_message = {
            "type": GQL_START,
            "id": "test1",
            "payload": {"query": "subscription { resolverError }"},
        }
        websocket.send_json(start_message)
        assert websocket.receive_json()["type"] == GQL_CONNECTION_ACK
        assert websocket.receive_json()["type"] == GQL_DATA
        logging_mock.getLogger.assert_called_once_with("custom")
def test_custom_error_formatter_is_used_to_format_query_error(schema):
    """A custom error_formatter is invoked for a failing query."""
    formatter = Mock(return_value=True)
    execute_failing_query(GraphQL(schema, error_formatter=formatter))
    formatter.assert_called_once()
def test_custom_error_formatter_is_used_to_format_subscription_syntax_error(schema):
    """A custom error_formatter is invoked for a malformed subscription."""
    formatter = Mock(return_value=True)
    client = TestClient(GraphQL(schema, error_formatter=formatter))
    with client.websocket_connect("/", "graphql-ws") as websocket:
        websocket.send_json({"type": GQL_CONNECTION_INIT})
        websocket.send_json(
            {"type": GQL_START, "id": "test1", "payload": {"query": "subscription {"}}
        )
        assert websocket.receive_json()["type"] == GQL_CONNECTION_ACK
        error_message = websocket.receive_json()
        assert error_message["type"] == GQL_ERROR
        assert error_message["id"] == "test1"
        formatter.assert_called_once()
def test_custom_error_formatter_is_used_to_format_subscription_source_error(schema):
    """A custom error_formatter is invoked when a subscription source errors."""
    formatter = Mock(return_value=True)
    client = TestClient(GraphQL(schema, error_formatter=formatter))
    with client.websocket_connect("/", "graphql-ws") as websocket:
        websocket.send_json({"type": GQL_CONNECTION_INIT})
        start_message = {
            "type": GQL_START,
            "id": "test1",
            "payload": {"query": "subscription { sourceError }"},
        }
        websocket.send_json(start_message)
        assert websocket.receive_json()["type"] == GQL_CONNECTION_ACK
        data_message = websocket.receive_json()
        assert data_message["type"] == GQL_DATA
        assert data_message["id"] == "test1"
        formatter.assert_called_once()
def test_custom_error_formatter_is_used_to_format_subscription_resolver_error(schema):
    """A custom error_formatter is invoked when a subscription resolver errors."""
    formatter = Mock(return_value=True)
    client = TestClient(GraphQL(schema, error_formatter=formatter))
    with client.websocket_connect("/", "graphql-ws") as websocket:
        websocket.send_json({"type": GQL_CONNECTION_INIT})
        start_message = {
            "type": GQL_START,
            "id": "test1",
            "payload": {"query": "subscription { resolverError }"},
        }
        websocket.send_json(start_message)
        assert websocket.receive_json()["type"] == GQL_CONNECTION_ACK
        data_message = websocket.receive_json()
        assert data_message["type"] == GQL_DATA
        assert data_message["id"] == "test1"
        formatter.assert_called_once()
def test_error_formatter_is_called_with_debug_enabled(schema):
    """With debug=True the formatter receives debug=True as second argument."""
    formatter = Mock(return_value=True)
    execute_failing_query(GraphQL(schema, debug=True, error_formatter=formatter))
    formatter.assert_called_once_with(ANY, True)
def test_error_formatter_is_called_with_debug_disabled(schema):
    """With debug=False the formatter receives debug=False as second argument."""
    formatter = Mock(return_value=True)
    execute_failing_query(GraphQL(schema, debug=False, error_formatter=formatter))
    formatter.assert_called_once_with(ANY, False)
class CustomExtension(Extension):
    """Extension that lower-cases every value produced by the next resolver."""

    async def resolve(self, next_, parent, info, **kwargs):
        resolved = next_(parent, info, **kwargs)
        return resolved.lower()
def test_extension_from_option_are_passed_to_query_executor(schema):
    """Extensions given as a list are applied during query execution."""
    client = TestClient(GraphQL(schema, extensions=[CustomExtension]))
    result = client.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "hello, bob!"}}
def test_extensions_function_result_is_passed_to_query_executor(schema):
    """A callable returning extensions is used during query execution."""
    def extensions_factory(*_):
        return [CustomExtension]
    client = TestClient(GraphQL(schema, extensions=extensions_factory))
    result = client.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "hello, bob!"}}
def test_async_extensions_function_result_is_passed_to_query_executor(schema):
    """An async callable returning extensions is awaited and its result used."""
    async def extensions_factory(*_):
        return [CustomExtension]
    client = TestClient(GraphQL(schema, extensions=extensions_factory))
    result = client.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "hello, bob!"}}
def middleware(next_fn, *args, **kwargs):
    """Resolve via *next_fn* and wrap the produced value in double asterisks."""
    resolved = next_fn(*args, **kwargs)
    return f"**{resolved}**"
def test_middlewares_are_passed_to_query_executor(schema):
    """Middleware given as a list wraps resolver results."""
    client = TestClient(GraphQL(schema, middleware=[middleware]))
    result = client.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "**Hello, BOB!**"}}
def test_middleware_function_result_is_passed_to_query_executor(schema):
    """A callable returning middleware is used during query execution."""
    def middleware_factory(*_):
        return [middleware]
    client = TestClient(GraphQL(schema, middleware=middleware_factory))
    result = client.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "**Hello, BOB!**"}}
def test_async_middleware_function_result_is_passed_to_query_executor(schema):
    """An async callable returning middleware is awaited and its result used."""
    async def middleware_factory(*_):
        return [middleware]
    client = TestClient(GraphQL(schema, middleware=middleware_factory))
    result = client.post("/", json={"query": '{ hello(name: "BOB") }'}).json()
    assert result == {"data": {"hello": "**Hello, BOB!**"}}
| [
"ariadne.asgi.GraphQL",
"starlette.testclient.TestClient",
"unittest.mock.Mock"
] | [((365, 420), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'context_value': "{'test': 'TEST-CONTEXT'}"}), "(schema, context_value={'test': 'TEST-CONTEXT'})\n", (372, 420), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((434, 449), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (444, 449), False, 'from starlette.testclient import TestClient\n'), ((688, 711), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (692, 711), False, 'from unittest.mock import ANY, Mock\n'), ((722, 770), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'context_value': 'get_context_value'}), '(schema, context_value=get_context_value)\n', (729, 770), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((784, 799), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (794, 799), False, 'from starlette.testclient import TestClient\n'), ((998, 1041), 'unittest.mock.Mock', 'Mock', ([], {'return_value': "{'test': 'TEST-CONTEXT'}"}), "(return_value={'test': 'TEST-CONTEXT'})\n", (1002, 1041), False, 'from unittest.mock import ANY, Mock\n'), ((1052, 1100), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'context_value': 'get_context_value'}), '(schema, context_value=get_context_value)\n', (1059, 1100), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((1114, 1129), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (1124, 1129), False, 'from starlette.testclient import TestClient\n'), ((1465, 1513), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'context_value': 'get_context_value'}), '(schema, context_value=get_context_value)\n', (1472, 1513), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((1527, 
1542), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (1537, 1542), False, 'from starlette.testclient import TestClient\n'), ((1765, 1814), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'root_value': "{'test': 'TEST-ROOT'}"}), "(schema, root_value={'test': 'TEST-ROOT'})\n", (1772, 1814), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((1828, 1843), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (1838, 1843), False, 'from starlette.testclient import TestClient\n'), ((2058, 2107), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'root_value': "{'test': 'TEST-ROOT'}"}), "(schema, root_value={'test': 'TEST-ROOT'})\n", (2065, 2107), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((2121, 2136), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (2131, 2136), False, 'from starlette.testclient import TestClient\n'), ((2775, 2798), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (2779, 2798), False, 'from unittest.mock import ANY, Mock\n'), ((2809, 2851), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'root_value': 'get_root_value'}), '(schema, root_value=get_root_value)\n', (2816, 2851), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((2865, 2880), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (2875, 2880), False, 'from starlette.testclient import TestClient\n'), ((3066, 3089), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (3070, 3089), False, 'from unittest.mock import ANY, Mock\n'), ((3100, 3142), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'root_value': 'get_root_value'}), '(schema, root_value=get_root_value)\n', (3107, 3142), False, 'from ariadne.asgi import 
GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((3156, 3171), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (3166, 3171), False, 'from starlette.testclient import TestClient\n'), ((3786, 3809), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (3790, 3809), False, 'from unittest.mock import ANY, Mock\n'), ((3820, 3907), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'context_value': "{'test': 'TEST-CONTEXT'}", 'root_value': 'get_root_value'}), "(schema, context_value={'test': 'TEST-CONTEXT'}, root_value=\n get_root_value)\n", (3827, 3907), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((3930, 3945), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (3940, 3945), False, 'from starlette.testclient import TestClient\n'), ((4171, 4222), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'validation_rules': '[validation_rule]'}), '(schema, validation_rules=[validation_rule])\n', (4178, 4222), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((4236, 4251), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (4246, 4251), False, 'from starlette.testclient import TestClient\n'), ((4484, 4520), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '[validation_rule]'}), '(return_value=[validation_rule])\n', (4488, 4520), False, 'from unittest.mock import ANY, Mock\n'), ((4531, 4585), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'validation_rules': 'get_validation_rules'}), '(schema, validation_rules=get_validation_rules)\n', (4538, 4585), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((4599, 4614), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (4609, 4614), False, 'from 
starlette.testclient import TestClient\n'), ((4885, 4921), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '[validation_rule]'}), '(return_value=[validation_rule])\n', (4889, 4921), False, 'from unittest.mock import ANY, Mock\n'), ((4932, 5031), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'context_value': "{'test': 'TEST-CONTEXT'}", 'validation_rules': 'get_validation_rules'}), "(schema, context_value={'test': 'TEST-CONTEXT'}, validation_rules=\n get_validation_rules)\n", (4939, 5031), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((5071, 5086), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (5081, 5086), False, 'from starlette.testclient import TestClient\n'), ((5270, 5285), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (5280, 5285), False, 'from starlette.testclient import TestClient\n'), ((5489, 5504), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {}), '(schema)\n', (5496, 5504), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((5735, 5767), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'logger': '"""custom"""'}), "(schema, logger='custom')\n", (5742, 5767), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((6011, 6043), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'logger': '"""custom"""'}), "(schema, logger='custom')\n", (6018, 6043), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((6057, 6072), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (6067, 6072), False, 'from starlette.testclient import TestClient\n'), ((6771, 6803), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'logger': '"""custom"""'}), "(schema, logger='custom')\n", (6778, 6803), False, 'from ariadne.asgi import 
GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((6817, 6832), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (6827, 6832), False, 'from starlette.testclient import TestClient\n'), ((7475, 7498), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (7479, 7498), False, 'from unittest.mock import ANY, Mock\n'), ((7509, 7557), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'error_formatter': 'error_formatter'}), '(schema, error_formatter=error_formatter)\n', (7516, 7557), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((7739, 7762), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (7743, 7762), False, 'from unittest.mock import ANY, Mock\n'), ((7773, 7821), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'error_formatter': 'error_formatter'}), '(schema, error_formatter=error_formatter)\n', (7780, 7821), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((7835, 7850), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (7845, 7850), False, 'from starlette.testclient import TestClient\n'), ((8450, 8473), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (8454, 8473), False, 'from unittest.mock import ANY, Mock\n'), ((8484, 8532), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'error_formatter': 'error_formatter'}), '(schema, error_formatter=error_formatter)\n', (8491, 8532), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((8546, 8561), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (8556, 8561), False, 'from starlette.testclient import TestClient\n'), ((9239, 9262), 'unittest.mock.Mock', 'Mock', ([], {'return_value': 
'(True)'}), '(return_value=True)\n', (9243, 9262), False, 'from unittest.mock import ANY, Mock\n'), ((9273, 9321), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'error_formatter': 'error_formatter'}), '(schema, error_formatter=error_formatter)\n', (9280, 9321), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((9335, 9350), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (9345, 9350), False, 'from starlette.testclient import TestClient\n'), ((10006, 10029), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (10010, 10029), False, 'from unittest.mock import ANY, Mock\n'), ((10040, 10100), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'debug': '(True)', 'error_formatter': 'error_formatter'}), '(schema, debug=True, error_formatter=error_formatter)\n', (10047, 10100), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((10275, 10298), 'unittest.mock.Mock', 'Mock', ([], {'return_value': '(True)'}), '(return_value=True)\n', (10279, 10298), False, 'from unittest.mock import ANY, Mock\n'), ((10309, 10370), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'debug': '(False)', 'error_formatter': 'error_formatter'}), '(schema, debug=False, error_formatter=error_formatter)\n', (10316, 10370), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((10688, 10733), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'extensions': '[CustomExtension]'}), '(schema, extensions=[CustomExtension])\n', (10695, 10733), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((10747, 10762), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (10757, 10762), False, 'from starlette.testclient import TestClient\n'), ((11049, 11091), 
'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'extensions': 'get_extensions'}), '(schema, extensions=get_extensions)\n', (11056, 11091), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((11105, 11120), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (11115, 11120), False, 'from starlette.testclient import TestClient\n'), ((11419, 11461), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'extensions': 'get_extensions'}), '(schema, extensions=get_extensions)\n', (11426, 11461), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((11475, 11490), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (11485, 11490), False, 'from starlette.testclient import TestClient\n'), ((11808, 11848), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'middleware': '[middleware]'}), '(schema, middleware=[middleware])\n', (11815, 11848), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((11862, 11877), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (11872, 11877), False, 'from starlette.testclient import TestClient\n'), ((12163, 12205), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'middleware': 'get_middleware'}), '(schema, middleware=get_middleware)\n', (12170, 12205), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, GQL_START, GraphQL\n'), ((12219, 12234), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (12229, 12234), False, 'from starlette.testclient import TestClient\n'), ((12532, 12574), 'ariadne.asgi.GraphQL', 'GraphQL', (['schema'], {'middleware': 'get_middleware'}), '(schema, middleware=get_middleware)\n', (12539, 12574), False, 'from ariadne.asgi import GQL_CONNECTION_ACK, GQL_CONNECTION_INIT, GQL_DATA, GQL_ERROR, 
GQL_START, GraphQL\n'), ((12588, 12603), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (12598, 12603), False, 'from starlette.testclient import TestClient\n')] |
import tensorflow as tf
import prettytensor as pt
import numpy as np
import gym
import math
import random
from collections import deque
from agents import mixed_network, spaces, replay_buffer
tensorType = tf.float32
"""
Implements a Deep Deterministic Policy Gradient agent.
Adjustable parameters:
- Actor / Critic learning rates
- Temporal Difference discount factor
- Experience Replay buffer / batch sizes
"""
class DDPGAgent:
    """
    Creates a new DDPG agent.
    Args:
        - actorGen and criticGen should be functions that create new
        neural networks with supplied Placeholder input Tensors.
        - state_shape will be the shape of the state input Placeholder.
        - action_shape should be the shape of the tensors output by the
        actor neural network.
        - buf_sz is the size of the agent's internal experience replay buffer.
        - batch_sz will be the size of each training batch (drawn from the replay buffer)
    """
    def __init__(self, actorGen, criticGen, state_shape, action_shape, buf_sz=100000,
        batch_sz=64, critic_learning_rate=0.001, actor_learning_rate=0.0001,
        discount_factor=0.99, actor_mix_factor=0.001,
        critic_mix_factor=0.001, actor_gradient_clipping=None, critic_gradient_clipping=None):
        """Build the actor/critic graphs, their optimizers and the replay buffer.

        actor_mix_factor / critic_mix_factor are forwarded to MixedNetwork as
        target_mix_factor. Each *_gradient_clipping argument, when given as a
        (min, max) tuple, enables tf.clip_by_value on that network's gradients.
        """
        self.graph = tf.Graph()
        self.session = tf.Session(graph=self.graph)
        self.discount_factor = discount_factor
        # Experience replay: oldest transitions are evicted once buf_sz is reached.
        self.replay_buf = deque(maxlen=buf_sz)
        self.batch_size = batch_sz
        self.state_shape = state_shape
        self.action_shape = action_shape
        # Shape used to feed a single observation in act(); assumes state_shape
        # is a list whose first entry is the batch dimension — TODO confirm.
        self.__single_state_shape = self.state_shape[:]
        self.__single_state_shape[0] = 1
        with self.graph.as_default():
            self.state_in = tf.placeholder(tensorType, state_shape, name='state-in')
            self.action_in = tf.placeholder(tensorType, action_shape, name='action-in')
            with tf.variable_scope('critic'):
                # Critic input is state and action concatenated along axis 1.
                self.critic = mixed_network.MixedNetwork(self.graph, self.session,
                    tf.concat_v2([self.state_in, self.action_in], axis=1),
                    criticGen, target_mix_factor=critic_mix_factor,
                    prefix='critic/')
                # TD targets fed in during train(); loss is mean squared TD error.
                self.critic_prediction = tf.placeholder(tensorType, [None])
                self.critic_loss = tf.reduce_mean( tf.square( self.critic_prediction - tf.squeeze(self.critic.main_out) ) )
                critic_optimizer = tf.train.AdamOptimizer(critic_learning_rate)
                if isinstance(critic_gradient_clipping, tuple):
                    critic_gradients = critic_optimizer.compute_gradients(self.critic_loss, self.critic.main_parameters)
                    clipped_grads = [ \
                        ( tf.clip_by_value(gv[0], critic_gradient_clipping[0], critic_gradient_clipping[1]), gv[1]) \
                        for gv in critic_gradients ]
                    self.critic_optimize = critic_optimizer.apply_gradients(clipped_grads)
                else:
                    self.critic_optimize = critic_optimizer.minimize(self.critic_loss, var_list=self.critic.main_parameters)
                # gradient of the critic network w.r.t. the actions, averaged over all (s,a) pairs in batch
                self.action_gradient = tf.div(tf.gradients(self.critic.main_out, self.action_in), tf.constant(self.batch_size, tensorType))
            with tf.variable_scope('actor'):
                self.actor = mixed_network.MixedNetwork(self.graph,
                    self.session, self.state_in, actorGen, prefix='actor/',
                    target_mix_factor=actor_mix_factor)
                #self.aGrad_pl = tf.placeholder(tensorType, action_shape, name='action-gradient-placeholder')
                # Policy gradient: d(actor output)/d(params), weighted by the
                # critic's action gradient (the tf.gradients grad_ys argument).
                self.actor_gradients = tf.gradients(self.actor.main_out, self.actor.main_parameters, self.action_gradient)
                #self.actor_optimize = [p.assign(p + actor_learning_rate*g) \
                #for p, g in zip(self.actor.main_parameters, self.actor_gradients)]
                #self.actor_optimize = tf.train.GradientDescentOptimizer(actor_learning_rate).apply_gradients(
                #    zip(self.actor_gradients, self.actor.main_parameters)
                #)
                if isinstance(actor_gradient_clipping, tuple):
                    self.actor_gradients = [tf.clip_by_value(g, actor_gradient_clipping[0], actor_gradient_clipping[1]) for g in self.actor_gradients]
                # Negate so that Adam's minimization performs gradient ascent.
                self.actor_gradients = [tf.negative(g) for g in self.actor_gradients]
                self.actor_optimize = tf.train.AdamOptimizer(actor_learning_rate).apply_gradients(
                    zip(self.actor_gradients, self.actor.main_parameters)
                )
        self.session.run(tf.global_variables_initializer())
    def act(self, observation):
        """Return the main actor network's action for one observation."""
        return self.actor.get_main({ self.state_in: np.reshape(observation, self.__single_state_shape)})
    def add_experience(self, state, action, reward, done, next_state):
        """Store one (state, action, reward, done, next_state) transition."""
        self.replay_buf.append( (state, action, reward, done, next_state) )
    def train(self):
        """Run one critic + actor update on a batch sampled from the buffer.

        Returns:
            (predicted_q, critic_loss) for monitoring.
        """
        sm = random.sample(self.replay_buf, min(len(self.replay_buf), self.batch_size))
        state_shape = self.state_shape[:]
        action_shape = self.action_shape[:]
        state_shape[0] = action_shape[0] = len(sm)
        states = np.reshape([ ts[0] for ts in sm ], state_shape)
        actions = np.reshape([ ts[1] for ts in sm ], action_shape)
        rewards = np.reshape([ ts[2] for ts in sm ], [len(sm)])
        term_state = np.reshape([ ts[3] for ts in sm ], [len(sm)])
        next_states = np.reshape([ ts[4] for ts in sm ], state_shape)
        # Use target actor and critic networks to estimate TD targets
        target_a = np.reshape(self.actor.get_target({self.state_in:next_states}), action_shape)
        target_q = np.reshape(self.critic.get_target({ self.state_in:next_states, self.action_in:target_a }), [len(sm)])
        td_targets = []
        for i, t in enumerate(target_q):
            if term_state[i]:
                # Terminal transition: no bootstrapped future value.
                td_targets.append(rewards[i])
            else:
                td_targets.append(rewards[i] + (self.discount_factor * t))
        _, crit_loss, predicted_q = self.session.run([self.critic_optimize, self.critic_loss, self.critic.main_out], {
            self.state_in: states,
            self.action_in: actions,
            self.critic_prediction: np.squeeze(td_targets)
        })
        # Actor update uses the actions produced by the *current* policy,
        # not the (possibly exploratory) actions stored in the buffer.
        net_actions = np.reshape(self.actor.get_main({self.state_in: states}), action_shape)
        self.session.run(self.actor_optimize, {self.state_in:states, self.action_in:net_actions})
        #self.session.run(self.actor_optimize, {self.state_in:states, self.action_in:actions})
        #actor_grad = self.session.run(self.actor_gradients, {self.state_in:states, self.action_in:net_actions})[0]
        #assert not np.isnan(np.sum(actor_grad))
        return np.squeeze(predicted_q), crit_loss
    def update_targets(self):
        """Mix both target networks toward their main networks."""
        self.actor.update_target()
        self.critic.update_target()
| [
"tensorflow.Graph",
"agents.mixed_network.MixedNetwork",
"collections.deque",
"numpy.reshape",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.squeeze",
"tensorflow.gradients",
"tensorflow.global_variables_initializer",
"tensorflow.negative",
"tensorflow.con... | [((1300, 1310), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1308, 1310), True, 'import tensorflow as tf\n'), ((1334, 1362), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (1344, 1362), True, 'import tensorflow as tf\n'), ((1438, 1458), 'collections.deque', 'deque', ([], {'maxlen': 'buf_sz'}), '(maxlen=buf_sz)\n', (1443, 1458), False, 'from collections import deque\n'), ((5346, 5391), 'numpy.reshape', 'np.reshape', (['[ts[0] for ts in sm]', 'state_shape'], {}), '([ts[0] for ts in sm], state_shape)\n', (5356, 5391), True, 'import numpy as np\n'), ((5412, 5458), 'numpy.reshape', 'np.reshape', (['[ts[1] for ts in sm]', 'action_shape'], {}), '([ts[1] for ts in sm], action_shape)\n', (5422, 5458), True, 'import numpy as np\n'), ((5614, 5659), 'numpy.reshape', 'np.reshape', (['[ts[4] for ts in sm]', 'state_shape'], {}), '([ts[4] for ts in sm], state_shape)\n', (5624, 5659), True, 'import numpy as np\n'), ((1739, 1795), 'tensorflow.placeholder', 'tf.placeholder', (['tensorType', 'state_shape'], {'name': '"""state-in"""'}), "(tensorType, state_shape, name='state-in')\n", (1753, 1795), True, 'import tensorflow as tf\n'), ((1825, 1883), 'tensorflow.placeholder', 'tf.placeholder', (['tensorType', 'action_shape'], {'name': '"""action-in"""'}), "(tensorType, action_shape, name='action-in')\n", (1839, 1883), True, 'import tensorflow as tf\n'), ((6919, 6942), 'numpy.squeeze', 'np.squeeze', (['predicted_q'], {}), '(predicted_q)\n', (6929, 6942), True, 'import numpy as np\n'), ((1902, 1929), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""critic"""'], {}), "('critic')\n", (1919, 1929), True, 'import tensorflow as tf\n'), ((2273, 2307), 'tensorflow.placeholder', 'tf.placeholder', (['tensorType', '[None]'], {}), '(tensorType, [None])\n', (2287, 2307), True, 'import tensorflow as tf\n'), ((2467, 2511), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['critic_learning_rate'], {}), 
'(critic_learning_rate)\n', (2489, 2511), True, 'import tensorflow as tf\n'), ((3410, 3436), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""actor"""'], {}), "('actor')\n", (3427, 3436), True, 'import tensorflow as tf\n'), ((3467, 3601), 'agents.mixed_network.MixedNetwork', 'mixed_network.MixedNetwork', (['self.graph', 'self.session', 'self.state_in', 'actorGen'], {'prefix': '"""actor/"""', 'target_mix_factor': 'actor_mix_factor'}), "(self.graph, self.session, self.state_in,\n actorGen, prefix='actor/', target_mix_factor=actor_mix_factor)\n", (3493, 3601), False, 'from agents import mixed_network, spaces, replay_buffer\n'), ((3781, 3869), 'tensorflow.gradients', 'tf.gradients', (['self.actor.main_out', 'self.actor.main_parameters', 'self.action_gradient'], {}), '(self.actor.main_out, self.actor.main_parameters, self.\n action_gradient)\n', (3793, 3869), True, 'import tensorflow as tf\n'), ((4759, 4792), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4790, 4792), True, 'import tensorflow as tf\n'), ((4879, 4929), 'numpy.reshape', 'np.reshape', (['observation', 'self.__single_state_shape'], {}), '(observation, self.__single_state_shape)\n', (4889, 4929), True, 'import numpy as np\n'), ((6415, 6437), 'numpy.squeeze', 'np.squeeze', (['td_targets'], {}), '(td_targets)\n', (6425, 6437), True, 'import numpy as np\n'), ((2046, 2099), 'tensorflow.concat_v2', 'tf.concat_v2', (['[self.state_in, self.action_in]'], {'axis': '(1)'}), '([self.state_in, self.action_in], axis=1)\n', (2058, 2099), True, 'import tensorflow as tf\n'), ((3298, 3348), 'tensorflow.gradients', 'tf.gradients', (['self.critic.main_out', 'self.action_in'], {}), '(self.critic.main_out, self.action_in)\n', (3310, 3348), True, 'import tensorflow as tf\n'), ((3350, 3390), 'tensorflow.constant', 'tf.constant', (['self.batch_size', 'tensorType'], {}), '(self.batch_size, tensorType)\n', (3361, 3390), True, 'import tensorflow as tf\n'), ((4491, 4505), 
'tensorflow.negative', 'tf.negative', (['g'], {}), '(g)\n', (4502, 4505), True, 'import tensorflow as tf\n'), ((4343, 4418), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['g', 'actor_gradient_clipping[0]', 'actor_gradient_clipping[1]'], {}), '(g, actor_gradient_clipping[0], actor_gradient_clipping[1])\n', (4359, 4418), True, 'import tensorflow as tf\n'), ((4576, 4619), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['actor_learning_rate'], {}), '(actor_learning_rate)\n', (4598, 4619), True, 'import tensorflow as tf\n'), ((2395, 2427), 'tensorflow.squeeze', 'tf.squeeze', (['self.critic.main_out'], {}), '(self.critic.main_out)\n', (2405, 2427), True, 'import tensorflow as tf\n'), ((2763, 2848), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['gv[0]', 'critic_gradient_clipping[0]', 'critic_gradient_clipping[1]'], {}), '(gv[0], critic_gradient_clipping[0],\n critic_gradient_clipping[1])\n', (2779, 2848), True, 'import tensorflow as tf\n')] |
from aiohttp.test_utils import TestClient
import pytest
import typing
import unittest.mock
from rolling.kernel import Kernel
from rolling.model.character import CharacterModel
from rolling.model.character import MINIMUM_BEFORE_EXHAUSTED
from rolling.server.document.affinity import AffinityDirectionType
from rolling.server.document.affinity import AffinityJoinType
from rolling.server.document.affinity import CHIEF_STATUS
from rolling.server.document.affinity import MEMBER_STATUS
from rolling.server.document.build import BuildDocument
from rolling.server.document.build import DOOR_MODE_LABELS
from rolling.server.document.build import DOOR_MODE__CLOSED
from rolling.server.document.build import DOOR_MODE__CLOSED_EXCEPT_FOR
from rolling.server.document.build import DoorDocument
@pytest.fixture
def websocket_prepare_mock() -> typing.Generator[unittest.mock.AsyncMock, None, None]:
    """Patch ``WebSocketResponse.prepare`` and yield the patched mock."""
    patcher = unittest.mock.patch("aiohttp.web_ws.WebSocketResponse.prepare")
    patched = patcher.start()
    try:
        yield patched
    finally:
        patcher.stop()
@pytest.fixture
def zone_event_manager_listen_mock() -> typing.Generator[
    unittest.mock.AsyncMock, None, None
]:
    """Patch ``ZoneEventsManager._listen`` and yield the patched mock."""
    patcher = unittest.mock.patch(
        "rolling.server.zone.websocket.ZoneEventsManager._listen"
    )
    listen_mock = patcher.start()
    try:
        yield listen_mock
    finally:
        patcher.stop()
@pytest.fixture
def zone_event_manager_close_mock() -> typing.Generator[
    unittest.mock.AsyncMock, None, None
]:
    """Patch ``ZoneEventsManager.close_websocket`` and yield the patched mock."""
    patcher = unittest.mock.patch(
        "rolling.server.zone.websocket.ZoneEventsManager.close_websocket"
    )
    close_mock = patcher.start()
    try:
        yield close_mock
    finally:
        patcher.stop()
@pytest.fixture
def socket_send_str_mock() -> typing.Generator[unittest.mock.AsyncMock, None, None]:
    """Patch ``WebSocketResponse.send_str`` and yield the patched mock."""
    patcher = unittest.mock.patch("aiohttp.web_ws.WebSocketResponse.send_str")
    send_str_mock = patcher.start()
    try:
        yield send_str_mock
    finally:
        patcher.stop()
class TestDoor:
    """Tests for door access rules: who can pass a door depending on the
    rule author's mode (closed / closed-except-for), affinity membership,
    and the author's presence/state in the zone."""
    def _place_door(self, kernel: Kernel) -> DoorDocument:
        """Place a finished DOOR build at zone (10, 10) of world tile (1, 1)."""
        build = kernel.build_lib.place_build(
            world_row_i=1,
            world_col_i=1,
            zone_row_i=10,
            zone_col_i=10,
            build_id="DOOR",
            under_construction=False,
        )
        return build
    def _create_rule(
        self,
        kernel: Kernel,
        author: CharacterModel,
        door: BuildDocument,
        mode: str,
        affinity_ids: typing.Optional[typing.List[int]],
    ) -> None:
        """Register *author*'s access rule for *door* through the door lib."""
        kernel.door_lib.update(
            character_id=author.id,
            build_id=door.id,
            new_mode=mode,
            new_affinity_ids=affinity_ids,
        )
    def test_one_rule_lock__author_here__stranger_cant(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """A closed rule locks strangers out while the author is present."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        # Given
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        # When
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
    def test_one_rule_lock_except__author_here__stranger_cant_but_member_can(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_franck_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """Closed-except-for lets affinity members through, not strangers."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        franck = worldmapc_franck_model
        # Given: xena (chief) and franck (member) share an affinity
        aff = kernel.affinity_lib.create(
            name="aff1",
            join_type=AffinityJoinType.ACCEPT_ALL,
            direction_type=AffinityDirectionType.ONE_DIRECTOR,
        )
        kernel.affinity_lib.join(
            character_id=xena.id,
            affinity_id=aff.id,
            accepted=True,
            request=False,
            status_id=CHIEF_STATUS[0],
        )
        kernel.affinity_lib.join(
            character_id=franck.id,
            affinity_id=aff.id,
            accepted=True,
            request=False,
            status_id=MEMBER_STATUS[0],
        )
        door = self._place_door(kernel)
        self._create_rule(
            kernel,
            author=xena,
            door=door,
            mode=DOOR_MODE__CLOSED_EXCEPT_FOR,
            affinity_ids=[aff.id],
        )
        # When
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=franck.id
        )
    def test_two_rule_lock__author_here_and_first_can__stranger_second_cant(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """With two competing rules, the first author's rule wins."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        # Given
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        self._create_rule(
            kernel, author=arthur, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        # When
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
    async def test_two_rule_lock__author_first_travel__stranger_second_can(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """The second rule takes over while the first author is out of zone."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        # Given
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        self._create_rule(
            kernel, author=arthur, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        # When/Then 1
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
        # Given 2: xena leaves the door's zone
        await kernel.character_lib.move(
            character=xena,
            to_world_row=2,
            to_world_col=2,
        )
        # When/Then 2
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
        # Given 2: xena comes back; arthur's rule now applies
        await kernel.character_lib.move(
            character=xena,
            to_world_row=1,
            to_world_col=1,
        )
        # When/Then 3
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
    async def test_one_rule_lock__author_first_travel__stranger_second_can(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """A single rule is suspended while its author is out of zone and
        re-applies when the author returns."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        # Given
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        # When/Then 1
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
        # Given 2: xena leaves the door's zone
        await kernel.character_lib.move(
            character=xena,
            to_world_row=2,
            to_world_col=2,
        )
        # When/Then 2
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
        # Given 2: xena comes back
        await kernel.character_lib.move(
            character=xena,
            to_world_row=1,
            to_world_col=1,
        )
        # When/Then 3
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
    async def test_one_rule_lock__author_dead__stranger_can(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """A rule no longer applies once its author is dead."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        # Given
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        # When/Then 1
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
        # Given 2
        kernel.character_lib.kill(character_id=xena.id)
        # When/Then 2
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
    async def test_one_rule_lock__author_vulnerable__stranger_can(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
    ) -> None:
        """A rule no longer applies once its author is vulnerable (exhausted)."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        # Given
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        # When/Then 1
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=xena.id
        )
        assert kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
        # Given 2: push xena's tiredness over the exhaustion threshold
        xena_doc = kernel.character_lib.get_document(xena.id)
        xena_doc.tiredness = MINIMUM_BEFORE_EXHAUSTED + 1
        kernel.server_db_session.add(xena_doc)
        kernel.server_db_session.commit()
        xena = kernel.character_lib.get(id_=xena.id)
        assert xena.vulnerable
        # When/Then 2
        assert not kernel.door_lib.is_access_locked_for(
            build_id=door.id, character_id=arthur.id
        )
    @pytest.mark.usefixtures("websocket_prepare_mock")
    @pytest.mark.usefixtures("zone_event_manager_listen_mock")
    @pytest.mark.usefixtures("zone_event_manager_close_mock")
    async def test_events_when_door_author_left_when_back_in_zone(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
        socket_send_str_mock: unittest.mock.AsyncMock,
    ) -> None:
        """Zone sockets receive NEW_BUILD/WALKING events when the rule author
        leaves and re-enters the zone."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        request_mock = unittest.mock.AsyncMock()
        # Given: arthur listens to the zone through a websocket
        door = self._place_door(kernel)
        self._create_rule(
            kernel, author=xena, door=door, mode=DOOR_MODE__CLOSED, affinity_ids=[]
        )
        _ = await kernel.server_zone_events_manager.get_new_socket(
            request=request_mock,
            row_i=1,
            col_i=1,
            character_id=arthur.id,
        )
        # When: xena leaves the zone
        await kernel.character_lib.move(
            character=xena,
            to_world_row=1,
            to_world_col=2,
        )
        # Then: the door becomes traversable
        socket_send_str_mock.assert_awaited()
        events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
        assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
        assert any(['{"WALKING":true}' in event_str for event_str in events_str_list])
        # When: xena comes back
        socket_send_str_mock.reset_mock()
        await kernel.character_lib.move(
            character=xena,
            to_world_row=1,
            to_world_col=1,
        )
        # Then: the door locks again
        socket_send_str_mock.assert_awaited()
        events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
        assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
        assert any(['{"WALKING":false}' in event_str for event_str in events_str_list])
    @pytest.mark.usefixtures("websocket_prepare_mock")
    @pytest.mark.usefixtures("zone_event_manager_listen_mock")
    @pytest.mark.usefixtures("zone_event_manager_close_mock")
    async def test_events_when_door_author_update_rule(
        self,
        worldmapc_xena_model: CharacterModel,
        worldmapc_arthur_model: CharacterModel,
        worldmapc_kernel: Kernel,
        socket_send_str_mock: unittest.mock.AsyncMock,
        worldmapc_web_app: TestClient,
    ) -> None:
        """Zone sockets receive NEW_BUILD/WALKING events when a rule is
        created/updated through the HTTP endpoint."""
        kernel = worldmapc_kernel
        xena = worldmapc_xena_model
        arthur = worldmapc_arthur_model
        request_mock = unittest.mock.AsyncMock()
        web = worldmapc_web_app
        # Given: arthur listens to the zone through a websocket
        door = self._place_door(kernel)
        _ = await kernel.server_zone_events_manager.get_new_socket(
            request=request_mock,
            row_i=1,
            col_i=1,
            character_id=arthur.id,
        )
        # When: xena posts a CLOSED rule for the door
        response = await web.post(
            f"/character/{xena.id}/door/{door.id}?mode={DOOR_MODE_LABELS[DOOR_MODE__CLOSED]}"
        )
        assert response.status == 200
        # Then: the zone is notified that the door is no longer traversable
        socket_send_str_mock.assert_awaited()
        events_str_list = [arg[0][0] for arg in socket_send_str_mock.await_args_list]
        assert any(["NEW_BUILD" in event_str for event_str in events_str_list])
        assert any(['{"WALKING":false}' in event_str for event_str in events_str_list])
| [
"pytest.mark.usefixtures"
] | [((11035, 11084), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""websocket_prepare_mock"""'], {}), "('websocket_prepare_mock')\n", (11058, 11084), False, 'import pytest\n'), ((11090, 11147), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""zone_event_manager_listen_mock"""'], {}), "('zone_event_manager_listen_mock')\n", (11113, 11147), False, 'import pytest\n'), ((11153, 11209), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""zone_event_manager_close_mock"""'], {}), "('zone_event_manager_close_mock')\n", (11176, 11209), False, 'import pytest\n'), ((12997, 13046), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""websocket_prepare_mock"""'], {}), "('websocket_prepare_mock')\n", (13020, 13046), False, 'import pytest\n'), ((13052, 13109), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""zone_event_manager_listen_mock"""'], {}), "('zone_event_manager_listen_mock')\n", (13075, 13109), False, 'import pytest\n'), ((13115, 13171), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""zone_event_manager_close_mock"""'], {}), "('zone_event_manager_close_mock')\n", (13138, 13171), False, 'import pytest\n')] |
# myWeather.py for inkyphat and RPiZW
"""Fetch the current weather for Quispamsis, CA from OpenWeatherMap and print it."""

# Query fragments kept as module-level names (same names as the original script).
wx_url = 'api.openweathermap.org/data/2.5/weather?'
wx_city = 'q=Quispamsis,CA&units=metric'
wx_cityID = 'id=6115383&units=metric'
api_key = '&APPID='+'ENTER YOUR API KEY HERE'


class ApiError(Exception):
    """Raised when the weather service answers with a non-200 status.

    The original script raised an undefined name ``ApiError`` (NameError);
    this defines it properly.
    """


def mps_to_kmh(speed_mps):
    """Convert a wind speed from metres/second to kilometres/hour.

    Bug fix: the original multiplied by 0.277778 (the km/h -> m/s factor)
    while labelling the result "km/h"; 1 m/s == 3.6 km/h.
    """
    return speed_mps * 3.6


def fetch_weather():
    """Call the OpenWeatherMap API and return the decoded JSON payload.

    Prints progress messages and exits (SystemExit) when the requests module
    is missing or the service is unreachable, mirroring the original script.
    Raises ApiError on a non-200 HTTP status.
    """
    print('Starting')
    try:
        import requests
        print('requests module imported')
    except ImportError:  # only a missing module should trigger this message
        print('Sorry, need to install requests module')
        raise SystemExit()

    try:
        resp = requests.get('http://'+wx_url+wx_cityID+api_key)
        print('got data')
    except requests.RequestException:  # network / DNS / timeout problems
        print('Cannot connect to service...')
        raise SystemExit()

    if resp.status_code != 200:
        raise ApiError('GET /weather/ {}'.format(resp.status_code))
    return resp.json()


def report(payload):
    """Print a human-readable weather summary from the API payload."""
    try:
        city = payload["name"]
        temperature = payload["main"]["temp"]  # celsius (units=metric)
        pressure = payload["main"]["pressure"]  # hPa
        humidity = payload["main"]["humidity"]  # %  (read for parity; not printed)
        windSpeed = payload["wind"]["speed"]  # m/s
        windDeg = payload["wind"]["deg"]  # degrees
        print('got json info')
    except KeyError:  # payload did not have the expected schema
        print('Cannot read data in api call...')
        raise SystemExit()

    print('Weather in', city+':')
    print('\tTemperature:\t', str(temperature)+'°C')
    print('\tPressure:\t', pressure, 'hPa')
    print('\tWind:\t\t', windSpeed, 'm/s from', str(windDeg)+'°')
    print('\tWind:\t\t',
          round(mps_to_kmh(windSpeed), 1), 'km/h from', str(windDeg)+'°')


if __name__ == '__main__':
    report(fetch_weather())
| [
"requests.get"
] | [((403, 457), 'requests.get', 'requests.get', (["('http://' + wx_url + wx_cityID + api_key)"], {}), "('http://' + wx_url + wx_cityID + api_key)\n", (415, 457), False, 'import requests\n')] |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class AccessIdName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Name of the access configuration (``access`` or ``gitAccess``)."""

    ACCESS = "access"
    GIT_ACCESS = "gitAccess"
class AccessType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of access (credential) to be used for the storage account.
    """

    #: Use access key.
    ACCESS_KEY = "AccessKey"
    #: Use system assigned managed identity.
    SYSTEM_ASSIGNED_MANAGED_IDENTITY = "SystemAssignedManagedIdentity"
    #: Use user assigned managed identity.
    USER_ASSIGNED_MANAGED_IDENTITY = "UserAssignedManagedIdentity"
class AlwaysLog(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies for what type of messages sampling settings should not apply.
"""
#: Always log all erroneous request regardless of sampling settings.
ALL_ERRORS = "allErrors"
class ApiManagementSkuCapacityScaleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The scale type applicable to the sku.
"""
AUTOMATIC = "Automatic"
MANUAL = "Manual"
NONE = "None"
class ApiManagementSkuRestrictionsReasonCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The reason for restriction.
"""
QUOTA_ID = "QuotaId"
NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription"
class ApiManagementSkuRestrictionsType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of restrictions.
"""
LOCATION = "Location"
ZONE = "Zone"
class ApimIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of identity used for the resource. The type 'SystemAssigned, UserAssigned' includes
both an implicitly created identity and a set of user assigned identities. The type 'None' will
remove any identities from the service.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
NONE = "None"
class ApiType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Type of API.
"""
HTTP = "http"
SOAP = "soap"
WEBSOCKET = "websocket"
GRAPHQL = "graphql"
class ApiVersionSetContractDetailsVersioningScheme(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""An value that determines where the API Version identifier will be located in a HTTP request.
"""
SEGMENT = "Segment"
QUERY = "Query"
HEADER = "Header"
class AppType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
#: User create request was sent by legacy developer portal.
PORTAL = "portal"
#: User create request was sent by new developer portal.
DEVELOPER_PORTAL = "developerPortal"
class AsyncOperationStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Status of an async operation.
"""
STARTED = "Started"
IN_PROGRESS = "InProgress"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
class AuthorizationMethod(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
HEAD = "HEAD"
OPTIONS = "OPTIONS"
TRACE = "TRACE"
GET = "GET"
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
DELETE = "DELETE"
class BackendProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Backend communication protocol.
"""
#: The Backend is a RESTful service.
HTTP = "http"
#: The Backend is a SOAP service.
SOAP = "soap"
class BearerTokenSendingMethod(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
AUTHORIZATION_HEADER = "authorizationHeader"
QUERY = "query"
class BearerTokenSendingMethods(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Form of an authorization grant, which the client uses to request the access token.
"""
#: Access token will be transmitted in the Authorization header using Bearer schema.
AUTHORIZATION_HEADER = "authorizationHeader"
#: Access token will be transmitted as query parameters.
QUERY = "query"
class CertificateConfigurationStoreName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The System.Security.Cryptography.x509certificates.StoreName certificate store location. Only
    Root and CertificateAuthority are valid locations.
    """
    CERTIFICATE_AUTHORITY = "CertificateAuthority"
    ROOT = "Root"
class CertificateSource(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Certificate Source.
    """
    MANAGED = "Managed"
    KEY_VAULT = "KeyVault"
    CUSTOM = "Custom"
    BUILT_IN = "BuiltIn"
class CertificateStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Certificate Status.
    """
    COMPLETED = "Completed"
    FAILED = "Failed"
    IN_PROGRESS = "InProgress"
class ClientAuthenticationMethod(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Method of authenticating the client at the token endpoint.
    """
    #: Basic Client Authentication method.
    BASIC = "Basic"
    #: Body based Authentication method.
    BODY = "Body"
class ConfigurationIdName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Name of the configuration entity; the only supported value is ``configuration``.
    """
    CONFIGURATION = "configuration"
class Confirmation(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Determines the type of confirmation e-mail that will be sent to the newly created user.
    """
    #: Send an e-mail to the user confirming they have successfully signed up.
    SIGNUP = "signup"
    #: Send an e-mail inviting the user to sign-up and complete registration.
    INVITE = "invite"
class ConnectionStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The connection status.
    """
    UNKNOWN = "Unknown"
    CONNECTED = "Connected"
    DISCONNECTED = "Disconnected"
    DEGRADED = "Degraded"
class ConnectivityCheckProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The request's protocol. Specific protocol configuration can be available based on this
    selection. The specified destination address must be coherent with this value.
    """
    TCP = "TCP"
    HTTP = "HTTP"
    HTTPS = "HTTPS"
class ConnectivityStatusType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Resource Connectivity Status Type identifier.
    """
    INITIALIZING = "initializing"
    SUCCESS = "success"
    FAILURE = "failure"
class ContentFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Format of the Content in which the API is getting imported.
    """
    #: The contents are inline and Content type is a WADL document.
    WADL_XML = "wadl-xml"
    #: The WADL document is hosted on a publicly accessible internet address.
    WADL_LINK_JSON = "wadl-link-json"
    #: The contents are inline and Content Type is a OpenAPI 2.0 JSON Document.
    SWAGGER_JSON = "swagger-json"
    #: The OpenAPI 2.0 JSON document is hosted on a publicly accessible internet address.
    SWAGGER_LINK_JSON = "swagger-link-json"
    #: The contents are inline and the document is a WSDL/Soap document.
    WSDL = "wsdl"
    #: The WSDL document is hosted on a publicly accessible internet address.
    WSDL_LINK = "wsdl-link"
    #: The contents are inline and Content Type is a OpenAPI 3.0 YAML Document.
    OPENAPI = "openapi"
    #: The contents are inline and Content Type is a OpenAPI 3.0 JSON Document.
    OPENAPI_JSON = "openapi+json"
    #: The OpenAPI 3.0 YAML document is hosted on a publicly accessible internet address.
    OPENAPI_LINK = "openapi-link"
    #: The OpenAPI 3.0 JSON document is hosted on a publicly accessible internet address.
    OPENAPI_JSON_LINK = "openapi+json-link"
    #: The GraphQL API endpoint hosted on a publicly accessible internet address.
    GRAPHQL_LINK = "graphql-link"
class CreatedByType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of identity that created the resource.
    """
    USER = "User"
    APPLICATION = "Application"
    MANAGED_IDENTITY = "ManagedIdentity"
    KEY = "Key"
class DataMaskingMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Data masking mode.
    """
    #: Mask the value of an entity.
    MASK = "Mask"
    #: Hide the presence of an entity.
    HIDE = "Hide"
class ExportApi(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Query flag requesting an API export; the only supported value is ``true``.
    """
    TRUE = "true"
class ExportFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Format in which the API definition is exported to the storage blob.
    """
    #: Export the Api Definition in OpenAPI 2.0 Specification as JSON document to the Storage Blob.
    SWAGGER = "swagger-link"
    #: Export the Api Definition in WSDL Schema to Storage Blob. This is only supported for APIs of
    #: Type ``soap``.
    WSDL = "wsdl-link"
    #: Export the Api Definition in WADL Schema to Storage Blob.
    WADL = "wadl-link"
    #: Export the Api Definition in OpenAPI 3.0 Specification as YAML document to Storage Blob.
    OPENAPI = "openapi-link"
    #: Export the Api Definition in OpenAPI 3.0 Specification as JSON document to Storage Blob.
    OPENAPI_JSON = "openapi+json-link"
class ExportResultFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Format in which the API Details are exported to the Storage Blob with Sas Key valid for 5
    minutes.
    """
    #: The API Definition is exported in OpenAPI Specification 2.0 format to the Storage Blob.
    SWAGGER = "swagger-link-json"
    #: The API Definition is exported in WSDL Schema to Storage Blob. This is only supported for APIs
    #: of Type ``soap``.
    WSDL = "wsdl-link+xml"
    #: Export the API Definition in WADL Schema to Storage Blob.
    WADL = "wadl-link-json"
    #: Export the API Definition in OpenAPI Specification 3.0 to Storage Blob.
    OPEN_API = "openapi-link"
class GrantType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """OAuth 2.0 authorization grant flow supported by an authorization server.
    """
    #: Authorization Code Grant flow as described https://tools.ietf.org/html/rfc6749#section-4.1.
    AUTHORIZATION_CODE = "authorizationCode"
    #: Implicit Code Grant flow as described https://tools.ietf.org/html/rfc6749#section-4.2.
    IMPLICIT = "implicit"
    #: Resource Owner Password Grant flow as described
    #: https://tools.ietf.org/html/rfc6749#section-4.3.
    #: BUG FIX: the wire value had been replaced by a "<PASSWORD>" placeholder, which can never
    #: match the service constant; restored to the camelCase value used by the REST API,
    #: consistent with the sibling members.
    RESOURCE_OWNER_PASSWORD = "resourceOwnerPassword"
    #: Client Credentials Grant flow as described https://tools.ietf.org/html/rfc6749#section-4.4.
    CLIENT_CREDENTIALS = "clientCredentials"
class GroupType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Group type.
    """
    CUSTOM = "custom"
    SYSTEM = "system"
    EXTERNAL = "external"
class HostnameType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Hostname type.
    """
    PROXY = "Proxy"
    PORTAL = "Portal"
    MANAGEMENT = "Management"
    SCM = "Scm"
    DEVELOPER_PORTAL = "DeveloperPortal"
class HttpCorrelationProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Sets correlation protocol to use for Application Insights diagnostics.
    """
    #: Do not read and inject correlation headers.
    NONE = "None"
    #: Inject Request-Id and Request-Context headers with request correlation data. See
    #: https://github.com/dotnet/corefx/blob/master/src/System.Diagnostics.DiagnosticSource/src/HttpCorrelationProtocol.md.
    LEGACY = "Legacy"
    #: Inject Trace Context headers. See https://w3c.github.io/trace-context.
    W3_C = "W3C"
class IdentityProviderType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """External identity provider used for developer sign-in.
    """
    #: Facebook as Identity provider.
    FACEBOOK = "facebook"
    #: Google as Identity provider.
    GOOGLE = "google"
    #: Microsoft Live as Identity provider.
    MICROSOFT = "microsoft"
    #: Twitter as Identity provider.
    TWITTER = "twitter"
    #: Azure Active Directory as Identity provider.
    AAD = "aad"
    #: Azure Active Directory B2C as Identity provider.
    AAD_B2_C = "aadB2C"
class IssueType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of issue.
    """
    UNKNOWN = "Unknown"
    AGENT_STOPPED = "AgentStopped"
    GUEST_FIREWALL = "GuestFirewall"
    DNS_RESOLUTION = "DnsResolution"
    SOCKET_BIND = "SocketBind"
    NETWORK_SECURITY_RULE = "NetworkSecurityRule"
    USER_DEFINED_ROUTE = "UserDefinedRoute"
    PORT_THROTTLED = "PortThrottled"
    PLATFORM = "Platform"
class KeyType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The Key to be used to generate token for user.
    """
    PRIMARY = "primary"
    SECONDARY = "secondary"
class LoggerType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Logger type.
    """
    #: Azure Event Hub as log destination.
    AZURE_EVENT_HUB = "azureEventHub"
    #: Azure Application Insights as log destination.
    APPLICATION_INSIGHTS = "applicationInsights"
    #: Azure Monitor.
    AZURE_MONITOR = "azureMonitor"
class Method(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The HTTP method to be used.
    """
    GET = "GET"
    POST = "POST"
class NameAvailabilityReason(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Invalid indicates the name provided does not match the resource provider’s naming requirements
    (incorrect length, unsupported characters, etc.) AlreadyExists indicates that the name is
    already in use and is therefore unavailable.
    """
    VALID = "Valid"
    INVALID = "Invalid"
    ALREADY_EXISTS = "AlreadyExists"
class NotificationName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Names of the built-in publisher notifications and their recipient lists.
    """
    #: The following email recipients and users will receive email notifications about subscription
    #: requests for API products requiring approval.
    REQUEST_PUBLISHER_NOTIFICATION_MESSAGE = "RequestPublisherNotificationMessage"
    #: The following email recipients and users will receive email notifications about new API product
    #: subscriptions.
    PURCHASE_PUBLISHER_NOTIFICATION_MESSAGE = "PurchasePublisherNotificationMessage"
    #: The following email recipients and users will receive email notifications when new applications
    #: are submitted to the application gallery.
    NEW_APPLICATION_NOTIFICATION_MESSAGE = "NewApplicationNotificationMessage"
    #: The following recipients will receive blind carbon copies of all emails sent to developers.
    BCC = "BCC"
    #: The following email recipients and users will receive email notifications when a new issue or
    #: comment is submitted on the developer portal.
    NEW_ISSUE_PUBLISHER_NOTIFICATION_MESSAGE = "NewIssuePublisherNotificationMessage"
    #: The following email recipients and users will receive email notifications when developer closes
    #: his account.
    ACCOUNT_CLOSED_PUBLISHER = "AccountClosedPublisher"
    #: The following email recipients and users will receive email notifications when subscription
    #: usage gets close to usage quota.
    QUOTA_LIMIT_APPROACHING_PUBLISHER_NOTIFICATION_MESSAGE = "QuotaLimitApproachingPublisherNotificationMessage"
class OperationNameFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The format of the Operation Name for Application Insights telemetries. Default is Name.
    """
    #: API_NAME;rev=API_REVISION - OPERATION_NAME.
    NAME = "Name"
    #: HTTP_VERB URL.
    URL = "Url"
class Origin(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The origin of the issue.
    """
    LOCAL = "Local"
    INBOUND = "Inbound"
    OUTBOUND = "Outbound"
class PlatformVersion(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Compute Platform Version running the service.
    """
    #: Platform version cannot be determined, as compute platform is not deployed.
    UNDETERMINED = "undetermined"
    #: Platform running the service on Single Tenant V1 platform.
    STV1 = "stv1"
    #: Platform running the service on Single Tenant V2 platform.
    STV2 = "stv2"
    #: Platform running the service on Multi Tenant V1 platform.
    MTV1 = "mtv1"
class PolicyContentFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Format of the policyContent.
    """
    #: The contents are inline and Content type is an XML document.
    XML = "xml"
    #: The policy XML document is hosted on a http endpoint accessible from the API Management
    #: service.
    XML_LINK = "xml-link"
    #: The contents are inline and Content type is a non XML encoded policy document.
    RAWXML = "rawxml"
    #: The policy document is not Xml encoded and is hosted on a http endpoint accessible from the API
    #: Management service.
    RAWXML_LINK = "rawxml-link"
class PolicyExportFormat(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Format in which a policy document is exported.
    """
    #: The contents are inline and Content type is an XML document.
    XML = "xml"
    #: The contents are inline and Content type is a non XML encoded policy document.
    RAWXML = "rawxml"
class PolicyIdName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Name of the policy entity; the only supported value is ``policy``.
    """
    POLICY = "policy"
class PolicyScopeContract(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Scope at which a policy can be applied.
    """
    TENANT = "Tenant"
    PRODUCT = "Product"
    API = "Api"
    OPERATION = "Operation"
    ALL = "All"
class PortalRevisionStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Status of the portal's revision.
    """
    #: Portal's revision has been queued.
    PENDING = "pending"
    #: Portal's revision is being published.
    PUBLISHING = "publishing"
    #: Portal's revision publishing completed.
    COMPLETED = "completed"
    #: Portal's revision publishing failed.
    FAILED = "failed"
class PreferredIPVersion(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The IP version to be used. Only IPv4 is supported for now.
    """
    I_PV4 = "IPv4"
class PrivateEndpointConnectionProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The current provisioning state.
    """
    SUCCEEDED = "Succeeded"
    CREATING = "Creating"
    DELETING = "Deleting"
    FAILED = "Failed"
class PrivateEndpointServiceConnectionStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The private endpoint connection status.
    """
    PENDING = "Pending"
    APPROVED = "Approved"
    REJECTED = "Rejected"
class ProductState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """whether product is published or not. Published products are discoverable by users of developer
    portal. Non published products are visible only to administrators. Default state of Product is
    notPublished.
    """
    NOT_PUBLISHED = "notPublished"
    PUBLISHED = "published"
class Protocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Transport protocol over which an API can be exposed.
    """
    HTTP = "http"
    HTTPS = "https"
    WS = "ws"
    WSS = "wss"
class PublicNetworkAccess(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Whether or not public endpoint access is allowed for this API Management service. Value is
    optional but if passed in, must be 'Enabled' or 'Disabled'. If 'Disabled', private endpoints
    are the exclusive access method. Default value is 'Enabled'
    """
    ENABLED = "Enabled"
    DISABLED = "Disabled"
class ResourceSkuCapacityScaleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The scale type applicable to the sku.
    """
    #: Supported scale type automatic.
    AUTOMATIC = "automatic"
    #: Supported scale type manual.
    MANUAL = "manual"
    #: Scaling not supported.
    NONE = "none"
class SamplingType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Sampling type.
    """
    #: Fixed-rate sampling.
    FIXED = "fixed"
class SchemaType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Schema Type. Immutable.
    """
    #: Xml schema type.
    XML = "xml"
    #: Json schema type.
    JSON = "json"
class SettingsTypeName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Name of the portal settings entity; the only supported value is ``public``.
    """
    PUBLIC = "public"
class Severity(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The severity of the issue.
    """
    ERROR = "Error"
    WARNING = "Warning"
class SkuType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Name of the Sku.
    """
    #: Developer SKU of Api Management.
    DEVELOPER = "Developer"
    #: Standard SKU of Api Management.
    STANDARD = "Standard"
    #: Premium SKU of Api Management.
    PREMIUM = "Premium"
    #: Basic SKU of Api Management.
    BASIC = "Basic"
    #: Consumption SKU of Api Management.
    CONSUMPTION = "Consumption"
    #: Isolated SKU of Api Management.
    ISOLATED = "Isolated"
class SoapApiType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Type of API to create.
    * ``http`` creates a REST API
    * ``soap`` creates a SOAP pass-through API
    * ``websocket`` creates websocket API
    * ``graphql`` creates GraphQL API.
    """
    #: Imports a SOAP API having a RESTful front end.
    SOAP_TO_REST = "http"
    #: Imports the SOAP API having a SOAP front end.
    SOAP_PASS_THROUGH = "soap"
    #: Imports the API having a Websocket front end.
    WEB_SOCKET = "websocket"
    #: Imports the API having a GraphQL front end.
    GRAPH_QL = "graphql"
class State(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Status of the issue.
    """
    #: The issue is proposed.
    PROPOSED = "proposed"
    #: The issue is opened.
    OPEN = "open"
    #: The issue was removed.
    REMOVED = "removed"
    #: The issue is now resolved.
    RESOLVED = "resolved"
    #: The issue was closed.
    CLOSED = "closed"
class SubscriptionState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Subscription state. Possible states are * active – the subscription is active, * suspended –
    the subscription is blocked, and the subscriber cannot call any APIs of the product, *
    submitted – the subscription request has been made by the developer, but has not yet been
    approved or rejected, * rejected – the subscription request has been denied by an
    administrator, * cancelled – the subscription has been cancelled by the developer or
    administrator, * expired – the subscription reached its expiration date and was deactivated.
    """
    SUSPENDED = "suspended"
    ACTIVE = "active"
    EXPIRED = "expired"
    SUBMITTED = "submitted"
    REJECTED = "rejected"
    CANCELLED = "cancelled"
class TemplateName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Names of the built-in email notification templates.
    """
    APPLICATION_APPROVED_NOTIFICATION_MESSAGE = "applicationApprovedNotificationMessage"
    ACCOUNT_CLOSED_DEVELOPER = "accountClosedDeveloper"
    QUOTA_LIMIT_APPROACHING_DEVELOPER_NOTIFICATION_MESSAGE = "quotaLimitApproachingDeveloperNotificationMessage"
    NEW_DEVELOPER_NOTIFICATION_MESSAGE = "newDeveloperNotificationMessage"
    EMAIL_CHANGE_IDENTITY_DEFAULT = "emailChangeIdentityDefault"
    INVITE_USER_NOTIFICATION_MESSAGE = "inviteUserNotificationMessage"
    NEW_COMMENT_NOTIFICATION_MESSAGE = "newCommentNotificationMessage"
    CONFIRM_SIGN_UP_IDENTITY_DEFAULT = "confirmSignUpIdentityDefault"
    NEW_ISSUE_NOTIFICATION_MESSAGE = "newIssueNotificationMessage"
    PURCHASE_DEVELOPER_NOTIFICATION_MESSAGE = "purchaseDeveloperNotificationMessage"
    PASSWORD_RESET_IDENTITY_DEFAULT = "passwordResetIdentityDefault"
    PASSWORD_RESET_BY_ADMIN_NOTIFICATION_MESSAGE = "passwordResetByAdminNotificationMessage"
    REJECT_DEVELOPER_NOTIFICATION_MESSAGE = "rejectDeveloperNotificationMessage"
    REQUEST_DEVELOPER_NOTIFICATION_MESSAGE = "requestDeveloperNotificationMessage"
class UserState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Account state. Specifies whether the user is active or not. Blocked users are unable to sign
    into the developer portal or call any APIs of subscribed products. Default state is Active.
    """
    #: User state is active.
    ACTIVE = "active"
    #: User is blocked. Blocked users cannot authenticate at developer portal or call API.
    BLOCKED = "blocked"
    #: User account is pending. Requires identity confirmation before it can be made active.
    PENDING = "pending"
    #: User account is closed. All identities and related entities are removed.
    DELETED = "deleted"
class Verbosity(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The verbosity level applied to traces emitted by trace policies.
    """
    #: All the traces emitted by trace policies will be sent to the logger attached to this diagnostic
    #: instance.
    VERBOSE = "verbose"
    #: Traces with 'severity' set to 'information' and 'error' will be sent to the logger attached to
    #: this diagnostic instance.
    INFORMATION = "information"
    #: Only traces with 'severity' set to 'error' will be sent to the logger attached to this
    #: diagnostic instance.
    ERROR = "error"
class VersioningScheme(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """A value that determines where the API Version identifier will be located in a HTTP request.
    """
    #: The API Version is passed in a path segment.
    SEGMENT = "Segment"
    #: The API Version is passed in a query parameter.
    QUERY = "Query"
    #: The API Version is passed in a HTTP header.
    HEADER = "Header"
class VirtualNetworkType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of VPN in which API Management service needs to be configured in. None (Default Value)
    means the API Management service is not part of any Virtual Network, External means the API
    Management deployment is set up inside a Virtual Network having an Internet Facing Endpoint,
    and Internal means that API Management deployment is setup inside a Virtual Network having an
    Intranet Facing Endpoint only.
    """
    #: The service is not part of any Virtual Network.
    NONE = "None"
    #: The service is part of Virtual Network and it is accessible from Internet.
    EXTERNAL = "External"
    #: The service is part of Virtual Network and it is only accessible from within the virtual
    #: network.
    INTERNAL = "Internal"
| [
"six.with_metaclass"
] | [((589, 639), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (603, 639), False, 'from six import with_metaclass\n'), ((712, 762), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (726, 762), False, 'from six import with_metaclass\n'), ((1131, 1181), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (1145, 1181), False, 'from six import with_metaclass\n'), ((1415, 1465), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (1429, 1465), False, 'from six import with_metaclass\n'), ((1636, 1686), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (1650, 1686), False, 'from six import with_metaclass\n'), ((1865, 1915), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (1879, 1915), False, 'from six import with_metaclass\n'), ((2028, 2078), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (2042, 2078), False, 'from six import with_metaclass\n'), ((2508, 2558), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (2522, 2558), False, 'from six import with_metaclass\n'), ((2730, 2780), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (2744, 2780), False, 'from six import with_metaclass\n'), ((2973, 3023), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (2987, 
3023), False, 'from six import with_metaclass\n'), ((3243, 3293), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (3257, 3293), False, 'from six import with_metaclass\n'), ((3474, 3524), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (3488, 3524), False, 'from six import with_metaclass\n'), ((3705, 3755), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (3719, 3755), False, 'from six import with_metaclass\n'), ((3953, 4003), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (3967, 4003), False, 'from six import with_metaclass\n'), ((4109, 4159), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (4123, 4159), False, 'from six import with_metaclass\n'), ((4521, 4571), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (4535, 4571), False, 'from six import with_metaclass\n'), ((4832, 4882), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (4846, 4882), False, 'from six import with_metaclass\n'), ((5044, 5094), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (5058, 5094), False, 'from six import with_metaclass\n'), ((5248, 5298), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (5262, 5298), False, 'from six import with_metaclass\n'), ((5451, 5501), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], 
{}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (5465, 5501), False, 'from six import with_metaclass\n'), ((5561, 5611), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (5575, 5611), False, 'from six import with_metaclass\n'), ((5943, 5993), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (5957, 5993), False, 'from six import with_metaclass\n'), ((6180, 6230), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (6194, 6230), False, 'from six import with_metaclass\n'), ((6503, 6553), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (6517, 6553), False, 'from six import with_metaclass\n'), ((6721, 6771), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (6735, 6771), False, 'from six import with_metaclass\n'), ((8118, 8168), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (8132, 8168), False, 'from six import with_metaclass\n'), ((8365, 8415), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (8379, 8415), False, 'from six import with_metaclass\n'), ((8581, 8631), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (8595, 8631), False, 'from six import with_metaclass\n'), ((8673, 8723), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (8687, 8723), False, 'from six import with_metaclass\n'), ((9375, 9425), 'six.with_metaclass', 
'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (9389, 9425), False, 'from six import with_metaclass\n'), ((10049, 10099), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (10063, 10099), False, 'from six import with_metaclass\n'), ((10682, 10732), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (10696, 10732), False, 'from six import with_metaclass\n'), ((10853, 10903), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (10867, 10903), False, 'from six import with_metaclass\n'), ((11097, 11147), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (11111, 11147), False, 'from six import with_metaclass\n'), ((11663, 11713), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (11677, 11713), False, 'from six import with_metaclass\n'), ((12137, 12187), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (12151, 12187), False, 'from six import with_metaclass\n'), ((12561, 12611), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (12575, 12611), False, 'from six import with_metaclass\n'), ((12747, 12797), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (12761, 12797), False, 'from six import with_metaclass\n'), ((13084, 13134), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (13098, 
13134), False, 'from six import with_metaclass\n'), ((13245, 13295), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (13259, 13295), False, 'from six import with_metaclass\n'), ((13658, 13708), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (13672, 13708), False, 'from six import with_metaclass\n'), ((15202, 15252), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (15216, 15252), False, 'from six import with_metaclass\n'), ((15480, 15530), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (15494, 15530), False, 'from six import with_metaclass\n'), ((15667, 15717), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (15681, 15717), False, 'from six import with_metaclass\n'), ((16177, 16227), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (16191, 16227), False, 'from six import with_metaclass\n'), ((16792, 16842), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (16806, 16842), False, 'from six import with_metaclass\n'), ((17058, 17108), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (17072, 17108), False, 'from six import with_metaclass\n'), ((17161, 17211), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (17175, 17211), False, 'from six import with_metaclass\n'), ((17349, 17399), 'six.with_metaclass', 'with_metaclass', 
(['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (17363, 17399), False, 'from six import with_metaclass\n'), ((17759, 17809), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (17773, 17809), False, 'from six import with_metaclass\n'), ((17956, 18006), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (17970, 18006), False, 'from six import with_metaclass\n'), ((18205, 18255), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (18219, 18255), False, 'from six import with_metaclass\n'), ((18410, 18460), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (18424, 18460), False, 'from six import with_metaclass\n'), ((18770, 18820), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (18784, 18820), False, 'from six import with_metaclass\n'), ((18919, 18969), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (18933, 18969), False, 'from six import with_metaclass\n'), ((19327, 19377), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (19341, 19377), False, 'from six import with_metaclass\n'), ((19627, 19677), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (19641, 19677), False, 'from six import with_metaclass\n'), ((19777, 19827), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (19791, 19827), False, 
'from six import with_metaclass\n'), ((19977, 20027), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (19991, 20027), False, 'from six import with_metaclass\n'), ((20069, 20119), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (20083, 20119), False, 'from six import with_metaclass\n'), ((20224, 20274), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (20238, 20274), False, 'from six import with_metaclass\n'), ((20719, 20769), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (20733, 20769), False, 'from six import with_metaclass\n'), ((21318, 21368), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (21332, 21368), False, 'from six import with_metaclass\n'), ((21700, 21750), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (21714, 21750), False, 'from six import with_metaclass\n'), ((22495, 22545), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (22509, 22545), False, 'from six import with_metaclass\n'), ((23654, 23704), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (23668, 23704), False, 'from six import with_metaclass\n'), ((24316, 24366), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (24330, 24366), False, 'from six import with_metaclass\n'), ((24927, 24977), 'six.with_metaclass', 'with_metaclass', 
(['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (24941, 24977), False, 'from six import with_metaclass\n'), ((25339, 25389), 'six.with_metaclass', 'with_metaclass', (['CaseInsensitiveEnumMeta', 'str', 'Enum'], {}), '(CaseInsensitiveEnumMeta, str, Enum)\n', (25353, 25389), False, 'from six import with_metaclass\n')] |
from microbit import *
import microbit_i2c_lcd as lcd

# Route the micro:bit I2C bus to the pins wired to the LCD backpack.
i2c.init(sda=pin15, scl=pin13)
display = lcd.lcd(i2c)

# Character code 247 maps to different glyphs depending on the HD44780
# character ROM fitted to the LCD: pi on the A00 (Japanese) ROM, a divide
# sign on the A02 (European) ROM.
display.lcd_display_string(str(chr(247)), 1)
# Fix: corrected "japaneese" -> "japanese" in the informational message.
print("this will display a pi symbol for ROM A00 japanese\n"+\
      "display a divide symbol for the A02 ROM european")

# Restore I2C to the micro:bit default pins before the script ends.
i2c.init(sda=pin20, scl=pin19)
| [
"microbit_i2c_lcd.lcd"
] | [((96, 108), 'microbit_i2c_lcd.lcd', 'lcd.lcd', (['i2c'], {}), '(i2c)\n', (103, 108), True, 'import microbit_i2c_lcd as lcd\n')] |
import time
import textwrap
import math
import binascii
from inkfish.create_discriminant import create_discriminant
from inkfish.classgroup import ClassGroup
from inkfish.iterate_squarings import iterate_squarings
from inkfish import proof_wesolowski
from inkfish.proof_of_time import (create_proof_of_time_nwesolowski,
check_proof_of_time_nwesolowski,
generate_r_value)
from inkfish import proof_pietrzak
from tests.int_mod_n import int_mod_n
start_t = 0
time_multiplier = 1000  # Report all durations in milliseconds.


def start_bench():
    """Record the current time as the start of a benchmark run."""
    global start_t
    # time.perf_counter() is a monotonic, high-resolution clock and is the
    # recommended timer for benchmarking; time.time() is wall-clock and can
    # jump forwards or backwards on system clock adjustments.
    start_t = time.perf_counter() * time_multiplier


def end_bench(name, iterations):
    """Print the mean per-iteration duration (ms) since start_bench().

    Args:
        name: label describing the benchmark.
        iterations: number of iterations performed between the two calls.
    """
    global start_t
    print("%-80s" % name, round(((time.perf_counter() * time_multiplier) - start_t)
                                / (iterations), 2), "ms")
def bench_classgroup():
    """Benchmark classgroup multiply and square for 512/1024/2048-bit
    discriminants.

    The three sizes previously had three copy-pasted stanzas; this loop
    produces byte-identical benchmark labels with one implementation.
    """
    for bits in (512, 1024, 2048):
        D = create_discriminant(b"seed", bits)
        g = ClassGroup.from_ab_discriminant(2, 1, D)
        # Square until a and b are as large as c, so the timed operations are
        # representative of steady-state ("generic") classgroup arithmetic.
        while g[0].bit_length() < g[2].bit_length() or g[1].bit_length() < g[2].bit_length():
            g = pow(g, 2)
        g2 = pow(g, 2)

        start_bench()
        for _ in range(0, 10000):
            g2 = g2.multiply(g)
        end_bench("Classgroup %d bit multiply" % bits, 10000)

        start_bench()
        for _ in range(0, 10000):
            g2 = g2.square()
        end_bench("Classgroup %d bit square" % bits, 10000)
def bench_discriminant_generation():
    """Benchmark discriminant creation for 512/1024/2048-bit lengths.

    Deduplicates the three previously copy-pasted stanzas; labels are
    byte-identical to the originals.
    """
    for bits in (512, 1024, 2048):
        start_bench()
        for i in range(100):
            # Vary the seed every iteration so any caching cannot skew timing.
            create_discriminant(i.to_bytes(32, "big"), bits)
        end_bench("Generate %d bit discriminant" % bits, 100)
def bench_vdf_iterations():
    """Benchmark 10000 repeated squarings (the raw VDF operation) in class
    groups of several discriminant sizes and in 2048/4096-bit RSA groups.
    """
    # --- Classgroup squarings -------------------------------------------
    D = create_discriminant(b"seed", 512)
    g = ClassGroup.from_ab_discriminant(2, 1, D)
    start_bench()
    for _ in range(10):
        iterate_squarings(g, [10000])
    end_bench("VDF 10000 iterations, 512bit classgroup", 10)

    D = create_discriminant(b"seed", 1024)
    g = ClassGroup.from_ab_discriminant(2, 1, D)
    start_bench()
    # Fewer repetitions for the larger sizes, since each run is slower.
    for _ in range(2):
        iterate_squarings(g, [10000])
    end_bench("VDF 10000 iterations, 1024bit classgroup", 2)

    D = create_discriminant(b"seed", 2048)
    g = ClassGroup.from_ab_discriminant(2, 1, D)
    start_bench()
    for _ in range(2):
        iterate_squarings(g, [10000])
    end_bench("VDF 10000 iterations, 2048bit classgroup", 2)

    # --- RSA-group squarings for comparison -----------------------------
    # 2048 bit modulus
    # The modulus is written as an indented multi-line literal; dedent plus
    # split/join strips all whitespace so int() sees one long digit string.
    prime = int(''.join(textwrap.dedent("""
        2634427397878110232503205795695468045251992992603340168049253044454387
        1080897872360133472596339100961569230393163880927301060812730934043766
        3646941725034559080490451986171041751558689035115943134790395616490035
        9846986660803055891526943083539429058955074960014718229954545667371414
        8029627597753998530121193913181474174423003742206534823264658175666814
        0135440982296559552013264268674093709650866928458407571602481922443634
        2306826340229149641664159565679297958087282612514993965471602016939198
        7906354607787482381087158402527243744342654041944357821920600344804411
        149211019651477131981627171025001255607692340155184929729""").split(
        "\n")))
    # int_mod_n is a small test helper type implementing modular arithmetic.
    initial_x = int_mod_n(15619920774592561628351138998371642294622340518469892832433140464182509560910157, prime)
    start_bench()
    for _ in range(2):
        iterate_squarings(initial_x, [10000])
    end_bench("VDF 10000 iterations, 2048bit RSA modulus", 2)

    # 4096 bit modulus
    prime = int(''.join(textwrap.dedent("""
        8466908771297228398108729385413406312941234872779790501232479567685076
        4762372651919166693555570188656362906279057098994287649807661604067499
        3053172889374223358861501556862285892231110003666671700028271837785598
        2711897721600334848186874197010418494909265899320941516493102418008649
        1453168421248338831347183727052419170386543046753155080120058844782449
        2367606252473029574371603403502901208633055707823115620627698680602710
        8443465519855901353485395338769455628849759950055397510380800451786140
        7656499749760023191493764704430968335226478156774628814806959050849093
        5035645687560103462845054697907307302184358040130405297282437884344166
        7188530230135000709764482573583664708281017375197388209508666190855611
        3020636147999796942848529907410787587958203267319164458728792653638371
        7065019972034334447374200594285558460255762459285837794285154075321806
        4811493971019446075650166775528463987738853022894781860563097254152754
        1001763544907553312158598519824602240430350073539728131177239628816329
        0179188493240741373702361870220590386302554494325819514615309801491107
        2710093592877658471507118356670261129465668437063636041245619411937902
        0658733974883998301959084381087966405508661151837877497650143949507846
        1522640311670422105209760172585337397687461""").split("\n")))
    initial_x = int_mod_n(15619920774592561628351138998371642294622340518469892832433140464182509560910157, prime)
    start_bench()
    for _ in range(2):
        iterate_squarings(initial_x, [10000])
    end_bench("VDF 10000 iterations, 4096bit RSA modulus", 2)
def bench_wesolowski():
    """Time Wesolowski proof generation and verification for a 512-bit
    class group over 10000 squaring iterations, and report the proving
    overhead as a percentage of the raw VDF time."""
    iterations = 10000
    discriminant_length = 512
    discriminant = create_discriminant(b"seed", discriminant_length)
    L, k, _ = proof_wesolowski.approximate_parameters(iterations)
    x = ClassGroup.from_ab_discriminant(2, 1, discriminant)

    # Cache every (k*L)-th power plus the final one, as the prover requires.
    checkpoint_count = math.ceil(iterations / (k * L)) + 1
    powers_to_calculate = [k * L * j for j in range(checkpoint_count)]
    powers_to_calculate.append(iterations)

    start_t = time.time() * time_multiplier
    powers = iterate_squarings(x, powers_to_calculate)
    vdf_time = round(time.time() * time_multiplier - start_t)
    y = powers[iterations]
    identity = ClassGroup.identity_for_discriminant(discriminant)

    start_t = time.time() * time_multiplier
    start_bench()
    for _ in range(5):
        proof = proof_wesolowski.generate_proof(identity, x, y, iterations, k, L, powers)
    end_bench("Wesolowski " + str(discriminant_length) + "b class group, " + str(iterations)
              + " iterations, proof", 5)
    proof_time = round((time.time() * time_multiplier - start_t) / 5)
    print(" - Percentage of VDF time:", (proof_time / vdf_time) * 100, "%")

    start_bench()
    for _ in range(10):
        assert proof_wesolowski.verify_proof(x, y, proof, iterations)
    end_bench("Wesolowski " + str(discriminant_length) + "b class group, " + str(iterations)
              + " iterations, verification", 10)
def bench_nwesolowski():
    """Time depth-2 n-wesolowski proof-of-time creation and verification for
    a 512-bit class group over 10000 squaring iterations, and report the
    proving overhead relative to the raw VDF time."""
    iterations = 10000
    discriminant_length = 512
    discriminant = create_discriminant(b"seed", discriminant_length)
    L, k, _ = proof_wesolowski.approximate_parameters(iterations)
    x = ClassGroup.from_ab_discriminant(2, 1, discriminant)
    # Powers cached at every (k*L)-th checkpoint, as the prover requires.
    powers_to_calculate = [i * k * L for i in range(0, math.ceil(iterations/(k*L)) + 1)]
    # Baseline: average wall time of the bare VDF over 20 runs.
    start_t = time.time() * time_multiplier
    for _ in range(20):
        iterate_squarings(x, powers_to_calculate)
    vdf_time = round(time.time() * time_multiplier - start_t) / 20
    start_t = time.time() * time_multiplier
    start_bench()
    for _ in range(20):
        result, proof = create_proof_of_time_nwesolowski(discriminant, x, iterations,
                                                          discriminant_length, 2, depth=0)
    end_bench("n-wesolowski depth 2 " + str(discriminant_length) + "b class group, "
              + str(iterations) + " iterations, proof", 20)
    proof_time = round((time.time() * time_multiplier - start_t) / 20)
    # create_proof_of_time_nwesolowski also performs the squarings, so the
    # proving overhead is (proof_time - vdf_time) relative to vdf_time.
    print(" - Percentage of VDF time:", (((proof_time - vdf_time) / vdf_time) * 100), "%")
    start_bench()
    for _ in range(20):
        # The checker consumes the VDF output concatenated with the proof.
        assert(check_proof_of_time_nwesolowski(discriminant, x, result + proof, iterations, discriminant_length))
    end_bench("n-wesolowski depth 2 " + str(discriminant_length) + "b class group, "
              + str(iterations) + " iterations, verification", 20)
def bench_pietrzak():
    """Time Pietrzak proof generation and verification for a 512-bit class
    group over 10000 squaring iterations, and report the proving overhead
    as a percentage of the raw VDF time."""
    iterations = 10000
    discriminant_length = 512
    discriminant = create_discriminant(b"seed", discriminant_length)
    delta = 8
    x = ClassGroup.from_ab_discriminant(2, 1, discriminant)
    # Cache exactly the intermediate powers the Pietrzak prover will need.
    powers_to_calculate = proof_pietrzak.cache_indeces_for_count(iterations)
    start_t = time.time() * time_multiplier
    powers = iterate_squarings(x, powers_to_calculate)
    vdf_time = round(time.time() * time_multiplier - start_t)
    y = powers[iterations]
    identity = ClassGroup.identity_for_discriminant(discriminant)

    # BUG FIX: the proof loop runs 5 times, but the per-iteration average was
    # previously reported over 10 (in end_bench and in proof_time), halving
    # the reported proving cost. Use a single shared repetition count.
    proof_repetitions = 5
    start_t = time.time() * time_multiplier
    start_bench()
    for _ in range(proof_repetitions):
        proof = proof_pietrzak.generate_proof(x, iterations, delta, y, powers,
                                              identity, generate_r_value, discriminant_length)
    end_bench("Pietrzak " + str(discriminant_length) + "b class group, " + str(iterations)
              + " iterations, proof", proof_repetitions)
    proof_time = round((time.time() * time_multiplier - start_t) / proof_repetitions)
    print(" - Percentage of VDF time:", (proof_time / vdf_time) * 100, "%")

    start_bench()
    for _ in range(10):
        assert proof_pietrzak.verify_proof(x, y, proof, iterations, delta,
                                           generate_r_value, discriminant_length)
    end_bench("Pietrzak " + str(discriminant_length) + "b class group, " + str(iterations)
              + " iterations, verification", 10)
def bench_main():
    """Run every benchmark suite in sequence."""
    suites = (
        bench_classgroup,
        bench_discriminant_generation,
        bench_vdf_iterations,
        bench_wesolowski,
        bench_nwesolowski,
        bench_pietrzak,
    )
    for suite in suites:
        suite()


if __name__ == '__main__':
    bench_main()
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
| [
"textwrap.dedent",
"math.ceil",
"inkfish.classgroup.ClassGroup.from_ab_discriminant",
"inkfish.proof_wesolowski.generate_proof",
"inkfish.proof_wesolowski.verify_proof",
"inkfish.proof_pietrzak.generate_proof",
"inkfish.create_discriminant.create_discriminant",
"inkfish.proof_wesolowski.approximate_pa... | [((857, 890), 'inkfish.create_discriminant.create_discriminant', 'create_discriminant', (["b'seed'", '(512)'], {}), "(b'seed', 512)\n", (876, 890), False, 'from inkfish.create_discriminant import create_discriminant\n'), ((899, 939), 'inkfish.classgroup.ClassGroup.from_ab_discriminant', 'ClassGroup.from_ab_discriminant', (['(2)', '(1)', 'D'], {}), '(2, 1, D)\n', (930, 939), False, 'from inkfish.classgroup import ClassGroup\n'), ((1332, 1366), 'inkfish.create_discriminant.create_discriminant', 'create_discriminant', (["b'seed'", '(1024)'], {}), "(b'seed', 1024)\n", (1351, 1366), False, 'from inkfish.create_discriminant import create_discriminant\n'), ((1375, 1415), 'inkfish.classgroup.ClassGroup.from_ab_discriminant', 'ClassGroup.from_ab_discriminant', (['(2)', '(1)', 'D'], {}), '(2, 1, D)\n', (1406, 1415), False, 'from inkfish.classgroup import ClassGroup\n'), ((1810, 1844), 'inkfish.create_discriminant.create_discriminant', 'create_discriminant', (["b'seed'", '(2048)'], {}), "(b'seed', 2048)\n", (1829, 1844), False, 'from inkfish.create_discriminant import create_discriminant\n'), ((1853, 1893), 'inkfish.classgroup.ClassGroup.from_ab_discriminant', 'ClassGroup.from_ab_discriminant', (['(2)', '(1)', 'D'], {}), '(2, 1, D)\n', (1884, 1893), False, 'from inkfish.classgroup import ClassGroup\n'), ((2817, 2850), 'inkfish.create_discriminant.create_discriminant', 'create_discriminant', (["b'seed'", '(512)'], {}), "(b'seed', 512)\n", (2836, 2850), False, 'from inkfish.create_discriminant import create_discriminant\n'), ((2859, 2899), 'inkfish.classgroup.ClassGroup.from_ab_discriminant', 'ClassGroup.from_ab_discriminant', (['(2)', '(1)', 'D'], {}), '(2, 1, D)\n', (2890, 2899), False, 'from inkfish.classgroup import ClassGroup\n'), ((3051, 3085), 'inkfish.create_discriminant.create_discriminant', 'create_discriminant', (["b'seed'", '(1024)'], {}), "(b'seed', 1024)\n", (3070, 3085), False, 'from inkfish.create_discriminant import 
create_discriminant\n'), ((3094, 3134), 'inkfish.classgroup.ClassGroup.from_ab_discriminant', 'ClassGroup.from_ab_discriminant', (['(2)', '(1)', 'D'], {}), '(2, 1, D)\n', (3125, 3134), False, 'from inkfish.classgroup import ClassGroup\n'), ((3285, 3319), 'inkfish.create_discriminant.create_discriminant', 'create_discriminant', (["b'seed'", '(2048)'], {}), "(b'seed', 2048)\n", (3304, 3319), False, 'from inkfish.create_discriminant import create_discriminant\n'), ((3328, 3368), 'inkfish.classgroup.ClassGroup.from_ab_discriminant', 'ClassGroup.from_ab_discriminant', (['(2)', '(1)', 'D'], {}), '(2, 1, D)\n', (3359, 3368), False, 'from inkfish.classgroup import ClassGroup\n'), ((4322, 4430), 'tests.int_mod_n.int_mod_n', 'int_mod_n', (['(15619920774592561628351138998371642294622340518469892832433140464182509560910157\n )', 'prime'], {}), '(\n 15619920774592561628351138998371642294622340518469892832433140464182509560910157\n , prime)\n', (4331, 4430), False, 'from tests.int_mod_n import int_mod_n\n'), ((6068, 6176), 'tests.int_mod_n.int_mod_n', 'int_mod_n', (['(15619920774592561628351138998371642294622340518469892832433140464182509560910157\n )', 'prime'], {}), '(\n 15619920774592561628351138998371642294622340518469892832433140464182509560910157\n , prime)\n', (6077, 6176), False, 'from tests.int_mod_n import int_mod_n\n'), ((6414, 6463), 'inkfish.create_discriminant.create_discriminant', 'create_discriminant', (["b'seed'", 'discriminant_length'], {}), "(b'seed', discriminant_length)\n", (6433, 6463), False, 'from inkfish.create_discriminant import create_discriminant\n'), ((6478, 6529), 'inkfish.proof_wesolowski.approximate_parameters', 'proof_wesolowski.approximate_parameters', (['iterations'], {}), '(iterations)\n', (6517, 6529), False, 'from inkfish import proof_wesolowski\n'), ((6539, 6590), 'inkfish.classgroup.ClassGroup.from_ab_discriminant', 'ClassGroup.from_ab_discriminant', (['(2)', '(1)', 'discriminant'], {}), '(2, 1, discriminant)\n', (6570, 6590), False, 
'from inkfish.classgroup import ClassGroup\n'), ((6777, 6818), 'inkfish.iterate_squarings.iterate_squarings', 'iterate_squarings', (['x', 'powers_to_calculate'], {}), '(x, powers_to_calculate)\n', (6794, 6818), False, 'from inkfish.iterate_squarings import iterate_squarings\n'), ((6924, 6974), 'inkfish.classgroup.ClassGroup.identity_for_discriminant', 'ClassGroup.identity_for_discriminant', (['discriminant'], {}), '(discriminant)\n', (6960, 6974), False, 'from inkfish.classgroup import ClassGroup\n'), ((7790, 7839), 'inkfish.create_discriminant.create_discriminant', 'create_discriminant', (["b'seed'", 'discriminant_length'], {}), "(b'seed', discriminant_length)\n", (7809, 7839), False, 'from inkfish.create_discriminant import create_discriminant\n'), ((7854, 7905), 'inkfish.proof_wesolowski.approximate_parameters', 'proof_wesolowski.approximate_parameters', (['iterations'], {}), '(iterations)\n', (7893, 7905), False, 'from inkfish import proof_wesolowski\n'), ((7915, 7966), 'inkfish.classgroup.ClassGroup.from_ab_discriminant', 'ClassGroup.from_ab_discriminant', (['(2)', '(1)', 'discriminant'], {}), '(2, 1, discriminant)\n', (7946, 7966), False, 'from inkfish.classgroup import ClassGroup\n'), ((9220, 9269), 'inkfish.create_discriminant.create_discriminant', 'create_discriminant', (["b'seed'", 'discriminant_length'], {}), "(b'seed', discriminant_length)\n", (9239, 9269), False, 'from inkfish.create_discriminant import create_discriminant\n'), ((9293, 9344), 'inkfish.classgroup.ClassGroup.from_ab_discriminant', 'ClassGroup.from_ab_discriminant', (['(2)', '(1)', 'discriminant'], {}), '(2, 1, discriminant)\n', (9324, 9344), False, 'from inkfish.classgroup import ClassGroup\n'), ((9371, 9421), 'inkfish.proof_pietrzak.cache_indeces_for_count', 'proof_pietrzak.cache_indeces_for_count', (['iterations'], {}), '(iterations)\n', (9409, 9421), False, 'from inkfish import proof_pietrzak\n'), ((9479, 9520), 'inkfish.iterate_squarings.iterate_squarings', 'iterate_squarings', 
(['x', 'powers_to_calculate'], {}), '(x, powers_to_calculate)\n', (9496, 9520), False, 'from inkfish.iterate_squarings import iterate_squarings\n'), ((9626, 9676), 'inkfish.classgroup.ClassGroup.identity_for_discriminant', 'ClassGroup.identity_for_discriminant', (['discriminant'], {}), '(discriminant)\n', (9662, 9676), False, 'from inkfish.classgroup import ClassGroup\n'), ((627, 638), 'time.time', 'time.time', ([], {}), '()\n', (636, 638), False, 'import time\n'), ((2951, 2980), 'inkfish.iterate_squarings.iterate_squarings', 'iterate_squarings', (['g', '[10000]'], {}), '(g, [10000])\n', (2968, 2980), False, 'from inkfish.iterate_squarings import iterate_squarings\n'), ((3185, 3214), 'inkfish.iterate_squarings.iterate_squarings', 'iterate_squarings', (['g', '[10000]'], {}), '(g, [10000])\n', (3202, 3214), False, 'from inkfish.iterate_squarings import iterate_squarings\n'), ((3418, 3447), 'inkfish.iterate_squarings.iterate_squarings', 'iterate_squarings', (['g', '[10000]'], {}), '(g, [10000])\n', (3435, 3447), False, 'from inkfish.iterate_squarings import iterate_squarings\n'), ((4470, 4507), 'inkfish.iterate_squarings.iterate_squarings', 'iterate_squarings', (['initial_x', '[10000]'], {}), '(initial_x, [10000])\n', (4487, 4507), False, 'from inkfish.iterate_squarings import iterate_squarings\n'), ((6216, 6253), 'inkfish.iterate_squarings.iterate_squarings', 'iterate_squarings', (['initial_x', '[10000]'], {}), '(initial_x, [10000])\n', (6233, 6253), False, 'from inkfish.iterate_squarings import iterate_squarings\n'), ((6734, 6745), 'time.time', 'time.time', ([], {}), '()\n', (6743, 6745), False, 'import time\n'), ((6990, 7001), 'time.time', 'time.time', ([], {}), '()\n', (6999, 7001), False, 'import time\n'), ((7077, 7150), 'inkfish.proof_wesolowski.generate_proof', 'proof_wesolowski.generate_proof', (['identity', 'x', 'y', 'iterations', 'k', 'L', 'powers'], {}), '(identity, x, y, iterations, k, L, powers)\n', (7108, 7150), False, 'from inkfish import 
proof_wesolowski\n'), ((7493, 7547), 'inkfish.proof_wesolowski.verify_proof', 'proof_wesolowski.verify_proof', (['x', 'y', 'proof', 'iterations'], {}), '(x, y, proof, iterations)\n', (7522, 7547), False, 'from inkfish import proof_wesolowski\n'), ((8071, 8082), 'time.time', 'time.time', ([], {}), '()\n', (8080, 8082), False, 'import time\n'), ((8133, 8174), 'inkfish.iterate_squarings.iterate_squarings', 'iterate_squarings', (['x', 'powers_to_calculate'], {}), '(x, powers_to_calculate)\n', (8150, 8174), False, 'from inkfish.iterate_squarings import iterate_squarings\n'), ((8257, 8268), 'time.time', 'time.time', ([], {}), '()\n', (8266, 8268), False, 'import time\n'), ((8353, 8451), 'inkfish.proof_of_time.create_proof_of_time_nwesolowski', 'create_proof_of_time_nwesolowski', (['discriminant', 'x', 'iterations', 'discriminant_length', '(2)'], {'depth': '(0)'}), '(discriminant, x, iterations,\n discriminant_length, 2, depth=0)\n', (8385, 8451), False, 'from inkfish.proof_of_time import create_proof_of_time_nwesolowski, check_proof_of_time_nwesolowski, generate_r_value\n'), ((8873, 8974), 'inkfish.proof_of_time.check_proof_of_time_nwesolowski', 'check_proof_of_time_nwesolowski', (['discriminant', 'x', '(result + proof)', 'iterations', 'discriminant_length'], {}), '(discriminant, x, result + proof, iterations,\n discriminant_length)\n', (8904, 8974), False, 'from inkfish.proof_of_time import create_proof_of_time_nwesolowski, check_proof_of_time_nwesolowski, generate_r_value\n'), ((9436, 9447), 'time.time', 'time.time', ([], {}), '()\n', (9445, 9447), False, 'import time\n'), ((9692, 9703), 'time.time', 'time.time', ([], {}), '()\n', (9701, 9703), False, 'import time\n'), ((9779, 9894), 'inkfish.proof_pietrzak.generate_proof', 'proof_pietrzak.generate_proof', (['x', 'iterations', 'delta', 'y', 'powers', 'identity', 'generate_r_value', 'discriminant_length'], {}), '(x, iterations, delta, y, powers, identity,\n generate_r_value, discriminant_length)\n', (9808, 9894), False, 
'from inkfish import proof_pietrzak\n'), ((10279, 10381), 'inkfish.proof_pietrzak.verify_proof', 'proof_pietrzak.verify_proof', (['x', 'y', 'proof', 'iterations', 'delta', 'generate_r_value', 'discriminant_length'], {}), '(x, y, proof, iterations, delta,\n generate_r_value, discriminant_length)\n', (10306, 10381), False, 'from inkfish import proof_pietrzak\n'), ((6840, 6851), 'time.time', 'time.time', ([], {}), '()\n', (6849, 6851), False, 'import time\n'), ((9542, 9553), 'time.time', 'time.time', ([], {}), '()\n', (9551, 9553), False, 'import time\n'), ((3557, 4288), 'textwrap.dedent', 'textwrap.dedent', (['"""\n 2634427397878110232503205795695468045251992992603340168049253044454387\n 1080897872360133472596339100961569230393163880927301060812730934043766\n 3646941725034559080490451986171041751558689035115943134790395616490035\n 9846986660803055891526943083539429058955074960014718229954545667371414\n 8029627597753998530121193913181474174423003742206534823264658175666814\n 0135440982296559552013264268674093709650866928458407571602481922443634\n 2306826340229149641664159565679297958087282612514993965471602016939198\n 7906354607787482381087158402527243744342654041944357821920600344804411\n 149211019651477131981627171025001255607692340155184929729"""'], {}), '(\n """\n 2634427397878110232503205795695468045251992992603340168049253044454387\n 1080897872360133472596339100961569230393163880927301060812730934043766\n 3646941725034559080490451986171041751558689035115943134790395616490035\n 9846986660803055891526943083539429058955074960014718229954545667371414\n 8029627597753998530121193913181474174423003742206534823264658175666814\n 0135440982296559552013264268674093709650866928458407571602481922443634\n 2306826340229149641664159565679297958087282612514993965471602016939198\n 7906354607787482381087158402527243744342654041944357821920600344804411\n 149211019651477131981627171025001255607692340155184929729"""\n )\n', (3572, 4288), False, 'import textwrap\n'), ((4618, 6046), 
'textwrap.dedent', 'textwrap.dedent', (['"""\n 8466908771297228398108729385413406312941234872779790501232479567685076\n 4762372651919166693555570188656362906279057098994287649807661604067499\n 3053172889374223358861501556862285892231110003666671700028271837785598\n 2711897721600334848186874197010418494909265899320941516493102418008649\n 1453168421248338831347183727052419170386543046753155080120058844782449\n 2367606252473029574371603403502901208633055707823115620627698680602710\n 8443465519855901353485395338769455628849759950055397510380800451786140\n 7656499749760023191493764704430968335226478156774628814806959050849093\n 5035645687560103462845054697907307302184358040130405297282437884344166\n 7188530230135000709764482573583664708281017375197388209508666190855611\n 3020636147999796942848529907410787587958203267319164458728792653638371\n 7065019972034334447374200594285558460255762459285837794285154075321806\n 4811493971019446075650166775528463987738853022894781860563097254152754\n 1001763544907553312158598519824602240430350073539728131177239628816329\n 0179188493240741373702361870220590386302554494325819514615309801491107\n 2710093592877658471507118356670261129465668437063636041245619411937902\n 0658733974883998301959084381087966405508661151837877497650143949507846\n 1522640311670422105209760172585337397687461"""'], {}), '(\n """\n 8466908771297228398108729385413406312941234872779790501232479567685076\n 4762372651919166693555570188656362906279057098994287649807661604067499\n 3053172889374223358861501556862285892231110003666671700028271837785598\n 2711897721600334848186874197010418494909265899320941516493102418008649\n 1453168421248338831347183727052419170386543046753155080120058844782449\n 2367606252473029574371603403502901208633055707823115620627698680602710\n 8443465519855901353485395338769455628849759950055397510380800451786140\n 7656499749760023191493764704430968335226478156774628814806959050849093\n 
5035645687560103462845054697907307302184358040130405297282437884344166\n 7188530230135000709764482573583664708281017375197388209508666190855611\n 3020636147999796942848529907410787587958203267319164458728792653638371\n 7065019972034334447374200594285558460255762459285837794285154075321806\n 4811493971019446075650166775528463987738853022894781860563097254152754\n 1001763544907553312158598519824602240430350073539728131177239628816329\n 0179188493240741373702361870220590386302554494325819514615309801491107\n 2710093592877658471507118356670261129465668437063636041245619411937902\n 0658733974883998301959084381087966405508661151837877497650143949507846\n 1522640311670422105209760172585337397687461"""\n )\n', (4633, 6046), False, 'import textwrap\n'), ((6646, 6677), 'math.ceil', 'math.ceil', (['(iterations / (k * L))'], {}), '(iterations / (k * L))\n', (6655, 6677), False, 'import math\n'), ((7310, 7321), 'time.time', 'time.time', ([], {}), '()\n', (7319, 7321), False, 'import time\n'), ((8022, 8053), 'math.ceil', 'math.ceil', (['(iterations / (k * L))'], {}), '(iterations / (k * L))\n', (8031, 8053), False, 'import math\n'), ((8196, 8207), 'time.time', 'time.time', ([], {}), '()\n', (8205, 8207), False, 'import time\n'), ((8674, 8685), 'time.time', 'time.time', ([], {}), '()\n', (8683, 8685), False, 'import time\n'), ((10095, 10106), 'time.time', 'time.time', ([], {}), '()\n', (10104, 10106), False, 'import time\n'), ((745, 756), 'time.time', 'time.time', ([], {}), '()\n', (754, 756), False, 'import time\n')] |
import os
import re
import sys
from setuptools import setup, find_packages
from typing import Optional, Tuple
# Directory containing this setup.py; used to locate the C++ header that is
# the single source of truth for the package version.
SETUP_DIR = os.path.dirname(os.path.realpath(__file__))
POLYTRACKER_HEADER = os.path.join(SETUP_DIR, 'polytracker', 'include', 'polytracker', 'polytracker.h')

if not os.path.exists(POLYTRACKER_HEADER):
    sys.stderr.write(f"Error loading polytracker.h!\nIt was expected to be here:\n{POLYTRACKER_HEADER}\n\n")
    # Fix: use sys.exit() rather than the site-provided exit() builtin, which
    # is a REPL convenience and is not guaranteed to exist (e.g. under -S).
    sys.exit(1)
def polytracker_version() -> Tuple[int, int, int, Optional[str]]:
    """Parse the POLYTRACKER_VERSION_* #defines out of polytracker.h.

    Returns:
        A (major, minor, revision, suffix) tuple. The first three components
        are ints; suffix is the (unquoted) POLYTRACKER_VERSION_SUFFIX string,
        or None when the header does not define one.

    Exits the process with status 1 when a required component is missing or
    is not an integer.
    """
    define_pattern = re.compile(r"\s*#define\s+POLYTRACKER_VERSION_([A-Za-z_0-9]+)\s+([^\s]+)\s*$")
    known_components = ('MAJOR', 'MINOR', 'REVISION', 'SUFFIX')
    version_parts = {}
    with open(POLYTRACKER_HEADER, 'r') as f:
        for i, line in enumerate(f):
            m = define_pattern.match(line)
            if m is None:
                continue
            if m[1] in known_components:
                version_parts[m[1]] = m[2]
            else:
                # Unknown version #defines are ignored but reported, so typos
                # in the header do not silently disappear.
                sys.stderr.write(f"Warning: Ignoring unexpected #define for \"POLYTRACKER_VERSION_{m[1]}\" on line "
                                 f"{i + 1} of {POLYTRACKER_HEADER}\n")
    for required_part in ('MAJOR', 'MINOR', 'REVISION'):
        if required_part not in version_parts:
            sys.stderr.write(
                f"Error: #define POLYTRACKER_VERSION_{required_part} not found in {POLYTRACKER_HEADER}\n\n")
            sys.exit(1)
        try:
            version_parts[required_part] = int(version_parts[required_part])
        except ValueError:
            sys.stderr.write(
                f"Error: POLYTRACKER_VERSION_{required_part} in {POLYTRACKER_HEADER} is not an integer!\n\n")
            sys.exit(1)
    suffix = version_parts.get('SUFFIX', None)
    if suffix is not None:
        # Strip surrounding whitespace and one layer of double quotes, so a
        # header value like  "alpha1"  yields the bare string alpha1.
        suffix = suffix.strip()
        if suffix.startswith('"') and suffix.endswith('"'):
            suffix = suffix[1:-1]
    return version_parts['MAJOR'], version_parts['MINOR'], version_parts['REVISION'], suffix
def polytracker_version_string() -> str:
    """Render the header version as a PEP 440-ish string, e.g. '1.2.3' or '1.2.3rc1'."""
    major, minor, revision, suffix = polytracker_version()
    base = f"{major}.{minor}.{revision}"
    return base if suffix is None else f"{base}{suffix}"
# Package metadata. The version is parsed from polytracker.h so the Python
# package cannot drift from the native library's version.
setup(
    name='polytracker',
    description='API and Library for operating and interacting with PolyTracker',
    url='https://github.com/trailofbits/polytracker',
    author='<NAME>',
    version=polytracker_version_string(),
    packages=find_packages(),
    python_requires='>=3.7',
    install_requires=[
        'graphviz',
        'matplotlib',
        'networkx',
        'pygraphviz',
        'pydot',
        'tqdm',
        'typing_extensions'
    ],
    extras_require={
        "dev": ["black", "mypy", "pytest"]
    },
    entry_points={
        'console_scripts': [
            'polyprocess = polytracker.polyprocess.__main__:main'
        ]
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Utilities'
    ]
)
| [
"os.path.exists",
"setuptools.find_packages",
"os.path.join",
"re.match",
"os.path.realpath",
"sys.stderr.write",
"sys.exit"
] | [((188, 273), 'os.path.join', 'os.path.join', (['SETUP_DIR', '"""polytracker"""', '"""include"""', '"""polytracker"""', '"""polytracker.h"""'], {}), "(SETUP_DIR, 'polytracker', 'include', 'polytracker',\n 'polytracker.h')\n", (200, 273), False, 'import os\n'), ((139, 165), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (155, 165), False, 'import os\n'), ((278, 312), 'os.path.exists', 'os.path.exists', (['POLYTRACKER_HEADER'], {}), '(POLYTRACKER_HEADER)\n', (292, 312), False, 'import os\n'), ((318, 432), 'sys.stderr.write', 'sys.stderr.write', (['f"""Error loading polytracker.h!\nIt was expected to be here:\n{POLYTRACKER_HEADER}\n\n"""'], {}), '(\n f"""Error loading polytracker.h!\nIt was expected to be here:\n{POLYTRACKER_HEADER}\n\n"""\n )\n', (334, 432), False, 'import sys\n'), ((2381, 2396), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2394, 2396), False, 'from setuptools import setup, find_packages\n'), ((624, 715), 're.match', 're.match', (['"""\\\\s*#define\\\\s+POLYTRACKER_VERSION_([A-Za-z_0-9]+)\\\\s+([^\\\\s]+)\\\\s*$"""', 'line'], {}), "('\\\\s*#define\\\\s+POLYTRACKER_VERSION_([A-Za-z_0-9]+)\\\\s+([^\\\\s]+)\\\\s*$'\n , line)\n", (632, 715), False, 'import re\n'), ((1179, 1300), 'sys.stderr.write', 'sys.stderr.write', (['f"""Error: #define POLYTRACKER_VERSION_{required_part} not found in {POLYTRACKER_HEADER}\n\n"""'], {}), '(\n f"""Error: #define POLYTRACKER_VERSION_{required_part} not found in {POLYTRACKER_HEADER}\n\n"""\n )\n', (1195, 1300), False, 'import sys\n'), ((1318, 1329), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1326, 1329), False, 'import sys\n'), ((1459, 1581), 'sys.stderr.write', 'sys.stderr.write', (['f"""Error: POLYTRACKER_VERSION_{required_part} in {POLYTRACKER_HEADER} is not an integer!\n\n"""'], {}), '(\n f"""Error: POLYTRACKER_VERSION_{required_part} in {POLYTRACKER_HEADER} is not an integer!\n\n"""\n )\n', (1475, 1581), False, 'import sys\n'), ((1599, 1610), 'sys.exit', 
'sys.exit', (['(1)'], {}), '(1)\n', (1607, 1610), False, 'import sys\n'), ((818, 963), 'sys.stderr.write', 'sys.stderr.write', (['f"""Warning: Ignoring unexpected #define for "POLYTRACKER_VERSION_{m[1]}" on line {i + 1} of {POLYTRACKER_HEADER}\n"""'], {}), '(\n f"""Warning: Ignoring unexpected #define for "POLYTRACKER_VERSION_{m[1]}" on line {i + 1} of {POLYTRACKER_HEADER}\n"""\n )\n', (834, 963), False, 'import sys\n')] |
import os,sys, re
from math import ceil, floor
class Gazette:
    """
    Loads and parses municipal gazettes, linearizing multi-column pages.
    Attributes:
        file_path: The string path to a gazette.
        file: The string containing a gazette's content.
        city: A string for the city (or cities) of the gazette.
        date: A string for the date of the gazette.
        minimum_spacing_between_cols: An integer for minimum spacing between columns. Defaults to 1.
        min_break_ratio: A float; minimum fraction of a page's lines that must be blank
            at a column position for it to count as a divider. Defaults to 0.75.
        max_allowed_cols: An int for the maximum number of columns allowed per page.
        pages: A list of pages, each page is a list of lines.
        cols_dividers: Per-page lists of (column_index, break_ratio) divider candidates.
        pages_avg_col: Per-page column counts (number of dividers + 1).
        total_avg_col: Average column count across all pages (0 when there are no pages).
    """
    def __init__(self, file_path:str, city:str, date:str):
        """Inits Gazette with a path, a city and a date; parsing runs eagerly."""
        self.file = self.load_file(file_path)
        self.city = city
        self.date = date
        self.minimum_spacing_between_cols = 1
        self.min_break_ratio = 0.75
        self.max_allowed_cols = 5
        self.pages = self.get_list_of_pages()
        self.linear_text = ""
        self.cols_dividers = [self.vertical_lines_finder(x) for x in self.pages]
        self.pages_avg_col = [len(x)+1 for x in self.cols_dividers]
        # print(self.pages_avg_col)
        if self.pages_avg_col:
            self.total_avg_col = sum(self.pages_avg_col) / len(self.pages_avg_col)
        else:
            self.total_avg_col = 0
        self.split_cols()
        print(self.total_avg_col)
        # print(self.linear_text)
    def get_list_of_pages(self, page_break='\014'):
        """
        Uses file string in self.file and converts it to a list of lists.
        Args:
            page_break (str): A string used to delimit page separation
                in the target document. Defaults to form feed.
        Returns:
            list: A list of pages, each page is a list of lines.
        """
        pages = []
        page_buffer = []
        for line in self.file:
            if page_break not in line:
                page_buffer.append(line)
            else:
                full_page = page_buffer
                pages.append(full_page)
                page_buffer = self.reset_buffer(line, page_break)
        # Add last page
        if len(page_buffer) > 0:
            pages.append(page_buffer)
        return pages
    def reset_buffer(self, line, page_break):
        # Start the next page's buffer with the line that carried the page break,
        # stripped of the break character itself.
        return [line.strip(page_break)]
    def split_cols(self):
        """
        Splits columns of document into a linear layout, accumulating the
        result in self.linear_text.
        """
        column_dividers = self.cols_dividers
        average_columns_per_page = self.pages_avg_col
        for page_index, page in enumerate(self.pages):
            page_column_dividers = column_dividers[page_index]
            page_average_columns = average_columns_per_page[page_index]
            page_n_of_columns = len(page_column_dividers)
            if self.test_if_page_is_not_splittable(page_average_columns, page_column_dividers, page_n_of_columns):
                # Layout is ambiguous: emit the page unchanged, ending with a form feed.
                page_add_to_linear_text = str("".join(page)) + '\014'
                self.linear_text += page_add_to_linear_text
                continue
            page_lines_in_one_column = self.get_lines_in_one_column(page, page_column_dividers)
            self.linear_text += self.lines_to_text(page_lines_in_one_column)
    def get_lines_in_one_column(self, page, page_column_dividers):
        """
        Args
            page: A list of strings, and each string is a line in the page.
            page_column_dividers: A list of ints that were selected as column dividers.
        Returns: A list of strings, and each string is a line in the new page.
        NOTE(review): this appends a sentinel divider to the caller's list,
        mutating the corresponding entry of self.cols_dividers — confirm intended.
        """
        longest_line_len = max(len(line) for line in page)
        page_column_dividers.append((longest_line_len,0))
        lines_to_return = []
        for line in page:
            column_beginning = 0
            current_line = []
            line_size = len(line)
            for column_divider, _ in page_column_dividers:
                # A non-space character at a divider means this line spans columns:
                # keep it whole as a single-column entry.
                if line_size > column_divider and line[column_divider] != ' ':
                    single_column = [line]
                    lines_to_return.append(single_column)
                    column_beginning = -1
                    break
                current_column = line[column_beginning:column_divider]
                current_line.append(current_column)
                column_beginning = column_divider
            lines_to_return.append(current_line)
        return lines_to_return
    def test_if_page_is_not_splittable(self, page_average_columns, page_column_dividers, page_n_of_columns):
        """
        Heuristic: a page is kept unsplit when it has no dividers, too many
        columns, too few columns, or clearly more columns than the document
        average suggests.
        Returns: boolean
        """
        average_columns_in_total = self.total_avg_col
        maximum_of_columns_allowed = self.max_allowed_cols
        too_many_columns = page_n_of_columns >= maximum_of_columns_allowed
        no_dividers = page_column_dividers == []
        threshold = 1.2
        more_pages_than_average = page_average_columns >= (threshold * average_columns_in_total)
        min_columns = 2
        too_few_columns = page_average_columns < min_columns
        result = more_pages_than_average or \
            too_few_columns or \
            too_many_columns or \
            no_dividers
        return result
    def lines_to_text(self, lines):
        """Flatten split columns back into linear text, one column at a time,
        separating non-empty columns with a form feed. Drops the trailing newline."""
        max_cols = max(map(lambda x: len(x), lines))
        txt = ""
        for col_i in range(max_cols):
            page_has_content = False
            for line in lines:
                if len(line) > col_i:
                    if line[col_i] != '' and line[col_i].strip() != '':
                        txt += "".join(line[col_i].strip('\n')) + '\n'
                        page_has_content = True
            if lines != [] and page_has_content:
                txt += "\014\n"
        return txt[:-1]
    def vertical_lines_finder(self, page):
        """Find candidate column-divider positions for one page."""
        max_line_size = max(len(line) for line in page)
        vertical_lines = self.get_contiguous_space_heights(max_line_size, page)
        candidate_breakpoints = self.remove_contiguous_vertical_lines(vertical_lines, max_line_size)
        return candidate_breakpoints
    def remove_contiguous_vertical_lines(self, vertical_lines, max_line_size):
        """Keep only divider candidates that are far enough apart from each other."""
        if vertical_lines == []:
            return []
        candidate_breakpoints = [vertical_lines[0]]
        col_ctd = 1
        while col_ctd < max_line_size and col_ctd < len(vertical_lines):
            if self.columns_have_minimum_distance(col_ctd, candidate_breakpoints, vertical_lines):
                if vertical_lines[col_ctd] not in candidate_breakpoints:
                    candidate_breakpoints.append(vertical_lines[col_ctd])
            col_ctd +=1
        return candidate_breakpoints
    def columns_have_minimum_distance(self, col_ctd, candidate_breakpoints, vertical_lines, distance=20):
        # Compare against the most recently accepted breakpoint only.
        return abs(candidate_breakpoints[-1][0] - vertical_lines[col_ctd][0]) >= distance
    def get_contiguous_space_heights(self, max_line_size, page):
        """Scan columns in the middle 60% of the page width (right to left) and
        score each by the longest vertical run of blank cells; keep those whose
        ratio of run length to page height exceeds min_break_ratio."""
        contiguous_space_heights = []
        left_delimiter = floor(0.2 * max_line_size)
        rigth_delimiter = floor(0.8 * max_line_size)
        parsing_window = range(rigth_delimiter, left_delimiter, -1)
        for col_idx in parsing_window:
            ctd = 1
            max_val = 0
            for line_idx, line in enumerate(page):
                max_val = max(max_val, ctd)
                if len(line) <= col_idx:
                    # Line ends before this column: counts as blank.
                    ctd += 1
                else:
                    if self.col_offset_is_only_spaces(page, line_idx, col_idx):
                        ctd += 1
                    else:
                        ctd = 1
            break_ratio = round(max_val/len(page), 2)
            if break_ratio > self.min_break_ratio:
                contiguous_space_heights.append((col_idx, break_ratio))
        # Best (highest-ratio) candidates first.
        contiguous_space_heights = sorted(contiguous_space_heights, key=lambda x: x[1], reverse=True)
        return contiguous_space_heights
    def get_item_from_list(self, line, col_idx, default=' '):
        """
        Returns an list item if it exists, or ´default´, otherwise
        """
        # NOTE(review): the bare except also hides non-IndexError failures.
        try:
            return line[col_idx]
        except:
            return default
    def col_offset_is_only_spaces(self, page, line_idx, col_idx, offset=6):
        """True when the next `offset` lines are blank at column col_idx."""
        page_slice = page[line_idx : line_idx+offset]
        col_slice = [self.get_item_from_list(line, col_idx) for line in page_slice]
        return all(i==' ' for i in col_slice)
    @staticmethod
    def load_file(path):
        """Read the file at `path` and return its lines (with newlines kept)."""
        lines = []
        with open(path, 'r') as f:
            lines = f.readlines()
        return lines
if __name__ == "__main__":
    # Usage: script <input_dir> <output_dir>
    # Linearizes every gazette in the input directory into the output directory.
    source_dir = sys.argv[1]
    target_dir = sys.argv[2]
    for entry in os.listdir(source_dir):
        gazette = Gazette(f"{source_dir}/{entry}", "", "")
        print(f"Parsing {entry}")
        with open(f"{target_dir}/{entry}", 'w') as out:
            out.write(gazette.linear_text)
| [
"os.listdir",
"math.floor"
] | [((9122, 9141), 'os.listdir', 'os.listdir', (['input_f'], {}), '(input_f)\n', (9132, 9141), False, 'import os, sys, re\n'), ((7352, 7378), 'math.floor', 'floor', (['(0.2 * max_line_size)'], {}), '(0.2 * max_line_size)\n', (7357, 7378), False, 'from math import ceil, floor\n'), ((7405, 7431), 'math.floor', 'floor', (['(0.8 * max_line_size)'], {}), '(0.8 * max_line_size)\n', (7410, 7431), False, 'from math import ceil, floor\n')] |
# Generated by Django 3.0.2 on 2020-03-03 21:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Tenant table.

    Auto-generated by Django 3.0.2 on 2020-03-03.
    """
    initial = True
    dependencies = [
        ('PropertyManagers', '0001_initial'),
        ('Properties', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Tenant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstName', models.CharField(max_length=100)),
                ('lastName', models.CharField(max_length=100)),
                ('email', models.CharField(max_length=255)),
                # NOTE(review): plain CharField for a password — no hashing is visible here.
                ('password', models.CharField(max_length=20)),
                # Optional link to the property the tenant occupies.
                ('place', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='Properties.Property')),
                # Required link to the managing PropertyManager.
                ('pm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PropertyManagers.PropertyManager')),
            ],
        ),
    ]
| [
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((421, 514), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (437, 514), False, 'from django.db import migrations, models\n'), ((543, 575), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (559, 575), False, 'from django.db import migrations, models\n'), ((607, 639), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (623, 639), False, 'from django.db import migrations, models\n'), ((668, 700), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (684, 700), False, 'from django.db import migrations, models\n'), ((732, 763), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (748, 763), False, 'from django.db import migrations, models\n'), ((792, 908), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""Properties.Property"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='Properties.Property')\n", (809, 908), False, 'from django.db import migrations, models\n'), ((929, 1035), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""PropertyManagers.PropertyManager"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'PropertyManagers.PropertyManager')\n", (946, 1035), False, 'from django.db import migrations, models\n')] |
import os

# Remove the generated file. Tolerate it already being absent so this
# cleanup script is idempotent (the original raised FileNotFoundError).
try:
    os.remove("fichero_generado.txt")
except FileNotFoundError:
    pass
"os.remove"
] | [((11, 44), 'os.remove', 'os.remove', (['"""fichero_generado.txt"""'], {}), "('fichero_generado.txt')\n", (20, 44), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from remi.gui import *
from remi import start, App
import cv2
import numpy
import chdkptp
import time
import threading
import rawpy
class OpenCVVideoWidget(Image):
    """A remi Image widget backed by an OpenCV/numpy frame.

    The browser re-fetches ``/<id>/get_image_data`` whenever update() runs;
    a fresh random index in the query string defeats the browser cache.
    """
    def __init__(self, **kwargs):
        super(OpenCVVideoWidget, self).__init__("/%s/get_image_data" % id(self), **kwargs)
        self.frame_index = 0
        # Start with a uniform grey 720x480 BGR placeholder frame.
        self.frame = numpy.full((480, 720,3),155, dtype=numpy.uint8)
    def update(self, app_instance):
        """Push the current frame to the browser via an async JPEG fetch."""
        self.frame_index = numpy.random.randint(1e8)
        app_instance.execute_javascript("""
            var url = '/%(id)s/get_image_data?index=%(frame_index)s';
            var xhr = new XMLHttpRequest();
            xhr.open('GET', url, true);
            xhr.responseType = 'blob'
            xhr.onload = function(e){
                urlCreator = window.URL || window.webkitURL;
                urlCreator.revokeObjectURL(document.getElementById('%(id)s').src);
                imageUrl = urlCreator.createObjectURL(this.response);
                document.getElementById('%(id)s').src = imageUrl;
            }
            xhr.send();
            """ % {'id': id(self), 'frame_index':self.frame_index})
    def get_image_data(self, index=0):
        """Serve the current frame JPEG-encoded; returns (body, headers) or (None, None)."""
        ret, jpeg = cv2.imencode('.jpeg', self.frame)
        if ret:
            headers = {'Content-type': 'image/jpeg'}
            return [jpeg.tostring(), headers]
        return None, None
class M10GUI(App):
    def __init__(self, *args, **kwargs):
        # Presumably the remi visual editor passes editing_mode, in which case
        # the base App constructor must be skipped — TODO confirm.
        if not 'editing_mode' in kwargs.keys():
            super(M10GUI, self).__init__(*args, static_file_path={'my_res':'./res/'})
        # Event used to tell the shoot/video worker threads to abort.
        self.stop_event = threading.Event()
        self.stop_event.clear()
    def log_message(self, *args, **kwargs):
        # Intentionally a no-op: suppresses the server's per-request log output.
        pass
def idle(self):
if self.live_view_check.get_value():
vp, bm = self.get_live_view()
self.image.frame = numpy.clip(vp.astype(numpy.uint16)+ bm.astype(numpy.uint16),0,255).astype(numpy.uint8)
self.image.update(self)
if time.time()-self.timer > 10:
try:
self.temperature_label.set_text('Temp (\xb0C): '+str(self.camera.lua_execute('get_temperature(1)')))
self.battery_label.set_text('Batt (V): '+str(self.camera.lua_execute('get_vbatt()')/1000.))
except:
None
self.timer = time.time()
pass
    def main(self):
        """remi entry point: build and return the root widget tree."""
        # Timestamp of the last telemetry refresh, consumed by idle().
        self.timer = time.time()
        return M10GUI.construct_ui(self)
    def on_close(self):
        """Abort any running capture worker before shutting the app down."""
        self.stop_event.set()
        super(M10GUI, self).on_close()
    @staticmethod
    def construct_ui(self):
        """Build the full UI: a GridBox laid out from ASCII art, capture
        controls, the live-view image, and status/telemetry labels.
        Returns the root container widget."""
        container = GridBox(width='100%', height='100%', style={'margin':'0px auto', "background-color":"#d5d0c7"})
        container.attributes.update({"class":"Widget","editor_constructor":"()","editor_varname":"container","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Widget"})
        # Each unique name below is a grid cell; repeated names span cells.
        container.set_from_asciiart("""
            |               |               |               | iso_label     | shutter_label | pics_label    | time_label    | live_view_label   | zoom_label        |
            | shoot_button  | video_button  | stop_button   | iso_menu      | shutter_value | pics_value    | time_value    | live_view_check   | zoom_menu         |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | image         | image         | image         | image         | image         | image         | image         | image             | image             |
            | lua_label     | lua_label     | lua_value     | lua_value     | lua_value     | lua_value     | lua_value     | lua_value         | lua_value         |
            | status_label  | status_label  | status_label  | status_label  | status_label  | status_label  | temperature_label | battery_label | connect_button    |
            """, 1, 1)
        # --- Capture control buttons (disabled until a camera is connected) ---
        self.shoot_button = Button('Shoot')
        self.shoot_button.set_enabled(False)
        self.shoot_button.style.update({"width":"100%","height":"100%"})
        self.shoot_button.attributes.update({"class":"Button","editor_constructor":"('Shoot')","editor_varname":"shoot_button","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Button"})
        self.shoot_button.onclick.do(self.start_shoot)
        self.video_button = Button('Video')
        self.video_button.set_enabled(False)
        self.video_button.style.update({"width":"100%","height":"100%"})
        self.video_button.attributes.update({"class":"Button","editor_constructor":"('Video')","editor_varname":"video_button","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Button"})
        self.video_button.onclick.do(self.start_video)
        self.stop_button = Button('Stop')
        self.stop_button.set_enabled(False)
        self.stop_button.style.update({"width":"100%","height":"100%"})
        self.stop_button.attributes.update({"class":"Button","editor_constructor":"('Stop')","editor_varname":"stop_button","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Button"})
        self.stop_button.onclick.do(self.stop_action)
        # --- Exposure settings ---
        self.iso_menu = DropDown.new_from_list(('Auto','100','125','160','200','250','320','400', '500','640','800','1000','1250','1600','2000','2500', '3200','4000','5000','6400','8000','10000','12800'))
        self.iso_menu.set_enabled(False)
        self.iso_menu.set_value('Auto')
        self.iso_menu.attributes.update({"class":"DropDown","editor_constructor":"()","editor_varname":"iso_menu","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"DropDown"})
        self.iso_menu.onchange.do(self.set_iso)
        self.shutter_value = TextInput(True,'')
        self.shutter_value.set_enabled(False)
        self.shutter_value.attributes.update({"class":"TextInput","autocomplete":"off","editor_constructor":"(False,'')","editor_varname":"shutter_value","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"TextInput"})
        self.shutter_value.onchange.do(self.change_shutter)
        iso_label = Label('ISO')
        iso_label.attributes.update({"class":"Label","editor_constructor":"('ISO')","editor_varname":"iso_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
        shutter_label = Label('Shutter')
        shutter_label.attributes.update({"class":"Label","editor_constructor":"('Shutter')","editor_varname":"shutter_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
        # --- Burst / interval settings ---
        self.pics_value = TextInput(True,'')
        self.pics_value.set_enabled(False)
        self.pics_value.attributes.update({"class":"TextInput","autocomplete":"off","editor_constructor":"(False,'')","editor_varname":"pics_value","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"TextInput"})
        pics_label = Label('Pics')
        pics_label.attributes.update({"class":"Label","editor_constructor":"('Pics')","editor_varname":"pics_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
        self.time_value = TextInput(True,'')
        self.time_value.set_enabled(False)
        self.time_value.attributes.update({"class":"TextInput","autocomplete":"off","editor_constructor":"(False,'')","editor_varname":"time_value","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"TextInput"})
        time_label = Label('Hold')
        time_label.attributes.update({"class":"Label","editor_constructor":"('Time')","editor_varname":"time_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
        # --- Live view controls ---
        self.live_view_check = CheckBox(False,'')
        self.live_view_check.set_enabled(False)
        self.live_view_check.onchange.do(self.toggle_live)
        self.live_view_check.attributes.update({"class":"checkbox","value":"","type":"checkbox","autocomplete":"off","editor_constructor":"(False,'')","editor_varname":"live_view_check","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"CheckBox"})
        live_view_label = Label('Live')
        live_view_label.attributes.update({"class":"Label","editor_constructor":"('Live')","editor_varname":"live_view_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
        self.zoom_menu = DropDown.new_from_list(('1', '5', '10'))
        self.zoom_menu.set_enabled(False)
        self.zoom_menu.set_value('1')
        self.zoom_menu.attributes.update({"class":"DropDown","editor_constructor":"()","editor_varname":"zoom_menu","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"DropDown"})
        self.zoom_menu.onchange.do(self.change_zoom)
        zoom_label = Label('Zoom')
        zoom_label.attributes.update({"class":"Label","editor_constructor":"('Zoom')","editor_varname":"zoom_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
        # Live-view / last-capture preview area.
        self.image = OpenCVVideoWidget(width='100%', height='100%')
        self.image.attributes.update({"class":"Image","width":"720","height":"480","editor_constructor":"(720,480)","editor_varname":"image","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Image"})
        infos_label = Label('Infos')
        infos_label.attributes.update({"class":"Label","editor_constructor":"('Infos')","editor_varname":"infos_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
        # --- Telemetry / status row ---
        self.temperature_label = Label('Temp (\xb0C):')
        self.temperature_label.attributes.update({"class":"Label","editor_constructor":"('Temp (ºC):')","editor_varname":"temperature_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
        self.battery_label = Label('Batt (V):')
        self.battery_label.attributes.update({"class":"Label","editor_constructor":"('Batt (V):')","editor_varname":"battery_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
        self.connect_button = Button('Connect')
        self.connect_button.style.update({"width":"100%","height":"100%"})
        self.connect_button.attributes.update({"class":"Button","editor_constructor":"('Connect')","editor_varname":"connect_button","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Button"})
        self.connect_button.onclick.do(self.init_camera)
        lua_label = Label('Lua Execute:')
        lua_label.attributes.update({"class":"Label","editor_constructor":"('Lua Execute:')","editor_varname":"lua_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
        self.lua_value = TextInput(True,'')
        self.lua_value.set_enabled(False)
        self.lua_value.attributes.update({"class":"TextInput","autocomplete":"off","editor_constructor":"(False,'')","editor_varname":"lua_value","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"TextInput"})
        self.lua_value.onchange.do(self.exec_lua)
        self.status_label = Label('Camera not connected')
        self.status_label.attributes.update({"class":"Label","editor_constructor":"('')","editor_varname":"status_label","editor_tag_type":"widget","editor_newclass":"False","editor_baseclass":"Label"})
        container.append({'shoot_button':self.shoot_button, 'video_button':self.video_button, 'stop_button':self.stop_button, 'iso_menu':self.iso_menu, 'shutter_value':self.shutter_value, 'iso_label':iso_label, 'shutter_label':shutter_label, 'pics_value':self.pics_value, 'pics_label':pics_label, 'time_value':self.time_value, 'time_label':time_label, 'live_view_check':self.live_view_check, 'live_view_label':live_view_label, 'zoom_menu':self.zoom_menu, 'zoom_label':zoom_label, 'image':self.image, 'temperature_label':self.temperature_label, 'battery_label':self.battery_label, 'connect_button':self.connect_button, 'lua_label':lua_label, 'lua_value':self.lua_value, 'status_label':self.status_label})
        self.container = container
        return self.container
    def set_status_label(self, text):
        """Update the status line under the UI lock (workers run off the UI thread)."""
        with self.update_lock:
            self.status_label.set_text(text)
##### Here the GUI is over and starts the camera
    def init_camera(self, widget):
        """Show an erase-warning dialog; on confirmation, connect to the first
        CHDK camera, wipe its storage, and enable the capture controls."""
        def erase_ok(widget):
            # Runs only after the user confirms the erase dialog.
            try:
                device=chdkptp.list_devices()
                self.camera=chdkptp.ChdkDevice(device[0])
            except:
                self.status_label.set_text('Error: camera not connected')
                return
            self.camera.switch_mode('record')
            self.camera.lua_execute('set_backlight(0)')
            self.camera.lua_execute('call_event_proc("UI.CreatePublic")')
            self.purge_files()
            self.status_label.set_text('Camera connected')
            # Flip the UI from "disconnected" to "ready" state.
            self.connect_button.set_enabled(False)
            self.iso_menu.set_enabled(True)
            self.shutter_value.set_enabled(True)
            self.pics_value.set_enabled(True)
            self.shoot_button.set_enabled(True)
            self.video_button.set_enabled(True)
            self.live_view_check.set_enabled(True)
            self.lua_value.set_enabled(True)
            # Seed the controls from the camera's current settings.
            self.iso_menu.set_value(self.get_iso())
            self.shutter_value.set_value(str(self.get_camera_shutter_time()))
            self.pics_value.set_value('1')
            if self.camera.lua_execute('get_drive_mode()') == 1:
                if float(self.shutter_value.get_value()) < 1:
                    self.time_value.set_enabled(True)
                    self.time_value.set_value('0')
                else:
                    self.time_value.set_value('0')
            self.temperature_label.set_text('Temp (\xb0C): '+str(self.camera.lua_execute('get_temperature(1)')))
            self.battery_label.set_text('Batt (V): '+str(self.camera.lua_execute('get_vbatt()')/1000.))
        erase_dialog=GenericDialog(title='WARNING',message='All your data on the camera will be erased!')
        erase_dialog.style.update({"margin":"0px","width":"500px","height":"100px","top":"10px","left":"10px","position":"absolute","overflow":"auto"})
        erase_dialog.show(self)
        erase_dialog.confirm_dialog.do(erase_ok)
def toggle_live(self, widget, value):
if self.live_view_check.get_value():
self.zoom_menu.set_enabled(True)
else:
self.zoom_menu.set_enabled(False)
    def get_iso(self):
        """Return the camera's current ISO mode via CHDK Lua."""
        return self.camera.lua_execute('get_iso_mode()')
def set_iso(self, widget, iso):
iso = self.iso_menu.get_value()
if iso == 'Auto':
iso='0'
self.camera.lua_execute('set_iso_mode('+iso+')')
self.camera.lua_execute('press("shoot_half")')
def get_camera_shutter_time(self):
time = self.camera.lua_execute('tv96_to_usec(get_user_tv96())')
if time < 1000000:
return time/1000000.
else:
return time/1000000
    def change_shutter(self, widget, value):
        """Clamp the requested shutter time to [250 µs, 32 s] and apply it on
        the camera, then half-press to make it effective."""
        try:
            time=int(float(self.shutter_value.get_text())*1000000)
        except:
            self.status_label.set_text('Error: shutter time must be a number')
            return
        if time > 32000000:
            time=32000000
        if time < 250:
            time=250
        self.camera.lua_execute('set_user_tv96(usec_to_tv96('+str(time)+'))\n' \
                                'press("shoot_half")\n' \
                                'repeat\n' \
                                '   sleep(10)\n' \
                                'until get_shooting()\n' \
                                'return')
        # NOTE(review): text_line_message does not appear to be read anywhere
        # visible here — confirm it is still used.
        self.text_line_message='Done'
def purge_files(self):
for i in self.list_files():
self.camera.delete_files(i)
def list_files(self):
file_list=[]
for i in self.camera.list_files():
if 'CANONMSC' not in i:
file_list+=self.camera.list_files(i[:-1])
return file_list
    def change_zoom(self, widget, zoom):
        """Switch the live-view magnification (1x, 5x, 10x) via CHDK UI events.

        ISO/shutter controls are disabled while zoomed, since the zoom modes
        are driven through the camera's own UI.
        """
        zoom = int(self.zoom_menu.get_value())
        if zoom==1:
            self.camera.lua_execute('post_levent_to_ui(0x11ea,0)\n' \
                                    'press("shoot_half")\n' \
                                    'repeat\n' \
                                    '   sleep(10)\n' \
                                    'until get_shooting()\n' \
                                    'return')
            self.iso_menu.set_enabled(True)
            self.shutter_value.set_enabled(True)
        if zoom==5:
            # Reset to 1x first, then enter the first magnification step.
            self.camera.lua_execute('post_levent_to_ui(0x11ea,0)\n' \
                                    'press("shoot_half")\n' \
                                    'repeat\n' \
                                    '   sleep(10)\n' \
                                    'until get_shooting()\n' \
                                    'return')
            self.camera.lua_execute('post_levent_to_ui(0x11ea,1)\n' \
                                    'press("shoot_half")\n' \
                                    'repeat\n' \
                                    '   sleep(10)\n' \
                                    'until get_shooting()\n' \
                                    'return')
            self.iso_menu.set_enabled(False)
            self.shutter_value.set_enabled(False)
        if zoom==10:
            self.camera.lua_execute('post_levent_to_ui(0x11ea,1)\n' \
                                    'press("shoot_half")\n' \
                                    'repeat\n' \
                                    '   sleep(10)\n' \
                                    'until get_shooting()\n' \
                                    'call_event_proc("PTM_SetCurrentItem",0x80b8,2)\n'
                                    'press("shoot_half")\n' \
                                    'repeat\n' \
                                    '   sleep(10)\n' \
                                    'until get_shooting()\n' \
                                    'return')
            self.iso_menu.set_enabled(False)
            self.shutter_value.set_enabled(False)
def start_shoot(self, widget):
try:
float(self.shutter_value.get_value())
float(self.time_value.get_value())
int(self.pics_value.get_value())
except:
return
self.shoot_button.set_enabled(False)
self.video_button.set_enabled(False)
self.stop_button.set_enabled(True)
self.live_view_check.set_value(False)
self.live_view_check.set_enabled(False)
tr = threading.Thread(target=self.shoot_pic, args=(self.stop_event,))
tr.start()
def start_video(self, widget):
try:
float(self.shutter_value.get_value())
float(self.time_value.get_value())
int(self.pics_value.get_value())
except:
return
if float(self.shutter_value.get_value()) < 1:
self.status_label.set_text('Video length must be at least 1 second')
return
self.shoot_button.set_enabled(False)
self.video_button.set_enabled(False)
self.stop_button.set_enabled(True)
self.live_view_check.set_value(False)
self.live_view_check.set_enabled(False)
tr = threading.Thread(target=self.shoot_video, args=(self.stop_event,))
tr.start()
    def shoot_pic(self, stop_event):
        """Worker thread: capture the requested number of stills, download and
        preview each batch, then restore the UI.

        stop_event aborts the sequence between (not during) exposures.
        """
        record_counter = 0
        # NOTE(review): `timer` is assigned but never used in this method.
        timer=int(time.time())
        shutter_time=str(int(numpy.rint(float(self.shutter_value.get_value())*1000000)))
        while record_counter < int(self.pics_value.get_value()) and not stop_event.isSet():
            if float(self.shutter_value.get_value()) >= 1 or float(self.time_value.get_value()) == 0:
                # Plain exposure: the camera times the shutter itself.
                self.camera.lua_execute('set_tv96_direct(usec_to_tv96('+shutter_time+'))\n' \
                                        'press("shoot_half")\n' \
                                        'repeat\n' \
                                        '   sleep(10)\n' \
                                        'until get_shooting()\n' \
                                        'press("shoot_full")\n' \
                                        'return')
            else:
                # Hold the shutter button for time_value seconds (drive-mode hold).
                self.camera.lua_execute('set_tv96_direct(usec_to_tv96('+shutter_time+'))\n' \
                                        'press("shoot_half")\n' \
                                        'repeat\n' \
                                        '   sleep(10)\n' \
                                        'until get_shooting()\n' \
                                        'press("shoot_full")\n' \
                                        'sleep('+str(int(numpy.rint(float(self.time_value.get_value())*1000)))+')\n' \
                                        'release("shoot_full")\n' \
                                        'return')
            if float(self.shutter_value.get_value()) <= 1:
                self.status_label.set_text('Photo '+str(record_counter+1)+' of '+str(self.pics_value.get_value()))
                time.sleep(float(self.shutter_value.get_value()))
            else:
                # Long exposure: count down one second at a time so the status
                # line stays live and aborts are noticed promptly.
                seconds=0
                while seconds<float(self.shutter_value.get_value()):
                    if stop_event.isSet():
                        self.set_status_label('Aborting, waiting '+str(int(float(self.shutter_value.get_value())-seconds))+' seconds for the last photo')
                    else:
                        self.set_status_label('Photo '+str(record_counter+1)+' of '+str(self.pics_value.get_value())+' due in '+str(int(float(self.shutter_value.get_value())-seconds))+' seconds')
                    time.sleep(1)
                    seconds+=1
            self.set_status_label('Downloading photos from the camera')
            while len(self.list_files()) == 0:
                time.sleep(1)
            for i in self.list_files():
                localfile=i.split('/')[3]
                self.camera.download_file(i,localfile)
                if 'JPG' in localfile:
                    self.image.frame=cv2.resize(cv2.imread(localfile.split('.')[0]+'.JPG'), (720, 480))
                else:
                    # RAW capture: develop at half size; [...,::-1] converts RGB->BGR for OpenCV.
                    raw=rawpy.imread(localfile.split('.')[0]+'.CR2')
                    self.image.frame=cv2.resize(raw.postprocess(half_size=True, user_flip=False)[...,::-1], (720, 480))
                    raw.close()
                with self.update_lock:
                    self.image.update(self)
            self.purge_files()
            record_counter += 1
        stop_event.clear()
        self.set_status_label('Done')
        with self.update_lock:
            # Restore the idle UI state.
            self.shoot_button.set_enabled(True)
            self.video_button.set_enabled(True)
            self.stop_button.set_enabled(False)
            self.live_view_check.set_enabled(True)
def shoot_video(self, stop_event):
record_counter = 0
while record_counter < int(self.pics_value.get_value()) and not stop_event.isSet():
seconds=0
self.camera.lua_execute('press("video")')
while seconds<float(self.shutter_value.get_value()) and not stop_event.isSet():
self.set_status_label('Video '+str(record_counter+1)+' of '+str(self.pics_value.get_value())+' due in '+str(int(float(self.shutter_value.get_value())-seconds))+' seconds')
time.sleep(1)
seconds+=1
self.camera.lua_execute('press("video")')
self.set_status_label('Downloading video from the camera')
while self.camera.lua_execute('get_movie_status()') != 1:
time.sleep(1)
for i in self.list_files():
localfile=i.split('/')[3]
self.camera.download_file(i,localfile)
self.purge_files()
record_counter += 1
stop_event.clear()
self.set_status_label('Done')
with self.update_lock:
self.shoot_button.set_enabled(True)
self.video_button.set_enabled(True)
self.stop_button.set_enabled(False)
self.live_view_check.set_enabled(True)
def stop_action(self, widget):
self.status_label.set_text('Abotring...')
self.stop_event.set()
    def get_live_view(self):
        """Grab one live-view frame from the camera and decode it.

        Uses chdkptp's embedded Lua to dump a single live-view frame to
        /tmp/live_view_frame, then parses CHDK's live-view dump format
        (file header, frame length, frame header, then the viewport /
        bitmap / bitmap-overlay block descriptions and their pixel data)
        and converts the YUV viewport and bitmap planes to BGR images.

        Returns:
            (vp_rgb, bm_rgb): the viewport frame (resized/padded to
            480x720 in video mode) and the UI bitmap frame.

        NOTE(review): if the dump contains no viewport or bitmap data
        (``data_start`` <= 0), ``vp_rgb`` / ``bm_rgb`` are never assigned
        and the final ``return`` raises UnboundLocalError — confirm whether
        that can happen in practice.
        NOTE(review): the dump file is opened in text mode ('r') but read
        with numpy.fromfile — presumably works on this platform; verify.
        """
        # Ask the camera (via chdkptp's Lua bridge) to dump exactly one frame.
        self.camera._lua.eval("""
            function()
                status, err = con:live_dump_start('/tmp/live_view_frame')
                for i=1,1 do
                    status, err = con:live_get_frame(29)
                    status, err = con:live_dump_frame()
                end
                status, err = con:live_dump_end()
                return err
            end
        """)()
        # Aspect-ratio codes from the frame header (currently unused below).
        lv_aspect_ratio = {0:'LV_ASPECT_4_3', 1:'LV_ASPECT_16_9', 2:'LV_ASPECT_3_2'}
        # Framebuffer type -> bits per pixel.
        fb_type = {0:12, 1:8, 2:16, 3:16, 4:8 }
        # Binary layouts of the dump's header records (all little-endian int32).
        file_header_dtype = numpy.dtype([('magic','int32'),('header_size', 'int32'),('version_major', 'int32'),('version_minor','int32')])
        frame_length_dtype = numpy.dtype([('length','int32')])
        frame_header_dtype = numpy.dtype([('version_major','int32'),('version_minor', 'int32'),('lv_aspect_ratio', 'int32'),
            ('palette_type','int32'), ('palette_data_start','int32'), ('vp_desc_start','int32'), ('bm_desc_start','int32'),
            ('bmo_desc_start','int32')])
        block_description_dtype = numpy.dtype([('fb_type','int32'),('data_start','int32'),('buffer_width','int32'),
            ('visible_width','int32'),('visible_height','int32'),('margin_left','int32'), ('margin_top','int32'),
            ('margin_right','int32'),('margin_bottom','int32')])
        # The reads below MUST stay in this order: each fromfile() advances
        # the file position to the next record.
        myFile = open('/tmp/live_view_frame','r')
        file_header=numpy.fromfile(myFile, dtype=file_header_dtype, count=1)
        frame_length=numpy.fromfile(myFile, dtype=frame_length_dtype, count=1)
        frame_header=numpy.fromfile(myFile, dtype=frame_header_dtype, count=1)
        # Viewport (the actual camera image) block description and size.
        vp_description=numpy.fromfile(myFile, dtype=block_description_dtype, count=1)
        vp_bpp = fb_type[int(vp_description['fb_type'])]
        vp_frame_size=vp_description['buffer_width']*vp_description['visible_height']*vp_bpp/8 # in byte !
        vp_frame_size = int(vp_frame_size[0])
        # Bitmap (UI overlay) block description and size.
        bm_description=numpy.fromfile(myFile, dtype=block_description_dtype, count=1)
        bm_bpp = fb_type[int(bm_description['fb_type'])]
        bm_frame_size=bm_description['buffer_width']*bm_description['visible_height']*bm_bpp/8
        bm_frame_size = int(bm_frame_size[0])
        # Bitmap-overlay block description and size.
        bmo_description=numpy.fromfile(myFile, dtype=block_description_dtype, count=1)
        bmo_bpp = fb_type[int(bmo_description['fb_type'])]
        bmo_frame_size=bmo_description['buffer_width']*bmo_description['visible_height']*bmo_bpp/8
        bmo_frame_size = int(bmo_frame_size[0])
        if vp_description['data_start'] > 0:
            # Viewport data: interleaved UYVY-style stream; every second byte
            # is luma, chroma bytes repeat for neighbouring pixels.
            vp_raw_img=numpy.fromfile(myFile, dtype=numpy.uint8, count=vp_frame_size)
            y=vp_raw_img[1::2].reshape(int(vp_description['visible_height']),int(vp_description['buffer_width']))
            u=numpy.empty(vp_frame_size//2, dtype=numpy.uint8)
            u[0::2]=vp_raw_img[0::4]
            u[1::2]=vp_raw_img[0::4]
            u=u.reshape(int(vp_description['visible_height']),int(vp_description['buffer_width']))
            v=numpy.empty(vp_frame_size//2, dtype=numpy.uint8)
            v[0::2]=vp_raw_img[2::4]
            v[1::2]=vp_raw_img[2::4]
            v=v.reshape(int(vp_description['visible_height']),int(vp_description['buffer_width']))
            # Crop the padded buffer width down to the visible width.
            raw_yuv=numpy.dstack((y,u,v))[:,0:int(vp_description['visible_width']),:]
            vp_rgb=cv2.cvtColor(raw_yuv, cv2.COLOR_YUV2BGR)
        if bm_description['data_start'] > 0:
            # Same YUV unpacking for the UI bitmap plane.
            bm_raw_img=numpy.fromfile(myFile, dtype=numpy.uint8, count=bm_frame_size)
            y=bm_raw_img[1::2].reshape(int(bm_description['visible_height']),int(bm_description['buffer_width']))
            u=numpy.empty(bm_frame_size//2, dtype=numpy.uint8)
            u[0::2]=bm_raw_img[0::4]
            u[1::2]=bm_raw_img[0::4]
            u=u.reshape(int(bm_description['visible_height']),int(bm_description['buffer_width']))
            v=numpy.empty(bm_frame_size//2, dtype=numpy.uint8)
            v[0::2]=bm_raw_img[2::4]
            v[1::2]=bm_raw_img[2::4]
            v=v.reshape(int(bm_description['visible_height']),int(bm_description['buffer_width']))
            raw_yuv=numpy.dstack((y,u,v))[:,0:int(bm_description['visible_width']),:]
            bm_rgb=cv2.cvtColor(raw_yuv, cv2.COLOR_YUV2BGR)
        if bmo_description['data_start'] >0:
            # Overlay data is read (to advance the file) but never used.
            bmo_raw_img=numpy.fromfile(myFile, dtype=numpy.int32, count=bmo_frame_size)
        myFile.close()
        if vp_rgb.shape[0]==408: # Workaround for video mode
            # Letterbox a 408-line video-mode frame vertically into 480 lines.
            extension=numpy.zeros((480,720,3))
            extension[36:444, :, :]=vp_rgb # (480-408)/2:480-(480-408)/2, :, :
            vp_rgb=extension
        return vp_rgb, bm_rgb
def exec_lua(self, widget, value):
try:
self.camera.lua_execute(str(self.lua_value.get_value())+'\n' \
'press("shoot_half")\n' \
'repeat\n' \
' sleep(10)\n' \
'until get_shooting()\n' \
'return')
self.status_label.set_text('Done')
except:
self.status_label.set_text('Error executing LUA')
if __name__ == "__main__":
    # Serve the camera GUI on all interfaces on port 8081; remi runs M10GUI
    # as the single application with a 10 ms UI update interval.
    start(M10GUI, address='0.0.0.0', port=8081, multiple_instance=False, enable_file_cache=True, start_browser=False, debug=False, update_interval = 0.01)
| [
"numpy.dstack",
"numpy.fromfile",
"remi.start",
"cv2.imencode",
"chdkptp.ChdkDevice",
"time.sleep",
"threading.Event",
"numpy.random.randint",
"numpy.zeros",
"numpy.empty",
"cv2.cvtColor",
"numpy.full",
"numpy.dtype",
"time.time",
"threading.Thread",
"chdkptp.list_devices"
] | [((31139, 31295), 'remi.start', 'start', (['M10GUI'], {'address': '"""0.0.0.0"""', 'port': '(8081)', 'multiple_instance': '(False)', 'enable_file_cache': '(True)', 'start_browser': '(False)', 'debug': '(False)', 'update_interval': '(0.01)'}), "(M10GUI, address='0.0.0.0', port=8081, multiple_instance=False,\n enable_file_cache=True, start_browser=False, debug=False,\n update_interval=0.01)\n", (31144, 31295), False, 'from remi import start, App\n'), ((365, 414), 'numpy.full', 'numpy.full', (['(480, 720, 3)', '(155)'], {'dtype': 'numpy.uint8'}), '((480, 720, 3), 155, dtype=numpy.uint8)\n', (375, 414), False, 'import numpy\n'), ((477, 510), 'numpy.random.randint', 'numpy.random.randint', (['(100000000.0)'], {}), '(100000000.0)\n', (497, 510), False, 'import numpy\n'), ((1223, 1256), 'cv2.imencode', 'cv2.imencode', (['""".jpeg"""', 'self.frame'], {}), "('.jpeg', self.frame)\n", (1235, 1256), False, 'import cv2\n'), ((1619, 1636), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1634, 1636), False, 'import threading\n'), ((2409, 2420), 'time.time', 'time.time', ([], {}), '()\n', (2418, 2420), False, 'import time\n'), ((20296, 20360), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.shoot_pic', 'args': '(self.stop_event,)'}), '(target=self.shoot_pic, args=(self.stop_event,))\n', (20312, 20360), False, 'import threading\n'), ((21000, 21066), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.shoot_video', 'args': '(self.stop_event,)'}), '(target=self.shoot_video, args=(self.stop_event,))\n', (21016, 21066), False, 'import threading\n'), ((26509, 26628), 'numpy.dtype', 'numpy.dtype', (["[('magic', 'int32'), ('header_size', 'int32'), ('version_major', 'int32'),\n ('version_minor', 'int32')]"], {}), "([('magic', 'int32'), ('header_size', 'int32'), ('version_major',\n 'int32'), ('version_minor', 'int32')])\n", (26520, 26628), False, 'import numpy\n'), ((26649, 26683), 'numpy.dtype', 'numpy.dtype', (["[('length', 'int32')]"], {}), 
"([('length', 'int32')])\n", (26660, 26683), False, 'import numpy\n'), ((26712, 26971), 'numpy.dtype', 'numpy.dtype', (["[('version_major', 'int32'), ('version_minor', 'int32'), ('lv_aspect_ratio',\n 'int32'), ('palette_type', 'int32'), ('palette_data_start', 'int32'), (\n 'vp_desc_start', 'int32'), ('bm_desc_start', 'int32'), (\n 'bmo_desc_start', 'int32')]"], {}), "([('version_major', 'int32'), ('version_minor', 'int32'), (\n 'lv_aspect_ratio', 'int32'), ('palette_type', 'int32'), (\n 'palette_data_start', 'int32'), ('vp_desc_start', 'int32'), (\n 'bm_desc_start', 'int32'), ('bmo_desc_start', 'int32')])\n", (26723, 26971), False, 'import numpy\n'), ((27007, 27270), 'numpy.dtype', 'numpy.dtype', (["[('fb_type', 'int32'), ('data_start', 'int32'), ('buffer_width', 'int32'),\n ('visible_width', 'int32'), ('visible_height', 'int32'), ('margin_left',\n 'int32'), ('margin_top', 'int32'), ('margin_right', 'int32'), (\n 'margin_bottom', 'int32')]"], {}), "([('fb_type', 'int32'), ('data_start', 'int32'), ('buffer_width',\n 'int32'), ('visible_width', 'int32'), ('visible_height', 'int32'), (\n 'margin_left', 'int32'), ('margin_top', 'int32'), ('margin_right',\n 'int32'), ('margin_bottom', 'int32')])\n", (27018, 27270), False, 'import numpy\n'), ((27340, 27396), 'numpy.fromfile', 'numpy.fromfile', (['myFile'], {'dtype': 'file_header_dtype', 'count': '(1)'}), '(myFile, dtype=file_header_dtype, count=1)\n', (27354, 27396), False, 'import numpy\n'), ((27418, 27475), 'numpy.fromfile', 'numpy.fromfile', (['myFile'], {'dtype': 'frame_length_dtype', 'count': '(1)'}), '(myFile, dtype=frame_length_dtype, count=1)\n', (27432, 27475), False, 'import numpy\n'), ((27497, 27554), 'numpy.fromfile', 'numpy.fromfile', (['myFile'], {'dtype': 'frame_header_dtype', 'count': '(1)'}), '(myFile, dtype=frame_header_dtype, count=1)\n', (27511, 27554), False, 'import numpy\n'), ((27578, 27640), 'numpy.fromfile', 'numpy.fromfile', (['myFile'], {'dtype': 'block_description_dtype', 'count': '(1)'}), 
'(myFile, dtype=block_description_dtype, count=1)\n', (27592, 27640), False, 'import numpy\n'), ((27875, 27937), 'numpy.fromfile', 'numpy.fromfile', (['myFile'], {'dtype': 'block_description_dtype', 'count': '(1)'}), '(myFile, dtype=block_description_dtype, count=1)\n', (27889, 27937), False, 'import numpy\n'), ((28161, 28223), 'numpy.fromfile', 'numpy.fromfile', (['myFile'], {'dtype': 'block_description_dtype', 'count': '(1)'}), '(myFile, dtype=block_description_dtype, count=1)\n', (28175, 28223), False, 'import numpy\n'), ((2338, 2349), 'time.time', 'time.time', ([], {}), '()\n', (2347, 2349), False, 'import time\n'), ((21169, 21180), 'time.time', 'time.time', ([], {}), '()\n', (21178, 21180), False, 'import time\n'), ((28499, 28561), 'numpy.fromfile', 'numpy.fromfile', (['myFile'], {'dtype': 'numpy.uint8', 'count': 'vp_frame_size'}), '(myFile, dtype=numpy.uint8, count=vp_frame_size)\n', (28513, 28561), False, 'import numpy\n'), ((28690, 28740), 'numpy.empty', 'numpy.empty', (['(vp_frame_size // 2)'], {'dtype': 'numpy.uint8'}), '(vp_frame_size // 2, dtype=numpy.uint8)\n', (28701, 28740), False, 'import numpy\n'), ((28926, 28976), 'numpy.empty', 'numpy.empty', (['(vp_frame_size // 2)'], {'dtype': 'numpy.uint8'}), '(vp_frame_size // 2, dtype=numpy.uint8)\n', (28937, 28976), False, 'import numpy\n'), ((29253, 29293), 'cv2.cvtColor', 'cv2.cvtColor', (['raw_yuv', 'cv2.COLOR_YUV2BGR'], {}), '(raw_yuv, cv2.COLOR_YUV2BGR)\n', (29265, 29293), False, 'import cv2\n'), ((29362, 29424), 'numpy.fromfile', 'numpy.fromfile', (['myFile'], {'dtype': 'numpy.uint8', 'count': 'bm_frame_size'}), '(myFile, dtype=numpy.uint8, count=bm_frame_size)\n', (29376, 29424), False, 'import numpy\n'), ((29553, 29603), 'numpy.empty', 'numpy.empty', (['(bm_frame_size // 2)'], {'dtype': 'numpy.uint8'}), '(bm_frame_size // 2, dtype=numpy.uint8)\n', (29564, 29603), False, 'import numpy\n'), ((29789, 29839), 'numpy.empty', 'numpy.empty', (['(bm_frame_size // 2)'], {'dtype': 'numpy.uint8'}), 
'(bm_frame_size // 2, dtype=numpy.uint8)\n', (29800, 29839), False, 'import numpy\n'), ((30116, 30156), 'cv2.cvtColor', 'cv2.cvtColor', (['raw_yuv', 'cv2.COLOR_YUV2BGR'], {}), '(raw_yuv, cv2.COLOR_YUV2BGR)\n', (30128, 30156), False, 'import cv2\n'), ((30226, 30289), 'numpy.fromfile', 'numpy.fromfile', (['myFile'], {'dtype': 'numpy.int32', 'count': 'bmo_frame_size'}), '(myFile, dtype=numpy.int32, count=bmo_frame_size)\n', (30240, 30289), False, 'import numpy\n'), ((30396, 30422), 'numpy.zeros', 'numpy.zeros', (['(480, 720, 3)'], {}), '((480, 720, 3))\n', (30407, 30422), False, 'import numpy\n'), ((2001, 2012), 'time.time', 'time.time', ([], {}), '()\n', (2010, 2012), False, 'import time\n'), ((14060, 14082), 'chdkptp.list_devices', 'chdkptp.list_devices', ([], {}), '()\n', (14080, 14082), False, 'import chdkptp\n'), ((14111, 14140), 'chdkptp.ChdkDevice', 'chdkptp.ChdkDevice', (['device[0]'], {}), '(device[0])\n', (14129, 14140), False, 'import chdkptp\n'), ((23568, 23581), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (23578, 23581), False, 'import time\n'), ((25043, 25056), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (25053, 25056), False, 'import time\n'), ((25295, 25308), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (25305, 25308), False, 'import time\n'), ((29168, 29191), 'numpy.dstack', 'numpy.dstack', (['(y, u, v)'], {}), '((y, u, v))\n', (29180, 29191), False, 'import numpy\n'), ((30031, 30054), 'numpy.dstack', 'numpy.dstack', (['(y, u, v)'], {}), '((y, u, v))\n', (30043, 30054), False, 'import numpy\n'), ((23387, 23400), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (23397, 23400), False, 'import time\n')] |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import numpy as np
import os
import torch
import triton_python_backend_utils as pb_utils
from torch.utils.dlpack import to_dlpack, from_dlpack
from swig_decoders import ctc_beam_search_decoder_batch, Scorer, map_batch
class WenetModel(object):
    """CTC prefix beam search decoder with optional attention rescoring for
    WeNet models served through Triton's Python backend.

    Holds the vocabulary, beam-search settings and an optional external
    language-model scorer, and turns per-chunk CTC log-probabilities into
    text via :meth:`infer`.
    """

    def __init__(self, model_config, device):
        """Parse *model_config*, load the vocabulary and set up decoding.

        Args:
            model_config: Triton model parameter mapping of the form
                ``{name: {"string_value": ...}}``.
            device: device identifier (only logged here).
        """
        params = self.parse_model_parameters(model_config)
        self.device = device
        print("Using device", device)
        print("Successfully load model !")

        # Load vocabulary and derive the special token ids.
        ret = self.load_vocab(params["vocab_path"])
        self.id2vocab, self.vocab, space_id, blank_id, sos_eos = ret
        self.space_id = space_id if space_id else -1
        self.blank_id = blank_id if blank_id else 0
        self.eos = self.sos = sos_eos if sos_eos else len(self.vocab) - 1
        print("Successfully load vocabulary !")
        self.params = params

        # Beam-search settings.
        self.beam_size = params.get("beam_size")
        self.cutoff_prob = params.get("cutoff_prob")

        # Optional external language model used by the CTC beam search.
        lm_path = params.get("lm_path", None)
        alpha, beta = params.get('alpha'), params.get('beta')
        self.scorer = None
        # BUGFIX: lm_path defaults to None and os.path.exists(None) raises
        # TypeError — only probe the filesystem when a path was configured.
        if lm_path and os.path.exists(lm_path):
            self.scorer = Scorer(alpha, beta, lm_path, self.vocab)

        self.bidecoder = params.get('bidecoder')
        # Attention rescoring of the n-best CTC hypotheses at end of stream.
        self.rescoring = params.get("rescoring", 0)
        print("Using rescoring:", bool(self.rescoring))
        print("Successfully load all parameters!")
        self.dtype = torch.float16

    def generate_init_cache(self):
        """Return the initial (empty) encoder-output cache for a new stream."""
        encoder_out = None
        return encoder_out

    def load_vocab(self, vocab_file):
        """Load a ``lang_char.txt``-style vocabulary file.

        Each line holds ``<token> <id>``.

        Returns:
            ``(id2vocab, vocab, space_id, blank_id, sos_eos)`` where the
            special ids are ``None`` when the corresponding token is absent.
        """
        id2vocab = {}
        space_id, blank_id, sos_eos = None, None, None
        with open(vocab_file, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                char, id = line.split()
                id2vocab[int(id)] = char
                if char == " ":
                    space_id = int(id)
                elif char == "<blank>":
                    blank_id = int(id)
                elif char == "<sos/eos>":
                    sos_eos = int(id)
        # Dense id -> token list (assumes ids are contiguous from 0).
        vocab = [0] * len(id2vocab)
        for id, char in id2vocab.items():
            vocab[id] = char
        return (id2vocab, vocab, space_id, blank_id, sos_eos)

    def parse_model_parameters(self, model_parameters):
        """Convert Triton string parameters into typed values.

        Unknown keys are ignored; known keys are cast to the type of their
        default.  ``vocab_path`` is mandatory.

        Raises:
            AssertionError: if ``vocab_path`` was not supplied.
        """
        model_p = {"beam_size": 10,
                   "cutoff_prob": 0.999,
                   "vocab_path": None,
                   "lm_path": None,
                   "alpha": 2.0,
                   "beta": 1.0,
                   "rescoring": 0,
                   "bidecoder": 1}
        # Overlay the configured values onto the defaults.
        for li in model_parameters.items():
            key, value = li
            true_value = value["string_value"]
            if key not in model_p:
                continue
            key_type = type(model_p[key])
            if key_type == type(None):
                # Default is None -> keep the raw string.
                model_p[key] = true_value
            else:
                model_p[key] = key_type(true_value)
        assert model_p["vocab_path"] is not None
        return model_p

    def infer(self, batch_log_probs, batch_log_probs_idx,
              seq_lens, rescore_index, batch_states):
        """Decode one batch of streaming chunks into text.

        Args:
            batch_log_probs: per-frame top-k CTC log-probabilities.
            batch_log_probs_idx: token ids matching *batch_log_probs*.
            seq_lens: valid frame count for each batch entry.
            rescore_index: mapping whose keys are the batch indices of
                streams that ended and should be attention-rescored.
            batch_states: ``[trieVector, batch_start, batch_encoder_hist,
                cur_encoder_out]``.

        Returns:
            ``(decoded_strings, cur_encoder_out)`` — the caller caches the
            encoder output for later rescoring.
        """
        trie_vector, batch_start, batch_encoder_hist, cur_encoder_out = batch_states
        num_processes = min(multiprocessing.cpu_count(), len(batch_log_probs))
        score_hyps = self.batch_ctc_prefix_beam_search_cpu(batch_log_probs,
                                                           batch_log_probs_idx,
                                                           seq_lens,
                                                           trie_vector,
                                                           batch_start,
                                                           self.beam_size,
                                                           self.blank_id,
                                                           self.space_id,
                                                           self.cutoff_prob,
                                                           num_processes,
                                                           self.scorer)
        if self.rescoring and len(rescore_index) != 0:
            # Gather full encoder history for each finished stream.
            rescore_encoder_hist = []
            rescore_encoder_lens = []
            rescore_hyps = []
            res_idx = list(rescore_index.keys())
            max_length = -1
            for idx in res_idx:
                hist_enc = batch_encoder_hist[idx]
                if hist_enc is None:
                    # BUGFIX: stream ended on its very first chunk —
                    # len(None) would raise TypeError; the mask length is
                    # just the current chunk's length.
                    cur_enc = cur_encoder_out[idx]
                    cur_mask_len = int(seq_lens[idx])
                else:
                    cur_enc = torch.cat([hist_enc, cur_encoder_out[idx]], axis=0)
                    cur_mask_len = int(len(hist_enc) + seq_lens[idx])
                rescore_encoder_hist.append(cur_enc)
                rescore_encoder_lens.append(cur_mask_len)
                rescore_hyps.append(score_hyps[idx])
                if cur_enc.shape[0] > max_length:
                    max_length = cur_enc.shape[0]
            best_index = self.batch_rescoring(rescore_hyps, rescore_encoder_hist,
                                              rescore_encoder_lens, max_length)
        best_sent = []
        j = 0
        for idx, li in enumerate(score_hyps):
            if idx in rescore_index and self.rescoring:
                # Use the attention-rescored best hypothesis.
                best_sent.append(li[best_index[j]][1])
                j += 1
            else:
                # Use the top CTC hypothesis.
                best_sent.append(li[0][1])
        final_result = map_batch(best_sent, self.vocab, num_processes)
        return final_result, cur_encoder_out

    def batch_ctc_prefix_beam_search_cpu(self, batch_log_probs_seq,
                                         batch_log_probs_idx,
                                         batch_len, batch_root,
                                         batch_start, beam_size,
                                         blank_id, space_id,
                                         cutoff_prob, num_processes,
                                         scorer):
        """Run the multiprocess CTC prefix beam search.

        Returns:
            Batch x Beam_size elements; each element is a tuple
            ``(score, list of ids)``.
        """
        batch_len_list = batch_len
        batch_log_probs_seq_list = []
        batch_log_probs_idx_list = []
        # Trim the padded frames to each sequence's valid length.
        for i in range(len(batch_len_list)):
            cur_len = int(batch_len_list[i])
            batch_log_probs_seq_list.append(batch_log_probs_seq[i][0:cur_len].tolist())
            batch_log_probs_idx_list.append(batch_log_probs_idx[i][0:cur_len].tolist())
        score_hyps = ctc_beam_search_decoder_batch(batch_log_probs_seq_list,
                                                   batch_log_probs_idx_list,
                                                   batch_root,
                                                   batch_start,
                                                   beam_size,
                                                   num_processes,
                                                   blank_id,
                                                   space_id,
                                                   cutoff_prob,
                                                   scorer)
        return score_hyps

    def batch_rescoring(self, score_hyps, hist_enc, hist_mask_len, max_len):
        """Rescore the n-best CTC hypotheses with the attention decoder.

        score_hyps: [((ctc_score, (id1, id2, id3, ....)), (), ...), ....]
        hist_enc: [len1xF, len2xF, .....]
        hist_mask: [1x1xlen1, 1x1xlen2]
        return bzx1 best_index
        """
        bz = len(hist_enc)
        f = hist_enc[0].shape[-1]
        beam_size = self.beam_size
        encoder_lens = np.zeros((bz, 1), dtype=np.int32)
        encoder_out = torch.zeros((bz, max_len, f), dtype=self.dtype)
        hyps = []
        ctc_score = torch.zeros((bz, beam_size), dtype=self.dtype)
        max_seq_len = 0
        for i in range(bz):
            cur_len = hist_enc[i].shape[0]
            encoder_out[i, 0:cur_len] = hist_enc[i]
            encoder_lens[i, 0] = hist_mask_len[i]
            # Pad the candidate list up to beam_size with dummy hypotheses.
            if len(score_hyps[i]) < beam_size:
                to_append = (beam_size - len(score_hyps[i])) * [(-10000, ())]
                score_hyps[i] = list(score_hyps[i]) + to_append
            for idx, c in enumerate(score_hyps[i]):
                score, idlist = c
                if score < -10000:
                    score = -10000
                ctc_score[i][idx] = score
                hyps.append(list(idlist))
                if len(hyps[-1]) > max_seq_len:
                    max_seq_len = len(hyps[-1])
        max_seq_len += 2  # room for <sos> and <eos>
        hyps_pad_sos_eos = np.ones((bz, beam_size, max_seq_len), dtype=np.int64)
        hyps_pad_sos_eos = hyps_pad_sos_eos * self.eos  # fill eos
        if self.bidecoder:
            # Right-to-left decoder input (reversed hypotheses).
            r_hyps_pad_sos_eos = np.ones((bz, beam_size, max_seq_len), dtype=np.int64)
            r_hyps_pad_sos_eos = r_hyps_pad_sos_eos * self.eos
        hyps_lens_sos = np.ones((bz, beam_size), dtype=np.int32)
        bz_id = 0
        for idx, cand in enumerate(hyps):
            bz_id = idx // beam_size
            length = len(cand) + 2
            bz_offset = idx % beam_size
            pad_cand = [self.sos] + cand + [self.eos]
            hyps_pad_sos_eos[bz_id][bz_offset][0:length] = pad_cand
            if self.bidecoder:
                r_pad_cand = [self.sos] + cand[::-1] + [self.eos]
                r_hyps_pad_sos_eos[bz_id][bz_offset][0:length] = r_pad_cand
            hyps_lens_sos[bz_id][idx % beam_size] = len(cand) + 1
        # Build the BLS request into the "decoder" Triton model.
        in0 = pb_utils.Tensor.from_dlpack("encoder_out", to_dlpack(encoder_out))
        in1 = pb_utils.Tensor("encoder_out_lens", encoder_lens)
        in2 = pb_utils.Tensor("hyps_pad_sos_eos", hyps_pad_sos_eos)
        in3 = pb_utils.Tensor("hyps_lens_sos", hyps_lens_sos)
        input_tensors = [in0, in1, in2, in3]
        if self.bidecoder:
            in4 = pb_utils.Tensor("r_hyps_pad_sos_eos", r_hyps_pad_sos_eos)
            input_tensors.append(in4)
        in5 = pb_utils.Tensor.from_dlpack("ctc_score", to_dlpack(ctc_score))
        input_tensors.append(in5)
        request = pb_utils.InferenceRequest(model_name='decoder',
                                            requested_output_names=['best_index'],
                                            inputs=input_tensors)
        response = request.exec()
        best_index = pb_utils.get_output_tensor_by_name(response, 'best_index')
        best_index = from_dlpack(best_index.to_dlpack()).clone()
        best_index = best_index.numpy()[:, 0]
        return best_index

    def __del__(self):
        print("remove wenet model")
| [
"os.path.exists",
"numpy.ones",
"triton_python_backend_utils.InferenceRequest",
"swig_decoders.map_batch",
"torch.utils.dlpack.to_dlpack",
"triton_python_backend_utils.Tensor",
"swig_decoders.Scorer",
"multiprocessing.cpu_count",
"numpy.zeros",
"triton_python_backend_utils.get_output_tensor_by_nam... | [((1805, 1828), 'os.path.exists', 'os.path.exists', (['lm_path'], {}), '(lm_path)\n', (1819, 1828), False, 'import os\n'), ((6428, 6475), 'swig_decoders.map_batch', 'map_batch', (['best_sent', 'self.vocab', 'num_processes'], {}), '(best_sent, self.vocab, num_processes)\n', (6437, 6475), False, 'from swig_decoders import ctc_beam_search_decoder_batch, Scorer, map_batch\n'), ((7491, 7672), 'swig_decoders.ctc_beam_search_decoder_batch', 'ctc_beam_search_decoder_batch', (['batch_log_probs_seq_list', 'batch_log_probs_idx_list', 'batch_root', 'batch_start', 'beam_size', 'num_processes', 'blank_id', 'space_id', 'cutoff_prob', 'scorer'], {}), '(batch_log_probs_seq_list,\n batch_log_probs_idx_list, batch_root, batch_start, beam_size,\n num_processes, blank_id, space_id, cutoff_prob, scorer)\n', (7520, 7672), False, 'from swig_decoders import ctc_beam_search_decoder_batch, Scorer, map_batch\n'), ((8559, 8592), 'numpy.zeros', 'np.zeros', (['(bz, 1)'], {'dtype': 'np.int32'}), '((bz, 1), dtype=np.int32)\n', (8567, 8592), True, 'import numpy as np\n'), ((8615, 8662), 'torch.zeros', 'torch.zeros', (['(bz, max_len, f)'], {'dtype': 'self.dtype'}), '((bz, max_len, f), dtype=self.dtype)\n', (8626, 8662), False, 'import torch\n'), ((8701, 8747), 'torch.zeros', 'torch.zeros', (['(bz, beam_size)'], {'dtype': 'self.dtype'}), '((bz, beam_size), dtype=self.dtype)\n', (8712, 8747), False, 'import torch\n'), ((9556, 9609), 'numpy.ones', 'np.ones', (['(bz, beam_size, max_seq_len)'], {'dtype': 'np.int64'}), '((bz, beam_size, max_seq_len), dtype=np.int64)\n', (9563, 9609), True, 'import numpy as np\n'), ((9879, 9919), 'numpy.ones', 'np.ones', (['(bz, beam_size)'], {'dtype': 'np.int32'}), '((bz, beam_size), dtype=np.int32)\n', (9886, 9919), True, 'import numpy as np\n'), ((10550, 10599), 'triton_python_backend_utils.Tensor', 'pb_utils.Tensor', (['"""encoder_out_lens"""', 'encoder_lens'], {}), "('encoder_out_lens', encoder_lens)\n", 
(10565, 10599), True, 'import triton_python_backend_utils as pb_utils\n'), ((10614, 10667), 'triton_python_backend_utils.Tensor', 'pb_utils.Tensor', (['"""hyps_pad_sos_eos"""', 'hyps_pad_sos_eos'], {}), "('hyps_pad_sos_eos', hyps_pad_sos_eos)\n", (10629, 10667), True, 'import triton_python_backend_utils as pb_utils\n'), ((10682, 10729), 'triton_python_backend_utils.Tensor', 'pb_utils.Tensor', (['"""hyps_lens_sos"""', 'hyps_lens_sos'], {}), "('hyps_lens_sos', hyps_lens_sos)\n", (10697, 10729), True, 'import triton_python_backend_utils as pb_utils\n'), ((11045, 11158), 'triton_python_backend_utils.InferenceRequest', 'pb_utils.InferenceRequest', ([], {'model_name': '"""decoder"""', 'requested_output_names': "['best_index']", 'inputs': 'input_tensors'}), "(model_name='decoder', requested_output_names=[\n 'best_index'], inputs=input_tensors)\n", (11070, 11158), True, 'import triton_python_backend_utils as pb_utils\n'), ((11297, 11355), 'triton_python_backend_utils.get_output_tensor_by_name', 'pb_utils.get_output_tensor_by_name', (['response', '"""best_index"""'], {}), "(response, 'best_index')\n", (11331, 11355), True, 'import triton_python_backend_utils as pb_utils\n'), ((1856, 1896), 'swig_decoders.Scorer', 'Scorer', (['alpha', 'beta', 'lm_path', 'self.vocab'], {}), '(alpha, beta, lm_path, self.vocab)\n', (1862, 1896), False, 'from swig_decoders import ctc_beam_search_decoder_batch, Scorer, map_batch\n'), ((4213, 4240), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (4238, 4240), False, 'import multiprocessing\n'), ((9737, 9790), 'numpy.ones', 'np.ones', (['(bz, beam_size, max_seq_len)'], {'dtype': 'np.int64'}), '((bz, beam_size, max_seq_len), dtype=np.int64)\n', (9744, 9790), True, 'import numpy as np\n'), ((10512, 10534), 'torch.utils.dlpack.to_dlpack', 'to_dlpack', (['encoder_out'], {}), '(encoder_out)\n', (10521, 10534), False, 'from torch.utils.dlpack import to_dlpack, from_dlpack\n'), ((10820, 10877), 
'triton_python_backend_utils.Tensor', 'pb_utils.Tensor', (['"""r_hyps_pad_sos_eos"""', 'r_hyps_pad_sos_eos'], {}), "('r_hyps_pad_sos_eos', r_hyps_pad_sos_eos)\n", (10835, 10877), True, 'import triton_python_backend_utils as pb_utils\n'), ((10971, 10991), 'torch.utils.dlpack.to_dlpack', 'to_dlpack', (['ctc_score'], {}), '(ctc_score)\n', (10980, 10991), False, 'from torch.utils.dlpack import to_dlpack, from_dlpack\n'), ((5581, 5632), 'torch.cat', 'torch.cat', (['[hist_enc, cur_encoder_out[idx]]'], {'axis': '(0)'}), '([hist_enc, cur_encoder_out[idx]], axis=0)\n', (5590, 5632), False, 'import torch\n')] |
# 获取学生所有的挑战题目信息
# 包括时间、结果、代码等
# 写入数据库stuoj的stuquestionbh表中
from selenium import webdriver
# from selenium.webdriver.common.by import By
import pymysql
import re
from bs4 import BeautifulSoup
import connsql
# import loginzznuoj
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import ojtmxx
import time
# driver_path = "D:\\ChromeCoreDownloads\\chromedriver_win32\\chromedriver.exe"
# driver = webdriver.Chrome()
driver = webdriver.PhantomJS()  # headless browser shared by all scraping helpers below
cur = connsql.conn.cursor()  # module-level DB cursor, closed in the __main__ block
# Fetch one student's submission records for a single problem.
def getOjQuesNo(stuno, quesno):  # student number, problem number
    """Scrape every submission of *stuno* for problem *quesno* from the OJ
    status page and insert one row per submission into ``stuchallenged``."""
    status_url = ("http://47.95.10.46/status.php?pid=" + str(quesno)
                  + "&uid=" + str(stuno) + "&language=-1&judgeresult=-1")
    driver.get(status_url)
    submission_rows = driver.find_elements_by_xpath("//tbody/tr")
    insert_sql = ("insert into stuchallenged(challengeid,stuno,questionid,result,memory,timecost,"
                  "language,codelength,challtime) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)")
    if len(submission_rows) > 0:
        for row in submission_rows:
            # Each row's text holds nine space-separated fields.
            fields = str(row.text).split(' ', 8)
            cur.execute(insert_sql, fields)
def loginzznuoj():
    """Log in to the ZZNU OJ site with the fixed account."""
    driver.get('http://47.95.10.46/loginpage.php')
    driver.find_element_by_name("username").send_keys('slp')
    driver.find_element_by_name("password").send_keys('<PASSWORD>')
    # The login form's only <button> is the submit button.
    driver.find_element_by_tag_name("button").click()
def getsubmitcode(quesno, submitno):
    """Return the source code of submission *submitno* for problem *quesno*."""
    driver.get("http://47.95.10.46/problemsubmit.php?sid=" + str(submitno)
               + "&pid=" + str(quesno))
    # The source lives in a hidden textarea; unhide it so .text is readable.
    driver.execute_script("document.getElementById('source').style.display='block';")
    return driver.find_element_by_name("source").text
def getstuquestions(stuno):
    """Search the OJ rank list for *stuno* and record every problem id the
    student has worked on into the ``stuques`` table.

    Args:
        stuno: student number used both as the search keyword and as the
            ``stuno`` column value of the inserted rows.
    """
    # Open the OJ "ranklist" page and search for the student.
    driver.get("http://47.95.10.46/ranklist.php")
    driver.find_element_by_name("keyword").send_keys(stuno)
    button = driver.find_elements_by_xpath("//button[@class='btn btn-default']")[1]
    button.click()
    # Link to the student's detail view (name / solved count / submissions).
    link1 = driver.find_elements_by_xpath("//div[@class='col-md-12']/table/tbody/tr/td/a")
    link = link1[0]  # removed unused counter `i = 0` from the original
    link.click()
    # Each anchor inside the collapsed panel is one problem id.
    timuhaos = driver.find_elements_by_xpath("//div[@class='well collapse in']/a")
    sql = "insert into stuques(stuno,questionbh)values(%s,%s)"  # hoisted out of the loop
    for tihaolink in timuhaos:
        cur.execute(sql, (stuno, tihaolink.text))
def getStuChallengenum(stuno):
    """Return (as text) the number of challenges the student has submitted."""
    driver.get("http://47.95.10.46/ranklist.php?keyword=" + str(stuno))
    # The fourth anchor in the result row holds the challenge count.
    anchors = driver.find_elements_by_xpath("//tbody//a")
    return anchors[3].text
def getstudentno(banjiid):
    """Return all student numbers of class *banjiid* as a tuple of DB rows.

    Uses a parameterised query instead of string concatenation (safe
    against SQL injection and quoting issues).
    """
    cur.execute("select stuno from student where banjiid = %s", (banjiid,))
    return cur.fetchall()
def getstudentxuehao():
    """For each class id, fetch its students and record the problems every
    student has done (via :func:`getstuquestions`).
    """
    banjiids = tuple(range(1, 2))  # class ids to process
    for banjiid in banjiids:
        print(banjiid)
        # Parameterised query instead of string concatenation.
        cur.execute("select stuno from student where banjiid = %s", (banjiid,))
        for row in cur.fetchall():
            # BUGFIX: pass the student number (row[0]), not the whole DB row
            # tuple — matches how rows are unpacked elsewhere in this module.
            getstuquestions(row[0])
# getstudentxuehao()# 得到所有学生做的题号
# getOjQuesNo('204215091001', '1003')
# cur.close()
def getBanjiChallengeNum(banjino):
    """Refresh the stored challenge count of every student in class *banjino*."""
    update_sql = "update student set challenge=%s where ojusername=%s"
    for row in getbanjistuojusername(banjino):
        username = row[0]
        # Scrape the current challenge count and write it back to the DB.
        cur.execute(update_sql, (getStuChallengenum(username), username))
def getbanjistuojusername(banjino):
    """Return the OJ usernames of all students in class *banjino* (DB rows)."""
    cur.execute("select ojusername from student where banjiid=%s", (banjino,))
    return cur.fetchall()
# Challenge records whose source code has not been downloaded yet.
def getstuchallenge():
    """Return (questionid, challengeid) rows with a NULL ``code`` column."""
    cur.execute("select `questionid`,`challengeid` from `stuchallenged` where `code` is null")
    return cur.fetchall()
def updatequescode(quesno, submitno, code):
    """Store the downloaded *code* for one (problem, submission) pair."""
    update_sql = "update `stuchallenged` set `code`=%s where `questionid`=%s and `challengeid`=%s"
    cur.execute(update_sql, (code, quesno, submitno))
if __name__ == '__main__':
    # Download the source code of every challenge row whose `code` is NULL.
    # cur = connsql.conn.cursor() # reuse the conn variable from connsql
    # getBanjiChallengeNum(1)
    # getStuChallengenum('204215091001')
    # getOjQuesNo('204215091001', '1003')
    # print(getbanjistuojusername(1))
    # cur.close()
    loginzznuoj() # log in to the school OJ platform
    time.sleep(2)
    # codes = getsubmitcode(1000,1063215)
    # print(codes)
    # options.addArguments(
    #     "--user-data-dir=" + System.getenv("USERPROFILE") + "/AppData/Local/Google/Chrome/User Data/Default");
    # driver.get("http://4172.16.17.32/problemsubmit.php?sid=1063215&pid=1000")
    results = getstuchallenge()  # (questionid, challengeid) pairs still missing code
    for result in results:
        print(result)
        # url = "http://47.95.10.46/problemsubmit.php?sid=" + str(result[1]) + "&pid=" + str(result[0])  # submission page
        # # driver.get(url)
        codes = getsubmitcode(result[0], result[1])
        if len(codes) > 5000:  # truncate — presumably the DB column limit; verify schema
            codes = codes[0:5000]
        # print(codes)
        updatequescode(result[0], result[1], str(codes).strip()) # store the submitted code
    # getsubmitcode('1003', '1068443')
    # Next step: fetch every submission of every student (2021-01-17)
    # stuxhlist = getbanjistuojusername(1) # usernames of students in class 1
    # questionnolist= ojtmxx.getojallquesnofromdatabase() # problem ids from the DB
    # print(questionnolist)
    # for stuno in stuxhlist:
    #     stuno1 = stuno[0] # student username
    # for i in range(33, 35):
    #     stuno1='2042150910'+str(i)
    #     # if int(stuno1) > 204215091003:
    #     for questionno in questionnolist:
    #         questionno0 = questionno[0]
    #         print((stuno1, questionno0))
    #         getOjQuesNo(stuno1, questionno0)
    # stuno1 = '204215091032'
    # for questionno0 in range(1000, 2200):
    #     print((stuno1, questionno0))
    #     getOjQuesNo(stuno1, questionno0)
    cur.close()
    driver.close()
| [
"selenium.webdriver.PhantomJS",
"time.sleep",
"connsql.conn.cursor"
] | [((497, 518), 'selenium.webdriver.PhantomJS', 'webdriver.PhantomJS', ([], {}), '()\n', (516, 518), False, 'from selenium import webdriver\n'), ((525, 546), 'connsql.conn.cursor', 'connsql.conn.cursor', ([], {}), '()\n', (544, 546), False, 'import connsql\n'), ((5715, 5728), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (5725, 5728), False, 'import time\n')] |
import asyncio
import time
import uvloop
import importlib
from pyrogram import Client as Bot, idle
from .config import API_ID, API_HASH, BOT_TOKEN, MONGO_DB_URI, SUDO_USERS, LOG_GROUP_ID
from Yukki import BOT_NAME, ASSNAME, app, chacha, aiohttpsession
from Yukki.YukkiUtilities.database.functions import clean_restart_stage
from Yukki.YukkiUtilities.database.queue import (get_active_chats, remove_active_chat)
from .YukkiUtilities.tgcallsrun import run
from pyrogram import Client, idle
from motor.motor_asyncio import AsyncIOMotorClient as MongoClient
import time
Bot(
':yukki:',
API_ID,
API_HASH,
bot_token=BOT_TOKEN,
plugins={'root': 'Yukki.Plugins'},
).start()
print(f"[INFO]: BOT STARTED AS {BOT_NAME}!")
print(f"[INFO]: ASSISTANT STARTED AS {ASSNAME}!")
async def load_start():
restart_data = await clean_restart_stage()
if restart_data:
print("[INFO]: SENDING RESTART STATUS")
try:
await app.edit_message_text(
restart_data["chat_id"],
restart_data["message_id"],
"**Restarted the Bot Successfully.**",
)
except Exception:
pass
served_chats = []
try:
chats = await get_active_chats()
for chat in chats:
served_chats.append(int(chat["chat_id"]))
except Exception as e:
print("Error came while clearing db")
for served_chat in served_chats:
try:
await remove_active_chat(served_chat)
except Exception as e:
print("Error came while clearing db")
pass
await app.send_message(LOG_GROUP_ID, "Bot Started")
await chacha.send_message(LOG_GROUP_ID, "Assistant Started")
print("[INFO]: STARTED")
loop = asyncio.get_event_loop()
loop.run_until_complete(load_start())
run()
loop.close()
print("[LOG] CLOSING BOT")
| [
"Yukki.YukkiUtilities.database.queue.remove_active_chat",
"Yukki.chacha.send_message",
"Yukki.app.send_message",
"Yukki.YukkiUtilities.database.queue.get_active_chats",
"Yukki.app.edit_message_text",
"Yukki.YukkiUtilities.database.functions.clean_restart_stage",
"asyncio.get_event_loop",
"pyrogram.Cli... | [((1814, 1838), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1836, 1838), False, 'import asyncio\n'), ((567, 659), 'pyrogram.Client', 'Bot', (['""":yukki:"""', 'API_ID', 'API_HASH'], {'bot_token': 'BOT_TOKEN', 'plugins': "{'root': 'Yukki.Plugins'}"}), "(':yukki:', API_ID, API_HASH, bot_token=BOT_TOKEN, plugins={'root':\n 'Yukki.Plugins'})\n", (570, 659), True, 'from pyrogram import Client as Bot, idle\n'), ((836, 857), 'Yukki.YukkiUtilities.database.functions.clean_restart_stage', 'clean_restart_stage', ([], {}), '()\n', (855, 857), False, 'from Yukki.YukkiUtilities.database.functions import clean_restart_stage\n'), ((1658, 1703), 'Yukki.app.send_message', 'app.send_message', (['LOG_GROUP_ID', '"""Bot Started"""'], {}), "(LOG_GROUP_ID, 'Bot Started')\n", (1674, 1703), False, 'from Yukki import BOT_NAME, ASSNAME, app, chacha, aiohttpsession\n'), ((1714, 1768), 'Yukki.chacha.send_message', 'chacha.send_message', (['LOG_GROUP_ID', '"""Assistant Started"""'], {}), "(LOG_GROUP_ID, 'Assistant Started')\n", (1733, 1768), False, 'from Yukki import BOT_NAME, ASSNAME, app, chacha, aiohttpsession\n'), ((1231, 1249), 'Yukki.YukkiUtilities.database.queue.get_active_chats', 'get_active_chats', ([], {}), '()\n', (1247, 1249), False, 'from Yukki.YukkiUtilities.database.queue import get_active_chats, remove_active_chat\n'), ((958, 1075), 'Yukki.app.edit_message_text', 'app.edit_message_text', (["restart_data['chat_id']", "restart_data['message_id']", '"""**Restarted the Bot Successfully.**"""'], {}), "(restart_data['chat_id'], restart_data['message_id'],\n '**Restarted the Bot Successfully.**')\n", (979, 1075), False, 'from Yukki import BOT_NAME, ASSNAME, app, chacha, aiohttpsession\n'), ((1472, 1503), 'Yukki.YukkiUtilities.database.queue.remove_active_chat', 'remove_active_chat', (['served_chat'], {}), '(served_chat)\n', (1490, 1503), False, 'from Yukki.YukkiUtilities.database.queue import get_active_chats, remove_active_chat\n')] |
import hashlib
from abc import abstractmethod, ABC
from typing import TYPE_CHECKING
from .. import Signature, ExternalAddress, Hash32
from loopchain.crypto.hashing import build_hash_generator
if TYPE_CHECKING:
from secp256k1 import PrivateKey
from . import Transaction, TransactionVersioner
class TransactionBuilder(ABC):
_hash_salt = None
def __init__(self, hash_generator_version: int):
self._hash_generator = build_hash_generator(hash_generator_version, self._hash_salt)
# Attributes that must be assigned
self.private_key: 'PrivateKey' = None
# Attributes to be generated
self.from_address: 'ExternalAddress' = None
self.hash: 'Hash32' = None
self.signature: 'Signature' = None
self.origin_data: dict = None
self.raw_data: dict = None
def reset_cache(self):
self.from_address = None
self.hash = None
self.signature = None
self.origin_data = None
self.raw_data = None
@abstractmethod
def build(self) -> 'Transaction':
raise NotImplementedError
def build_hash(self):
if self.origin_data is None:
raise RuntimeError(f"origin data is required. Run build_origin_data.")
self.hash = self._build_hash()
return self.hash
def _build_hash(self):
return Hash32(self._hash_generator.generate_hash(self.origin_data))
def build_from_address(self):
if self.private_key is None:
raise RuntimeError(f"private_key is required.")
self.from_address = self._build_from_address()
return self.from_address
def _build_from_address(self):
serialized_pub = self.private_key.pubkey.serialize(compressed=False)
hashed_pub = hashlib.sha3_256(serialized_pub[1:]).digest()
return ExternalAddress(hashed_pub[-20:])
@abstractmethod
def build_raw_data(self) -> dict:
pass
@abstractmethod
def build_origin_data(self) -> dict:
pass
def sign(self):
if self.hash is None:
self.build_hash()
self.signature = self._sign()
return self.signature
def _sign(self):
raw_sig = self.private_key.ecdsa_sign_recoverable(msg=self.hash,
raw=True,
digest=hashlib.sha3_256)
serialized_sig, recover_id = self.private_key.ecdsa_recoverable_serialize(raw_sig)
signature = serialized_sig + bytes((recover_id, ))
return Signature(signature)
@classmethod
def new(cls, version: str, versioner: 'TransactionVersioner'):
from . import genesis, v2, v3
hash_generator_version = versioner.get_hash_generator_version(version)
if version == genesis.version:
return genesis.TransactionBuilder(hash_generator_version)
elif version == v2.version:
return v2.TransactionBuilder(hash_generator_version)
elif version == v3.version:
return v3.TransactionBuilder(hash_generator_version)
raise RuntimeError(f"Not supported tx version({version})")
| [
"loopchain.crypto.hashing.build_hash_generator",
"hashlib.sha3_256"
] | [((441, 502), 'loopchain.crypto.hashing.build_hash_generator', 'build_hash_generator', (['hash_generator_version', 'self._hash_salt'], {}), '(hash_generator_version, self._hash_salt)\n', (461, 502), False, 'from loopchain.crypto.hashing import build_hash_generator\n'), ((1775, 1811), 'hashlib.sha3_256', 'hashlib.sha3_256', (['serialized_pub[1:]'], {}), '(serialized_pub[1:])\n', (1791, 1811), False, 'import hashlib\n')] |
import csv
import os
resource_dir="/Users/jyj/OneDrive/A_A_Data_Analysis/MINSTP201808DATA2/03-Python/Homework/PyBank/Resources"
file_path=os.path.join(resource_dir,"budget_data.csv")
with open(file_path,newline="") as data_file:
csvreader=csv.reader(data_file,delimiter=",")
next(csvreader)
i=0
Num_month=0
Pro_each_month=[]
months=[]
for row in csvreader:
#print(row)
months.append(row[0])
Pro_each_month.append(float(row[1]))
# if i==5:
# break
# i=i+1
Num_month=Num_month+1
print("Financial Analysis")
print("____________________")
print("Total Months:{}".format(Num_month))
print("Total:${}".format(sum(Pro_each_month)))
ss1=Pro_each_month[:-1]
ss2=Pro_each_month[1:]
ss=[ss2[i]-ss1[i] for i in range(Num_month-1)]
print("Average change:${}".format(sum(ss)/(Num_month-1)))
print("Greatest increase in Profits :{} (${})".format(months[ss.index(max(ss))+1],max(ss)))
print("Greatest Decrease in Profits :{} (${})".format(months[ss.index(min(ss))+1],min(ss)))
| [
"os.path.join",
"csv.reader"
] | [((140, 185), 'os.path.join', 'os.path.join', (['resource_dir', '"""budget_data.csv"""'], {}), "(resource_dir, 'budget_data.csv')\n", (152, 185), False, 'import os\n'), ((247, 283), 'csv.reader', 'csv.reader', (['data_file'], {'delimiter': '""","""'}), "(data_file, delimiter=',')\n", (257, 283), False, 'import csv\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from kubernetes.client import models as k8s_models
from kubernetes.client import apis as k8s_apis
from kubernetes.client.rest import ApiException
from urllib3.exceptions import MaxRetryError
from . import VERSION_RX
from .. import config
from ..client import models as openshift_models
from ..client import apis as openshift_apis
from ..client import ApiClient, ConfigurationObject
from .base import BaseObjectHelper
from .exceptions import OpenShiftException
class OpenShiftObjectHelper(BaseObjectHelper):
@staticmethod
def client_from_config(config_file, context):
if not config_file:
return ApiClient(config=ConfigurationObject())
return config.new_client_from_config(config_file, context)
@classmethod
def available_apis(cls):
apis = ['OapiApi']
apis.extend([x for x in dir(openshift_apis) if VERSION_RX.search(x)])
apis.extend([x for x in dir(k8s_apis) if VERSION_RX.search(x)])
return apis
@staticmethod
def get_exception_class():
return OpenShiftException
@staticmethod
def model_class_from_name(model_name):
try:
return getattr(openshift_models, model_name)
except AttributeError:
return getattr(k8s_models, model_name)
@staticmethod
def api_class_from_name(api_name):
try:
return getattr(openshift_apis, api_name)
except AttributeError:
return getattr(k8s_apis, api_name)
def create_project(self, metadata, display_name=None, description=None):
""" Creating a project requires using the project_request endpoint. """
# TODO: handle admin-level project creation
w, stream = self._create_stream(None)
try:
proj_req = openshift_models.V1ProjectRequest(metadata=metadata, display_name=display_name, description=description)
openshift_apis.OapiApi(self.api_client).create_project_request(proj_req)
except ApiException as exc:
msg = json.loads(exc.body).get('message', exc.reason) if exc.body.startswith('{') else exc.body
raise OpenShiftException(msg, status=exc.status)
except MaxRetryError as ex:
raise OpenShiftException(str(ex.reason))
self._read_stream(w, stream, metadata.name)
return self._wait_for_response(metadata.name, None, 'create')
| [
"json.loads"
] | [((2091, 2111), 'json.loads', 'json.loads', (['exc.body'], {}), '(exc.body)\n', (2101, 2111), False, 'import json\n')] |
"""Temporal VAE with gaussian margial and laplacian transition prior"""
import torch
import numpy as np
import ipdb as pdb
import torch.nn as nn
import pytorch_lightning as pl
import torch.distributions as D
from torch.nn import functional as F
from .components.beta import BetaVAE_MLP
from .metrics.correlation import compute_mcc
from .components.base import GroupLinearLayer
from .components.transforms import ComponentWiseSpline
def reconstruction_loss(x, x_recon, distribution):
batch_size = x.size(0)
assert batch_size != 0
if distribution == 'bernoulli':
recon_loss = F.binary_cross_entropy_with_logits(
x_recon, x, size_average=False).div(batch_size)
elif distribution == 'gaussian':
recon_loss = F.mse_loss(x_recon, x, size_average=False).div(batch_size)
elif distribution == 'sigmoid':
x_recon = F.sigmoid(x_recon)
recon_loss = F.mse_loss(x_recon, x, size_average=False).div(batch_size)
return recon_loss
def compute_cross_ent_normal(mu, logvar):
return 0.5 * (mu**2 + torch.exp(logvar)) + np.log(np.sqrt(2 * np.pi))
def compute_ent_normal(logvar):
return 0.5 * (logvar + np.log(2 * np.pi * np.e))
def compute_sparsity(mu, normed=True):
# assume couples, compute normalized sparsity
diff = mu[::2] - mu[1::2]
if normed:
norm = torch.norm(diff, dim=1, keepdim=True)
norm[norm == 0] = 1 # keep those that are same, dont divide by 0
diff = diff / norm
return torch.mean(torch.abs(diff))
class AfflineVAESynthetic(pl.LightningModule):
def __init__(
self,
input_dim,
lag=1,
beta=1,
alpha=1,
lr=1e-4,
z_dim=10,
gamma=10,
rate_prior=1,
hidden_dim=128,
diagonal=False,
decoder_dist='gaussian',
nonlinear_type='gaussian'
):
'''Import Beta-VAE as encoder/decoder'''
super().__init__()
self.net = BetaVAE_MLP(input_dim=input_dim,
z_dim=z_dim,
hidden_dim=hidden_dim)
self.trans_func = GroupLinearLayer(din=z_dim,
dout=z_dim,
num_blocks=lag,
diagonal=diagonal)
self.spline = ComponentWiseSpline(input_dim=z_dim,
bound=5,
count_bins=8,
order="linear")
self.f1 = nn.Sequential(nn.Linear(z_dim, z_dim),
nn.LeakyReLU(0.2))
self.f2 = nn.Sequential(nn.Linear(z_dim, z_dim),
nn.LeakyReLU(0.2))
self.coff = nn.Linear(z_dim, z_dim)
# self.spline.load_state_dict(torch.load("/home/yuewen/spline.pth"))
self.lr = lr
self.lag = lag
self.beta = beta
self.z_dim = z_dim
self.alpha = alpha
self.gamma = gamma
self.input_dim = input_dim
self.rate_prior = rate_prior
self.decoder_dist = decoder_dist
self.nonlinear_type = nonlinear_type
self.b = nn.Parameter(0.01 * torch.randn(1, z_dim))
# base distribution for calculation of log prob under the model
self.register_buffer('base_dist_var', torch.eye(self.z_dim))
self.register_buffer('base_dist_mean', torch.zeros(self.z_dim))
@property
def base_dist(self):
return D.MultivariateNormal(self.base_dist_mean, self.base_dist_var)
def forward(self, batch):
xt, xt_ = batch["xt"], batch["xt_"]
batch_size, _, _ = xt.shape
x = torch.cat((xt, xt_), dim=1)
x = x.view(-1, self.input_dim)
return self.net(x)
def compute_cross_ent_laplace(self, mean, logvar, rate_prior):
var = torch.exp(logvar)
sigma = torch.sqrt(var)
ce = - torch.log(rate_prior / 2) + rate_prior * sigma *\
np.sqrt(2 / np.pi) * torch.exp(- mean**2 / (2 * var)) -\
rate_prior * mean * (
1 - 2 * self.normal_dist.cdf(mean / sigma))
return ce
def training_step(self, batch, batch_idx):
xt, xt_ = batch["xt"], batch["xt_"]
batch_size, _, _ = xt.shape
x = torch.cat((xt, xt_), dim=1)
x = x.view(-1, self.input_dim)
x_recon, mu, logvar, z = self.net(x)
# Normal VAE loss: recon_loss + kld_loss
recon_loss = reconstruction_loss(x, x_recon, self.decoder_dist)
mu = mu.view(batch_size, -1, self.z_dim)
logvar = logvar.view(batch_size, -1, self.z_dim)
z = z.view(batch_size, -1, self.z_dim)
mut, mut_ = mu[:,:-1,:], mu[:,-1:,:]
logvart, logvart_ = logvar[:,:-1,:], logvar[:,-1:,:]
zt, zt_ = z[:,:-1,:], z[:,-1:,:]
# Past KLD divergenve
p1 = D.Normal(torch.zeros_like(mut), torch.ones_like(logvart))
q1 = D.Normal(mut, torch.exp(logvart / 2))
log_qz_normal = q1.log_prob(zt)
log_pz_normal = p1.log_prob(zt)
kld_normal = log_qz_normal - log_pz_normal
kld_normal = torch.sum(torch.sum(kld_normal,dim=-1),dim=-1).mean()
'''
have question on this part...
'''
# Current KLD divergence
if self.nonlinear_type == "gaussian":
zt_mid = self.f1(zt)
noise_term = nn.Parameter(torch.randn(zt.shape).cuda())
zt_mid = zt_mid + self.coff(noise_term)
zt_bar = self.f2(zt_mid)
recon_zt_ = reconstruction_loss(zt_, zt_bar, self.decoder_dist)
else:
zt_mid = self.f1(zt)
noise_term = nn.Parameter(torch.distributions.laplace.Laplace(0,1).rsample(zt.shape).cuda())
zt_mid = zt_mid + self.coff(noise_term)
zt_bar = self.f2(zt_mid)
recon_zt_ = reconstruction_loss(zt_, zt_bar, self.decoder_dist)
coff = torch.abs(self.coff.weight).mean()
# # Current KLD divergence
# ut = self.trans_func(zt)
# ut = torch.sum(ut, dim=1) + self.b
# epst_ = zt_.squeeze() + ut
# et_, logabsdet = self.spline(epst_)
# log_pz_laplace = self.base_dist.log_prob(et_) + logabsdet
# q_laplace = D.Normal(mut_, torch.exp(logvart_ / 2))
# log_qz_laplace = q_laplace.log_prob(zt_)
# kld_laplace = torch.sum(torch.sum(log_qz_laplace,dim=-1),dim=-1) - log_pz_laplace
# kld_laplace = kld_laplace.mean()
loss = (self.lag+1) * recon_loss + self.beta * kld_normal + self.gamma * recon_zt_ + self.alpha * coff
# loss = (self.lag+1) * recon_loss + self.beta * kld_normal
zt_recon = mu[:,-1,:].T.detach().cpu().numpy()
zt_true = batch["yt_"].squeeze().T.detach().cpu().numpy()
mcc = compute_mcc(zt_recon, zt_true, "Pearson")
self.log("train_mcc", mcc)
self.log("train_coff", coff)
self.log("train_elbo_loss", loss)
self.log("train_recon_zt_", recon_zt_)
self.log("train_recon_loss", recon_loss)
self.log("train_kld_normal", kld_normal)
return loss
def validation_step(self, batch, batch_idx):
xt, xt_ = batch["xt"], batch["xt_"]
batch_size, _, _ = xt.shape
x = torch.cat((xt, xt_), dim=1)
x = x.view(-1, self.input_dim)
x_recon, mu, logvar, z = self.net(x)
# Normal VAE loss: recon_loss + kld_loss
recon_loss = reconstruction_loss(x, x_recon, self.decoder_dist)
mu = mu.view(batch_size, -1, self.z_dim)
logvar = logvar.view(batch_size, -1, self.z_dim)
z = z.view(batch_size, -1, self.z_dim)
mut, mut_ = mu[:,:-1,:], mu[:,-1:,:]
logvart, logvart_ = logvar[:,:-1,:], logvar[:,-1:,:]
zt, zt_ = z[:,:-1,:], z[:,-1:,:]
# Past KLD divergenve
p1 = D.Normal(torch.zeros_like(mut), torch.ones_like(logvart))
q1 = D.Normal(mut, torch.exp(logvart / 2))
log_qz_normal = q1.log_prob(zt)
log_pz_normal = p1.log_prob(zt)
kld_normal = log_qz_normal - log_pz_normal
kld_normal = torch.sum(torch.sum(kld_normal,dim=-1),dim=-1).mean()
# Current KLD divergence
if self.nonlinear_type == "gaussian":
zt_mid = self.f1(zt)
noise_term = nn.Parameter(torch.randn(zt.shape).cuda())
zt_mid = zt_mid + self.coff(noise_term)
zt_bar = self.f2(zt_mid)
recon_zt_ = reconstruction_loss(zt_, zt_bar, self.decoder_dist)
else:
zt_mid = self.f1(zt)
noise_term = nn.Parameter(torch.distributions.laplace.Laplace(0,1).rsample(zt.shape).cuda())
zt_mid = zt_mid + self.coff(noise_term)
zt_bar = self.f2(zt_mid)
recon_zt_ = reconstruction_loss(zt_, zt_bar, self.decoder_dist)
coff = torch.abs(self.coff.weight).mean()
loss = (self.lag+1) * recon_loss + self.beta * kld_normal + self.gamma * recon_zt_ + self.alpha * coff
# loss = (self.lag+1) * recon_loss + self.beta * kld_normal
zt_recon = mu[:,-1,:].T.detach().cpu().numpy()
zt_true = batch["yt_"].squeeze().T.detach().cpu().numpy()
mcc = compute_mcc(zt_recon, zt_true, "Pearson")
self.log("val_mcc", mcc)
self.log("val_coff", coff)
self.log("val_elbo_loss", loss)
self.log("val_recon_zt_", recon_zt_)
self.log("val_recon_loss", recon_loss)
self.log("val_kld_normal", kld_normal)
return loss
def sample(self, xt):
batch_size = xt.shape[0]
e = torch.randn(batch_size, self.z_dim).to(xt.device)
eps, _ = self.spline.inverse(e)
return eps
def reconstruct(self):
return self.forward(batch)[0]
def configure_optimizers(self):
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.parameters()), lr=self.lr)
# An scheduler is optional, but can help in flows to get the last bpd improvement
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.99)
return [optimizer], [scheduler]
| [
"numpy.sqrt",
"numpy.log",
"torch.sqrt",
"torch.exp",
"torch.nn.functional.sigmoid",
"torch.sum",
"torch.eye",
"torch.zeros_like",
"torch.randn",
"torch.ones_like",
"torch.abs",
"torch.nn.functional.mse_loss",
"torch.nn.LeakyReLU",
"torch.distributions.laplace.Laplace",
"torch.norm",
"... | [((1386, 1423), 'torch.norm', 'torch.norm', (['diff'], {'dim': '(1)', 'keepdim': '(True)'}), '(diff, dim=1, keepdim=True)\n', (1396, 1423), False, 'import torch\n'), ((1550, 1565), 'torch.abs', 'torch.abs', (['diff'], {}), '(diff)\n', (1559, 1565), False, 'import torch\n'), ((2875, 2898), 'torch.nn.Linear', 'nn.Linear', (['z_dim', 'z_dim'], {}), '(z_dim, z_dim)\n', (2884, 2898), True, 'import torch.nn as nn\n'), ((3631, 3692), 'torch.distributions.MultivariateNormal', 'D.MultivariateNormal', (['self.base_dist_mean', 'self.base_dist_var'], {}), '(self.base_dist_mean, self.base_dist_var)\n', (3651, 3692), True, 'import torch.distributions as D\n'), ((3822, 3849), 'torch.cat', 'torch.cat', (['(xt, xt_)'], {'dim': '(1)'}), '((xt, xt_), dim=1)\n', (3831, 3849), False, 'import torch\n'), ((4003, 4020), 'torch.exp', 'torch.exp', (['logvar'], {}), '(logvar)\n', (4012, 4020), False, 'import torch\n'), ((4038, 4053), 'torch.sqrt', 'torch.sqrt', (['var'], {}), '(var)\n', (4048, 4053), False, 'import torch\n'), ((4458, 4485), 'torch.cat', 'torch.cat', (['(xt, xt_)'], {'dim': '(1)'}), '((xt, xt_), dim=1)\n', (4467, 4485), False, 'import torch\n'), ((7517, 7544), 'torch.cat', 'torch.cat', (['(xt, xt_)'], {'dim': '(1)'}), '((xt, xt_), dim=1)\n', (7526, 7544), False, 'import torch\n'), ((10323, 10380), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer', '(1)'], {'gamma': '(0.99)'}), '(optimizer, 1, gamma=0.99)\n', (10354, 10380), False, 'import torch\n'), ((1121, 1139), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1128, 1139), True, 'import numpy as np\n'), ((1204, 1228), 'numpy.log', 'np.log', (['(2 * np.pi * np.e)'], {}), '(2 * np.pi * np.e)\n', (1210, 1228), True, 'import numpy as np\n'), ((2667, 2690), 'torch.nn.Linear', 'nn.Linear', (['z_dim', 'z_dim'], {}), '(z_dim, z_dim)\n', (2676, 2690), True, 'import torch.nn as nn\n'), ((2725, 2742), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2737, 
2742), True, 'import torch.nn as nn\n'), ((2777, 2800), 'torch.nn.Linear', 'nn.Linear', (['z_dim', 'z_dim'], {}), '(z_dim, z_dim)\n', (2786, 2800), True, 'import torch.nn as nn\n'), ((2835, 2852), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2847, 2852), True, 'import torch.nn as nn\n'), ((3476, 3497), 'torch.eye', 'torch.eye', (['self.z_dim'], {}), '(self.z_dim)\n', (3485, 3497), False, 'import torch\n'), ((3547, 3570), 'torch.zeros', 'torch.zeros', (['self.z_dim'], {}), '(self.z_dim)\n', (3558, 3570), False, 'import torch\n'), ((5059, 5080), 'torch.zeros_like', 'torch.zeros_like', (['mut'], {}), '(mut)\n', (5075, 5080), False, 'import torch\n'), ((5082, 5106), 'torch.ones_like', 'torch.ones_like', (['logvart'], {}), '(logvart)\n', (5097, 5106), False, 'import torch\n'), ((5136, 5158), 'torch.exp', 'torch.exp', (['(logvart / 2)'], {}), '(logvart / 2)\n', (5145, 5158), False, 'import torch\n'), ((8118, 8139), 'torch.zeros_like', 'torch.zeros_like', (['mut'], {}), '(mut)\n', (8134, 8139), False, 'import torch\n'), ((8141, 8165), 'torch.ones_like', 'torch.ones_like', (['logvart'], {}), '(logvart)\n', (8156, 8165), False, 'import torch\n'), ((8195, 8217), 'torch.exp', 'torch.exp', (['(logvart / 2)'], {}), '(logvart / 2)\n', (8204, 8217), False, 'import torch\n'), ((620, 686), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['x_recon', 'x'], {'size_average': '(False)'}), '(x_recon, x, size_average=False)\n', (654, 686), True, 'from torch.nn import functional as F\n'), ((896, 914), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['x_recon'], {}), '(x_recon)\n', (905, 914), True, 'from torch.nn import functional as F\n'), ((1093, 1110), 'torch.exp', 'torch.exp', (['logvar'], {}), '(logvar)\n', (1102, 1110), False, 'import torch\n'), ((3333, 3354), 'torch.randn', 'torch.randn', (['(1)', 'z_dim'], {}), '(1, z_dim)\n', (3344, 3354), False, 'import torch\n'), ((6132, 6159), 'torch.abs', 'torch.abs', 
(['self.coff.weight'], {}), '(self.coff.weight)\n', (6141, 6159), False, 'import torch\n'), ((9128, 9155), 'torch.abs', 'torch.abs', (['self.coff.weight'], {}), '(self.coff.weight)\n', (9137, 9155), False, 'import torch\n'), ((9888, 9923), 'torch.randn', 'torch.randn', (['batch_size', 'self.z_dim'], {}), '(batch_size, self.z_dim)\n', (9899, 9923), False, 'import torch\n'), ((779, 821), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['x_recon', 'x'], {'size_average': '(False)'}), '(x_recon, x, size_average=False)\n', (789, 821), True, 'from torch.nn import functional as F\n'), ((4070, 4095), 'torch.log', 'torch.log', (['(rate_prior / 2)'], {}), '(rate_prior / 2)\n', (4079, 4095), False, 'import torch\n'), ((4155, 4188), 'torch.exp', 'torch.exp', (['(-mean ** 2 / (2 * var))'], {}), '(-mean ** 2 / (2 * var))\n', (4164, 4188), False, 'import torch\n'), ((5326, 5355), 'torch.sum', 'torch.sum', (['kld_normal'], {'dim': '(-1)'}), '(kld_normal, dim=-1)\n', (5335, 5355), False, 'import torch\n'), ((8385, 8414), 'torch.sum', 'torch.sum', (['kld_normal'], {'dim': '(-1)'}), '(kld_normal, dim=-1)\n', (8394, 8414), False, 'import torch\n'), ((937, 979), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['x_recon', 'x'], {'size_average': '(False)'}), '(x_recon, x, size_average=False)\n', (947, 979), True, 'from torch.nn import functional as F\n'), ((4134, 4152), 'numpy.sqrt', 'np.sqrt', (['(2 / np.pi)'], {}), '(2 / np.pi)\n', (4141, 4152), True, 'import numpy as np\n'), ((5595, 5616), 'torch.randn', 'torch.randn', (['zt.shape'], {}), '(zt.shape)\n', (5606, 5616), False, 'import torch\n'), ((8591, 8612), 'torch.randn', 'torch.randn', (['zt.shape'], {}), '(zt.shape)\n', (8602, 8612), False, 'import torch\n'), ((5881, 5922), 'torch.distributions.laplace.Laplace', 'torch.distributions.laplace.Laplace', (['(0)', '(1)'], {}), '(0, 1)\n', (5916, 5922), False, 'import torch\n'), ((8877, 8918), 'torch.distributions.laplace.Laplace', 'torch.distributions.laplace.Laplace', (['(0)', '(1)'], 
{}), '(0, 1)\n', (8912, 8918), False, 'import torch\n')] |
###############################################################################
# Copyright (c) 2019, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
# Written by the Merlin dev team, listed in the CONTRIBUTORS file.
# <<EMAIL>>
#
# LLNL-CODE-797170
# All rights reserved.
# This file is part of Merlin, Version: 1.5.0.
#
# For details, see https://github.com/LLNL/merlin.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
"""Updated celery configuration."""
from __future__ import (
absolute_import,
print_function,
)
import logging
import os
import billiard
import psutil
from celery import Celery
from celery.signals import worker_process_init
import merlin.common.security.encrypt_backend_traffic
from merlin.config import (
broker,
results_backend,
)
from merlin.log_formatter import FORMATS
from merlin.router import route_for_task
LOG = logging.getLogger(__name__)
broker_ssl = True
results_ssl = False
try:
BROKER_URI = broker.get_connection_string()
LOG.info(f"broker: {broker.get_connection_string(include_password=False)}")
broker_ssl = broker.get_ssl_config()
LOG.info(f"broker_ssl = {broker_ssl}")
RESULTS_BACKEND_URI = results_backend.get_connection_string()
results_ssl = results_backend.get_ssl_config(celery_check=True)
LOG.info(
f"results: {results_backend.get_connection_string(include_password=False)}"
)
LOG.info(f"results: redis_backed_use_ssl = {results_ssl}")
except ValueError:
# These variables won't be set if running with '--local'.
BROKER_URI = None
RESULTS_BACKEND_URI = None
app = Celery(
"merlin",
broker=BROKER_URI,
backend=RESULTS_BACKEND_URI,
broker_use_ssl=broker_ssl,
redis_backend_use_ssl=results_ssl,
)
app.conf.update(
task_serializer="pickle", accept_content=["pickle"], result_serializer="pickle"
)
app.autodiscover_tasks(["merlin.common"])
app.conf.update(
task_acks_late=True,
task_reject_on_worker_lost=True,
task_publish_retry_policy={
"interval_start": 10,
"interval_step": 10,
"interval_max": 60,
},
redis_max_connections=100000,
)
# Set a one hour timeout to acknowledge a task before it's available to grab
# again.
app.conf.broker_transport_options = {"visibility_timeout": 7200, "max_connections": 100}
app.conf.update(broker_pool_limit=0)
# Task routing: call our default queue merlin
app.conf.task_routes = (route_for_task,)
app.conf.task_default_queue = "merlin"
# Log formatting
app.conf.worker_log_color = True
app.conf.worker_log_format = FORMATS["DEFAULT"]
app.conf.worker_task_log_format = FORMATS["WORKER"]
@worker_process_init.connect()
def setup(**kwargs):
"""
Set affinity for the worker on startup (works on toss3 nodes)
:param `**kwargs`: keyword arguments
"""
if "CELERY_AFFINITY" in os.environ and int(os.environ["CELERY_AFFINITY"]) > 1:
# Number of cpus between workers.
cpu_skip = int(os.environ["CELERY_AFFINITY"])
npu = psutil.cpu_count()
p = psutil.Process()
current = billiard.current_process()
prefork_id = current._identity[0] - 1 # range 0:nworkers-1
cpu_slot = (prefork_id * cpu_skip) % npu
p.cpu_affinity(list(range(cpu_slot, cpu_slot + cpu_skip)))
| [
"logging.getLogger",
"celery.signals.worker_process_init.connect",
"celery.Celery",
"merlin.config.broker.get_connection_string",
"psutil.Process",
"billiard.current_process",
"merlin.config.results_backend.get_connection_string",
"psutil.cpu_count",
"merlin.config.broker.get_ssl_config",
"merlin.... | [((2003, 2030), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2020, 2030), False, 'import logging\n'), ((2730, 2860), 'celery.Celery', 'Celery', (['"""merlin"""'], {'broker': 'BROKER_URI', 'backend': 'RESULTS_BACKEND_URI', 'broker_use_ssl': 'broker_ssl', 'redis_backend_use_ssl': 'results_ssl'}), "('merlin', broker=BROKER_URI, backend=RESULTS_BACKEND_URI,\n broker_use_ssl=broker_ssl, redis_backend_use_ssl=results_ssl)\n", (2736, 2860), False, 'from celery import Celery\n'), ((3765, 3794), 'celery.signals.worker_process_init.connect', 'worker_process_init.connect', ([], {}), '()\n', (3792, 3794), False, 'from celery.signals import worker_process_init\n'), ((2092, 2122), 'merlin.config.broker.get_connection_string', 'broker.get_connection_string', ([], {}), '()\n', (2120, 2122), False, 'from merlin.config import broker, results_backend\n'), ((2220, 2243), 'merlin.config.broker.get_ssl_config', 'broker.get_ssl_config', ([], {}), '()\n', (2241, 2243), False, 'from merlin.config import broker, results_backend\n'), ((2313, 2352), 'merlin.config.results_backend.get_connection_string', 'results_backend.get_connection_string', ([], {}), '()\n', (2350, 2352), False, 'from merlin.config import broker, results_backend\n'), ((2371, 2420), 'merlin.config.results_backend.get_ssl_config', 'results_backend.get_ssl_config', ([], {'celery_check': '(True)'}), '(celery_check=True)\n', (2401, 2420), False, 'from merlin.config import broker, results_backend\n'), ((4133, 4151), 'psutil.cpu_count', 'psutil.cpu_count', ([], {}), '()\n', (4149, 4151), False, 'import psutil\n'), ((4164, 4180), 'psutil.Process', 'psutil.Process', ([], {}), '()\n', (4178, 4180), False, 'import psutil\n'), ((4199, 4225), 'billiard.current_process', 'billiard.current_process', ([], {}), '()\n', (4223, 4225), False, 'import billiard\n'), ((2147, 2199), 'merlin.config.broker.get_connection_string', 'broker.get_connection_string', ([], {'include_password': '(False)'}), 
'(include_password=False)\n', (2175, 2199), False, 'from merlin.config import broker, results_backend\n'), ((2455, 2516), 'merlin.config.results_backend.get_connection_string', 'results_backend.get_connection_string', ([], {'include_password': '(False)'}), '(include_password=False)\n', (2492, 2516), False, 'from merlin.config import broker, results_backend\n')] |
from datetime import datetime, timedelta
from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField
from bot.data.config import STATIC_DIR
from bot.utils.logging import logger
# Module-level SQLite database handle; every model binds to it via its Meta.database.
db = SqliteDatabase(f"{STATIC_DIR}/db.sqlite3")
class User(Model):
    """
    Peewee model describing a Telegram user of the bot.
    """
    id = PrimaryKeyField(null=False, unique=True)
    user_id = IntegerField(null=False, unique=True)       # Telegram user id
    full_name = CharField(null=False, max_length=255)     # full account name
    username = CharField(null=True, max_length=128)       # @username, may be absent
    is_subscribe = BooleanField(null=False, default=False)
    # BUG FIX: pass the callable itself, not its result.  `datetime.now()`
    # was evaluated once at import time, so every new row was stamped with
    # the process start time instead of the actual creation time.
    created = DateTimeField(default=datetime.now)

    def add_user(self, user_id: int, full_name: str, username: str) -> bool:
        """
        Insert a new user into the database.

        :param user_id: Telegram user id
        :param username: Telegram username
        :param full_name: full account name
        :return: the created record (truthy) on success, False on failure
        """
        try:
            return self.create(user_id=user_id,
                               full_name=full_name,
                               username=username)
        except Exception as add_user_error:
            # Use the project logger instead of print, matching Archive's style,
            # and return an explicit falsy value so callers can test the result.
            logger.error("Failed to add user %s: %s", user_id, add_user_error)
            return False

    def get_user(self, user_id: int) -> [Model, bool]:
        """
        Look a user up by Telegram id.

        :param user_id: Telegram user id
        :return: the User record if found, otherwise False
        """
        res = self.get_or_none(User.user_id == user_id)
        if res:  # User is found
            return res
        return False

    class Meta:
        database = db
class Archive(Model):
    """
    Peewee model storing one recording (archive) entry.
    """
    id = PrimaryKeyField(null=False, unique=True)
    start_date = DateTimeField(null=False)   # beginning of the recorded interval
    finish_date = DateTimeField(null=False)  # end of the recorded interval (start + 1h, see add_archive)
    file_id = CharField(null=False, max_length=50)  # Telegram file id of the stored recording

    class Meta:
        database = db

    def get_archive(self, hour, day, month, year):
        """
        Fetch the archive record for the given hour, day, month and year.

        :param hour: hour of the recording
        :param day: day of the recording
        :param month: month of the recording
        :param year: year of the recording
        :return: matching Archive record, or None if nothing is found
        """
        # NOTE(review): the query compares start_date against a "YYYY-MM-DD HH"
        # string prefix — this relies on SQLite storing datetimes as ISO text
        # so that the comparison is lexicographic; confirm against the schema.
        archive_date = datetime.strptime(f"{year}/{month}/{day}-{hour}", "%Y/%m/%d-%H").strftime("%Y-%m-%d %H")
        return self.get_or_none(Archive.start_date >= archive_date)

    def add_archive(self, start_date, file_id):
        """
        Insert a recording into the database, or update the file id of an
        already-existing record with the same start_date.

        :param start_date: start of the recorded interval
        :param file_id: Telegram file id of the recording
        :return: the created or updated Archive record
        """
        check = self.get_or_none(Archive.start_date == start_date)
        if check:
            # Record already exists — just point it at the new file.
            check.file_id = file_id
            check.save()
            logger.info(f"Update archive [{start_date}] with file [{file_id}]")
            return self.get(Archive.start_date == start_date)
        return self.create(
            start_date=start_date,
            finish_date=start_date + timedelta(hours=1),
            file_id=file_id
        )
# Create the tables at import time if they do not exist yet
# (safe=True emits CREATE TABLE IF NOT EXISTS).
User.create_table(safe=True)
Archive.create_table(safe=True)
# Module-level model instances used by the rest of the bot for queries.
user = User()
archive = Archive()
| [
"peewee.BooleanField",
"peewee.CharField",
"peewee.SqliteDatabase",
"datetime.datetime.strptime",
"bot.utils.logging.logger.info",
"peewee.IntegerField",
"datetime.datetime.now",
"peewee.PrimaryKeyField",
"peewee.DateTimeField",
"datetime.timedelta"
] | [((237, 279), 'peewee.SqliteDatabase', 'SqliteDatabase', (['f"""{STATIC_DIR}/db.sqlite3"""'], {}), "(f'{STATIC_DIR}/db.sqlite3')\n", (251, 279), False, 'from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField\n'), ((372, 412), 'peewee.PrimaryKeyField', 'PrimaryKeyField', ([], {'null': '(False)', 'unique': '(True)'}), '(null=False, unique=True)\n', (387, 412), False, 'from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField\n'), ((427, 464), 'peewee.IntegerField', 'IntegerField', ([], {'null': '(False)', 'unique': '(True)'}), '(null=False, unique=True)\n', (439, 464), False, 'from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField\n'), ((481, 518), 'peewee.CharField', 'CharField', ([], {'null': '(False)', 'max_length': '(255)'}), '(null=False, max_length=255)\n', (490, 518), False, 'from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField\n'), ((534, 570), 'peewee.CharField', 'CharField', ([], {'null': '(True)', 'max_length': '(128)'}), '(null=True, max_length=128)\n', (543, 570), False, 'from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField\n'), ((590, 629), 'peewee.BooleanField', 'BooleanField', ([], {'null': '(False)', 'default': '(False)'}), '(null=False, default=False)\n', (602, 629), False, 'from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField\n'), ((1737, 1777), 'peewee.PrimaryKeyField', 'PrimaryKeyField', ([], {'null': '(False)', 'unique': '(True)'}), '(null=False, unique=True)\n', (1752, 1777), False, 'from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField\n'), ((1795, 1820), 'peewee.DateTimeField', 'DateTimeField', ([], {'null': '(False)'}), '(null=False)\n', 
(1808, 1820), False, 'from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField\n'), ((1839, 1864), 'peewee.DateTimeField', 'DateTimeField', ([], {'null': '(False)'}), '(null=False)\n', (1852, 1864), False, 'from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField\n'), ((1879, 1915), 'peewee.CharField', 'CharField', ([], {'null': '(False)', 'max_length': '(50)'}), '(null=False, max_length=50)\n', (1888, 1915), False, 'from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField\n'), ((666, 680), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (678, 680), False, 'from datetime import datetime, timedelta\n'), ((2711, 2778), 'bot.utils.logging.logger.info', 'logger.info', (['f"""Update archive [{start_date}] with file [{file_id}]"""'], {}), "(f'Update archive [{start_date}] with file [{file_id}]')\n", (2722, 2778), False, 'from bot.utils.logging import logger\n'), ((2215, 2279), 'datetime.datetime.strptime', 'datetime.strptime', (['f"""{year}/{month}/{day}-{hour}"""', '"""%Y/%m/%d-%H"""'], {}), "(f'{year}/{month}/{day}-{hour}', '%Y/%m/%d-%H')\n", (2232, 2279), False, 'from datetime import datetime, timedelta\n'), ((2942, 2960), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (2951, 2960), False, 'from datetime import datetime, timedelta\n')] |
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" utilities for fused batchnorm op """
from typing import Union
import numpy as np
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge
from aimet_common.utils import AimetLogger
from aimet_tensorflow.utils import constants
# Area logger for the tensorflow utils portion of AIMET.
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)

# Shared assertion message for when a BN op's graph does not match the expected pattern.
_BN_STRUCTURE_ERROR_MSG = "BN op doesn't have the expected structure"
class BNUtils:
""" Batch Norm/ fused Batch Norm op related utils"""
    @staticmethod
    def skip_bn_op(sess: tf.compat.v1.Session, bn_op: tf.Operation, in_tensor: tf.Tensor, out_tensor: tf.Tensor):
        """
        Skip given bn op specified (fused batch norm op).
        Note: supports only Fused bn op types.

        :param sess: Tensorflow session
        :param bn_op: Batchnorm op to be skipped
        :param in_tensor: Input tensor to the batchnorm op
        :param out_tensor: Output tensor of the batchnorm op
        """
        if in_tensor is None or out_tensor is None:
            logger.error("Error, input and output tensors must be provided for skipping the op")
            assert False
        else:
            with sess.graph.as_default():
                if bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
                    # Bypass the BN via tf.contrib graph_editor: detach, then
                    # reroute so downstream consumers read in_tensor directly.
                    ge.detach_outputs(in_tensor.op)
                    ge.reroute_ts(in_tensor, out_tensor)
                    # A skipped BN must not keep updating its moving averages.
                    BNUtils.remove_bn_op_from_update_ops(sess, bn_op)
                else:
                    logger.error("Error, Unknown BN op")
                    assert False
    @staticmethod
    def _get_tensor_read_var_op_trainable_bn_op(input_tensor: tf.Tensor) -> tf.Tensor:
        """
        Generic helper to find a read op tensor associated with input tensor that can be evaluated, when the bn op is
        marked trainable.

        :param input_tensor: Input tensor to find corresponding read op tensor that can be evaluated
        :return: read var op type tensor as tf.Tensor type.
        """
        logger.debug('Fetching params from trainable BN op type')
        assert input_tensor.op.inputs[0].op.inputs is not None
        # inputs of 0 is beta tensor , get readVarOp associated with it
        var_tensor = input_tensor.op.inputs[0].op.inputs[0]
        assert var_tensor.op.outputs is not None
        # the underlying variable feeds multiple consumers; we need the plain read op
        assert len(var_tensor.consumers()) >= 3
        tensor_consumers = var_tensor.consumers()
        var_read_tensor = None
        # get read variable op tensor from these consumers
        # do not pick the one with _1 , it is not fetch-able
        for consumer in tensor_consumers:
            if consumer.type == 'ReadVariableOp' and 'ReadVariableOp_1' not in consumer.name:
                assert consumer.outputs is not None
                var_read_tensor = consumer.outputs[0]
                break
        assert var_read_tensor is not None
        return var_read_tensor
    @staticmethod
    def get_beta_read_op(bn_op: tf.Operation) -> tf.Operation:
        """
        Get beta read op from BN op specified.

        :param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
        :return: beta read op
        """
        if bn_op.type in ['Mul']:
            # For regular BN
            # mul_1 -> add_1 <-- sub <-- beta_read
            assert len(bn_op.outputs) >= 1, _BN_STRUCTURE_ERROR_MSG
            add_1 = bn_op.outputs[0].consumers()[0]
            assert len(add_1.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
            sub = add_1.inputs[1].op
            assert len(sub.inputs) >= 1, _BN_STRUCTURE_ERROR_MSG
            beta_read = sub.inputs[0].op
        elif bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
            # Fused BN: beta is a direct input at a fixed index.
            assert len(bn_op.inputs) == 5
            beta_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['beta']].op
            if beta_read.type == 'Switch':      # tf slim bn using training tensor form
                beta_read = beta_read.inputs[0].op
                assert 'read' in beta_read.name
        else:
            logger.error("Error, unknown BN op")
            assert False
        assert beta_read.type in ['ReadVariableOp', 'Identity']     # Will be identity for tf slim BNs
        return beta_read
@staticmethod
def _get_beta_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
"""
Get beta readVariableOp tensor from BN op specified.
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op beta readVariableOp type, as tf.Tensor
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
beta_read_tensor = BNUtils.get_beta_read_op(bn_op).outputs[0]
assert beta_read_tensor is not None
if beta_read_tensor.op.inputs[0].op.type == 'Switch':
logger.debug('Fetching params from trainable BN op type')
beta_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(beta_read_tensor)
return beta_read_tensor
@staticmethod
def get_beta_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get beta readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op beta readVariableOp type, as tf.Tensor
"""
try:
# try name based tensor look up for Keras layers
beta_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.beta)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
beta_read_tensor = BNUtils._get_beta_read_var_op_tensor_using_structure(bn_op)
return beta_read_tensor
@staticmethod
def get_beta_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get beta param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op as tf.Operation
:return: beta tensor as numpy data
"""
beta_tensor = BNUtils.get_beta_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(beta_tensor)
return numpy_data
    @staticmethod
    def get_gamma_as_read_op(bn_op: tf.Operation) -> tf.Operation:
        """
        Get gamma read op from BN op specified.

        :param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
        :return: gamma read op
        """
        if bn_op.type in ['Mul']:
            # For regular BN
            # mul_1 <-- mul <-- gamma_read <-- gamma_tensor
            assert len(bn_op.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
            mul = bn_op.inputs[1].op
            assert len(mul.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
            gamma_read = mul.inputs[1].op
        elif bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
            # Fused BN: gamma is a direct input at a fixed index.
            assert len(bn_op.inputs) == 5
            gamma_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['gamma']].op
            if gamma_read.type == 'Switch':     # tf slim bn using training tensor form
                gamma_read = gamma_read.inputs[0].op
            # Const occurs when the BN was built with scale=False (gamma frozen to 1s)
            assert 'read' in gamma_read.name or gamma_read.type == 'Const'
        else:
            logger.error("Error, unknown BN op")
            assert False
        assert gamma_read.type in ['ReadVariableOp', 'Identity', 'Const']   # Will be identity for tf slim BNs
        return gamma_read
@staticmethod
def _get_gamma_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
"""
Get the gamma read var op tensor associated with the batchnorm op.
:param bn_op: Batchnorm op to get gamma read var op tensor from
:return: Gamma read var op tensor associated with bn_op
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
gamma_read_tensor = BNUtils.get_gamma_as_read_op(bn_op).outputs[0]
assert gamma_read_tensor is not None
if gamma_read_tensor.op.inputs and gamma_read_tensor.op.inputs[0].op.type == 'Switch':
logger.debug('Fetching params from trainable BN op type')
gamma_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(gamma_read_tensor)
return gamma_read_tensor
@staticmethod
def get_gamma_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get the gamma read var op tensor associated with the batchnorm op.
:param graph: TensorFlow graph
:param bn_op: Batchnorm op to get gamma read var op tensor from
:return: Gamma read var op tensor associated with bn_op
"""
try:
# try name based tensor look up for Keras layers
gamma_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.gamma)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
gamma_read_tensor = BNUtils._get_gamma_read_var_op_tensor_using_structure(bn_op)
return gamma_read_tensor
@staticmethod
def get_gamma_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get gamma param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
:return: gamma as numpy data
"""
gamma_tensor = BNUtils.get_gamma_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(gamma_tensor)
return numpy_data
@staticmethod
def _bn_op_var_struct_1(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_variance op corresponding to batchnorm with training tensor.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
rsqrt_op = mul_op.inputs[0].op
assert rsqrt_op.type == 'Rsqrt'
add_op = rsqrt_op.inputs[0].op
assert add_op.type == 'AddV2'
merge_op = add_op.inputs[0].op
assert merge_op.type == 'Merge'
read_op = merge_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_var_struct_2(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_variance op corresponding to batchnorm with training=True.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
rsqrt_op = mul_op.inputs[0].op
assert rsqrt_op.type == 'Rsqrt'
add_op = rsqrt_op.inputs[0].op
assert add_op.type == 'AddV2'
squeeze_1_op = add_op.inputs[0].op
assert squeeze_1_op.type == 'Squeeze'
sub_op = squeeze_1_op.outputs[0].consumers()[0]
assert sub_op.type == 'Sub'
read_op = sub_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_var_struct_3(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_variance op corresponding to batchnorm with training=False.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
rsqrt_op = mul_op.inputs[0].op
assert rsqrt_op.type == 'Rsqrt'
add_op = rsqrt_op.inputs[0].op
assert add_op.type == 'AddV2'
read_op = add_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
    @staticmethod
    def get_moving_variance_as_read_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
        """
        Get moving variance read op from BN op specified.

        :param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
        :return: moving variance as read op
        """
        # register handlers for different structures
        bn_op_struct_for_variance_handlers = [BNUtils._bn_op_var_struct_1,
                                              BNUtils._bn_op_var_struct_2,
                                              BNUtils._bn_op_var_struct_3]
        if bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
            # Fused BN: moving variance is a direct input at a fixed index.
            assert len(bn_op.inputs) == 5
            moving_var_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['movingvariance']].op
            if moving_var_read.type == 'Switch':    # tf slim bn using training tensor form
                moving_var_read = moving_var_read.inputs[0].op
                assert 'read' in moving_var_read.name
        elif bn_op.type in ['Mul']:
            # For regular BN
            moving_var_read = None
            # try all handlers available; first structural match wins
            for handler in bn_op_struct_for_variance_handlers:
                if moving_var_read is None:
                    moving_var_read = handler(bn_op)
                else:
                    break
            assert moving_var_read is not None, _BN_STRUCTURE_ERROR_MSG
        else:
            logger.error("Error, unknown BN op")
            assert False
        if moving_var_read.type == 'Identity':
            assert len(moving_var_read.inputs) == 1, _BN_STRUCTURE_ERROR_MSG
        assert moving_var_read.type in ['ReadVariableOp', 'Const', 'Identity']
        return moving_var_read
    @staticmethod
    def _get_moving_variance_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
        """
        Get moving variance readVariableOp tensor from BN op specified.

        :param bn_op: FusedBatchNorm as tf.Operation
        :return: tensor associated with bn op moving variance readVariableOp type, as tf.Tensor
        """
        # only support fused BN
        assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
        moving_var_read_tensor = BNUtils.get_moving_variance_as_read_op(bn_op).outputs[0]
        assert moving_var_read_tensor is not None
        # Three mutually exclusive cases below: Const producer, Switch producer,
        # or an already-usable read tensor (falls through unchanged).
        if moving_var_read_tensor.op.type == 'Const':
            logger.debug("BN op has const type op for moving variance")
            # get the sub_1 op associated with moving variance read op
            assert len(bn_op.outputs) >= 2
            moving_avg_1_sub_1 = bn_op.outputs[2].consumers()[0]
            all_inputs = moving_avg_1_sub_1.inputs
            # among inputs figure out the read var op type that can be "evaluated"
            for input_t in all_inputs:
                if input_t.op.type == 'ReadVariableOp':
                    moving_var_read_tensor = input_t
                elif input_t.op.type == 'Identity' and 'read:0' in input_t.name:    # tf slim form
                    moving_var_read_tensor = input_t
        elif moving_var_read_tensor.op.inputs[0].op.type == 'Switch':
            # trainable BN: resolve the Switch to an evaluable read op
            logger.debug("Fetch moving var from a trainable BN op structure")
            moving_var_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(moving_var_read_tensor)
        return moving_var_read_tensor
@staticmethod
def get_moving_variance_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get moving variance readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving variance readVariableOp type, as tf.Tensor
"""
try:
# try name based tensor look up for Keras layers
moving_var_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.moving_variance)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
moving_var_read_tensor = BNUtils._get_moving_variance_read_var_op_tensor_using_structure(bn_op)
return moving_var_read_tensor
@staticmethod
def get_moving_variance_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get moving variance param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: moving variance as numpy data
"""
moving_var_tensor = BNUtils.get_moving_variance_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(moving_var_tensor)
return numpy_data
@staticmethod
def _bn_op_mean_struct_1(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_mean op corresponding to batchnorm with training tensor.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_mean
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
merge_op = mul_2_op.inputs[0].op
assert merge_op.type == 'Merge'
read_op = merge_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_mean_struct_2(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_mean op corresponding to batchnorm with training=True.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_mean
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
squeeze_op = mul_2_op.inputs[0].op
assert squeeze_op.type == 'Squeeze'
sub_op = squeeze_op.outputs[0].consumers()[0]
assert sub_op.type == 'Sub'
read_op = sub_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_mean_struct_3(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_mean op corresponding to batchnorm with training=False.
:param bn_op: bn_op obtained from connected graph using get_modules
a mul_1 op inside BN scope.
:return: Read operation for moving_mean
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
read_op = mul_2_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
    @staticmethod
    def get_moving_mean_as_read_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
        """
        Get moving mean read op from BN op specified.

        :param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
        :return: moving mean read op
        """
        if bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
            # Fused BN: moving mean is a direct input at a fixed index.
            assert len(bn_op.inputs) == 5
            moving_mean_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['movingmean']].op
            if moving_mean_read.type == 'Switch':       # tf slim bn using training tensor form
                moving_mean_read = moving_mean_read.inputs[0].op
                assert 'read' in moving_mean_read.name
        elif bn_op.type in ['Mul']:
            # For regular BN
            # mul_1 << - mul --> mul_2 <-- cond/merge <-- switch2 <-- moving mean read < moving mean tensor
            # inputs[1] is mul .op.inputs[1] is gamma:read op whose input is gamma tensor as variable v2
            # register handlers for different structures
            bn_op_struct_for_mean_handlers = [BNUtils._bn_op_mean_struct_1,
                                              BNUtils._bn_op_mean_struct_2,
                                              BNUtils._bn_op_mean_struct_3]
            moving_mean_read = None
            # try all handlers available; first structural match wins
            for handler in bn_op_struct_for_mean_handlers:
                if moving_mean_read is None:
                    moving_mean_read = handler(bn_op)
                else:
                    break
            assert moving_mean_read is not None, _BN_STRUCTURE_ERROR_MSG
        else:
            logger.error("Error, unknown BN op")
            assert False
        if moving_mean_read.type == 'Identity':
            assert len(moving_mean_read.inputs) == 1, _BN_STRUCTURE_ERROR_MSG
        assert moving_mean_read.type in ['ReadVariableOp', 'Const', 'Identity']
        return moving_mean_read
    @staticmethod
    def _get_moving_mean_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
        """
        Get moving mean readVariableOp tensor from BN op specified.

        :param bn_op: FusedBatchNorm as tf.Operation
        :return: tensor associated with bn op moving mean readVariableOp type, as tf.Tensor
        """
        # only support fused BN
        assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
        moving_mean_read_tensor = BNUtils.get_moving_mean_as_read_op(bn_op).outputs[0]
        assert moving_mean_read_tensor is not None
        # Three mutually exclusive cases below: Const producer, Switch producer,
        # or an already-usable read tensor (falls through unchanged).
        if moving_mean_read_tensor.op.type == 'Const':
            logger.debug("BN op has const type op for moving variance")
            # get the read var type from bn op
            # get the sub_1 op associated with moving mean read op
            assert len(bn_op.outputs) > 1
            moving_avg_sub_1 = bn_op.outputs[1].consumers()[0]
            all_inputs = moving_avg_sub_1.inputs
            # among inputs figure out the read var op type that can be "evaluated"
            for input_t in all_inputs:
                if input_t.op.type == 'ReadVariableOp':
                    moving_mean_read_tensor = input_t
                elif input_t.op.type == 'Identity' and 'read:0' in input_t.name:    # tf slim form
                    moving_mean_read_tensor = input_t
        elif moving_mean_read_tensor.op.inputs[0].op.type == 'Switch':
            # trainable BN: resolve the Switch to an evaluable read op
            logger.debug("Fetch moving var from a trainable BN op structure")
            moving_mean_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(moving_mean_read_tensor)
        return moving_mean_read_tensor
@staticmethod
def get_moving_mean_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get moving mean readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving mean readVariableOp type, as tf.Tensor
"""
try:
# try name based tensor look up for Keras layers
moving_mean_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.moving_mean)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
moving_mean_read_tensor = BNUtils._get_moving_mean_read_var_op_tensor_using_structure(bn_op)
return moving_mean_read_tensor
@staticmethod
def get_moving_mean_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get moving mean param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: moving mean as numpy data
"""
moving_mean_tensor = BNUtils.get_moving_mean_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(moving_mean_tensor)
return numpy_data
    @staticmethod
    def get_epsilon(bn_op: tf.Operation) -> float:
        """
        Returns epsilon extracted from given bn op.

        :param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
        :return: epsilon value
        """
        if bn_op.type in ['Mul']:
            # Non-fused BN: walk mul_1 -> mul -> rsqrt -> add; epsilon is the
            # second (const) input of the add op.
            assert len(bn_op.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
            mul = bn_op.inputs[1].op
            assert len(mul.inputs) >= 1, _BN_STRUCTURE_ERROR_MSG
            rsqrt = mul.inputs[0].op
            assert len(rsqrt.inputs) >= 1, _BN_STRUCTURE_ERROR_MSG
            add = rsqrt.inputs[0].op
            assert len(add.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
            epsilon = add.inputs[1].op
            numpy_epsilon = epsilon.get_attr('value').float_val[0]
        elif bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
            # epsilon can be derived as attribute value
            numpy_epsilon = bn_op.get_attr("epsilon")
        else:
            logger.error("Error, unknown BN op")
            assert False
        return numpy_epsilon
    @staticmethod
    def get_assign_moving_avg_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
        """
        Get assign_moving_avg op corresponding with the bn_op, if it exists.

        :param bn_op: Batchnorm op to search for corresponding assign_moving_avg op
        :return: assign_moving_op corresponding with the bn op, or None if it does not exist.
        """
        assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']
        assert len(bn_op.outputs) == 6 or len(bn_op.outputs) == 5
        # outputs[1] is the batch mean; its consumer chain leads to the
        # moving-average update: [Merge ->] Sub -> Mul -> AssignSub.
        if bn_op.outputs[1].consumers():
            child_op = bn_op.outputs[1].consumers()[0]
            if child_op.type == 'Merge':
                # training-tensor form inserts a cond/Merge before the Sub
                sub_op = child_op.outputs[0].consumers()[0]
            else:
                sub_op = child_op
            assert sub_op.type == 'Sub'
            mul_op = sub_op.outputs[0].consumers()[0]
            assert mul_op.type == 'Mul'
            assign_moving_avg_op = mul_op.outputs[0].consumers()[0]
            assert assign_moving_avg_op.type in ['AssignSub', 'AssignSubVariableOp']
            return assign_moving_avg_op
        return None
    @staticmethod
    def get_assign_moving_avg_1_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
        """
        Get assign_moving_avg_1 op corresponding with the bn_op, if it exists.

        :param bn_op: Batchnorm op to search for corresponding assign_moving_avg_1 op
        :return: assign_moving_avg_1 corresponding with the bn op, or None if it does not exist.
        """
        assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']
        assert len(bn_op.outputs) == 6 or len(bn_op.outputs) == 5
        # Same traversal as get_assign_moving_avg_op, but starting from
        # outputs[2] (the batch variance) instead of outputs[1].
        if bn_op.outputs[2].consumers():
            child_op = bn_op.outputs[2].consumers()[0]
            if child_op.type == 'Merge':
                # training-tensor form inserts a cond/Merge before the Sub
                sub_op = child_op.outputs[0].consumers()[0]
            else:
                sub_op = child_op
            assert sub_op.type == 'Sub'
            mul_op = sub_op.outputs[0].consumers()[0]
            assert mul_op.type == 'Mul'
            assign_moving_avg_op = mul_op.outputs[0].consumers()[0]
            assert assign_moving_avg_op.type in ['AssignSub', 'AssignSubVariableOp']
            return assign_moving_avg_op
        return None
@staticmethod
def remove_bn_op_from_update_ops(sess: tf.compat.v1.Session, bn_op: tf.Operation):
"""
Remove batchnorm assign_moving_avg and assign_moving_avg_1 ops from update ops.
:param sess: tf.compat.v1.Session
:param bn_op: BatchNorm operation whose assign_moving_avg and assign_moving_avg_1 ops should be removed.
"""
with sess.graph.as_default():
update_ops = tf.compat.v1.get_collection_ref(tf.compat.v1.GraphKeys.UPDATE_OPS)
assign_moving_avg_op = BNUtils.get_assign_moving_avg_op(bn_op)
assign_moving_avg_op_1 = BNUtils.get_assign_moving_avg_1_op(bn_op)
if assign_moving_avg_op and assign_moving_avg_op in update_ops:
update_ops.remove(assign_moving_avg_op)
logger.debug('Removed %s from update ops', assign_moving_avg_op.name)
if assign_moving_avg_op_1 and assign_moving_avg_op_1 in update_ops:
update_ops.remove(assign_moving_avg_op_1)
logger.debug('Removed %s from update ops', assign_moving_avg_op_1.name)
@staticmethod
def _get_bn_param_tensor_using_name(graph: tf.Graph, bn_op: tf.Operation, param_type: constants.BNOpParamType):
    """
    Helper to get a BN op's param read tensor.

    :param graph: TensorFlow graph
    :param bn_op: BN op from which the param read tensor is to be extracted
    :param param_type: param type for which the tensor is to be extracted, as
        constants.BNOpParamType (supported types are beta, gamma, moving_mean
        or moving_variance)
    :return: param read tensor
    :raises ValueError: if param_type is not a valid constants.BNOpParamType value
    """
    if param_type not in vars(constants.BNOpParamType).values():
        # Raise instead of `assert 0`: asserts are stripped under `python -O`,
        # which would silently let an invalid param_type through.
        raise ValueError('Error, _get_bn_param_tensor_using_name() invalid param type '
                         'requested: {}'.format(param_type))
    # The fused bn op name is either bn_name/FusedBatchNormV3 or
    # bn_name/cond/FusedBatchNormV3_1; only the leading bn_name scope is needed
    # to reconstruct the param tensor names.
    op_name = bn_op.name.split('/')[0]
    param_tensor_name = op_name + constants.BN_OP_PARAM_NAME_SUFFIX[param_type]
    param_tensor = graph.get_tensor_by_name(param_tensor_name)
    return param_tensor
@staticmethod
def _bn_op_momentum_struct_1(bn_op: tf.Operation) -> Union[float, None]:
    """
    Return momentum value corresponding to batchnorm with training tensor.

    Walks one specific op structure around the BN's moving-average update; any
    deviation (wrong op type, missing consumer) means this handler does not
    apply, and None is returned so get_momentum() can try the next handler.

    :param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
    :return: momentum value, or None if the structure does not match
    """
    try:
        mul_op = bn_op.inputs[1].op
        assert mul_op.type == 'Mul'
        mul_2_op = mul_op.outputs[0].consumers()[1]
        assert mul_2_op.type == 'Mul'
        # Merge/Switch pair indicates the cond structure created by a training tensor.
        merge_op = mul_2_op.inputs[0].op
        assert merge_op.type == 'Merge'
        switch_1_op = merge_op.outputs[0].consumers()[0]
        assert switch_1_op.type == 'Switch'
        sub_op = switch_1_op.outputs[1].consumers()[0]
        assert sub_op.type == 'Sub'
        assign_moving_avg_mul_op = sub_op.outputs[0].consumers()[0]
        assert assign_moving_avg_mul_op.type == 'Mul'
        decay_op = assign_moving_avg_mul_op.inputs[1].op
        assert decay_op.type == 'Const'
        decay = decay_op.get_attr('value').float_val[0]
        # The Const feeding the update Mul holds (1 - momentum), hence the inversion.
        return 1 - decay
    except:  # pylint: disable=bare-except
        # Bare except is deliberate: any structure mismatch just means
        # "this handler does not apply".
        return None
@staticmethod
def _bn_op_momentum_struct_2(bn_op: tf.Operation) -> Union[float, None]:
    """
    Return momentum value corresponding to batchnorm with training=True.

    Walks one specific op structure around the BN's moving-average update; any
    deviation means this handler does not apply, and None is returned so
    get_momentum() can try the next handler.

    :param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
    :return: momentum value, or None if the structure does not match
    """
    try:
        mul_op = bn_op.inputs[1].op
        assert mul_op.type == 'Mul'
        mul_2_op = mul_op.outputs[0].consumers()[1]
        assert mul_2_op.type == 'Mul'
        # With training=True there is no cond structure: batch stats arrive via
        # a Squeeze instead of the Merge/Switch pair seen in struct_1.
        squeeze_op = mul_2_op.inputs[0].op
        assert squeeze_op.type == 'Squeeze'
        sub_op = squeeze_op.outputs[0].consumers()[0]
        assert sub_op.type == 'Sub'
        assign_moving_avg_mul_op = sub_op.outputs[0].consumers()[0]
        assert assign_moving_avg_mul_op.type == 'Mul'
        decay_op = assign_moving_avg_mul_op.inputs[1].op
        assert decay_op.type == 'Const'
        decay = decay_op.get_attr('value').float_val[0]
        # The Const holds (1 - momentum), so invert to report momentum.
        return 1 - decay
    except:  # pylint: disable=bare-except
        # Structure mismatch of any kind means "handler does not apply".
        return None
@staticmethod
def _fused_bn_op_momentum_struct_1(bn_op: tf.Operation) -> Union[float, None]:
    """
    Return momentum value corresponding to fused batchnorm with training tensor.

    Walks one specific op structure around the fused BN's moving-average update;
    any deviation means this handler does not apply, and None is returned so
    get_momentum() can try the next handler.

    :param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
    :return: momentum value, or None if the structure does not match
    """
    try:
        # outputs[1] is a batch-statistics output; the Merge consumer indicates
        # the cond structure created by a training tensor.
        merge_1_op = bn_op.outputs[1].consumers()[0]
        assert merge_1_op.type == 'Merge'
        sub_op = merge_1_op.outputs[0].consumers()[0]
        assert sub_op.type == 'Sub'
        mul_op = sub_op.outputs[0].consumers()[0]
        assert mul_op.type == 'Mul'
        sub_2_op = mul_op.inputs[1].op
        assert sub_2_op.type == 'Sub'
        merge_op = sub_2_op.inputs[1].op
        assert merge_op.type == 'Merge'
        decay_op = merge_op.inputs[1].op
        assert decay_op.type == 'Const'
        decay = decay_op.get_attr('value').float_val[0]
        # Unlike the non-fused handlers, this Const already holds the momentum
        # value itself, so no inversion is needed.
        return decay
    except:  # pylint: disable=bare-except
        # Structure mismatch of any kind means "handler does not apply".
        return None
@staticmethod
def _fused_bn_op_momentum_struct_2(bn_op: tf.Operation) -> Union[float, None]:
    """
    Return momentum value corresponding to fused batchnorm with training=True.

    Walks one specific op structure around the fused BN's moving-average update;
    any deviation means this handler does not apply, and None is returned so
    get_momentum() can try the next handler.

    :param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
    :return: momentum value, or None if the structure does not match
    """
    try:
        # With training=True there is no Merge/cond structure; the Sub consumes
        # the batch-statistics output directly (contrast struct_1).
        sub_op = bn_op.outputs[1].consumers()[0]
        assert sub_op.type == 'Sub'
        mul_op = sub_op.outputs[0].consumers()[0]
        assert mul_op.type == 'Mul'
        sub_2_op = mul_op.inputs[1].op
        assert sub_2_op.type == 'Sub'
        decay_op = sub_2_op.inputs[1].op
        assert decay_op.type == 'Const'
        decay = decay_op.get_attr('value').float_val[0]
        # This Const already holds the momentum value itself, so no inversion.
        return decay
    except:  # pylint: disable=bare-except
        # Structure mismatch of any kind means "handler does not apply".
        return None
@staticmethod
def get_momentum(bn_op: tf.Operation) -> float:
    """
    Return the momentum extracted from the given bn op. If the bn op is in
    training=False mode, momentum will be None.

    :param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
    :return: momentum value
    """
    fused_handlers = [BNUtils._fused_bn_op_momentum_struct_1,
                      BNUtils._fused_bn_op_momentum_struct_2]
    # Map BN op type to the ordered list of structure handlers to try.
    handlers_by_op_type = {
        'Mul': [BNUtils._bn_op_momentum_struct_1,
                BNUtils._bn_op_momentum_struct_2],
        'FusedBatchNormV3': fused_handlers,
        'FusedBatchNorm': fused_handlers,
    }
    if bn_op.type not in handlers_by_op_type:
        logger.error("Error, unknown BN op")
        assert False
    decay = None
    # Try each known graph structure until one of them matches.
    for handler in handlers_by_op_type.get(bn_op.type, []):
        decay = handler(bn_op)
        if decay is not None:
            break
    return decay
@staticmethod
def get_training(bn_op: tf.Operation) -> Union[None, bool, tf.Tensor]:
    """
    Returns either a boolean of whether the BN op training mode is True or False, or the is_training tensor
    feeding into the BN op if it is using a tensor to determine the mode dynamically.

    :param bn_op: bn_op obtained in the connected graph
    :return: True or False for training mode, or tf.Tensor that determines the mode dynamically.
    """
    assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
    if bn_op.type == 'FusedBatchNormV3' or bn_op.type == 'FusedBatchNorm':
        # A 'FusedBatchNormV3_1' name indicates the BN sits inside a cond
        # structure, i.e. it was built with a training *tensor*; trace back
        # through the Switch and its predicate Identity to the mode tensor.
        if 'FusedBatchNormV3_1' in bn_op.name:
            switch_op = bn_op.inputs[0].op
            pred_id_op = switch_op.inputs[1].op
            training = pred_id_op.inputs[0]
        else:
            # Statically configured fused BN: the mode is an op attribute.
            training = bn_op.get_attr('is_training')
        return training
    # Non fused batchnorm case: infer the mode from the op feeding the
    # normalization chain Mul <- Rsqrt <- AddV2 (presumably variance + epsilon).
    mul_op = bn_op.inputs[1].op
    assert mul_op.type == 'Mul'
    rsqrt_op = mul_op.inputs[0].op
    assert rsqrt_op.type == 'Rsqrt'
    add_op = rsqrt_op.inputs[0].op
    assert add_op.type == 'AddV2'
    add_input_op = add_op.inputs[0].op
    if add_input_op.type == 'Squeeze':
        # Batch statistics feed the add -> graph was built with training=True.
        return True
    if add_input_op.type == 'ReadVariableOp':
        # Moving statistics read from a variable -> training=False.
        return False
    if add_input_op.type == 'Merge':
        # Merge of the two paths -> training mode is chosen dynamically;
        # return the predicate tensor behind the Switch.
        switch_op = add_input_op.inputs[1].op
        assert switch_op.type == 'Switch'
        pred_id_op = switch_op.inputs[1].op
        assert pred_id_op.type == 'Identity'
        return pred_id_op.inputs[0]
    logger.error('Error, unknown BN structure')
    return None
| [
"tensorflow.compat.v1.get_collection_ref",
"tensorflow.contrib.graph_editor.detach_outputs",
"aimet_common.utils.AimetLogger.get_area_logger",
"tensorflow.contrib.graph_editor.reroute_ts"
] | [((2155, 2210), 'aimet_common.utils.AimetLogger.get_area_logger', 'AimetLogger.get_area_logger', (['AimetLogger.LogAreas.Utils'], {}), '(AimetLogger.LogAreas.Utils)\n', (2182, 2210), False, 'from aimet_common.utils import AimetLogger\n'), ((31151, 31217), 'tensorflow.compat.v1.get_collection_ref', 'tf.compat.v1.get_collection_ref', (['tf.compat.v1.GraphKeys.UPDATE_OPS'], {}), '(tf.compat.v1.GraphKeys.UPDATE_OPS)\n', (31182, 31217), True, 'import tensorflow as tf\n'), ((3152, 3183), 'tensorflow.contrib.graph_editor.detach_outputs', 'ge.detach_outputs', (['in_tensor.op'], {}), '(in_tensor.op)\n', (3169, 3183), True, 'from tensorflow.contrib import graph_editor as ge\n'), ((3204, 3240), 'tensorflow.contrib.graph_editor.reroute_ts', 'ge.reroute_ts', (['in_tensor', 'out_tensor'], {}), '(in_tensor, out_tensor)\n', (3217, 3240), True, 'from tensorflow.contrib import graph_editor as ge\n')] |
import os
from flask import Flask, Response, render_template, redirect
from flask_restful import reqparse,request, abort, Api, Resource, fields, marshal_with
from flask_sqlalchemy import SQLAlchemy
import sqlite3
app = Flask(__name__)
# Store the SQLite database next to this source file.
p_dir = os.path.dirname(os.path.abspath(__file__))
db_file = "sqlite:///{}".format(os.path.join(p_dir, "notes.db"))
app.config['SQLALCHEMY_DATABASE_URI'] = db_file
# REST API and ORM are both bound to the single Flask app instance.
api = Api(app)
db = SQLAlchemy(app)
def copy_data(note, to):
    """Clone *note* into model class *to* and stage it in the current db session.

    Used to archive a note into the history / deleted tables. The clone is
    added to the session but not committed; the new instance is returned.
    """
    clone = to(
        title=note.title,
        note_id=note.id,
        content=note.content,
        created_date=note.created_date,
        modified_date=note.modified_date,
    )
    db.session.add(clone)
    return clone
def find_and_abort_if_doesnt_exist(number):
    """Return the note with id *number*, aborting the request with 404 if absent."""
    note = DB_Notes.query.filter_by(id=number).first()
    if note is None:
        abort(404, message="Note number {} doesn't exist".format(number))
    return note
# Parser for creating notes (POST /notes): title and content are both required.
parser = reqparse.RequestParser(bundle_errors=True)
#parser.add_argument('id', required=False,help='No id provided')
parser.add_argument('title', required=True, help='No title provided')
parser.add_argument('content', required=True, help='No content provided')
# Parser for updating a note (PUT /note/<n>): only the content may change.
parserPut = reqparse.RequestParser(bundle_errors=True)
parserPut.add_argument('content', required=True, help='No content provided')
## sqlalchemy classes to be mapped to db
class DB_BaseColumns(object):
    # Columns shared by every notes table: primary key, title and body text.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(200))
    content = db.Column(db.String(800))
class DB_Notes(DB_BaseColumns,db.Model):
    # Live notes; both timestamps are maintained automatically by the database.
    created_date = db.Column(db.DateTime, default=db.func.now())
    modified_date = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
class DB_NotesHistory(DB_BaseColumns,db.Model):
    # Archived past versions of a note; note_id points back to the original
    # DB_Notes row, and the timestamps are copied over (not auto-managed).
    note_id = db.Column(db.Integer)
    created_date = db.Column(db.DateTime)
    modified_date = db.Column(db.DateTime)
class DB_NotesDeleted(DB_BaseColumns, db.Model):
    # Deleted notes; original timestamps are copied over, while deletion_date
    # is stamped automatically by the database on insert/update.
    note_id = db.Column(db.Integer)
    created_date = db.Column(db.DateTime)
    modified_date = db.Column(db.DateTime)
    deletion_date = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
## fields needed for json output
note_fields = {
    'id': fields.Integer,
    'title': fields.String,
    'content': fields.String,
    'created_date': fields.DateTime,
    'modified_date': fields.DateTime
}
# History records additionally expose the id of the note they belong to.
noteH_fields = dict(note_fields)
noteH_fields.update({
    'note_id': fields.Integer,
} )
# Deleted records additionally expose when the deletion happened.
noteD_fields = dict(noteH_fields)
noteD_fields.update({
    'deletion_date': fields.DateTime,
} )
class Home(Resource):
    """HTML landing page listing live, historic and deleted notes."""
    def get(self):
        return Response(render_template('home.html', Notes = DB_Notes.query.all(), NotesHistory = DB_NotesHistory.query.all(), NotesDeleted = DB_NotesDeleted.query.all()), mimetype='text/html')
##flask classes for routing
class Note(Resource):
    """REST resource for a single note, addressed as /note/<number>."""
    @marshal_with(note_fields)
    def get(self, number):
        # Return the note, or 404 via the shared lookup helper.
        return find_and_abort_if_doesnt_exist(number), 200
    def delete(self, number):
        # Archive a copy into both the history and deleted tables before
        # removing the live row, so nothing is lost.
        note = find_and_abort_if_doesnt_exist(number)
        copy_data(note,DB_NotesHistory)
        copy_data(note,DB_NotesDeleted)
        db.session.delete(note)
        db.session.commit()
        #return redirect("/"), 204
        return "", 204
    def put(self, number):
        # Replace the note's content, archiving the previous version first.
        args = parserPut.parse_args()
        note = find_and_abort_if_doesnt_exist(number)
        noteH = copy_data(note,DB_NotesHistory)
        note.content = args['content']
        # Stamp the archived copy with the moment of this edit.
        noteH.modified_date = db.func.now()
        db.session.commit()
        return args['content'], 201
class NotesList(Resource):
    """Collection endpoint: list all notes (GET) and create new ones (POST)."""

    @marshal_with(note_fields)
    def get(self):
        """Return every live note."""
        return DB_Notes.query.all(), 200

    def post(self):
        """Create a note from the required 'title' and 'content' arguments."""
        args = parser.parse_args()
        note = DB_Notes(title=args['title'], content=args['content'])
        db.session.add(note)
        db.session.commit()
        # Bug fix: the previous `return 201` sent the integer 201 as the JSON
        # body with HTTP status 200; return an empty body with status 201.
        return "", 201
class NotesHistory(Resource):
    """Read-only endpoint exposing the edit history of a single note."""

    @marshal_with(noteH_fields)
    def get(self, number):
        """Return all history records for note *number*, newest first (404 if none)."""
        history = DB_NotesHistory.query.filter_by(note_id=number).order_by(
            DB_NotesHistory.modified_date.desc()).all()
        # Query.all() returns a list and never None, so the original
        # `if note is None` check could never trigger; test for emptiness.
        if not history:
            abort(404, message="History of note number {} doesn't exist".format(number))
        return history, 200
class NotesDeleted(Resource):
    """Read-only endpoint listing every deleted note."""

    @marshal_with(noteD_fields)
    def get(self):
        """Return all deleted notes (404 when there are none)."""
        deleted = DB_NotesDeleted.query.all()
        # Query.all() returns a list and never None, so the original
        # `if note is None` check could never trigger; test for emptiness.
        if not deleted:
            abort(404, message="No deleted notes")
        return deleted, 200
##setup the Api resource routing
api.add_resource(Home, '/')
api.add_resource(Note, '/note/<int:number>')
api.add_resource(NotesHistory, '/note/<int:number>/history')
api.add_resource(NotesList, '/notes')
api.add_resource(NotesDeleted, '/deleted')
# Run the development server when executed directly (not when imported).
if __name__ == '__main__':
    app.run(debug=False)
| [
"flask_restful.reqparse.RequestParser",
"flask_restful.Api",
"flask.Flask",
"os.path.join",
"flask_restful.marshal_with",
"flask_restful.abort",
"os.path.abspath",
"flask_sqlalchemy.SQLAlchemy"
] | [((222, 237), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (227, 237), False, 'from flask import Flask, Response, render_template, redirect\n'), ((410, 418), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (413, 418), False, 'from flask_restful import reqparse, request, abort, Api, Resource, fields, marshal_with\n'), ((424, 439), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (434, 439), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((891, 933), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {'bundle_errors': '(True)'}), '(bundle_errors=True)\n', (913, 933), False, 'from flask_restful import reqparse, request, abort, Api, Resource, fields, marshal_with\n'), ((1156, 1198), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {'bundle_errors': '(True)'}), '(bundle_errors=True)\n', (1178, 1198), False, 'from flask_restful import reqparse, request, abort, Api, Resource, fields, marshal_with\n'), ((263, 288), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (278, 288), False, 'import os\n'), ((322, 353), 'os.path.join', 'os.path.join', (['p_dir', '"""notes.db"""'], {}), "(p_dir, 'notes.db')\n", (334, 353), False, 'import os\n'), ((2817, 2842), 'flask_restful.marshal_with', 'marshal_with', (['note_fields'], {}), '(note_fields)\n', (2829, 2842), False, 'from flask_restful import reqparse, request, abort, Api, Resource, fields, marshal_with\n'), ((3561, 3586), 'flask_restful.marshal_with', 'marshal_with', (['note_fields'], {}), '(note_fields)\n', (3573, 3586), False, 'from flask_restful import reqparse, request, abort, Api, Resource, fields, marshal_with\n'), ((3889, 3915), 'flask_restful.marshal_with', 'marshal_with', (['noteH_fields'], {}), '(noteH_fields)\n', (3901, 3915), False, 'from flask_restful import reqparse, request, abort, Api, Resource, fields, marshal_with\n'), ((4385, 4411), 'flask_restful.marshal_with', 'marshal_with', 
(['noteD_fields'], {}), '(noteD_fields)\n', (4397, 4411), False, 'from flask_restful import reqparse, request, abort, Api, Resource, fields, marshal_with\n'), ((4511, 4549), 'flask_restful.abort', 'abort', (['(404)'], {'message': '"""No deleted notes"""'}), "(404, message='No deleted notes')\n", (4516, 4549), False, 'from flask_restful import reqparse, request, abort, Api, Resource, fields, marshal_with\n')] |
import sys
import os
import json
import csv
from time import strftime
from datetime import timedelta, date, datetime
from flask import Blueprint, render_template, redirect, request, url_for, flash
import server.configuration as cfg
from server.postalservice import checkTemp
from server.helpers import LoginRequired, pwIsValid, resource_path
from server.models import SzenzorAdatok
# Reach back into the running entry-point module; it holds the shared
# GLOBAL_CONFIG / GLOBAL_STATION_CONFIG state this blueprint reads and writes.
app = sys.modules['__main__']
device_bp = Blueprint('device_bp', __name__, template_folder='templates')
@device_bp.route('/allomas/<id>', methods=['GET'])
@LoginRequired
def allomas(id):
    """Station detail page: show sensor data for station *id* over a date range.

    The range comes from the 'sdate'/'edate' query parameters; missing or
    invalid ranges fall back to the past 24 hours. Renders layout.html with
    statistics, the latest reading and the generated CSV file name.
    """
    # Bug fix: Decimal was used below but never imported at module level,
    # so the rounding of the latest reading always failed silently.
    from decimal import Decimal

    print(id)
    if os.listdir(resource_path("logs/{}/".format(id))):
        if request.method == 'GET' and request.args.get('sdate') and request.args.get('edate'):
            start_date = datetime.strptime(request.args.get('sdate'), '%Y-%m-%d')
            end_date = datetime.strptime(request.args.get('edate'), '%Y-%m-%d')
            if start_date > datetime.now():
                # Start date in the future -> reject and fall back to the past 24h.
                flash("Hiba a bevitt adatokban!", category='danger')
                start_date = date.today() - timedelta(days=0)
                end_date = date.today() + timedelta(days=1)
            elif start_date == end_date:
                # Single-day query: widen to a one-day window.
                end_date = start_date + timedelta(days=1)
            elif start_date > end_date:
                # End date before start date -> reject and fall back to the past 24h.
                flash("Hiba a bevitt adatokban! a záró dátum korábbi, mint a kezdő dátum.", category='danger')
                start_date = date.today() - timedelta(days=0)
                end_date = date.today() + timedelta(days=1)
        else:
            # Past 24h as default
            start_date = date.today() - timedelta(days=0)
            end_date = date.today() + timedelta(days=1)

        path = os.path.join(app.GLOBAL_CONFIG['SERVER']['WORKDIR'], "allomasok.json")
        allomasok = json.load(open(path, encoding="utf-8"))
        jelenlegiAllomas = allomasok[id]
        print('DATES: [{}]-[{}]'.format(start_date, end_date))

        adatok = SzenzorAdatok(start_date, end_date, id)
        # Bug fix: fajlnev/latest could be referenced below while unbound when
        # their try blocks failed; default them so the page still renders.
        fajlnev = None
        try:
            fajlnev = adatok.generateCsv()
        except Exception as error:
            # Bug fix: flash the message text, not the exception object
            # (exception objects are not session-serializable).
            flash(str(error))
        print(adatok.nev)

        latest = None
        try:
            latest = SzenzorAdatok(date.today() - timedelta(days=0), date.today() + timedelta(days=1), id)
            latest.adatok = latest.adatok[::-1]
            latest = latest.adatok[0]
            # Latest readings, rounded to one decimal (comma decimal separator
            # is normalized to a dot first).
            latest['homerseklet'] = round(Decimal(str(latest['homerseklet']).replace(",", ".")), 1)
            latest['paratartalom'] = round(Decimal(str(latest['paratartalom']).replace(",", ".")), 1)
        except Exception:
            # Best effort: having no latest reading is not fatal for the page.
            pass

        ctx = {
            "jallomas": app.GLOBAL_STATION_CONFIG[id],
            "id": id,
            "mero_nev": jelenlegiAllomas['allomasnev'],
            "datumok": {"ma": date.today() - timedelta(days=0), "holnap": date.today() + timedelta(days=1),
                        "hetmulva": date.today() - timedelta(days=7), "honapmulva": date.today() - timedelta(days=30)},
            "stat": adatok.stat,
            "latest": latest,
            "adatok": adatok,
            "fajlnev": fajlnev,
            "sdate": start_date.strftime("%Y-%m-%d"),
            "edate": end_date.strftime("%Y-%m-%d")
        }
        return render_template("layout.html", ctx=ctx)
    else:
        flash("Ezen az állomáson még nincs felvett adat", category="warning")
        return redirect(url_for('allomasok'))
@device_bp.route('/log/<id>', methods=['GET'])
def log(id):
    """Data-ingestion endpoint polled by station *id*.

    Reads temperature/humidity from the query string, records the caller's IP
    in the station config, checks the temperature limits, and appends a CSV
    row to logs/<id>/<year>/<month>/<day>.csv.
    """
    if request.method == 'GET':
        print("[SERVER] GET REQUEST FROM: {}".format(request.remote_addr))
        # Remember the latest IP this station reported from.
        app.GLOBAL_STATION_CONFIG[id]['ip'] = str(request.remote_addr)
        cfg.save_allomas()
        homerseklet = request.args.get('homerseklet')
        # Check the temperature against the configured limits:
        checkTemp(homerseklet, id)
        paratartalom = request.args.get('paratartalom')
        currDate = strftime("%Y/%m/%d")
        currTime = strftime("%H:%M:%S")
        try:
            base_dir = os.path.dirname(__file__)  # renamed from `dir` (shadowed the builtin)
            filename = os.path.join(base_dir, strftime("logs/{}/%Y/%m/%d.csv".format(id)))
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            # Bug fix: use `with` so the log file is closed even when the
            # write fails (the original leaked the handle on error).
            with open(filename, "a") as ofile:
                writer = csv.writer(ofile, delimiter=';', lineterminator='\n')
                writer.writerow([currDate] + [currTime] + [homerseklet] + [paratartalom])
            return "Siker!"
        except Exception as error:
            return str(error)
    else:
        return "Not Get"
@device_bp.route('/deletestation', methods=['POST'])
@LoginRequired
def deletestation():
    """Remove a station from the global config after verifying the admin key."""
    password_candidate = request.form['password']
    key_ok = request.method == 'POST' and pwIsValid(password_candidate, app.GLOBAL_CONFIG['HozzaferesiKulcs'])
    if not key_ok:
        flash("Helytelen kulcs! Hozzáférés megtagadva!", category='danger')
        return redirect(url_for('allomasok'))
    station_id = request.form['id']
    app.GLOBAL_STATION_CONFIG.pop(station_id, None)
    flash("Sikeresen törölve a " + str(station_id) + " állomás!", category='success')
    cfg.save_allomas()
    return redirect(url_for('allomasok'))
@device_bp.route('/addnewstation')
@LoginRequired
def newstation():
    """Register a new station from the query parameters and create its log dir."""
    if request.method == 'GET':
        station_id = request.args.get('id')
        # Build the station record once instead of re-reading request.args for
        # every field (the original also shadowed the `dict` builtin).
        station = {
            "allomasnev": request.args.get('megnev'),
            "allomashely": request.args.get('hely'),
            "ip": "0.0.0.0",
            "minT": float(request.args.get('mint')),
            "maxT": float(request.args.get('maxt')),
        }
        print("bevitt adatok:")
        print({station_id: {"allomasnev": station["allomasnev"],
                            "allomashely": station["allomashely"]}})
        app.GLOBAL_STATION_CONFIG[station_id] = station
        # Create the per-station log directory up front.
        base_dir = os.path.dirname(__file__)
        path = os.path.join(base_dir, strftime("logs/{}/".format(station_id)))
        os.makedirs(os.path.dirname(path), exist_ok=True)
        flash("Sikeresen hozzáadva!", category='success')
        cfg.save_allomas()
    return redirect(url_for('allomasok'))
| [
"flask.render_template",
"flask.request.args.get",
"server.helpers.pwIsValid",
"server.models.SzenzorAdatok",
"flask.flash",
"time.strftime",
"os.path.join",
"csv.writer",
"flask.url_for",
"datetime.timedelta",
"os.path.dirname",
"server.configuration.save_allomas",
"server.postalservice.che... | [((428, 489), 'flask.Blueprint', 'Blueprint', (['"""device_bp"""', '__name__'], {'template_folder': '"""templates"""'}), "('device_bp', __name__, template_folder='templates')\n", (437, 489), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((6142, 6160), 'server.configuration.save_allomas', 'cfg.save_allomas', ([], {}), '()\n', (6158, 6160), True, 'import server.configuration as cfg\n'), ((1728, 1798), 'os.path.join', 'os.path.join', (["app.GLOBAL_CONFIG['SERVER']['WORKDIR']", '"""allomasok.json"""'], {}), "(app.GLOBAL_CONFIG['SERVER']['WORKDIR'], 'allomasok.json')\n", (1740, 1798), False, 'import os\n'), ((1980, 2019), 'server.models.SzenzorAdatok', 'SzenzorAdatok', (['start_date', 'end_date', 'id'], {}), '(start_date, end_date, id)\n', (1993, 2019), False, 'from server.models import SzenzorAdatok\n'), ((3288, 3327), 'flask.render_template', 'render_template', (['"""layout.html"""'], {'ctx': 'ctx'}), "('layout.html', ctx=ctx)\n", (3303, 3327), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((3346, 3415), 'flask.flash', 'flash', (['"""Ezen az állomáson még nincs felvett adat"""'], {'category': '"""warning"""'}), "('Ezen az állomáson még nincs felvett adat', category='warning')\n", (3351, 3415), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((3710, 3728), 'server.configuration.save_allomas', 'cfg.save_allomas', ([], {}), '()\n', (3726, 3728), True, 'import server.configuration as cfg\n'), ((3751, 3782), 'flask.request.args.get', 'request.args.get', (['"""homerseklet"""'], {}), "('homerseklet')\n", (3767, 3782), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((3837, 3863), 'server.postalservice.checkTemp', 'checkTemp', (['homerseklet', 'id'], {}), '(homerseklet, id)\n', (3846, 3863), False, 'from server.postalservice import checkTemp\n'), ((3887, 3919), 
'flask.request.args.get', 'request.args.get', (['"""paratartalom"""'], {}), "('paratartalom')\n", (3903, 3919), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((3939, 3959), 'time.strftime', 'strftime', (['"""%Y/%m/%d"""'], {}), "('%Y/%m/%d')\n", (3947, 3959), False, 'from time import strftime\n'), ((3979, 3999), 'time.strftime', 'strftime', (['"""%H:%M:%S"""'], {}), "('%H:%M:%S')\n", (3987, 3999), False, 'from time import strftime\n'), ((4741, 4809), 'server.helpers.pwIsValid', 'pwIsValid', (['password_candidate', "app.GLOBAL_CONFIG['HozzaferesiKulcs']"], {}), "(password_candidate, app.GLOBAL_CONFIG['HozzaferesiKulcs'])\n", (4750, 4809), False, 'from server.helpers import LoginRequired, pwIsValid, resource_path\n'), ((4981, 4999), 'server.configuration.save_allomas', 'cfg.save_allomas', ([], {}), '()\n', (4997, 4999), True, 'import server.configuration as cfg\n'), ((5064, 5131), 'flask.flash', 'flash', (['"""Helytelen kulcs! Hozzáférés megtagadva!"""'], {'category': '"""danger"""'}), "('Helytelen kulcs! 
Hozzáférés megtagadva!', category='danger')\n", (5069, 5131), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((5910, 5935), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5925, 5935), False, 'import os\n'), ((6088, 6137), 'flask.flash', 'flash', (['"""Sikeresen hozzáadva!"""'], {'category': '"""success"""'}), "('Sikeresen hozzáadva!', category='success')\n", (6093, 6137), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((6181, 6201), 'flask.url_for', 'url_for', (['"""allomasok"""'], {}), "('allomasok')\n", (6188, 6201), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((684, 709), 'flask.request.args.get', 'request.args.get', (['"""sdate"""'], {}), "('sdate')\n", (700, 709), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((714, 739), 'flask.request.args.get', 'request.args.get', (['"""edate"""'], {}), "('edate')\n", (730, 739), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((3440, 3460), 'flask.url_for', 'url_for', (['"""allomasok"""'], {}), "('allomasok')\n", (3447, 3460), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((4031, 4056), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4046, 4056), False, 'import os\n'), ((4270, 4323), 'csv.writer', 'csv.writer', (['ofile'], {'delimiter': '""";"""', 'lineterminator': '"""\n"""'}), "(ofile, delimiter=';', lineterminator='\\n')\n", (4280, 4323), False, 'import csv\n'), ((5024, 5044), 'flask.url_for', 'url_for', (['"""allomasok"""'], {}), "('allomasok')\n", (5031, 5044), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((5156, 5176), 'flask.url_for', 'url_for', (['"""allomasok"""'], {}), "('allomasok')\n", (5163, 5176), False, 'from flask import Blueprint, 
render_template, redirect, request, url_for, flash\n'), ((5309, 5331), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (5325, 5331), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((5504, 5526), 'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (5520, 5526), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((5545, 5571), 'flask.request.args.get', 'request.args.get', (['"""megnev"""'], {}), "('megnev')\n", (5561, 5571), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((5649, 5673), 'flask.request.args.get', 'request.args.get', (['"""hely"""'], {}), "('hely')\n", (5665, 5673), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((6042, 6063), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (6057, 6063), False, 'import os\n'), ((784, 809), 'flask.request.args.get', 'request.args.get', (['"""sdate"""'], {}), "('sdate')\n", (800, 809), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((864, 889), 'flask.request.args.get', 'request.args.get', (['"""edate"""'], {}), "('edate')\n", (880, 889), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((931, 945), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (943, 945), False, 'from datetime import timedelta, date, datetime\n'), ((963, 1015), 'flask.flash', 'flash', (['"""Hiba a bevitt adatokban!"""'], {'category': '"""danger"""'}), "('Hiba a bevitt adatokban!', category='danger')\n", (968, 1015), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((1624, 1636), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1634, 1636), False, 'from datetime import timedelta, date, datetime\n'), ((1639, 1656), 'datetime.timedelta', 'timedelta', ([], 
{'days': '(0)'}), '(days=0)\n', (1648, 1656), False, 'from datetime import timedelta, date, datetime\n'), ((1680, 1692), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1690, 1692), False, 'from datetime import timedelta, date, datetime\n'), ((1695, 1712), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1704, 1712), False, 'from datetime import timedelta, date, datetime\n'), ((2123, 2135), 'flask.flash', 'flash', (['error'], {}), '(error)\n', (2128, 2135), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((4167, 4192), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (4182, 4192), False, 'import os\n'), ((5348, 5374), 'flask.request.args.get', 'request.args.get', (['"""megnev"""'], {}), "('megnev')\n", (5364, 5374), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((5391, 5415), 'flask.request.args.get', 'request.args.get', (['"""hely"""'], {}), "('hely')\n", (5407, 5415), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((5767, 5791), 'flask.request.args.get', 'request.args.get', (['"""mint"""'], {}), "('mint')\n", (5783, 5791), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((5869, 5893), 'flask.request.args.get', 'request.args.get', (['"""maxt"""'], {}), "('maxt')\n", (5885, 5893), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((1045, 1057), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1055, 1057), False, 'from datetime import timedelta, date, datetime\n'), ((1060, 1077), 'datetime.timedelta', 'timedelta', ([], {'days': '(0)'}), '(days=0)\n', (1069, 1077), False, 'from datetime import timedelta, date, datetime\n'), ((1105, 1117), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1115, 1117), False, 'from datetime import timedelta, date, datetime\n'), ((1120, 1137), 
'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1129, 1137), False, 'from datetime import timedelta, date, datetime\n'), ((2210, 2222), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2220, 2222), False, 'from datetime import timedelta, date, datetime\n'), ((2225, 2242), 'datetime.timedelta', 'timedelta', ([], {'days': '(0)'}), '(days=0)\n', (2234, 2242), False, 'from datetime import timedelta, date, datetime\n'), ((2244, 2256), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2254, 2256), False, 'from datetime import timedelta, date, datetime\n'), ((2259, 2276), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2268, 2276), False, 'from datetime import timedelta, date, datetime\n'), ((2835, 2847), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2845, 2847), False, 'from datetime import timedelta, date, datetime\n'), ((2850, 2867), 'datetime.timedelta', 'timedelta', ([], {'days': '(0)'}), '(days=0)\n', (2859, 2867), False, 'from datetime import timedelta, date, datetime\n'), ((2879, 2891), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2889, 2891), False, 'from datetime import timedelta, date, datetime\n'), ((2894, 2911), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2903, 2911), False, 'from datetime import timedelta, date, datetime\n'), ((2949, 2961), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2959, 2961), False, 'from datetime import timedelta, date, datetime\n'), ((2964, 2981), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (2973, 2981), False, 'from datetime import timedelta, date, datetime\n'), ((2997, 3009), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3007, 3009), False, 'from datetime import timedelta, date, datetime\n'), ((3012, 3030), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (3021, 3030), False, 'from datetime import timedelta, date, datetime\n'), ((5996, 6018), 
'flask.request.args.get', 'request.args.get', (['"""id"""'], {}), "('id')\n", (6012, 6018), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((1259, 1276), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1268, 1276), False, 'from datetime import timedelta, date, datetime\n'), ((1333, 1431), 'flask.flash', 'flash', (['"""Hiba a bevitt adatokban! a záró dátum korábbi, mint a kezdő dátum."""'], {'category': '"""danger"""'}), "('Hiba a bevitt adatokban! a záró dátum korábbi, mint a kezdő dátum.',\n category='danger')\n", (1338, 1431), False, 'from flask import Blueprint, render_template, redirect, request, url_for, flash\n'), ((1457, 1469), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1467, 1469), False, 'from datetime import timedelta, date, datetime\n'), ((1472, 1489), 'datetime.timedelta', 'timedelta', ([], {'days': '(0)'}), '(days=0)\n', (1481, 1489), False, 'from datetime import timedelta, date, datetime\n'), ((1517, 1529), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1527, 1529), False, 'from datetime import timedelta, date, datetime\n'), ((1532, 1549), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1541, 1549), False, 'from datetime import timedelta, date, datetime\n')] |
from functools import partial
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.compass import compass
from gdsfactory.components.via_stack import via_stack_slab_npp_m3
from gdsfactory.types import ComponentSpec, Floats, LayerSpecs, Optional
# Default pad for resistance_sheet: an 80x80 um slab/NPP/M3 via stack.
pad_via_stack_slab_npp = partial(via_stack_slab_npp_m3, size=(80, 80))
@cell
def resistance_sheet(
    width: float = 10,
    layers: LayerSpecs = ("SLAB90", "NPP"),
    layer_offsets: Floats = (0, 0.2),
    pad: ComponentSpec = pad_via_stack_slab_npp,
    pad_pitch: float = 100.0,
    ohms_per_square: Optional[float] = None,
    port_orientation1: int = 180,
    port_orientation2: int = 0,
) -> Component:
    """Return a sheet-resistance test structure.

    Two pads are placed ``pad_pitch`` apart and joined by a rectangular
    sheet drawn on ``layers``. Connectivity is kept between the pads and
    the first layer in ``layers``.

    Args:
        width: sheet width in um.
        layers: layers for the middle section.
        layer_offsets: per-layer offset from the edge (positive: over,
            negative: inclusion). The first entry is applied once, the
            rest are applied on both sides.
        pad: factory used to create each pad.
        pad_pitch: pad-to-pad spacing in um.
        ohms_per_square: optional sheet resistance used to fill
            ``info["resistance"]``.
        port_orientation1: orientation of port ``pad1`` in degrees.
        port_orientation2: orientation of port ``pad2`` in degrees.
    """
    c = Component()
    pad_component = pad()
    length = pad_pitch - pad_component.get_setting("size")[0]

    pad1 = c << pad_component
    pad2 = c << pad_component

    # The first layer carries the connectivity, so keep its reference.
    sheet = c << compass(
        size=(length + layer_offsets[0], width + layer_offsets[0]), layer=layers[0]
    )
    for extra_layer, offset in zip(layers[1:], layer_offsets[1:]):
        c << compass(size=(length + 2 * offset, width + 2 * offset), layer=extra_layer)

    pad1.connect("e3", sheet.ports["e1"])
    pad2.connect("e1", sheet.ports["e3"])

    if ohms_per_square:
        c.info["resistance"] = ohms_per_square * width * length
    else:
        c.info["resistance"] = None

    top_layer = list(layers)[-1]
    for port_name, pad_ref, orientation in (
        ("pad1", pad1, port_orientation1),
        ("pad2", pad2, port_orientation2),
    ):
        c.add_port(
            port_name,
            port_type="vertical_dc",
            midpoint=pad_ref.center,
            layer=top_layer,
            width=width,
            orientation=orientation,
        )
    return c
if __name__ == "__main__":
    # Visual smoke test: build a single 40 um wide sheet and display it.
    component = resistance_sheet(width=40)
    component.show()
| [
"gdsfactory.components.compass.compass",
"gdsfactory.component.Component",
"functools.partial"
] | [((322, 367), 'functools.partial', 'partial', (['via_stack_slab_npp_m3'], {'size': '(80, 80)'}), '(via_stack_slab_npp_m3, size=(80, 80))\n', (329, 367), False, 'from functools import partial\n'), ((1180, 1191), 'gdsfactory.component.Component', 'Component', ([], {}), '()\n', (1189, 1191), False, 'from gdsfactory.component import Component\n'), ((1316, 1405), 'gdsfactory.components.compass.compass', 'compass', ([], {'size': '(length + layer_offsets[0], width + layer_offsets[0])', 'layer': 'layers[0]'}), '(size=(length + layer_offsets[0], width + layer_offsets[0]), layer=\n layers[0])\n', (1323, 1405), False, 'from gdsfactory.components.compass import compass\n'), ((1490, 1558), 'gdsfactory.components.compass.compass', 'compass', ([], {'size': '(length + 2 * offset, width + 2 * offset)', 'layer': 'layer'}), '(size=(length + 2 * offset, width + 2 * offset), layer=layer)\n', (1497, 1558), False, 'from gdsfactory.components.compass import compass\n')] |
# Compare OpenMOC eigenvalues/fluxes against OpenMC, with and without
# SPH (SuPerHomogenization) correction factors, for a 2-group pin cell.
import openmoc
import openmc.openmoc_compatible
import openmc.mgxs
import numpy as np
import matplotlib
# Enable Matplotib to work for headless nodes
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
opts = openmoc.options.Options()
openmoc.log.set_log_level('NORMAL')
###############################################################################
# Eigenvalue Calculation w/o SPH Factors
###############################################################################
# Initialize 2-group OpenMC multi-group cross section library for a pin cell
mgxs_lib = openmc.mgxs.Library.load_from_file(filename='mgxs', directory='.')
# Create an OpenMOC Geometry from the OpenMC Geometry
openmoc_geometry = \
    openmc.openmoc_compatible.get_openmoc_geometry(mgxs_lib.geometry)
# Load cross section data
openmoc_materials = \
    openmoc.materialize.load_openmc_mgxs_lib(mgxs_lib, openmoc_geometry)
# Initialize FSRs
openmoc_geometry.initializeFlatSourceRegions()
# Initialize an OpenMOC TrackGenerator
track_generator = openmoc.TrackGenerator(
    openmoc_geometry, opts.num_azim, opts.azim_spacing)
track_generator.generateTracks()
# Initialize an OpenMOC Solver
solver = openmoc.CPUSolver(track_generator)
solver.setConvergenceThreshold(opts.tolerance)
solver.setNumThreads(opts.num_omp_threads)
# Run an eigenvalue calculation with the MGXS from OpenMC
solver.computeEigenvalue(opts.max_iters)
solver.printTimerReport()
keff_no_sph = solver.getKeff()
# Extract the OpenMOC scalar fluxes
fluxes_no_sph = openmoc.process.get_scalar_fluxes(solver)
###############################################################################
# Eigenvalue Calculation with SPH Factors
###############################################################################
# Compute SPH factors
sph, sph_mgxs_lib, sph_indices = \
    openmoc.materialize.compute_sph_factors(
        mgxs_lib, azim_spacing=opts.azim_spacing,
        num_azim=opts.num_azim, num_threads=opts.num_omp_threads)
# Load the SPH-corrected MGXS library data
# NOTE(review): presumably load_openmc_mgxs_lib overwrites the cross
# sections of the geometry's existing materials in-place, so the solver
# picks up the corrected data without re-initialization — confirm.
materials = \
    openmoc.materialize.load_openmc_mgxs_lib(sph_mgxs_lib, openmoc_geometry)
# Run an eigenvalue calculation with the SPH-corrected modified MGXS library
solver.computeEigenvalue(opts.max_iters)
solver.printTimerReport()
keff_with_sph = solver.getKeff()
# Report the OpenMC and OpenMOC eigenvalues
openmoc.log.py_printf('RESULT', 'OpenMOC keff w/o SPH: \t%1.5f', keff_no_sph)
openmoc.log.py_printf('RESULT', 'OpenMOC keff w/ SPH: \t%1.5f', keff_with_sph)
openmoc.log.py_printf('RESULT', 'OpenMC keff: \t\t1.17574 +/- 0.00086')
###############################################################################
# Extracting Scalar Fluxes
###############################################################################
openmoc.log.py_printf('NORMAL', 'Plotting data...')
# Plot the cells
openmoc.plotter.plot_cells(openmoc_geometry)
# Extract the OpenMOC scalar fluxes from the SPH-corrected solve and
# apply the SPH factors to them
fluxes_sph = openmoc.process.get_scalar_fluxes(solver)
fluxes_sph *= sph
# Extract the OpenMC scalar fluxes
num_fsrs = openmoc_geometry.getNumFSRs()
num_groups = openmoc_geometry.getNumEnergyGroups()
openmc_fluxes = np.zeros((num_fsrs, num_groups), dtype=np.float64)
nufission_xs = np.zeros((num_fsrs, num_groups), dtype=np.float64)
# Get the OpenMC flux in each FSR
for fsr in range(num_fsrs):
    # Find the OpenMOC cell and volume for this FSR
    openmoc_cell = openmoc_geometry.findCellContainingFSR(fsr)
    cell_id = openmoc_cell.getId()
    fsr_volume = track_generator.getFSRVolume(fsr)
    # Store the volume-averaged flux
    mgxs = mgxs_lib.get_mgxs(cell_id, 'nu-fission')
    flux = mgxs.tallies['flux'].mean.flatten()
    # np.flipud reorders the tally from decreasing to increasing energy
    flux = np.flipud(flux) / fsr_volume
    openmc_fluxes[fsr, :] = flux
    nufission_xs[fsr, :] = mgxs.get_xs(nuclide='all')
# Extract energy group edges
group_edges = mgxs_lib.energy_groups.group_edges
group_edges += 1e-3  # Adjust lower bound to 1e-3 eV (for loglog scaling)
# Reverse the edges into ascending energy order for plotting
group_edges = np.flipud(group_edges)
# Normalize fluxes with the fission source
openmc_fluxes /= np.sum(openmc_fluxes * nufission_xs)
fluxes_sph /= np.sum(fluxes_sph * nufission_xs)
fluxes_no_sph /= np.sum(fluxes_no_sph * nufission_xs)
###############################################################################
# Plot the OpenMC, OpenMOC Scalar Fluxes
###############################################################################
# Extend the mgxs values array for matplotlib's step plot of fluxes
openmc_fluxes = np.insert(openmc_fluxes, 0, openmc_fluxes[:,0], axis=1)
fluxes_no_sph = np.insert(fluxes_no_sph, 0, fluxes_no_sph[:,0], axis=1)
fluxes_sph = np.insert(fluxes_sph, 0, fluxes_sph[:,0], axis=1)
# Plot OpenMOC and OpenMC fluxes in each FSR
for fsr in range(num_fsrs):
    # Get the OpenMOC cell and material for this FSR
    cell = openmoc_geometry.findCellContainingFSR(fsr)
    material_name = cell.getFillMaterial().getName()
    # Create a step plot for the MGXS
    fig = plt.figure()
    plt.plot(group_edges, openmc_fluxes[fsr,:],
             drawstyle='steps', color='r', linewidth=2)
    plt.plot(group_edges, fluxes_no_sph[fsr,:],
             drawstyle='steps', color='b', linewidth=2)
    plt.plot(group_edges, fluxes_sph[fsr,:],
             drawstyle='steps', color='g', linewidth=2)
    plt.yscale('log')
    plt.xscale('log')
    plt.xlabel('Energy [eV]')
    plt.ylabel('Flux')
    plt.title('Normalized Flux ({0})'.format(material_name))
    plt.xlim((min(group_edges), max(group_edges)))
    plt.legend(['openmc', 'openmoc w/o sph', 'openmoc w/ sph'], loc='best')
    plt.grid()
    filename = 'plots/flux-{0}.png'.format(material_name.replace(' ', '-'))
    plt.savefig(filename, bbox_inches='tight')
    plt.close()
###############################################################################
# Plot OpenMC-to-OpenMOC Scalar Flux Errors
###############################################################################
# Compute the percent relative error in the flux
rel_err_no_sph = np.zeros(openmc_fluxes.shape)
rel_err_sph = np.zeros(openmc_fluxes.shape)
for fsr in range(num_fsrs):
    delta_flux_no_sph = fluxes_no_sph[fsr,:] - openmc_fluxes[fsr,:]
    delta_flux_sph = fluxes_sph[fsr,:] - openmc_fluxes[fsr,:]
    rel_err_no_sph[fsr,:] = delta_flux_no_sph / openmc_fluxes[fsr,:] * 100.
    rel_err_sph[fsr,:] = delta_flux_sph / openmc_fluxes[fsr,:] * 100.
# Plot OpenMOC relative flux errors in each FSR
for fsr in range(num_fsrs):
    # Get the OpenMOC cell and material for this FSR
    cell = openmoc_geometry.findCellContainingFSR(fsr)
    material_name = cell.getFillMaterial().getName()
    # Create a step plot for the MGXS
    fig = plt.figure()
    plt.plot(group_edges, rel_err_no_sph[fsr,:],
             drawstyle='steps', color='r', linewidth=2)
    plt.plot(group_edges, rel_err_sph[fsr,:],
             drawstyle='steps', color='b', linewidth=2)
    plt.xscale('log')
    plt.xlabel('Energy [eV]')
    plt.ylabel('Relative Error [%]')
    plt.title('OpenMOC-to-OpenMC Flux Rel. Err. ({0})'.format(material_name))
    plt.xlim((min(group_edges), max(group_edges)))
    plt.legend(['openmoc w/o sph', 'openmoc w/ sph'], loc='best')
    plt.grid()
    filename = 'plots/rel-err-{0}.png'.format(material_name.replace(' ', '-'))
    plt.savefig(filename, bbox_inches='tight')
    plt.close()
| [
"matplotlib.pyplot.grid",
"openmoc.plotter.plot_cells",
"matplotlib.pyplot.ylabel",
"openmoc.materialize.compute_sph_factors",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"openmoc.log.set_log_level",
"matplotlib.pyplot.yscale",
"openmoc.options.Options",
"mat... | [((152, 173), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (166, 173), False, 'import matplotlib\n'), ((206, 216), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (214, 216), True, 'import matplotlib.pyplot as plt\n'), ((226, 251), 'openmoc.options.Options', 'openmoc.options.Options', ([], {}), '()\n', (249, 251), False, 'import openmoc\n'), ((252, 287), 'openmoc.log.set_log_level', 'openmoc.log.set_log_level', (['"""NORMAL"""'], {}), "('NORMAL')\n", (277, 287), False, 'import openmoc\n'), ((863, 931), 'openmoc.materialize.load_openmc_mgxs_lib', 'openmoc.materialize.load_openmc_mgxs_lib', (['mgxs_lib', 'openmoc_geometry'], {}), '(mgxs_lib, openmoc_geometry)\n', (903, 931), False, 'import openmoc\n'), ((1056, 1130), 'openmoc.TrackGenerator', 'openmoc.TrackGenerator', (['openmoc_geometry', 'opts.num_azim', 'opts.azim_spacing'], {}), '(openmoc_geometry, opts.num_azim, opts.azim_spacing)\n', (1078, 1130), False, 'import openmoc\n'), ((1210, 1244), 'openmoc.CPUSolver', 'openmoc.CPUSolver', (['track_generator'], {}), '(track_generator)\n', (1227, 1244), False, 'import openmoc\n'), ((1544, 1585), 'openmoc.process.get_scalar_fluxes', 'openmoc.process.get_scalar_fluxes', (['solver'], {}), '(solver)\n', (1577, 1585), False, 'import openmoc\n'), ((1867, 2011), 'openmoc.materialize.compute_sph_factors', 'openmoc.materialize.compute_sph_factors', (['mgxs_lib'], {'azim_spacing': 'opts.azim_spacing', 'num_azim': 'opts.num_azim', 'num_threads': 'opts.num_omp_threads'}), '(mgxs_lib, azim_spacing=opts.\n azim_spacing, num_azim=opts.num_azim, num_threads=opts.num_omp_threads)\n', (1906, 2011), False, 'import openmoc\n'), ((2086, 2158), 'openmoc.materialize.load_openmc_mgxs_lib', 'openmoc.materialize.load_openmc_mgxs_lib', (['sph_mgxs_lib', 'openmoc_geometry'], {}), '(sph_mgxs_lib, openmoc_geometry)\n', (2126, 2158), False, 'import openmoc\n'), ((2382, 2459), 'openmoc.log.py_printf', 'openmoc.log.py_printf', (['"""RESULT"""', '"""OpenMOC keff 
w/o SPH: \t%1.5f"""', 'keff_no_sph'], {}), "('RESULT', 'OpenMOC keff w/o SPH: \\t%1.5f', keff_no_sph)\n", (2403, 2459), False, 'import openmoc\n'), ((2460, 2538), 'openmoc.log.py_printf', 'openmoc.log.py_printf', (['"""RESULT"""', '"""OpenMOC keff w/ SPH: \t%1.5f"""', 'keff_with_sph'], {}), "('RESULT', 'OpenMOC keff w/ SPH: \\t%1.5f', keff_with_sph)\n", (2481, 2538), False, 'import openmoc\n'), ((2539, 2610), 'openmoc.log.py_printf', 'openmoc.log.py_printf', (['"""RESULT"""', '"""OpenMC keff: \t\t1.17574 +/- 0.00086"""'], {}), "('RESULT', 'OpenMC keff: \\t\\t1.17574 +/- 0.00086')\n", (2560, 2610), False, 'import openmoc\n'), ((2825, 2876), 'openmoc.log.py_printf', 'openmoc.log.py_printf', (['"""NORMAL"""', '"""Plotting data..."""'], {}), "('NORMAL', 'Plotting data...')\n", (2846, 2876), False, 'import openmoc\n'), ((2895, 2939), 'openmoc.plotter.plot_cells', 'openmoc.plotter.plot_cells', (['openmoc_geometry'], {}), '(openmoc_geometry)\n', (2921, 2939), False, 'import openmoc\n'), ((2990, 3031), 'openmoc.process.get_scalar_fluxes', 'openmoc.process.get_scalar_fluxes', (['solver'], {}), '(solver)\n', (3023, 3031), False, 'import openmoc\n'), ((3194, 3244), 'numpy.zeros', 'np.zeros', (['(num_fsrs, num_groups)'], {'dtype': 'np.float64'}), '((num_fsrs, num_groups), dtype=np.float64)\n', (3202, 3244), True, 'import numpy as np\n'), ((3260, 3310), 'numpy.zeros', 'np.zeros', (['(num_fsrs, num_groups)'], {'dtype': 'np.float64'}), '((num_fsrs, num_groups), dtype=np.float64)\n', (3268, 3310), True, 'import numpy as np\n'), ((4064, 4086), 'numpy.flipud', 'np.flipud', (['group_edges'], {}), '(group_edges)\n', (4073, 4086), True, 'import numpy as np\n'), ((4148, 4184), 'numpy.sum', 'np.sum', (['(openmc_fluxes * nufission_xs)'], {}), '(openmc_fluxes * nufission_xs)\n', (4154, 4184), True, 'import numpy as np\n'), ((4199, 4232), 'numpy.sum', 'np.sum', (['(fluxes_sph * nufission_xs)'], {}), '(fluxes_sph * nufission_xs)\n', (4205, 4232), True, 'import numpy as np\n'), ((4250, 4286), 
'numpy.sum', 'np.sum', (['(fluxes_no_sph * nufission_xs)'], {}), '(fluxes_no_sph * nufission_xs)\n', (4256, 4286), True, 'import numpy as np\n'), ((4591, 4647), 'numpy.insert', 'np.insert', (['openmc_fluxes', '(0)', 'openmc_fluxes[:, 0]'], {'axis': '(1)'}), '(openmc_fluxes, 0, openmc_fluxes[:, 0], axis=1)\n', (4600, 4647), True, 'import numpy as np\n'), ((4663, 4719), 'numpy.insert', 'np.insert', (['fluxes_no_sph', '(0)', 'fluxes_no_sph[:, 0]'], {'axis': '(1)'}), '(fluxes_no_sph, 0, fluxes_no_sph[:, 0], axis=1)\n', (4672, 4719), True, 'import numpy as np\n'), ((4732, 4782), 'numpy.insert', 'np.insert', (['fluxes_sph', '(0)', 'fluxes_sph[:, 0]'], {'axis': '(1)'}), '(fluxes_sph, 0, fluxes_sph[:, 0], axis=1)\n', (4741, 4782), True, 'import numpy as np\n'), ((6118, 6147), 'numpy.zeros', 'np.zeros', (['openmc_fluxes.shape'], {}), '(openmc_fluxes.shape)\n', (6126, 6147), True, 'import numpy as np\n'), ((6162, 6191), 'numpy.zeros', 'np.zeros', (['openmc_fluxes.shape'], {}), '(openmc_fluxes.shape)\n', (6170, 6191), True, 'import numpy as np\n'), ((5067, 5079), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5077, 5079), True, 'import matplotlib.pyplot as plt\n'), ((5084, 5175), 'matplotlib.pyplot.plot', 'plt.plot', (['group_edges', 'openmc_fluxes[fsr, :]'], {'drawstyle': '"""steps"""', 'color': '"""r"""', 'linewidth': '(2)'}), "(group_edges, openmc_fluxes[fsr, :], drawstyle='steps', color='r',\n linewidth=2)\n", (5092, 5175), True, 'import matplotlib.pyplot as plt\n'), ((5188, 5279), 'matplotlib.pyplot.plot', 'plt.plot', (['group_edges', 'fluxes_no_sph[fsr, :]'], {'drawstyle': '"""steps"""', 'color': '"""b"""', 'linewidth': '(2)'}), "(group_edges, fluxes_no_sph[fsr, :], drawstyle='steps', color='b',\n linewidth=2)\n", (5196, 5279), True, 'import matplotlib.pyplot as plt\n'), ((5292, 5380), 'matplotlib.pyplot.plot', 'plt.plot', (['group_edges', 'fluxes_sph[fsr, :]'], {'drawstyle': '"""steps"""', 'color': '"""g"""', 'linewidth': '(2)'}), "(group_edges, 
fluxes_sph[fsr, :], drawstyle='steps', color='g',\n linewidth=2)\n", (5300, 5380), True, 'import matplotlib.pyplot as plt\n'), ((5394, 5411), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5404, 5411), True, 'import matplotlib.pyplot as plt\n'), ((5416, 5433), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (5426, 5433), True, 'import matplotlib.pyplot as plt\n'), ((5438, 5463), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy [eV]"""'], {}), "('Energy [eV]')\n", (5448, 5463), True, 'import matplotlib.pyplot as plt\n'), ((5468, 5486), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (5478, 5486), True, 'import matplotlib.pyplot as plt\n'), ((5603, 5674), 'matplotlib.pyplot.legend', 'plt.legend', (["['openmc', 'openmoc w/o sph', 'openmoc w/ sph']"], {'loc': '"""best"""'}), "(['openmc', 'openmoc w/o sph', 'openmoc w/ sph'], loc='best')\n", (5613, 5674), True, 'import matplotlib.pyplot as plt\n'), ((5679, 5689), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5687, 5689), True, 'import matplotlib.pyplot as plt\n'), ((5770, 5812), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'bbox_inches': '"""tight"""'}), "(filename, bbox_inches='tight')\n", (5781, 5812), True, 'import matplotlib.pyplot as plt\n'), ((5817, 5828), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5826, 5828), True, 'import matplotlib.pyplot as plt\n'), ((6786, 6798), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6796, 6798), True, 'import matplotlib.pyplot as plt\n'), ((6803, 6895), 'matplotlib.pyplot.plot', 'plt.plot', (['group_edges', 'rel_err_no_sph[fsr, :]'], {'drawstyle': '"""steps"""', 'color': '"""r"""', 'linewidth': '(2)'}), "(group_edges, rel_err_no_sph[fsr, :], drawstyle='steps', color='r',\n linewidth=2)\n", (6811, 6895), True, 'import matplotlib.pyplot as plt\n'), ((6908, 6997), 'matplotlib.pyplot.plot', 'plt.plot', (['group_edges', 'rel_err_sph[fsr, 
:]'], {'drawstyle': '"""steps"""', 'color': '"""b"""', 'linewidth': '(2)'}), "(group_edges, rel_err_sph[fsr, :], drawstyle='steps', color='b',\n linewidth=2)\n", (6916, 6997), True, 'import matplotlib.pyplot as plt\n'), ((7011, 7028), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (7021, 7028), True, 'import matplotlib.pyplot as plt\n'), ((7033, 7058), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy [eV]"""'], {}), "('Energy [eV]')\n", (7043, 7058), True, 'import matplotlib.pyplot as plt\n'), ((7063, 7095), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative Error [%]"""'], {}), "('Relative Error [%]')\n", (7073, 7095), True, 'import matplotlib.pyplot as plt\n'), ((7229, 7290), 'matplotlib.pyplot.legend', 'plt.legend', (["['openmoc w/o sph', 'openmoc w/ sph']"], {'loc': '"""best"""'}), "(['openmoc w/o sph', 'openmoc w/ sph'], loc='best')\n", (7239, 7290), True, 'import matplotlib.pyplot as plt\n'), ((7295, 7305), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7303, 7305), True, 'import matplotlib.pyplot as plt\n'), ((7389, 7431), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'bbox_inches': '"""tight"""'}), "(filename, bbox_inches='tight')\n", (7400, 7431), True, 'import matplotlib.pyplot as plt\n'), ((7436, 7447), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7445, 7447), True, 'import matplotlib.pyplot as plt\n'), ((3724, 3739), 'numpy.flipud', 'np.flipud', (['flux'], {}), '(flux)\n', (3733, 3739), True, 'import numpy as np\n')] |
# SPDX-License-Identifier: MIT
# Copyright 2022 hirmiura (https://github.com/hirmiura)
#
# TamaTouを生成するスクリプト
#
# 使い方
# 1. fontforgeでコンソールを出す(fontforge-console.bat)
# 2. ディレクトリ移動
# 3. ffpython TamaTou.py
# 4. 待つ
#
# Orbitron → オービトロン → オーブ → 玉 → Tamaやな!
# Noto → No Toufu → 豆腐だらけだし → Touやな!
# →→ TamaTou
#
import fontforge
for weight in ['Regular', 'Bold']:
    # Step 1: re-encode Orbitron ("Tama") to full Unicode.
    print('玉改変')
    font = fontforge.open(f'Orbitron-{weight}.ttf')
    font.encoding = 'UnicodeFull'
    font.save(f'tmp1-{weight}.sfd')
    font.close()
    # Step 2: re-encode and CID-flatten Noto Sans JP ("Tou").
    print(f'能登改変 {weight}')
    font = fontforge.open(f'NotoSansJP-{weight}.otf')
    font.encoding = 'UnicodeFull'
    font.cidFlatten()
    font.save(f'tmp2-{weight}.sfd')
    font.close()
    # Step 3: merge both into the TamaTou family and emit the OTF.
    print('作成')
    family = 'TamaTou'
    copyright_text = 'Copyright (c) 2022, <NAME> (https://github.com/hirmiura) with Reserved Font Name TamaTou.'
    version = '1.0.0'
    license_text = 'Open Font License'
    font = fontforge.open(f'tmp1-{weight}.sfd')
    font.fontname = family
    font.familyname = family
    font.fullname = family
    font.weight = weight
    font.version = version
    font.sfntRevision = None
    font.copyright = copyright_text
    # Identical SFNT name-table entries for Japanese (0x411) and
    # English (0x409); unlisted string ids are set to ''.
    sfnt_values = {
        0: copyright_text,
        1: family,
        4: family,
        5: version,
        6: family + '-' + weight,
        13: license_text,
        16: family,
    }
    for lang_id in (0x411, 0x409):
        for str_id in range(18):
            font.appendSFNTName(lang_id, str_id, sfnt_values.get(str_id, ''))
    font.mergeFonts(f'tmp2-{weight}.sfd')
    font.save(f'tmp3-{weight}.sfd')
    font.generate(f'TamaTou-{weight}.otf')
    font.close()
| [
"fontforge.open"
] | [((389, 429), 'fontforge.open', 'fontforge.open', (['f"""Orbitron-{weight}.ttf"""'], {}), "(f'Orbitron-{weight}.ttf')\n", (403, 429), False, 'import fontforge\n'), ((549, 591), 'fontforge.open', 'fontforge.open', (['f"""NotoSansJP-{weight}.otf"""'], {}), "(f'NotoSansJP-{weight}.otf')\n", (563, 591), False, 'import fontforge\n'), ((989, 1025), 'fontforge.open', 'fontforge.open', (['f"""tmp1-{weight}.sfd"""'], {}), "(f'tmp1-{weight}.sfd')\n", (1003, 1025), False, 'import fontforge\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
bambu
------
pandas RDF functionality
Installation
--------------
::
# pip install pandas
pip install rdflib
"""
import sys
import pandas as pd
import rdflib
def bambu():
    """Module entry point (placeholder).

    Does nothing yet; returns ``None``.
    """
    return None
def to_rdf(df):
    """
    Serialize a pandas DataFrame to RDF.

    Args:
        df (DataFrame): pandas DataFrame to serialize to RDF

    Returns:
        rdflib.Graph: a serializable RDFLib Graph

    NOTE(review): unimplemented stub — currently returns ``None``,
    not the Graph the contract promises.
    """
def read_rdf(path, **kwargs):
    """
    Read an RDF source into a pandas DataFrame.

    Args:
        path (str): path to an RDF source
        kwargs (dict): TODO

    Returns:
        DataFrame: pandas DataFrame

    NOTE(review): unimplemented stub — currently returns ``None``.
    """
def to_rdfa(df, **kwargs):
    """
    Serialize a pandas DataFrame to an RDFa table.

    Args:
        df (DataFrame): pandas DataFrame to serialize to RDF
        kwargs (dict): TODO

    Returns:
        (list, StringIO): namespaces, RDFa table

    NOTE(review): unimplemented stub — currently returns ``None``.
    """
def read_rdfa(path, **kwargs):
    """
    Read an RDFa source into a pandas DataFrame.

    Args:
        path (str): path to an RDF source
        kwargs (dict): TODO

    Returns:
        DataFrame: pandas DataFrame

    NOTE(review): unimplemented stub — currently returns ``None``.
    """
def to_jsonld(df, **kwargs):
    """
    Serialize a pandas DataFrame to JSON-LD.

    Args:
        df (DataFrame): pandas DataFrame to serialize to RDF
        kwargs (dict): TODO

    Returns:
        (context, StringIO): JSONLD context, JSONLD data

    NOTE(review): unimplemented stub — currently returns ``None``.
    """
def read_jsonld(path, **kwargs):
    """
    Read a JSON-LD source into a pandas DataFrame.

    Args:
        path (str): path to a JSONLD source
        kwargs (dict): TODO

    Returns:
        DataFrame: pandas DataFrame

    NOTE(review): unimplemented stub — currently returns ``None``.
    """
import unittest
class Test_bambu(unittest.TestCase):
    """Smoke tests for the bambu module."""

    def test_bambu(self):
        # bambu() is a stub; nothing to assert yet.
        pass

    def test_10_to_rdf(self):
        df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
        output = to_rdf(df)
        print(output)
        # NOTE(review): to_rdf is an unimplemented stub that returns
        # None, so this assertion fails as written — confirm whether the
        # test should be marked expectedFailure until to_rdf exists.
        self.assertTrue(output)
def main(*args):
    """CLI entry point.

    Args:
        *args: command-line arguments; falls back to ``sys.argv[1:]``
            when none are given.

    Returns:
        int: process exit code (0 on success).
    """
    import optparse
    import logging
    # NOTE: optparse is deprecated in favor of argparse, but switching
    # would change the usage/help text, so it is kept for now.
    prs = optparse.OptionParser(usage="%prog: [args]")
    prs.add_option('-v', '--verbose',
                   dest='verbose',
                   action='store_true',)
    prs.add_option('-q', '--quiet',
                   dest='quiet',
                   action='store_true',)
    prs.add_option('-t', '--test',
                   dest='run_tests',
                   action='store_true',)
    # Fix: replaced the error-prone ``args and list(args) or sys.argv[1:]``
    # and/or idiom with an explicit conditional expression.
    args = list(args) if args else sys.argv[1:]
    (opts, args) = prs.parse_args(args)
    if not opts.quiet:
        logging.basicConfig()
        if opts.verbose:
            logging.getLogger().setLevel(logging.DEBUG)
    if opts.run_tests:
        # Hand the remaining args to unittest's own CLI and exit with
        # its status.
        sys.argv = [sys.argv[0]] + args
        import unittest
        sys.exit(unittest.main())
    bambu()
    return 0
if __name__ == "__main__":
    # Exit with main()'s return code so shells can detect failures.
    sys.exit(main())
| [
"logging.getLogger",
"logging.basicConfig",
"optparse.OptionParser",
"unittest.main",
"pandas.DataFrame"
] | [((1810, 1854), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'usage': '"""%prog: [args]"""'}), "(usage='%prog: [args]')\n", (1831, 1854), False, 'import optparse\n'), ((1608, 1658), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 2], [3, 4]]'], {'columns': "['A', 'B']"}), "([[1, 2], [3, 4]], columns=['A', 'B'])\n", (1620, 1658), True, 'import pandas as pd\n'), ((2313, 2334), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (2332, 2334), False, 'import logging\n'), ((2522, 2537), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2535, 2537), False, 'import unittest\n'), ((2373, 2392), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2390, 2392), False, 'import logging\n')] |
import brainrender
brainrender.SHADER_STYLE = 'cartoon'  # render meshes with the flat "cartoon" shader
from brainrender.scene import Scene
# SHARP-Track probe points exported from MATLAB (.mat) used by this example
sharptrack_file = 'Examples/example_files/sharptrack_probe_points.mat'
scene = Scene(use_default_key_bindings=True)
# Show the thalamus as a transparent wireframe for spatial context
scene.add_brain_regions('TH', alpha=.2, wireframe=True)
# Overlay the reconstructed probe track from the SHARP-Track file
scene.add_probe_from_sharptrack(sharptrack_file)
scene.render()
"brainrender.scene.Scene"
] | [((174, 210), 'brainrender.scene.Scene', 'Scene', ([], {'use_default_key_bindings': '(True)'}), '(use_default_key_bindings=True)\n', (179, 210), False, 'from brainrender.scene import Scene\n')] |
# Import Modules
import os
import csv
# Set the path to the input CSV (columns: Date, Profit/Losses)
filepath = os.path.join("Resources", "budget_data.csv")

# Accumulators for the single pass over the file
month = 0
total = 0
maxpro = 0
minpro = 0
# Fix: initialize the month labels so the report does not crash with a
# NameError when the data has no positive (or no negative) values.
maxmon = ""
minmon = ""

# Open the CSV file; csv.reader is forward-only, so collect everything
# in one pass over the rows.
with open(filepath) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    # Skip the header row
    next(csvreader)
    for row in csvreader:
        month += 1
        total += int(row[1])
        if maxpro < int(row[1]):
            maxpro = int(row[1])
            maxmon = row[0]
        if minpro > int(row[1]):
            minpro = int(row[1])
            minmon = row[0]

# NOTE(review): "Average Change" here is total/months, not the average
# month-to-month delta — confirm against the assignment spec.
report = "\n".join([
    "Financial Analysis",
    "----------------------------",
    "Total Months: " + str(month),
    "Total: $" + str(total),
    "Average Change: $" + str(total / month if month else 0),
    "Greatest Increase in Profits: " + maxmon + " ($" + str(maxpro) + ")",
    "Greatest Decrease in Profits: " + minmon + " ($" + str(minpro) + ")",
])

# Write the report to the analysis folder; ``with`` guarantees the file
# is closed even on error (the old code left it open on exceptions).
with open("analysis/output.txt", "a") as f:
    print(report, file=f)

# Echo the same report to the terminal
print(report)
"os.path.join",
"csv.reader"
] | [((65, 109), 'os.path.join', 'os.path.join', (['"""Resources"""', '"""budget_data.csv"""'], {}), "('Resources', 'budget_data.csv')\n", (77, 109), False, 'import os\n'), ((178, 212), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (188, 212), False, 'import csv\n')] |
#!/usr/bin/python
import sys
import argparse
sys.path.append('./')  # allow running from the repo root so src.* imports resolve
from src.utils.list_to_dict import list_to_dict
from src.utils.read_csv import read_csv
# NOTE(review): map_to_list_csv is imported but never used below.
from src.utils.map_to_list_csv import map_to_list_csv
from src.gephi.write_csv import write_csv
print("")
print("-----------------------------")
print("Anonymize attributes")
print("-----------------------------")
print("")
# NOTE(review): "Personss" typo in the --person-file help text below.
parser = argparse.ArgumentParser(description="Anonymizes a given attributes CSV")
required_parser = parser.add_argument_group('required named arguments')
required_parser.add_argument("--attributes-file", dest="attrs", help="Attributes, a given file with attributes for Gephi", required=True)
required_parser.add_argument("--person-file", dest="persons", help="Personss, a list of persons and their anonymized tokens", required=True)
args = parser.parse_args()
attributes_file = args.attrs
persons_file = args.persons
print("Reading attributes file...")
attributes_raw = read_csv(attributes_file)
# First row is the header; keep it aside and map the remaining rows by key
attributes = list_to_dict(attributes_raw[1:])
print("Reading persons file...")
persons = list_to_dict(read_csv(persons_file)[1:])
print("Anonymizing...")
anonymized_attributes = list()
for key, value in attributes.items():
    # persons maps the original key to its anonymized token (first column)
    name = persons[key][0]
    row = value
    row.insert(0, name)
    anonymized_attributes.append(row)
print("Write anonymized attributes to attributes file again")
# Re-attach the original header row before overwriting the input file
anonymized_attributes.insert(0, attributes_raw[0])
write_csv(anonymized_attributes, attributes_file)
print("All done!")
print("All done!")
| [
"argparse.ArgumentParser",
"src.utils.read_csv.read_csv",
"src.gephi.write_csv.write_csv",
"src.utils.list_to_dict.list_to_dict",
"sys.path.append"
] | [((47, 68), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (62, 68), False, 'import sys\n'), ((394, 466), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Anonymizes a given attributes CSV"""'}), "(description='Anonymizes a given attributes CSV')\n", (417, 466), False, 'import argparse\n'), ((958, 983), 'src.utils.read_csv.read_csv', 'read_csv', (['attributes_file'], {}), '(attributes_file)\n', (966, 983), False, 'from src.utils.read_csv import read_csv\n'), ((997, 1029), 'src.utils.list_to_dict.list_to_dict', 'list_to_dict', (['attributes_raw[1:]'], {}), '(attributes_raw[1:])\n', (1009, 1029), False, 'from src.utils.list_to_dict import list_to_dict\n'), ((1430, 1479), 'src.gephi.write_csv.write_csv', 'write_csv', (['anonymized_attributes', 'attributes_file'], {}), '(anonymized_attributes, attributes_file)\n', (1439, 1479), False, 'from src.gephi.write_csv import write_csv\n'), ((1087, 1109), 'src.utils.read_csv.read_csv', 'read_csv', (['persons_file'], {}), '(persons_file)\n', (1095, 1109), False, 'from src.utils.read_csv import read_csv\n')] |
from sklearn.cluster import KMeans
import cv2
import PIL
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from matplotlib import image as img1
import pandas as pd
from scipy.cluster.vq import whiten
import os
class DominantColors:
    """Extract the dominant colors of an image via k-means clustering."""

    CLUSTERS = None   # number of k-means clusters requested
    IMAGEPATH = None  # path to the source image file
    IMAGE = None      # flattened pixel array (set by dominantColors)
    COLORS = None     # k-means cluster centers, i.e. the dominant colors
    LABELS = None     # per-pixel cluster labels
    BASEWIDTH = 256   # width images are resized to before clustering
def __init__(self, image, clusters=3):
self.CLUSTERS = clusters
self.IMAGEPATH = image
def dominantColors(self):
# read image
img = cv2.imread(self.IMAGEPATH)
# resize image
imgh, imgw, _ = img.shape
wpercent = (self.BASEWIDTH / float(imgw))
hsize = int((float(imgh) * float(wpercent)))
img = cv2.resize(img, (self.BASEWIDTH, hsize), PIL.Image.ANTIALIAS)
# convert to rgb from bgr
img = cv2.cvtColor(img, cv2.COLOR_RGB2Luv)
# reshaping to a list of pixels
img = img.reshape((img.shape[0] * img.shape[1], 3))
# save image after operations
self.IMAGE = img
# using k-means to cluster pixels
kmeans = KMeans(n_clusters=self.CLUSTERS)
kmeans.fit(img)
# the cluster centers are our dominant colors.
self.COLORS = kmeans.cluster_centers_
# save labels
self.LABELS = kmeans.labels_
# returning after converting to integer from float
return self.COLORS.astype(int)
def rgb_to_hex(self, rgb):
return '#%02x%02x%02x' % (int(rgb[0]), int(rgb[1]), int(rgb[2]))
def analyseRGB(self):
r = []
g = []
b = []
image = img1.imread(self.IMAGEPATH)
for line in image:
for pixel in line:
# print(pixel)
temp_r, temp_g, temp_b = pixel
r.append(temp_r)
g.append(temp_g)
b.append(temp_b)
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(r, g, b)
plt.show()
df = pd.DataFrame({'red': r, 'blue': b, 'green': g})
df['scaled_red'] = whiten(df['red'])
df['scaled_blue'] = whiten(df['blue'])
df['scaled_green'] = whiten(df['green'])
df.sample(n=10)
from scipy.cluster.vq import kmeans
cluster_centers, distortion = kmeans(df[['scaled_red', 'scaled_green', 'scaled_blue']], 2)
print(cluster_centers)
colors = []
r_std, g_std, b_std = df[['red', 'green', 'blue']].std()
for cluster_center in cluster_centers:
scaled_r, scaled_g, scaled_b = cluster_center
colors.append((scaled_r * r_std / 255, scaled_g * g_std / 255, scaled_b * b_std / 255))
plt.imshow([colors])
plt.show()
def plotClusters(self):
# plotting
fig = plt.figure()
ax = Axes3D(fig)
for label, pix in zip(self.LABELS, self.IMAGE):
ax.scatter(pix[0], pix[1], pix[2], color=self.rgb_to_hex(self.COLORS[label]))
plt.show()
def plotHistogram(self):
# labels form 0 to no. of clusters
numLabels = np.arange(0, self.CLUSTERS + 1)
# create frequency count tables
(hist, _) = np.histogram(self.LABELS, bins=numLabels)
hist = hist.astype("float")
hist /= hist.sum()
# appending frequencies to cluster centers
colors = self.COLORS
# descending order sorting as per frequency count
colors = colors[(-hist).argsort()]
hist = hist[(-hist).argsort()]
# creating empty chart
chart = np.zeros((50, 500, 3), np.uint8)
start = 0
# creating color rectangles
for i in range(self.CLUSTERS):
end = start + hist[i] * 500
# getting rgb values
r = colors[i][0]
g = colors[i][1]
b = colors[i][2]
# using cv2.rectangle to plot colors
cv2.rectangle(chart, (int(start), 0), (int(end), 50), (r, g, b), -1)
start = end
# display chart
plt.figure()
plt.axis("off")
plt.imshow(chart)
plt.show()
def _main_():
    """Extract and visualise dominant colours for every predicted image."""
    n_clusters = 8
    # NOTE(review): files are listed from 'output\\predicted\\' but read from
    # '..\\..\\data\\output\\predicted\\' -- confirm both paths are intended.
    for file_name in sorted(os.listdir('output\\predicted\\')):
        print(file_name)
        extractor = DominantColors(
            '..\\..\\data\\output\\predicted\\{0}'.format(file_name), n_clusters)
        extractor.dominantColors()
        extractor.analyseRGB()


if __name__ == '__main__':
    _main_()
| [
"sklearn.cluster.KMeans",
"matplotlib.pyplot.imshow",
"numpy.histogram",
"scipy.cluster.vq.kmeans",
"os.listdir",
"cv2.resize",
"numpy.arange",
"matplotlib.image.imread",
"scipy.cluster.vq.whiten",
"matplotlib.pyplot.figure",
"numpy.zeros",
"cv2.cvtColor",
"pandas.DataFrame",
"matplotlib.p... | [((564, 590), 'cv2.imread', 'cv2.imread', (['self.IMAGEPATH'], {}), '(self.IMAGEPATH)\n', (574, 590), False, 'import cv2\n'), ((766, 827), 'cv2.resize', 'cv2.resize', (['img', '(self.BASEWIDTH, hsize)', 'PIL.Image.ANTIALIAS'], {}), '(img, (self.BASEWIDTH, hsize), PIL.Image.ANTIALIAS)\n', (776, 827), False, 'import cv2\n'), ((877, 913), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2Luv'], {}), '(img, cv2.COLOR_RGB2Luv)\n', (889, 913), False, 'import cv2\n'), ((1139, 1171), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'self.CLUSTERS'}), '(n_clusters=self.CLUSTERS)\n', (1145, 1171), False, 'from sklearn.cluster import KMeans\n'), ((1180, 1195), 'scipy.cluster.vq.kmeans.fit', 'kmeans.fit', (['img'], {}), '(img)\n', (1190, 1195), False, 'from scipy.cluster.vq import kmeans\n'), ((1652, 1679), 'matplotlib.image.imread', 'img1.imread', (['self.IMAGEPATH'], {}), '(self.IMAGEPATH)\n', (1663, 1679), True, 'from matplotlib import image as img1\n'), ((1929, 1941), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1939, 1941), True, 'import matplotlib.pyplot as plt\n'), ((1955, 1966), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (1961, 1966), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((2003, 2013), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2011, 2013), True, 'import matplotlib.pyplot as plt\n'), ((2027, 2074), 'pandas.DataFrame', 'pd.DataFrame', (["{'red': r, 'blue': b, 'green': g}"], {}), "({'red': r, 'blue': b, 'green': g})\n", (2039, 2074), True, 'import pandas as pd\n'), ((2102, 2119), 'scipy.cluster.vq.whiten', 'whiten', (["df['red']"], {}), "(df['red'])\n", (2108, 2119), False, 'from scipy.cluster.vq import whiten\n'), ((2148, 2166), 'scipy.cluster.vq.whiten', 'whiten', (["df['blue']"], {}), "(df['blue'])\n", (2154, 2166), False, 'from scipy.cluster.vq import whiten\n'), ((2196, 2215), 'scipy.cluster.vq.whiten', 'whiten', (["df['green']"], {}), "(df['green'])\n", (2202, 
2215), False, 'from scipy.cluster.vq import whiten\n'), ((2322, 2382), 'scipy.cluster.vq.kmeans', 'kmeans', (["df[['scaled_red', 'scaled_green', 'scaled_blue']]", '(2)'], {}), "(df[['scaled_red', 'scaled_green', 'scaled_blue']], 2)\n", (2328, 2382), False, 'from scipy.cluster.vq import kmeans\n'), ((2712, 2732), 'matplotlib.pyplot.imshow', 'plt.imshow', (['[colors]'], {}), '([colors])\n', (2722, 2732), True, 'import matplotlib.pyplot as plt\n'), ((2741, 2751), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2749, 2751), True, 'import matplotlib.pyplot as plt\n'), ((2815, 2827), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2825, 2827), True, 'import matplotlib.pyplot as plt\n'), ((2841, 2852), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (2847, 2852), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((3007, 3017), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3015, 3017), True, 'import matplotlib.pyplot as plt\n'), ((3112, 3143), 'numpy.arange', 'np.arange', (['(0)', '(self.CLUSTERS + 1)'], {}), '(0, self.CLUSTERS + 1)\n', (3121, 3143), True, 'import numpy as np\n'), ((3205, 3246), 'numpy.histogram', 'np.histogram', (['self.LABELS'], {'bins': 'numLabels'}), '(self.LABELS, bins=numLabels)\n', (3217, 3246), True, 'import numpy as np\n'), ((3580, 3612), 'numpy.zeros', 'np.zeros', (['(50, 500, 3)', 'np.uint8'], {}), '((50, 500, 3), np.uint8)\n', (3588, 3612), True, 'import numpy as np\n'), ((4060, 4072), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4070, 4072), True, 'import matplotlib.pyplot as plt\n'), ((4081, 4096), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4089, 4096), True, 'import matplotlib.pyplot as plt\n'), ((4105, 4122), 'matplotlib.pyplot.imshow', 'plt.imshow', (['chart'], {}), '(chart)\n', (4115, 4122), True, 'import matplotlib.pyplot as plt\n'), ((4131, 4141), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4139, 4141), True, 'import 
matplotlib.pyplot as plt\n'), ((4196, 4229), 'os.listdir', 'os.listdir', (['"""output\\\\predicted\\\\"""'], {}), "('output\\\\predicted\\\\')\n", (4206, 4229), False, 'import os\n')] |
import torch
from torch import nn
import numpy as np
from models.AttentionLayer import AttentionLayer
from models.SelfAttentionLayer import SelfAttention, SelfAttentionPytorch,\
BertSelfAttentionScores, BertSelfAttentionScoresP, BertMultiSelfAttentionScoresP,\
BertMultiAttentionScoresP, BertAttentionClsQuery
from pytorch_transformers.modeling_bert import BertAttention, BertSelfAttention
from utils import visualize_attention
__author__ = "<NAME>"
class EmbracementLayer(nn.Module):
    """Condense BERT token embeddings into one embedding-sized vector.

    For each batch item, the (seq_len, emb_size) token matrix is collapsed
    into a single emb_size vector (the "embrace" vector).  The strategy
    named by ``p`` decides how each of the emb_size feature slots is chosen:
    uniformly at random ('multinomial'), sampled with attention-derived
    probabilities, or summed from attention outputs.
    """
    def __init__(self, config, hidden_size, p, max_seq_length):
        """Build the attention module that matches strategy ``p``.

        Args:
            config: BERT configuration object; mutated in place
                (``num_attention_heads`` / ``output_attentions``) for some
                strategies.
            hidden_size: token embedding size (e.g. 768).
            p: name of the feature-selection strategy.
            max_seq_length: token sequence length (e.g. 128).
        """
        super(EmbracementLayer, self).__init__()
        self.p = p
        self.hidden_size = hidden_size # 768
        self.max_seq_length = max_seq_length # 128
        # Instantiate the attention variant for `p`.  Note: for
        # p == 'multinomial' no attention module is created (none is used).
        if self.p == 'selfattention':
            self.self_attention = SelfAttention(self.hidden_size) #self.max_seq_length) # AttentionLayer(self.hidden_size)
        elif self.p == 'multihead_bertselfattention':
            self.self_attention = BertSelfAttention(config)
        elif self.p == 'multihead_bertselfattention_in_p':
            config.num_attention_heads = 1
            self.self_attention = BertSelfAttentionScoresP(config)
        elif self.p == 'multihead_bertattention':
            self.self_attention = BertAttention(config)
        elif self.p == 'multihead_bertattention_clsquery':
            config.output_attentions = True
            self.self_attention = BertAttentionClsQuery(config, hidden_size)
            self.softmax = nn.Softmax()
        elif self.p == 'attention_clsquery':
            self.self_attention = AttentionLayer(self.hidden_size)
        elif self.p == 'attention_clsquery_weights':
            self.self_attention = AttentionLayer(self.hidden_size, return_att_weights=True)
            self.softmax = nn.Softmax(dim=-1)
        elif self.p == 'multiheadattention':
            config_att = config
            config_att.output_attentions = True
            self.self_attention = BertSelfAttentionScores(config_att)
        elif self.p == 'selfattention_pytorch':
            self.self_attention = SelfAttentionPytorch(self.max_seq_length) # 128
        elif self.p == 'multiple_multihead_bertselfattention_in_p':
            config.num_attention_heads = 1
            self.self_attention = BertMultiSelfAttentionScoresP(config)
        elif self.p == 'multiple_multihead_bertattention_in_p':
            config.num_attention_heads = 1
            config.output_attentions = True
            self.self_attention = BertMultiAttentionScoresP(config, max_seq_length)
            self.softmax = nn.Softmax(dim=-1)

    def forward(self, output_tokens_from_bert, cls_token=None):
        """Collapse per-token embeddings into one vector per batch item.

        Args:
            output_tokens_from_bert: (bs, seq_len, emb_size) token embeddings.
            cls_token: (bs, emb_size) CLS embeddings; read only by the
                '*clsquery*' strategies.

        Returns:
            (bs, emb_size) float CPU tensor of embrace vectors.

        NOTE(review): the '*bert*'/'clsquery' branches call ``.cuda()``
        unconditionally, so those paths assume a GPU is available.
        """
        # pooled_enc_output = bs x 768
        # output_tokens_from_bert = bert_output[0]
        # cls_output = bert_output[1] # CLS
        # Note: Docking layer not needed given that all features have the same size
        # tokens_to_embrace = output_tokens_from_bert[:, 1:, :] # (8, 128, 768) = (bs, sequence_length (where the first index is CLS), embedding_size)
        tokens_to_embrace = output_tokens_from_bert[:, :, :] # (8, 128, 768) = (bs, sequence_length, embedding_size)
        [bs, seq_len, emb_size] = tokens_to_embrace.size()
        tokens_to_embrace = tokens_to_embrace.cpu().detach().numpy()
        # Note: Consider each token in the sequence of 128 as one modality.
        embraced_features_token = []
        for i_bs in range(bs):
            # 1. Choose feature indexes to be considered in the Embrace vector
            if self.p == 'multinomial':
                # A. Multinomial distribution: randomly choose features from all 128 with same probability for each index feature
                probability = torch.tensor(np.ones(seq_len), dtype=torch.float)
                embraced_features_index = torch.multinomial(probability, emb_size, replacement=True) # shape = [768]
                embraced_features_index = embraced_features_index.cpu().detach().numpy() # shape = 768
            elif self.p == 'multihead_bertselfattention' or self.p == 'multihead_bertattention':
                tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
                head_mask = torch.ones([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
                attention_mask = torch.zeros([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
                tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(0).cuda()
                #selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, attention_mask, head_mask=None)
                selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, attention_mask, head_mask=head_mask)
                selfattention_scores = selfattention_scores[0]
            elif self.p == 'multihead_bertattention_clsquery':
                print("TODO. Use cls_token - Come back to this")
                tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
                cls_token_bs = cls_token[i_bs, :]
                #cls_token_bs = torch.tensor(cls_token_bs, dtype=torch.float).unsqueeze(0).unsqueeze(0).cuda()
                attention_mask = torch.ones([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
                tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(0).cuda()
                # Repeat the CLS embedding once per sequence position so it can
                # act as the query at every position.
                cls_token_bs = torch.tensor(cls_token_bs, dtype=torch.float).unsqueeze(0)
                cls_token_bs = cls_token_bs.repeat(self.max_seq_length, 1)
                cls_token_bs = cls_token_bs.unsqueeze(0).cuda()
                selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, head_mask=attention_mask, cls_query=cls_token_bs)
                selfattention_scores = self.softmax(selfattention_scores[1])
                print("")
            elif self.p == 'attention_clsquery':
                tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
                cls_token_bs = cls_token[i_bs, :]
                tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).cuda()
                cls_token_bs = torch.tensor(cls_token_bs, dtype=torch.float).unsqueeze(0).cuda()
                selfattention_scores = self.self_attention(cls_token_bs, tokens_to_embrace_bs_tensor, unsqueeze_idx=0)
                selfattention_scores = selfattention_scores[0]
            elif self.p == 'multiple_multihead_bertselfattention_in_p' or self.p == 'multiple_multihead_bertattention_in_p':
                tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
                attention_mask = torch.ones([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
                tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(0).cuda()
                selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, head_mask=attention_mask,
                                                           is_visualize_attention=False)
                if self.p == 'multiple_multihead_bertattention_in_p':
                    selfattention_scores = selfattention_scores.squeeze()
                    selfattention_scores = self.softmax(selfattention_scores)
                # Choose features using information from self-attention
                # One embrace vector is sampled per sequence position; they are
                # summed later in step 2.
                multiple_embrace_vectors = []
                for i in range(self.max_seq_length): # 128
                    score = selfattention_scores[i, :]
                    #attention_probs_img = score.unsqueeze(0).cpu().detach().numpy()
                    #visualize_attention(attention_probs_img)
                    embraced_features_index = torch.multinomial(score, emb_size, replacement=True) # shape = [768]
                    embraced_features_index = embraced_features_index.cpu().detach().numpy() # shape = 768
                    embraced_features_token_bs = []
                    for i_emb, e in enumerate(embraced_features_index):
                        embraced_features_token_bs.append(tokens_to_embrace[i_bs, e, i_emb])
                    multiple_embrace_vectors.append(embraced_features_token_bs)
                multiple_embrace_vectors = torch.tensor(multiple_embrace_vectors, dtype=torch.float)
            else:
                # B. Self-attention used to choose most important indexes -> p = softmax(mean(self_att))
                # 'selfattention_scores' shape -> (bs, 128)
                tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
                # ADD THE NEXT 2 LINES TO CONDENSED
                # attention_mask_bs = attention_mask[i_bs, :]
                # _, selfattention_scores = self.self_attention(tokens_to_embrace_bs, attention_mask_bs)
                # Original attention_mask ranges from 0 to -1000
                # If we want to mask the scores by multiplying between 0 and 1, we should give the attention_mask
                # as head_mask
                if self.p == 'selfattention':
                    _, selfattention_scores = self.self_attention(tokens_to_embrace_bs)
                elif self.p == 'attention_clsquery_weights':
                    tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
                    cls_token_bs = cls_token[i_bs, :]
                    tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).cuda()
                    cls_token_bs = torch.tensor(cls_token_bs, dtype=torch.float).unsqueeze(0).cuda()
                    selfattention_scores = self.self_attention(cls_token_bs, tokens_to_embrace_bs_tensor, unsqueeze_idx=0)
                    selfattention_scores = selfattention_scores[1]
                    selfattention_scores = self.softmax(selfattention_scores).squeeze()
                elif self.p == 'multiheadattention': # BertAttention
                    attention_mask = torch.ones([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
                    tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(0).cuda()
                    #selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, attention_mask, head_mask=None)
                    selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, head_mask=attention_mask)
                elif self.p == 'multihead_bertselfattention_in_p':
                    attention_mask = torch.ones([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
                    tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(0).cuda()
                    #selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, attention_mask, head_mask=None)
                    selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, head_mask=attention_mask)
                else: # 'selfattention_pytorch'
                    tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(
                        0).cuda()
                    selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor)
                # Choose features using information from self-attention
                selfattention_scores = torch.tensor(selfattention_scores, dtype=torch.float)
                embraced_features_index = torch.multinomial(selfattention_scores, emb_size, replacement=True) # shape = [768]
                embraced_features_index = embraced_features_index.cpu().detach().numpy() # shape = 768
            # 2. Add features into one of size (bs, embedding_size)
            embraced_features_token_bs = []
            if self.p == 'multihead_bertselfattention' or self.p == 'multihead_bertattention':
                embraced_features_index = torch.sum(selfattention_scores, dim=1)
                embraced_features_token_bs = embraced_features_index.squeeze()
                embraced_features_token_bs = embraced_features_token_bs.cpu().detach().numpy()
            elif self.p == 'multiple_multihead_bertselfattention_in_p' or self.p == 'multiple_multihead_bertattention_in_p':
                embraced_features_token_bs = torch.sum(multiple_embrace_vectors, dim=0)
                embraced_features_token_bs = embraced_features_token_bs.squeeze()
                embraced_features_token_bs = embraced_features_token_bs.cpu().detach().numpy()
            elif self.p == 'attention_clsquery':
                embraced_features_token_bs = selfattention_scores.cpu().detach().numpy()
            else:
                # Gather: feature slot i_emb is taken from the sampled token e.
                for i_emb, e in enumerate(embraced_features_index):
                    embraced_features_token_bs.append(tokens_to_embrace[i_bs, e, i_emb])
            embraced_features_token.append(embraced_features_token_bs) # (768)
        embraced_features_token = torch.tensor(embraced_features_token, dtype=torch.float) # (bs, 768)
        return embraced_features_token
| [
"models.SelfAttentionLayer.BertAttentionClsQuery",
"models.SelfAttentionLayer.SelfAttentionPytorch",
"torch.multinomial",
"numpy.ones",
"torch.nn.Softmax",
"models.SelfAttentionLayer.BertMultiAttentionScoresP",
"models.AttentionLayer.AttentionLayer",
"models.SelfAttentionLayer.BertMultiSelfAttentionSc... | [((12737, 12793), 'torch.tensor', 'torch.tensor', (['embraced_features_token'], {'dtype': 'torch.float'}), '(embraced_features_token, dtype=torch.float)\n', (12749, 12793), False, 'import torch\n'), ((800, 831), 'models.SelfAttentionLayer.SelfAttention', 'SelfAttention', (['self.hidden_size'], {}), '(self.hidden_size)\n', (813, 831), False, 'from models.SelfAttentionLayer import SelfAttention, SelfAttentionPytorch, BertSelfAttentionScores, BertSelfAttentionScoresP, BertMultiSelfAttentionScoresP, BertMultiAttentionScoresP, BertAttentionClsQuery\n'), ((979, 1004), 'pytorch_transformers.modeling_bert.BertSelfAttention', 'BertSelfAttention', (['config'], {}), '(config)\n', (996, 1004), False, 'from pytorch_transformers.modeling_bert import BertAttention, BertSelfAttention\n'), ((3792, 3850), 'torch.multinomial', 'torch.multinomial', (['probability', 'emb_size'], {'replacement': '(True)'}), '(probability, emb_size, replacement=True)\n', (3809, 3850), False, 'import torch\n'), ((11707, 11745), 'torch.sum', 'torch.sum', (['selfattention_scores'], {'dim': '(1)'}), '(selfattention_scores, dim=1)\n', (11716, 11745), False, 'import torch\n'), ((1141, 1173), 'models.SelfAttentionLayer.BertSelfAttentionScoresP', 'BertSelfAttentionScoresP', (['config'], {}), '(config)\n', (1165, 1173), False, 'from models.SelfAttentionLayer import SelfAttention, SelfAttentionPytorch, BertSelfAttentionScores, BertSelfAttentionScoresP, BertMultiSelfAttentionScoresP, BertMultiAttentionScoresP, BertAttentionClsQuery\n'), ((3713, 3729), 'numpy.ones', 'np.ones', (['seq_len'], {}), '(seq_len)\n', (3720, 3729), True, 'import numpy as np\n'), ((12090, 12132), 'torch.sum', 'torch.sum', (['multiple_embrace_vectors'], {'dim': '(0)'}), '(multiple_embrace_vectors, dim=0)\n', (12099, 12132), False, 'import torch\n'), ((1258, 1279), 'pytorch_transformers.modeling_bert.BertAttention', 'BertAttention', (['config'], {}), '(config)\n', (1271, 1279), False, 
'from pytorch_transformers.modeling_bert import BertAttention, BertSelfAttention\n'), ((1417, 1459), 'models.SelfAttentionLayer.BertAttentionClsQuery', 'BertAttentionClsQuery', (['config', 'hidden_size'], {}), '(config, hidden_size)\n', (1438, 1459), False, 'from models.SelfAttentionLayer import SelfAttention, SelfAttentionPytorch, BertSelfAttentionScores, BertSelfAttentionScoresP, BertMultiSelfAttentionScoresP, BertMultiAttentionScoresP, BertAttentionClsQuery\n'), ((1487, 1499), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (1497, 1499), False, 'from torch import nn\n'), ((1579, 1611), 'models.AttentionLayer.AttentionLayer', 'AttentionLayer', (['self.hidden_size'], {}), '(self.hidden_size)\n', (1593, 1611), False, 'from models.AttentionLayer import AttentionLayer\n'), ((5363, 5408), 'torch.tensor', 'torch.tensor', (['cls_token_bs'], {'dtype': 'torch.float'}), '(cls_token_bs, dtype=torch.float)\n', (5375, 5408), False, 'import torch\n'), ((8136, 8193), 'torch.tensor', 'torch.tensor', (['multiple_embrace_vectors'], {'dtype': 'torch.float'}), '(multiple_embrace_vectors, dtype=torch.float)\n', (8148, 8193), False, 'import torch\n'), ((11172, 11225), 'torch.tensor', 'torch.tensor', (['selfattention_scores'], {'dtype': 'torch.float'}), '(selfattention_scores, dtype=torch.float)\n', (11184, 11225), False, 'import torch\n'), ((11268, 11335), 'torch.multinomial', 'torch.multinomial', (['selfattention_scores', 'emb_size'], {'replacement': '(True)'}), '(selfattention_scores, emb_size, replacement=True)\n', (11285, 11335), False, 'import torch\n'), ((1699, 1756), 'models.AttentionLayer.AttentionLayer', 'AttentionLayer', (['self.hidden_size'], {'return_att_weights': '(True)'}), '(self.hidden_size, return_att_weights=True)\n', (1713, 1756), False, 'from models.AttentionLayer import AttentionLayer\n'), ((1784, 1802), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (1794, 1802), False, 'from torch import nn\n'), ((4374, 4427), 'torch.tensor', 
'torch.tensor', (['tokens_to_embrace_bs'], {'dtype': 'torch.float'}), '(tokens_to_embrace_bs, dtype=torch.float)\n', (4386, 4427), False, 'import torch\n'), ((6016, 6069), 'torch.tensor', 'torch.tensor', (['tokens_to_embrace_bs'], {'dtype': 'torch.float'}), '(tokens_to_embrace_bs, dtype=torch.float)\n', (6028, 6069), False, 'import torch\n'), ((7618, 7670), 'torch.multinomial', 'torch.multinomial', (['score', 'emb_size'], {'replacement': '(True)'}), '(score, emb_size, replacement=True)\n', (7635, 7670), False, 'import torch\n'), ((1962, 1997), 'models.SelfAttentionLayer.BertSelfAttentionScores', 'BertSelfAttentionScores', (['config_att'], {}), '(config_att)\n', (1985, 1997), False, 'from models.SelfAttentionLayer import SelfAttention, SelfAttentionPytorch, BertSelfAttentionScores, BertSelfAttentionScoresP, BertMultiSelfAttentionScoresP, BertMultiAttentionScoresP, BertAttentionClsQuery\n'), ((4187, 4217), 'numpy.shape', 'np.shape', (['tokens_to_embrace_bs'], {}), '(tokens_to_embrace_bs)\n', (4195, 4217), True, 'import numpy as np\n'), ((4285, 4315), 'numpy.shape', 'np.shape', (['tokens_to_embrace_bs'], {}), '(tokens_to_embrace_bs)\n', (4293, 4315), True, 'import numpy as np\n'), ((5258, 5311), 'torch.tensor', 'torch.tensor', (['tokens_to_embrace_bs'], {'dtype': 'torch.float'}), '(tokens_to_embrace_bs, dtype=torch.float)\n', (5270, 5311), False, 'import torch\n'), ((2080, 2121), 'models.SelfAttentionLayer.SelfAttentionPytorch', 'SelfAttentionPytorch', (['self.max_seq_length'], {}), '(self.max_seq_length)\n', (2100, 2121), False, 'from models.SelfAttentionLayer import SelfAttention, SelfAttentionPytorch, BertSelfAttentionScores, BertSelfAttentionScoresP, BertMultiSelfAttentionScoresP, BertMultiAttentionScoresP, BertAttentionClsQuery\n'), ((5169, 5199), 'numpy.shape', 'np.shape', (['tokens_to_embrace_bs'], {}), '(tokens_to_embrace_bs)\n', (5177, 5199), True, 'import numpy as np\n'), ((6108, 6153), 'torch.tensor', 'torch.tensor', (['cls_token_bs'], {'dtype': 
'torch.float'}), '(cls_token_bs, dtype=torch.float)\n', (6120, 6153), False, 'import torch\n'), ((2274, 2311), 'models.SelfAttentionLayer.BertMultiSelfAttentionScoresP', 'BertMultiSelfAttentionScoresP', (['config'], {}), '(config)\n', (2303, 2311), False, 'from models.SelfAttentionLayer import SelfAttention, SelfAttentionPytorch, BertSelfAttentionScores, BertSelfAttentionScoresP, BertMultiSelfAttentionScoresP, BertMultiAttentionScoresP, BertAttentionClsQuery\n'), ((6693, 6746), 'torch.tensor', 'torch.tensor', (['tokens_to_embrace_bs'], {'dtype': 'torch.float'}), '(tokens_to_embrace_bs, dtype=torch.float)\n', (6705, 6746), False, 'import torch\n'), ((9258, 9311), 'torch.tensor', 'torch.tensor', (['tokens_to_embrace_bs'], {'dtype': 'torch.float'}), '(tokens_to_embrace_bs, dtype=torch.float)\n', (9270, 9311), False, 'import torch\n'), ((2497, 2546), 'models.SelfAttentionLayer.BertMultiAttentionScoresP', 'BertMultiAttentionScoresP', (['config', 'max_seq_length'], {}), '(config, max_seq_length)\n', (2522, 2546), False, 'from models.SelfAttentionLayer import SelfAttention, SelfAttentionPytorch, BertSelfAttentionScores, BertSelfAttentionScoresP, BertMultiSelfAttentionScoresP, BertMultiAttentionScoresP, BertAttentionClsQuery\n'), ((2574, 2592), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (2584, 2592), False, 'from torch import nn\n'), ((6604, 6634), 'numpy.shape', 'np.shape', (['tokens_to_embrace_bs'], {}), '(tokens_to_embrace_bs)\n', (6612, 6634), True, 'import numpy as np\n'), ((9354, 9399), 'torch.tensor', 'torch.tensor', (['cls_token_bs'], {'dtype': 'torch.float'}), '(cls_token_bs, dtype=torch.float)\n', (9366, 9399), False, 'import torch\n'), ((9919, 9972), 'torch.tensor', 'torch.tensor', (['tokens_to_embrace_bs'], {'dtype': 'torch.float'}), '(tokens_to_embrace_bs, dtype=torch.float)\n', (9931, 9972), False, 'import torch\n'), ((9826, 9856), 'numpy.shape', 'np.shape', (['tokens_to_embrace_bs'], {}), '(tokens_to_embrace_bs)\n', (9834, 9856), 
True, 'import numpy as np\n'), ((10454, 10507), 'torch.tensor', 'torch.tensor', (['tokens_to_embrace_bs'], {'dtype': 'torch.float'}), '(tokens_to_embrace_bs, dtype=torch.float)\n', (10466, 10507), False, 'import torch\n'), ((10870, 10923), 'torch.tensor', 'torch.tensor', (['tokens_to_embrace_bs'], {'dtype': 'torch.float'}), '(tokens_to_embrace_bs, dtype=torch.float)\n', (10882, 10923), False, 'import torch\n'), ((10361, 10391), 'numpy.shape', 'np.shape', (['tokens_to_embrace_bs'], {}), '(tokens_to_embrace_bs)\n', (10369, 10391), True, 'import numpy as np\n')] |
###############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
import csv
from io import StringIO
import logging
from eccodes import (codes_bufr_new_from_samples, codes_release)
import pytest
from csv2bufr import (validate_mapping, apply_scaling, validate_value,
transform, SUCCESS)
# Module-level logger for the test suite; verbose by default.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
# test data
@pytest.fixture
def mapping_dict():
    """csv2bufr mapping document: fixed BUFR section values in "header"
    plus CSV-column -> eccodes-key pairs in "data" for one synoptic-style
    observation (mean-sea-level pressure + air temperature) per row."""
    return {
        "inputDelayedDescriptorReplicationFactor": [],
        "number_header_rows": 1,
        "names_on_row": 1,
        "header": [
            {"eccodes_key": "edition", "value": 4},  # noqa
            {"eccodes_key": "masterTableNumber", "value": 0},  # noqa
            {"eccodes_key": "bufrHeaderCentre", "value": 0},  # noqa
            {"eccodes_key": "bufrHeaderSubCentre", "value": 0},  # noqa
            {"eccodes_key": "updateSequenceNumber", "value": 0},  # noqa
            {"eccodes_key": "dataCategory", "value": 0},  # noqa
            {"eccodes_key": "internationalDataSubCategory", "value": 6},  # noqa
            {"eccodes_key": "masterTablesVersionNumber", "value": 36},  # noqa
            {"eccodes_key": "numberOfSubsets", "value": 1},  # noqa
            {"eccodes_key": "observedData", "value": 1},  # noqa
            {"eccodes_key": "compressedData", "value": 0},  # noqa
            {"eccodes_key": "typicalYear", "csv_column": "year"},  # noqa
            {"eccodes_key": "typicalMonth", "csv_column": "month"},  # noqa
            {"eccodes_key": "typicalDay", "csv_column": "day"},  # noqa
            {"eccodes_key": "typicalHour", "csv_column": "hour"},  # noqa
            {"eccodes_key": "typicalMinute", "csv_column": "minute"},  # noqa
            {"eccodes_key": "unexpandedDescriptors","value": [301021, 301011, 301012, 10051, 12101]}  # noqa
        ],
        "data": [
            {"eccodes_key": "#1#year", "csv_column": "year"},  # noqa
            {"eccodes_key": "#1#month", "csv_column": "month"},  # noqa
            {"eccodes_key": "#1#day", "csv_column": "day"},  # noqa
            {"eccodes_key": "#1#hour", "csv_column": "hour"},  # noqa
            {"eccodes_key": "#1#minute", "csv_column": "minute"},  # noqa
            {"eccodes_key": "#1#latitude", "csv_column": "latitude"},  # noqa
            {"eccodes_key": "#1#longitude", "csv_column": "longitude"},  # noqa
            {"eccodes_key": "#1#pressureReducedToMeanSeaLevel", "csv_column": "pressure"},  # noqa
            {"eccodes_key": "#1#airTemperature", "csv_column": "air_temperature"}  # noqa
        ]
    }
@pytest.fixture
def data_dict():
    """Single synthetic observation row used as CSV input."""
    return dict(
        air_temperature=290.31,
        pressure=100130,
        latitude=55.154,
        longitude=0.0,
        year=2021,
        month=11,
        day=18,
        hour=18,
        minute=0,
    )
@pytest.fixture
def data_to_encode():
    """Expected eccodes key/value pairs for the encoded BUFR message that
    corresponds to the `data_dict` row under `mapping_dict`."""
    return {
        "edition": 4,
        "masterTableNumber": 0,
        "bufrHeaderCentre": 0,
        "bufrHeaderSubCentre": 0,
        "updateSequenceNumber": 0,
        "section1Flags": 0,
        "dataCategory": 0,
        "internationalDataSubCategory": 6,
        "masterTablesVersionNumber": 36,
        "numberOfSubsets": 1,
        "observedData": 1,
        "compressedData": 0,
        "typicalYear": 2021.0,
        "typicalMonth": 11.0,
        "typicalDay": 18.0,
        "typicalHour": 18.0,
        "typicalMinute": 0.0,
        "unexpandedDescriptors": [
            301021,
            301011,
            301012,
            10051,
            12101
        ],
        "#1#year": 2021.0,
        "#1#month": 11.0,
        "#1#day": 18.0,
        "#1#hour": 18.0,
        "#1#minute": 0.0,
        "#1#latitude": 55.154,
        "#1#longitude": 0.0,
        "#1#pressureReducedToMeanSeaLevel": 100130.0,
        "#1#airTemperature": 290.31
    }
@pytest.fixture
def station_dict():
    """Minimal station metadata record with a single WIGOS identifier."""
    metadata = {"last-sync": "2021-10-22"}
    data = {"station-name": "test data"}
    wigos_ids = [{"wid": "0-1-2-ABCD"}]
    return {"metadata": metadata, "data": data, "wigosIds": wigos_ids}
def test_eccodes():
    """Smoke test: the eccodes library can create and free a BUFR message."""
    # creating a message from the built-in sample proves eccodes is usable
    handle = codes_bufr_new_from_samples('BUFR4')
    codes_release(handle)
    assert True
def test_validate_mapping_pass(mapping_dict):
    """A well-formed mapping validates as SUCCESS."""
    assert validate_mapping(mapping_dict) == SUCCESS
def test_validate_mapping_fail():
    """An incomplete mapping must not validate as SUCCESS (it may either
    raise or return a non-SUCCESS value)."""
    bad_mapping = {
        "inputDelayedDescriptorReplicationFactor": [],
        "header": [],
        "data": [
            {"eccodes_key": "abc", "value": 1, "offset": 1},  # noqa
            {"eccodes_key": "def", "csv_column": "col1", "valid-min": 0, "valid-max": 10},  # noqa
            {"eccodes_key": "ghi", "csv_column": "col2", "valid-min": 250.0, "valid-max": 350.0, "scale": 0.0, "offset": 273.15},  # noqa
            {"eccodes_key": "jkl", "csv_column": "col3", "valid-min": 90000.0, "valid-max": 120000.0, "scale": 2.0, "offset": 0.0}  # noqa
        ]
    }
    try:
        outcome = validate_mapping(bad_mapping)
    except Exception:
        outcome = False
    assert outcome != SUCCESS
def test_apply_scaling():
    """Scaling 10.0 with scale 1 and offset 20.0 must give 120.0."""
    assert apply_scaling(10.0, 1, 20.0) == 120.0
def test_validate_value_pass():
    """A value inside the valid range is returned unchanged."""
    value_in = 10.0
    try:
        result = validate_value("test value", value_in, 0.0, 100.0, False)
    except Exception:
        assert False
    assert result == value_in
def test_validate_value_fail():
    """A value above valid-max raises when nullify is not requested."""
    try:
        _ = validate_value("test value", 10.0, 0.0, 9.9, False)
    except Exception:
        return  # expected path: out-of-range value rejected
    assert False
def test_validate_value_nullify():
    """With nullify=True an out-of-range value becomes None instead of raising."""
    try:
        result = validate_value("test value", 10.0, 0.0, 9.9, True)
    except Exception:
        assert False
    assert result is None
# check that test transform works
def test_transform(data_dict, station_dict, mapping_dict):
# create CSV
output = StringIO()
writer = csv.DictWriter(output, quoting=csv.QUOTE_NONNUMERIC,
fieldnames=data_dict.keys())
writer.writeheader()
writer.writerow(data_dict)
data = output.getvalue()
result = transform(data, station_dict, mapping_dict)
for item in result:
assert isinstance(item, dict)
assert "_meta" in item
item_meta_keys = ['data_category', 'data_date', 'identifier',
'md5', 'originating_centre', 'wigos_id']
assert sorted(item["_meta"].keys()) == item_meta_keys
assert item["_meta"]["md5"] == "981938dbd97be3e5adc8e7b1c6eb642c"
| [
"logging.getLogger",
"eccodes.codes_release",
"csv2bufr.validate_mapping",
"csv2bufr.apply_scaling",
"csv2bufr.validate_value",
"eccodes.codes_bufr_new_from_samples",
"csv2bufr.transform",
"io.StringIO"
] | [((1204, 1231), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1221, 1231), False, 'import logging\n'), ((5262, 5298), 'eccodes.codes_bufr_new_from_samples', 'codes_bufr_new_from_samples', (['"""BUFR4"""'], {}), "('BUFR4')\n", (5289, 5298), False, 'from eccodes import codes_bufr_new_from_samples, codes_release\n'), ((5342, 5365), 'eccodes.codes_release', 'codes_release', (['bufr_msg'], {}), '(bufr_msg)\n', (5355, 5365), False, 'from eccodes import codes_bufr_new_from_samples, codes_release\n'), ((5491, 5521), 'csv2bufr.validate_mapping', 'validate_mapping', (['mapping_dict'], {}), '(mapping_dict)\n', (5507, 5521), False, 'from csv2bufr import validate_mapping, apply_scaling, validate_value, transform, SUCCESS\n'), ((7582, 7592), 'io.StringIO', 'StringIO', ([], {}), '()\n', (7590, 7592), False, 'from io import StringIO\n'), ((7814, 7857), 'csv2bufr.transform', 'transform', (['data', 'station_dict', 'mapping_dict'], {}), '(data, station_dict, mapping_dict)\n', (7823, 7857), False, 'from csv2bufr import validate_mapping, apply_scaling, validate_value, transform, SUCCESS\n'), ((6366, 6393), 'csv2bufr.validate_mapping', 'validate_mapping', (['test_data'], {}), '(test_data)\n', (6382, 6393), False, 'from csv2bufr import validate_mapping, apply_scaling, validate_value, transform, SUCCESS\n'), ((6624, 6664), 'csv2bufr.apply_scaling', 'apply_scaling', (['test_value', 'scale', 'offset'], {}), '(test_value, scale, offset)\n', (6637, 6664), False, 'from csv2bufr import validate_mapping, apply_scaling, validate_value, transform, SUCCESS\n'), ((6786, 6846), 'csv2bufr.validate_value', 'validate_value', (['"""test value"""', 'input_value', '(0.0)', '(100.0)', '(False)'], {}), "('test value', input_value, 0.0, 100.0, False)\n", (6800, 6846), False, 'from csv2bufr import validate_mapping, apply_scaling, validate_value, transform, SUCCESS\n'), ((7060, 7118), 'csv2bufr.validate_value', 'validate_value', (['"""test value"""', 'input_value', '(0.0)', 
'(9.9)', '(False)'], {}), "('test value', input_value, 0.0, 9.9, False)\n", (7074, 7118), False, 'from csv2bufr import validate_mapping, apply_scaling, validate_value, transform, SUCCESS\n'), ((7331, 7388), 'csv2bufr.validate_value', 'validate_value', (['"""test value"""', 'input_value', '(0.0)', '(9.9)', '(True)'], {}), "('test value', input_value, 0.0, 9.9, True)\n", (7345, 7388), False, 'from csv2bufr import validate_mapping, apply_scaling, validate_value, transform, SUCCESS\n')] |
from django.urls import path
from flights import views
urlpatterns = [path("", views.index)]
| [
"django.urls.path"
] | [((71, 92), 'django.urls.path', 'path', (['""""""', 'views.index'], {}), "('', views.index)\n", (75, 92), False, 'from django.urls import path\n')] |
import sys
from setuptools import setup, find_packages
def get_version(fname):
import re
verstrline = open(fname, "rt").read()
mo = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", verstrline, re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (fname,))
def get_test_requirements():
requirements = []
if sys.version_info[0:2] == (2, 6):
requirements.append('unittest2')
return requirements
setup(
name='configy',
version=get_version('configy/__init__.py'),
description='Simple Configuration manager, plays well with testing',
long_description=open('README.rst').read(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/grigi/configy',
zip_safe=False,
test_suite='configy.test_suite',
# Dependencies
install_requires=[
'PyYAML',
],
tests_require=get_test_requirements(),
# Packages
packages=find_packages(),
include_package_data=True,
# Scripts
scripts=[],
# Classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| [
"setuptools.find_packages",
"re.search"
] | [((145, 216), 're.search', 're.search', (['"""^__version__ = [\'\\\\"]([^\'\\\\"]*)[\'\\\\"]"""', 'verstrline', 're.M'], {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]*)[\\\'\\\\"]\', verstrline, re.M)\n', (154, 216), False, 'import re\n'), ((982, 997), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (995, 997), False, 'from setuptools import setup, find_packages\n')] |
# -*- coding: utf-8 -*-
"""Usage:
acli ec2 (ls | list | summary) [options] [--region=<region>]
acli ec2 (start | stop | reboot | terminate | info | cpu | vols | net) <instance_id> [options]
-f, --filter=<term> filter results by term
-s, --start=<start_date> metrics start-date
-e, --end=<end_date> metrics end-date
-p, --period=<period> metrics period
-i, --intervals=<intervals> metrics intervals
-h, --help
"""
from __future__ import (absolute_import, print_function, unicode_literals)
from docopt import docopt
from acli.services import (ec2, cloudwatch)
def ec2_command(argv=None, aws_config=None):
ec2_res = docopt(__doc__, argv=argv)
if any((ec2_res.get('ls'), ec2_res.get('list'))):
ec2.ec2_list(aws_config, filter_term=ec2_res.get('--filter'))
elif ec2_res.get('info'):
ec2.ec2_info(aws_config, instance_id=ec2_res.get('<instance_id>'))
elif ec2_res.get('stop'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="stop")
elif ec2_res.get('reboot'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="reboot")
elif ec2_res.get('start'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="start")
elif ec2_res.get('terminate'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="terminate")
elif ec2_res.get('cpu'):
cloudwatch.ec2_cpu(aws_config=aws_config, instance_id=ec2_res.get('<instance_id>'))
elif ec2_res.get('net'):
cloudwatch.ec2_net(aws_config=aws_config,
instance_id=ec2_res.get('<instance_id>'),
start=ec2_res.get('--start'),
period=ec2_res.get('--end'),
intervals=ec2_res.get('intervals')
)
elif ec2_res.get('vols'):
cloudwatch.ec2_vol(aws_config=aws_config,
instance_id=ec2_res.get('<instance_id>'),
start=ec2_res.get('--start'),
period=ec2_res.get('--end'),
intervals=ec2_res.get('intervals')
)
elif ec2_res.get('summary'):
ec2.ec2_summary(aws_config=aws_config)
if __name__ == '__main__':
print(docopt(__doc__))
| [
"acli.services.ec2.ec2_summary",
"docopt.docopt"
] | [((696, 722), 'docopt.docopt', 'docopt', (['__doc__'], {'argv': 'argv'}), '(__doc__, argv=argv)\n', (702, 722), False, 'from docopt import docopt\n'), ((2400, 2415), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (2406, 2415), False, 'from docopt import docopt\n'), ((2323, 2361), 'acli.services.ec2.ec2_summary', 'ec2.ec2_summary', ([], {'aws_config': 'aws_config'}), '(aws_config=aws_config)\n', (2338, 2361), False, 'from acli.services import ec2, cloudwatch\n')] |
import json
from requests import ConnectionError
from config import *
from utils import *
from get_auth import TOKEN
# Create network
create_network_url = "http://{}:9696/v2.0/networks".format(IP)
token_headers = {
'X-Auth-Token': TOKEN,
'Content-Type': 'application/json'
}
# Create router
create_router_url = "http://{}:9696/v2.0/routers".format(IP)
# Get network for DELETE
get_network_list_url = create_network_url
future = send_request(get_network_list_url, 'GET', headers=token_headers)
result = future.result().content
result = json.loads(result)
list_networks = result.get("networks")
list_networks = [network for network in list_networks if "testing" in network.get('name')]
# Get routers for DELETE
get_router_list_url = create_router_url
future = send_request(get_router_list_url, 'GET', headers=token_headers)
result = future.result().content
result = json.loads(result)
list_routers = result.get("routers")
list_routers = [router for router in list_routers if "testing" in router.get('name')]
# Update network
# We should have separate network for updating --> ensure have network for update, that is.
NETWORK_ID = "f6e3556e-29ab-4ee7-ba64-7fab0c423e26"
# Update router
# We should have separate router for updating --> ensure have router for update, that is.
ROUTER_ID = "b0e19990-d9ba-4981-9da7-5aeec2957c77"
if __name__ == '__main__':
i = 1
while continue_test:
time.sleep(0.3)
try:
# Create network
create_network_data = {
"network": {
"name": "new_network_{}".format(i)
}
}
i += 1
future = send_request(create_network_url, 'POST',
headers=token_headers,
data=json.JSONEncoder().encode(create_network_data))
try:
result = future.result().content
result = json.loads(result)
network = result.get('network')
if type(network) is dict:
network_id = result['network']['id']
create_subnet_data = {
"subnet": {
"network_id": network_id,
"ip_version": 4,
"cidr": "192.168.199.0/24"
}
}
create_subnet_url = "http://{}:9696/v2.0/subnets".format(IP)
send_request(create_subnet_url, 'POST',
headers=token_headers,
data=json.JSONEncoder().encode(create_subnet_data))
except:
pass
# Get and delete network
if not (len(list_networks) == 0):
delete_network = list_networks.pop()
delete_network_id = delete_network.get("id")
get_network_url = "http://{}:9696/v2.0/networks/{}".format(IP, delete_network_id)
send_request(get_network_url, 'GET', headers=token_headers)
send_request(get_network_url, 'DELETE', headers=token_headers)
# Update network name
update_network_data = {
"network": {
"name": "new_name_{}".format(i)
}
}
update_network_url = "http://{}:9696/v2.0/networks/{}".format(IP, NETWORK_ID)
send_request(update_network_url, 'PUT',
headers=token_headers,
data=json.JSONEncoder().encode(update_network_data))
# Create router
create_router_data = {
"router": {
"name": "new_router_{}".format(i)
}
}
future = send_request(create_router_url, 'POST',
headers=token_headers,
data=json.JSONEncoder().encode(create_router_data))
# Get and delete network
if not (len(list_routers) == 0):
delete_router = list_routers.pop()
delete_router_id = delete_router.get("id")
get_router_url = "http://{}:9696/v2.0/routers/{}".format(IP, delete_router_id)
send_request(get_router_url, 'GET', headers=token_headers)
send_request(get_router_url, 'DELETE', headers=token_headers)
# Update router name
update_router_data = {
"router": {
"name": "new_name_{}".format(i)
}
}
update_router_url = "http://{}:9696/v2.0/routers/{}".format(IP, ROUTER_ID)
send_request(update_router_url, 'PUT',
headers=token_headers,
data=json.JSONEncoder().encode(update_router_data))
except ConnectionError:
pass
| [
"json.loads",
"json.JSONEncoder"
] | [((547, 565), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (557, 565), False, 'import json\n'), ((877, 895), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (887, 895), False, 'import json\n'), ((1936, 1954), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (1946, 1954), False, 'import json\n'), ((1797, 1815), 'json.JSONEncoder', 'json.JSONEncoder', ([], {}), '()\n', (1813, 1815), False, 'import json\n'), ((3561, 3579), 'json.JSONEncoder', 'json.JSONEncoder', ([], {}), '()\n', (3577, 3579), False, 'import json\n'), ((3944, 3962), 'json.JSONEncoder', 'json.JSONEncoder', ([], {}), '()\n', (3960, 3962), False, 'import json\n'), ((4829, 4847), 'json.JSONEncoder', 'json.JSONEncoder', ([], {}), '()\n', (4845, 4847), False, 'import json\n'), ((2618, 2636), 'json.JSONEncoder', 'json.JSONEncoder', ([], {}), '()\n', (2634, 2636), False, 'import json\n')] |
import os
import sys
import signal
import asyncio
import json
import time
import traceback
import typing
import socket
import re
import select
import websockets
if sys.platform != "win32":
import termios
import tty
else:
import msvcrt
import win32api
from .. import api
from ..shared import constants, log, types as t
from ..shared.constants import State
import conducto.internal.host_detection as hostdet
if sys.version_info < (3, 7):
# create_task is stdlib in 3.7, but we can declare it as a synonym for the
# 3.6 ensure_future
asyncio.create_task = asyncio.ensure_future
STATE_TO_COLOR = {
State.PENDING: log.Color.TRUEWHITE,
State.QUEUED: log.Color.GRAY,
State.RUNNING: log.Color.BLUE,
State.DONE: log.Color.GREEN,
State.ERROR: log.Color.RED,
State.WORKER_ERROR: log.Color.PURPLE,
}
class Listener(object):
def update_node(self, name, data):
pass
async def background_task(self, title):
pass
async def key_press(self, char):
# Listeners are passed the quit_func so that they can decide when to exit
pass
def render(self):
pass
def shutdown(self):
pass
def connect(token: t.Token, pipeline_id: t.PipelineId, starthelp: str):
pipeline = api.Pipeline().get(pipeline_id, token=token)
ui = ShellUI(token, pipeline, starthelp)
if sys.platform == "win32":
win32api.SetConsoleCtrlHandler(ui.ctrl_c, True)
try:
asyncio.get_event_loop().run_until_complete(ui.run())
except Exception:
ui.reset_stdin()
traceback.print_exc()
class ShellUI(object):
def __init__(self, token, pipeline: dict, starthelp: str):
self.pipeline = pipeline
self.quitting = False
self.loop = asyncio.get_event_loop()
self.gw_socket = None
self.start_func_complete = None
self.starthelp = starthelp
from . import one_line, full_screen
self.listeners: typing.List[Listener] = [one_line.OneLineDisplay(self)]
@property
def allow_sleep(self):
# TODO: This is an ugly time-out to avoid shutting down the shell UI
# because the NS cache still believes the pipeline is sleeping.
return self.start_func_complete and time.time() > self.start_func_complete + 3.0
async def view_loop(self):
"""
Every 0.25 seconds render the pipeline
"""
log.info("[view] starting")
while True:
await asyncio.sleep(0.25)
for listener in self.listeners:
listener.render()
def set_gw(self, gw_socket):
self.gw_socket = gw_socket
async def wait_gw(self):
while self.gw_socket is None:
await asyncio.sleep(0.1)
async def start_pipeline(self):
if self.gw_socket is None:
pipeline_id = self.pipeline["pipeline_id"]
api.Manager().launch(pipeline_id)
await self.wait_gw()
payload = {"type": "SET_AUTORUN", "payload": {"value": True}}
await self.gw_socket.send(json.dumps(payload))
async def sleep_pipeline(self):
if self.gw_socket is None:
pipeline_id = self.pipeline["pipeline_id"]
api.Pipeline().sleep_standby(pipeline_id)
else:
payload = {"type": "CLOSE_PROGRAM", "payload": None}
await self.gw_socket.send(json.dumps(payload))
async def reset(self):
if self.gw_socket is None:
pipeline_id = self.pipeline["pipeline_id"]
api.Manager().launch(pipeline_id)
await self.wait_gw()
payload = {"type": "RESET", "payload": ["/"]}
await self.gw_socket.send(json.dumps(payload))
async def gw_socket_loop(self):
"""
Loop and listen for socket messages
"""
start_tasks = await self.run_start_func()
pl = constants.PipelineLifecycle
while True:
if (
self.pipeline is None
or self.pipeline.get("status", None) not in pl.active
):
await asyncio.sleep(0.5)
continue
if start_tasks is not None:
tasks = start_tasks
# we clear the start_tasks now since later reconnects should
# show reconnecting.
start_tasks = None
else:
msg = "Connection lost. Reconnecting"
pretasks = [xx.background_task(msg) for xx in self.listeners]
tasks = [asyncio.create_task(task) for task in pretasks]
try:
websocket = await api.connect_to_pipeline(self.pipeline["pipeline_id"])
except PermissionError:
print()
print("You are not permitted to connect to this pipeline.")
self.quit()
break
except ConnectionError:
self.quit()
break
for task in tasks:
task.cancel()
for listener in self.listeners:
listener.install_normal_key_mode()
self.set_gw(websocket)
was_slept = False
try:
await websocket.send(
json.dumps({"type": "RENDER_NODE", "payload": "/"})
)
log.info("[gw_socket_loop] starting")
async for msg_text in websocket:
msg = json.loads(msg_text)
if msg["type"] in ("NODES_STATE_UPDATE", "RENDER_NODE"):
log.debug(f"incoming gw message {msg['type']}")
for name, data in msg["payload"].items():
for listener in self.listeners:
listener.update_node(name, data)
elif msg["type"] == "SLEEP":
was_slept = True
# we are done here, do not try to reconnect.
break
except websockets.ConnectionClosedError as e:
log.debug(f"ConnectionClosedError {e.code} {e.reason}")
self.set_gw(None)
if was_slept:
break
def get_ns_url(self):
url = api.Config().get_url()
url = re.sub("^http", "ws", url) + "/ns/"
return url
async def reconnect_ns(self):
ns_url = self.get_ns_url()
log.debug("[run] Connecting to", ns_url)
header = {"Authorization": f"bearer {api.Config().get_token(refresh=False)}"}
# we retry connection for roughly 2 minutes
for i in range(45):
try:
websocket = await websockets.connect(ns_url, extra_headers=header)
break
except (
websockets.ConnectionClosedError,
websockets.InvalidStatusCode,
socket.gaierror,
):
log.debug(f"cannot connect to ns ... waiting {i}")
await asyncio.sleep(min(3.0, (2 ** i) / 8))
else:
self.quit()
return None
log.debug("[run] ns Connected")
return websocket
async def ns_socket_loop(self):
"""
Loop and listen for socket messages
"""
while True:
msg = "Connection lost. Reconnecting"
if self.start_func_complete is not None:
pretasks = [xx.background_task(msg) for xx in self.listeners]
else:
pretasks = []
tasks = [asyncio.create_task(task) for task in pretasks]
websocket = await self.reconnect_ns()
for task in tasks:
task.cancel()
if websocket is None:
if self.start_func_complete is not None:
for listener in self.listeners:
listener.install_disconnect_mode()
self.quit()
break
if self.start_func_complete is not None:
for listener in self.listeners:
listener.install_normal_key_mode()
subscribe = {
"type": "SUBSCRIBE",
"payload": {"pipeline_id": self.pipeline["pipeline_id"]},
}
await websocket.send(json.dumps(subscribe))
try:
log.info("[ns_socket_loop] starting")
async for msg_text in websocket:
msg = json.loads(msg_text)
if msg["type"] in ("FULL_INFO_UPDATE",):
log.debug(f"incoming ns message {msg['type']}")
progs = msg["payload"]["programIdToInfo"]
try:
self.pipeline = progs[self.pipeline["pipeline_id"]]
except KeyError:
# TODO: the NS cache may not yet have the pipeline,
# this is to allow for that.
if self.allow_sleep:
raise
else:
continue
if "state" not in self.pipeline["meta"]:
self.pipeline["meta"] = {
"state": "pending",
"stateCounts": {x: 0 for x in STATE_TO_COLOR.keys()},
}
pl = constants.PipelineLifecycle
if self.pipeline["status"] in pl.sleeping and self.allow_sleep:
self.quit()
elif self.pipeline["status"] not in pl.active:
for listener in self.listeners:
listener.update_node("/", self.pipeline["meta"])
except websockets.ConnectionClosedError:
pass
def ctrl_c(self, a, b=None):
# This is the windows control C handler
self.quit()
return True
async def key_loop(self):
"""
Loop and listen for key inputs
"""
log.info("[key_loop] starting")
if sys.platform != "win32":
self.old_settings = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(sys.stdin.fileno())
async for char in stream_as_char_generator(self.loop, sys.stdin):
if ord(char) in (3, 4):
# Ctrl+c (sigint) & Ctrl+d (eof) get captured as a non-printing
# characters with ASCII code 3 & 4 respectively. Quit
# gracefully.
self.quit()
elif ord(char) == 26:
# Ctrl+z gets captured as a non-printing character with ASCII
# code 26. Send SIGSTOP and reset the terminal.
self.reset_stdin()
os.kill(os.getpid(), signal.SIGSTOP)
if sys.platform != "win32":
self.old_settings = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(sys.stdin.fileno())
for listener in self.listeners:
await listener.key_press(char)
self.reset_stdin()
def reset_stdin(self):
if hasattr(self, "old_settings"):
termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, self.old_settings)
async def run_start_func(self):
pretasks = [
xx.background_task(self.starthelp, immediate=True) for xx in self.listeners
]
tasks = [asyncio.create_task(task) for task in pretasks]
self.start_func_complete = time.time()
return tasks
async def run(self):
# Start all the loops. The view and socket loops are nonblocking The
# key_loop needs to be run separately because it blocks on user input
tasks = [
self.loop.create_task(self.view_loop()),
self.loop.create_task(self.gw_socket_loop()),
self.loop.create_task(self.ns_socket_loop()),
self.loop.create_task(self.key_loop()),
]
# Wait on all of them. The `gather` variable can be cancelled in
# `key_task()` if the user Ctrl+c's, which will cause the other loops
# to be cancelled gracefully.
self.gather_handle = asyncio.gather(*tasks)
try:
await self.gather_handle
except asyncio.CancelledError:
return
except websockets.ConnectionClosedError:
self.reset_stdin()
return
else:
log.error("gather_handle returned but it shouldn't have!")
raise Exception("gather_handle returned but it shouldn't have!")
finally:
for listener in self.listeners:
listener.shutdown()
def disconnect(self):
self.quit()
def quit(self):
"""
Make all event loops quit
"""
self.reset_stdin()
self.quitting = True
self.gather_handle.cancel()
def stdin_data():
return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], [])
async def stream_as_char_generator(loop, stream):
if sys.platform != "win32":
has_key = stdin_data
read_key = lambda: stream.read(1)
else:
has_key = msvcrt.kbhit
read_key = lambda: msvcrt.getch().decode("ascii")
while True:
await asyncio.sleep(0.05)
if has_key():
char = read_key()
if not char: # EOF.
break
yield char
| [
"select.select",
"sys.stdin.fileno",
"json.loads",
"asyncio.sleep",
"json.dumps",
"msvcrt.getch",
"traceback.print_exc",
"websockets.connect",
"win32api.SetConsoleCtrlHandler",
"os.getpid",
"asyncio.gather",
"re.sub",
"asyncio.get_event_loop",
"time.time",
"asyncio.create_task"
] | [((1409, 1456), 'win32api.SetConsoleCtrlHandler', 'win32api.SetConsoleCtrlHandler', (['ui.ctrl_c', '(True)'], {}), '(ui.ctrl_c, True)\n', (1439, 1456), False, 'import win32api\n'), ((1776, 1800), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1798, 1800), False, 'import asyncio\n'), ((11615, 11626), 'time.time', 'time.time', ([], {}), '()\n', (11624, 11626), False, 'import time\n'), ((12297, 12319), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {}), '(*tasks)\n', (12311, 12319), False, 'import asyncio\n'), ((13036, 13073), 'select.select', 'select.select', (['[sys.stdin]', '[]', '[]', '(0)'], {}), '([sys.stdin], [], [], 0)\n', (13049, 13073), False, 'import select\n'), ((1583, 1604), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1602, 1604), False, 'import traceback\n'), ((6308, 6334), 're.sub', 're.sub', (['"""^http"""', '"""ws"""', 'url'], {}), "('^http', 'ws', url)\n", (6314, 6334), False, 'import re\n'), ((11531, 11556), 'asyncio.create_task', 'asyncio.create_task', (['task'], {}), '(task)\n', (11550, 11556), False, 'import asyncio\n'), ((13384, 13403), 'asyncio.sleep', 'asyncio.sleep', (['(0.05)'], {}), '(0.05)\n', (13397, 13403), False, 'import asyncio\n'), ((1474, 1498), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1496, 1498), False, 'import asyncio\n'), ((2268, 2279), 'time.time', 'time.time', ([], {}), '()\n', (2277, 2279), False, 'import time\n'), ((2490, 2509), 'asyncio.sleep', 'asyncio.sleep', (['(0.25)'], {}), '(0.25)\n', (2503, 2509), False, 'import asyncio\n'), ((2743, 2761), 'asyncio.sleep', 'asyncio.sleep', (['(0.1)'], {}), '(0.1)\n', (2756, 2761), False, 'import asyncio\n'), ((3073, 3092), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (3083, 3092), False, 'import json\n'), ((3699, 3718), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (3709, 3718), False, 'import json\n'), ((7566, 7591), 'asyncio.create_task', 'asyncio.create_task', 
(['task'], {}), '(task)\n', (7585, 7591), False, 'import asyncio\n'), ((10263, 10281), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (10279, 10281), False, 'import sys\n'), ((10306, 10324), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (10322, 10324), False, 'import sys\n'), ((11300, 11318), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (11316, 11318), False, 'import sys\n'), ((3392, 3411), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (3402, 3411), False, 'import json\n'), ((4099, 4117), 'asyncio.sleep', 'asyncio.sleep', (['(0.5)'], {}), '(0.5)\n', (4112, 4117), False, 'import asyncio\n'), ((4544, 4569), 'asyncio.create_task', 'asyncio.create_task', (['task'], {}), '(task)\n', (4563, 4569), False, 'import asyncio\n'), ((5471, 5491), 'json.loads', 'json.loads', (['msg_text'], {}), '(msg_text)\n', (5481, 5491), False, 'import json\n'), ((6700, 6748), 'websockets.connect', 'websockets.connect', (['ns_url'], {'extra_headers': 'header'}), '(ns_url, extra_headers=header)\n', (6718, 6748), False, 'import websockets\n'), ((8320, 8341), 'json.dumps', 'json.dumps', (['subscribe'], {}), '(subscribe)\n', (8330, 8341), False, 'import json\n'), ((8490, 8510), 'json.loads', 'json.loads', (['msg_text'], {}), '(msg_text)\n', (8500, 8510), False, 'import json\n'), ((13322, 13336), 'msvcrt.getch', 'msvcrt.getch', ([], {}), '()\n', (13334, 13336), False, 'import msvcrt\n'), ((5271, 5322), 'json.dumps', 'json.dumps', (["{'type': 'RENDER_NODE', 'payload': '/'}"], {}), "({'type': 'RENDER_NODE', 'payload': '/'})\n", (5281, 5322), False, 'import json\n'), ((10879, 10890), 'os.getpid', 'os.getpid', ([], {}), '()\n', (10888, 10890), False, 'import os\n'), ((11010, 11028), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (11026, 11028), False, 'import sys\n'), ((11061, 11079), 'sys.stdin.fileno', 'sys.stdin.fileno', ([], {}), '()\n', (11077, 11079), False, 'import sys\n')] |
""" Functions for ionospheric modelling: see SDP memo 97
"""
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from data_models.memory_data_models import BlockVisibility
from processing_components.calibration.operations import create_gaintable_from_blockvisibility, \
create_gaintable_from_rows
from processing_components.calibration.iterators import gaintable_timeslice_iter
from processing_components.image.operations import copy_image, create_empty_image_like
from processing_components.visibility.base import create_visibility_from_rows
from processing_components.visibility.iterators import vis_timeslice_iter
from processing_library.util.coordinate_support import xyz_to_uvw, skycoord_to_lmn
import logging
log = logging.getLogger(__name__)
def find_pierce_points(station_locations, ha, dec, phasecentre, height):
"""Find the pierce points for a flat screen at specified height
:param station_locations: All station locations [:3]
:param ha: Hour angle
:param dec: Declination
:param phasecentre: Phase centre
:param height: Height of screen
:return:
"""
source_direction = SkyCoord(ra=ha, dec=dec, frame='icrs', equinox='J2000')
local_locations = xyz_to_uvw(station_locations, ha, dec)
local_locations -= numpy.average(local_locations, axis=0)
lmn = numpy.array(skycoord_to_lmn(source_direction, phasecentre))
lmn[2] += 1.0
pierce_points = local_locations + height * numpy.array(lmn)
return pierce_points
def create_gaintable_from_screen(vis, sc, screen, height=3e5, vis_slices=None, scale=1.0, **kwargs):
""" Create gaintables from a screen calculated using ARatmospy
:param vis:
:param sc: Sky components for which pierce points are needed
:param screen:
:param height: Height (in m) of screen above telescope e.g. 3e5
:param scale: Multiply the screen by this factor
:return:
"""
assert isinstance(vis, BlockVisibility)
station_locations = vis.configuration.xyz
nant = station_locations.shape[0]
t2r = numpy.pi / 43200.0
gaintables = [create_gaintable_from_blockvisibility(vis, **kwargs) for i in sc]
# The time in the Visibility is hour angle in seconds!
for iha, rows in enumerate(vis_timeslice_iter(vis, vis_slices=vis_slices)):
v = create_visibility_from_rows(vis, rows)
ha = numpy.average(v.time)
number_bad = 0
number_good = 0
for icomp, comp in enumerate(sc):
pp = find_pierce_points(station_locations, (comp.direction.ra.rad + t2r * ha) * u.rad, comp.direction.dec,
height=height, phasecentre=vis.phasecentre)
scr = numpy.zeros([nant])
for ant in range(nant):
pp0 = pp[ant][0:2]
worldloc = [pp0[0], pp0[1], ha, 1e8]
try:
pixloc = screen.wcs.wcs_world2pix([worldloc], 0)[0].astype('int')
scr[ant] = scale * screen.data[pixloc[3], pixloc[2], pixloc[1], pixloc[0]]
number_good += 1
except:
number_bad += 1
scr[ant] = 0.0
gaintables[icomp].gain[iha, :, :, :] = numpy.exp(1j * scr[:, numpy.newaxis, numpy.newaxis, numpy.newaxis])
gaintables[icomp].phasecentre = comp.direction
if number_bad > 0:
log.warning("create_gaintable_from_screen: %d pierce points are inside the screen image" % (number_good))
log.warning("create_gaintable_from_screen: %d pierce points are outside the screen image" % (number_bad))
return gaintables
def grid_gaintable_to_screen(vis, gaintables, screen, height=3e5, gaintable_slices=None, scale=1.0, **kwargs):
""" Grid a gaintable to a screen image
The phases are just average per grid cell, no phase unwrapping is performed.
:param vis:
:param sc: Sky components for which pierce points are needed
:param screen:
:param height: Height (in m) of screen above telescope e.g. 3e5
:param scale: Multiply the screen by this factor
:return: gridded screen image, weights image
"""
assert isinstance(vis, BlockVisibility)
station_locations = vis.configuration.xyz
nant = station_locations.shape[0]
t2r = numpy.pi / 43200.0
newscreen = create_empty_image_like(screen)
weights = create_empty_image_like(screen)
nchan, ntimes, ny, nx = screen.shape
# The time in the Visibility is hour angle in seconds!
number_no_weight = 0
for gaintable in gaintables:
for iha, rows in enumerate(gaintable_timeslice_iter(gaintable, gaintable_slices=gaintable_slices)):
gt = create_gaintable_from_rows(gaintable, rows)
ha = numpy.average(gt.time)
pp = find_pierce_points(station_locations,
(gt.phasecentre.ra.rad + t2r * ha) * u.rad,
gt.phasecentre.dec,
height=height,
phasecentre=vis.phasecentre)
scr = numpy.angle(gt.gain[0, :, 0, 0, 0])
wt = gt.weight[0, :, 0, 0, 0]
for ant in range(nant):
pp0 = pp[ant][0:2]
worldloc = [pp0[0], pp0[1], ha, 1e8]
pixloc = newscreen.wcs.wcs_world2pix([worldloc], 0)[0].astype('int')
assert pixloc[0] >= 0
assert pixloc[0] < nx
assert pixloc[1] >= 0
assert pixloc[1] < ny
newscreen.data[pixloc[3], pixloc[2], pixloc[1], pixloc[0]] += wt[ant] * scr[ant]
weights.data[pixloc[3], pixloc[2], pixloc[1], pixloc[0]] += wt[ant]
if wt[ant] == 0.0:
number_no_weight += 1
if number_no_weight > 0:
print("grid_gaintable_to_screen: %d pierce points are have no weight" % (number_no_weight))
log.warning("grid_gaintable_to_screen: %d pierce points are have no weight" % (number_no_weight))
newscreen.data[weights.data > 0.0] = newscreen.data[weights.data > 0.0] / weights.data[weights.data > 0.0]
return newscreen, weights
def calculate_sf_from_screen(screen):
    """ Calculate structure function image from screen

    Screen axes are ['XX', 'YY', 'TIME', 'FREQ']. For each channel the
    autocorrelation of the screen is accumulated over all time planes,
    normalised to a peak of 1.0, and converted to a structure function
    (1 - normalised autocorrelation). Only the central half of the
    correlation plane (in each direction) is kept in the returned image.

    :param screen: Screen image with data shaped [nchan, ntimes, ny, nx]
    :return: structure function image (single plane per channel)
    """
    from scipy.signal import fftconvolve
    nchan, ntimes, ny, nx = screen.data.shape
    # Full (2N-1)-sized correlation plane, one accumulator per channel.
    sf = numpy.zeros([nchan, 1, 2 * ny - 1, 2 * nx - 1])
    for chan in range(nchan):
        # BUGFIX: previously the itime=0 plane was counted twice (once
        # before the time loop and once inside it) and the loop used
        # ``sf +=`` which broadcast the accumulation into *all* channels
        # instead of only the current one.
        for itime in range(ntimes):
            sf[chan, 0, ...] += fftconvolve(screen.data[chan, itime, ...],
                                            screen.data[chan, itime, ::-1, ::-1])
        sf[chan, 0, ...] /= numpy.max(sf[chan, 0, ...])
        sf[chan, 0, ...] = 1.0 - sf[chan, 0, ...]

    sf_image = copy_image(screen)
    # Keep the central ny//2 x nx//2 region of the correlation plane.
    sf_image.data = sf[:, :, (ny - ny // 4):(ny + ny // 4), (nx - nx // 4):(nx + nx // 4)]
    # BUGFIX: the reference pixel of the first (x) axis previously used ny;
    # it must track the x-axis crop size.
    sf_image.wcs.wcs.crpix[0] = nx // 4 + 1
    sf_image.wcs.wcs.crpix[1] = ny // 4 + 1
    sf_image.wcs.wcs.crpix[2] = 1
    return sf_image
def plot_gaintable_on_screen(vis, gaintables, height=3e5, gaintable_slices=None, plotfile=None):
    """ Scatter-plot gaintable phases at their ionospheric pierce points

    For every time slice of every gaintable, the pierce points of all
    stations through a screen at the given height are found and coloured
    by the per-antenna phase (angle of the first gain term).

    :param vis: BlockVisibility supplying the array layout and phase centre
    :param gaintables: gain tables to plot
    :param height: Height (in m) of screen above telescope e.g. 3e5
    :param gaintable_slices: time slicing passed to gaintable_timeslice_iter
    :param plotfile: optional file name; if given the figure is also saved
    :return: None (figure is displayed via matplotlib)
    """
    import matplotlib.pyplot as plt

    assert isinstance(vis, BlockVisibility)
    antenna_xyz = vis.configuration.xyz
    seconds_to_radians = numpy.pi / 43200.0

    plt.clf()
    for table in gaintables:
        # The time column of a gaintable holds hour angle in seconds,
        # hence the conversion factor above.
        for rows in gaintable_timeslice_iter(table, gaintable_slices=gaintable_slices):
            subtable = create_gaintable_from_rows(table, rows)
            hour_angle = numpy.average(subtable.time)
            pierce = find_pierce_points(
                antenna_xyz,
                (subtable.phasecentre.ra.rad + seconds_to_radians * hour_angle) * u.rad,
                subtable.phasecentre.dec,
                height=height,
                phasecentre=vis.phasecentre,
            )
            antenna_phases = numpy.angle(subtable.gain[0, :, 0, 0, 0])
            plt.scatter(pierce[:, 0], pierce[:, 1], c=antenna_phases,
                        cmap='hsv', alpha=0.75, s=0.1)

    plt.title('Pierce point phases')
    plt.xlabel('X (m)')
    plt.ylabel('Y (m)')
    if plotfile is not None:
        plt.savefig(plotfile)
    plt.show()
"logging.getLogger",
"matplotlib.pyplot.ylabel",
"processing_components.calibration.operations.create_gaintable_from_blockvisibility",
"numpy.array",
"processing_components.calibration.operations.create_gaintable_from_rows",
"processing_library.util.coordinate_support.skycoord_to_lmn",
"matplotlib.pyplo... | [((757, 784), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (774, 784), False, 'import logging\n'), ((1160, 1215), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': 'ha', 'dec': 'dec', 'frame': '"""icrs"""', 'equinox': '"""J2000"""'}), "(ra=ha, dec=dec, frame='icrs', equinox='J2000')\n", (1168, 1215), False, 'from astropy.coordinates import SkyCoord\n'), ((1238, 1276), 'processing_library.util.coordinate_support.xyz_to_uvw', 'xyz_to_uvw', (['station_locations', 'ha', 'dec'], {}), '(station_locations, ha, dec)\n', (1248, 1276), False, 'from processing_library.util.coordinate_support import xyz_to_uvw, skycoord_to_lmn\n'), ((1300, 1338), 'numpy.average', 'numpy.average', (['local_locations'], {'axis': '(0)'}), '(local_locations, axis=0)\n', (1313, 1338), False, 'import numpy\n'), ((4395, 4426), 'processing_components.image.operations.create_empty_image_like', 'create_empty_image_like', (['screen'], {}), '(screen)\n', (4418, 4426), False, 'from processing_components.image.operations import copy_image, create_empty_image_like\n'), ((4441, 4472), 'processing_components.image.operations.create_empty_image_like', 'create_empty_image_like', (['screen'], {}), '(screen)\n', (4464, 4472), False, 'from processing_components.image.operations import copy_image, create_empty_image_like\n'), ((6536, 6583), 'numpy.zeros', 'numpy.zeros', (['[nchan, 1, 2 * ny - 1, 2 * nx - 1]'], {}), '([nchan, 1, 2 * ny - 1, 2 * nx - 1])\n', (6547, 6583), False, 'import numpy\n'), ((6975, 6993), 'processing_components.image.operations.copy_image', 'copy_image', (['screen'], {}), '(screen)\n', (6985, 6993), False, 'from processing_components.image.operations import copy_image, create_empty_image_like\n'), ((7884, 7893), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7891, 7893), True, 'import matplotlib.pyplot as plt\n'), ((8612, 8644), 'matplotlib.pyplot.title', 'plt.title', (['"""Pierce point phases"""'], {}), "('Pierce point 
phases')\n", (8621, 8644), True, 'import matplotlib.pyplot as plt\n'), ((8649, 8668), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X (m)"""'], {}), "('X (m)')\n", (8659, 8668), True, 'import matplotlib.pyplot as plt\n'), ((8673, 8692), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y (m)"""'], {}), "('Y (m)')\n", (8683, 8692), True, 'import matplotlib.pyplot as plt\n'), ((8762, 8772), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8770, 8772), True, 'import matplotlib.pyplot as plt\n'), ((1366, 1412), 'processing_library.util.coordinate_support.skycoord_to_lmn', 'skycoord_to_lmn', (['source_direction', 'phasecentre'], {}), '(source_direction, phasecentre)\n', (1381, 1412), False, 'from processing_library.util.coordinate_support import xyz_to_uvw, skycoord_to_lmn\n'), ((2119, 2171), 'processing_components.calibration.operations.create_gaintable_from_blockvisibility', 'create_gaintable_from_blockvisibility', (['vis'], {}), '(vis, **kwargs)\n', (2156, 2171), False, 'from processing_components.calibration.operations import create_gaintable_from_blockvisibility, create_gaintable_from_rows\n'), ((2280, 2326), 'processing_components.visibility.iterators.vis_timeslice_iter', 'vis_timeslice_iter', (['vis'], {'vis_slices': 'vis_slices'}), '(vis, vis_slices=vis_slices)\n', (2298, 2326), False, 'from processing_components.visibility.iterators import vis_timeslice_iter\n'), ((2341, 2379), 'processing_components.visibility.base.create_visibility_from_rows', 'create_visibility_from_rows', (['vis', 'rows'], {}), '(vis, rows)\n', (2368, 2379), False, 'from processing_components.visibility.base import create_visibility_from_rows\n'), ((2393, 2414), 'numpy.average', 'numpy.average', (['v.time'], {}), '(v.time)\n', (2406, 2414), False, 'import numpy\n'), ((6641, 6713), 'scipy.signal.fftconvolve', 'fftconvolve', (['screen.data[chan, 0, ...]', 'screen.data[chan, 0, ::-1, ::-1]'], {}), '(screen.data[chan, 0, ...], screen.data[chan, 0, ::-1, ::-1])\n', (6652, 6713), False, 
'from scipy.signal import fftconvolve\n'), ((6877, 6904), 'numpy.max', 'numpy.max', (['sf[chan, 0, ...]'], {}), '(sf[chan, 0, ...])\n', (6886, 6904), False, 'import numpy\n'), ((8735, 8756), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plotfile'], {}), '(plotfile)\n', (8746, 8756), True, 'import matplotlib.pyplot as plt\n'), ((1479, 1495), 'numpy.array', 'numpy.array', (['lmn'], {}), '(lmn)\n', (1490, 1495), False, 'import numpy\n'), ((2722, 2741), 'numpy.zeros', 'numpy.zeros', (['[nant]'], {}), '([nant])\n', (2733, 2741), False, 'import numpy\n'), ((3264, 3333), 'numpy.exp', 'numpy.exp', (['(1.0j * scr[:, numpy.newaxis, numpy.newaxis, numpy.newaxis])'], {}), '(1.0j * scr[:, numpy.newaxis, numpy.newaxis, numpy.newaxis])\n', (3273, 3333), False, 'import numpy\n'), ((4667, 4737), 'processing_components.calibration.iterators.gaintable_timeslice_iter', 'gaintable_timeslice_iter', (['gaintable'], {'gaintable_slices': 'gaintable_slices'}), '(gaintable, gaintable_slices=gaintable_slices)\n', (4691, 4737), False, 'from processing_components.calibration.iterators import gaintable_timeslice_iter\n'), ((4757, 4800), 'processing_components.calibration.operations.create_gaintable_from_rows', 'create_gaintable_from_rows', (['gaintable', 'rows'], {}), '(gaintable, rows)\n', (4783, 4800), False, 'from processing_components.calibration.operations import create_gaintable_from_blockvisibility, create_gaintable_from_rows\n'), ((4818, 4840), 'numpy.average', 'numpy.average', (['gt.time'], {}), '(gt.time)\n', (4831, 4840), False, 'import numpy\n'), ((5175, 5210), 'numpy.angle', 'numpy.angle', (['gt.gain[0, :, 0, 0, 0]'], {}), '(gt.gain[0, :, 0, 0, 0])\n', (5186, 5210), False, 'import numpy\n'), ((6768, 6853), 'scipy.signal.fftconvolve', 'fftconvolve', (['screen.data[chan, itime, ...]', 'screen.data[chan, itime, ::-1, ::-1]'], {}), '(screen.data[chan, itime, ...], screen.data[chan, itime, ::-1, ::-1]\n )\n', (6779, 6853), False, 'from scipy.signal import fftconvolve\n'), ((7962, 8032), 
'processing_components.calibration.iterators.gaintable_timeslice_iter', 'gaintable_timeslice_iter', (['gaintable'], {'gaintable_slices': 'gaintable_slices'}), '(gaintable, gaintable_slices=gaintable_slices)\n', (7986, 8032), False, 'from processing_components.calibration.iterators import gaintable_timeslice_iter\n'), ((8052, 8095), 'processing_components.calibration.operations.create_gaintable_from_rows', 'create_gaintable_from_rows', (['gaintable', 'rows'], {}), '(gaintable, rows)\n', (8078, 8095), False, 'from processing_components.calibration.operations import create_gaintable_from_blockvisibility, create_gaintable_from_rows\n'), ((8113, 8135), 'numpy.average', 'numpy.average', (['gt.time'], {}), '(gt.time)\n', (8126, 8135), False, 'import numpy\n'), ((8477, 8512), 'numpy.angle', 'numpy.angle', (['gt.gain[0, :, 0, 0, 0]'], {}), '(gt.gain[0, :, 0, 0, 0])\n', (8488, 8512), False, 'import numpy\n'), ((8525, 8597), 'matplotlib.pyplot.scatter', 'plt.scatter', (['pp[:, 0]', 'pp[:, 1]'], {'c': 'phases', 'cmap': '"""hsv"""', 'alpha': '(0.75)', 's': '(0.1)'}), "(pp[:, 0], pp[:, 1], c=phases, cmap='hsv', alpha=0.75, s=0.1)\n", (8536, 8597), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
from simpysql.Util.Expression import expression as expr, Expression
from simpysql.Util.Response import Response
from .BaseBuilder import BaseBuilder
from simpysql.Util.Dynamic import Dynamic
class SqlServerBuilder(BaseBuilder):
    """Chainable query builder that compiles model operations into SQL Server
    (T-SQL) statements: ``top (n)`` for row limits and
    ``offset ... fetch next`` for paging.

    NOTE(review): a few fragments still use MySQL dialect and are flagged
    inline (backtick identifier quoting in ``_columnize`` and both lock
    suffixes); they are left unchanged here because other code may compare
    the generated SQL strings -- confirm before porting them to T-SQL.
    """

    # Comparison / pattern operators accepted by where()/orwhere()/having().
    operators = [
        '=', '<', '>', '<=', '>=', '<>', '!=',
        'like', 'like binary', 'not like', 'between', 'ilike',
        '&', '|', '^', '<<', '>>',
        'rlike', 'regexp', 'not regexp',
        '~', '~*', '!~', '!~*', 'similar to',
        'not similar to', 'not ilike', '~~*', '!~~*', 'in', 'not in', 'not between'
    ]

    def __init__(self, model, alias=None):
        self.__model__ = model
        self.__alias__ = alias
        self.__where__ = []
        self.__orwhere__ = []      # conditions OR-ed against __where__
        self.__whereor__ = []      # grouped lists of OR-ed conditions
        self.__select__ = []       # columns to select
        self.__limit__ = 0         # number of rows to fetch (0 = no limit)
        self.__orderby__ = []      # order-by columns
        self.__groupby__ = []      # group-by columns
        self.__offset__ = None     # offset
        self.__lock__ = None       # lock suffix
        self.__join__ = []         # join clauses (left/right/inner)
        self.__union__ = []        # union & union all
        self.__on__ = []           # join conditions
        self.__having__ = None     # having clause
        self.__subquery__ = []     # subqueries used as the from-target

    def first(self):
        """Return the first matching row (as a Dynamic), or [] when empty."""
        self.__limit__ = 1
        data = self.get()
        if data:
            return data.pop()
        return data

    def one(self):
        """Return the last row of the result set, or [] when empty."""
        data = self.get()
        if data:
            return data.pop()
        return data

    def get(self):
        """Execute the compiled select and wrap each row in a Dynamic."""
        return [Dynamic(index) for index in self._get_connection().execute(self._compile_select())]

    def lists(self, columns):
        """Execute the select and return only the given columns as lists."""
        return Response(self._get_connection().execute(self._compile_select())).tolist(columns)

    def data(self):
        """Execute the select and return the raw response data."""
        return Response(self._get_connection().execute(self._compile_select())).data()

    def response(self):
        """Execute the select and return the full Response wrapper."""
        return Response(self._get_connection().execute(self._compile_select()))

    def _aggregate(self, func, column):
        # Shared implementation of max/min/avg/sum (deduplicated from four
        # identical method bodies). Error messages are preserved per function.
        if isinstance(column, str) and column in self.__model__.columns:
            self.__select__ = ['{}({}) as aggregate'.format(func, column)]
            data = self.one()
            return data['aggregate'] if data else None
        raise Exception('param invalid in function {}'.format(func))

    def max(self, column):
        """Return max(column), or None when there are no rows."""
        return self._aggregate('max', column)

    def min(self, column):
        """Return min(column), or None when there are no rows."""
        return self._aggregate('min', column)

    def avg(self, column):
        """Return avg(column), or None when there are no rows."""
        return self._aggregate('avg', column)

    def sum(self, column):
        """Return sum(column), or None when there are no rows."""
        return self._aggregate('sum', column)

    def count(self):
        """Return the number of matching rows, or None when the query fails."""
        self.__select__ = ['count(*) as aggregate']
        data = self.one()
        return data['aggregate'] if data else None

    def exist(self):
        """Return True when at least one matching row exists."""
        return self.count() > 0

    def update(self, data):
        """Run an update with the given column dict; refreshes update_time."""
        if data and isinstance(data, dict):
            data = self._set_update_time(data)
            return self._get_connection().execute(self._compile_update(data))

    def increment(self, key, amount=1):
        """Increase a numeric column by ``amount`` (a positive int)."""
        if isinstance(amount, int) and amount > 0:
            data = collections.defaultdict(dict)
            data[key] = '{}+{}'.format(expr.format_column(key), str(amount))
            data = self._set_update_time(data)
            return self._get_connection().execute(self._compile_increment(data))

    def decrement(self, key, amount=1):
        """Decrease a numeric column by ``amount`` (a positive int)."""
        if isinstance(amount, int) and amount > 0:
            data = collections.defaultdict(dict)
            data[key] = '{}-{}'.format(expr.format_column(key), str(amount))
            data = self._set_update_time(data)
            return self._get_connection().execute(self._compile_increment(data))

    def create(self, data):
        """Insert one dict or a list of dicts; stamps create/update times."""
        if data:
            if data and isinstance(data, dict):
                data = [data]
            data = self._set_create_time(data)
            self._get_connection().execute(self._compile_create(data))
        return self

    def insert(self, columns, data):
        """Insert rows given as a column list plus value tuples."""
        self._get_connection().execute(self._compile_insert(columns, data))
        return self

    def lastid(self):
        """Return the identity value generated by the last insert, or None."""
        data = self._get_connection().execute(self._compile_lastid())
        return data[0][0] if data and data[0] and data[0][0] else None

    def delete(self):
        """Run a delete constrained by the current where conditions."""
        return self._get_connection().execute(self._compile_delete())

    def take(self, number):
        """Limit the result set to ``number`` rows (must be positive)."""
        if number <= 0:
            raise Exception('take number invalid')
        self.__limit__ = int(number)
        return self

    def select(self, *args):
        """Choose the columns to return."""
        self.__select__ = self._format_columns(list(args))
        return self

    def groupby(self, *args):
        """Add group-by columns."""
        self.__groupby__ = self._format_columns(list(args))
        return self

    def offset(self, number):
        """Skip ``number`` rows; requires orderby() for T-SQL OFFSET/FETCH."""
        if number <= 0:
            raise Exception('offset number invalid')
        self.__offset__ = int(number)
        return self

    def tosql(self):
        """Return the compiled select statement without executing it."""
        return self._compile_select()

    def where(self, *args):
        """Add an AND condition: dict, (column, value) or (column, op, value)."""
        length = args.__len__()
        if length == 1 and isinstance(args[0], dict):
            self.__where__.append(args[0])
        elif length == 2:
            self.__where__.append({args[0]: self._check_columns_value(args[1])})
        elif length == 3:
            if args[1] in self.operators:
                if args[1] == '=':
                    self.__where__.append({args[0]: self._check_columns_value(args[2])})
                else:
                    self.__where__.append((args[0], args[1], self._check_columns_value(args[2])))
            else:
                raise Exception('operator key world not found: "{}"'.format(args[1]))
        else:
            raise Exception('bad parameters in where function')
        return self

    def orwhere(self, *args):
        """Add an OR condition (same shapes as where(), plus a list group).

        NOTE(review): unlike where(), values here are not passed through
        _check_columns_value, so subquery column references are not
        recognised -- confirm whether that is intentional.
        """
        length = args.__len__()
        if length == 1 and isinstance(args[0], dict):
            self.__orwhere__.append(args[0])
        elif length == 1 and isinstance(args[0], list):
            self.__orwhere__.append(args[0])
        elif length == 2:
            self.__orwhere__.append({args[0]: args[1]})
        elif length == 3:
            if args[1] in self.operators:
                if args[1] == '=':
                    self.__orwhere__.append({args[0]: args[2]})
                else:
                    self.__orwhere__.append((args[0], args[1], args[2]))
            else:
                raise Exception('operator key world not found: "{}"'.format(args[1]))
        else:
            raise Exception('bad parameters in where function')
        return self

    def whereor(self, *args):
        """Add a grouped OR condition from a list of condition items."""
        length = args.__len__()
        if length == 1 and isinstance(args[0], list):
            self.__whereor__.append(args[0])
        else:
            raise Exception('bad parameters in where function')
        return self

    def orderby(self, column, direction='asc'):
        """Append an order-by column; direction is 'asc' (default) or 'desc'."""
        if direction.lower() == 'asc':
            self.__orderby__.append(expr.format_column(column))
        else:
            self.__orderby__.append(expr.format_column(column) + ' desc')
        return self

    def execute(self, sql):
        """Run a raw SQL statement on the model's connection."""
        return self._get_connection().execute(sql)

    def having(self, *args):
        """Set the having clause: (column, value) or (column, op, value)."""
        length = args.__len__()
        if length == 2:
            self.__having__ = ' having {} {} {}'.format(args[0], '=', expr.format_string(args[1]))
        elif length == 3:
            self.__having__ = ' having {} {} {}'.format(args[0], args[1], expr.format_string(args[2]))
        else:
            raise Exception('invalid parameter in having function')
        return self

    def lock_for_update(self):
        # NOTE(review): 'for update' is MySQL/Postgres syntax; T-SQL uses a
        # WITH (UPDLOCK) table hint instead -- confirm before use.
        self.__lock__ = ' for update'
        return self

    def lock_for_share(self):
        # NOTE(review): 'lock in share mode' is MySQL syntax; T-SQL uses a
        # WITH (HOLDLOCK) table hint instead -- confirm before use.
        self.__lock__ = ' lock in share mode'
        return self

    def leftjoin(self, model):
        """Left-join another builder; pair with on() for the join condition."""
        if not (isinstance(model, BaseBuilder)):
            raise TypeError('invalid parameter type in leftjoin')
        self.__join__.append(('left join', model))
        return self

    def rightjoin(self, model):
        """Right-join another builder; pair with on() for the join condition."""
        if not (isinstance(model, BaseBuilder)):
            raise TypeError('invalid parameter type in rightjoin')
        self.__join__.append(('right join', model))
        return self

    def join(self, model):
        """Alias of innerjoin()."""
        return self.innerjoin(model)

    def innerjoin(self, model):
        """Inner-join another builder; pair with on() for the join condition."""
        if not (isinstance(model, BaseBuilder)):
            raise TypeError('invalid parameter type in innerjoin')
        self.__join__.append(('inner join', model))
        return self

    def union(self, model):
        """Union the current query with another builder's query."""
        if not (isinstance(model, BaseBuilder)):
            raise TypeError('invalid parameter type in union')
        self.__union__.append(('union', model))
        return self

    def unionall(self, model):
        """Union-all the current query with another builder's query."""
        if not (isinstance(model, BaseBuilder)):
            raise TypeError('invalid parameter type in unionall')
        self.__union__.append(('union all', model))
        return self

    def on(self, *args):
        """Add a join condition: (left, right) or (left, op, right)."""
        length = args.__len__()
        if length == 2:
            self.__on__.append((args[0], '=', args[1]))
        elif length == 3:
            self.__on__.append((args[0], args[1], args[2]))
        else:
            raise Exception('invalid parameter in on function')
        return self

    def subquery(self, model, alias='tmp'):
        """Use another builder (or raw SQL string) as the from-target."""
        self.__subquery__.append((alias, model))
        return self

    def _compile_select(self):
        # Assemble the final select; T-SQL puts the limit up front as top (n).
        if len(self.__select__) == 0:
            self.__select__.append('*')
        subsql = ''.join(
            [self._compile_where(), self._compile_whereor(), self._compile_orwhere(), self._compile_groupby(), self._compile_orderby(),
             self._compile_having(), self._compile_offset(), self._compile_lock()])
        joinsql = ''.join(self._compile_leftjoin())
        returnsql = "select {}{} from {}{}{}".format(self._compile_limit(), ','.join(self.__select__), self._tablename(), joinsql, subsql)
        if self.__union__:
            return '{}'.format(returnsql) + self._compile_union()
        return returnsql

    def _compile_create(self, data):
        return "insert into {} {} values {}".format(self._tablename(), self._columnize(data[0]), self._valueize(data))

    def _compile_insert(self, columns, data):
        return "insert into {} {} values {}".format(self._tablename(), self._columnize(columns), ','.join([tuple(index).__str__() for index in data]))

    def _compile_update(self, data):
        return "update {} set {}{}".format(self._tablename(), ','.join(self._compile_dict(data)), self._compile_where())

    def _compile_increment(self, data):
        subsql = ','.join(['{}={}'.format(expr.format_column(index), value) for index, value in data.items()])
        return "update {} set {}{}".format(self._tablename(), subsql, self._compile_where())

    def _compile_delete(self):
        return 'delete from {}{}'.format(self._tablename(), self._compile_where())

    def _compile_lastid(self):
        # BUGFIX: previously emitted MySQL's last_insert_id(), which does not
        # exist in T-SQL; SCOPE_IDENTITY() returns the last identity value
        # generated in the current scope.
        return 'select SCOPE_IDENTITY() as lastid'

    def _columnize(self, columns):
        # NOTE(review): backtick quoting is MySQL dialect; SQL Server quotes
        # identifiers with [brackets]. Left unchanged to avoid altering the
        # generated SQL strings -- confirm against the target server.
        return tuple(columns).__str__().replace('\'', '`')

    def _valueize(self, data):
        return ','.join([tuple(index.values()).__str__() for index in data])

    def _compile_groupby(self):
        return '' if len(self.__groupby__) == 0 else ' group by ' + ','.join(self.__groupby__)

    def _compile_orderby(self):
        return '' if len(self.__orderby__) == 0 else ' order by ' + ','.join(self.__orderby__)

    def _compile_limit(self):
        # T-SQL row limit: 'top (n) ' prefix right after 'select'.
        return '' if self.__limit__ == 0 else 'top ({}) '.format(self.__limit__)

    def _compile_offset(self):
        # T-SQL OFFSET/FETCH is only valid together with ORDER BY.
        if self.__offset__:
            if self.__orderby__:
                return '' if self.__offset__ is None else ' offset {} rows fetch next {} rows only'.format(self.__offset__, self.__limit__)
            raise Exception('orderby function not set exception')
        return ''

    def _compile_lock(self):
        return '' if self.__lock__ is None else self.__lock__

    def _compile_leftjoin(self):
        if self.__join__:
            return ' ' + ' '.join(['{} {} on {}'.format(index, value._tablename(), value._compile_on()) for (index, value) in
                                   self.__join__])
        return ''

    def _compile_union(self):
        if self.__union__:
            return ' ' + ' '.join(['{} ({})'.format(index, value.tosql()) for (index, value) in self.__union__])
        return ''

    def _compile_on(self):
        sqlstr = ['{} {} {}'.format(index[0], index[1], index[2]) for index in self.__on__]
        return ' and '.join(sqlstr)

    def _compile_having(self):
        if self.__having__:
            return self.__having__
        return ''

    def _compile_where(self):
        # AND-join all accumulated where conditions.
        if len(self.__where__) > 0:
            sqlstr = []
            for index in self.__where__:
                if isinstance(index, dict):
                    sqlstr.append(' and '.join(self._compile_dict(index)))
                elif isinstance(index, tuple):
                    sqlstr.append(self._compile_tuple(index))
            return ' where {}'.format(' and '.join(sqlstr))
        return ''

    def _compile_orwhere(self):
        # OR-join orwhere conditions; starts the where clause itself when no
        # plain where conditions exist.
        if len(self.__orwhere__) > 0:
            sqlstr = []
            for index in self.__orwhere__:
                if isinstance(index, dict):
                    subsql = self._compile_dict(index)
                    if len(subsql) == 1:
                        sqlstr.append(subsql.pop())
                    else:
                        sqlstr.append('({})'.format(' and '.join(subsql)))
                elif isinstance(index, tuple):
                    sqlstr.append(self._compile_tuple(index))
                elif isinstance(index, list):
                    subsql = []
                    for items in index:
                        if len(items) == 2:
                            subsql.append(self._compile_keyvalue(items[0], items[1]))
                        if len(items) == 3:
                            subsql.append(self._compile_tuple((items[0], items[1], items[2])))
                    sqlstr.append('({})'.format(' and '.join(subsql)))
                else:
                    raise Exception('undefined query condition {}'.format(index.__str__()))
            if len(self.__where__) > 0:
                return ' or {}'.format(' or '.join(sqlstr))
            return ' where {}'.format(' or '.join(sqlstr))
        return ''

    def _compile_whereor(self):
        # Compile grouped OR condition lists added via whereor().
        if len(self.__whereor__) > 0:
            sqlstr = []
            for index in self.__whereor__:
                subsql = []
                for item in index:
                    if isinstance(item, dict):
                        if len(item) == 1:
                            subsql.append(self._compile_dict(item).pop())
                        else:
                            subsql.append('(' + ' and '.join(self._compile_dict(item)) + ')')
                    elif isinstance(item, list):
                        if isinstance(item[0], str):
                            subsql.append(self._compile_tuple(tuple(item)))
                        else:
                            subsql.append(self._compile_lists(item))
                    elif isinstance(item, tuple):
                        subsql.append(self._compile_tuple(item))
                    else:
                        raise Exception('whereor param invalid')
                sqlstr.append(' or '.join(subsql))
            if len(self.__where__) > 0:
                return ' and ({})'.format(' or '.join(sqlstr))
            return ' where ({})'.format(' or '.join(sqlstr))
        return ''

    def _compile_dict(self, data):
        return ['{}={}'.format(expr.format_column(index), expr.format_string(value)) for index, value in data.items()]

    def _compile_tuple(self, data):
        # Dispatch (column, op, value) tuples to the specialised compilers.
        if data[1] in ['in', 'not in']:
            return self._compile_in((data[0], data[1], data[2]))
        elif data[1] in ['between', 'not between']:
            return self._compile_between((data[0], data[1], data[2]))
        return '{} {} {}'.format(expr.format_column(data[0]), data[1], expr.format_string(data[2]))

    def _compile_in(self, data):
        return '{} {} {}'.format(expr.format_column(data[0]), data[1], expr.list_to_str(data[2]))

    def _compile_list(self, data):
        length = len(data)
        if length == 2:
            return self._compile_keyvalue(data[0], data[1])
        if length == 3:
            return self._compile_tuple((data[0], data[1], data[2]))

    def _compile_lists(self, data):
        return_data = []
        for index in data:
            if isinstance(index, list):
                return_data.append(self._compile_list(index))
            if isinstance(index, tuple):
                return_data.append(self._compile_tuple(index))
        return '(' + ' and '.join(return_data) + ')'

    def _compile_between(self, data):
        if not (len(data) == 3 and len(data[2]) == 2):
            raise Exception('between param invalid')
        return '{} {} {} and {}'.format(expr.format_column(data[0]), data[1], expr.format_string(data[2][0]),
                                        expr.format_string(data[2][1]))

    def _compile_keyvalue(self, key, value):
        return '{}={}'.format(expr.format_column(key), expr.format_string(value))

    def _compile_subquery(self):
        # Render '(sql) as alias' (or 'raw as alias' for string subqueries).
        subquery = []
        for index, value in self.__subquery__:
            if isinstance(value, str):
                subquery.append('{} as {}'.format(value, index))
            else:
                subquery.append('({}) as {}'.format(value.tosql(), index))
        return ','.join(subquery)

    def _get_connection(self):
        return self.connect(self.__model__)

    def _check_columns_value(self, value):
        # With 2+ subqueries, a value like 'alias.column' is treated as a raw
        # column reference (Expression), not a literal.
        if self.__subquery__ and len(self.__subquery__) >= 2 and isinstance(value, str):
            tmp = value.split('.')
            if len(tmp) == 2 and tmp[0] in self._get_subquery_alias():
                return Expression(value)
        return value

    def _get_subquery_alias(self):
        return [index for index, value in self.__subquery__]

    def database(self, name):
        """Switch the model to another database."""
        self.__model__.__database__ = name
        return self

    def _tablename(self):
        # Subqueries replace the table name entirely; otherwise apply the alias.
        if self.__subquery__:
            return self._compile_subquery()
        if self.__alias__ is None:
            return self.__model__.__tablename__
        return self.__model__.__tablename__ + ' as {}'.format(self.__alias__)

    def _format_columns(self, columns):
        return list(map(lambda index: expr.format_column(index), columns))

    def _set_create_time(self, data):
        # Stamp create/update time columns (if the model declares them) on
        # every row that does not already provide a value.
        currtime = self.__model__.fresh_timestamp()
        update_column = self.__model__.update_time_column()
        create_column = self.__model__.create_time_column()
        for index in data:
            if create_column and create_column not in index:
                index[create_column] = currtime
            if update_column and update_column not in index:
                index[update_column] = currtime
        return data

    def _set_update_time(self, data):
        # Stamp the update time column (if declared) unless already provided.
        currtime = self.__model__.fresh_timestamp()
        update_column = self.__model__.update_time_column()
        if update_column and update_column not in data:
            data[update_column] = currtime
        return data

    def transaction(self, callback):
        """Run ``callback`` inside a database transaction."""
        return self._get_connection().transaction(callback)

    def transaction_wrapper(self, callback):
        """Return a transaction-wrapped version of ``callback``."""
        return self._get_connection().transaction_wrapper(callback)
| [
"simpysql.Util.Dynamic.Dynamic",
"simpysql.Util.Expression.expression.list_to_str",
"simpysql.Util.Expression.expression.format_column",
"collections.defaultdict",
"simpysql.Util.Expression.Expression",
"simpysql.Util.Expression.expression.format_string"
] | [((1927, 1941), 'simpysql.Util.Dynamic.Dynamic', 'Dynamic', (['index'], {}), '(index)\n', (1934, 1941), False, 'from simpysql.Util.Dynamic import Dynamic\n'), ((4136, 4165), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (4159, 4165), False, 'import collections\n'), ((4482, 4511), 'collections.defaultdict', 'collections.defaultdict', (['dict'], {}), '(dict)\n', (4505, 4511), False, 'import collections\n'), ((17040, 17067), 'simpysql.Util.Expression.expression.format_column', 'expr.format_column', (['data[0]'], {}), '(data[0])\n', (17058, 17067), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((17078, 17105), 'simpysql.Util.Expression.expression.format_string', 'expr.format_string', (['data[2]'], {}), '(data[2])\n', (17096, 17105), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((17174, 17201), 'simpysql.Util.Expression.expression.format_column', 'expr.format_column', (['data[0]'], {}), '(data[0])\n', (17192, 17201), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((17212, 17237), 'simpysql.Util.Expression.expression.list_to_str', 'expr.list_to_str', (['data[2]'], {}), '(data[2])\n', (17228, 17237), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((18013, 18040), 'simpysql.Util.Expression.expression.format_column', 'expr.format_column', (['data[0]'], {}), '(data[0])\n', (18031, 18040), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((18051, 18081), 'simpysql.Util.Expression.expression.format_string', 'expr.format_string', (['data[2][0]'], {}), '(data[2][0])\n', (18069, 18081), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((18136, 18166), 'simpysql.Util.Expression.expression.format_string', 'expr.format_string', (['data[2][1]'], {}), '(data[2][1])\n', (18154, 18166), True, 'from simpysql.Util.Expression import expression as expr, 
Expression\n'), ((18244, 18267), 'simpysql.Util.Expression.expression.format_column', 'expr.format_column', (['key'], {}), '(key)\n', (18262, 18267), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((18269, 18294), 'simpysql.Util.Expression.expression.format_string', 'expr.format_string', (['value'], {}), '(value)\n', (18287, 18294), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((4205, 4228), 'simpysql.Util.Expression.expression.format_column', 'expr.format_column', (['key'], {}), '(key)\n', (4223, 4228), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((4551, 4574), 'simpysql.Util.Expression.expression.format_column', 'expr.format_column', (['key'], {}), '(key)\n', (4569, 4574), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((7949, 7975), 'simpysql.Util.Expression.expression.format_column', 'expr.format_column', (['column'], {}), '(column)\n', (7967, 7975), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((8322, 8349), 'simpysql.Util.Expression.expression.format_string', 'expr.format_string', (['args[1]'], {}), '(args[1])\n', (8340, 8349), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((16655, 16680), 'simpysql.Util.Expression.expression.format_column', 'expr.format_column', (['index'], {}), '(index)\n', (16673, 16680), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((16682, 16707), 'simpysql.Util.Expression.expression.format_string', 'expr.format_string', (['value'], {}), '(value)\n', (16700, 16707), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((18968, 18985), 'simpysql.Util.Expression.Expression', 'Expression', (['value'], {}), '(value)\n', (18978, 18985), False, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((8027, 8053), 
'simpysql.Util.Expression.expression.format_column', 'expr.format_column', (['column'], {}), '(column)\n', (8045, 8053), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((8451, 8478), 'simpysql.Util.Expression.expression.format_string', 'expr.format_string', (['args[2]'], {}), '(args[2])\n', (8469, 8478), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((11620, 11645), 'simpysql.Util.Expression.expression.format_column', 'expr.format_column', (['index'], {}), '(index)\n', (11638, 11645), True, 'from simpysql.Util.Expression import expression as expr, Expression\n'), ((19539, 19564), 'simpysql.Util.Expression.expression.format_column', 'expr.format_column', (['index'], {}), '(index)\n', (19557, 19564), True, 'from simpysql.Util.Expression import expression as expr, Expression\n')] |
import json
import logging
from pathlib import Path
from hermes.common.lex_utils import success, error
logger = logging.getLogger(__name__)

# The script is resolved relative to the *current working directory*, not this
# file's location -- assumes the process is launched from the project root.
script_path = Path.cwd().joinpath('hermes/help/script.json')

# Loaded once at import time; ``script`` is a module-level dict used by handler().
with script_path.open() as f: script = json.load(f)
def handler(event, context):
    """Lambda entry point: reply with the help text from script.json.

    ``event`` and ``context`` follow the AWS Lambda calling convention but
    are not inspected here.
    """
    lines = script['help_text']
    return success(message='\n'.join(lines))
if __name__ == '__main__':
    # Local smoke test: invoke the handler with empty payloads and
    # pretty-print the response.
    print(json.dumps(handler(event={}, context={}), indent=3))
"logging.getLogger",
"pathlib.Path.cwd",
"json.dumps",
"hermes.common.lex_utils.success",
"json.load"
] | [((113, 140), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (130, 140), False, 'import logging\n'), ((242, 254), 'json.load', 'json.load', (['f'], {}), '(f)\n', (251, 254), False, 'import json\n'), ((345, 371), 'hermes.common.lex_utils.success', 'success', ([], {'message': 'help_text'}), '(message=help_text)\n', (352, 371), False, 'from hermes.common.lex_utils import success, error\n'), ((156, 166), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (164, 166), False, 'from pathlib import Path\n'), ((452, 477), 'json.dumps', 'json.dumps', (['res'], {'indent': '(3)'}), '(res, indent=3)\n', (462, 477), False, 'import json\n')] |
from typing import Optional, Sequence
import torch
from ...gpu import Device
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch
from .ddpg_impl import DDPGImpl
class TD3Impl(DDPGImpl):
    """Twin Delayed DDPG (TD3) implementation.

    Extends the DDPG implementation with target policy smoothing: clipped
    Gaussian noise is added to the target policy's action before the target
    Q-value is computed, which regularises the value estimate. All other
    machinery (actor/critic updates, target networks) is inherited from
    :class:`DDPGImpl`.
    """

    # Std-dev of the Gaussian noise added to the target action.
    _target_smoothing_sigma: float
    # Absolute bound applied to that noise before perturbing the action.
    _target_smoothing_clip: float

    def __init__(
        self,
        observation_shape: Sequence[int],
        action_size: int,
        actor_learning_rate: float,
        critic_learning_rate: float,
        actor_optim_factory: OptimizerFactory,
        critic_optim_factory: OptimizerFactory,
        actor_encoder_factory: EncoderFactory,
        critic_encoder_factory: EncoderFactory,
        q_func_factory: QFunctionFactory,
        gamma: float,
        tau: float,
        n_critics: int,
        target_reduction_type: str,
        target_smoothing_sigma: float,
        target_smoothing_clip: float,
        use_gpu: Optional[Device],
        scaler: Optional[Scaler],
        action_scaler: Optional[ActionScaler],
        reward_scaler: Optional[RewardScaler],
    ):
        # Everything except the two smoothing parameters is forwarded
        # unchanged to the DDPG base implementation.
        super().__init__(
            observation_shape=observation_shape,
            action_size=action_size,
            actor_learning_rate=actor_learning_rate,
            critic_learning_rate=critic_learning_rate,
            actor_optim_factory=actor_optim_factory,
            critic_optim_factory=critic_optim_factory,
            actor_encoder_factory=actor_encoder_factory,
            critic_encoder_factory=critic_encoder_factory,
            q_func_factory=q_func_factory,
            gamma=gamma,
            tau=tau,
            n_critics=n_critics,
            target_reduction_type=target_reduction_type,
            use_gpu=use_gpu,
            scaler=scaler,
            action_scaler=action_scaler,
            reward_scaler=reward_scaler,
        )
        self._target_smoothing_sigma = target_smoothing_sigma
        self._target_smoothing_clip = target_smoothing_clip

    def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
        """Compute the TD target Q-value with target policy smoothing.

        The target policy's action for the next observations is perturbed by
        clipped Gaussian noise and then clamped back into the valid action
        range [-1, 1] before being evaluated by the target Q-function.
        Runs under ``torch.no_grad()`` -- no gradients flow through targets.
        """
        assert self._targ_policy is not None
        assert self._targ_q_func is not None
        with torch.no_grad():
            action = self._targ_policy(batch.next_observations)
            # smoothing target
            noise = torch.randn(action.shape, device=batch.device)
            scaled_noise = self._target_smoothing_sigma * noise
            clipped_noise = scaled_noise.clamp(
                -self._target_smoothing_clip, self._target_smoothing_clip
            )
            smoothed_action = action + clipped_noise
            # Keep the perturbed action inside the valid action range.
            clipped_action = smoothed_action.clamp(-1.0, 1.0)
            return self._targ_q_func.compute_target(
                batch.next_observations,
                clipped_action,
                reduction=self._target_reduction_type,
            )
| [
"torch.no_grad",
"torch.randn"
] | [((2284, 2299), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2297, 2299), False, 'import torch\n'), ((2416, 2462), 'torch.randn', 'torch.randn', (['action.shape'], {'device': 'batch.device'}), '(action.shape, device=batch.device)\n', (2427, 2462), False, 'import torch\n')] |
from time import sleep
from igata.predictors import PredictorBase
class DummyPredictorNoInputNoOutput(PredictorBase):
    """Predictor stub that ignores its arguments and returns a fixed payload."""

    def predict(self, inputs, meta):
        """Return a canned classification result."""
        return {"result": 0.222, "class": "car", "is_valid": True}
class DummyPredictorNoInputNoOutputVariableOutput(PredictorBase):
    """Predictor stub whose canned result is configurable via kwargs."""

    def __init__(self, *args, **kwargs):
        """Store the result to return later; falls back to a fixed payload."""
        fallback = {"result": 0.222, "class": "car", "is_valid": True}
        self.result = kwargs.get("result", fallback)

    def predict(self, input, meta=None):
        """Return the configured result, ignoring both arguments."""
        return self.result
class DummyPredictorNoOutput(PredictorBase):
    """Predictor stub that preprocesses to an empty dict and echoes it."""

    def preprocess_input(self, record, meta=None):
        """Discard the record; hand an empty dict on to predict()."""
        return {}

    def predict(self, record, meta):
        """Echo the (preprocessed) record unchanged."""
        return record
class DummyPredictorNoInputNoOutputWithPredictTimeout5s(PredictorBase):
    """Predictor stub that deliberately sleeps past its own predict timeout."""

    def predict(self, inputs, meta):
        """Set a short timeout, overshoot it, then return a canned result."""
        # NOTE(review): class name says 5s but the timeout set here is 3 —
        # confirm which value is intended.
        self.set_predict_timeout(3)
        sleep(10)
        return {"result": 0.222, "class": "car", "is_valid": True}
class DummyPredictorOptionalValidStaticMethods(PredictorBase):
    """Predictor stub exposing the optional hooks as staticmethods."""

    @staticmethod
    def get_pandas_read_csv_kwargs(self):
        """Return kwargs intended for the CSV-read step."""
        return {"x": 1}

    @staticmethod
    def get_pandas_to_csv_kwargs(self):
        """Return kwargs intended for the CSV-write step."""
        return {"y": 2}

    @staticmethod
    def set_additional_dynamodb_request_update_attributes(self):
        """Return extra attributes for the dynamodb request update."""
        return {"v": True}

    def predict(self, inputs, meta):
        """Return a canned classification result."""
        return {"result": 0.222, "class": "car", "is_valid": True}
class DummyPredictorOptionalInValidStaticMethods(PredictorBase):
    """Predictor stub defining the optional hooks as plain instance methods.

    Named "InValid" — presumably the negative counterpart of the
    staticmethod variant above; confirm against the test suite.
    """

    def get_pandas_read_csv_kwargs(self):
        """Return kwargs intended for the CSV-read step (non-static form)."""
        return {"x": 1}

    def get_pandas_to_csv_kwargs(self):
        """Return kwargs intended for the CSV-write step (non-static form)."""
        return {"y": 2}

    def set_additional_dynamodb_request_update_attributes(self):
        """Return extra dynamodb update attributes (non-static form)."""
        return {"v": True}

    def predict(self, inputs, meta):
        """Return a canned classification result."""
        return {"result": 0.222, "class": "car", "is_valid": True}
class DummyInPandasDataFrameOutPandasCSVPredictor(PredictorBase):
    """Predictor stub whose predict() is intentionally unimplemented."""

    def predict(self, inputs, meta):
        """Always raise NotImplementedError."""
        raise NotImplementedError
| [
"time.sleep"
] | [((894, 903), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (899, 903), False, 'from time import sleep\n')] |
# This is a collection of very short demo-plugins to illustrate how
# to create and register hooks into the various parts of Mailpile
#
# To start creating a new plugin, it may make sense to copy this file,
# globally search/replace the word "Demo" with your preferred plugin
# name and then go delete sections you aren't going to use.
#
# Happy hacking!
from gettext import gettext as _
import mailpile.plugins
##[ Pluggable configuration ]#################################################
# FIXME
##[ Pluggable keyword extractors ]############################################
# FIXME
##[ Pluggable search terms ]##################################################
# Pluggable search terms allow plugins to enhance the behavior of the
# search engine in various ways. Examples of basic enhanced search terms
# are the date: and size: keywords, which accept human-friendly ranges
# and input, and convert those to a list of "low level" keywords to
# actually search for.
# FIXME
##[ Pluggable vcard functions ]###############################################
from mailpile.vcard import *
class DemoVCardImporter(VCardImporter):
    """Demo importer: emits a single VCard built from its own settings.

    Not useful in itself, but demonstrates how each importer can define
    (and use) its own configuration.
    """
    FORMAT_NAME = _('Demo Contacts')
    # NOTE(review): 'DESCRPTION' looks like a typo for DESCRIPTION, but the
    # framework may read this exact attribute name — confirm before renaming.
    FORMAT_DESCRPTION = _('This is the demo importer')
    SHORT_NAME = 'demo'
    CONFIG_RULES = {
        'active': [_('Activate demo importer'), bool, True],
        'name': [_('Contact name'), str, '<NAME>'],
        'email': [_('Contact email'), 'email', '<EMAIL>']
    }

    def get_vcards(self):
        """Return a one-element contact list built from the config.

        Implementation notes for importer authors:
          - Return at most one card per (set of) e-mail addresses, as
            internal overwriting may cause unexpected results.
          - To delete data, return a VCard for that e-mail address with
            the relevant data removed.
        """
        if not self.config.active:
            return []
        name_line = VCardLine(name='fn', value=self.config.name)
        email_line = VCardLine(name='email', value=self.config.email)
        return [SimpleVCard(name_line, email_line)]
mailpile.plugins.register_vcard_importers(DemoVCardImporter)
##[ Pluggable cron jobs ]#####################################################
def TickJob(session):
    """Minimal cron job: a bare function run inside a session.

    It is usually better to implement a Command and have the cron job
    invoke it, so power users can reach the functionality directly; the
    interval should likewise come from a registered setting instead of a
    literal (see compose.py for an example).
    """
    session.ui.notify('Tick!')
# Register the same callback as both a fast (5s) and slow (15s) job.
mailpile.plugins.register_fast_periodic_job('tick-05', 5, TickJob)
mailpile.plugins.register_slow_periodic_job('tick-15', 15, TickJob)
##[ Pluggable commands ]######################################################
from mailpile.commands import Command
from mailpile.util import md5_hex
class md5sumCommand(Command):
    """Command that MD5-hashes data from CLI args or HTTP query vars."""
    SYNOPSIS = (None, 'md5sum', 'md5sum', '[<data to hash>]')
    SPLIT_ARG = False
    HTTP_CALLABLE = ('GET', 'POST')
    HTTP_QUERY_VARS = {
        'data': 'Data to hash'
    }

    def command(self):
        """Hash the input; refuse empty input or anything containing 'gross'."""
        # HTTP 'data' variable wins over command-line arguments.
        data = self.data['data'] if 'data' in self.data else ''.join(self.args)
        if not data or 'gross' in data:
            return self._error(_('I refuse to work with empty or gross data'),
                               info={'data': data})
        return self._success(_('I hashed your data for you, yay!'),
                             result=md5_hex(data))
mailpile.plugins.register_commands(md5sumCommand)
| [
"mailpile.util.md5_hex",
"gettext.gettext"
] | [((1377, 1395), 'gettext.gettext', '_', (['"""Demo Contacts"""'], {}), "('Demo Contacts')\n", (1378, 1395), True, 'from gettext import gettext as _\n'), ((1420, 1450), 'gettext.gettext', '_', (['"""This is the demo importer"""'], {}), "('This is the demo importer')\n", (1421, 1450), True, 'from gettext import gettext as _\n'), ((1515, 1542), 'gettext.gettext', '_', (['"""Activate demo importer"""'], {}), "('Activate demo importer')\n", (1516, 1542), True, 'from gettext import gettext as _\n'), ((1574, 1591), 'gettext.gettext', '_', (['"""Contact name"""'], {}), "('Contact name')\n", (1575, 1591), True, 'from gettext import gettext as _\n'), ((1627, 1645), 'gettext.gettext', '_', (['"""Contact email"""'], {}), "('Contact email')\n", (1628, 1645), True, 'from gettext import gettext as _\n'), ((4090, 4127), 'gettext.gettext', '_', (['"""I hashed your data for you, yay!"""'], {}), "('I hashed your data for you, yay!')\n", (4091, 4127), True, 'from gettext import gettext as _\n'), ((3960, 4006), 'gettext.gettext', '_', (['"""I refuse to work with empty or gross data"""'], {}), "('I refuse to work with empty or gross data')\n", (3961, 4006), True, 'from gettext import gettext as _\n'), ((4165, 4178), 'mailpile.util.md5_hex', 'md5_hex', (['data'], {}), '(data)\n', (4172, 4178), False, 'from mailpile.util import md5_hex\n')] |
import concurrent.futures
import logging
from logging import StreamHandler
import time
import timeit
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)


def do_something(wait_time):
    """Block for *wait_time* seconds and return a confirmation string.

    Args:
        wait_time: number of seconds to sleep.

    Returns:
        A human-readable completion message.
    """
    logger.info("Waiting for %d seconds.", wait_time)
    time.sleep(wait_time)
    return f"Wait was done for {wait_time} seconds."
start = timeit.default_timer()
# Fan the sleeps out over a thread pool; they overlap because time.sleep
# releases the GIL while blocking.
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executer:
    secs = [5, 4, 3, 2, 1]
    results = [executer.submit(do_something, sec) for sec in secs]
    # as_completed yields futures in completion order (shortest sleep first).
    for f in concurrent.futures.as_completed(results):
        logger.info("result %s", f.result())
end = timeit.default_timer()
# All 5 tasks run concurrently (10 workers), so total wall time is about
# the longest single sleep, not the sum.
logger.info("Execution took %f seconds", end - start)
| [
"logging.basicConfig",
"timeit.default_timer",
"logging.getLogger",
"time.sleep"
] | [((103, 210), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (122, 210), False, 'import logging\n'), ((226, 253), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (243, 253), False, 'import logging\n'), ((428, 450), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (448, 450), False, 'import timeit\n'), ((725, 747), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (745, 747), False, 'import timeit\n'), ((343, 364), 'time.sleep', 'time.sleep', (['wait_time'], {}), '(wait_time)\n', (353, 364), False, 'import time\n')] |
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
#
# All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements test cases for the custom meta data persistence format.
"""
from datetime import datetime
import decimal
import sys
import unicodedata
import unittest
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.metadata.value_mapping import\
MetadataValue, getPersistenceRepresentation
# Version marker kept for the project's keyword-substitution tooling.
__version__ = "$Revision-Id$"
# Non-ASCII sample character ("a" with diaeresis) used to exercise
# encoding failure paths.
_AE = unicodedata.lookup("LATIN SMALL LETTER A WITH DIAERESIS")
class MetadataValueTestCase(unittest.TestCase):
    """Tests decoding of persisted metadata strings via MetadataValue."""
    def testInvalidPersistenceValue(self):
        # A non-string persistence value is rejected outright.
        self.assertRaises(PersistenceError, MetadataValue, None)
    def testComparison(self):
        # Equality and hashing are based on the persisted string.
        self.assertEquals(MetadataValue("a"), MetadataValue("a"))
        self.assertEquals(hash(MetadataValue("a")),
                          hash(MetadataValue("a")))
        self.assertNotEquals(MetadataValue("a"), MetadataValue("b"))
        self.assertNotEquals(hash(MetadataValue("a")),
                             hash(MetadataValue("b")))
        self.assertNotEquals(MetadataValue("a"), None)
        self.assertNotEquals(hash(MetadataValue("a")), hash(None))
    def testRepresentation(self):
        # str() shows the repr of the persisted value.
        self.assertEquals(str(MetadataValue("a")), "'a'")
    def testBoolValue(self):
        # "1"/"0" decode to truthy/falsy values.
        self.assertTrue(MetadataValue("1").value)
        self.assertFalse(MetadataValue("0").value)
    def testStringValue(self):
        self.assertEquals(MetadataValue(u"test").value, u"test")
        self.assertEquals(MetadataValue("test").value, "test")
        # Special escaped sequences
        self.assertEquals(MetadataValue("\\____EMPTY____LIST____").value,
                          "____EMPTY____LIST____")
        self.assertEquals(MetadataValue("\\;").value, ";")
    def testNumericValue(self):
        # Numbers decode to decimal.Decimal, never float.
        self.assertEquals(MetadataValue(u"4.5").value, decimal.Decimal("4.5"))
        self.assertEquals(MetadataValue(u"5").value, decimal.Decimal("5"))
    def testDatetimeValue(self):
        # NOTE(review): the expected datetimes bake in a local UTC offset
        # (+01:00 / +02:00) — these tests appear timezone-dependent; confirm.
        # From time stamp
        metdataValue = MetadataValue("0", expectedType=datetime)
        self.assertEquals(metdataValue.value, datetime(1970, 1, 1, 1, 0))
        # From RFC 822.
        persistedValue = u"Wed, 02 Oct 2002 13:00:00 GMT"
        metdataValue = MetadataValue(persistedValue)
        self.assertEquals(metdataValue.value, datetime(2002, 10, 2, 15, 0))
        # From Iso8601.
        persistedValue = u"2006-10-16T08:19:39Z"
        metdataValue = MetadataValue(persistedValue)
        self.assertEquals(metdataValue.value, datetime(2006, 10, 16, 10, 19, 39))
    def testListValue(self):
        # Success
        self.assertEquals(MetadataValue("a;b;1").value,
                          ["a", "b", decimal.Decimal(1)])
        # Special cases
        persistedValue = u"____EMPTY____LIST____"
        metdataValue = MetadataValue(persistedValue)
        self.assertEquals(metdataValue.value, list())
        # A lone ";" is a literal string, and "\;" escapes the separator.
        self.assertEquals(MetadataValue(";").value, ";")
        self.assertEquals(MetadataValue("a\\;b;c").value, ["a;b", "c"])
    def testDictValues(self):
        metdataValue = MetadataValue("{}")
        self.assertEquals(metdataValue.value, dict())
    def testGuessRepresentation(self):
        # Success: ambiguous strings yield every plausible interpretation.
        self.assertEquals(MetadataValue("").guessRepresentation(), [None])
        self.assertEquals(MetadataValue("1").guessRepresentation(),
                          [True, decimal.Decimal("1"),
                           datetime(1970, 1, 1, 1, 0, 1), u"1"])
class GetPersistenceRepresentationTestCase(unittest.TestCase):
    """Tests encoding of Python values into the persistence string format."""
    def testBoolValue(self):
        """Booleans encode as "1" / "0"."""
        self.assertEquals(getPersistenceRepresentation(True), "1")
        self.assertEquals(getPersistenceRepresentation(False), "0")
    def testNoneValue(self):
        """None encodes as the empty string; unsupported types raise."""
        self.assertEquals(getPersistenceRepresentation(None), "")
        self.assertRaises(PersistenceError, getPersistenceRepresentation, tuple())
    def testStringValue(self):
        """Strings pass through; reserved sequences are escaped."""
        self.assertEquals(getPersistenceRepresentation(u"test"), u"test")
        self.assertEquals(getPersistenceRepresentation("test"), u"test")
        # Reserved sequences get a leading backslash.
        self.assertEquals(getPersistenceRepresentation(";"), "\\;")
        self.assertEquals(getPersistenceRepresentation("____EMPTY____LIST____"),
                          "\\____EMPTY____LIST____")
        # An undecodable raw string must raise: stub out the default-encoding
        # lookup so the decode attempt fails.
        saved_getdefaultencoding = sys.getdefaultencoding
        sys.getdefaultencoding = lambda: None
        try:
            self.assertRaises(
                PersistenceError, getPersistenceRepresentation, _AE.encode("Latin-1)"))
        finally:
            sys.getdefaultencoding = saved_getdefaultencoding
    def testNumericValue(self):
        """Decimals, ints and floats encode as their plain decimal string."""
        self.assertEquals(getPersistenceRepresentation(decimal.Decimal("4.5")), u"4.5")
        self.assertEquals(getPersistenceRepresentation(decimal.Decimal("5")), u"5")
        self.assertEquals(getPersistenceRepresentation(5), u"5")
        self.assertEquals(getPersistenceRepresentation(4.5), u"4.5")
    def testDatetimeValue(self):
        """Datetimes encode as UTC ISO 8601 with a trailing "Z"."""
        # NOTE(review): expected value assumes a +02:00 local zone at that date.
        local_time = datetime(2006, 10, 16, 10, 19, 39)
        self.assertEquals(getPersistenceRepresentation(local_time),
                          u"2006-10-16T08:19:39Z")
    def testListValue(self):
        """Lists join with ";" plus trailing separator; empty lists use a marker."""
        values = [decimal.Decimal(text) for text in ("2006", "10", "16", "10")]
        self.assertEquals(getPersistenceRepresentation(values),
                          u"2006;10;16;10;")
        self.assertEquals(getPersistenceRepresentation(list()),
                          u"____EMPTY____LIST____")
    def testDictValue(self):
        """An empty dict encodes as u"{}"."""
        self.assertEquals(getPersistenceRepresentation(dict()), u"{}")
| [
"datetime.datetime",
"datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation",
"datafinder.persistence.metadata.value_mapping.MetadataValue",
"unicodedata.lookup",
"decimal.Decimal"
] | [((2093, 2150), 'unicodedata.lookup', 'unicodedata.lookup', (['"""LATIN SMALL LETTER A WITH DIAERESIS"""'], {}), "('LATIN SMALL LETTER A WITH DIAERESIS')\n", (2111, 2150), False, 'import unicodedata\n'), ((3779, 3820), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""0"""'], {'expectedType': 'datetime'}), "('0', expectedType=datetime)\n", (3792, 3820), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((4004, 4033), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['persistedValue'], {}), '(persistedValue)\n', (4017, 4033), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((4210, 4239), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['persistedValue'], {}), '(persistedValue)\n', (4223, 4239), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((4599, 4628), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['persistedValue'], {}), '(persistedValue)\n', (4612, 4628), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((4900, 4919), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""{}"""'], {}), "('{}')\n", (4913, 4919), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((6631, 6653), 'decimal.Decimal', 'decimal.Decimal', (['"""4.5"""'], {}), "('4.5')\n", (6646, 6653), False, 'import decimal\n'), ((6761, 6781), 'decimal.Decimal', 'decimal.Decimal', (['"""5"""'], {}), "('5')\n", (6776, 6781), False, 'import decimal\n'), ((7110, 7144), 'datetime.datetime', 'datetime', (['(2006)', '(10)', '(16)', '(10)', '(19)', '(39)'], {}), '(2006, 10, 16, 10, 19, 39)\n', (7118, 7144), False, 
'from datetime import datetime\n'), ((2382, 2400), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""a"""'], {}), "('a')\n", (2395, 2400), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((2402, 2420), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""a"""'], {}), "('a')\n", (2415, 2420), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((2569, 2587), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""a"""'], {}), "('a')\n", (2582, 2587), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((2589, 2607), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""b"""'], {}), "('b')\n", (2602, 2607), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((2762, 2780), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""a"""'], {}), "('a')\n", (2775, 2780), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((3584, 3606), 'decimal.Decimal', 'decimal.Decimal', (['"""4.5"""'], {}), "('4.5')\n", (3599, 3606), False, 'import decimal\n'), ((3662, 3682), 'decimal.Decimal', 'decimal.Decimal', (['"""5"""'], {}), "('5')\n", (3677, 3682), False, 'import decimal\n'), ((3868, 3894), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)', '(1)', '(0)'], {}), '(1970, 1, 1, 1, 0)\n', (3876, 3894), False, 'from datetime import datetime\n'), ((4081, 4109), 'datetime.datetime', 'datetime', (['(2002)', '(10)', '(2)', '(15)', '(0)'], {}), '(2002, 10, 2, 15, 0)\n', (4089, 4109), False, 'from datetime import datetime\n'), ((4287, 4321), 'datetime.datetime', 'datetime', (['(2006)', '(10)', '(16)', '(10)', 
'(19)', '(39)'], {}), '(2006, 10, 16, 10, 19, 39)\n', (4295, 4321), False, 'from datetime import datetime\n'), ((5444, 5478), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['(True)'], {}), '(True)\n', (5472, 5478), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((5512, 5547), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['(False)'], {}), '(False)\n', (5540, 5547), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((5621, 5655), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['None'], {}), '(None)\n', (5649, 5655), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((5814, 5851), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['u"""test"""'], {}), "(u'test')\n", (5842, 5851), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((5889, 5925), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['"""test"""'], {}), "('test')\n", (5917, 5925), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((6000, 6033), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['""";"""'], {}), "(';')\n", (6028, 6033), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((6069, 6122), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['"""____EMPTY____LIST____"""'], {}), 
"('____EMPTY____LIST____')\n", (6097, 6122), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((6681, 6725), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['persistedValue'], {}), '(persistedValue)\n', (6709, 6725), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((6809, 6853), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['persistedValue'], {}), '(persistedValue)\n', (6837, 6853), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((6911, 6942), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['(5)'], {}), '(5)\n', (6939, 6942), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((6997, 7030), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['(4.5)'], {}), '(4.5)\n', (7025, 7030), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((7172, 7216), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['persistedValue'], {}), '(persistedValue)\n', (7200, 7216), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((7338, 7361), 'decimal.Decimal', 'decimal.Decimal', (['"""2006"""'], {}), "('2006')\n", (7353, 7361), False, 'import decimal\n'), ((7363, 7384), 'decimal.Decimal', 'decimal.Decimal', (['"""10"""'], {}), "('10')\n", (7378, 7384), False, 'import decimal\n'), ((7414, 7435), 'decimal.Decimal', 'decimal.Decimal', (['"""16"""'], {}), "('16')\n", (7429, 7435), False, 'import 
decimal\n'), ((7437, 7458), 'decimal.Decimal', 'decimal.Decimal', (['"""10"""'], {}), "('10')\n", (7452, 7458), False, 'import decimal\n'), ((7487, 7531), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['persistedValue'], {}), '(persistedValue)\n', (7515, 7531), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((7640, 7684), 'datafinder.persistence.metadata.value_mapping.getPersistenceRepresentation', 'getPersistenceRepresentation', (['persistedValue'], {}), '(persistedValue)\n', (7668, 7684), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((2454, 2472), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""a"""'], {}), "('a')\n", (2467, 2472), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((2508, 2526), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""a"""'], {}), "('a')\n", (2521, 2526), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((2644, 2662), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""a"""'], {}), "('a')\n", (2657, 2662), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((2701, 2719), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""b"""'], {}), "('b')\n", (2714, 2719), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((2823, 2841), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""a"""'], {}), "('a')\n", (2836, 2841), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, 
getPersistenceRepresentation\n'), ((2932, 2950), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""a"""'], {}), "('a')\n", (2945, 2950), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((3017, 3035), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""1"""'], {}), "('1')\n", (3030, 3035), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((3069, 3087), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""0"""'], {}), "('0')\n", (3082, 3087), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((3157, 3179), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['u"""test"""'], {}), "(u'test')\n", (3170, 3179), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((3223, 3244), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""test"""'], {}), "('test')\n", (3236, 3244), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((3324, 3364), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""\\\\____EMPTY____LIST____"""'], {}), "('\\\\____EMPTY____LIST____')\n", (3337, 3364), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((3452, 3472), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""\\\\;"""'], {}), "('\\\\;')\n", (3465, 3472), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((3555, 3576), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['u"""4.5"""'], {}), 
"(u'4.5')\n", (3568, 3576), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((3635, 3654), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['u"""5"""'], {}), "(u'5')\n", (3648, 3654), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((4409, 4431), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""a;b;1"""'], {}), "('a;b;1')\n", (4422, 4431), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((4478, 4496), 'decimal.Decimal', 'decimal.Decimal', (['(1)'], {}), '(1)\n', (4493, 4496), False, 'import decimal\n'), ((4721, 4739), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['""";"""'], {}), "(';')\n", (4734, 4739), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((4789, 4813), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""a\\\\;b;c"""'], {}), "('a\\\\;b;c')\n", (4802, 4813), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((5224, 5244), 'decimal.Decimal', 'decimal.Decimal', (['"""1"""'], {}), "('1')\n", (5239, 5244), False, 'import decimal\n'), ((5275, 5304), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)', '(1)', '(0)', '(1)'], {}), '(1970, 1, 1, 1, 0, 1)\n', (5283, 5304), False, 'from datetime import datetime\n'), ((5071, 5088), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['""""""'], {}), "('')\n", (5084, 5088), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n'), ((5147, 5165), 'datafinder.persistence.metadata.value_mapping.MetadataValue', 'MetadataValue', (['"""1"""'], {}), "('1')\n", (5160, 
5165), False, 'from datafinder.persistence.metadata.value_mapping import MetadataValue, getPersistenceRepresentation\n')] |
import random
from .api import put_change_grade
# grades[id] = grade for user #{id}.
# grades[0] is not used. Since user id starts from 1.
def change_grade_randomshuffle(grades):
    """Assign every user a random grade drawn from 0..len(grades)-1.

    The incoming grades are discarded: a shuffled permutation of the
    index range replaces them. ``grades[i]`` is user #i's grade; index 0
    is unused since user ids start at 1.
    """
    user_ids = set(range(len(grades)))
    user_ids.remove(0)
    shuffled = list(range(len(grades)))
    random.shuffle(shuffled)
    put_change_grade([{'id': uid, 'grade': shuffled[uid]} for uid in user_ids])
def change_grade_simplelinear(grades, game_results):
    """Zero-sum linear grade update: the winner gains (40 - taken) points
    and the loser loses the same amount, for each game result."""
    MAX_TAKEN = 40
    touched = set()
    for result in game_results:
        winner, loser = result['win'], result['lose']
        touched.add(winner)
        touched.add(loser)
        delta = MAX_TAKEN - result['taken']
        grades[winner] += delta
        grades[loser] -= delta
    put_change_grade([{'id': uid, 'grade': grades[uid]} for uid in touched])
def change_grade_discountedlinear(grades, game_results):
    """Apply a zero-sum, linearly discounted grade change for each game.

    The transferred score shrinks linearly with game length: a game of
    MIN_TAKEN moves transfers 2.0 * BASE_SCORE, one of MAX_TAKEN moves
    transfers 0.4 * BASE_SCORE.  The winner gains exactly what the loser
    loses.

    Args:
        grades: mutable sequence of grades indexed by user id (mutated).
        game_results: iterable of dicts with 'win', 'lose', 'taken' keys.
    """
    BASE_SCORE = 100
    MIN_TAKEN = 3
    MAX_TAKEN = 40
    changed_users_id = set()
    for game_result in game_results:
        winner = game_result['win']
        loser = game_result['lose']
        changed_users_id.add(winner)
        changed_users_id.add(loser)
        # Hoist the delta: the original computed this identical expression
        # twice (once for the winner, once for the loser).
        delta = BASE_SCORE * (2 - 1.6 * (game_result['taken'] - MIN_TAKEN)
                              / (MAX_TAKEN - MIN_TAKEN))
        grades[winner] += delta
        grades[loser] -= delta
    commands = []
    for changed_user_id in changed_users_id:
        commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
    put_change_grade(commands)
def change_grade_simplequadratic(grades, game_results):
    """Zero-sum update where the transferred score is (40 - taken) squared."""
    MAX_TAKEN = 40
    touched = set()
    for result in game_results:
        winner, loser = result['win'], result['lose']
        touched.add(winner)
        touched.add(loser)
        delta = (MAX_TAKEN - result['taken']) ** 2
        grades[winner] += delta
        grades[loser] -= delta
    put_change_grade([{'id': uid, 'grade': grades[uid]} for uid in touched])
def change_grade_preventabusediscountedlinear(grades, game_results, suspicion_marks):
    """Discounted-linear grade update with a heuristic guard against grade
    farming: implausibly fast games add a suspicion mark to the loser
    (presumably suspected of throwing games), and games whose loser has
    more than two marks are skipped entirely.
    """
    BASE_SCORE = 4000
    MIN_TAKEN = 3
    MAX_TAKEN = 40
    changed_users_id = set()
    for game_result in game_results:
        winner = game_result['win']
        loser = game_result['lose']
        game_time = game_result['taken']
        changed_users_id.add(winner)
        changed_users_id.add(loser)
        # Only very short games (< 11 moves) are candidates for abuse checks.
        if game_time < 11:
            # Expected length shrinks as the grade gap widens; the constants
            # 99000 and 35 are tuning values — TODO confirm their derivation.
            expected_game_time = 40 - abs(grades[winner] - grades[loser])/99000*35
            tolerance = 5 + 5
            if game_time < expected_game_time - tolerance:
                suspicion_marks[loser] += 1
                # Repeat offenders (3+ marks) get no grade change at all.
                if suspicion_marks[loser] > 2:
                    continue
        # Weight the transfer by the winner's expected win rate.
        expected_win_rate = grades[winner]/(grades[winner] + grades[loser])
        win_rate_modifier = expected_win_rate # (expected_win_rate - 0.3)*2 + 0.2
        grades[winner] += win_rate_modifier*BASE_SCORE*(3 - 2.5*(game_time - MIN_TAKEN)/(MAX_TAKEN - MIN_TAKEN))
        grades[loser] -= win_rate_modifier*BASE_SCORE*(3 - 2.5*(game_time - MIN_TAKEN)/(MAX_TAKEN - MIN_TAKEN))
    commands = []
    for changed_user_id in changed_users_id:
        commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
    put_change_grade(commands)
| [
"random.shuffle"
] | [((304, 326), 'random.shuffle', 'random.shuffle', (['grades'], {}), '(grades)\n', (318, 326), False, 'import random\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Build a grouped bar chart comparing "call-site-specific" vs
# "call-target-specific" runtimes for four benchmark scenarios.
# Commented-out lines are remnants of an earlier broken-axis (ax/ax2) layout.
N = 4
ind = np.arange(N)  # the x locations for the groups
width = 0.4       # the width of the bars
fig, ax = plt.subplots()
ax.set_ylim(0,11) # outliers only
#ax2.set_ylim(0,35) # most of the data
#ax.spines['bottom'].set_visible(False)
#ax2.spines['top'].set_visible(False)
ax.xaxis.tick_top()
#ax.tick_params(labeltop='off') # don't put tick labels at the top
ax.xaxis.tick_bottom()
fig.subplots_adjust(hspace=0.1)
# call-site-specific
noneV = (5.729, 6.966, 7.953, 8.524)
rectsNone = ax.bar(ind, noneV, width, color='w', hatch=' ')
#ax2.bar(ind, noneV, width, color='w')
# call-target-specific uncached
classCached = (2.560, 3.616, 5.357, 6.846)
rectsClassCached = ax.bar(ind+width, classCached, width, color='w', hatch='o')
#ax2.bar(ind+width, classCached, width, color='w', hatch='/')
# call-target-specific cached
#classUncached = (2.634, 3.358, 5.583, 6.838)
#rectsClassUncached = ax.bar(ind+2*width, classUncached, width, color='w', hatch='o')
#ax2.bar(ind+2*width, classUncached, width, color='w', hatch='o')
# add some text for labels, title and axes ticks
#ax2.set_ylabel('Runtime (ms)')
#ax.set_title('Average rendering runtime per frame')
ax.set_ylabel('Runtime (s) / 100.000 invocations')
ax.set_xticks(ind+width+0.14)
ax.set_xticklabels( ('(a) 1 target \n (10 kwargs)', '(b) 2 targets \n (10 kwargs; \n 10 kwargs)', '(c) 2 targets \n (10 kwargs; \n 5 kwargs + rest kwargs)', '(d) 1 target \n (5 kwargs + rest kwargs)') )
#ax2.set_yticks(ax2.get_yticks()[:-1])
ax.set_yticks(ax.get_yticks()[1:])
ax.legend( (rectsNone[0], rectsClassCached[0]), ('call-site-specific', 'call-target-specific') , loc=4)
def autolabel(rects):
    """Write each bar's height above it (vertically); zero-height bars
    are labelled 'n/a' instead of a number."""
    for rect in rects:
        height = rect.get_height()
        center_x = rect.get_x() + rect.get_width() / 2.
        if height == 0:
            ax.text(center_x, height + 2, 'n/a',
                    ha='center', va='bottom', rotation='vertical')
        else:
            ax.text(center_x, height + 0.2, '%.2f' % float(height),
                    ha='center', va='bottom', rotation='vertical')
# Annotate both bar groups with their values, then render the figure.
autolabel(rectsNone)
autolabel(rectsClassCached)
plt.show()
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((66, 78), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (75, 78), True, 'import numpy as np\n'), ((165, 179), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (177, 179), True, 'import matplotlib.pyplot as plt\n'), ((2178, 2188), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2186, 2188), True, 'import matplotlib.pyplot as plt\n')] |
from datetime import timedelta
from random import sample, randint
import talktracker as tt
def time_diff(time1, time2):
    """Return time1 - time2 as an (hours, minutes, seconds) tuple.

    Both arguments are (hours, minutes, seconds) tuples; the subtraction is
    done via ``timedelta`` and re-split with ``dissect_time``.
    """
    delta1 = timedelta(hours=time1[0], minutes=time1[1], seconds=time1[2])
    delta2 = timedelta(hours=time2[0], minutes=time2[1], seconds=time2[2])
    return dissect_time((delta1 - delta2).seconds)
def time_add(time1, time2):
    """Return time1 + time2 as an (hours, minutes, seconds) tuple.

    Both arguments are (hours, minutes, seconds) tuples; the addition is
    done via ``timedelta`` and re-split with ``dissect_time``.
    """
    delta1 = timedelta(hours=time1[0], minutes=time1[1], seconds=time1[2])
    delta2 = timedelta(hours=time2[0], minutes=time2[1], seconds=time2[2])
    return dissect_time((delta1 + delta2).seconds)
def dissect_time(sec):
    """Split a duration in seconds into an (hours, minutes, seconds) tuple."""
    minutes, seconds = divmod(sec, 60)
    hours, minutes = divmod(minutes, 60)
    return hours, minutes, seconds
def to_seconds(*args):
    """Convert a time given as three ints (h, m, s) or one (h, m, s) tuple
    to total seconds.

    Raises:
        ValueError: when neither one nor three positional args are given.
    """
    if len(args) == 3:
        hours, minutes, seconds = args[0], args[1], args[2]
    elif len(args) == 1:
        hours, minutes, seconds = args[0][0], args[0][1], args[0][2]
    else:
        raise ValueError("Input must be either three integers, or a tuple of three integers")
    return hours * 60 * 60 + minutes * 60 + seconds
def gen_fake_data(teams_n=0, members_n=[], duration=(2, 30, 0)):
    """Generate a Session populated with randomly named fake teams/members.

    Pseudo code of the intended algorithm:
    1. create teams_n teams with randomly generated names
    2. for each team create the corresponding number of members with randomly
       generated attributes (age, country, batch)
    3. create a session and add the teams to the session
    4. randomly pick a team
    5. randomly pick a member and assign a time to him/her
    6. repeat 4 and 5 until the session's total time exceeds ``duration``

    Args:
        teams_n (int): number of teams.
        members_n (list): members per team; must have at least teams_n
            entries.  (The default is a shared mutable list, but it is never
            mutated here, so it is safe.)
        duration (tuple): intended total (h, m, s) of the session.
            NOTE(review): steps 4-6 are not implemented yet, so ``duration``
            is currently unused -- confirm before relying on it.

    Returns:
        tt.Session: session object with the fake teams attached.

    Note: the original had this documentation as an unreachable string
    after the ``return``; it is now a proper docstring.
    """
    team_names = team_name_list.copy()
    member_names = member_name_list.copy()
    teams = []
    for ind in range(teams_n):
        members = []
        for _ in range(members_n[ind]):
            # Sample names without replacement so every member is unique.
            name = sample(member_names, 1)[0]
            member_names.remove(name)
            age = randint(1, 40)
            batch = randint(1, 3)
            country = 'Germany'
            members.append(tt.Member(name, age=age, batch=batch, country=country))
        name = sample(team_names, 1)[0]
        team_names.remove(name)
        teams.append(tt.Team(name, members=members))
    return tt.Session('Untitled', teams=teams)
# Pools of unique names consumed (without replacement) by gen_fake_data.
team_name_list = ["RockStars", "ShadowWalkers", "MiddleEasterns", "Newrons", "Persians",
                  "Baghalies", "Golabies", "Loosers"]
member_name_list = ["Mohammad", "Annika", "Amir", "Yasaman", "Arman", "Nick", "Nicholas" ,
                    "Michael", "Aleksndra", "Fati", "Rasoul", "Janne", "Yagmur", "Raja",
                    "Abdallah", "Viktorja", "Alex", "James", "Marie", "Auguste", "Nora",
                    "Mathew", "Stefan", "Steffen", "Darya", "Tamara", "Ali", "Niloufar",
                    "Christoph", "Werner", "Florian", "Bernhard", "Samuel", "Karan", "Elisa",
                    "Atena", "Milad", "Nazanin", "Rahaa", "Amin", "Ehsan", "Shahab", "Sepideh"]
"random.sample",
"talktracker.Team",
"talktracker.Member",
"datetime.timedelta",
"random.randint",
"talktracker.Session"
] | [((178, 239), 'datetime.timedelta', 'timedelta', ([], {'hours': 'time1[0]', 'minutes': 'time1[1]', 'seconds': 'time1[2]'}), '(hours=time1[0], minutes=time1[1], seconds=time1[2])\n', (187, 239), False, 'from datetime import timedelta\n'), ((257, 318), 'datetime.timedelta', 'timedelta', ([], {'hours': 'time2[0]', 'minutes': 'time2[1]', 'seconds': 'time2[2]'}), '(hours=time2[0], minutes=time2[1], seconds=time2[2])\n', (266, 318), False, 'from datetime import timedelta\n'), ((580, 641), 'datetime.timedelta', 'timedelta', ([], {'hours': 'time1[0]', 'minutes': 'time1[1]', 'seconds': 'time1[2]'}), '(hours=time1[0], minutes=time1[1], seconds=time1[2])\n', (589, 641), False, 'from datetime import timedelta\n'), ((659, 720), 'datetime.timedelta', 'timedelta', ([], {'hours': 'time2[0]', 'minutes': 'time2[1]', 'seconds': 'time2[2]'}), '(hours=time2[0], minutes=time2[1], seconds=time2[2])\n', (668, 720), False, 'from datetime import timedelta\n'), ((2764, 2799), 'talktracker.Session', 'tt.Session', (['"""Untitled"""'], {'teams': 'teams'}), "('Untitled', teams=teams)\n", (2774, 2799), True, 'import talktracker as tt\n'), ((2458, 2472), 'random.randint', 'randint', (['(1)', '(40)'], {}), '(1, 40)\n', (2465, 2472), False, 'from random import sample, randint\n'), ((2493, 2506), 'random.randint', 'randint', (['(1)', '(3)'], {}), '(1, 3)\n', (2500, 2506), False, 'from random import sample, randint\n'), ((2638, 2659), 'random.sample', 'sample', (['team_names', '(1)'], {}), '(team_names, 1)\n', (2644, 2659), False, 'from random import sample, randint\n'), ((2716, 2746), 'talktracker.Team', 'tt.Team', (['name'], {'members': 'members'}), '(name, members=members)\n', (2723, 2746), True, 'import talktracker as tt\n'), ((2320, 2343), 'random.sample', 'sample', (['member_names', '(1)'], {}), '(member_names, 1)\n', (2326, 2343), False, 'from random import sample, randint\n'), ((2566, 2620), 'talktracker.Member', 'tt.Member', (['name'], {'age': 'age', 'batch': 'batch', 'country': 
'country'}), '(name, age=age, batch=batch, country=country)\n', (2575, 2620), True, 'import talktracker as tt\n')] |
"""A simple text editor made in Python 2.7."""
from os import path, chdir
workingdir = path.join(path.dirname(__file__), 'texts')
chdir(workingdir)
from Tkinter import Tk, Text, Button
import tkFileDialog
root = Tk("Text Editor")
text = Text(root)
text.grid()
def saveas():
    """Prompt for a destination file and write the editor contents to it.

    I/O errors (including a cancelled dialog, which makes ``open`` fail on
    an empty path) are silently ignored, matching the original behaviour.
    """
    try:
        # "1.0" = start of the buffer; "end-1c" drops Tk's trailing newline.
        t = text.get("1.0", "end-1c")
        savelocation = tkFileDialog.asksaveasfilename()
        # `with` guarantees the handle is closed: the original had
        # `file1.close` without parentheses, so the file was never closed.
        with open(savelocation, "w") as file1:
            file1.write(t)
    except IOError:
        pass
def openfile():
    """Prompt for a file and load its contents into the editor.

    I/O errors (including a cancelled dialog) are silently ignored,
    matching the original behaviour.
    """
    try:
        location = tkFileDialog.askopenfilename()
        # `with` closes the file; the original leaked the handle entirely.
        with open(location, "r") as file1:
            fileContents = file1.read()
        text.delete(1.0, "end")
        text.insert(1.0, fileContents)
    except IOError:
        pass
button = Button(root, text="Open", command=openfile)
button.grid()
button = Button(root, text="Save As", command=saveas)
button.grid()
root.mainloop()
workingdir = path.join(path.dirname(__file__))
chdir(workingdir)
| [
"Tkinter.Tk",
"Tkinter.Button",
"Tkinter.Text",
"os.chdir",
"os.path.dirname",
"tkFileDialog.asksaveasfilename",
"tkFileDialog.askopenfilename"
] | [((135, 152), 'os.chdir', 'chdir', (['workingdir'], {}), '(workingdir)\n', (140, 152), False, 'from os import path, chdir\n'), ((222, 239), 'Tkinter.Tk', 'Tk', (['"""Text Editor"""'], {}), "('Text Editor')\n", (224, 239), False, 'from Tkinter import Tk, Text, Button\n'), ((250, 260), 'Tkinter.Text', 'Text', (['root'], {}), '(root)\n', (254, 260), False, 'from Tkinter import Tk, Text, Button\n'), ((920, 963), 'Tkinter.Button', 'Button', (['root'], {'text': '"""Open"""', 'command': 'openfile'}), "(root, text='Open', command=openfile)\n", (926, 963), False, 'from Tkinter import Tk, Text, Button\n'), ((989, 1033), 'Tkinter.Button', 'Button', (['root'], {'text': '"""Save As"""', 'command': 'saveas'}), "(root, text='Save As', command=saveas)\n", (995, 1033), False, 'from Tkinter import Tk, Text, Button\n'), ((1117, 1134), 'os.chdir', 'chdir', (['workingdir'], {}), '(workingdir)\n', (1122, 1134), False, 'from os import path, chdir\n'), ((101, 123), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (113, 123), False, 'from os import path, chdir\n'), ((1092, 1114), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (1104, 1114), False, 'from os import path, chdir\n'), ((470, 502), 'tkFileDialog.asksaveasfilename', 'tkFileDialog.asksaveasfilename', ([], {}), '()\n', (500, 502), False, 'import tkFileDialog\n'), ((695, 725), 'tkFileDialog.askopenfilename', 'tkFileDialog.askopenfilename', ([], {}), '()\n', (723, 725), False, 'import tkFileDialog\n')] |
import sys
# Read N and K (space separated) from the first line of stdin.
n, k= map(int, sys.stdin.readline().split())
def power(a, b):
    """Return a**b modulo the global prime P, via recursive binary
    exponentiation (P is defined at module level after this function)."""
    if b == 0:
        return 1
    half_squared = power(a, b // 2) ** 2
    if b % 2:
        half_squared *= a
    return half_squared % P
P = 1000000007  # prime modulus
# f[i] = i! mod P, built iteratively.
f = [1 for _ in range(n + 1)]
for i in range(2, n + 1):
    f[i] = (f[i - 1] * i) % P
A = f[n]
B = (f[n-k]*f[k])%P
# Fermat's little theorem: C(N,K) mod P = N! * (K!(N-K)!)^(P-2) mod P
print((A % P) * (power(B, P-2) %P) % P)
| [
"sys.stdin.readline"
] | [((26, 46), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (44, 46), False, 'import sys\n')] |
from modem_base import Modem
from network import LTE
import logging
class ModemSequans(Modem):
    """Modem driver built on MicroPython's ``network.LTE`` interface.

    NOTE(review): ``connect`` references ``self.uart``, which is not set
    anywhere in this class -- confirm it is provided by the base class or
    firmware before use.
    """

    def __init__(self):
        self.lte = LTE()
        # Initialise state the original left unset, which made
        # disconnect()/attribute access fail before a first connect().
        self.ppp = None
        self.connected = False

    def power_on(self):
        """Initialise (power up) the LTE modem."""
        self.lte.init()

    def power_off(self):
        """Detach from the network and reset/power down the modem."""
        self.lte.deinit(dettach=True, reset=True)

    def init(self):
        """No extra initialisation needed for this modem."""
        return True

    def connect(self, timeoutms=30000):
        """Start a PPP data session and poll until connected or timeout.

        Returns True when the link reports connected within ``timeoutms``
        milliseconds.
        """
        import utime  # MicroPython time module; the original never imported it

        (status, lines) = self.send_at_cmd('AT+CGDATA="PPP",1', 30000, "CONNECT")
        if not status:
            return False
        import network
        self.ppp = network.PPP(self.uart)
        self.ppp.active(True)
        self.ppp.connect()
        timeout_timestamp = utime.ticks_ms() + timeoutms
        while utime.ticks_ms() < timeout_timestamp:
            self.connected = self.is_connected()
            if self.connected:
                break
            utime.sleep_ms(100)
        return self.connected

    def is_connected(self):
        """Return the LTE layer's connection status."""
        return self.lte.isconnected()

    def disconnect(self):
        """Tear down PPP (if any) and deactivate the PDP context."""
        if self.ppp:
            self.ppp.active(False)
        self.connected = False
        (status, _) = self.send_at_cmd("AT+CGACT=0,1")
        return status

    # to be overriden by children
    def set_gps_state(self, poweron=True):
        pass

    # to be overriden by children
    def is_gps_on(self):
        return False

    def get_gps_position(self, timeoutms=300000):
        return None

    def send_at_cmd(self, command, timeoutms=30000, success_condition="OK"):
        """Send an AT command and return (status, response_lines).

        ``status`` is True when ``success_condition`` appears in any reply
        line.  Fixes the original, which called ``str.find`` on the *list*
        of lines (an AttributeError) and ignored ``success_condition``.
        NOTE(review): ``timeoutms`` is accepted but not used here.
        """
        response = ""
        status = False
        logging.debug(command)
        raw = self.lte.send_at_cmd(command)
        if raw:
            response = raw.strip().splitlines()
            logging.debug(response)
            status = any(success_condition in line for line in response)
        return (status, response)
| [
"network.PPP",
"network.LTE",
"logging.debug"
] | [((140, 145), 'network.LTE', 'LTE', ([], {}), '()\n', (143, 145), False, 'from network import LTE\n'), ((527, 549), 'network.PPP', 'network.PPP', (['self.uart'], {}), '(self.uart)\n', (538, 549), False, 'import network\n'), ((1558, 1580), 'logging.debug', 'logging.debug', (['command'], {}), '(command)\n', (1571, 1580), False, 'import logging\n'), ((1712, 1735), 'logging.debug', 'logging.debug', (['response'], {}), '(response)\n', (1725, 1735), False, 'import logging\n')] |
from ravendb.exceptions.exceptions import NonUniqueObjectException, InvalidOperationException
from ravendb.tests.test_base import UserWithId, TestBase
class TestTrackEntity(TestBase):
    """Session entity-tracking tests: duplicate-id detection, deleting
    untracked entities, and loading deleted documents."""

    def setUp(self):
        super(TestTrackEntity, self).setUp()

    def test_storing_document_with_the_same_id_in_the_same_session_should_throw(self):
        """Storing a second object under an already-tracked id must raise."""
        with self.store.open_session() as session:
            user = UserWithId("User1", None, "users/1")
            session.store(user)
            session.save_changes()
            new_user = UserWithId("User2", None, "users/1")
            ex_message = "Attempted to associate a different object with id 'users/1'."
            self.assertRaisesWithMessage(session.store, NonUniqueObjectException, ex_message, new_user)

    def test_deleting_entity_that_is_not_tracked_should_throw(self):
        """Deleting an object the session never stored/loaded must raise."""
        with self.store.open_session() as session:
            user = UserWithId(None, None)
            ex_message = f"{user} is not associated with the session, cannot delete unknown entity instance."
            self.assertRaisesWithMessage(session.delete, InvalidOperationException, ex_message, user)

    def test_loading_deleted_document_should_return_null(self):
        """Loading a document after it was deleted must return None."""
        with self.store.open_session() as session:
            user1 = UserWithId("John", None, "users/1")
            user2 = UserWithId("Jonathan", None, "users/2")
            session.store(user1)
            session.store(user2)
            session.save_changes()
        with self.store.open_session() as session:
            session.delete("users/1")
            session.delete("users/2")
            session.save_changes()
        with self.store.open_session() as session:
            self.assertIsNone(session.load("users/1", UserWithId))
            self.assertIsNone(session.load("users/2", UserWithId))
| [
"ravendb.tests.test_base.UserWithId"
] | [((410, 446), 'ravendb.tests.test_base.UserWithId', 'UserWithId', (['"""User1"""', 'None', '"""users/1"""'], {}), "('User1', None, 'users/1')\n", (420, 446), False, 'from ravendb.tests.test_base import UserWithId, TestBase\n'), ((538, 574), 'ravendb.tests.test_base.UserWithId', 'UserWithId', (['"""User2"""', 'None', '"""users/1"""'], {}), "('User2', None, 'users/1')\n", (548, 574), False, 'from ravendb.tests.test_base import UserWithId, TestBase\n'), ((907, 929), 'ravendb.tests.test_base.UserWithId', 'UserWithId', (['None', 'None'], {}), '(None, None)\n', (917, 929), False, 'from ravendb.tests.test_base import UserWithId, TestBase\n'), ((1278, 1313), 'ravendb.tests.test_base.UserWithId', 'UserWithId', (['"""John"""', 'None', '"""users/1"""'], {}), "('John', None, 'users/1')\n", (1288, 1313), False, 'from ravendb.tests.test_base import UserWithId, TestBase\n'), ((1334, 1373), 'ravendb.tests.test_base.UserWithId', 'UserWithId', (['"""Jonathan"""', 'None', '"""users/2"""'], {}), "('Jonathan', None, 'users/2')\n", (1344, 1373), False, 'from ravendb.tests.test_base import UserWithId, TestBase\n')] |
# Copyright (c) Meta Platforms, Inc
import os
import sys
from setuptools import find_packages, setup
# Minimum supported interpreter version for this package.
REQUIRED_MAJOR = 3
REQUIRED_MINOR = 7
TEST_REQUIRES = ["numpy", "pytest", "pytest-cov", "scipy"]
# Dev tooling is a superset of the test requirements.
DEV_REQUIRES = TEST_REQUIRES + [
    "black",
    "flake8",
    "flake8-bugbear",
    "mypy",
    "toml",
    "usort",
]
# Check for python version
if sys.version_info < (REQUIRED_MAJOR, REQUIRED_MINOR):
    error = (
        "Your version of python ({major}.{minor}) is too old. You need "
        "python >= {required_major}.{required_minor}."
    ).format(
        major=sys.version_info.major,
        minor=sys.version_info.minor,
        required_minor=REQUIRED_MINOR,
        required_major=REQUIRED_MAJOR,
    )
    sys.exit(error)
# read in README.md as the long description
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata; the version is derived from git tags via setuptools_scm.
setup(
    name="flowtorch",
    description="Normalizing Flows for PyTorch",
    author="FlowTorch Development Team",
    author_email="<EMAIL>",
    license="MIT",
    url="https://flowtorch.ai/users",
    project_urls={
        "Documentation": "https://flowtorch.ai/users",
        "Source": "https://www.github.com/facebookincubator/flowtorch",
    },
    keywords=[
        "Deep Learning",
        "Bayesian Inference",
        "Statistical Modeling",
        "Variational Inference",
        "PyTorch",
    ],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3 :: Only",
        "License :: OSI Approved :: MIT License",
        "Topic :: Scientific/Engineering",
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
    ],
    long_description=long_description,
    long_description_content_type="text/markdown",
    python_requires=">={}.{}".format(REQUIRED_MAJOR, REQUIRED_MINOR),
    install_requires=[
        "torch>=1.8.1",
    ],
    setup_requires=["setuptools_scm"],
    use_scm_version={
        "root": ".",
        "relative_to": __file__,
        # setuptools_scm writes the computed version into the package.
        "write_to": os.path.join("flowtorch", "version.py"),
    },
    packages=find_packages(
        include=["flowtorch", "flowtorch.*"],
        exclude=["debug", "tests", "website", "examples", "scripts"],
    ),
    extras_require={
        "dev": DEV_REQUIRES,
        "test": TEST_REQUIRES,
    },
)
| [
"setuptools.find_packages",
"os.path.join",
"sys.exit"
] | [((729, 744), 'sys.exit', 'sys.exit', (['error'], {}), '(error)\n', (737, 744), False, 'import sys\n'), ((2096, 2213), 'setuptools.find_packages', 'find_packages', ([], {'include': "['flowtorch', 'flowtorch.*']", 'exclude': "['debug', 'tests', 'website', 'examples', 'scripts']"}), "(include=['flowtorch', 'flowtorch.*'], exclude=['debug',\n 'tests', 'website', 'examples', 'scripts'])\n", (2109, 2213), False, 'from setuptools import find_packages, setup\n'), ((2035, 2074), 'os.path.join', 'os.path.join', (['"""flowtorch"""', '"""version.py"""'], {}), "('flowtorch', 'version.py')\n", (2047, 2074), False, 'import os\n')] |
from flask import Blueprint
# Flask blueprint for the db_analysis section of the app.
bp = Blueprint('db_analysis',
               __name__,
               template_folder='templates',
               static_folder='static',
               static_url_path='/db_analysis/static'
               )
# Imported at the bottom so the route modules can import `bp` from this
# package without creating a circular import.
from retrobiocat_web.app.db_analysis.routes import bioinformatics, ssn
| [
"flask.Blueprint"
] | [((34, 164), 'flask.Blueprint', 'Blueprint', (['"""db_analysis"""', '__name__'], {'template_folder': '"""templates"""', 'static_folder': '"""static"""', 'static_url_path': '"""/db_analysis/static"""'}), "('db_analysis', __name__, template_folder='templates',\n static_folder='static', static_url_path='/db_analysis/static')\n", (43, 164), False, 'from flask import Blueprint\n')] |
#!/usr/bin/env python3.9
"""Tasks file used by the *invoke* command.
This simplifies some common development tasks.
Run these tasks with the `invoke` tool.
"""
from __future__ import annotations
import sys
import os
import shutil
import getpass
from glob import glob
from pathlib import Path
import keyring
import semver
from setuptools_scm import get_version
from invoke import task, run, Exit
SIGNERS = ["keith"]
PYTHONBIN = os.environ.get("PYTHONBIN", sys.executable)
# Put the path in quotes in case there is a space in it.
PYTHONBIN = f'"{PYTHONBIN}"'
GPG = "gpg2"
CURRENT_USER = getpass.getuser()
# Putting pypi info here eliminates the need for user-private ~/.pypirc file.
PYPI_HOST = "upload.pypi.org"
PYPI_URL = f"https://{PYPI_HOST}/legacy/"
PYPI_USER = "__token__"
PYPI_INDEX = f"{PYPI_URL}simple"
@task
def info(ctx):
    """Show information about the current Python and environment."""
    version = get_version()      # from setuptools_scm (git-tag derived)
    suffix = get_suffix()        # compiled-extension filename suffix
    print(f"Python being used: {PYTHONBIN}")
    print(f"Python extension suffix: {suffix}")
    print(f"Package version: {version}")
    venv = get_virtualenv()
    if venv:
        print(f"Virtual environment:", venv)
@task
def flake8(ctx, pathname="devtest"):
    """Run flake8 linter on the package."""
    ctx.run(f"{PYTHONBIN} -m flake8 {pathname}")
@task
def format(ctx, pathname="devtest", check=False):
    """Run yapf formatter on the specified file, or recurse into directory."""
    # -d prints a diff only; -i rewrites files in place.
    option = "-d" if check else "-i"
    recurse = "--recursive" if os.path.isdir(pathname) else ""
    ctx.run(f"{PYTHONBIN} -m yapf --style setup.cfg {option} {recurse} {pathname}")
@task
def format_changed(ctx, check=False, untracked=False):
    """Run yapf formatter on currently modified python files.
    If check option given then just show the diff.
    """
    option = "-d" if check else "-i"
    files = get_modified_files(untracked)
    if files:
        ctx.run(f'{PYTHONBIN} -m yapf --style setup.cfg {option} {" ".join(files)}')
    else:
        print("No changed python files.")
@task
def set_pypi_token(ctx):
    """Set the token in the local key ring.
    """
    # getpass hides the token while it is typed.
    pw = getpass.getpass(f"Enter pypi token? ")
    if pw:
        keyring.set_password(PYPI_HOST, PYPI_USER, pw)
    else:
        raise Exit("No password entered.", 3)
@task
def build(ctx):
    """Build the intermediate package components."""
    ctx.run(f"{PYTHONBIN} setup.py build")
@task
def dev_requirements(ctx):
    """Install development requirements."""
    ctx.run(f"{PYTHONBIN} -m pip install --index-url {PYPI_INDEX} --trusted-host {PYPI_HOST} "
            f"-r dev-requirements.txt --user")
@task(pre=[dev_requirements])
def develop(ctx, uninstall=False):
    """Start developing in developer mode."""
    if uninstall:
        ctx.run(f"{PYTHONBIN} setup.py develop --uninstall --user")
    else:
        ctx.run(f'{PYTHONBIN} setup.py develop --index-url "{PYPI_INDEX}" --user')
@task
def clean(ctx):
    """Clean out build and cache files. Remove extension modules."""
    ctx.run(f"{PYTHONBIN} setup.py clean")
    # Remove all __pycache__ dirs and compiled extension modules.
    ctx.run(r"find . -depth -type d -name __pycache__ -exec rm -rf {} \;")
    ctx.run('find devtest -name "*.so" -delete')
    with ctx.cd("docs"):
        ctx.run('rm -f modules/devtest.*.rst')
        ctx.run(f"{PYTHONBIN} -m sphinx.cmd.build -M clean . _build")
@task
def cleandist(ctx):
    """Clean out dist subdirectory."""
    if os.path.isdir("dist"):
        shutil.rmtree("dist", ignore_errors=True)
        os.mkdir("dist")
@task
def test(ctx, testfile=None, ls=False):
    """Run unit tests. Use ls option to only list them."""
    if ls:
        ctx.run(f"{PYTHONBIN} -m pytest --collect-only -qq tests")
    elif testfile:
        ctx.run(f"{PYTHONBIN} -m pytest -s {testfile}")
    else:
        ctx.run(f"{PYTHONBIN} -m pytest tests", hide=False, in_stream=False)
@task
def tag(ctx, tag=None, major=False, minor=False, patch=False):
    """Tag or bump release with a semver tag. Makes a signed tag if you're a signer."""
    latest = None
    if tag is None:
        # No explicit tag: bump from the latest existing v* tag.
        tags = get_tags()
        if not tags:
            latest = semver.VersionInfo(0, 0, 0)
        else:
            latest = tags[-1]
        if patch:
            nextver = latest.bump_patch()
        elif minor:
            nextver = latest.bump_minor()
        elif major:
            nextver = latest.bump_major()
        else:
            # Default bump when no component flag is given is a patch bump.
            nextver = latest.bump_patch()
    else:
        # Explicit tag given: validate it (note: `latest` stays None here,
        # so the summary line below prints "None -> <version>").
        if tag.startswith("v"):
            tag = tag[1:]
        try:
            nextver = semver.parse_version_info(tag)
        except ValueError:
            raise Exit("Invalid semver tag.", 2)
    print(latest, "->", nextver)
    # Signers create signed (-s) tags; everyone else annotated (-a) tags.
    tagopt = "-s" if CURRENT_USER in SIGNERS else "-a"
    ctx.run(f'git tag {tagopt} -m "Release v{nextver}" v{nextver}')
@task
def tag_delete(ctx, tag=None):
    """Delete a tag, both local and remote."""
    if tag:
        ctx.run(f"git tag -d {tag}")
        ctx.run(f"git push origin :refs/tags/{tag}")
@task(cleandist)
def sdist(ctx):
    """Build source distribution."""
    ctx.run(f"{PYTHONBIN} setup.py sdist")
@task
def build_ext(ctx):
    """Build compiled extension modules, in place."""
    ctx.run(f"{PYTHONBIN} setup.py build_ext --inplace")
@task(sdist)
def bdist(ctx):
    """Build a standard wheel file, an installable format."""
    ctx.run(f"{PYTHONBIN} setup.py bdist_wheel")
@task(bdist)
def sign(ctx):
    """Cryptographically sign dist with your default GPG key."""
    # Only configured signers produce .asc signatures for the artifacts.
    if CURRENT_USER in SIGNERS:
        ctx.run(f"{GPG} --detach-sign -a dist/devtest-*.whl")
        ctx.run(f"{GPG} --detach-sign -a dist/devtest-*.tar.gz")
    else:
        print("Not signing.")
@task(pre=[sign])
def publish(ctx):
    """Publish built wheel file to package repo."""
    # Token comes from the local keyring (see set_pypi_token).
    token = get_pypi_token()
    distfiles = glob("dist/*.whl")
    distfiles.extend(glob("dist/*.tar.gz"))
    if not distfiles:
        raise Exit("Nothing in dist folder!")
    distfiles = " ".join(distfiles)
    ctx.run(f'{PYTHONBIN} -m twine upload --repository-url \"{PYPI_URL}\" '
            f'--username {PYPI_USER} --password {token} {distfiles}')
@task
def docs(ctx):
    """Build the HTML documentation."""
    # Regenerate the API stubs from scratch so removed modules disappear.
    ctx.run("rm docs/modules/devtest.*.rst", warn=True)
    ctx.run(f"{PYTHONBIN} -m sphinx.ext.apidoc --force --separate --no-toc --output-dir "
            f"docs/modules devtest")
    with ctx.cd("docs"):
        ctx.run(f"{PYTHONBIN} -m sphinx.cmd.build -M html . _build")
    # Open the result in a browser only when a display is available.
    if os.environ.get("DISPLAY"):
        ctx.run("xdg-open docs/_build/html/index.html")
@task
def branch(ctx, name=None):
    """start a new branch, both local and remote tracking."""
    if name:
        ctx.run(f"git checkout -b {name}")
        ctx.run(f"git push -u origin {name}")
    else:
        # No name: just list existing branches.
        ctx.run("git --no-pager branch")
@task
def branch_delete(ctx, name=None):
    """Delete local, remote and tracking branch by name."""
    if name:
        ctx.run(f"git branch -d {name}", warn=True)  # delete local branch
        ctx.run(f"git branch -d -r {name}", warn=True)  # delete local tracking info
        ctx.run(f"git push origin --delete {name}", warn=True)  # delete remote (origin) branch.
    else:
        print("Supply a branch name: --name <name>")
@task(pre=[sdist])
def docker_build(ctx):
    """Build docker image."""
    version = get_version()
    if not version:
        raise Exit("Need to tag a version first.", 2)
    # Build args are passed via the environment (docker picks them up by name).
    environ = {
        "PYVER": "{}.{}".format(sys.version_info.major, sys.version_info.minor),
        "VERSION": version,
        "PYPI_REPO": PYPI_INDEX,
        "PYPI_HOST": PYPI_HOST,
    }
    ctx.run(
        f"docker build "
        f"--build-arg PYVER --build-arg VERSION "
        f"--build-arg PYPI_REPO --build-arg PYPI_HOST -t devtest:{version} .",
        env=environ)
    print(f"Done. To run it:\n  docker run -it devtest:{version}")
@task
def logfile(ctx, name="devtester"):
    """Dump the system log file with optional name filter.

    Fix: the original tested undefined WINDOWS/LINUX/DARWIN globals (a
    NameError at runtime); the platform is now derived from sys.platform.
    """
    if sys.platform.startswith("win"):
        ctx.run(f'wevtutil.exe qe Application /query:"*[System[Provider[@Name={name!r}]]]" /f:text')
    elif sys.platform.startswith("linux"):
        ctx.run(f'journalctl --identifier={name!r} --no-pager --priority=debug')
    elif sys.platform == "darwin":  # May need a tweak
        ctx.run(f'log stream --predicate \'senderImagePath contains "Python"\' --level debug')
# Helper functions follow.
def get_virtualenv():
    """Return the active virtualenv directory, or None when not in one
    (or when $VIRTUAL_ENV points at a nonexistent directory)."""
    venv = os.environ.get("VIRTUAL_ENV")
    return venv if venv and os.path.isdir(venv) else None
def get_tags():
    """Return the repo's v-prefixed git tags as a sorted list of
    semver.VersionInfo objects (non-semver tags are silently skipped)."""
    rv = run('git tag -l "v*"', hide="out")
    vilist = []
    for line in rv.stdout.split():
        try:
            # Strip the leading "v" before parsing.
            vi = semver.parse_version_info(line[1:])
        except ValueError:
            pass
        else:
            vilist.append(vi)
    vilist.sort()
    return vilist
def get_pypi_token():
    """Fetch the pypi upload token from the local keyring; exit if unset."""
    cred = keyring.get_credential(PYPI_HOST, PYPI_USER)
    if not cred:
        raise Exit("You must set the pypi token with the set-pypi-token target.", 1)
    return cred.password
def get_suffix():
    """Return the interpreter's compiled-extension filename suffix
    (EXT_SUFFIX), queried from the configured PYTHONBIN subprocess."""
    return run(
        f'{PYTHONBIN} -c \'import sysconfig; print(sysconfig.get_config_vars()["EXT_SUFFIX"])\'',
        hide=True,
    ).stdout.strip()  # noqa
def resolve_path(base, p):
    """Join *p* (any path-like) onto *base* and return the result as a string."""
    return str(base / Path(p))
def find_git_base():
    """Find the base directory of this git repo.

    The git status output is always relative to this directory.

    Raises:
        Exit: when no ancestor of the current directory contains ``.git``.
    """
    start = Path.cwd().resolve()
    while True:
        if (start / ".git").exists():
            return start
        if start.parent == start:
            # Reached the filesystem root.  The original looped forever
            # here (`while start:` — a Path object is always truthy and
            # the root's parent is itself).
            break
        start = start.parent
    raise Exit("Not able to find git repo base.")
def get_modified_files(untracked):
    """Find the list of modified and, optionally, untracked Python files.

    If `untracked` is True, also include untracked Python files.
    """
    filelist = []
    gitbase = find_git_base()
    # -z NUL-separates records; porcelain=1 gives "XY path" entries.
    gitout = run('git status --porcelain=1 -z', hide=True)
    for line in gitout.stdout.split("\0"):
        if line:
            if not line.endswith(".py"):
                continue
            # " M" = modified in the working tree (not staged).
            if line[0:2] == " M":
                filelist.append(resolve_path(gitbase, line[3:]))
            # "??" = untracked file.
            if untracked and line[0:2] == "??":
                filelist.append(resolve_path(gitbase, line[3:]))
    return filelist
| [
"semver.VersionInfo",
"keyring.get_credential",
"pathlib.Path",
"keyring.set_password",
"pathlib.Path.cwd",
"os.environ.get",
"getpass.getpass",
"invoke.run",
"setuptools_scm.get_version",
"os.path.isdir",
"invoke.Exit",
"os.mkdir",
"shutil.rmtree",
"getpass.getuser",
"semver.parse_versi... | [((434, 477), 'os.environ.get', 'os.environ.get', (['"""PYTHONBIN"""', 'sys.executable'], {}), "('PYTHONBIN', sys.executable)\n", (448, 477), False, 'import os\n'), ((594, 611), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (609, 611), False, 'import getpass\n'), ((2659, 2687), 'invoke.task', 'task', ([], {'pre': '[dev_requirements]'}), '(pre=[dev_requirements])\n', (2663, 2687), False, 'from invoke import task, run, Exit\n'), ((5005, 5020), 'invoke.task', 'task', (['cleandist'], {}), '(cleandist)\n', (5009, 5020), False, 'from invoke import task, run, Exit\n'), ((5259, 5270), 'invoke.task', 'task', (['sdist'], {}), '(sdist)\n', (5263, 5270), False, 'from invoke import task, run, Exit\n'), ((5401, 5412), 'invoke.task', 'task', (['bdist'], {}), '(bdist)\n', (5405, 5412), False, 'from invoke import task, run, Exit\n'), ((5695, 5711), 'invoke.task', 'task', ([], {'pre': '[sign]'}), '(pre=[sign])\n', (5699, 5711), False, 'from invoke import task, run, Exit\n'), ((7260, 7277), 'invoke.task', 'task', ([], {'pre': '[sdist]'}), '(pre=[sdist])\n', (7264, 7277), False, 'from invoke import task, run, Exit\n'), ((926, 939), 'setuptools_scm.get_version', 'get_version', ([], {}), '()\n', (937, 939), False, 'from setuptools_scm import get_version\n'), ((2154, 2192), 'getpass.getpass', 'getpass.getpass', (['f"""Enter pypi token? """'], {}), "(f'Enter pypi token? 
')\n", (2169, 2192), False, 'import getpass\n'), ((3424, 3445), 'os.path.isdir', 'os.path.isdir', (['"""dist"""'], {}), "('dist')\n", (3437, 3445), False, 'import os\n'), ((5827, 5845), 'glob.glob', 'glob', (['"""dist/*.whl"""'], {}), "('dist/*.whl')\n", (5831, 5845), False, 'from glob import glob\n'), ((6487, 6512), 'os.environ.get', 'os.environ.get', (['"""DISPLAY"""'], {}), "('DISPLAY')\n", (6501, 6512), False, 'import os\n'), ((7345, 7358), 'setuptools_scm.get_version', 'get_version', ([], {}), '()\n', (7356, 7358), False, 'from setuptools_scm import get_version\n'), ((8397, 8426), 'os.environ.get', 'os.environ.get', (['"""VIRTUAL_ENV"""'], {}), "('VIRTUAL_ENV')\n", (8411, 8426), False, 'import os\n'), ((8527, 8561), 'invoke.run', 'run', (['"""git tag -l "v*\\""""'], {'hide': '"""out"""'}), '(\'git tag -l "v*"\', hide=\'out\')\n', (8530, 8561), False, 'from invoke import task, run, Exit\n'), ((8838, 8882), 'keyring.get_credential', 'keyring.get_credential', (['PYPI_HOST', 'PYPI_USER'], {}), '(PYPI_HOST, PYPI_USER)\n', (8860, 8882), False, 'import keyring\n'), ((9229, 9236), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (9233, 9236), False, 'from pathlib import Path\n'), ((9559, 9598), 'invoke.Exit', 'Exit', (['"""Not able to find git repo base."""'], {}), "('Not able to find git repo base.')\n", (9563, 9598), False, 'from invoke import task, run, Exit\n'), ((9845, 9890), 'invoke.run', 'run', (['"""git status --porcelain=1 -z"""'], {'hide': '(True)'}), "('git status --porcelain=1 -z', hide=True)\n", (9848, 9890), False, 'from invoke import task, run, Exit\n'), ((1529, 1552), 'os.path.isdir', 'os.path.isdir', (['pathname'], {}), '(pathname)\n', (1542, 1552), False, 'import os\n'), ((2212, 2258), 'keyring.set_password', 'keyring.set_password', (['PYPI_HOST', 'PYPI_USER', 'pw'], {}), '(PYPI_HOST, PYPI_USER, pw)\n', (2232, 2258), False, 'import keyring\n'), ((2283, 2314), 'invoke.Exit', 'Exit', (['"""No password entered."""', '(3)'], {}), "('No password entered.', 
3)\n", (2287, 2314), False, 'from invoke import task, run, Exit\n'), ((3455, 3496), 'shutil.rmtree', 'shutil.rmtree', (['"""dist"""'], {'ignore_errors': '(True)'}), "('dist', ignore_errors=True)\n", (3468, 3496), False, 'import shutil\n'), ((3505, 3521), 'os.mkdir', 'os.mkdir', (['"""dist"""'], {}), "('dist')\n", (3513, 3521), False, 'import os\n'), ((5867, 5888), 'glob.glob', 'glob', (['"""dist/*.tar.gz"""'], {}), "('dist/*.tar.gz')\n", (5871, 5888), False, 'from glob import glob\n'), ((5926, 5957), 'invoke.Exit', 'Exit', (['"""Nothing in dist folder!"""'], {}), "('Nothing in dist folder!')\n", (5930, 5957), False, 'from invoke import task, run, Exit\n'), ((7393, 7432), 'invoke.Exit', 'Exit', (['"""Need to tag a version first."""', '(2)'], {}), "('Need to tag a version first.', 2)\n", (7397, 7432), False, 'from invoke import task, run, Exit\n'), ((8443, 8462), 'os.path.isdir', 'os.path.isdir', (['venv'], {}), '(venv)\n', (8456, 8462), False, 'import os\n'), ((8914, 8984), 'invoke.Exit', 'Exit', (['"""You must set the pypi token with the set-pypi-token target."""', '(1)'], {}), "('You must set the pypi token with the set-pypi-token target.', 1)\n", (8918, 8984), False, 'from invoke import task, run, Exit\n'), ((4134, 4161), 'semver.VersionInfo', 'semver.VersionInfo', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (4152, 4161), False, 'import semver\n'), ((4549, 4579), 'semver.parse_version_info', 'semver.parse_version_info', (['tag'], {}), '(tag)\n', (4574, 4579), False, 'import semver\n'), ((8643, 8678), 'semver.parse_version_info', 'semver.parse_version_info', (['line[1:]'], {}), '(line[1:])\n', (8668, 8678), False, 'import semver\n'), ((9419, 9429), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (9427, 9429), False, 'from pathlib import Path\n'), ((4625, 4655), 'invoke.Exit', 'Exit', (['"""Invalid semver tag."""', '(2)'], {}), "('Invalid semver tag.', 2)\n", (4629, 4655), False, 'from invoke import task, run, Exit\n'), ((9041, 9150), 'invoke.run', 'run', 
(['f"""{PYTHONBIN} -c \'import sysconfig; print(sysconfig.get_config_vars()["EXT_SUFFIX"])\'"""'], {'hide': '(True)'}), '(f\'{PYTHONBIN} -c \\\'import sysconfig; print(sysconfig.get_config_vars()["EXT_SUFFIX"])\\\'\'\n , hide=True)\n', (9044, 9150), False, 'from invoke import task, run, Exit\n')] |
from Jumpscale import j
from io import BytesIO
import binascii
def main(self):
"""
kosmos -p 'j.servers.gedis.test("threebot_redis_registration")'
"""
####THREEBOT REGISTRATION
phonebook = j.threebot.package.phonebook.client_get()
if j.sal.nettools.tcpPortConnectionTest("www.google.com", 443):
phonebook.actors.phonebook.wallet_create("test")
j.data.nacl.configure(name="client_test", generate=True, interactive=False)
client_nacl = j.data.nacl.get(name="client_test")
cl = j.clients.redis.get(port=8901)
def register_threebot_redis():
cl.execute_command("config_format", "json")
# get a nacl config (to act as a virtual person)
myname = "test.ibiza"
data_return_json = cl.execute_command(
"default.phonebook.name_register",
j.data.serializers.json.dumps({"name": myname, "pubkey": client_nacl.verify_key_hex}),
)
data_return = j.data.serializers.json.loads(data_return_json)
assert data_return["pubkey"] == client_nacl.verify_key_hex
assert data_return["name"] == myname
data = {
"tid": data_return["id"],
"name": data_return["name"],
"email": "<EMAIL>",
"ipaddr": "192.168.3.11",
"description": "",
"pubkey": client_nacl.verify_key_hex,
}
def sign(nacl, *args):
buffer = BytesIO()
for item in args:
if isinstance(item, str):
item = item.encode()
elif isinstance(item, int):
item = str(item).encode()
elif isinstance(item, bytes):
pass
else:
raise RuntimeError()
buffer.write(item)
payload = buffer.getvalue()
print(payload)
signature = nacl.sign(payload)
return binascii.hexlify(signature).decode()
# we sign the different records to come up with the right 'sender_signature_hex'
sender_signature_hex = sign(
client_nacl, data["tid"], data["name"], data["email"], data["ipaddr"], data["description"], data["pubkey"]
)
data["sender_signature_hex"] = sender_signature_hex
data2 = j.data.serializers.json.dumps(data)
data_return_json = cl.execute_command("default.phonebook.record_register", data2)
data_return = j.data.serializers.json.loads(data_return_json)
print(data)
return data_return
def query_threebot_redis(tid):
cl.execute_command("config_format", "json")
myname = "test.ibiza"
data2 = j.data.serializers.json.dumps({"name": myname})
res_json = cl.execute_command("default.phonebook.get", data2)
threebot_info3 = j.data.serializers.json.loads(res_json)
data2 = j.data.serializers.json.dumps({"tid": tid})
res_json = cl.execute_command("default.phonebook.get", data2)
threebot_info4 = j.data.serializers.json.loads(res_json)
assert threebot_info3 == threebot_info4
# verify the data (is same logic as above in register threebot, to see if record is valid)
rc = j.data.nacl.payload_verify(
threebot_info4["id"],
threebot_info4["name"],
threebot_info4["email"],
threebot_info4["ipaddr"],
threebot_info4["description"],
threebot_info4["pubkey"],
verifykey=threebot_info4["pubkey"],
signature=threebot_info4["signature"],
die=True,
)
return threebot_info4
threebot_info = register_threebot_redis()
threebot_info2 = query_threebot_redis(threebot_info["id"])
assert threebot_info == threebot_info2
print("**DONE**")
| [
"Jumpscale.j.data.nacl.configure",
"Jumpscale.j.clients.redis.get",
"binascii.hexlify",
"io.BytesIO",
"Jumpscale.j.data.nacl.payload_verify",
"Jumpscale.j.threebot.package.phonebook.client_get",
"Jumpscale.j.data.serializers.json.loads",
"Jumpscale.j.data.serializers.json.dumps",
"Jumpscale.j.sal.ne... | [((213, 254), 'Jumpscale.j.threebot.package.phonebook.client_get', 'j.threebot.package.phonebook.client_get', ([], {}), '()\n', (252, 254), False, 'from Jumpscale import j\n'), ((263, 322), 'Jumpscale.j.sal.nettools.tcpPortConnectionTest', 'j.sal.nettools.tcpPortConnectionTest', (['"""www.google.com"""', '(443)'], {}), "('www.google.com', 443)\n", (299, 322), False, 'from Jumpscale import j\n'), ((386, 461), 'Jumpscale.j.data.nacl.configure', 'j.data.nacl.configure', ([], {'name': '"""client_test"""', 'generate': '(True)', 'interactive': '(False)'}), "(name='client_test', generate=True, interactive=False)\n", (407, 461), False, 'from Jumpscale import j\n'), ((480, 515), 'Jumpscale.j.data.nacl.get', 'j.data.nacl.get', ([], {'name': '"""client_test"""'}), "(name='client_test')\n", (495, 515), False, 'from Jumpscale import j\n'), ((526, 556), 'Jumpscale.j.clients.redis.get', 'j.clients.redis.get', ([], {'port': '(8901)'}), '(port=8901)\n', (545, 556), False, 'from Jumpscale import j\n'), ((961, 1008), 'Jumpscale.j.data.serializers.json.loads', 'j.data.serializers.json.loads', (['data_return_json'], {}), '(data_return_json)\n', (990, 1008), False, 'from Jumpscale import j\n'), ((2313, 2348), 'Jumpscale.j.data.serializers.json.dumps', 'j.data.serializers.json.dumps', (['data'], {}), '(data)\n', (2342, 2348), False, 'from Jumpscale import j\n'), ((2461, 2508), 'Jumpscale.j.data.serializers.json.loads', 'j.data.serializers.json.loads', (['data_return_json'], {}), '(data_return_json)\n', (2490, 2508), False, 'from Jumpscale import j\n'), ((2695, 2742), 'Jumpscale.j.data.serializers.json.dumps', 'j.data.serializers.json.dumps', (["{'name': myname}"], {}), "({'name': myname})\n", (2724, 2742), False, 'from Jumpscale import j\n'), ((2839, 2878), 'Jumpscale.j.data.serializers.json.loads', 'j.data.serializers.json.loads', (['res_json'], {}), '(res_json)\n', (2868, 2878), False, 'from Jumpscale import j\n'), ((2896, 2939), 
'Jumpscale.j.data.serializers.json.dumps', 'j.data.serializers.json.dumps', (["{'tid': tid}"], {}), "({'tid': tid})\n", (2925, 2939), False, 'from Jumpscale import j\n'), ((3036, 3075), 'Jumpscale.j.data.serializers.json.loads', 'j.data.serializers.json.loads', (['res_json'], {}), '(res_json)\n', (3065, 3075), False, 'from Jumpscale import j\n'), ((3238, 3517), 'Jumpscale.j.data.nacl.payload_verify', 'j.data.nacl.payload_verify', (["threebot_info4['id']", "threebot_info4['name']", "threebot_info4['email']", "threebot_info4['ipaddr']", "threebot_info4['description']", "threebot_info4['pubkey']"], {'verifykey': "threebot_info4['pubkey']", 'signature': "threebot_info4['signature']", 'die': '(True)'}), "(threebot_info4['id'], threebot_info4['name'],\n threebot_info4['email'], threebot_info4['ipaddr'], threebot_info4[\n 'description'], threebot_info4['pubkey'], verifykey=threebot_info4[\n 'pubkey'], signature=threebot_info4['signature'], die=True)\n", (3264, 3517), False, 'from Jumpscale import j\n'), ((841, 931), 'Jumpscale.j.data.serializers.json.dumps', 'j.data.serializers.json.dumps', (["{'name': myname, 'pubkey': client_nacl.verify_key_hex}"], {}), "({'name': myname, 'pubkey': client_nacl.\n verify_key_hex})\n", (870, 931), False, 'from Jumpscale import j\n'), ((1433, 1442), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1440, 1442), False, 'from io import BytesIO\n'), ((1944, 1971), 'binascii.hexlify', 'binascii.hexlify', (['signature'], {}), '(signature)\n', (1960, 1971), False, 'import binascii\n')] |
#!/usr/bin/env python2
# -*- coding:utf-8 -*-
import os
import argparse
import glob
from functools import partial
import fontforge
import psMat
import source
opt_parser= argparse.ArgumentParser()
opt_parser.add_argument("--cjkv_info", type= str,
help= u"the path of cjkv_info")
opt_parser.add_argument("--region", type= str,
help= u"the region from where to select characters")
opt_parser.add_argument("--start", type= partial(int, base=16),
help= u"the start point of unicode")
opt_parser.add_argument("--end", type= partial(int, base=16),
help= u"the end point of unicode")
opt_parser.add_argument("--name", type= str,
help= u"the name of the new font")
opt_parser.add_argument("--adjust", type= int,
help= u"adjust the position of the outline")
opt_parser.add_argument("--output", type= str,
help= u"the path and filename of the new font")
class Opts:
def __init__(self):
self.cjkv_info= "."
self.region= "China"
self.start= 0
self.end= 0
self.name= "my font"
self.adjust= 0
self.output= (".", "out.ttf")
def setup_opt():
opts= Opts()
args= opt_parser.parse_args()
opts.cjkv_info= args.cjkv_info or opts.cjkv_info
opts.region= args.region or opts.region
opts.start= args.start or opts.start
opts.end= args.end or opts.end
opts.name= args.name or opts.name
opts.adjust= args.adjust or opts.adjust
if args.output:
d= os.path.dirname(args.output) or opts.output[0]
f= os.path.basename(args.output) or opts.output[1]
opts.output= (d, f)
try:
os.mkdir(opts.output[0])
except OSError:
pass
return opts
def get_code(path):
basename= os.path.basename(path)
(root, ext)= os.path.splitext(basename)
code_tag= root.split("_")
code= int(code_tag[0], 16)
return code
def read_src(path):
with open(path, "r") as f:
src= f.readline()
return src
def get_region(src):
return source.rev.get(src.split("-")[0])
def is_region(region):
return region == opts.region
def filter_src(path):
code= get_code(path)
if opts.start <= code and code <= opts.end:
src= read_src(path)
region= get_region(src)
return is_region(region)
else:
return False
opts= setup_opt()
src_files= glob.glob(os.path.join(opts.cjkv_info, "data", "*", "*.src"))
src_files= filter(filter_src, src_files)
newfont= fontforge.font()
newfont.em= 1024
newfont.fontname= opts.name
for src_file in src_files:
code= get_code(src_file);
glyph= newfont.createChar(code)
(root, ext)= os.path.splitext(src_file)
glyph.importOutlines(root + ".svg")
glyph.transform(psMat.translate(0, opts.adjust))
newfont.generate(os.path.join(opts.output[0], opts.output[1]))
| [
"argparse.ArgumentParser",
"os.path.join",
"os.path.splitext",
"psMat.translate",
"fontforge.font",
"os.path.dirname",
"functools.partial",
"os.path.basename",
"os.mkdir"
] | [((173, 198), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (196, 198), False, 'import argparse\n'), ((2445, 2461), 'fontforge.font', 'fontforge.font', ([], {}), '()\n', (2459, 2461), False, 'import fontforge\n'), ((1716, 1738), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1732, 1738), False, 'import os\n'), ((1756, 1782), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (1772, 1782), False, 'import os\n'), ((2342, 2392), 'os.path.join', 'os.path.join', (['opts.cjkv_info', '"""data"""', '"""*"""', '"""*.src"""'], {}), "(opts.cjkv_info, 'data', '*', '*.src')\n", (2354, 2392), False, 'import os\n'), ((2618, 2644), 'os.path.splitext', 'os.path.splitext', (['src_file'], {}), '(src_file)\n', (2634, 2644), False, 'import os\n'), ((2756, 2800), 'os.path.join', 'os.path.join', (['opts.output[0]', 'opts.output[1]'], {}), '(opts.output[0], opts.output[1])\n', (2768, 2800), False, 'import os\n'), ((431, 452), 'functools.partial', 'partial', (['int'], {'base': '(16)'}), '(int, base=16)\n', (438, 452), False, 'from functools import partial\n'), ((534, 555), 'functools.partial', 'partial', (['int'], {'base': '(16)'}), '(int, base=16)\n', (541, 555), False, 'from functools import partial\n'), ((1607, 1631), 'os.mkdir', 'os.mkdir', (['opts.output[0]'], {}), '(opts.output[0])\n', (1615, 1631), False, 'import os\n'), ((2705, 2736), 'psMat.translate', 'psMat.translate', (['(0)', 'opts.adjust'], {}), '(0, opts.adjust)\n', (2720, 2736), False, 'import psMat\n'), ((1456, 1484), 'os.path.dirname', 'os.path.dirname', (['args.output'], {}), '(args.output)\n', (1471, 1484), False, 'import os\n'), ((1514, 1543), 'os.path.basename', 'os.path.basename', (['args.output'], {}), '(args.output)\n', (1530, 1543), False, 'import os\n')] |
import logging
import pandas as pd
from homeassistant.components.alarm_control_panel import (
AlarmControlPanel
)
from homeassistant.core import callback
from homeassistant.util import convert
from .ringalarmdevice import RingAlarmDevice
from .constants import *
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED
)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, device):
# for index, device in devices.iterrows():
add_devices([RingAlarmControlPanel(device)], True)
class RingAlarmControlPanel(RingAlarmDevice, AlarmControlPanel):
def __init__(self, ringalarm_device):
super().__init__(ringalarm_device)
try:
if ringalarm_device[DEVICE_ALARM_MODE] == "none":
self._state = STATE_ALARM_DISARMED
except:
pass
try:
if ringalarm_device[DEVICE_ALARM_MODE] == "some":
self._state = STATE_ALARM_ARMED_HOME
except:
pass
try:
if ringalarm_device[DEVICE_ALARM_MODE] == "all":
self._state = STATE_ALARM_ARMED_AWAY
except:
pass
try:
self._tamper_status = ringalarm_device[DEVICE_TAMPER_STATUS]
except:
pass
def update(self):
pass
def alarm_disarm(self, code=None):
"""Send disarm command."""
try:
self.controller.ring_api.send_command_ring(self.ringalarm_device[DEVICE_ZID],
self.ringalarm_device[DEVICE_SOURCE],
'security-panel.switch-mode',
data={'mode': 'none', "bypass": None})
except:
pass
def alarm_arm_home(self, code=None):
"""Send arm home command."""
try:
self.controller.ring_api.send_command_ring(self.ringalarm_device[DEVICE_ZID],
self.ringalarm_device[DEVICE_SOURCE],
'security-panel.switch-mode',
data={'mode': 'some', "bypass": None})
except:
pass
def alarm_arm_away(self, code=None):
"""Send arm away command."""
try:
self.controller.ring_api.send_command_ring(self.ringalarm_device[DEVICE_ZID],
self.ringalarm_device[DEVICE_SOURCE],
'security-panel.switch-mode',
data={'mode': 'all', "bypass": None})
except:
pass
def update_callback(self, data):
try:
if data[DEVICE_ALARM_MODE] == "none":
self._state = STATE_ALARM_DISARMED
except:
pass
try:
if data[DEVICE_ALARM_MODE] == "some":
self._state = STATE_ALARM_ARMED_HOME
except:
pass
try:
if data[DEVICE_ALARM_MODE] == "all":
self._state = STATE_ALARM_ARMED_AWAY
except:
pass
self.schedule_update_ha_state(True)
@property
def changed_by(self):
"""Last change triggered by."""
return None
@property
def code_arm_required(self):
"""Whether the code is required for arm actions."""
return True
@property
def state(self):
"""Get the state of the device."""
return self._state
| [
"logging.getLogger"
] | [((398, 425), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (415, 425), False, 'import logging\n')] |
from sys import stdin, stdout, stderr
import traceback
import time
from player import Player
from field.field import Field
class Game:
def __init__(self):
self.time_per_move = -1
self.timebank = -1
self.last_update = None
self.max_rounds = -1
self.round = 0
self.player_names = []
self.players = {}
self.me = None
self.opponent = None
self.field = Field()
def update(self, data):
# start timer
self.last_update = time.time()
for line in data.split('\n'):
line = line.strip()
if len(line) <= 0:
continue
tokens = line.split()
if tokens[0] == "settings":
self.parse_settings(tokens[1], tokens[2])
elif tokens[0] == "update":
if tokens[1] == "game":
self.parse_game_updates(tokens[2], tokens[3])
else:
self.parse_player_updates(tokens[1], tokens[2], tokens[3])
elif tokens[0] == "action":
self.timebank = int(tokens[2])
# Launching bot logic happens after setup finishes
def parse_settings(self, key, value):
if key == "timebank":
self.timebank = int(value)
elif key == "time_per_move":
self.time_per_move = int(value)
elif key == "player_names":
self.player_names = value.split(',')
self.players = {name: Player(name) for name in self.player_names}
elif key == "your_bot":
self.me = self.players[value]
self.opponent = self.players[[name for name in self.player_names if name != value][0]]
elif key == "your_botid":
self.me.id = value
self.opponent.id = str(2 - (int(value) + 1))
elif key == "field_width":
self.field.width = int(value)
elif key == "field_height":
self.field.height = int(value)
elif key == "max_rounds":
self.max_rounds = int(value)
else:
stderr.write('Cannot parse settings input with key {}'.format(key))
def parse_game_updates(self, key, value):
if key == "round":
self.round = int(value)
elif key == "field":
self.field.parse(value)
else:
stderr.write('Cannot parse game update with key {}'.format(key))
def parse_player_updates(self, player_name, key, value):
player = self.players.get(player_name)
if player is None:
stderr.write('Cannot find player with name {}'.format(player_name))
return
if key == "living_cells":
player.living_cells = int(value)
elif key == "move":
player.previous_move = value
else:
stderr.write('Cannot parse {} update with key {}'.format(player_name, key))
def time_remaining(self):
return self.timebank - int(1000 * (time.clock() - self.last_update))
@staticmethod
def print_move(move):
"""issue an order"""
stdout.write('{}\n'.format(move))
stdout.flush()
def run(self, bot):
"""parse input, update game state and call the bot classes do_turn method"""
not_finished = True
data = ''
while not stdin.closed and not_finished:
try:
current_line = stdin.readline().rstrip('\r\n')
if len(current_line) <= 0:
time.sleep(1)
continue
data += current_line + "\n"
if current_line.lower().startswith("action"):
self.update(data)
move = bot.make_move(self)
self.print_move(move)
data = ''
elif current_line.lower().startswith("quit"):
not_finished = False
except EOFError:
break
except KeyboardInterrupt:
raise
except:
# don't raise error or return so that bot attempts to stay alive
traceback.print_exc(file=stderr)
stderr.flush()
| [
"field.field.Field",
"time.clock",
"player.Player",
"sys.stderr.flush",
"time.sleep",
"sys.stdin.readline",
"sys.stdout.flush",
"traceback.print_exc",
"time.time"
] | [((435, 442), 'field.field.Field', 'Field', ([], {}), '()\n', (440, 442), False, 'from field.field import Field\n'), ((521, 532), 'time.time', 'time.time', ([], {}), '()\n', (530, 532), False, 'import time\n'), ((3155, 3169), 'sys.stdout.flush', 'stdout.flush', ([], {}), '()\n', (3167, 3169), False, 'from sys import stdin, stdout, stderr\n'), ((3520, 3533), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3530, 3533), False, 'import time\n'), ((4160, 4192), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'stderr'}), '(file=stderr)\n', (4179, 4192), False, 'import traceback\n'), ((4209, 4223), 'sys.stderr.flush', 'stderr.flush', ([], {}), '()\n', (4221, 4223), False, 'from sys import stdin, stdout, stderr\n'), ((1506, 1518), 'player.Player', 'Player', (['name'], {}), '(name)\n', (1512, 1518), False, 'from player import Player\n'), ((2997, 3009), 'time.clock', 'time.clock', ([], {}), '()\n', (3007, 3009), False, 'import time\n'), ((3424, 3440), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (3438, 3440), False, 'from sys import stdin, stdout, stderr\n')] |
import click
import random
from pyfiglet import Figlet
from termcolor import colored, cprint
import imagenet
@click.command()
@click.option("--count", default=10, help="Yield number of codenames.")
def codename_gen(count):
"""Enjoy the codenames 🍺"""
imagenet_cls = imagenet.imagenet1000_labels()
f = Figlet(font='slant')
print(f.renderText('Codename Gen'))
c_tag = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white']
for _ in range(count):
print(colored(random.choice(imagenet_cls), random.choice(c_tag)))
cprint('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>', 'green')
cprint('Done! Enjoy the codenames 🍺', 'grey', 'on_green')
if __name__ == '__main__':
codename_gen()
| [
"random.choice",
"click.option",
"pyfiglet.Figlet",
"imagenet.imagenet1000_labels",
"click.command",
"termcolor.cprint"
] | [((111, 126), 'click.command', 'click.command', ([], {}), '()\n', (124, 126), False, 'import click\n'), ((128, 198), 'click.option', 'click.option', (['"""--count"""'], {'default': '(10)', 'help': '"""Yield number of codenames."""'}), "('--count', default=10, help='Yield number of codenames.')\n", (140, 198), False, 'import click\n'), ((275, 305), 'imagenet.imagenet1000_labels', 'imagenet.imagenet1000_labels', ([], {}), '()\n', (303, 305), False, 'import imagenet\n'), ((314, 334), 'pyfiglet.Figlet', 'Figlet', ([], {'font': '"""slant"""'}), "(font='slant')\n", (320, 334), False, 'from pyfiglet import Figlet\n'), ((555, 655), 'termcolor.cprint', 'cprint', (['""">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"""', '"""green"""'], {}), "(\n '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>',\n 'green')\n", (561, 655), False, 'from termcolor import colored, cprint\n'), ((651, 708), 'termcolor.cprint', 'cprint', (['"""Done! Enjoy the codenames 🍺"""', '"""grey"""', '"""on_green"""'], {}), "('Done! Enjoy the codenames 🍺', 'grey', 'on_green')\n", (657, 708), False, 'from termcolor import colored, cprint\n'), ((499, 526), 'random.choice', 'random.choice', (['imagenet_cls'], {}), '(imagenet_cls)\n', (512, 526), False, 'import random\n'), ((528, 548), 'random.choice', 'random.choice', (['c_tag'], {}), '(c_tag)\n', (541, 548), False, 'import random\n')] |
import logging
import json
from abc import ABCMeta, abstractmethod
from django.contrib import auth
from django.contrib.auth import update_session_auth_hash, password_validation
from django.contrib.auth.tokens import default_token_generator
from django.core.exceptions import ValidationError, PermissionDenied
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django.utils.translation import ugettext as _
from binder.permissions.views import no_scoping_required
from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, \
BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, \
BinderNotFound
from binder.router import list_route, detail_route
from binder.json import JsonResponse
from binder.views import annotate
logger = logging.getLogger(__name__)
class UserBaseMixin:
__metaclass__ = ABCMeta
def respond_with_user(self, request, user_id):
return JsonResponse(
self._get_objs(
annotate(self.get_queryset(request).filter(pk=user_id), request),
request=request,
)[0]
)
class MasqueradeMixin(UserBaseMixin):
__metaclass__ = ABCMeta
@detail_route(name='masquerade')
@no_scoping_required()
def masquerade(self, request, pk=None):
from hijack.helpers import login_user
if request.method != 'POST':
raise BinderMethodNotAllowed()
try:
user = self.model._default_manager.get(pk=pk)
except self.model.DoesNotExist:
raise BinderNotFound()
self._require_model_perm('masquerade', request)
login_user(request, user) # Ignore returned redirect response object
return self.respond_with_user(request, user.id)
@list_route(name='endmasquerade')
@no_scoping_required()
def endmasquerade(self, request):
from hijack.helpers import release_hijack
if request.method != 'POST':
raise BinderMethodNotAllowed()
self._require_model_perm('unmasquerade', request)
release_hijack(request) # Ignore returned redirect response object
return self.respond_with_user(request, request.user.id)
def _logout(self, request):
from hijack.helpers import release_hijack
# Release masquerade on logout if masquerading
try:
release_hijack(request)
except PermissionDenied: # Means we are not hijacked
super()._logout(request)
class UserViewMixIn(UserBaseMixin):
__metaclass__ = ABCMeta
log_request_body = False
token_generator = default_token_generator
default_authentication_backend = None
def _require_model_perm(self, perm_type, request, pk=None):
"""
Overwrite the _require_model_perm, to make sure that you can not modify a superuser as non superuser
We need to be very careful about permission assumptions after this point
"""
# If the user is trying to change a superuser and is not a superuser, disallow
if pk and self.model.objects.get(pk=int(pk)).is_superuser and not request.user.is_superuser:
# Maybe BinderRequestError?
raise BinderForbidden('modify superuser', request.user)
# Everything normal
return super()._require_model_perm(perm_type, request, pk)
def _store__groups(self, obj, field, value, request, pk=None):
"""
Store the groups of the user.
If we get here, the user might not actually have admin permissions;
If the user does not have user change perms, disallow setting groups.
"""
try:
self._require_model_perm('changegroups', request)
return self._store_field(obj, field, value, request, pk=pk)
except BinderForbidden: # convert to read-only error, so the field is ignored
raise BinderReadOnlyFieldError(self.model.__name__, field)
def authenticate(self, request, **kwargs):
return auth.authenticate(request, **kwargs)
def auth_login(self, request, user, backend=None):
return auth.login(request, user, backend=(
backend or
getattr(user, 'backend', None) or
self.default_authentication_backend
))
@method_decorator(sensitive_post_parameters())
@list_route(name='login', unauthenticated=True)
@no_scoping_required()
def login(self, request):
"""
Login the user
Request:
POST user/login/
{
"username": "foo",
"password": "password"
}
Response:
returns the same parameters as GET user/{id}/
"""
if request.method != 'POST':
raise BinderMethodNotAllowed()
try:
decoded = request.body.decode()
body = json.loads(decoded)
username = body.get(self.model.USERNAME_FIELD, '')
password = body.get('password', '')
except Exception:
username = request.POST.get(self.model.USERNAME_FIELD, '')
password = request.POST.get('password', '')
user = self.authenticate(request, **{
self.model.USERNAME_FIELD: username.lower(),
'password': password,
})
self._require_model_perm('login', request)
if user is None:
logger.info('login failed for "{}"'.format(username))
raise BinderNotAuthenticated()
else:
self.auth_login(request, user)
logger.info('login for {}/{}'.format(user.id, user))
return self.respond_with_user(request, user.id)
def _logout(self, request):
auth.logout(request)
@list_route(name='logout')
@no_scoping_required()
def logout(self, request):
"""
Logout the user
Request:
POST /user/logout/
{}
Response:
204
{}
"""
if request.method != 'POST':
raise BinderMethodNotAllowed()
self._require_model_perm('logout', request)
logger.info('logout for {}/{}'.format(request.user.id, request.user))
self._logout(request)
return HttpResponse(status=204)
def get_users(self, request, username):
"""
Given a username, return matching user(s) who should receive a reset.
This allows subclasses to more easily customize the default policies
that prevent inactive users and users with unusable passwords from
resetting their password.
Copied from django.contrib.auth.forms.PasswordResetForm
"""
active_users = self.model._default_manager.filter(**{
self.model.USERNAME_FIELD + '__iexact': username,
'is_active': True,
})
return (u for u in active_users if u.has_usable_password())
def _store__username(self, user, field, value, request, pk=None):
"""
Makes sure the username is always stored as a lowercase
"""
if not isinstance(value, str):
raise BinderFieldTypeError(self.model.__name__, field)
return self._store_field(user, field, value.lower(), request, pk=pk)
def filter_deleted(self, queryset, pk, deleted, request=None):
"""
Can be used to filter deleted users, or unfilter them.
"""
if pk or deleted == 'true':
return queryset
if deleted is None:
return queryset.filter(is_active=True)
if deleted == 'only':
return queryset.filter(is_active=False)
raise BinderRequestError(_('Invalid value: deleted=%s.') % request.GET.get('deleted'))
def soft_delete(self, user, undelete=False, request=None):
"""
Allows the user to be soft deleted, and undeleted. What actually needs to be done on soft deletion
can be implemented in
_after_soft_delete
"""
try:
if not user.is_active and not undelete:
raise BinderIsDeleted()
if not not user.is_active and undelete:
raise BinderIsNotDeleted()
except AttributeError:
raise BinderMethodNotAllowed()
user.is_active = undelete
user.save()
self._after_soft_delete(request, user, undelete)
@list_route(name='reset_request', unauthenticated=True)
@no_scoping_required()
def reset_request(self, request):
    """
    Endpoint that starts a password reset: generates a token for each
    matching user and hands it to the _send_reset_mail callback.

    Request:
        POST user/reset_request/
        {
            'username': 'foo'
        }
    Response:
        204
        {
        }
    """
    if request.method != 'POST':
        raise BinderMethodNotAllowed()
    self._require_model_perm('reset_password', request)
    raw_body = request.body.decode()
    try:
        payload = json.loads(raw_body)
    except ValueError:
        raise BinderRequestError(_('Invalid request body: not a JSON document.'))
    username = payload.get(self.model.USERNAME_FIELD, '')
    logger.info('password reset attempt for {}'.format(username))
    # One token per eligible account; get_users applies the eligibility rules.
    for matched_user in self.get_users(request, username.lower()):
        reset_token = self.token_generator.make_token(matched_user)
        self._send_reset_mail(request, matched_user, reset_token)
    return HttpResponse(status=204)
@never_cache
@list_route(name='send_activation_email', unauthenticated=True)
@no_scoping_required()
def send_activation_email(self, request):
    """
    Endpoint that can be used to send an activation email for a user.

    Calls the _send_activation_email callback when a mail is actually sent.

    Request:
        PUT user/send_activation_email/
        {
            "email": "email"
        }
    Response:
        {
            "code": code
        }
    Possible codes:
        sent            Mail was sent successfully (HTTP 201)
        already active  User is already active, no mail was sent
        blacklisted     User was not activated (HTTP 400)
    """
    # Fixed: the docstring previously advertised POST, but the handler has
    # always required PUT.
    if request.method != 'PUT':
        raise BinderMethodNotAllowed()
    # For lack of a better check
    self._require_model_perm('reset_password', request)
    decoded = request.body.decode()
    try:
        body = json.loads(decoded)
    except ValueError:
        raise BinderRequestError(_('Invalid request body: not a JSON document.'))
    logger.info('activation email attempt for {}'.format(body.get('email', '')))
    email = body.get('email')
    if email is None:
        raise BinderValidationError({'email': ['missing']})
    try:
        user = self.model._default_manager.get(email=email)
    except self.model.DoesNotExist:
        raise BinderNotFound()
    if user.is_active:
        if user.last_login is None:
            # Active but never logged in: (re)send the activation mail.
            # TODO: Figure out a way to make this customisable without
            # allowing injection of arbitrary URLs (phishing!)
            self._send_activation_email(request, user)
            response = JsonResponse({'code': 'sent'})
            response.status_code = 201
        else:
            response = JsonResponse({'code': 'already active'})
    else:
        # Inactive users are treated as blacklisted: no mail is sent.
        response = JsonResponse({'code': 'blacklisted'})
        response.status_code = 400
    return response
@method_decorator(sensitive_post_parameters())
@never_cache
@detail_route(name='activate', unauthenticated=True)
@no_scoping_required()
def activate(self, request, pk=None):
    """
    Activate a user account from an activation code, then log the user in.

    Request:
        PUT user/{id}/activate/
        {
            "activation_code": string
        }
    Response:
        Same as GET user/{id}/
    """
    if request.method != 'PUT':
        raise BinderMethodNotAllowed()
    self._require_model_perm('activate', request)
    raw_body = request.body.decode()
    try:
        payload = json.loads(raw_body)
    except ValueError:
        raise BinderRequestError(_('Invalid request body: not a JSON document.'))
    missing = {field: ['missing'] for field in ['activation_code'] if payload.get(field) is None}
    if missing:
        raise BinderValidationError(missing)
    try:
        account = self.model._default_manager.get(pk=pk)
    except (TypeError, ValueError, OverflowError, self.model.DoesNotExist):
        account = None
    # Unknown pk and invalid token are deliberately indistinguishable.
    token_ok = account is not None and self.token_generator.check_token(account, payload.get('activation_code'))
    if not token_ok:
        raise BinderNotFound()
    logger.info('login for {}/{} via successful activation'.format(account.id, account))
    account.is_active = True
    account.save()
    self.auth_login(request, account)
    return self.respond_with_user(request, account.id)
@method_decorator(sensitive_post_parameters())
@never_cache
@detail_route(name='reset_password', unauthenticated=True, methods=['PUT'])
@no_scoping_required()
def reset_password(self, request, pk=None):
    """
    Reset a user's password using a reset code.

    Request:
        PUT user/{id}/reset_password/
        {
            "reset_code": str,
            "password": str
        }
    Response:
        Same as GET user/{id}/
    """
    # Fixed: the docstring previously said "POST user/reset_password/", but
    # the route is a detail route restricted to PUT (see decorator).
    self._require_model_perm('reset_password', request)
    decoded = request.body.decode()
    try:
        body = json.loads(decoded)
    except ValueError:
        raise BinderRequestError(_('Invalid request body: not a JSON document.'))
    # Consistency fix: report missing fields as lists of messages
    # (['missing']), matching every other endpoint in this view; previously
    # this endpoint alone used bare 'missing' strings.
    errors = {item: ['missing'] for item in ['reset_code', 'password'] if item not in body}
    if errors:
        raise BinderValidationError(errors)
    return self._reset_pass_for_user(request, int(pk), body['reset_code'], body['password'])
def _reset_pass_for_user(self, request, user_id, token, password):
    """
    Validate ``token`` for ``user_id``, set the new password and log in.

    Helper shared by the password-reset flow; raises BinderNotFound when the
    user does not exist or the token does not verify, and
    BinderValidationError when the new password fails Django's validators.
    """
    try:
        target = self.model._default_manager.get(pk=user_id)
    except (TypeError, ValueError, OverflowError, self.model.DoesNotExist):
        target = None
    # Unknown user and bad token are deliberately indistinguishable.
    token_valid = target is not None and self.token_generator.check_token(target, token)
    if not token_valid:
        raise BinderNotFound()
    logger.info('login for {}/{} via successful password reset'.format(target.id, target))
    try:
        password_validation.validate_password(password, target)
    except ValidationError as ve:
        raise BinderValidationError({'password': ve.messages})
    target.set_password(password)
    target.save()
    self.auth_login(request, target)
    return self.respond_with_user(request, target.id)
@method_decorator(sensitive_post_parameters())
@never_cache
@list_route(name='change_password')
@no_scoping_required()
def change_password(self, request):
    """
    Change the logged-in user's password, verifying the old one first.

    Request:
        PUT user/change_password/
        {
            "old_password": str,
            "new_password": str
        }
    Response:
        Same as GET user/{id}/
    """
    # Fixed: the docstring previously advertised POST, but the handler has
    # always required PUT.
    if request.method != 'PUT':
        raise BinderMethodNotAllowed()
    self._require_model_perm('change_own_password', request)
    decoded = request.body.decode()
    try:
        body = json.loads(decoded)
    except ValueError:
        raise BinderRequestError(_('Invalid request body: not a JSON document.'))
    user = request.user
    errors = {}
    for item in ['old_password', 'new_password']:
        if body.get(item) is None:
            errors[item] = ['missing']
    # Django's check_password treats a None raw password as a failed check,
    # so a missing old_password yields both 'missing' and 'incorrect'.
    if not user.check_password(body.get('old_password')):
        errors['old_password'] = ['incorrect']
    if len(errors) != 0:
        raise BinderValidationError(errors)
    password = body.get('new_password')
    try:
        password_validation.validate_password(password, user)
    except ValidationError as ve:
        raise BinderValidationError({'new_password': ve.messages})
    user.set_password(password)
    user.save()
    logger.info('password changed for {}/{}'.format(user.id, user))
    # Changing your own password rotates the session auth hash; refresh it so
    # the current session stays valid.  (user is always request.user here;
    # the guard is kept defensively.  The previous inline string claimed the
    # opposite of what this branch does.)
    if user == request.user:
        update_session_auth_hash(request, user)
    return self.respond_with_user(request, user.id)
@abstractmethod
def _after_soft_delete(self, request, user, undelete):
    """
    Hook invoked after ``user`` has been soft-deleted (or un-deleted when
    ``undelete`` is true).  Subclasses implement any follow-up work here.
    """
@abstractmethod
def _send_reset_mail(self, request, user, token):
    """
    Hook that delivers the password-reset mail for ``user`` containing
    ``token``.  Subclasses implement the actual mail delivery.
    """
@abstractmethod
def _send_activation_email(self, request, user):
    """
    Hook that delivers the mail notifying ``user`` of activation.
    Subclasses implement the actual mail delivery.
    """
| [
"logging.getLogger",
"binder.exceptions.BinderForbidden",
"binder.exceptions.BinderNotFound",
"django.http.HttpResponse",
"binder.exceptions.BinderMethodNotAllowed",
"binder.router.detail_route",
"django.contrib.auth.logout",
"binder.json.JsonResponse",
"django.contrib.auth.authenticate",
"json.lo... | [((1007, 1034), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1024, 1034), False, 'import logging\n'), ((1345, 1376), 'binder.router.detail_route', 'detail_route', ([], {'name': '"""masquerade"""'}), "(name='masquerade')\n", (1357, 1376), False, 'from binder.router import list_route, detail_route\n'), ((1379, 1400), 'binder.permissions.views.no_scoping_required', 'no_scoping_required', ([], {}), '()\n', (1398, 1400), False, 'from binder.permissions.views import no_scoping_required\n'), ((1842, 1874), 'binder.router.list_route', 'list_route', ([], {'name': '"""endmasquerade"""'}), "(name='endmasquerade')\n", (1852, 1874), False, 'from binder.router import list_route, detail_route\n'), ((1877, 1898), 'binder.permissions.views.no_scoping_required', 'no_scoping_required', ([], {}), '()\n', (1896, 1898), False, 'from binder.permissions.views import no_scoping_required\n'), ((4096, 4142), 'binder.router.list_route', 'list_route', ([], {'name': '"""login"""', 'unauthenticated': '(True)'}), "(name='login', unauthenticated=True)\n", (4106, 4142), False, 'from binder.router import list_route, detail_route\n'), ((4145, 4166), 'binder.permissions.views.no_scoping_required', 'no_scoping_required', ([], {}), '()\n', (4164, 4166), False, 'from binder.permissions.views import no_scoping_required\n'), ((5214, 5239), 'binder.router.list_route', 'list_route', ([], {'name': '"""logout"""'}), "(name='logout')\n", (5224, 5239), False, 'from binder.router import list_route, detail_route\n'), ((5242, 5263), 'binder.permissions.views.no_scoping_required', 'no_scoping_required', ([], {}), '()\n', (5261, 5263), False, 'from binder.permissions.views import no_scoping_required\n'), ((7411, 7465), 'binder.router.list_route', 'list_route', ([], {'name': '"""reset_request"""', 'unauthenticated': '(True)'}), "(name='reset_request', unauthenticated=True)\n", (7421, 7465), False, 'from binder.router import list_route, detail_route\n'), ((7468, 7489), 
'binder.permissions.views.no_scoping_required', 'no_scoping_required', ([], {}), '()\n', (7487, 7489), False, 'from binder.permissions.views import no_scoping_required\n'), ((8395, 8457), 'binder.router.list_route', 'list_route', ([], {'name': '"""send_activation_email"""', 'unauthenticated': '(True)'}), "(name='send_activation_email', unauthenticated=True)\n", (8405, 8457), False, 'from binder.router import list_route, detail_route\n'), ((8460, 8481), 'binder.permissions.views.no_scoping_required', 'no_scoping_required', ([], {}), '()\n', (8479, 8481), False, 'from binder.permissions.views import no_scoping_required\n'), ((10080, 10131), 'binder.router.detail_route', 'detail_route', ([], {'name': '"""activate"""', 'unauthenticated': '(True)'}), "(name='activate', unauthenticated=True)\n", (10092, 10131), False, 'from binder.router import list_route, detail_route\n'), ((10134, 10155), 'binder.permissions.views.no_scoping_required', 'no_scoping_required', ([], {}), '()\n', (10153, 10155), False, 'from binder.permissions.views import no_scoping_required\n'), ((11375, 11449), 'binder.router.detail_route', 'detail_route', ([], {'name': '"""reset_password"""', 'unauthenticated': '(True)', 'methods': "['PUT']"}), "(name='reset_password', unauthenticated=True, methods=['PUT'])\n", (11387, 11449), False, 'from binder.router import list_route, detail_route\n'), ((11452, 11473), 'binder.permissions.views.no_scoping_required', 'no_scoping_required', ([], {}), '()\n', (11471, 11473), False, 'from binder.permissions.views import no_scoping_required\n'), ((12988, 13022), 'binder.router.list_route', 'list_route', ([], {'name': '"""change_password"""'}), "(name='change_password')\n", (12998, 13022), False, 'from binder.router import list_route, detail_route\n'), ((13025, 13046), 'binder.permissions.views.no_scoping_required', 'no_scoping_required', ([], {}), '()\n', (13044, 13046), False, 'from binder.permissions.views import no_scoping_required\n'), ((1719, 1744), 
'hijack.helpers.login_user', 'login_user', (['request', 'user'], {}), '(request, user)\n', (1729, 1744), False, 'from hijack.helpers import login_user\n'), ((2100, 2123), 'hijack.helpers.release_hijack', 'release_hijack', (['request'], {}), '(request)\n', (2114, 2123), False, 'from hijack.helpers import release_hijack\n'), ((3815, 3851), 'django.contrib.auth.authenticate', 'auth.authenticate', (['request'], {}), '(request, **kwargs)\n', (3832, 3851), False, 'from django.contrib import auth\n'), ((4065, 4092), 'django.views.decorators.debug.sensitive_post_parameters', 'sensitive_post_parameters', ([], {}), '()\n', (4090, 4092), False, 'from django.views.decorators.debug import sensitive_post_parameters\n'), ((5190, 5210), 'django.contrib.auth.logout', 'auth.logout', (['request'], {}), '(request)\n', (5201, 5210), False, 'from django.contrib import auth\n'), ((5602, 5626), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(204)'}), '(status=204)\n', (5614, 5626), False, 'from django.http import HttpResponse\n'), ((8353, 8377), 'django.http.HttpResponse', 'HttpResponse', ([], {'status': '(204)'}), '(status=204)\n', (8365, 8377), False, 'from django.http import HttpResponse\n'), ((10035, 10062), 'django.views.decorators.debug.sensitive_post_parameters', 'sensitive_post_parameters', ([], {}), '()\n', (10060, 10062), False, 'from django.views.decorators.debug import sensitive_post_parameters\n'), ((11330, 11357), 'django.views.decorators.debug.sensitive_post_parameters', 'sensitive_post_parameters', ([], {}), '()\n', (11355, 11357), False, 'from django.views.decorators.debug import sensitive_post_parameters\n'), ((12943, 12970), 'django.views.decorators.debug.sensitive_post_parameters', 'sensitive_post_parameters', ([], {}), '()\n', (12968, 12970), False, 'from django.views.decorators.debug import sensitive_post_parameters\n'), ((1523, 1547), 'binder.exceptions.BinderMethodNotAllowed', 'BinderMethodNotAllowed', ([], {}), '()\n', (1545, 1547), False, 'from 
binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((2019, 2043), 'binder.exceptions.BinderMethodNotAllowed', 'BinderMethodNotAllowed', ([], {}), '()\n', (2041, 2043), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((2359, 2382), 'hijack.helpers.release_hijack', 'release_hijack', (['request'], {}), '(request)\n', (2373, 2382), False, 'from hijack.helpers import release_hijack\n'), ((3109, 3158), 'binder.exceptions.BinderForbidden', 'BinderForbidden', (['"""modify superuser"""', 'request.user'], {}), "('modify superuser', request.user)\n", (3124, 3158), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((4414, 4438), 'binder.exceptions.BinderMethodNotAllowed', 'BinderMethodNotAllowed', ([], {}), '()\n', (4436, 4438), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((4492, 4511), 'json.loads', 'json.loads', (['decoded'], {}), '(decoded)\n', (4502, 4511), False, 'import json\n'), ((4984, 5008), 'binder.exceptions.BinderNotAuthenticated', 'BinderNotAuthenticated', ([], {}), '()\n', (5006, 5008), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, 
BinderRequestError, BinderValidationError, BinderNotFound\n'), ((5425, 5449), 'binder.exceptions.BinderMethodNotAllowed', 'BinderMethodNotAllowed', ([], {}), '()\n', (5447, 5449), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((6359, 6407), 'binder.exceptions.BinderFieldTypeError', 'BinderFieldTypeError', (['self.model.__name__', 'field'], {}), '(self.model.__name__, field)\n', (6379, 6407), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((7812, 7836), 'binder.exceptions.BinderMethodNotAllowed', 'BinderMethodNotAllowed', ([], {}), '()\n', (7834, 7836), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((7944, 7963), 'json.loads', 'json.loads', (['decoded'], {}), '(decoded)\n', (7954, 7963), False, 'import json\n'), ((8962, 8986), 'binder.exceptions.BinderMethodNotAllowed', 'BinderMethodNotAllowed', ([], {}), '()\n', (8984, 8986), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((9125, 9144), 'json.loads', 'json.loads', (['decoded'], {}), '(decoded)\n', (9135, 9144), False, 'import json\n'), ((9365, 9410), 'binder.exceptions.BinderValidationError', 'BinderValidationError', (["{'email': ['missing']}"], {}), "({'email': ['missing']})\n", (9386, 9410), False, 'from 
binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((9928, 9965), 'binder.json.JsonResponse', 'JsonResponse', (["{'code': 'blacklisted'}"], {}), "({'code': 'blacklisted'})\n", (9940, 9965), False, 'from binder.json import JsonResponse\n'), ((10423, 10447), 'binder.exceptions.BinderMethodNotAllowed', 'BinderMethodNotAllowed', ([], {}), '()\n', (10445, 10447), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((10549, 10568), 'json.loads', 'json.loads', (['decoded'], {}), '(decoded)\n', (10559, 10568), False, 'import json\n'), ((10810, 10839), 'binder.exceptions.BinderValidationError', 'BinderValidationError', (['errors'], {}), '(errors)\n', (10831, 10839), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((11089, 11105), 'binder.exceptions.BinderNotFound', 'BinderNotFound', ([], {}), '()\n', (11103, 11105), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((11809, 11828), 'json.loads', 'json.loads', (['decoded'], {}), '(decoded)\n', (11819, 11828), False, 'import json\n'), ((12038, 12067), 'binder.exceptions.BinderValidationError', 'BinderValidationError', (['errors'], {}), '(errors)\n', (12059, 12067), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, 
BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((12537, 12553), 'binder.exceptions.BinderNotFound', 'BinderNotFound', ([], {}), '()\n', (12551, 12553), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((12651, 12704), 'django.contrib.auth.password_validation.validate_password', 'password_validation.validate_password', (['password', 'user'], {}), '(password, user)\n', (12688, 12704), False, 'from django.contrib.auth import update_session_auth_hash, password_validation\n'), ((13314, 13338), 'binder.exceptions.BinderMethodNotAllowed', 'BinderMethodNotAllowed', ([], {}), '()\n', (13336, 13338), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((13451, 13470), 'json.loads', 'json.loads', (['decoded'], {}), '(decoded)\n', (13461, 13470), False, 'import json\n'), ((13848, 13877), 'binder.exceptions.BinderValidationError', 'BinderValidationError', (['errors'], {}), '(errors)\n', (13869, 13877), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((13927, 13980), 'django.contrib.auth.password_validation.validate_password', 'password_validation.validate_password', (['password', 'user'], {}), '(password, user)\n', (13964, 13980), False, 'from django.contrib.auth import update_session_auth_hash, password_validation\n'), ((14337, 14376), 
'django.contrib.auth.update_session_auth_hash', 'update_session_auth_hash', (['request', 'user'], {}), '(request, user)\n', (14361, 14376), False, 'from django.contrib.auth import update_session_auth_hash, password_validation\n'), ((1648, 1664), 'binder.exceptions.BinderNotFound', 'BinderNotFound', ([], {}), '()\n', (1662, 1664), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((3708, 3760), 'binder.exceptions.BinderReadOnlyFieldError', 'BinderReadOnlyFieldError', (['self.model.__name__', 'field'], {}), '(self.model.__name__, field)\n', (3732, 3760), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((6820, 6851), 'django.utils.translation.ugettext', '_', (['"""Invalid value: deleted=%s."""'], {}), "('Invalid value: deleted=%s.')\n", (6821, 6851), True, 'from django.utils.translation import ugettext as _\n'), ((7162, 7179), 'binder.exceptions.BinderIsDeleted', 'BinderIsDeleted', ([], {}), '()\n', (7177, 7179), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((7233, 7253), 'binder.exceptions.BinderIsNotDeleted', 'BinderIsNotDeleted', ([], {}), '()\n', (7251, 7253), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((7288, 7312), 'binder.exceptions.BinderMethodNotAllowed', 
'BinderMethodNotAllowed', ([], {}), '()\n', (7310, 7312), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((9529, 9545), 'binder.exceptions.BinderNotFound', 'BinderNotFound', ([], {}), '()\n', (9543, 9545), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((9779, 9809), 'binder.json.JsonResponse', 'JsonResponse', (["{'code': 'sent'}"], {}), "({'code': 'sent'})\n", (9791, 9809), False, 'from binder.json import JsonResponse\n'), ((9865, 9905), 'binder.json.JsonResponse', 'JsonResponse', (["{'code': 'already active'}"], {}), "({'code': 'already active'})\n", (9877, 9905), False, 'from binder.json import JsonResponse\n'), ((12746, 12794), 'binder.exceptions.BinderValidationError', 'BinderValidationError', (["{'password': ve.messages}"], {}), "({'password': ve.messages})\n", (12767, 12794), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((14075, 14115), 'binder.exceptions.BinderValidationError', 'BinderValidationError', (['validation_errors'], {}), '(validation_errors)\n', (14096, 14115), False, 'from binder.exceptions import BinderForbidden, BinderReadOnlyFieldError, BinderMethodNotAllowed, BinderIsNotDeleted, BinderIsDeleted, BinderNotAuthenticated, BinderFieldTypeError, BinderRequestError, BinderValidationError, BinderNotFound\n'), ((8013, 8060), 'django.utils.translation.ugettext', '_', (['"""Invalid request body: not a JSON document."""'], {}), "('Invalid request 
body: not a JSON document.')\n", (8014, 8060), True, 'from django.utils.translation import ugettext as _\n'), ((9194, 9241), 'django.utils.translation.ugettext', '_', (['"""Invalid request body: not a JSON document."""'], {}), "('Invalid request body: not a JSON document.')\n", (9195, 9241), True, 'from django.utils.translation import ugettext as _\n'), ((10618, 10665), 'django.utils.translation.ugettext', '_', (['"""Invalid request body: not a JSON document."""'], {}), "('Invalid request body: not a JSON document.')\n", (10619, 10665), True, 'from django.utils.translation import ugettext as _\n'), ((11878, 11925), 'django.utils.translation.ugettext', '_', (['"""Invalid request body: not a JSON document."""'], {}), "('Invalid request body: not a JSON document.')\n", (11879, 11925), True, 'from django.utils.translation import ugettext as _\n'), ((13520, 13567), 'django.utils.translation.ugettext', '_', (['"""Invalid request body: not a JSON document."""'], {}), "('Invalid request body: not a JSON document.')\n", (13521, 13567), True, 'from django.utils.translation import ugettext as _\n')] |
#+ echo=False
# Pweave literate report source: "#+" lines open code chunks (echo=False
# hides the code in the rendered report) and "#'" lines are emitted verbatim
# as markdown/noweb text.  Statement and chunk order define the report layout.
import numpy
from biobakery_workflows import utilities, visualizations, files
from anadama2 import PweaveDocument
document=PweaveDocument()
# get the variables for this document generation task
# (supplied by the workflow task rendering this template; the keys read below
# are "format", "dna_read_counts" and "rna_read_counts")
vars = document.get_vars()
# determine the document format
pdf_format = True if vars["format"] == "pdf" else False
# read in the DNA samples
# qc_read_counts returns, per read type (paired/orphan): the table column
# labels, the sample names, and the filtered read-count tables
(dna_paired_columns, dna_orphan_columns), dna_samples, (dna_paired_data, dna_orphan_data) = visualizations.qc_read_counts(document, vars["dna_read_counts"])
# read in the RNA samples
(rna_paired_columns, rna_orphan_columns), rna_samples, (rna_paired_data, rna_orphan_data) = visualizations.qc_read_counts(document, vars["rna_read_counts"])
#' # Quality Control
#' <% visualizations.ShotGun.print_qc_intro_caption("{} DNA and {} RNA ".format(len(dna_samples),len(rna_samples)), rna_paired_columns[2:], paired=True) %>
#+ echo=False
#' ## DNA Samples Quality Control
#' ### DNA Samples Tables of Filtered Reads
#+ echo=False
# write the full DNA paired-end read-count table to the data folder, then
# render a (possibly row-limited) view of the same table in the report
document.write_table(["# Sample"]+dna_paired_columns, dna_samples, dna_paired_data,
files.ShotGunVis.path("qc_counts_paired",document.data_folder))
table_message=visualizations.show_table_max_rows(document, dna_paired_data, dna_samples,
dna_paired_columns, "DNA Paired end reads", files.ShotGunVis.path("qc_counts_paired"),
format_data_comma=True)
#' <%= table_message %>
#+ echo=False
# same for the DNA orphan (unpaired) reads
document.write_table(["# Sample"]+dna_orphan_columns, dna_samples, dna_orphan_data,
files.ShotGunVis.path("qc_counts_orphan",document.data_folder))
table_message=visualizations.show_table_max_rows(document, dna_orphan_data, dna_samples,
dna_orphan_columns, "DNA Orphan reads", files.ShotGunVis.path("qc_counts_orphan"),
format_data_comma=True)
#' <%= table_message %>
#' <% if pdf_format: print("\clearpage") %>
#+ echo=False
# plot the microbial reads ratios
# (read proportions per filtering database; presumably the fraction of reads
# remaining after each contaminate-removal step -- see the utilities helper)
dna_microbial_reads, dna_microbial_labels = utilities.microbial_read_proportion_multiple_databases(
dna_paired_data, dna_paired_columns, dna_orphan_data)
document.write_table(["# Sample"]+dna_microbial_labels, dna_samples,
dna_microbial_reads, files.ShotGunVis.path("microbial_counts",document.data_folder))
table_message=visualizations.show_table_max_rows(document, dna_microbial_reads, dna_samples,
dna_microbial_labels, "DNA microbial read proportion",
files.ShotGunVis.path("microbial_counts"))
#' <%= visualizations.ShotGun.captions["microbial_ratios"] %>
#' <%= table_message %>
#' ### DNA Samples Plots of Filtered Reads
#+ echo=False
# grouped bar chart of the filtered read counts; the data is transposed so
# each row corresponds to one filtering-step column label
document.plot_grouped_barchart(numpy.transpose(dna_paired_data), row_labels=dna_paired_columns,
column_labels=dna_samples, title="DNA Paired end reads", ylabel="Read count (in millions)",
legend_title="Filter", yaxis_in_millions=True)
#+ echo=False
document.plot_grouped_barchart(numpy.transpose(dna_orphan_data), row_labels=dna_orphan_columns,
column_labels=dna_samples, title="DNA Orphan reads", ylabel="Read count (in millions)",
legend_title="Filter", yaxis_in_millions=True)
#' ## RNA Samples Quality Control
#' ### RNA Samples Tables of Filtered Reads
#+ echo=False
# the RNA section below mirrors the DNA section above, using the RNA tables
document.write_table(["# Sample"]+rna_paired_columns, rna_samples, rna_paired_data,
files.ShotGunVis.path("rna_qc_counts_paired",document.data_folder))
table_message=visualizations.show_table_max_rows(document, rna_paired_data, rna_samples,
rna_paired_columns, "RNA Paired end reads", files.ShotGunVis.path("rna_qc_counts_paired"),
format_data_comma=True)
#' <%= table_message %>
#+ echo=False
document.write_table(["# Sample"]+rna_orphan_columns, rna_samples, rna_orphan_data,
files.ShotGunVis.path("rna_qc_counts_orphan",document.data_folder))
table_message=visualizations.show_table_max_rows(document, rna_orphan_data, rna_samples,
rna_orphan_columns, "RNA Orphan reads", files.ShotGunVis.path("rna_qc_counts_orphan"),
format_data_comma=True)
#' <%= table_message %>
#' <% if pdf_format: print("\clearpage") %>
#+ echo=False
# write and plot the microbial reads ratios
rna_microbial_reads, rna_microbial_labels = utilities.microbial_read_proportion_multiple_databases(
rna_paired_data, rna_paired_columns, rna_orphan_data)
document.write_table(["# Sample"]+rna_microbial_labels, rna_samples,
rna_microbial_reads, files.ShotGunVis.path("rna_microbial_counts",document.data_folder))
table_message=visualizations.show_table_max_rows(document, rna_microbial_reads, rna_samples,
rna_microbial_labels, "RNA microbial read proportion",
files.ShotGunVis.path("rna_microbial_counts"))
#' <%= visualizations.ShotGun.captions["microbial_ratios"] %>
#' <%= table_message %>
#' ### RNA Samples Plots of Filtered Reads
#+ echo=False
document.plot_grouped_barchart(numpy.transpose(rna_paired_data), row_labels=rna_paired_columns,
column_labels=rna_samples, title="RNA Paired end reads", ylabel="Read count (in millions)",
legend_title="Filter", yaxis_in_millions=True)
#+ echo=False
document.plot_grouped_barchart(numpy.transpose(rna_orphan_data), row_labels=rna_orphan_columns,
column_labels=rna_samples, title="RNA Orphan reads", ylabel="Read count (in millions)",
legend_title="Filter", yaxis_in_millions=True)
| [
"biobakery_workflows.files.ShotGunVis.path",
"numpy.transpose",
"anadama2.PweaveDocument",
"biobakery_workflows.utilities.microbial_read_proportion_multiple_databases",
"biobakery_workflows.visualizations.qc_read_counts"
] | [((141, 157), 'anadama2.PweaveDocument', 'PweaveDocument', ([], {}), '()\n', (155, 157), False, 'from anadama2 import PweaveDocument\n'), ((450, 514), 'biobakery_workflows.visualizations.qc_read_counts', 'visualizations.qc_read_counts', (['document', "vars['dna_read_counts']"], {}), "(document, vars['dna_read_counts'])\n", (479, 514), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((634, 698), 'biobakery_workflows.visualizations.qc_read_counts', 'visualizations.qc_read_counts', (['document', "vars['rna_read_counts']"], {}), "(document, vars['rna_read_counts'])\n", (663, 698), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((1919, 2031), 'biobakery_workflows.utilities.microbial_read_proportion_multiple_databases', 'utilities.microbial_read_proportion_multiple_databases', (['dna_paired_data', 'dna_paired_columns', 'dna_orphan_data'], {}), '(dna_paired_data,\n dna_paired_columns, dna_orphan_data)\n', (1973, 2031), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((4082, 4194), 'biobakery_workflows.utilities.microbial_read_proportion_multiple_databases', 'utilities.microbial_read_proportion_multiple_databases', (['rna_paired_data', 'rna_paired_columns', 'rna_orphan_data'], {}), '(rna_paired_data,\n rna_paired_columns, rna_orphan_data)\n', (4136, 4194), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((1077, 1140), 'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""qc_counts_paired"""', 'document.data_folder'], {}), "('qc_counts_paired', document.data_folder)\n", (1098, 1140), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((1279, 1320), 'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""qc_counts_paired"""'], {}), "('qc_counts_paired')\n", (1300, 1320), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((1478, 1541), 
'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""qc_counts_orphan"""', 'document.data_folder'], {}), "('qc_counts_orphan', document.data_folder)\n", (1499, 1541), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((1676, 1717), 'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""qc_counts_orphan"""'], {}), "('qc_counts_orphan')\n", (1697, 1717), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((2128, 2191), 'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""microbial_counts"""', 'document.data_folder'], {}), "('microbial_counts', document.data_folder)\n", (2149, 2191), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((2349, 2390), 'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""microbial_counts"""'], {}), "('microbial_counts')\n", (2370, 2390), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((2572, 2604), 'numpy.transpose', 'numpy.transpose', (['dna_paired_data'], {}), '(dna_paired_data)\n', (2587, 2604), False, 'import numpy\n'), ((2831, 2863), 'numpy.transpose', 'numpy.transpose', (['dna_orphan_data'], {}), '(dna_orphan_data)\n', (2846, 2863), False, 'import numpy\n'), ((3223, 3290), 'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""rna_qc_counts_paired"""', 'document.data_folder'], {}), "('rna_qc_counts_paired', document.data_folder)\n", (3244, 3290), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((3429, 3474), 'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""rna_qc_counts_paired"""'], {}), "('rna_qc_counts_paired')\n", (3450, 3474), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((3632, 3699), 'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""rna_qc_counts_orphan"""', 'document.data_folder'], {}), 
"('rna_qc_counts_orphan', document.data_folder)\n", (3653, 3699), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((3834, 3879), 'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""rna_qc_counts_orphan"""'], {}), "('rna_qc_counts_orphan')\n", (3855, 3879), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((4291, 4358), 'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""rna_microbial_counts"""', 'document.data_folder'], {}), "('rna_microbial_counts', document.data_folder)\n", (4312, 4358), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((4516, 4561), 'biobakery_workflows.files.ShotGunVis.path', 'files.ShotGunVis.path', (['"""rna_microbial_counts"""'], {}), "('rna_microbial_counts')\n", (4537, 4561), False, 'from biobakery_workflows import utilities, visualizations, files\n'), ((4744, 4776), 'numpy.transpose', 'numpy.transpose', (['rna_paired_data'], {}), '(rna_paired_data)\n', (4759, 4776), False, 'import numpy\n'), ((5003, 5035), 'numpy.transpose', 'numpy.transpose', (['rna_orphan_data'], {}), '(rna_orphan_data)\n', (5018, 5035), False, 'import numpy\n')] |
# Code behind module for Shapefile_Demo.ipynb
################################
##
## Import Statments
##
################################
# Import standard Python modules
import sys
import datacube
import numpy as np
import fiona
import xarray as xr
from rasterio.features import geometry_mask
import shapely
from shapely.ops import transform
from shapely.geometry import shape
from functools import partial
import pyproj
################################
##
## Function Definitions
##
################################
def shapefile_mask(dataset: xr.Dataset, shapefile) -> np.array:
"""Extracts a mask from a shapefile using dataset latitude and longitude extents.
Args:
dataset (xarray.Dataset): The dataset with the latitude and longitude extents.
shapefile (string): The shapefile to be used for extraction.
Returns:
A boolean mask array.
"""
with fiona.open(shapefile, 'r') as source:
collection = list(source)
geometries = []
for feature in collection:
geom = shape(feature['geometry'])
project = partial(
pyproj.transform,
pyproj.Proj(init=source.crs['init']), # source crs
pyproj.Proj(init='epsg:4326')) # destination crs
geom = transform(project, geom) # apply projection
geometries.append(geom)
geobox = dataset.geobox
mask = geometry_mask(
geometries,
out_shape=geobox.shape,
transform=geobox.affine,
all_touched=True,
invert=True)
return mask | [
"shapely.ops.transform",
"fiona.open",
"pyproj.Proj",
"shapely.geometry.shape",
"rasterio.features.geometry_mask"
] | [((901, 927), 'fiona.open', 'fiona.open', (['shapefile', '"""r"""'], {}), "(shapefile, 'r')\n", (911, 927), False, 'import fiona\n'), ((1422, 1531), 'rasterio.features.geometry_mask', 'geometry_mask', (['geometries'], {'out_shape': 'geobox.shape', 'transform': 'geobox.affine', 'all_touched': '(True)', 'invert': '(True)'}), '(geometries, out_shape=geobox.shape, transform=geobox.affine,\n all_touched=True, invert=True)\n', (1435, 1531), False, 'from rasterio.features import geometry_mask\n'), ((1051, 1077), 'shapely.geometry.shape', 'shape', (["feature['geometry']"], {}), "(feature['geometry'])\n", (1056, 1077), False, 'from shapely.geometry import shape\n'), ((1294, 1318), 'shapely.ops.transform', 'transform', (['project', 'geom'], {}), '(project, geom)\n', (1303, 1318), False, 'from shapely.ops import transform\n'), ((1159, 1195), 'pyproj.Proj', 'pyproj.Proj', ([], {'init': "source.crs['init']"}), "(init=source.crs['init'])\n", (1170, 1195), False, 'import pyproj\n'), ((1226, 1255), 'pyproj.Proj', 'pyproj.Proj', ([], {'init': '"""epsg:4326"""'}), "(init='epsg:4326')\n", (1237, 1255), False, 'import pyproj\n')] |
'''
This example provides three examples of a simple plot of 1-D data.
1. a publication-ready single column figure, which is printed to png (600 dpi), pdf, and svg
2. a presentation-ready figure on a black background
Four steps are involved in each figure:
- load/generate the data
- construct a 1d plot (figure, axis, line series) for the spectrum
- size the figure and font
- print the figure to a pdf
'''
import jalapeno.colors.svgcolors as jc
import jalapeno.plots.plots as jpp
import jalapeno.plots.colorscheme as jpc
import numpy as np
# generate the data
x = np.linspace(0, 2*np.pi, 600)
y = np.abs(np.cos(2*x))
# make a 1d plot
fig, ax, line = jpp.make_1d_plot(linecolor=jc.darkorange,
maxx=max(x/np.pi),
maxy=1.01,
xname='x/pi',
yname='cos(2x)')
# plot the data on our 1d plot
line.set_data(x/np.pi,y)
# size the figure and print it to pdf
jpp.SquareFigure().set_size(fig)
jpp.print_fig(fig, 'xy-for-publication', ['pdf', 'png', 'svg'], dpi=600)
# make another 1d plot
fig, ax, line = jpp.make_1d_plot(colorscheme=jpc.FigColors.scheme('black'),
linecolor=jc.coral,
linewidth=4,
showgrid='off',
maxx=max(x/np.pi),
maxy=1.01,
xname='x/pi',
yname='cos(2x)')
# plot the data on our 1d plot
line.set_data(x/np.pi, y)
# size the figure and print it to pdf
jpp.SquareFigure(width=4, fontsize=12).set_size(fig)
jpp.print_fig(fig, 'xy-for-presentation', exts=['pdf']) # way 1, use print_fig and provide exts=['pdf']
jpp.print_fig_to_pdf(fig, 'xy-for-presentation') # way 2, use print_fig_to_pdf | [
"jalapeno.plots.plots.print_fig",
"jalapeno.plots.colorscheme.FigColors.scheme",
"jalapeno.plots.plots.SquareFigure",
"numpy.linspace",
"numpy.cos",
"jalapeno.plots.plots.print_fig_to_pdf"
] | [((573, 603), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(600)'], {}), '(0, 2 * np.pi, 600)\n', (584, 603), True, 'import numpy as np\n'), ((1024, 1096), 'jalapeno.plots.plots.print_fig', 'jpp.print_fig', (['fig', '"""xy-for-publication"""', "['pdf', 'png', 'svg']"], {'dpi': '(600)'}), "(fig, 'xy-for-publication', ['pdf', 'png', 'svg'], dpi=600)\n", (1037, 1096), True, 'import jalapeno.plots.plots as jpp\n'), ((1688, 1743), 'jalapeno.plots.plots.print_fig', 'jpp.print_fig', (['fig', '"""xy-for-presentation"""'], {'exts': "['pdf']"}), "(fig, 'xy-for-presentation', exts=['pdf'])\n", (1701, 1743), True, 'import jalapeno.plots.plots as jpp\n'), ((1793, 1841), 'jalapeno.plots.plots.print_fig_to_pdf', 'jpp.print_fig_to_pdf', (['fig', '"""xy-for-presentation"""'], {}), "(fig, 'xy-for-presentation')\n", (1813, 1841), True, 'import jalapeno.plots.plots as jpp\n'), ((613, 626), 'numpy.cos', 'np.cos', (['(2 * x)'], {}), '(2 * x)\n', (619, 626), True, 'import numpy as np\n'), ((991, 1009), 'jalapeno.plots.plots.SquareFigure', 'jpp.SquareFigure', ([], {}), '()\n', (1007, 1009), True, 'import jalapeno.plots.plots as jpp\n'), ((1166, 1195), 'jalapeno.plots.colorscheme.FigColors.scheme', 'jpc.FigColors.scheme', (['"""black"""'], {}), "('black')\n", (1186, 1195), True, 'import jalapeno.plots.colorscheme as jpc\n'), ((1635, 1673), 'jalapeno.plots.plots.SquareFigure', 'jpp.SquareFigure', ([], {'width': '(4)', 'fontsize': '(12)'}), '(width=4, fontsize=12)\n', (1651, 1673), True, 'import jalapeno.plots.plots as jpp\n')] |