code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')
import argparse
import pickle
import random
from data_split.util import *
from data_util.reader import *
from data_util.schema import *
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
def reverse_dict_list(orig_dict):
rev_dict = dict()
for test in orig_dict:
for lang in orig_dict[test]:
if lang not in rev_dict:
rev_dict[lang] = [test]
else:
rev_dict[lang].append(test)
return rev_dict
def get_mixed_surface(feat, lang, vocab, threshold):
"""
Get cnt number of frequent surface and lemma which DOES not contain the 'feat'
None of the surface forms should have the feat
:param feat: morphological feature
:param lang: language
:param vocab: list of frequent words
:param cnt: number of desired nonsense labels
:return: list of word, label tuple
"""
freq_surf = []
rare_surf = []
schema = UnimorphSchema()
data = load_ds("unimorph", lang)
forbid_vocab = dict()
# make a vocabulary of forbidden words
for x in data[lang]:
# exclude lemmas with space
if ' ' in x["form"]:
continue
x_feats = schema.decode_msd(x["msd"])[0]
if feat in x_feats:
forbid_vocab[x["form"]] = 1
# include each surface form once
surf_cnt = dict()
for x in data[lang]:
# exclude lemmas with space
if ' ' in x["form"]:
continue
# if any of the surface forms have the feat, exclude them
if x["form"] in forbid_vocab:
continue
# exclude x, if the surface form is already in the data
if x["form"] in surf_cnt:
continue
else:
surf_cnt[x['form']] = 1
x_feats = schema.decode_msd(x["msd"])[0]
## Exceptions: drop V.PTCP from the case and gender tests - russian
if (feat in ['Case', 'Gender']) and (lang == 'russian') and (x_feats['Part of Speech'] == 'Participle'):
continue
## Exceptions: If it is a gender test and the noun does not have a gender feature, ignore
if (feat == 'Gender') and (lang == 'russian') and (x_feats['Part of Speech'] == 'Noun') and (
'Gender' not in x_feats):
continue
if feat not in x_feats:
# flag x
x["flag"] = 1
if x["form"].lower() in vocab:
freq_surf.append(x)
else:
rare_surf.append(x)
"""
else:
if feat == 'Number' and (x_feats['Part of Speech'] != 'Noun') and (feat in x_feats):
x["flag"] = 1
if x["form"].lower() in vocab:
freq_surf.append(x)
else:
rare_surf.append(x)
"""
# Try to sample 80%-20% if possible
if (len(freq_surf) >= int(threshold * 0.8)) and (len(rare_surf) >= int(threshold * 0.2)):
shuffled_frequent = random.sample(freq_surf, int(threshold * 0.8))
shuffled_rare = random.sample(rare_surf, int(threshold * 0.2))
instances = shuffled_frequent + shuffled_rare
# else get all the frequent ones, and sample the rest from the rare ones
elif (len(freq_surf) + len(rare_surf)) >= threshold:
shuffled_frequent = random.sample(freq_surf, len(freq_surf))
shuffled_rare = random.sample(rare_surf, int(threshold - len(freq_surf)))
instances = shuffled_frequent + shuffled_rare
else:
print("Not enough instances are left")
return []
return instances
def split_for_morph_test_mixed(feat, lang, vocab, nonlabelratio, savedir, threshold=10000):
"""
Splits unimorph data into training, dev an test for the given feature and language.
Precheck 1: We check if the feature can have more than one label beforehand
This function eliminates cases where the form has space - e.g., "anlıyor musun"
This function eliminates cases where the feature is very sparse (seen less than 5 times)
This function eliminates ambiguous forms
:param feat: Case, Valency...
:param lang: turkish, russian, english...
:param vocab: frequent word list from wikipedia
:param savedir: folder to save the splits
:param threshold: fixed to 10K
:return: Default output directory is ./output/feature/lang/train-dev-test.txt
"""
freq_surf = []
rare_surf = []
schema = UnimorphSchema()
data = load_ds("unimorph", lang)
# make a label dictionary for noisy labels
label_cnt = dict()
# make a surface form dictionary for ambiguous
surf_cnt = dict()
nonlabel_cnt = threshold * nonlabelratio
reallabel_cnt = threshold * (1. - nonlabelratio)
for x in data[lang]:
# exclude lemmas with space
if ' ' in x["form"]:
continue
x_feats = schema.decode_msd(x["msd"])[0]
if feat in x_feats:
# There is a bug with Number: 'Part of Speech'
# Don't include verbs/verb like words to singular/plural test
# if feat=='Number' and (x_feats['Part of Speech']!='Noun'):
# continue
## Exceptions: drop V.PTCP from the case and gender tests - russian
if (feat in ['Case', 'Gender']) and (lang == 'russian') and (x_feats['Part of Speech'] == 'Participle'):
continue
## Exceptions: If it is a gender test and the noun does not have a gender feature, ignore
if (feat == 'Gender') and (lang == 'russian') and (x_feats['Part of Speech'] == 'Noun') and (
'Gender' not in x_feats):
continue
if x["form"].lower() in vocab:
freq_surf.append(x)
# rare surface and frequent lemma
else:
rare_surf.append(x)
# for sparse labels
if x_feats[feat] not in label_cnt:
label_cnt[x_feats[feat]] = 1
else:
label_cnt[x_feats[feat]] += 1
# for amb. forms
if x['form'] not in surf_cnt:
surf_cnt[x['form']] = 1
else:
surf_cnt[x['form']] += 1
# if there is any (very) sparse label, exclude those
forbid_labs = []
for label in label_cnt:
if (label_cnt[label]) < 5:
forbid_labs.append(label)
# if there are any surface forms with multiple values, exclude those
amb_form_dict = dict()
for surf, cnt in surf_cnt.items():
if cnt > 1:
amb_form_dict[surf] = 1
# check here if we don't have enough instances or labels already
if ((len(label_cnt) - len(forbid_labs)) < 2) or len(surf_cnt) < reallabel_cnt:
print("Not enough instances or labels are left")
return False
# Exclude the noisy labels, ambiguous forms and rare words
if ((len(forbid_labs) > 0) or (len(amb_form_dict) > 0)):
freq_surf = []
rare_surf = []
for x in data[lang]:
# exclude lemmas with space
if ' ' in x["form"]:
continue
# exclude amb. forms
if x["form"] in amb_form_dict:
continue
x_feats = schema.decode_msd(x["msd"])[0]
if 'Part of Speech' not in x_feats:
# probably a mistake in unimorph, just pass
continue
# exclude non nominal forms which has plurality tag
# if feat=='Number' and (x_feats['Part of Speech']!='Noun'):
# continue
## Exceptions: drop V.PTCP from the case and gender tests - russian
if (feat in ['Case', 'Gender']) and (lang == 'russian') and x_feats['Part of Speech'] == 'Participle':
continue
## Exceptions: If it is a gender test and the noun does not have a gender feature, ignore
if (feat == 'Gender') and (lang == 'russian') and (x_feats['Part of Speech'] == 'Noun') and (
'Gender' not in x_feats):
continue
if (feat in x_feats) and (x_feats[feat] not in forbid_labs):
# instances.append(x)
# if frequent surface
if x["form"].lower() in vocab:
freq_surf.append(x)
# rare surface
else:
rare_surf.append(x)
# Try to sample 80%-20% if possible
if (len(freq_surf) >= int(reallabel_cnt * 0.8)) and (len(rare_surf) >= int(reallabel_cnt * 0.2)):
shuffled_frequent = random.sample(freq_surf, int(reallabel_cnt * 0.8))
shuffled_rare = random.sample(rare_surf, int(reallabel_cnt * 0.2))
instances = shuffled_frequent + shuffled_rare
# else get all the frequent ones, and sample the rest from the rare ones
elif (len(freq_surf) + len(rare_surf)) >= reallabel_cnt:
shuffled_frequent = random.sample(freq_surf, len(freq_surf))
shuffled_rare = random.sample(rare_surf, int(reallabel_cnt - len(freq_surf)))
instances = shuffled_frequent + shuffled_rare
else:
print("Not enough instances are left")
return False
# get the nonlabel instances
non_instances = get_mixed_surface(feat, lang, vocab, nonlabel_cnt)
if len(non_instances) == 0:
return False
all_instances = instances + non_instances
shuffled_instances = random.sample(all_instances, threshold)
train_inst = shuffled_instances[:int(threshold * 0.7)]
dev_inst = shuffled_instances[int(threshold * 0.7):int(threshold * 0.9)]
test_inst = shuffled_instances[int(threshold * 0.9):]
train_path = os.path.join(savedir, feat, lang, "train.txt")
ensure_dir(train_path)
dev_path = os.path.join(savedir, feat, lang, "dev.txt")
ensure_dir(dev_path)
test_path = os.path.join(savedir, feat, lang, "test.txt")
ensure_dir(test_path)
# Write file
with open(train_path, 'w') as fout:
for inst in train_inst:
x_feats = schema.decode_msd(inst["msd"])[0]
if "flag" not in inst:
if feat == 'Person':
x_feats[feat] = x_feats[feat] + " " + x_feats['Number']
fout.write("\t".join([inst["form"], x_feats[feat]]) + "\n")
else:
fout.write("\t".join([inst["form"], "None"]) + "\n")
fout.close()
with open(dev_path, 'w') as fout:
for inst in dev_inst:
x_feats = schema.decode_msd(inst["msd"])[0]
if "flag" not in inst:
if feat == 'Person':
x_feats[feat] = x_feats[feat] + " " + x_feats['Number']
fout.write("\t".join([inst["form"], x_feats[feat]]) + "\n")
else:
fout.write("\t".join([inst["form"], "None"]) + "\n")
fout.close()
with open(test_path, 'w') as fout:
for inst in test_inst:
x_feats = schema.decode_msd(inst["msd"])[0]
if "flag" not in inst:
if feat == 'Person':
x_feats[feat] = x_feats[feat] + " " + x_feats['Number']
fout.write("\t".join([inst["form"], x_feats[feat]]) + "\n")
else:
fout.write("\t".join([inst["form"], "None"]) + "\n")
fout.close()
return True
def split_for_morph_test(feat, lang, vocab, savedir, threshold=10000):
"""
Splits unimorph data into training, dev an test for the given feature and language.
Precheck 1: We check if the feature can have more than one label beforehand
This function eliminates cases where the form has space - e.g., "anlıyor musun"
This function eliminates cases where the feature is very sparse (seen less than 5 times)
This function eliminates ambiguous forms
:param feat: Case, Valency...
:param lang: turkish, russian, english...
:param vocab: frequent word list from wikipedia
:param savedir: folder to save the splits
:param threshold: fixed to 10K
:return: Default output directory is ./output/feature/lang/train-dev-test.txt
"""
freq_surf = []
rare_surf = []
schema = UnimorphSchema()
data = load_ds("unimorph", lang)
# make a label dictionary for noisy labels
label_cnt = dict()
# make a surface form dictionary for ambiguous
surf_cnt = dict()
for x in data[lang]:
# exclude lemmas with space
if ' ' in x["form"]:
continue
x_feats = schema.decode_msd(x["msd"])[0]
if feat in x_feats:
# There is a bug with Number: 'Part of Speech'
# Don't include verbs/verb like words to singular/plural test
# if feat=='Number' and (x_feats['Part of Speech']!='Noun'):
# continue
# instances.append(x)
if x["form"].lower() in vocab:
freq_surf.append(x)
# rare surface
else:
rare_surf.append(x)
# for sparse labels
if x_feats[feat] not in label_cnt:
label_cnt[x_feats[feat]] = 1
else:
label_cnt[x_feats[feat]] += 1
# for amb. forms
if x['form'] not in surf_cnt:
surf_cnt[x['form']] = 1
else:
surf_cnt[x['form']] += 1
# if there is any (very) sparse label, exclude those
forbid_labs = []
for label in label_cnt:
if (label_cnt[label]) < 5:
forbid_labs.append(label)
# if there are any surface forms with multiple values, exclude those
amb_form_dict = dict()
for surf, cnt in surf_cnt.items():
if cnt > 1:
amb_form_dict[surf] = 1
# check here if we don't have enough instances or labels already
if ((len(label_cnt) - len(forbid_labs)) < 2) or len(surf_cnt) < threshold:
print("Not enough instances or labels are left")
return False
# Exclude the noisy labels, ambiguous forms and rare words
if ((len(forbid_labs) > 0) or (len(amb_form_dict) > 0)):
freq_surf = []
rare_surf = []
for x in data[lang]:
# exclude lemmas with space
if ' ' in x["form"]:
continue
# exclude amb. forms
if x["form"] in amb_form_dict:
continue
x_feats = schema.decode_msd(x["msd"])[0]
if 'Part of Speech' not in x_feats:
# probably a mistake in unimorph, just pass
continue
# exclude non nominal forms which has plurality tag
# if feat=='Number' and (x_feats['Part of Speech']!='Noun'):
# continue
if (feat in x_feats) and (x_feats[feat] not in forbid_labs):
# instances.append(x)
# if frequent surface
if x["form"].lower() in vocab:
freq_surf.append(x)
# rare surface
else:
rare_surf.append(x)
# Try to sample 80%-20% if possible
if (len(freq_surf) >= int(threshold * 0.8)) and (len(rare_surf) >= int(threshold * 0.2)):
shuffled_frequent = random.sample(freq_surf, int(threshold * 0.8))
shuffled_rare = random.sample(rare_surf, int(threshold * 0.2))
instances = shuffled_frequent + shuffled_rare
# else get all the frequent ones, and sample the rest from the rare ones
elif (len(freq_surf) + len(rare_surf)) >= threshold:
shuffled_frequent = random.sample(freq_surf, len(freq_surf))
shuffled_rare = random.sample(rare_surf, int(threshold - len(freq_surf)))
instances = shuffled_frequent + shuffled_rare
else:
print("Not enough instances are left")
return False
shuffled_instances = random.sample(instances, threshold)
train_inst = shuffled_instances[:int(threshold * 0.7)]
dev_inst = shuffled_instances[int(threshold * 0.7):int(threshold * 0.9)]
test_inst = shuffled_instances[int(threshold * 0.9):]
train_path = os.path.join(savedir, feat, lang, "train.txt")
ensure_dir(train_path)
dev_path = os.path.join(savedir, feat, lang, "dev.txt")
ensure_dir(dev_path)
test_path = os.path.join(savedir, feat, lang, "test.txt")
ensure_dir(test_path)
# Write file
with open(train_path, 'w') as fout:
for inst in train_inst:
x_feats = schema.decode_msd(inst["msd"])[0]
if feat == 'Person':
x_feats[feat] = x_feats[feat] + " " + x_feats['Number']
fout.write("\t".join([inst["form"], x_feats[feat]]) + "\n")
fout.close()
with open(dev_path, 'w') as fout:
for inst in dev_inst:
x_feats = schema.decode_msd(inst["msd"])[0]
if feat == 'Person':
x_feats[feat] = x_feats[feat] + " " + x_feats['Number']
fout.write("\t".join([inst["form"], x_feats[feat]]) + "\n")
fout.close()
with open(test_path, 'w') as fout:
for inst in test_inst:
x_feats = schema.decode_msd(inst["msd"])[0]
if feat == 'Person':
x_feats[feat] = x_feats[feat] + " " + x_feats['Number']
fout.write("\t".join([inst["form"], x_feats[feat]]) + "\n")
fout.close()
return True
def split_for_number_test(lang, vocab, savedir, threshold=10000):
"""
Create train, dev, test splits for 'number of characters' and 'number of morphemes' tests
:param lang: turkish, russian, english...
:param vocab: frequent word list from wikipedia
:param savedir: folder to save the splits
:param threshold: fixed to 10K
:return: Default output directory is ./output/CharacterCount/lang/train-dev-test.txt and
./output/TagCount/lang/train-dev-test.txt
"""
# instances = []
freq_surf = []
rare_surf = []
schema = UnimorphSchema()
data = load_ds("unimorph", lang)
surf_dict = dict()
for x in data[lang]:
# exclude lemmas with space
if ' ' in x["form"]:
continue
# exclude duplicates
if x['form'] in surf_dict:
continue
# exclude rare words
# if x[raretype].lower() not in vocab:
# continue
# else:
surf_dict[x['form']] = 1
x["num_chars"] = str(len(x["form"]))
x["num_morph_tags"] = str(len(schema.decode_msd(x["msd"])[0]))
if x["form"].lower() in vocab:
freq_surf.append(x)
# rare surface and frequent lemma
else:
rare_surf.append(x)
# instances.append(x)
# Try to sample 80%-20% if possible
if (len(freq_surf) >= int(threshold * 0.8)) and (len(rare_surf) >= int(threshold * 0.2)):
shuffled_frequent = random.sample(freq_surf, int(threshold * 0.8))
shuffled_rare = random.sample(rare_surf, int(threshold * 0.2))
instances = shuffled_frequent + shuffled_rare
# else get all the frequent ones, and sample the rest from the rare ones
elif (len(freq_surf) + len(rare_surf)) >= threshold:
shuffled_frequent = random.sample(freq_surf, len(freq_surf))
shuffled_rare = random.sample(rare_surf, int(threshold - len(freq_surf)))
instances = shuffled_frequent + shuffled_rare
else:
print("Not enough instances are left")
return False
shuffled_instances = random.sample(instances, threshold)
train_inst = shuffled_instances[:int(threshold * 0.7)]
dev_inst = shuffled_instances[int(threshold * 0.7):int(threshold * 0.9)]
test_inst = shuffled_instances[int(threshold * 0.9):]
feat = "CharacterCount"
train_path = os.path.join(savedir, feat, lang, "train.txt")
ensure_dir(train_path)
dev_path = os.path.join(savedir, feat, lang, "dev.txt")
ensure_dir(dev_path)
test_path = os.path.join(savedir, feat, lang, "test.txt")
ensure_dir(test_path)
# Write file
with open(train_path, 'w') as fout:
for inst in train_inst:
fout.write("\t".join([inst["form"], inst["num_chars"]]) + "\n")
fout.close()
with open(dev_path, 'w') as fout:
for inst in dev_inst:
fout.write("\t".join([inst["form"], inst["num_chars"]]) + "\n")
fout.close()
with open(test_path, 'w') as fout:
for inst in test_inst:
fout.write("\t".join([inst["form"], inst["num_chars"]]) + "\n")
fout.close()
feat = "TagCount"
train_path = os.path.join(savedir, feat, lang, "train.txt")
ensure_dir(train_path)
dev_path = os.path.join(savedir, feat, lang, "dev.txt")
ensure_dir(dev_path)
test_path = os.path.join(savedir, feat, lang, "test.txt")
ensure_dir(test_path)
# Write file
with open(train_path, 'w') as fout:
for inst in train_inst:
fout.write("\t".join([inst["form"], inst["num_morph_tags"]]) + "\n")
fout.close()
with open(dev_path, 'w') as fout:
for inst in dev_inst:
fout.write("\t".join([inst["form"], inst["num_morph_tags"]]) + "\n")
fout.close()
with open(test_path, 'w') as fout:
for inst in test_inst:
fout.write("\t".join([inst["form"], inst["num_morph_tags"]]) + "\n")
fout.close()
return True
def split_for_nonsense(lang, pseudodir, savedir, type="ort", threshold=10000):
"""
Create splits in two different formats:
Binary: given the word, guess if it is pseduo or not
Old20: given the pseudo word, guess its level of nonsense - approximately
Probably binary one makes more sense, but there are more options available
:param lang: any supported-prcessed wuggy language under generated folder
:param pseudodir: folder of pseudo files generated by wuggy
:param savedir: folder to save the splits
:param type: ort or phon
:param threshold:
:return:
"""
instances = []
words = []
fin_path = os.path.join(pseudodir, (type + "_" + lang))
# Read file
i = 0
with open(fin_path) as fin:
for line in fin:
if i == 0:
i += 1
continue
x = {}
all_cols = line.rstrip().split("\t")
x["word"] = all_cols[0]
words.append(x["word"])
x["non_sense"] = all_cols[1]
instances.append(x)
fin.close()
# make a vocab
word_vocab = list(set(words))
if len(instances) < threshold:
print("Not enough instances")
return False
if len(word_vocab) < (threshold / 2):
print("Not enough words")
return False
# shuffle is an in-place operation
random.shuffle(word_vocab)
shuffled_instances = random.sample(instances, threshold)
shuffled_labels = np.random.choice([0, 1], size=(threshold,), p=[1. / 2, 1. / 2])
train_inst = shuffled_instances[:int(threshold * 0.7)]
train_labels = shuffled_labels[:int(threshold * 0.7)]
dev_inst = shuffled_instances[int(threshold * 0.7):int(threshold * 0.9)]
dev_labels = shuffled_labels[int(threshold * 0.7):int(threshold * 0.9)]
test_inst = shuffled_instances[int(threshold * 0.9):]
test_labels = shuffled_labels[int(threshold * 0.9):]
feat = "NonSense_Binary"
train_path = os.path.join(savedir, feat, lang, "train.txt")
ensure_dir(train_path)
dev_path = os.path.join(savedir, feat, lang, "dev.txt")
ensure_dir(dev_path)
test_path = os.path.join(savedir, feat, lang, "test.txt")
ensure_dir(test_path)
# Write file
wi = 0
with open(train_path, 'w') as fout:
for inst, label in zip(train_inst, train_labels):
if label == 0:
fout.write("\t".join([inst["non_sense"], str(label)]) + "\n")
elif label == 1:
fout.write("\t".join([word_vocab[wi], str(label)]) + "\n")
wi += 1
fout.close()
with open(dev_path, 'w') as fout:
for inst, label in zip(dev_inst, dev_labels):
if label == 0:
fout.write("\t".join([inst["non_sense"], str(label)]) + "\n")
elif label == 1:
fout.write("\t".join([word_vocab[wi], str(label)]) + "\n")
wi += 1
fout.close()
with open(test_path, 'w') as fout:
for inst, label in zip(test_inst, test_labels):
if label == 0:
fout.write("\t".join([inst["non_sense"], str(label)]) + "\n")
elif label == 1:
fout.write("\t".join([word_vocab[wi], str(label)]) + "\n")
wi += 1
fout.close()
return True
def main(args):
langs = {'portuguese': 'pt',
'french': 'fr',
'serbo-croatian': 'sh',
'polish': 'pl',
'czech': 'cs',
'modern-greek': 'el',
'catalan': 'ca',
'bulgarian': 'bg',
'danish': 'da',
'estonian': 'et',
'quechua': 'qu',
'swedish': 'sv',
'armenian': 'hy',
'macedonian': 'mk',
'arabic': 'ar',
'dutch': 'nl',
'hungarian': 'hu',
'italian': 'it',
'romanian': 'ro',
'ukranian': 'uk',
'german': 'de',
'finnish': 'fi',
'russian': 'ru',
'turkish': 'tr',
'spanish': 'es'
}
# Language specific vocabulary sizes
# wiki vocabulary sizes: de: 2275234, es: 985668, fi: 730484, tr: 416052, ru: 1888424,
# pt: 592109, fr: 1152450, sh: 454675, pl: 1032578, cs:627842, el: 306450, ca:490566, bg: 334079, da: 312957
# et: 329988, qu: 23074, sv: 1143274, hy: 332673, mk: 176948, ar: 610978, nl: 871023, hu: 793867, it: 871054,
# ro: 354325, uk: 912459
langs_vocab = {'german': 750000, 'finnish': 500000, 'russian': 750000, 'turkish': 500000, 'spanish': 500000, \
'portuguese': 500000, 'french': 750000, 'serbo-croatian': 500000, 'polish': 750000, 'czech': 500000, \
'modern-greek': 500000, 'catalan': 500000, 'bulgarian': 500000, 'danish': 500000, 'estonian': 500000, \
'quechua': 500000, 'swedish': 750000, 'armenian': 500000, 'macedonian': 500000, 'arabic': 500000, \
'dutch': 600000, 'hungarian': 600000, 'italian': 600000, 'romanian': 500000, 'ukranian': 750000}
with open('test_vs_lang_feat_over_10K.pkl', 'rb') as handle:
test_vs_lang = pickle.load(handle)
lang_vs_test = reverse_dict_list(test_vs_lang)
# Load preprocessed statistics
with open('supported_languages_over_10K.pkl', 'rb') as handle:
supported_lang_list = pickle.load(handle)
if args.feat == 1:
for lang in lang_vs_test:
if lang in langs:
embfile = os.path.join('..', "embeddings", "wiki." + langs[lang] + ".vec")
print("Reading vocabulary for lang " + lang)
vocab = load_dict(embfile, maxvoc=langs_vocab[lang])
for test_name in lang_vs_test[lang]:
print("Preparing " + lang + " - " + test_name)
split_for_morph_test_mixed(test_name, lang, vocab, args.nonlabelratio, args.savedir)
if args.common == 1:
# General tests for all supported languages
for lang in supported_lang_list:
if lang in langs:
# get the vocabulary first
embfile = os.path.join('..', "embeddings", "wiki." + langs[lang] + ".vec")
print("Reading vocabulary for lang " + lang)
vocab = load_dict(embfile, maxvoc=langs_vocab[lang])
print("Preparing Character and Tag Count Tests- " + lang)
split_for_number_test(lang, vocab, args.savedir)
print("Preparing POS Test- " + lang)
split_for_morph_test("Part of Speech", lang, vocab, args.savedir)
if args.pseudo == 1:
# Pseudo word tests only for languages with wuggy support
# Orthographic pseudo
ort_lang_lst = ["turkish", "german", "spanish", "english", 'dutch', 'french', 'serbian_latin', 'basque',
'vietnamese']
for lang in ort_lang_lst:
print("Processing orthographic " + lang)
split_for_nonsense(lang, args.pseudodir, args.savedir, type="ort")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Prepare feature tests
parser.add_argument('--nonlabelratio', type=float, default=0.3)
parser.add_argument('--savedir', type=str, default='./probing_datasets_2')
parser.add_argument('--feat', type=int, default=0)
parser.add_argument('--common', type=int, default=1)
parser.add_argument('--pseudo', type=int, default=1)
parser.add_argument('--pseudodir', type=str, default='./generated_wuggy_files')
args = parser.parse_args()
main(args)
| [
"random.sample",
"random.shuffle",
"argparse.ArgumentParser",
"pickle.load",
"sys.path.append"
] | [((36, 58), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (51, 58), False, 'import sys\n'), ((9518, 9557), 'random.sample', 'random.sample', (['all_instances', 'threshold'], {}), '(all_instances, threshold)\n', (9531, 9557), False, 'import random\n'), ((15880, 15915), 'random.sample', 'random.sample', (['instances', 'threshold'], {}), '(instances, threshold)\n', (15893, 15915), False, 'import random\n'), ((19487, 19522), 'random.sample', 'random.sample', (['instances', 'threshold'], {}), '(instances, threshold)\n', (19500, 19522), False, 'import random\n'), ((22722, 22748), 'random.shuffle', 'random.shuffle', (['word_vocab'], {}), '(word_vocab)\n', (22736, 22748), False, 'import random\n'), ((22774, 22809), 'random.sample', 'random.sample', (['instances', 'threshold'], {}), '(instances, threshold)\n', (22787, 22809), False, 'import random\n'), ((28457, 28482), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (28480, 28482), False, 'import argparse\n'), ((26531, 26550), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (26542, 26550), False, 'import pickle\n'), ((26735, 26754), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (26746, 26754), False, 'import pickle\n')] |
import numpy as np
from keras.layers import *
from keras.models import *
from keras.activations import *
from keras.callbacks import ModelCheckpoint,ReduceLROnPlateau
def keras_model():
model=Sequential()
model.add(Conv2D(32,(3,3),padding="same"))
model.add(Conv2D(32,(3,3),padding="same"))
model.add(MaxPool2D())
model.add(Conv2D(64,(3,3),padding="same"))
model.add(Conv2D(64,(3,3),padding="same"))
model.add(MaxPool2D())
model.add(Flatten())
model.add(Dense(128,activation='relu'))
# model.add(relu())
model.add(Dense(256,activation='relu'))
# model.add(relu())
model.add(Dense(128,activation='relu'))
# model.add(relu())
model.add(Dense(1))
model.compile(optimizer="adam",loss="mse")
filepath="selfdrivingv1.h5"
checkpoint= ModelCheckpoint (filepath,verbose=1,save_best_only=True)
lr=ReduceLROnPlateau(factor=0.1,patience=3,min_lr=1e-8)
callbacks=[checkpoint,lr]
return model,callbacks
features=np.load("features_40x40.npy")
labels=np.load("labels_40x40.npy")
#augment data
features=np.append(features,features[:,:,::-1],axis=0)
labels=np.append(labels,-labels,axis=0)
features=features.reshape(features.shape[0],40,40,1)
print(features.shape)
model,callbacks=keras_model()
from sklearn.model_selection import train_test_split as split
train_x,test_x,train_y,test_y=split(features,labels,test_size=0.1,random_state=1)
print(train_x[0])
model.fit(x=train_x,y=train_y,epochs=10,batch_size=64,callbacks=callbacks,validation_data=(test_x,test_y))
print(model.summary())
model.save("selfdriving1v1.h5")
| [
"keras.callbacks.ModelCheckpoint",
"sklearn.model_selection.train_test_split",
"keras.callbacks.ReduceLROnPlateau",
"numpy.append",
"numpy.load"
] | [((1030, 1059), 'numpy.load', 'np.load', (['"""features_40x40.npy"""'], {}), "('features_40x40.npy')\n", (1037, 1059), True, 'import numpy as np\n'), ((1068, 1095), 'numpy.load', 'np.load', (['"""labels_40x40.npy"""'], {}), "('labels_40x40.npy')\n", (1075, 1095), True, 'import numpy as np\n'), ((1125, 1174), 'numpy.append', 'np.append', (['features', 'features[:, :, ::-1]'], {'axis': '(0)'}), '(features, features[:, :, ::-1], axis=0)\n', (1134, 1174), True, 'import numpy as np\n'), ((1179, 1213), 'numpy.append', 'np.append', (['labels', '(-labels)'], {'axis': '(0)'}), '(labels, -labels, axis=0)\n', (1188, 1213), True, 'import numpy as np\n'), ((1418, 1472), 'sklearn.model_selection.train_test_split', 'split', (['features', 'labels'], {'test_size': '(0.1)', 'random_state': '(1)'}), '(features, labels, test_size=0.1, random_state=1)\n', (1423, 1472), True, 'from sklearn.model_selection import train_test_split as split\n'), ((839, 896), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'verbose': '(1)', 'save_best_only': '(True)'}), '(filepath, verbose=1, save_best_only=True)\n', (854, 896), False, 'from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n'), ((904, 959), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'factor': '(0.1)', 'patience': '(3)', 'min_lr': '(1e-08)'}), '(factor=0.1, patience=3, min_lr=1e-08)\n', (921, 959), False, 'from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n')] |
import os
from pathlib import Path
from pytest import fixture
@fixture(autouse=True, scope='session')
def chdir() -> None:
os.chdir(Path(__file__).parent)
| [
"pytest.fixture",
"pathlib.Path"
] | [((66, 104), 'pytest.fixture', 'fixture', ([], {'autouse': '(True)', 'scope': '"""session"""'}), "(autouse=True, scope='session')\n", (73, 104), False, 'from pytest import fixture\n'), ((139, 153), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (143, 153), False, 'from pathlib import Path\n')] |
#encoding=utf8
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import crf
import cws.BiLSTM as modelDef
from cws.data import Data
tf.app.flags.DEFINE_string('dict_path', 'data/your_dict.pkl', 'dict path')
tf.app.flags.DEFINE_string('train_data', 'data/your_train_data.pkl', 'train data path')
tf.app.flags.DEFINE_string('ckpt_path', 'checkpoint/cws.finetune.ckpt/', 'checkpoint path')
tf.app.flags.DEFINE_integer('embed_size', 256, 'embedding size')
tf.app.flags.DEFINE_integer('hidden_size', 512, 'hidden layer node number')
tf.app.flags.DEFINE_integer('batch_size', 128, 'batch size')
tf.app.flags.DEFINE_integer('epoch', 20, 'training epoch')
tf.app.flags.DEFINE_float('lr', 0.001, 'learning rate')
tf.app.flags.DEFINE_string('save_path','checkpoint/cws.ckpt/','new model save path')
FLAGS = tf.app.flags.FLAGS
class BiLSTMTrain(object):
def __init__(self, data_train=None, data_valid=None, data_test=None, model=None):
self.data_train = data_train
self.data_valid = data_valid
self.data_test = data_test
self.model = model
def train(self):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
## finetune ##
# ckpt = tf.train.latest_checkpoint(FLAGS.ckpt_path)
# saver = tf.train.Saver()
# saver.restore(sess, ckpt)
# print('-->finetune the ckeckpoint:'+ckpt+'...')
##############
max_epoch = 10
tr_batch_size = FLAGS.batch_size
max_max_epoch = FLAGS.epoch # Max epoch
display_num = 10 # Display 10 pre epoch
tr_batch_num = int(self.data_train.y.shape[0] / tr_batch_size)
display_batch = int(tr_batch_num / display_num)
saver = tf.train.Saver(max_to_keep=10)
for epoch in range(max_max_epoch):
_lr = FLAGS.lr
if epoch > max_epoch:
_lr = 0.0002
print('EPOCH %d, lr=%g' % (epoch + 1, _lr))
start_time = time.time()
_losstotal = 0.0
show_loss = 0.0
for batch in range(tr_batch_num):
fetches = [self.model.loss, self.model.train_op]
X_batch, y_batch = self.data_train.next_batch(tr_batch_size)
feed_dict = {self.model.X_inputs: X_batch, self.model.y_inputs: y_batch, self.model.lr: _lr,
self.model.batch_size: tr_batch_size,
self.model.keep_prob: 0.5}
_loss, _ = sess.run(fetches, feed_dict)
_losstotal += _loss
show_loss += _loss
if (batch + 1) % display_batch == 0:
valid_acc = self.test_epoch(self.data_valid, sess) # valid
print('\ttraining loss=%g ; valid acc= %g ' % (show_loss / display_batch,
valid_acc))
show_loss = 0.0
mean_loss = _losstotal / (tr_batch_num + 0.000001)
if (epoch + 1) % 1 == 0: # Save once per epoch
save_path = saver.save(sess, self.model.model_save_path+'_plus', global_step=(epoch + 1))
print('the save path is ', save_path)
print('\ttraining %d, loss=%g ' % (self.data_train.y.shape[0], mean_loss))
print('Epoch training %d, loss=%g, speed=%g s/epoch' % (
self.data_train.y.shape[0], mean_loss, time.time() - start_time))
# testing
print('**TEST RESULT:')
test_acc = self.test_epoch(self.data_test, sess)
print('**Test %d, acc=%g' % (self.data_test.y.shape[0], test_acc))
sess.close()
def test_epoch(self, dataset=None, sess=None):
    """Evaluate the model on ``dataset`` with CRF Viterbi decoding.

    Args:
        dataset: batch provider exposing ``.y`` (label array) and
            ``.next_batch(batch_size)``.
        sess: open TensorFlow session holding the trained model.

    Returns:
        float: fraction of correctly predicted tags over all evaluated
        (unpadded) positions; 0.0 when the dataset holds less than one
        full batch (previously this raised ZeroDivisionError).
    """
    _batch_size = FLAGS.batch_size
    _y = dataset.y
    data_size = _y.shape[0]
    batch_num = int(data_size / _batch_size)
    correct_labels = 0
    total_labels = 0
    fetches = [self.model.scores, self.model.length, self.model.transition_params]
    for i in range(batch_num):
        X_batch, y_batch = dataset.next_batch(_batch_size)
        # keep_prob=1.0 disables dropout for evaluation; lr is fed but unused here.
        feed_dict = {self.model.X_inputs: X_batch, self.model.y_inputs: y_batch, self.model.lr: 1e-5,
                     self.model.batch_size: _batch_size,
                     self.model.keep_prob: 1.0}
        test_score, test_length, transition_params = sess.run(fetches=fetches,
                                                              feed_dict=feed_dict)
        for tf_unary_scores_, y_, sequence_length_ in zip(
                test_score, y_batch, test_length):
            # Trim padding before decoding, then compare the Viterbi path to gold tags.
            tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
            y_ = y_[:sequence_length_]
            viterbi_sequence, _ = crf.viterbi_decode(
                tf_unary_scores_, transition_params)
            correct_labels += np.sum(np.equal(viterbi_sequence, y_))
            total_labels += sequence_length_
    if total_labels == 0:
        # Dataset smaller than one batch: nothing was evaluated.
        return 0.0
    accuracy = correct_labels / float(total_labels)
    return accuracy
def main(_):
    """Load the corpus, build the BiLSTM-CRF model and run training."""
    corpus = Data(dict_path=FLAGS.dict_path, train_data=FLAGS.train_data)
    print('Corpus loading completed:', FLAGS.train_data)
    data_train, data_valid, data_test = corpus.builderTrainData()
    print('The training set, verification set, and test set split are completed!')
    bilstm = modelDef.BiLSTMModel(max_len=corpus.max_len,
                                 vocab_size=corpus.word2id.__len__() + 1,
                                 class_num=corpus.tag2id.__len__(),
                                 model_save_path=FLAGS.save_path,
                                 embed_size=FLAGS.embed_size,
                                 hs=FLAGS.hidden_size)
    print('Model definition completed!')
    trainer = BiLSTMTrain(data_train, data_valid, data_test, bilstm)
    trainer.train()
    print('Model training completed!')
# tf.app.run() parses the FLAGS defined above and then calls main(_).
if __name__ == '__main__':
    tf.app.run()
| [
"tensorflow.app.flags.DEFINE_float",
"cws.data.Data",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.app.flags.DEFINE_string",
"numpy.equal",
"tensorflow.global_variables_initializer",
"tensorflow.ConfigProto",
"tensorflow.contrib.crf.viterbi_dec... | [((172, 246), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dict_path"""', '"""data/your_dict.pkl"""', '"""dict path"""'], {}), "('dict_path', 'data/your_dict.pkl', 'dict path')\n", (198, 246), True, 'import tensorflow as tf\n'), ((248, 339), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_data"""', '"""data/your_train_data.pkl"""', '"""train data path"""'], {}), "('train_data', 'data/your_train_data.pkl',\n 'train data path')\n", (274, 339), True, 'import tensorflow as tf\n'), ((337, 432), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""ckpt_path"""', '"""checkpoint/cws.finetune.ckpt/"""', '"""checkpoint path"""'], {}), "('ckpt_path', 'checkpoint/cws.finetune.ckpt/',\n 'checkpoint path')\n", (363, 432), True, 'import tensorflow as tf\n'), ((430, 494), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""embed_size"""', '(256)', '"""embedding size"""'], {}), "('embed_size', 256, 'embedding size')\n", (457, 494), True, 'import tensorflow as tf\n'), ((496, 571), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""hidden_size"""', '(512)', '"""hidden layer node number"""'], {}), "('hidden_size', 512, 'hidden layer node number')\n", (523, 571), True, 'import tensorflow as tf\n'), ((573, 633), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(128)', '"""batch size"""'], {}), "('batch_size', 128, 'batch size')\n", (600, 633), True, 'import tensorflow as tf\n'), ((635, 693), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""epoch"""', '(20)', '"""training epoch"""'], {}), "('epoch', 20, 'training epoch')\n", (662, 693), True, 'import tensorflow as tf\n'), ((695, 750), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""lr"""', '(0.001)', '"""learning rate"""'], {}), "('lr', 0.001, 'learning rate')\n", (720, 750), 
True, 'import tensorflow as tf\n'), ((752, 842), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""save_path"""', '"""checkpoint/cws.ckpt/"""', '"""new model save path"""'], {}), "('save_path', 'checkpoint/cws.ckpt/',\n 'new model save path')\n", (778, 842), True, 'import tensorflow as tf\n'), ((5400, 5460), 'cws.data.Data', 'Data', ([], {'dict_path': 'FLAGS.dict_path', 'train_data': 'FLAGS.train_data'}), '(dict_path=FLAGS.dict_path, train_data=FLAGS.train_data)\n', (5404, 5460), False, 'from cws.data import Data\n'), ((6262, 6274), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (6272, 6274), True, 'import tensorflow as tf\n'), ((1175, 1191), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1189, 1191), True, 'import tensorflow as tf\n'), ((1256, 1281), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1266, 1281), True, 'import tensorflow as tf\n'), ((1887, 1917), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(10)'}), '(max_to_keep=10)\n', (1901, 1917), True, 'import tensorflow as tf\n'), ((1300, 1333), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1331, 1333), True, 'import tensorflow as tf\n'), ((2141, 2152), 'time.time', 'time.time', ([], {}), '()\n', (2150, 2152), False, 'import time\n'), ((5065, 5120), 'tensorflow.contrib.crf.viterbi_decode', 'crf.viterbi_decode', (['tf_unary_scores_', 'transition_params'], {}), '(tf_unary_scores_, transition_params)\n', (5083, 5120), False, 'from tensorflow.contrib import crf\n'), ((5203, 5233), 'numpy.equal', 'np.equal', (['viterbi_sequence', 'y_'], {}), '(viterbi_sequence, y_)\n', (5211, 5233), True, 'import numpy as np\n'), ((3633, 3644), 'time.time', 'time.time', ([], {}), '()\n', (3642, 3644), False, 'import time\n')] |
from stream import Stream
from error_handler import none, eof, expected
# Token key that terminates a top-level expression; parse() passes it to
# Parser as the `end` marker.
EOL = ';'
class Parser:
    """Recursive-descent expression parser over a token Stream.

    ``end`` is the set of token keys (a string or a list) that terminates the
    expression currently being parsed; sub-expressions get their own Parser
    sharing the same token stream but with a different ``end``.
    """
    def __init__(self, tokens, end):
        self.tokens = tokens
        self.end = end

    def get_expression(self, prev):
        """Parse one expression, threading the previously parsed node through.

        Returns a nested tuple AST node, or ``none`` when nothing valid
        could be parsed.
        """
        if self.error(self.end):
            return none
        key, value = self.tokens.next
        # Terminator for this level: leave it unconsumed, yield what we have.
        if key in self.end:
            return prev
        self.tokens.get()
        if key in ['number', 'string', 'symbol']:
            # A literal/identifier becomes the new "previous" operand.
            return self.get_expression((key, value))
        elif key == 'operation':
            # Binary operation: prev is the left operand, parse the right one.
            second_arg = self.get_expression(none)
            return self.get_expression(("operation", value, prev, second_arg))
        elif key == '(':
            args = self.expression_list_from_next(')', ',')
            if prev == none:
                # "(args) body"-style function literal.
                func_body = self.expression_list(';', '}')
                return self.get_expression(("function", args or [], func_body))
            else:
                # An expression preceded the parenthesis: a call.
                return self.get_expression(("call", prev, args))
        elif key == ':':
            # Assignment; the target must be a plain symbol.
            if prev == none:
                expected('nothing', 'variable')
                return none
            if prev[0] != 'symbol':
                expected(prev[0], 'variable')
                return none
            second_arg = self.get_expression(none)
            return self.get_expression(("assignment", prev, second_arg))
        elif key == "{":
            # Brace block with no preceding expression.
            func_body = self.expression_list_from_next('}', ';')
            return self.get_expression(("global function", func_body))
        else:
            return none

    def expression_list_from_next(self, end, seperation):
        """Parse ``seperation``-separated expressions until ``end``.

        Unlike expression_list, the current lookahead token has not been
        consumed yet when this is called.
        """
        ret = []
        if self.error(end):
            return ret
        (key, value) = self.tokens.next
        if key != end:
            list_parser = Parser(self.tokens, [seperation, end])
            while key != end:
                exp = list_parser.get_expression(none)
                if exp != none:
                    ret.append(exp)
                (key, value) = self.tokens.get()
                if self.error(end):
                    break
        else:
            # Empty list: just consume the closing token.
            self.tokens.get()
        return ret

    def expression_list(self, seperation, end):
        """Parse a ``seperation``-separated expression list, consuming the
        current lookahead token up front (used for function bodies)."""
        ret = []
        if self.error(end):
            return ret
        (key, value) = self.tokens.next
        self.tokens.get()
        if key != end:
            list_parser = Parser(self.tokens, [seperation, end])
            while key != end:
                exp = list_parser.get_expression(none)
                if exp != none:
                    ret.append(exp)
                (key, value) = self.tokens.next
                self.tokens.get()
                if self.error(end):
                    break
        return ret

    def error(self, looking_for):
        """Report EOF via error_handler.eof; True when the stream is exhausted."""
        if self.tokens.next is None:
            eof(looking_for)
            return True
        return False
def parse(tokens):
    """Parse a flat token list into a list of top-level expression nodes."""
    top_level = Parser(Stream(tokens), EOL)
    expressions = []
    while top_level.tokens.next is not None:
        expression = top_level.get_expression(none)
        if expression != none:
            expressions.append(expression)
        top_level.tokens.get()
    return expressions
"error_handler.eof",
"error_handler.expected",
"stream.Stream"
] | [((2279, 2293), 'stream.Stream', 'Stream', (['tokens'], {}), '(tokens)\n', (2285, 2293), False, 'from stream import Stream\n'), ((2195, 2211), 'error_handler.eof', 'eof', (['looking_for'], {}), '(looking_for)\n', (2198, 2211), False, 'from error_handler import none, eof, expected\n'), ((874, 905), 'error_handler.expected', 'expected', (['"""nothing"""', '"""variable"""'], {}), "('nothing', 'variable')\n", (882, 905), False, 'from error_handler import none, eof, expected\n'), ((953, 982), 'error_handler.expected', 'expected', (['prev[0]', '"""variable"""'], {}), "(prev[0], 'variable')\n", (961, 982), False, 'from error_handler import none, eof, expected\n')] |
import os
import shutil
import subprocess
import shlex
import logging
import jinja2
from .config import Config
from .files import file_class_for_path, ErrorFile
from .jinja import default_extensions, default_filters
from .jinja.exceptions import ReservedVariableError
from .exceptions import BuildError
from .checks.favicon import FaviconCheck
from .checks.issues import Issues
# Use the module name (not __file__) so the logger participates in the
# standard dotted-name logger hierarchy and handler propagation.
logger = logging.getLogger(__name__)
class Combine:
    """Renders one or more content directories into a static output directory.

    Wraps config loading, a shared Jinja environment, the build loop and
    post-build checks.
    """

    def __init__(self, config_path, env=None, variables=None):
        # `variables` used to default to a shared mutable dict (`{}`);
        # a fresh dict per instance avoids cross-instance state leaks.
        self.config_path = config_path
        self.env = env
        self.variables = variables if variables is not None else {}
        self.load()

    def load(self):
        """Read the config and (re)build the Jinja environment."""
        self.config = Config(self.config_path)
        self.output_path = self.config.output_path
        self.content_paths = self.config.content_paths
        self.content_directories = [ContentDirectory(x) for x in self.content_paths]
        choice_loaders = [
            jinja2.FileSystemLoader(x.path) for x in self.content_directories
        ]
        self.jinja_environment = jinja2.Environment(
            loader=jinja2.ChoiceLoader(choice_loaders),
            autoescape=jinja2.select_autoescape(["html", "xml"]),
            undefined=jinja2.StrictUndefined,  # make sure variables exist
            extensions=default_extensions,
        )
        self.jinja_environment.globals.update(self.get_jinja_variables())
        self.jinja_environment.filters.update(default_filters)

    def get_jinja_variables(self):
        """
        1. combine.yml variables
        2. Combine object variables (CLI, Python, etc.) that should override
        3. Built-in variables
        """
        variables = self.config.variables
        variables.update(self.variables)
        if "env" in variables:
            raise ReservedVariableError("env")
        variables["env"] = self.env
        return variables

    def reload(self):
        """Reload the config and entire jinja environment"""
        self.load()

    def clean(self):
        """Delete the output directory and everything in it."""
        if os.path.exists(self.output_path):
            shutil.rmtree(self.output_path)

    def build(self, only_paths=None, check=True):
        """Render all files (or only ``only_paths``) into the output directory.

        Failed pages are replaced by an ErrorFile render so the error is
        visible in the output; BuildError is raised at the end if any
        file failed.
        """
        build_errors = {}
        if not only_paths:
            # completely wipe it
            self.clean()
        if not os.path.exists(self.output_path):
            os.mkdir(self.output_path)
        paths_rendered = []
        files_rendered = []
        for file in self.iter_files():
            if (
                file.output_relative_path
                and file.output_relative_path not in paths_rendered
            ):
                if only_paths and file.path not in only_paths:
                    continue
                try:
                    file.render(
                        output_path=self.output_path,
                        jinja_environment=self.jinja_environment,
                    )
                    files_rendered.append(file)
                except Exception as e:
                    build_errors[file.path] = e
                    ErrorFile(file.path, file.content_directory, error=e).render(
                        output_path=self.output_path,
                        jinja_environment=self.jinja_environment,
                    )
                paths_rendered.append(file.output_relative_path)
        if not only_paths:
            self.run_build_steps()
        if build_errors:
            for file_path, error in build_errors.items():
                logger.error(f"Error building {file_path}", exc_info=error)
            raise BuildError()
        if check:
            self.check_build(files=files_rendered, site_checks=(not only_paths))

    def check_build(self, files=None, site_checks=False):
        """Run per-file and (when ``site_checks``) site-wide checks.

        ``files`` used to default to a shared mutable list (`[]`).
        """
        files = files if files is not None else []
        self.issues = Issues()
        if site_checks:
            for issue in FaviconCheck(site_dir=self.output_path).run():
                self.issues.append(issue)
        if self.issues:
            self.issues.print(f"Issues across your site")
        for file in files:
            # TODO could pass check settings here, just don't know what they should look like
            for issue in file.check_output():
                self.issues.append(issue)

    def run_build_steps(self):
        """Run each configured shell step; a non-zero exit aborts the build."""
        for step in self.config.steps:
            subprocess.run(shlex.split(step["run"]), check=True)

    def get_related_files(self, content_relative_path):
        """Return every file that is, or references, ``content_relative_path``."""
        files = []
        for file in self.iter_files():
            if (
                content_relative_path in file.references
                or file.content_relative_path == content_relative_path
            ):
                # TODO could this include duplicates? in the content-relative sense?
                files.append(file)
        return files

    def content_relative_path(self, path):
        """Return ``path`` relative to the content dir containing it, else None."""
        # NOTE(review): the commonpath/getcwd comparison looks fragile for
        # content dirs outside the working directory — confirm intent.
        for content_path in self.content_paths:
            if (
                os.path.commonpath([content_path, path]) != os.getcwd()
                and os.getcwd() in content_path
            ):
                return os.path.relpath(path, content_path)

    def is_in_output_path(self, path):
        """True when ``path`` appears to fall under the output directory."""
        return (
            os.path.commonpath([self.output_path, os.path.abspath(path)]) != os.getcwd()
        )

    def iter_files(self):
        """Yield every file from every content directory, in order."""
        for content_directory in self.content_directories:
            for file in content_directory.files:
                yield file
class ContentDirectory:
    """A directory of source content files, each wrapped in its File class."""

    def __init__(self, path):
        # Raise a real exception instead of `assert`: asserts are stripped
        # when Python runs with -O, silently skipping this validation.
        if not os.path.exists(path):
            raise FileNotFoundError(f"Path does not exist: {path}")
        self.path = path
        self.load_files()

    def load_files(self):
        """Walk the directory (following symlinks) and wrap every file found."""
        self.files = []
        for root, dirs, files in os.walk(self.path, followlinks=True):
            for file in files:
                file_path = os.path.join(root, file)
                self.files.append(file_class_for_path(file_path)(file_path, self))

    def file_classes(self):
        """Return the distinct wrapper classes present in this directory."""
        return {x.__class__ for x in self.files}
| [
"logging.getLogger",
"os.path.exists",
"shlex.split",
"os.path.join",
"jinja2.ChoiceLoader",
"os.getcwd",
"jinja2.select_autoescape",
"os.mkdir",
"os.path.commonpath",
"shutil.rmtree",
"os.path.abspath",
"jinja2.FileSystemLoader",
"os.walk",
"os.path.relpath"
] | [((391, 418), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (408, 418), False, 'import logging\n'), ((1993, 2025), 'os.path.exists', 'os.path.exists', (['self.output_path'], {}), '(self.output_path)\n', (2007, 2025), False, 'import os\n'), ((5402, 5422), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (5416, 5422), False, 'import os\n'), ((5591, 5627), 'os.walk', 'os.walk', (['self.path'], {'followlinks': '(True)'}), '(self.path, followlinks=True)\n', (5598, 5627), False, 'import os\n'), ((914, 945), 'jinja2.FileSystemLoader', 'jinja2.FileSystemLoader', (['x.path'], {}), '(x.path)\n', (937, 945), False, 'import jinja2\n'), ((2039, 2070), 'shutil.rmtree', 'shutil.rmtree', (['self.output_path'], {}), '(self.output_path)\n', (2052, 2070), False, 'import shutil\n'), ((2250, 2282), 'os.path.exists', 'os.path.exists', (['self.output_path'], {}), '(self.output_path)\n', (2264, 2282), False, 'import os\n'), ((2296, 2322), 'os.mkdir', 'os.mkdir', (['self.output_path'], {}), '(self.output_path)\n', (2304, 2322), False, 'import os\n'), ((5147, 5158), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5156, 5158), False, 'import os\n'), ((1063, 1098), 'jinja2.ChoiceLoader', 'jinja2.ChoiceLoader', (['choice_loaders'], {}), '(choice_loaders)\n', (1082, 1098), False, 'import jinja2\n'), ((1123, 1164), 'jinja2.select_autoescape', 'jinja2.select_autoescape', (["['html', 'xml']"], {}), "(['html', 'xml'])\n", (1147, 1164), False, 'import jinja2\n'), ((4256, 4280), 'shlex.split', 'shlex.split', (["step['run']"], {}), "(step['run'])\n", (4267, 4280), False, 'import shlex\n'), ((4977, 5012), 'os.path.relpath', 'os.path.relpath', (['path', 'content_path'], {}), '(path, content_path)\n', (4992, 5012), False, 'import os\n'), ((5688, 5712), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (5700, 5712), False, 'import os\n'), ((4835, 4875), 'os.path.commonpath', 'os.path.commonpath', (['[content_path, path]'], {}), 
'([content_path, path])\n', (4853, 4875), False, 'import os\n'), ((4879, 4890), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4888, 4890), False, 'import os\n'), ((4911, 4922), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4920, 4922), False, 'import os\n'), ((5120, 5141), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (5135, 5141), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# proto_datetime.py
import logging
import datetime
from DateTime.python.DateTime_pb2 import DateTime
def create_proto_datetime(datetime_value: "datetime.datetime | None" = None):
    """Create a DateTime protobuf message for the given (or current) time.

    Mirrors every field declared in the message DESCRIPTOR onto the
    ``datetime.datetime`` attribute of the same name.

    :param datetime_value: optional custom timestamp; anything that is not a
        ``datetime.datetime`` (including the default ``None``) is replaced
        by ``datetime.datetime.now()``.
    :return: a populated DateTime protobuf message
    """
    datetime_msg = DateTime()
    if not isinstance(datetime_value, datetime.datetime):
        datetime_value = datetime.datetime.now()
    # The message schema drives the copy: every proto field name is assumed
    # to match a datetime attribute (year, month, ..., isoformat).
    for field in datetime_msg.DESCRIPTOR.fields_by_name.keys():
        try:  # set the date
            attr = getattr(datetime_value, field)
            if field == 'isoformat':  # callable attr
                setattr(datetime_msg, field, attr())
            else:  # it is a property
                setattr(datetime_msg, field, attr)
        except Exception as e:
            # Best-effort: a field that fails to copy is logged and skipped.
            logging.exception(e)
    return datetime_msg  # -> protobuf.message object
def parse_proto_datetime(proto_msg) -> datetime.datetime:
    """Convert a DateTime protobuf message back into a datetime.datetime.

    Falls back to ``datetime.datetime(1, 1, 1)`` (and logs the exception)
    when the message is missing fields or holds out-of-range values.
    """
    fallback = datetime.datetime(1, 1, 1)
    try:
        return datetime.datetime(
            proto_msg.year,
            proto_msg.month,
            proto_msg.day,
            proto_msg.hour,
            proto_msg.minute,
            proto_msg.second,
            proto_msg.microsecond,
        )
    except Exception as e:
        logging.exception(e)
        return fallback
if __name__ == '__main__':
    # Round-trip demo: build a message from "now", then parse it back.
    proto_time = DateTime()
    proto_time.CopyFrom(create_proto_datetime())
    print(proto_time)
    date_time = parse_proto_datetime(proto_time)
    print(date_time)
| [
"datetime.datetime",
"datetime.datetime.now",
"DateTime.python.DateTime_pb2.DateTime",
"logging.exception"
] | [((497, 507), 'DateTime.python.DateTime_pb2.DateTime', 'DateTime', ([], {}), '()\n', (505, 507), False, 'from DateTime.python.DateTime_pb2 import DateTime\n'), ((1318, 1344), 'datetime.datetime', 'datetime.datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1335, 1344), False, 'import datetime\n'), ((1719, 1729), 'DateTime.python.DateTime_pb2.DateTime', 'DateTime', ([], {}), '()\n', (1727, 1729), False, 'from DateTime.python.DateTime_pb2 import DateTime\n'), ((591, 614), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (612, 614), False, 'import datetime\n'), ((1448, 1562), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', 'proto_msg.hour', 'proto_msg.minute', 'proto_msg.second', 'proto_msg.microsecond'], {}), '(year, month, day, proto_msg.hour, proto_msg.minute,\n proto_msg.second, proto_msg.microsecond)\n', (1465, 1562), False, 'import datetime\n'), ((1631, 1651), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (1648, 1651), False, 'import logging\n'), ((997, 1017), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (1014, 1017), False, 'import logging\n')] |
from datetime import datetime, timedelta
from auth import *
from edit import video_length_seconds, get_total_length, change_fps, is_copyright, merge_videos
from upload_video import *
from random import randrange
from operator import itemgetter
import os
# if __name__ == '__main__'
# Shared credentials/config object (Twitch + upload APIs) used by every
# request below.
auth = createAuthObject()
def get_clips_by_cat(daysdiff, category):
    """Return up to 100 top clips for a category created in the last ``daysdiff`` days."""
    window_start = datetime.utcnow() - timedelta(days=daysdiff)
    started_at = window_start.isoformat("T") + "Z"
    response = requests.get(
        auth.twitch_api_endpoint + f'?game_id={categories[category]}&first=100&started_at={started_at}',
        headers=auth.twitchHeader)
    return response.json()["data"]
def get_clips_by_streamer(streamer, daysdiff):
    """Return up to 100 clips for one streamer created in the last ``daysdiff`` days."""
    window_start = datetime.utcnow() - timedelta(days=daysdiff)
    started_at = window_start.isoformat("T") + "Z"
    ended_at = datetime.utcnow().isoformat("T") + "Z"
    # Resolve the login name to a numeric broadcaster id first.
    user_lookup = requests.get("https://api.twitch.tv/helix/users?login={}".format(streamer), headers=auth.twitchHeader)
    broadcaster_id = user_lookup.json()["data"][0]["id"]
    clips_response = requests.get(
        auth.twitch_api_endpoint + f'?broadcaster_id={broadcaster_id}&first=100&started_at={started_at}&ended_at={ended_at}',
        headers=auth.twitchHeader)
    return clips_response.json()["data"]
def filter_clips(clips, language):
    """Annotate clips with a direct .mp4 URL, strip unneeded fields, and keep
    only clips whose language matches ``language``.

    Mutates the clip dicts in place before filtering.
    """
    for clip in clips:
        clip["video_url"] = clip["thumbnail_url"].split("-preview")[0] + ".mp4"
        for unwanted in unnecessary_stats:
            del clip[unwanted]
    return [clip for clip in clips if language in clip["language"]]
def downloadfile(name, url, s, broadcaster_name, shoutouts):
    """Download one clip and, if it passes the copyright check, transcode it.

    The raw download goes to ``videos/{name}.mp4``; clips not flagged as
    copyrighted get their broadcaster added to ``shoutouts``, are re-encoded
    to ``videos/out_{name}.mp4`` with HandBrake, and the raw file is removed.
    """
    r = s.get(url)
    # Context manager guarantees the handle is closed even if a chunk write
    # fails (the original relied on an explicit close()).
    with open(f'videos/{name}.mp4', 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024 * 1024):
            if chunk:
                f.write(chunk)
    if not is_copyright(name, auth.copyright_recognizer):
        shoutouts.add(broadcaster_name.lower())
        os.system(f'HandBrakeCLI -i videos/{name}.mp4 -o videos/out_{name}.mp4 --preset="Vimeo YouTube HQ 1080p60" --width 1920 --height 1080')
        os.remove(f'videos/{name}.mp4')
def sort_clips_chronologically(arg):
    """Sort clips in place by their ISO-8601 ``created_at`` timestamp (oldest first)."""
    # operator.itemgetter is the idiomatic (and faster) key for dict lookups.
    arg.sort(key=itemgetter("created_at"))
def sort_clips_popularity(arg):
    """Sort clips in place by ``view_count``, least-viewed first."""
    arg.sort(key=itemgetter("view_count"))
def make_video_by_cat(category, daysdiff = 1, sort_chron = False, uploadtime_diff = 1, custom_title=""):
    """Build and upload a highlight compilation for one Twitch category.

    Downloads top English clips from the last ``daysdiff`` days, stops once
    the compilation reaches the randomized 270-319 second target, merges
    them and schedules the upload ``uploadtime_diff`` hours from now.
    """
    clips = get_clips_by_cat(daysdiff, category)
    shoutouts = set()
    filtered_clips = filter_clips(clips, "en")
    if custom_title:
        title = custom_title
    else:
        # Default title: the first clip's title plus the category name.
        title = filtered_clips[0]["title"].title() + " - {} Highlights".format(category)
    if sort_chron:
        sort_clips_chronologically(filtered_clips)
    print(json.dumps(filtered_clips, indent = 4))
    with requests.Session() as s:
        # Randomize the target length so compilations do not look uniform.
        maxLength = 270 + randrange(50)
        # Filenames start at 10 — presumably so lexicographic order matches
        # download order for the merge step; confirm.
        for i, clip in enumerate(filtered_clips, 10):
            length = get_total_length()
            print(length)
            if length >= maxLength:
                break
            downloadfile(i, clip["video_url"], s, clip["broadcaster_name"], shoutouts)
    print(shoutouts)
    output_name = "video_output.mp4"
    merge_videos(output_name)
    upload_video(output_name, create_description(shoutouts), title, category, timedelta(hours=uploadtime_diff))
def make_video_by_streamer(streamers, category="", daysdiff=1, sort_chron=True, uploadtime_diff=1, custom_title=""):
    """Build and upload a highlight compilation for one or more streamers.

    :param streamers: iterable of Twitch login names.
    :param category: optional category name used to filter the clips.
    :param daysdiff: look-back window in days.
    :param sort_chron: keep only the first 120 clips and sort them chronologically.
    :param uploadtime_diff: hours from now to schedule the upload. BUG FIX:
        the default used to be ``timedelta(hours=1)``, which later crashed in
        ``timedelta(hours=uploadtime_diff)`` (a timedelta is not a valid
        ``hours`` value); it is now the plain number 1, matching
        make_video_by_cat.
    :param custom_title: overrides the generated video title.
    """
    clips = []
    shoutouts = set()
    for streamer in streamers:
        clips += get_clips_by_streamer(streamer, daysdiff)
    if category:
        clips = [clip for clip in clips if categories[category] == clip["game_id"]]
    if sort_chron:
        clips = clips[:120]
        sort_clips_chronologically(clips)
    filtered_clips = filter_clips(clips, "en")
    print(json.dumps(filtered_clips, indent = 4))
    if custom_title:
        title = custom_title
    else:
        title = filtered_clips[0]["title"].title() + " - {} Highlights".format(category)
    # Randomized 270-319 second target length for the compilation.
    maxLength = 270 + randrange(50)
    with requests.Session() as s:
        for i, clip in enumerate(filtered_clips, 10):
            length = get_total_length()
            print(length)
            if length >= maxLength:
                break
            downloadfile(i, clip["video_url"], s, clip["broadcaster_name"], shoutouts)
    print(shoutouts)
    output_name = "video_output.mp4"
    merge_videos(output_name)
    upload_video(output_name, create_description(shoutouts), title, category, timedelta(hours=uploadtime_diff))
# Twitch category name -> game_id, as used by the Helix clips endpoint.
# (A duplicate "Tarkov" entry with the identical value was removed.)
categories = {
    "League of Legends": "21779",
    "Just Chatting": "509658",
    "Fortnite": "33214",
    "Call of Duty Modern Warfare": "512710",
    "Dota 2": "29595",
    "GTA V": "32982",
    "Minecraft": "27471",
    "CSGO": "32399",
    "Hyperscape": "518306",
    "Valorant": "516575",
    "WoW": "18122",
    "Tarkov": "491931",
    "Apex Legends": "511224",
    "Fall Guys": "512980",
    "Among Us": "510218"
}
# Clip fields dropped by filter_clips before further processing.
unnecessary_stats = ["embed_url", "creator_id", "creator_name", "thumbnail_url"]
# Daily schedule: each call downloads, merges and schedules one compilation;
# uploads are staggered via uploadtime_diff (hours from now).
make_video_by_cat("Among Us", uploadtime_diff=2.5)
make_video_by_cat("Just Chatting", uploadtime_diff=5)
make_video_by_cat("Valorant", uploadtime_diff=7.5);
#make_video_by_cat("Among Us", uploadtime_diff=11, daysdiff=30, custom_title = "Most-watched Among Us clips of the month")
# CAUTION: powers the machine off once all uploads are queued.
os.system("shutdown now")
#make_video_by_cat("Minecraft", uploadtime_diff=16)
#make_video_by_streamer(["greekgodx"], daysdiff = 30, sort_chron = False, custom_title = "greekgodx shows his thick legs", uploadtime_diff=10)
#make_video_by_streamer(["tfue"], daysdiff = 2, sort_chron = False, custom_title="tfue and cloak DOMINATE Warzone tournament", uploadtime_diff = 13, category = "Call of Duty Modern Warfare")
#make_video_by_streamer(["thor"], daysdiff = 30, sort_chron = False, custom_title = "HYPERSCAPE CLIPS THAT MADE THOR FAMOUS", uploadtime_diff=6)
| [
"edit.merge_videos",
"datetime.datetime.utcnow",
"random.randrange",
"edit.get_total_length",
"edit.is_copyright",
"os.system",
"datetime.timedelta",
"os.remove"
] | [((5314, 5339), 'os.system', 'os.system', (['"""shutdown now"""'], {}), "('shutdown now')\n", (5323, 5339), False, 'import os\n'), ((758, 775), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (773, 775), False, 'from datetime import datetime, timedelta\n'), ((2016, 2047), 'os.remove', 'os.remove', (['f"""videos/{name}.mp4"""'], {}), "(f'videos/{name}.mp4')\n", (2025, 2047), False, 'import os\n'), ((3108, 3133), 'edit.merge_videos', 'merge_videos', (['output_name'], {}), '(output_name)\n', (3120, 3133), False, 'from edit import video_length_seconds, get_total_length, change_fps, is_copyright, merge_videos\n'), ((3351, 3369), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (3360, 3369), False, 'from datetime import datetime, timedelta\n'), ((4358, 4383), 'edit.merge_videos', 'merge_videos', (['output_name'], {}), '(output_name)\n', (4370, 4383), False, 'from edit import video_length_seconds, get_total_length, change_fps, is_copyright, merge_videos\n'), ((330, 347), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (345, 347), False, 'from datetime import datetime, timedelta\n'), ((350, 374), 'datetime.timedelta', 'timedelta', ([], {'days': 'daysdiff'}), '(days=daysdiff)\n', (359, 374), False, 'from datetime import datetime, timedelta\n'), ((665, 682), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (680, 682), False, 'from datetime import datetime, timedelta\n'), ((685, 709), 'datetime.timedelta', 'timedelta', ([], {'days': 'daysdiff'}), '(days=daysdiff)\n', (694, 709), False, 'from datetime import datetime, timedelta\n'), ((1771, 1816), 'edit.is_copyright', 'is_copyright', (['name', 'auth.copyright_recognizer'], {}), '(name, auth.copyright_recognizer)\n', (1783, 1816), False, 'from edit import video_length_seconds, get_total_length, change_fps, is_copyright, merge_videos\n'), ((1874, 2020), 'os.system', 'os.system', (['f"""HandBrakeCLI -i videos/{name}.mp4 -o videos/out_{name}.mp4 
--preset="Vimeo YouTube HQ 1080p60" --width 1920 --height 1080"""'], {}), '(\n f\'HandBrakeCLI -i videos/{name}.mp4 -o videos/out_{name}.mp4 --preset="Vimeo YouTube HQ 1080p60" --width 1920 --height 1080\'\n )\n', (1883, 2020), False, 'import os\n'), ((3212, 3244), 'datetime.timedelta', 'timedelta', ([], {'hours': 'uploadtime_diff'}), '(hours=uploadtime_diff)\n', (3221, 3244), False, 'from datetime import datetime, timedelta\n'), ((3982, 3995), 'random.randrange', 'randrange', (['(50)'], {}), '(50)\n', (3991, 3995), False, 'from random import randrange\n'), ((4462, 4494), 'datetime.timedelta', 'timedelta', ([], {'hours': 'uploadtime_diff'}), '(hours=uploadtime_diff)\n', (4471, 4494), False, 'from datetime import datetime, timedelta\n'), ((2766, 2779), 'random.randrange', 'randrange', (['(50)'], {}), '(50)\n', (2775, 2779), False, 'from random import randrange\n'), ((2855, 2873), 'edit.get_total_length', 'get_total_length', ([], {}), '()\n', (2871, 2873), False, 'from edit import video_length_seconds, get_total_length, change_fps, is_copyright, merge_videos\n'), ((4105, 4123), 'edit.get_total_length', 'get_total_length', ([], {}), '()\n', (4121, 4123), False, 'from edit import video_length_seconds, get_total_length, change_fps, is_copyright, merge_videos\n')] |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics support via prometheus.
NOTE(ewiseblatt): 20180102
This does not work in practice because the job is short lived so restarts
frequently with low metric counts. Prometheus wont scrape it so we need
the gateway server, which does not aggregate. Since it has no concept of
job lifetime, it cannot detect restarts. In pratice many of the counters
are 1 per run. The gateway server always sees 1 and thinks nothing has
changed.
I'm leaving this around for the time being in case inspiration hits for
how to work around this. Otherwise, use the file based metrics then
upload them into some other database or metrics system via post-processing.
"""
from buildtool.base_metrics import (
BaseMetricsRegistry,
Counter,
Gauge,
Timer,
MetricFamily)
from buildtool.util import add_parser_argument
from prometheus_client.core import (
GaugeMetricFamily,
CounterMetricFamily,
REGISTRY)
from prometheus_client.exposition import push_to_gateway
class PrometheusMetricFamily(MetricFamily):
  """Handles conversion into prometheus format."""

  def __init__(self, registry, name, description, factory, family_type,
               label_names):
    super(PrometheusMetricFamily, self).__init__(
        registry, name, description, factory, family_type)
    self.__label_names = list(label_names)
    # Fixed label ordering so every instance emits values in one order.
    self.__label_index = {key: index for index, key in enumerate(label_names)}
    # Prometheus metric names cannot contain '.', so map it to ':'.
    self.__prometheus_name = 'buildtool:{name}'.format(
        name=name.replace('.', ':'))

  def instance_labels(self, instance):
    """Returns a list of label values in the expected order."""
    result = [''] * len(self.__label_names)
    for key, value in instance.labels.items():
      result[self.__label_index[key]] = str(value)
    return result

  def encode_timer(self):
    """Encodes a timer as a pair of counters."""
    count_member = CounterMetricFamily(
        self.__prometheus_name + '_count',
        self.description, labels=self.__label_names)
    total_member = CounterMetricFamily(
        self.__prometheus_name + '_totalSeconds',
        self.description, labels=self.__label_names)
    for instance in self.instance_list:
      labels = self.instance_labels(instance)
      count_member.add_metric(labels=labels, value=instance.count)
      total_member.add_metric(labels=labels, value=instance.total_seconds)
    return [count_member, total_member]

  def encode(self):
    """Encodes metrics into the prometheus registry."""
    if self.family_type == 'TIMER':
      return self.encode_timer()
    if self.family_type == 'COUNTER':
      prometheus_family = CounterMetricFamily
    elif self.family_type == 'GAUGE':
      prometheus_family = GaugeMetricFamily
    else:
      raise ValueError('Unsupported type {0}'.format(self.family_type))
    # Pass the family description (was hard-coded to '') so counters and
    # gauges carry HELP text just like the timer encoding above.
    member = prometheus_family(
        self.__prometheus_name, self.description, labels=self.__label_names)
    for instance in self.instance_list:
      labels = self.instance_labels(instance)
      member.add_metric(labels=labels, value=instance.value)
    return [member]
class PrometheusMetricsRegistry(BaseMetricsRegistry):
  """Implements MetricsRegistry using Prometheus."""
  # pylint: disable=too-few-public-methods

  def __init__(self, options):
    super(PrometheusMetricsRegistry, self).__init__(options)
    if not options.monitoring_enabled:
      # NOTE(review): when monitoring is disabled, __push_gateway is never
      # set, so _do_flush_updated_metrics would fail if it were still
      # reached — presumably the base class guards that; confirm.
      return
    self.__push_gateway = options.prometheus_gateway_netloc
    # Register ourselves so prometheus invokes collect() on scrape/push.
    REGISTRY.register(self)

  def _do_make_counter_family(self, name, description, label_names, value_type):
    """Implements interface."""
    return PrometheusMetricFamily(
        self, name, description, Counter, MetricFamily.COUNTER, label_names)

  def _do_make_gauge_family(self, name, description, label_names, value_type):
    """Implements interface."""
    return PrometheusMetricFamily(
        self, name, description, Gauge, MetricFamily.GAUGE, label_names)

  def _do_make_timer_family(self, name, description, label_names):
    """Implements interface."""
    return PrometheusMetricFamily(
        self, name, description, Timer, MetricFamily.TIMER, label_names)

  def _do_flush_updated_metrics(self, updated_metrics):
    """Pushes metrics to prometheus."""
    # See the module docstring: the pushgateway does not aggregate across
    # this short-lived job's restarts, so low counts never appear to change.
    push_to_gateway(self.__push_gateway, "buildtool", REGISTRY)

  def collect(self):
    """Implements prometheus REGISTRY collection interface."""
    all_members = []
    for family in self.metric_family_list:
      all_members.extend(family.encode())
    for member in all_members:
      yield member
def init_argument_parser(parser, defaults):
  """Initialize argument parser with prometheus parameters."""
  gateway_help = 'Location of the prometheus gateway to push to.'
  add_parser_argument(
      parser, 'prometheus_gateway_netloc', defaults, 'localhost:9091',
      help=gateway_help)
| [
"prometheus_client.core.REGISTRY.register",
"buildtool.util.add_parser_argument",
"prometheus_client.core.CounterMetricFamily",
"prometheus_client.exposition.push_to_gateway"
] | [((5200, 5343), 'buildtool.util.add_parser_argument', 'add_parser_argument', (['parser', '"""prometheus_gateway_netloc"""', 'defaults', '"""localhost:9091"""'], {'help': '"""Location of the prometheus gateway to push to."""'}), "(parser, 'prometheus_gateway_netloc', defaults,\n 'localhost:9091', help='Location of the prometheus gateway to push to.')\n", (5219, 5343), False, 'from buildtool.util import add_parser_argument\n'), ((2456, 2559), 'prometheus_client.core.CounterMetricFamily', 'CounterMetricFamily', (["(self.__prometheus_name + '_count')", 'self.description'], {'labels': 'self.__label_names'}), "(self.__prometheus_name + '_count', self.description,\n labels=self.__label_names)\n", (2475, 2559), False, 'from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY\n'), ((2592, 2703), 'prometheus_client.core.CounterMetricFamily', 'CounterMetricFamily', (["(self.__prometheus_name + '_totalSeconds')", 'self.description'], {'labels': 'self.__label_names'}), "(self.__prometheus_name + '_totalSeconds', self.\n description, labels=self.__label_names)\n", (2611, 2703), False, 'from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY\n'), ((4008, 4031), 'prometheus_client.core.REGISTRY.register', 'REGISTRY.register', (['self'], {}), '(self)\n', (4025, 4031), False, 'from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY\n'), ((4787, 4846), 'prometheus_client.exposition.push_to_gateway', 'push_to_gateway', (['self.__push_gateway', '"""buildtool"""', 'REGISTRY'], {}), "(self.__push_gateway, 'buildtool', REGISTRY)\n", (4802, 4846), False, 'from prometheus_client.exposition import push_to_gateway\n')] |
import pyvista
import vtk
import numpy as np
#
grid = pyvista.UniformGrid()
#
vtkgrid = vtk.vtkImageData()
grid = pyvista.UniformGrid(vtkgrid)
#
dims = (10, 10, 10)
grid = pyvista.UniformGrid(dims)
#
spacing = (2, 1, 5)
grid = pyvista.UniformGrid(dims, spacing)
#
origin = (10, 35, 50)
grid = pyvista.UniformGrid(dims, spacing, origin)
| [
"vtk.vtkImageData",
"pyvista.UniformGrid"
] | [((54, 75), 'pyvista.UniformGrid', 'pyvista.UniformGrid', ([], {}), '()\n', (73, 75), False, 'import pyvista\n'), ((88, 106), 'vtk.vtkImageData', 'vtk.vtkImageData', ([], {}), '()\n', (104, 106), False, 'import vtk\n'), ((114, 142), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['vtkgrid'], {}), '(vtkgrid)\n', (133, 142), False, 'import pyvista\n'), ((172, 197), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['dims'], {}), '(dims)\n', (191, 197), False, 'import pyvista\n'), ((227, 261), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['dims', 'spacing'], {}), '(dims, spacing)\n', (246, 261), False, 'import pyvista\n'), ((293, 335), 'pyvista.UniformGrid', 'pyvista.UniformGrid', (['dims', 'spacing', 'origin'], {}), '(dims, spacing, origin)\n', (312, 335), False, 'import pyvista\n')] |
#!/usr/bin/python3
import os
from pathlib import Path
from pwd import getpwnam
from clic import pssh
import configparser
config = configparser.ConfigParser()
config.read('/etc/clic/clic.conf')
user = config['Daemon']['user']
def init(host, cpus, disk, mem, user=user):
# Copy executables in /etc/clic/ to node and run in shell expansion order
paths = [path for path in Path('/etc/clic').iterdir()]
paths.sort()
for path in paths:
if path.is_file() and os.access(str(path), os.X_OK):
dest = '/tmp/{0}'.format(path.parts[-1])
pssh.copy(user, user, host, str(path), dest)
pssh.run(user, user, host, 'sudo {0} {1} {2} {3}'.format(dest, cpus, disk, mem))
def main():
import argparse
import re
parser = argparse.ArgumentParser(description='Intitialize a node for use with clic by configuring its /etc/hosts and nfs. This script is run from the head node.')
from clic import version
parser.add_argument('-v', '--version', action='version', version=version.__version__)
parser.add_argument('userhost', metavar='USER@HOST', nargs=1, help='passwordless ssh exists both ways between USER@localhost and USER@HOST')
args = parser.parse_args()
[user, host] = args.userhost[0].split('@')
init(host, 0, 0, 0, user=user)
| [
"configparser.ConfigParser",
"argparse.ArgumentParser",
"pathlib.Path"
] | [((131, 158), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (156, 158), False, 'import configparser\n'), ((773, 936), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Intitialize a node for use with clic by configuring its /etc/hosts and nfs. This script is run from the head node."""'}), "(description=\n 'Intitialize a node for use with clic by configuring its /etc/hosts and nfs. This script is run from the head node.'\n )\n", (796, 936), False, 'import argparse\n'), ((379, 396), 'pathlib.Path', 'Path', (['"""/etc/clic"""'], {}), "('/etc/clic')\n", (383, 396), False, 'from pathlib import Path\n')] |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, TextAreaField, IntegerField
from wtforms.validators import DataRequired, EqualTo, ValidationError, Length
from app.models import Barber
class NewBarberForm(FlaskForm):
first_name = StringField('First Name', validators=[DataRequired()])
last_name = StringField('Last Name', validators=[DataRequired()])
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField('Repeat Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Add barber')
def validate_username(self, username):
user = Barber.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
class NewServiceForm(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
description = TextAreaField('Description', validators=[DataRequired(), Length(min=0, max=300)])
duration = IntegerField('Duration (minutes)', validators=[DataRequired()])
price = IntegerField('Price (RON)', validators=[DataRequired()])
submit = SubmitField('Add service')
| [
"app.models.Barber.query.filter_by",
"wtforms.validators.ValidationError",
"wtforms.SubmitField",
"wtforms.validators.EqualTo",
"wtforms.validators.Length",
"wtforms.validators.DataRequired"
] | [((655, 680), 'wtforms.SubmitField', 'SubmitField', (['"""Add barber"""'], {}), "('Add barber')\n", (666, 680), False, 'from wtforms import StringField, PasswordField, SubmitField, TextAreaField, IntegerField\n'), ((1250, 1276), 'wtforms.SubmitField', 'SubmitField', (['"""Add service"""'], {}), "('Add service')\n", (1261, 1276), False, 'from wtforms import StringField, PasswordField, SubmitField, TextAreaField, IntegerField\n'), ((842, 893), 'wtforms.validators.ValidationError', 'ValidationError', (['"""Please use a different username."""'], {}), "('Please use a different username.')\n", (857, 893), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n'), ((318, 332), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (330, 332), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n'), ((388, 402), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (400, 402), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n'), ((456, 470), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (468, 470), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n'), ((526, 540), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (538, 540), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n'), ((604, 618), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (616, 618), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n'), ((620, 639), 'wtforms.validators.EqualTo', 'EqualTo', (['"""password"""'], {}), "('password')\n", (627, 639), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n'), ((740, 786), 'app.models.Barber.query.filter_by', 'Barber.query.filter_by', ([], {'username': 'username.data'}), '(username=username.data)\n', (762, 786), False, 'from 
app.models import Barber\n'), ((972, 986), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (984, 986), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n'), ((1048, 1062), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1060, 1062), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n'), ((1064, 1086), 'wtforms.validators.Length', 'Length', ([], {'min': '(0)', 'max': '(300)'}), '(min=0, max=300)\n', (1070, 1086), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n'), ((1151, 1165), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1163, 1165), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n'), ((1220, 1234), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1232, 1234), False, 'from wtforms.validators import DataRequired, EqualTo, ValidationError, Length\n')] |
import os
import sys
import tkinter as tk
from configparser import ConfigParser
from tkinter import filedialog # for Python 3
from tkinter import messagebox
from config.Dialogs import Dialogs
from UI.helpers.open_folder import open_folder
# https://stackoverflow.com/questions/31170616/how-to-access-a-method-in-one-inherited-tkinter-class-from-another-inherited-tki
class SetupMenu(tk.Menu):
def __init__(self, parent, main):
tk.Menu.__init__(self, parent)
self.window = main
self.dialogs = Dialogs()
setupMenu = tk.Menu(parent, tearoff=False)
parent.add_cascade(label="Setup", underline=0, menu=setupMenu)
setupMenu.add_command(
label="Save as", command=self.save_as_DIALOGS)
setupMenu.add_command(label="Open", command=self.open_DIALOGS)
setupMenu.add_command(label="Save to defaults",
command=self.save_to_default_DIALOGS)
setupMenu.add_command(label="Reset to defaults",
command=self.reset_to_default_DIALOGS)
setupMenu.add_command(label="Defaults reset",
command=self.reset_default_DIALOGS)
setupMenu.add_separator()
setupMenu.add_command(label="Exit", underline=1, command=self.quit)
def quit(self):
sys.exit(0)
def save_as_DIALOGS(self):
window = self.window
DIALOGS_path = self.dialogs.get_dialogs_path_save()
if DIALOGS_path:
checkedboxes = list(window.checkbars.state())
source_path = window.source_path_entry.get()
target_path = window.target_path_entry.get()
similarity = float(window.similarity_entry.get())
self.dialogs.saving_dialogs_to_file(
DIALOGS_path,
checkedboxes,
source_path,
target_path,
similarity
)
messagebox.showinfo(
"Done!",
"You saved setup file in:"f"\n{DIALOGS_path}"
)
else:
messagebox.showinfo(
"Ouch!",
"You haven't saved config!"
)
def open_DIALOGS(self):
setup_path = self.dialogs.get_dialogs_path_open()
if setup_path:
config = self.dialogs.read_config_file(setup_path)
self.set_setup_dialogs(config)
else:
messagebox.showinfo(
"Ouch!",
"You haven't choose any file!"
)
def save_to_default_DIALOGS(self):
window = self.window
AppFolder = self.dialogs.AppData_folder_path
DEFAULTS_folder = self.dialogs.DEFAULTS_folder_path
DEFAULT_DIALOGS = self.dialogs.DEFAULT_file_path
if not os.path.isdir(AppFolder):
os.mkdir(AppFolder)
if not os.path.isdir(DEFAULTS_folder):
os.mkdir(DEFAULTS_folder)
checkedboxes = list(window.checkbars.state())
source_path = window.source_path_entry.get()
target_path = window.target_path_entry.get()
similarity = float(window.similarity_entry.get())
self.dialogs.saving_dialogs_to_file(
DEFAULT_DIALOGS,
checkedboxes,
source_path,
target_path,
similarity
)
def reset_to_default_DIALOGS(self):
AppFolder = self.dialogs.AppData_folder_path
DEFAULTS_folder = self.dialogs.DEFAULTS_folder_path
DEFAULT_DIALOGS = self.dialogs.DEFAULT_file_path
if not os.path.isdir(AppFolder):
os.mkdir(AppFolder)
if not os.path.isdir(DEFAULTS_folder):
os.mkdir(DEFAULTS_folder)
if not os.path.exists(DEFAULT_DIALOGS):
self.dialogs.create_DEFAULT_file()
config = self.dialogs.read_config_file(DEFAULT_DIALOGS)
self.set_setup_dialogs(config)
def reset_default_DIALOGS(self):
AppFolder = self.dialogs.AppData_folder_path
DEFAULTS_folder = self.dialogs.DEFAULTS_folder_path
DEFAULT_DIALOGS = self.dialogs.DEFAULT_file_path
if not os.path.isdir(AppFolder):
os.mkdir(AppFolder)
if not os.path.isdir(DEFAULTS_folder):
os.mkdir(DEFAULTS_folder)
# it will overwrite the file if it already exists
self.dialogs.create_DEFAULT_file()
config = self.dialogs.read_config_file(DEFAULT_DIALOGS)
self.set_setup_dialogs(config)
def set_setup_dialogs(self, config):
window = self.window
window.source_path_entry = window.entry_set(
window.source_path_entry, self.dialogs.get_source_path(
config)
)
window.target_path_entry = window.entry_set(
window.target_path_entry, self.dialogs.get_target_path(
config)
)
picks = self.dialogs.get_checked_extensions(config)
window.checkbars.set_state(picks)
window.similarity_entry = window.entry_set(
window.similarity_entry, self.dialogs.get_similarity(config)
)
| [
"tkinter.Menu",
"os.path.exists",
"config.Dialogs.Dialogs",
"os.path.isdir",
"tkinter.Menu.__init__",
"os.mkdir",
"sys.exit",
"tkinter.messagebox.showinfo"
] | [((443, 473), 'tkinter.Menu.__init__', 'tk.Menu.__init__', (['self', 'parent'], {}), '(self, parent)\n', (459, 473), True, 'import tkinter as tk\n'), ((525, 534), 'config.Dialogs.Dialogs', 'Dialogs', ([], {}), '()\n', (532, 534), False, 'from config.Dialogs import Dialogs\n'), ((556, 586), 'tkinter.Menu', 'tk.Menu', (['parent'], {'tearoff': '(False)'}), '(parent, tearoff=False)\n', (563, 586), True, 'import tkinter as tk\n'), ((1328, 1339), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1336, 1339), False, 'import sys\n'), ((1945, 2021), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Done!"""', 'f"""You saved setup file in:\n{DIALOGS_path}"""'], {}), '(\'Done!\', f"""You saved setup file in:\n{DIALOGS_path}""")\n', (1964, 2021), False, 'from tkinter import messagebox\n'), ((2095, 2152), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Ouch!"""', '"""You haven\'t saved config!"""'], {}), '(\'Ouch!\', "You haven\'t saved config!")\n', (2114, 2152), False, 'from tkinter import messagebox\n'), ((2443, 2503), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Ouch!"""', '"""You haven\'t choose any file!"""'], {}), '(\'Ouch!\', "You haven\'t choose any file!")\n', (2462, 2503), False, 'from tkinter import messagebox\n'), ((2807, 2831), 'os.path.isdir', 'os.path.isdir', (['AppFolder'], {}), '(AppFolder)\n', (2820, 2831), False, 'import os\n'), ((2845, 2864), 'os.mkdir', 'os.mkdir', (['AppFolder'], {}), '(AppFolder)\n', (2853, 2864), False, 'import os\n'), ((2881, 2911), 'os.path.isdir', 'os.path.isdir', (['DEFAULTS_folder'], {}), '(DEFAULTS_folder)\n', (2894, 2911), False, 'import os\n'), ((2925, 2950), 'os.mkdir', 'os.mkdir', (['DEFAULTS_folder'], {}), '(DEFAULTS_folder)\n', (2933, 2950), False, 'import os\n'), ((3581, 3605), 'os.path.isdir', 'os.path.isdir', (['AppFolder'], {}), '(AppFolder)\n', (3594, 3605), False, 'import os\n'), ((3619, 3638), 'os.mkdir', 'os.mkdir', (['AppFolder'], {}), '(AppFolder)\n', (3627, 3638), False, 
'import os\n'), ((3655, 3685), 'os.path.isdir', 'os.path.isdir', (['DEFAULTS_folder'], {}), '(DEFAULTS_folder)\n', (3668, 3685), False, 'import os\n'), ((3699, 3724), 'os.mkdir', 'os.mkdir', (['DEFAULTS_folder'], {}), '(DEFAULTS_folder)\n', (3707, 3724), False, 'import os\n'), ((3741, 3772), 'os.path.exists', 'os.path.exists', (['DEFAULT_DIALOGS'], {}), '(DEFAULT_DIALOGS)\n', (3755, 3772), False, 'import os\n'), ((4149, 4173), 'os.path.isdir', 'os.path.isdir', (['AppFolder'], {}), '(AppFolder)\n', (4162, 4173), False, 'import os\n'), ((4187, 4206), 'os.mkdir', 'os.mkdir', (['AppFolder'], {}), '(AppFolder)\n', (4195, 4206), False, 'import os\n'), ((4223, 4253), 'os.path.isdir', 'os.path.isdir', (['DEFAULTS_folder'], {}), '(DEFAULTS_folder)\n', (4236, 4253), False, 'import os\n'), ((4267, 4292), 'os.mkdir', 'os.mkdir', (['DEFAULTS_folder'], {}), '(DEFAULTS_folder)\n', (4275, 4292), False, 'import os\n')] |
"""Network serialization configuration."""
from __future__ import annotations
from functools import partialmethod
from itertools import chain
from tsim.core.entity import EntityRef
from tsim.core.network.intersection import ConflictPoint, Curve, Intersection
from tsim.core.network.lane import Lane, LaneSegment
from tsim.utils import pickling
from tsim.utils.linkedlist import LinkedList
def configure():
"""Configure serialization of network related classes."""
Intersection.__getstate__ = _intersection_getstate
Intersection.__setstate__ = _intersection_setstate
ConflictPoint.__getstate__ = _conflict_point_getstate
ConflictPoint.__setstate__ = _conflict_point_setstate
Curve.__getstate__ = _curve_getstate
Curve.__setstate__ = _curve_setstate
Lane.__getstate__ = _lane_getstate
Lane.__setstate__ = _lane_setstate
LaneSegment.__getstate__ = partialmethod(pickling.getstate, add_dict=False)
LaneSegment.__setstate__ = partialmethod(pickling.setstate, add_dict=False)
# pylint: disable=protected-access
def _intersection_getstate(self: Intersection):
points = {p.id: p for _, p in
chain.from_iterable(c.conflict_points
for c in self.curves.values())}
neighbors = {k: {p.id for p in v.neighbors}
for k, v in points.items()}
return (self.node_ref.id, self.lane_connections, self.way_connections,
self.connection_map, self.curves, points, neighbors)
def _intersection_setstate(self: Intersection, state):
(node_id, self.lane_connections, self.way_connections,
self.connection_map, self.curves, points, neighbors) = state
self.node_ref = EntityRef(node_id)
for id_, neighbor_ids in neighbors.items():
points[id_].neighbors.update(points[i] for i in neighbor_ids)
for point in points.values():
point.create_lock_order()
def _conflict_point_getstate(self: ConflictPoint):
return self.id, self.point, self.type
def _conflict_point_setstate(self: ConflictPoint, state):
self.id, self.point, self.type = state
self.neighbors = set()
self.curves = set()
self.owner = None
self.queue = LinkedList()
def _curve_getstate(self: Curve):
return (self.node_ref.id, self.source, self.dest, self.curve,
self.length, self._conflict_points, self._sorted)
def _curve_setstate(self: Curve, state):
(node_id, self.source, self.dest, self.curve, self.length,
self._conflict_points, self._sorted) = state
self.node_ref = EntityRef(node_id)
self.positions = {}
for param, point in self._conflict_points:
point.curves.add(self)
self.positions[point] = param * self.length
self.init_traffic()
def _lane_getstate(self: Lane):
return (self.lane_ref, self.distance_from_center, self.length,
self.segments)
def _lane_setstate(self: Lane, state):
(self.lane_ref, self.distance_from_center,
self.length, self.segments) = state
self.traffic = LinkedList()
| [
"tsim.core.entity.EntityRef",
"tsim.utils.linkedlist.LinkedList",
"functools.partialmethod"
] | [((890, 938), 'functools.partialmethod', 'partialmethod', (['pickling.getstate'], {'add_dict': '(False)'}), '(pickling.getstate, add_dict=False)\n', (903, 938), False, 'from functools import partialmethod\n'), ((970, 1018), 'functools.partialmethod', 'partialmethod', (['pickling.setstate'], {'add_dict': '(False)'}), '(pickling.setstate, add_dict=False)\n', (983, 1018), False, 'from functools import partialmethod\n'), ((1693, 1711), 'tsim.core.entity.EntityRef', 'EntityRef', (['node_id'], {}), '(node_id)\n', (1702, 1711), False, 'from tsim.core.entity import EntityRef\n'), ((2186, 2198), 'tsim.utils.linkedlist.LinkedList', 'LinkedList', ([], {}), '()\n', (2196, 2198), False, 'from tsim.utils.linkedlist import LinkedList\n'), ((2539, 2557), 'tsim.core.entity.EntityRef', 'EntityRef', (['node_id'], {}), '(node_id)\n', (2548, 2557), False, 'from tsim.core.entity import EntityRef\n'), ((3012, 3024), 'tsim.utils.linkedlist.LinkedList', 'LinkedList', ([], {}), '()\n', (3022, 3024), False, 'from tsim.utils.linkedlist import LinkedList\n')] |
import time
from typing import Dict, List, Optional
from urllib.parse import urlencode
from requests.utils import dict_from_cookiejar
from requests_toolbelt import MultipartEncoder
from utils.encrypt_utils import md5_str
from utils.logger_utils import LogManager
from utils.str_utils import check_is_json
from captcha.zhihu_captcha import ZhihuCaptcha
from config import LOG_LEVEL, PROCESS_STATUS_FAIL
from utils.encrypt_utils import hmac_encrypt_sha1
from utils.image_utils import image_base64_to_pillow
from utils.exception_utils import LoginException, ParseDataException
from spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils
from utils.time_utils import datetime_str_change_fmt, timestamp_to_datetime_str
from utils.js_utils import compile_js, zhihu_encrypt_js_code, zhihu_zse86_js_code
logger = LogManager(__name__).get_logger_and_add_handlers(
formatter_template=5, log_level_int=LOG_LEVEL
)
class ZhiHuSpider(BaseSpider):
def __init__(self, task_id: str, username: str, password: str):
self._task_id: str = task_id
self._login_username: str = username
self._login_password: str = password
self._main_url: str = "https://www.zhihu.com"
self._signin_url: str = f"{self._main_url}/signin"
self._login_url: str = f"{self._main_url}/api/v3/oauth/sign_in"
self._captcha_url: str = f"{self._main_url}/api/v3/oauth/captcha?lang=en"
self._spider_name: str = f"zhihu:{self._login_username}"
self._login_cookies: Optional[str] = None
super().__init__()
self._common_headers.update(
{
"referer": "https://www.zhihu.com/",
"host": "www.zhihu.com",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9",
"accept": "*/*",
}
)
self._captcha_headers = {
"referer": "https://www.zhihu.com/signin",
"x-requested-with": "fetch",
"x-zse-83": "3_2.0",
}
self._login_headers = {
"content-type": "application/x-www-form-urlencoded",
"origin": "https://www.zhihu.com",
"referer": "https://www.zhihu.com/signin",
"x-requested-with": "fetch",
"x-zse-83": "3_2.0",
}
self._login_cookies = self.get_cookies(spider_name=self._spider_name)
self._login_user_info: Optional[Dict] = None
self._login_user_url_token: str = ""
self._blogs_data: List = []
self._blogs_collection_data: List = []
def parse_data_with_method(self, method: str):
if method == BaseSpiderParseMethodType.LoginResult:
self._parse_login_data()
elif method == BaseSpiderParseMethodType.PersonalBlogs:
self._parse_personal_blogs()
self._parse_personal_collect_blogs()
elif method == BaseSpiderParseMethodType.Finish:
self.send_data()
def _init_login(self) -> bool:
"""
初始化登录准备
:return: 是否初始化成功
"""
self._session.headers.update(self._common_headers)
self.make_request_with_session(
session=self._session, url=self._signin_url, headers=self._common_headers
)
response = self.make_request_with_session(
session=self._session, url=self._captcha_url, headers=self._captcha_headers
)
if check_is_json(data=response.content.decode()):
if response.json()["show_captcha"]:
self._captcha_headers.update(origin="https://www.zhihu.com")
response = self.make_request_with_session(
session=self._session,
url=self._captcha_url,
headers=self._common_headers,
method="PUT",
)
if check_is_json(data=response.content.decode()):
img_base64 = response.json()["img_base64"].replace("\\n", "")
# 验证码识别
captcha_model = ZhihuCaptcha()
captcha_code = captcha_model.predict(
img=image_base64_to_pillow(img_str=img_base64)
)
post_data: dict = {"input_text": captcha_code}
data = MultipartEncoder(
fields=post_data, boundary="----WebKitFormBoundary"
)
headers = {
"content-type": data.content_type,
"origin": "https://www.zhihu.com",
"referer": "https://www.zhihu.com/signin",
"x-requested-with": "fetch",
}
# 这里需要暂停一下, 防止请求过快
time.sleep(2)
response = self.make_request_with_session(
session=self._session,
url=self._captcha_url,
data=data,
headers=headers,
method="POST",
)
if check_is_json(response.content.decode()):
if response.json().get("success"):
return True
else:
logger.error(
f"验证码校验请求错误!当前接口返回结果:{response.content.decode()}"
)
return False
else:
logger.error("登录 --> 验证码校验请求失败")
return False
else:
logger.error("登录 --> 获取验证码失败")
return False
else:
logger.error(
f"登录 --> 获取验证码接口数据发生变化!当前接口返回结果:{response.content.decode()}"
)
return False
else:
logger.error("登录 --> 获取验证码初始化失败")
return False
def login(self):
if self._login_cookies is None:
if self._init_login():
grant_type: str = "password"
client_id: str = "c3cef7c66a1843f8b3a9e6a1e3160e20"
source: str = "com.zhihu.web"
timestamp: str = str(int(time.time() * 1000))
signature: str = hmac_encrypt_sha1(
key=b"d1b964811afb40118a12068ff74a12f4",
encrypt_str=f"{grant_type}{client_id}{source}{timestamp}",
)
post_data: dict = {
"client_id": client_id,
"grant_type": grant_type,
"source": source,
"username": self._login_username,
"password": self._login_password,
"lang": "en",
"ref_source": "other_https://www.zhihu.com/signin",
"utm_source": "",
"captcha": "",
"timestamp": timestamp,
"signature": signature,
}
js_code = compile_js(js_str=zhihu_encrypt_js_code)
data = js_code.call("encrypt", urlencode(post_data))
response = self.make_request_with_session(
session=self._session,
url=self._login_url,
data=data,
headers=self._login_headers,
method="POST",
)
if check_is_json(data=response.content.decode()):
json_response = response.json()
if json_response.get("user_id"):
logger.debug(json_response)
self._login_cookies = json_response["cookie"]
self._session.cookies.update(self._login_cookies)
logger.info(f"登录 --> 登录成功!当前用户:{self._login_username}")
self._login_user_info = {"username": self._login_username}
self._login_user_info.update(json_response)
elif json_response.get("error"):
error_code: int = json_response["error"]["code"]
error_msg: str = json_response["error"]["message"]
if error_code == 100005:
logger.error("登录 --> 用户名或密码错误!登录失败!")
raise LoginException()
elif error_code == 120005:
logger.error(f"登录 --> 登录失败!错误信息:{error_code}")
raise LoginException()
else:
logger.error(f"登录 --> 其他错误!错误信息:{error_msg}")
raise LoginException()
else:
logger.error("登录 --> 获取登录后的用户信息失败!登录失败!")
self.update_task_status(
task_id=self._task_id, data=str(PROCESS_STATUS_FAIL)
)
raise LoginException()
else:
logger.error("登录 --> 失败")
self.update_task_status(
task_id=self._task_id, data=str(PROCESS_STATUS_FAIL)
)
raise LoginException()
if self._login_user_info is not None:
self.parse_data_with_method(
method=BaseSpiderParseMethodType.LoginResult
)
else:
logger.error("登录 --> 获取用户数据失败!")
raise LoginException()
else:
# self._session.headers.update(self._common_headers)
# self._session.cookies.update(self._login_cookies)
self._common_headers.update(Cookie=self._login_cookies)
self._login_user_url_token = self.get_data(
spider_name=f"{self._spider_name}:token"
)
self.parse_data_with_method(method=BaseSpiderParseMethodType.LoginResult)
def _parse_login_data(self):
include_params: str = "ad_type,available_message_types," "default_notifications_count," "follow_notifications_count," "vote_thank_notifications_count," "messages_count," "email,account_status,is_bind_phone," "visits_count,answer_count,articles_count," "gender,follower_count"
self._personal_url: str = f"{self._main_url}/api/v4/me?include={include_params}"
# 这个地方很重要
request_cookie: str = CookieUtils(
cookie_list=self._session.cookies.items()
).to_str()
self.set_cookies(
spider_name=f"zhihu:{self._login_username}", cookies=request_cookie
)
response = self.make_request_with_session(
session=self._session, url=self._personal_url, headers=self._common_headers
)
if check_is_json(data=response.content.decode()):
json_response = response.json()
self._login_user_url_token = json_response["url_token"]
self.set_data(
spider_name=f"{self._spider_name}:token",
data=self._login_user_url_token,
)
self._common_headers.update(Cookie=request_cookie)
# 知乎神奇的参数
# TODO 知乎把 API 的请求又进行了加密,导致下面的 API 又用不了了,待解决
resp1 = self._session.get('https://www.zhihu.com', headers=self.get_default_headers())
d_c0 = resp1.cookies['d_c0']
headers = self.get_default_headers()
search_api: str = f"/api/v4/members/{self._login_user_url_token}/followees"
req_url: str = f"{self._main_url}{search_api}"
f = "+".join(
(
"3_2.0",
search_api,
req_url,
d_c0,
)
)
js_code = compile_js(js_str=zhihu_zse86_js_code)
sign = js_code.call("b", md5_str(encrypt_str=f))
headers.update(
{
"content-type": "application/json",
"accept": "*/*",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache, no-store, must-revalidate, private, max-age=0",
"pragma": "no-cache",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "same-origin",
"cookie": f"d_c0={d_c0}",
"x-api-version": "3.0.91",
"x-app-za": "OS=Web",
"x-requested-with": "fetch",
"x-zse-83": "3_2.0",
"x-zse-86": f"1.0_{sign}",
}
)
followee_response = self.make_request(url=req_url, headers=headers)
followee_count: int = 0
if check_is_json(data=followee_response.text):
followee_count = followee_response.json()["paging"]["totals"]
personal_data: Dict = {
"username": json_response["name"],
"description": json_response["headline"],
"avatarImg": json_response["avatar_url_template"],
"followee": followee_count,
"follower": json_response["follower_count"],
"likeBlogs": 0,
}
# 推送数据
logger.debug(personal_data)
self.data_model.set_personal_data(data=personal_data)
logger.info("查询 --> 获取个人数据成功!")
self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs)
else:
logger.error("查询 --> 获取个人数据失败!")
self.update_task_status(
task_id=self._task_id, data=str(PROCESS_STATUS_FAIL)
)
raise ParseDataException(message="获取个人数据失败!")
def _parse_personal_blogs(self, next_params: Optional[str] = None):
include_params: str = "data[*].comment_count,suggest_edit," "is_normal,thumbnail_extra_info," "thumbnail,can_comment,comment_permission," "admin_closed_comment,content,voteup_count," "created,updated,upvoted_followees,voting," "review_info,is_labeled,label_info;" "data[*].author.badge[?(type=best_answerer)].topics"
if next_params is None:
self._blogs_url: str = f"{self._main_url}/api/v4/members/" f"{self._login_user_url_token}/articles?include={include_params}" f"&offset=0&limit=20&sort_by=created"
else:
self._blogs_url = next_params
response = self.make_request(
url=self._blogs_url, headers=self.get_default_headers()
)
if check_is_json(response.content.decode()):
json_response = response.json()
for blogs in json_response["data"]:
# 知乎的浏览者数据用赞同数代替
blog_data: Dict = {
"blogId": blogs["id"],
"blogTitle": blogs["title"],
"blogHref": blogs["url"],
"blogViewers": blogs["voteup_count"],
"blogCreateTime": timestamp_to_datetime_str(
timestamp=blogs["created"]
),
}
self._blogs_data.append(blog_data)
if json_response["paging"]["is_end"] is not True:
time.sleep(0.5)
self._parse_personal_blogs(next_params=json_response["paging"]["next"])
else:
logger.debug(self._blogs_data)
self.data_model.set_personal_blogs_data(data=self._blogs_data)
logger.info("获取个人博客数据成功!")
else:
logger.error("获取个人博客数据失败!")
self.update_task_status(
task_id=self._task_id, data=str(PROCESS_STATUS_FAIL)
)
raise ParseDataException()
def _parse_personal_collect_blogs(self):
include_params: str = "data[*].updated_time,answer_count,follower_count," "creator,description,is_following,comment_count,created_time"
self._collection_url: str = f"{self._main_url}/api/v4/people/" f"{self._login_user_url_token}/collections?include={include_params}" f"&offset=0&limit=20"
response = self.make_request(
url=self._collection_url, headers=self.get_default_headers()
)
if check_is_json(data=response.content.decode()):
json_response = response.json()
collections_id: List = []
for collections in json_response["data"]:
collections_id.append(collections["id"])
if len(collections_id) == 0:
logger.info("个人收藏博客获取完毕!数据为空!")
self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish)
else:
# 用闭包进行爬取
def inner_spider(c_id: int, next_url: Optional[str] = None) -> bool:
req_params: str = "data[*].created,content.comment_count," "suggest_edit,is_normal,thumbnail_extra_info," "thumbnail,description,content,voteup_count," "created,updated,upvoted_followees,voting," "review_info,is_labeled,label_info," "relationship.is_authorized,voting,is_author," "is_thanked,is_nothelp,is_recognized;" "data[*].author.badge[?(type=best_answerer)].topics"
if next_url is None:
collection_url: str = f"{self._main_url}/api/v4/favlists/" f"{c_id}/items?include={req_params}" f"&offset=0&limit=20"
else:
collection_url = next_url
inner_response = self.make_request(
url=collection_url, headers=self.get_default_headers()
)
if check_is_json(data=inner_response.content.decode()):
inner_json_response = inner_response.json()
for data in inner_json_response["data"]:
create_time = datetime_str_change_fmt(
time_str=data["created"], prev_fmt="%Y-%m-%dT%H:%M:%SZ"
)
content = data["content"]
blog_id = content["id"]
blog_href = content["url"]
# 收藏夹中可以混入问题类型
if content["type"] == "answer":
title = content["question"]["title"]
blog_href = content["question"]["url"]
else:
title = content["title"]
# 封装数据
blog_data: Dict = {
"blogId": blog_id,
"blogTitle": title,
"blogHref": blog_href,
"blogViewers": content["voteup_count"],
"blogCreateTime": create_time,
}
self._blogs_collection_data.append(blog_data)
if inner_json_response["paging"]["is_end"] is not True:
time.sleep(1.5)
return inner_spider(
c_id=c_id,
next_url=inner_json_response["paging"]["next"],
)
else:
logger.info(f"收藏夹 ID: {c_id} 数据爬取完毕!")
return True
else:
logger.error("解析个人收藏博客数据失败!")
return False
for index, collection_id in enumerate(collections_id):
logger.info(f"正在爬取第{index}个收藏夹数据...当前收藏夹 ID: {collection_id}")
is_continue = inner_spider(c_id=collection_id)
if is_continue is not True:
break
logger.info(f"个人收藏博客获取完毕!数据长度: {len(self._blogs_collection_data)}")
self.data_model.set_personal_like_blogs_data(
data=self._blogs_collection_data
)
self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish)
else:
logger.error("获取个人收藏博客数据失败!")
self.update_task_status(
task_id=self._task_id, data=str(PROCESS_STATUS_FAIL)
)
raise ParseDataException()
def _test_cookies(self, cookies: Optional[str] = None) -> bool:
    """Check whether the stored (or given) Zhihu cookies still belong to a
    logged-in session by probing the /me endpoint.

    :param cookies: optional cookie value to test instead of the stored
        login cookies (dict or raw header string).
    :return: True when the account is confirmed logged in, False otherwise.
        On a transport-level failure the scheduled refresh job is removed.
    """
    include_params: str = "visits_count"
    me_url: str = f"{self._main_url}/api/v4/me?include={include_params}"
    request_headers: Dict = self.get_default_headers()
    active_cookies = self._login_cookies if cookies is None else cookies
    # Cookies may be held either as a dict or as a pre-built header string.
    if isinstance(active_cookies, dict):
        request_headers.update(
            Cookie=CookieUtils(cookie_list=active_cookies.items()).to_str()
        )
    elif isinstance(active_cookies, str):
        request_headers.update(Cookie=active_cookies)
    response = self.make_request(url=me_url, headers=request_headers)
    if (
        response.status_code != 200
        or check_is_json(response.content.decode()) is not True
    ):
        logger.error(f"当前知乎登录状态: 已退出!")
        # The session is dead at the transport level; stop the refresh job.
        self._async_task.remove_async_scheduler(job_id=self._spider_name)
        return False
    body = response.json()
    if body.get("error"):
        logger.error(f"当前知乎账号登录状态: 已退出!")
        return False
    logger.info(
        f"当前知乎账号为: {self._login_username} 用户 ID: {body['id']}, 状态: 已登录"
    )
    return True
| [
"utils.time_utils.timestamp_to_datetime_str",
"utils.js_utils.compile_js",
"utils.str_utils.check_is_json",
"utils.encrypt_utils.md5_str",
"utils.image_utils.image_base64_to_pillow",
"utils.exception_utils.LoginException",
"time.sleep",
"utils.encrypt_utils.hmac_encrypt_sha1",
"utils.time_utils.date... | [((819, 839), 'utils.logger_utils.LogManager', 'LogManager', (['__name__'], {}), '(__name__)\n', (829, 839), False, 'from utils.logger_utils import LogManager\n'), ((11802, 11840), 'utils.js_utils.compile_js', 'compile_js', ([], {'js_str': 'zhihu_zse86_js_code'}), '(js_str=zhihu_zse86_js_code)\n', (11812, 11840), False, 'from utils.js_utils import compile_js, zhihu_encrypt_js_code, zhihu_zse86_js_code\n'), ((12831, 12873), 'utils.str_utils.check_is_json', 'check_is_json', ([], {'data': 'followee_response.text'}), '(data=followee_response.text)\n', (12844, 12873), False, 'from utils.str_utils import check_is_json\n'), ((13771, 13810), 'utils.exception_utils.ParseDataException', 'ParseDataException', ([], {'message': '"""获取个人数据失败!"""'}), "(message='获取个人数据失败!')\n", (13789, 13810), False, 'from utils.exception_utils import LoginException, ParseDataException\n'), ((15769, 15789), 'utils.exception_utils.ParseDataException', 'ParseDataException', ([], {}), '()\n', (15787, 15789), False, 'from utils.exception_utils import LoginException, ParseDataException\n'), ((20374, 20394), 'utils.exception_utils.ParseDataException', 'ParseDataException', ([], {}), '()\n', (20392, 20394), False, 'from utils.exception_utils import LoginException, ParseDataException\n'), ((6329, 6451), 'utils.encrypt_utils.hmac_encrypt_sha1', 'hmac_encrypt_sha1', ([], {'key': "b'd1b964811afb40118a12068ff74a12f4'", 'encrypt_str': 'f"""{grant_type}{client_id}{source}{timestamp}"""'}), "(key=b'd1b964811afb40118a12068ff74a12f4', encrypt_str=\n f'{grant_type}{client_id}{source}{timestamp}')\n", (6346, 6451), False, 'from utils.encrypt_utils import hmac_encrypt_sha1\n'), ((7089, 7129), 'utils.js_utils.compile_js', 'compile_js', ([], {'js_str': 'zhihu_encrypt_js_code'}), '(js_str=zhihu_encrypt_js_code)\n', (7099, 7129), False, 'from utils.js_utils import compile_js, zhihu_encrypt_js_code, zhihu_zse86_js_code\n'), ((9257, 9273), 'utils.exception_utils.LoginException', 
'LoginException', ([], {}), '()\n', (9271, 9273), False, 'from utils.exception_utils import LoginException, ParseDataException\n'), ((9542, 9558), 'utils.exception_utils.LoginException', 'LoginException', ([], {}), '()\n', (9556, 9558), False, 'from utils.exception_utils import LoginException, ParseDataException\n'), ((11878, 11900), 'utils.encrypt_utils.md5_str', 'md5_str', ([], {'encrypt_str': 'f'}), '(encrypt_str=f)\n', (11885, 11900), False, 'from utils.encrypt_utils import md5_str\n'), ((15286, 15301), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (15296, 15301), False, 'import time\n'), ((4066, 4080), 'captcha.zhihu_captcha.ZhihuCaptcha', 'ZhihuCaptcha', ([], {}), '()\n', (4078, 4080), False, 'from captcha.zhihu_captcha import ZhihuCaptcha\n'), ((4326, 4395), 'requests_toolbelt.MultipartEncoder', 'MultipartEncoder', ([], {'fields': 'post_data', 'boundary': '"""----WebKitFormBoundary"""'}), "(fields=post_data, boundary='----WebKitFormBoundary')\n", (4342, 4395), False, 'from requests_toolbelt import MultipartEncoder\n'), ((4793, 4806), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4803, 4806), False, 'import time\n'), ((7177, 7197), 'urllib.parse.urlencode', 'urlencode', (['post_data'], {}), '(post_data)\n', (7186, 7197), False, 'from urllib.parse import urlencode\n'), ((9026, 9042), 'utils.exception_utils.LoginException', 'LoginException', ([], {}), '()\n', (9040, 9042), False, 'from utils.exception_utils import LoginException, ParseDataException\n'), ((15037, 15090), 'utils.time_utils.timestamp_to_datetime_str', 'timestamp_to_datetime_str', ([], {'timestamp': "blogs['created']"}), "(timestamp=blogs['created'])\n", (15062, 15090), False, 'from utils.time_utils import datetime_str_change_fmt, timestamp_to_datetime_str\n'), ((4167, 4209), 'utils.image_utils.image_base64_to_pillow', 'image_base64_to_pillow', ([], {'img_str': 'img_base64'}), '(img_str=img_base64)\n', (4189, 4209), False, 'from utils.image_utils import image_base64_to_pillow\n'), 
((6275, 6286), 'time.time', 'time.time', ([], {}), '()\n', (6284, 6286), False, 'import time\n'), ((17877, 17962), 'utils.time_utils.datetime_str_change_fmt', 'datetime_str_change_fmt', ([], {'time_str': "data['created']", 'prev_fmt': '"""%Y-%m-%dT%H:%M:%SZ"""'}), "(time_str=data['created'], prev_fmt='%Y-%m-%dT%H:%M:%SZ'\n )\n", (17900, 17962), False, 'from utils.time_utils import datetime_str_change_fmt, timestamp_to_datetime_str\n'), ((19106, 19121), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (19116, 19121), False, 'import time\n'), ((8423, 8439), 'utils.exception_utils.LoginException', 'LoginException', ([], {}), '()\n', (8437, 8439), False, 'from utils.exception_utils import LoginException, ParseDataException\n'), ((8600, 8616), 'utils.exception_utils.LoginException', 'LoginException', ([], {}), '()\n', (8614, 8616), False, 'from utils.exception_utils import LoginException, ParseDataException\n'), ((8755, 8771), 'utils.exception_utils.LoginException', 'LoginException', ([], {}), '()\n', (8769, 8771), False, 'from utils.exception_utils import LoginException, ParseDataException\n')] |
# Generated by Django 2.2.7 on 2020-02-14 10:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the `group_of_square` CharField to the `Komax` model."""

    dependencies = [
        ('komax_app', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='komax',
            name='group_of_square',
            # The one-off default ('1 2 3') only back-fills existing rows;
            # preserve_default=False keeps it out of the model state.
            field=models.CharField(default='1 2 3', max_length=6),
            preserve_default=False,
        ),
    ]
| [
"django.db.models.CharField"
] | [((333, 380), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""1 2 3"""', 'max_length': '(6)'}), "(default='1 2 3', max_length=6)\n", (349, 380), False, 'from django.db import migrations, models\n')] |
from django.views.generic import TemplateView
import logging
log = logging.getLogger(__name__)
class Base(TemplateView):
    """Static view that renders the site's base template."""

    template_name = 'base.html'
| [
"logging.getLogger"
] | [((69, 96), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (86, 96), False, 'import logging\n')] |
import pyowm
# NOTE(review): OWM API key committed in source — move it to an environment
# variable or secret store before publishing.
token = '88983f82566dea8294a9d3e3fb479918'
language = 'ru'  # language for weather status descriptions
owm = pyowm.OWM(API_key = token, language = language)
def city_temp():
    """Prompt for a city name and print its current temperature and status.

    When the city is not found, prints a hint and retries (recursively).
    Other OWM/network errors are not handled here and will propagate.
    """
    city = input('Введите название города: ')
    try:
        observation = owm.weather_at_place(city)
    except pyowm.exceptions.api_response_error.NotFoundError:
        print('Такой город не найден, попробуйте еще.')
        # Bug fix: return here. The original fell through after the retry
        # and then used the unbound `observation` variable (NameError).
        return city_temp()
    weather = observation.get_weather()
    temp = weather.get_temperature(unit='celsius')['temp']
    status = weather.get_detailed_status()
    print('Текущая температура в городе ' + city + ' ' + str(temp) + ' по цельсию, ' + status + '.')
# NOTE(review): city_temp() is invoked twice back-to-back, so the user is
# prompted twice in a row — the second call looks like an accidental leftover.
city_temp()
city_temp()
| [
"pyowm.OWM"
] | [((79, 122), 'pyowm.OWM', 'pyowm.OWM', ([], {'API_key': 'token', 'language': 'language'}), '(API_key=token, language=language)\n', (88, 122), False, 'import pyowm\n')] |
DESCRIPTION = """
This script will look at all the csvs in 'inputs'
Gets all 'to-read' books
Outputs their availability into 'outputs'
"""
import argparse
import os
import logging
logger = logging.Logger("Main Logger")
from pathlib import Path
from dotenv import load_dotenv
from nlbsg import Client
from nlbsg.catalogue import PRODUCTION_URL
from utils.nlb_checker import NlbChecker
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument("--config", help="Config file", type=str, default="config.env")
parser.add_argument("--inputs", help="Input folder", type=str, default="inputs")
parser.add_argument("--outputs", help="Output folder", type=str, default="outputs")
parser.add_argument(
"--min_rating",
help="Only search for books with this rating or higher. Range from 0.0 to 5.0",
type=str,
default="0.0",
)
args = parser.parse_args()
INPUT_DIR = Path(args.inputs)
OUTPUT_DIR = Path(args.outputs)
env_path = Path(".") / args.config
load_dotenv(dotenv_path=env_path, verbose=True, override=True)
API_KEY = os.environ.get("API_KEY")
logger.info("Starting!")
client = Client(PRODUCTION_URL, API_KEY)
nlb_checker = NlbChecker(
client=client,
input_dir=INPUT_DIR,
output_dir=OUTPUT_DIR,
)
csv_paths = nlb_checker.process_all()
| [
"utils.nlb_checker.NlbChecker",
"argparse.ArgumentParser",
"pathlib.Path",
"os.environ.get",
"nlbsg.Client",
"dotenv.load_dotenv",
"logging.Logger"
] | [((191, 220), 'logging.Logger', 'logging.Logger', (['"""Main Logger"""'], {}), "('Main Logger')\n", (205, 220), False, 'import logging\n'), ((429, 477), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'DESCRIPTION'}), '(description=DESCRIPTION)\n', (452, 477), False, 'import argparse\n'), ((971, 988), 'pathlib.Path', 'Path', (['args.inputs'], {}), '(args.inputs)\n', (975, 988), False, 'from pathlib import Path\n'), ((1006, 1024), 'pathlib.Path', 'Path', (['args.outputs'], {}), '(args.outputs)\n', (1010, 1024), False, 'from pathlib import Path\n'), ((1069, 1131), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': 'env_path', 'verbose': '(True)', 'override': '(True)'}), '(dotenv_path=env_path, verbose=True, override=True)\n', (1080, 1131), False, 'from dotenv import load_dotenv\n'), ((1147, 1172), 'os.environ.get', 'os.environ.get', (['"""API_KEY"""'], {}), "('API_KEY')\n", (1161, 1172), False, 'import os\n'), ((1217, 1248), 'nlbsg.Client', 'Client', (['PRODUCTION_URL', 'API_KEY'], {}), '(PRODUCTION_URL, API_KEY)\n', (1223, 1248), False, 'from nlbsg import Client\n'), ((1267, 1336), 'utils.nlb_checker.NlbChecker', 'NlbChecker', ([], {'client': 'client', 'input_dir': 'INPUT_DIR', 'output_dir': 'OUTPUT_DIR'}), '(client=client, input_dir=INPUT_DIR, output_dir=OUTPUT_DIR)\n', (1277, 1336), False, 'from utils.nlb_checker import NlbChecker\n'), ((1041, 1050), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (1045, 1050), False, 'from pathlib import Path\n')] |
# Generated by Django 2.2.12 on 2021-02-02 06:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add email/name/profits to Account and widen several CharField
    defaults on Account and Transaction.

    Defaults such as 'NEED TO FORMAT EMAIL' / 'MISSING WALLET' are
    placeholder values back-filling existing rows.
    """

    dependencies = [
        ('accounts', '0003_auto_20210202_0503'),
    ]

    operations = [
        migrations.AddField(
            model_name='account',
            name='email',
            field=models.CharField(default='NEED TO FORMAT EMAIL', max_length=42),
        ),
        migrations.AddField(
            model_name='account',
            name='name',
            field=models.CharField(default='None', max_length=42),
        ),
        migrations.AddField(
            model_name='account',
            name='profits',
            field=models.CharField(default='0.00', max_length=42),
        ),
        migrations.AlterField(
            model_name='account',
            name='country_code',
            field=models.CharField(default='NO', max_length=2),
        ),
        migrations.AlterField(
            model_name='account',
            name='current_balance',
            field=models.CharField(default='0.00', max_length=10),
        ),
        migrations.AlterField(
            model_name='account',
            name='interest_rate',
            field=models.CharField(default='0.01', max_length=10),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='deposit_date',
            field=models.CharField(default='NEED TO FORMAT DATE', max_length=10),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='wallet',
            field=models.CharField(default='MISSING WALLET', max_length=42),
        ),
    ]
| [
"django.db.models.CharField"
] | [((336, 399), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""NEED TO FORMAT EMAIL"""', 'max_length': '(42)'}), "(default='NEED TO FORMAT EMAIL', max_length=42)\n", (352, 399), False, 'from django.db import migrations, models\n'), ((518, 565), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""None"""', 'max_length': '(42)'}), "(default='None', max_length=42)\n", (534, 565), False, 'from django.db import migrations, models\n'), ((687, 734), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""0.00"""', 'max_length': '(42)'}), "(default='0.00', max_length=42)\n", (703, 734), False, 'from django.db import migrations, models\n'), ((863, 907), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""NO"""', 'max_length': '(2)'}), "(default='NO', max_length=2)\n", (879, 907), False, 'from django.db import migrations, models\n'), ((1039, 1086), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""0.00"""', 'max_length': '(10)'}), "(default='0.00', max_length=10)\n", (1055, 1086), False, 'from django.db import migrations, models\n'), ((1216, 1263), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""0.01"""', 'max_length': '(10)'}), "(default='0.01', max_length=10)\n", (1232, 1263), False, 'from django.db import migrations, models\n'), ((1396, 1458), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""NEED TO FORMAT DATE"""', 'max_length': '(10)'}), "(default='NEED TO FORMAT DATE', max_length=10)\n", (1412, 1458), False, 'from django.db import migrations, models\n'), ((1585, 1642), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""MISSING WALLET"""', 'max_length': '(42)'}), "(default='MISSING WALLET', max_length=42)\n", (1601, 1642), False, 'from django.db import migrations, models\n')] |
"""
Implements MissSVM
"""
from __future__ import print_function, division
import numpy as np
import scipy.sparse as sp
from random import uniform
import inspect
from misvm.quadprog import IterativeQP, Objective
from misvm.util import BagSplitter, spdiag, slices
from misvm.kernel import by_name as kernel_by_name
from misvm.mica import MICA
from misvm.cccp import CCCP
class MissSVM(MICA):
    """
    Semi-supervised learning applied to MI data (Zhou & Xu 2007)
    """

    def __init__(self, alpha=1e4, **kwargs):
        """
        @param kernel : the desired kernel function; can be linear, quadratic,
                        polynomial, or rbf [default: linear]
        @param C : the loss/regularization tradeoff constant [default: 1.0]
        @param scale_C : if True [default], scale C by the number of examples
        @param p : polynomial degree when a 'polynomial' kernel is used
                   [default: 3]
        @param gamma : RBF scale parameter when an 'rbf' kernel is used
                       [default: 1.0]
        @param verbose : print optimization status messages [default: True]
        @param sv_cutoff : the numerical cutoff for an example to be considered
                           a support vector [default: 1e-7]
        @param restarts : the number of random restarts [default: 0]
        @param max_iters : the maximum number of iterations in the outer loop of
                           the optimization procedure [default: 50]
        @param alpha : the softmax parameter [default: 1e4]
        """
        self.alpha = alpha
        super(MissSVM, self).__init__(**kwargs)
        self._bags = None
        self._sv_bags = None
        self._bag_predictions = None

    def fit(self, bags, y):
        """
        @param bags : a sequence of n bags; each bag is an m-by-k array-like
                      object containing m instances with k features
        @param y : an array-like object of length n containing -1/+1 labels
        """
        # Bug fix: materialize the map. On Python 3, map() returns a one-shot
        # iterator, which broke len(self._bags) below (scale_C) and left
        # nothing for the final self.predict(self._bags) call.
        self._bags = list(map(np.asmatrix, bags))
        bs = BagSplitter(self._bags,
                         np.asmatrix(y).reshape((-1, 1)))
        # Positive instances appear three times (for the three constraint
        # groups of the MissSVM formulation), negatives once.
        self._X = np.vstack([bs.pos_instances,
                            bs.pos_instances,
                            bs.pos_instances,
                            bs.neg_instances])
        self._y = np.vstack([np.matrix(np.ones((bs.X_p + bs.L_p, 1))),
                            -np.matrix(np.ones((bs.L_p + bs.L_n, 1)))])
        if self.scale_C:
            C = self.C / float(len(self._bags))
        else:
            C = self.C

        # Setup SVM and adjust constraints
        _, _, f, A, b, lb, ub = self._setup_svm(self._y, self._y, C)
        ub[:bs.X_p] *= (float(bs.L_n) / float(bs.X_p))
        ub[bs.X_p: bs.X_p + 2 * bs.L_p] *= (float(bs.L_n) / float(bs.L_p))
        K = kernel_by_name(self.kernel, gamma=self.gamma, p=self.p)(self._X, self._X)
        D = spdiag(self._y)
        ub0 = np.matrix(ub)
        ub0[bs.X_p: bs.X_p + 2 * bs.L_p] *= 0.5

        def get_V(pos_classifications):
            # Build the V selection matrix: softmin gradients over each
            # positive bag on top, identity for the remaining rows.
            eye_n = bs.L_n + 2 * bs.L_p
            top = np.zeros((bs.X_p, bs.L_p))
            for row, (i, j) in enumerate(slices(bs.pos_groups)):
                top[row, i:j] = _grad_softmin(-pos_classifications[i:j], self.alpha).flat
            return sp.bmat([[sp.coo_matrix(top), None],
                            [None, sp.eye(eye_n, eye_n)]])

        V0 = get_V(np.matrix(np.zeros((bs.L_p, 1))))

        qp = IterativeQP(D * V0 * K * V0.T * D, f, A, b, lb, ub0)

        best_obj = float('inf')
        best_svm = None
        for rr in range(self.restarts + 1):
            if rr == 0:
                if self.verbose:
                    print('Non-random start...')
                # Train on instances
                alphas, obj = qp.solve(self.verbose)
            else:
                if self.verbose:
                    print('Random restart %d of %d...' % (rr, self.restarts))
                alphas = np.matrix([uniform(0.0, 1.0) for i in range(len(lb))]).T
                obj = Objective(0.0, 0.0)
            svm = MICA(kernel=self.kernel, gamma=self.gamma, p=self.p,
                       verbose=self.verbose, sv_cutoff=self.sv_cutoff)
            svm._X = self._X
            svm._y = self._y
            svm._V = V0
            svm._alphas = alphas
            svm._objective = obj
            svm._compute_separator(K)
            svm._K = K

            class missCCCP(CCCP):
                # Concave-convex procedure specialized for the MissSVM
                # constraints; closes over bs, qp, D, K, ub, get_V above.

                def bailout(cself, svm, obj_val):
                    return svm

                def iterate(cself, svm, obj_val):
                    cself.mention('Linearizing constraints...')
                    classifications = svm._predictions[bs.X_p: bs.X_p + bs.L_p]
                    V = get_V(classifications)

                    cself.mention('Computing slacks...')
                    # Difference is [1 - y_i*(w*phi(x_i) + b)]
                    pos_differences = 1.0 - classifications
                    neg_differences = 1.0 + classifications
                    # Slacks are positive differences only
                    pos_slacks = np.multiply(pos_differences > 0, pos_differences)
                    neg_slacks = np.multiply(neg_differences > 0, neg_differences)
                    all_slacks = np.hstack([pos_slacks, neg_slacks])

                    cself.mention('Linearizing...')
                    # Compute gradient across pairs
                    slack_grads = np.vstack([_grad_softmin(pair, self.alpha)
                                           for pair in all_slacks])
                    # Stack results into one column
                    slack_grads = np.vstack([np.ones((bs.X_p, 1)),
                                           slack_grads[:, 0],
                                           slack_grads[:, 1],
                                           np.ones((bs.L_n, 1))])
                    # Update QP
                    qp.update_H(D * V * K * V.T * D)
                    qp.update_ub(np.multiply(ub, slack_grads))
                    # Re-solve
                    cself.mention('Solving QP...')
                    alphas, obj = qp.solve(self.verbose)

                    new_svm = MICA(kernel=self.kernel, gamma=self.gamma, p=self.p,
                                   verbose=self.verbose, sv_cutoff=self.sv_cutoff)
                    new_svm._X = self._X
                    new_svm._y = self._y
                    new_svm._V = V
                    new_svm._alphas = alphas
                    new_svm._objective = obj
                    new_svm._compute_separator(K)
                    new_svm._K = K

                    if cself.check_tolerance(obj_val, obj):
                        return None, new_svm

                    return {'svm': new_svm, 'obj_val': obj}, None

            cccp = missCCCP(verbose=self.verbose, svm=svm, obj_val=None,
                            max_iters=self.max_iters)
            svm = cccp.solve()
            if svm is not None:
                obj = float(svm._objective)
                if obj < best_obj:
                    best_svm = svm
                    best_obj = obj

        if best_svm is not None:
            self._V = best_svm._V
            self._alphas = best_svm._alphas
            self._objective = best_svm._objective
            self._compute_separator(best_svm._K)
            self._bag_predictions = self.predict(self._bags)

    def get_params(self, deep=True):
        """Return estimator parameters (scikit-learn style)."""
        super_args = super(MissSVM, self).get_params()
        # inspect.getargspec was removed in Python 3.11; prefer
        # getfullargspec where available, falling back for old interpreters.
        if hasattr(inspect, 'getfullargspec'):
            args = list(inspect.getfullargspec(MissSVM.__init__).args)
        else:
            args, _, _, _ = inspect.getargspec(MissSVM.__init__)
        args.pop(0)  # drop 'self'
        super_args.update({key: getattr(self, key, None) for key in args})
        return super_args
def _grad_softmin(x, alpha=1e4):
"""
Computes the gradient of min function,
taken from gradient of softmin as
alpha goes to infinity. It is:
0 if x_i != min(x), or
1/n if x_i is one of the n
elements equal to min(x)
"""
grad = np.matrix(np.zeros(x.shape))
minimizers = (x == min(x.flat))
n = float(np.sum(minimizers))
grad[np.nonzero(minimizers)] = 1.0 / n
return grad
| [
"misvm.quadprog.IterativeQP",
"numpy.hstack",
"numpy.asmatrix",
"numpy.multiply",
"misvm.util.spdiag",
"misvm.quadprog.Objective",
"scipy.sparse.eye",
"numpy.vstack",
"misvm.mica.MICA",
"scipy.sparse.coo_matrix",
"misvm.util.slices",
"misvm.kernel.by_name",
"random.uniform",
"numpy.ones",
... | [((2143, 2231), 'numpy.vstack', 'np.vstack', (['[bs.pos_instances, bs.pos_instances, bs.pos_instances, bs.neg_instances]'], {}), '([bs.pos_instances, bs.pos_instances, bs.pos_instances, bs.\n neg_instances])\n', (2152, 2231), True, 'import numpy as np\n'), ((2909, 2924), 'misvm.util.spdiag', 'spdiag', (['self._y'], {}), '(self._y)\n', (2915, 2924), False, 'from misvm.util import BagSplitter, spdiag, slices\n'), ((2939, 2952), 'numpy.matrix', 'np.matrix', (['ub'], {}), '(ub)\n', (2948, 2952), True, 'import numpy as np\n'), ((3465, 3517), 'misvm.quadprog.IterativeQP', 'IterativeQP', (['(D * V0 * K * V0.T * D)', 'f', 'A', 'b', 'lb', 'ub0'], {}), '(D * V0 * K * V0.T * D, f, A, b, lb, ub0)\n', (3476, 3517), False, 'from misvm.quadprog import IterativeQP, Objective\n'), ((7528, 7564), 'inspect.getargspec', 'inspect.getargspec', (['MissSVM.__init__'], {}), '(MissSVM.__init__)\n', (7546, 7564), False, 'import inspect\n'), ((7967, 7984), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (7975, 7984), True, 'import numpy as np\n'), ((8036, 8054), 'numpy.sum', 'np.sum', (['minimizers'], {}), '(minimizers)\n', (8042, 8054), True, 'import numpy as np\n'), ((8065, 8087), 'numpy.nonzero', 'np.nonzero', (['minimizers'], {}), '(minimizers)\n', (8075, 8087), True, 'import numpy as np\n'), ((2823, 2878), 'misvm.kernel.by_name', 'kernel_by_name', (['self.kernel'], {'gamma': 'self.gamma', 'p': 'self.p'}), '(self.kernel, gamma=self.gamma, p=self.p)\n', (2837, 2878), True, 'from misvm.kernel import by_name as kernel_by_name\n'), ((3100, 3126), 'numpy.zeros', 'np.zeros', (['(bs.X_p, bs.L_p)'], {}), '((bs.X_p, bs.L_p))\n', (3108, 3126), True, 'import numpy as np\n'), ((4086, 4190), 'misvm.mica.MICA', 'MICA', ([], {'kernel': 'self.kernel', 'gamma': 'self.gamma', 'p': 'self.p', 'verbose': 'self.verbose', 'sv_cutoff': 'self.sv_cutoff'}), '(kernel=self.kernel, gamma=self.gamma, p=self.p, verbose=self.verbose,\n sv_cutoff=self.sv_cutoff)\n', (4090, 4190), False, 'from 
misvm.mica import MICA\n'), ((3168, 3189), 'misvm.util.slices', 'slices', (['bs.pos_groups'], {}), '(bs.pos_groups)\n', (3174, 3189), False, 'from misvm.util import BagSplitter, spdiag, slices\n'), ((3427, 3448), 'numpy.zeros', 'np.zeros', (['(bs.L_p, 1)'], {}), '((bs.L_p, 1))\n', (3435, 3448), True, 'import numpy as np\n'), ((4048, 4067), 'misvm.quadprog.Objective', 'Objective', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (4057, 4067), False, 'from misvm.quadprog import IterativeQP, Objective\n'), ((2092, 2106), 'numpy.asmatrix', 'np.asmatrix', (['y'], {}), '(y)\n', (2103, 2106), True, 'import numpy as np\n'), ((2353, 2382), 'numpy.ones', 'np.ones', (['(bs.X_p + bs.L_p, 1)'], {}), '((bs.X_p + bs.L_p, 1))\n', (2360, 2382), True, 'import numpy as np\n'), ((5111, 5160), 'numpy.multiply', 'np.multiply', (['(pos_differences > 0)', 'pos_differences'], {}), '(pos_differences > 0, pos_differences)\n', (5122, 5160), True, 'import numpy as np\n'), ((5194, 5243), 'numpy.multiply', 'np.multiply', (['(neg_differences > 0)', 'neg_differences'], {}), '(neg_differences > 0, neg_differences)\n', (5205, 5243), True, 'import numpy as np\n'), ((5277, 5312), 'numpy.hstack', 'np.hstack', (['[pos_slacks, neg_slacks]'], {}), '([pos_slacks, neg_slacks])\n', (5286, 5312), True, 'import numpy as np\n'), ((6198, 6302), 'misvm.mica.MICA', 'MICA', ([], {'kernel': 'self.kernel', 'gamma': 'self.gamma', 'p': 'self.p', 'verbose': 'self.verbose', 'sv_cutoff': 'self.sv_cutoff'}), '(kernel=self.kernel, gamma=self.gamma, p=self.p, verbose=self.verbose,\n sv_cutoff=self.sv_cutoff)\n', (6202, 6302), False, 'from misvm.mica import MICA\n'), ((2425, 2454), 'numpy.ones', 'np.ones', (['(bs.L_p + bs.L_n, 1)'], {}), '((bs.L_p + bs.L_n, 1))\n', (2432, 2454), True, 'import numpy as np\n'), ((3311, 3329), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['top'], {}), '(top)\n', (3324, 3329), True, 'import scipy.sparse as sp\n'), ((3373, 3393), 'scipy.sparse.eye', 'sp.eye', (['eye_n', 'eye_n'], {}), '(eye_n, eye_n)\n', 
(3379, 3393), True, 'import scipy.sparse as sp\n'), ((5998, 6026), 'numpy.multiply', 'np.multiply', (['ub', 'slack_grads'], {}), '(ub, slack_grads)\n', (6009, 6026), True, 'import numpy as np\n'), ((3980, 3997), 'random.uniform', 'uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (3987, 3997), False, 'from random import uniform\n'), ((5662, 5682), 'numpy.ones', 'np.ones', (['(bs.X_p, 1)'], {}), '((bs.X_p, 1))\n', (5669, 5682), True, 'import numpy as np\n'), ((5857, 5877), 'numpy.ones', 'np.ones', (['(bs.L_n, 1)'], {}), '((bs.L_n, 1))\n', (5864, 5877), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 3 21:28:05 2016
@author: thasegawa
"""
import logging
# ============================================================================
class VehicleCounter(object):
    """Counts vehicles crossing a divider line within frames of a given shape."""

    def __init__(self, shape, divider):
        """
        @param shape: (height, width) of the processed frames
        @param divider: coordinate of the counting line
        """
        height, width = shape
        self.log = logging.getLogger("vehicle_counter")
        self.height = height
        self.width = width
        self.divider = divider
        self.vehicle_count = 0

    def update_count(self, matches, output_image=None):
        """Record the given matches (currently only logs how many arrived)."""
        self.log.debug("Updating count using %d matches...", len(matches))
# ============================================================================
| [
"logging.getLogger"
] | [((274, 310), 'logging.getLogger', 'logging.getLogger', (['"""vehicle_counter"""'], {}), "('vehicle_counter')\n", (291, 310), False, 'import logging\n')] |
import json
import datetime
import muffin
from bson import ObjectId
from aiohttp.web import json_response
from motor.motor_asyncio import AsyncIOMotorClient
from functools import partial
from umongo import Instance, Document, fields, ValidationError, set_gettext
from umongo.marshmallow_bonus import SchemaFromUmongo
import logging
# Verbose logging for this demo; real deployments should configure handlers.
logging.basicConfig(level=logging.DEBUG)

app = muffin.Application(__name__,
                         PLUGINS=(
                             'muffin_babel',
                         ),
                         BABEL_LOCALES_DIRS=['translations']
                         )

# Motor client on default host/port; all documents live in `demo_umongo`.
db = AsyncIOMotorClient()['demo_umongo']
instance = Instance(db)
# Route umongo's validation messages through muffin-babel for i18n.
set_gettext(app.ps.babel.gettext)
@app.ps.babel.locale_selector
def set_locale(request):
    """Get locale based on the request's Accept-Language header."""
    # muffin-babel calls this once per request to pick the translation.
    return app.ps.babel.select_locale_by_request(request)
class MongoJsonEncoder(json.JSONEncoder):
    """JSON encoder that also serializes datetimes (ISO-8601) and
    MongoDB ObjectIds (hex string)."""

    def default(self, obj):
        if isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.isoformat()
        if isinstance(obj, ObjectId):
            return str(obj)
        # Anything else falls back to the stock encoder (raises TypeError).
        return json.JSONEncoder.default(self, obj)
def jsonify(request, *args, **kwargs):
    """
    jsonify with support for MongoDB ObjectId
    """
    # Serialize through MongoJsonEncoder so ObjectId/datetime values survive.
    encoder_dumps = partial(json.dumps, cls=MongoJsonEncoder, indent=True)
    payload = dict(*args, **kwargs)
    return json_response(payload, dumps=encoder_dumps)
@instance.register
class User(Document):
    """User document persisted through umongo/Motor."""

    # Login handle; unique index is created by ensure_indexes().
    nick = fields.StrField(required=True, unique=True)
    firstname = fields.StrField()
    lastname = fields.StrField()
    birthday = fields.DateTimeField()
    password = fields.StrField()  # Don't store it in clear in real life !
async def populate_db():
    """Drop the user collection and re-seed it with the demo fixtures."""
    await User.collection.drop()
    await User.ensure_indexes()
    for data in [
        {
            'nick': 'mze', 'lastname': 'Mao', 'firstname': 'Zedong',
            'birthday': datetime.datetime(1893, 12, 26),
            'password': '<PASSWORD>'
        },
        {
            'nick': 'lsh', 'lastname': 'Liu', 'firstname': 'Shaoqi',
            'birthday': datetime.datetime(1898, 11, 24),
            'password': '<PASSWORD>'
        },
        {
            'nick': 'lxia', 'lastname': 'Li', 'firstname': 'Xiannian',
            'birthday': datetime.datetime(1909, 6, 23),
            'password': '<PASSWORD>'
        },
        {
            'nick': 'ysh', 'lastname': 'Yang', 'firstname': 'Shangkun',
            'birthday': datetime.datetime(1907, 7, 5),
            'password': '<PASSWORD>'
        },
        {
            'nick': 'jze', 'lastname': 'Jiang', 'firstname': 'Zemin',
            'birthday': datetime.datetime(1926, 8, 17),
            'password': '<PASSWORD>'
        },
        {
            'nick': 'huji', 'lastname': 'Hu', 'firstname': 'Jintao',
            'birthday': datetime.datetime(1942, 12, 21),
            'password': '<PASSWORD>'
        },
        {
            'nick': 'xiji', 'lastname': 'Xi', 'firstname': 'Jinping',
            'birthday': datetime.datetime(1953, 6, 15),
            'password': '<PASSWORD>'
        }
    ]:
        await User(**data).commit()
# Create a custom marshmallow schema from User document in order to avoid some fields
class UserNoPassSchema(User.schema.as_marshmallow_schema()):

    class Meta:
        # Keep the password out of both dumps and loads.
        read_only = ('password',)
        load_only = ('password',)

no_pass_schema = UserNoPassSchema()


def dump_user_no_pass(u):
    # Serialize a User without ever exposing the password field.
    return no_pass_schema.dump(u).data
@app.register('/', methods=['GET'])
async def root(request):
    """GET / — plain HTML index describing the available routes."""
    return """<h1>Umongo flask example</h1>
<br>
<h3>routes:</h3><br>
<ul>
<li><a href="/users">GET /users</a></li>
<li>POST /users</li>
<li>GET /users/<nick_or_id></li>
<li>PATCH /users/<nick_or_id></li>
<li>PUT /users/<nick_or_id>/password</li>
</ul>
"""
def _to_objid(data):
try:
return ObjectId(data)
except Exception:
return None
def _nick_or_id_lookup(nick_or_id):
    """Mongo filter matching the value either as a nick or as an ObjectId."""
    as_id = _to_objid(nick_or_id)
    return {'$or': [{'nick': nick_or_id}, {'_id': as_id}]}
def build_error(status=400, msg=None):
    """Build a JSON error response; a 404 with no message gets 'Not found'."""
    message = msg
    if not message and status == 404:
        message = 'Not found'
    return json_response({'message': message}, status=status)
@app.register('/users/{nick_or_id}', methods=['GET'])
async def get_user(request):
    """GET /users/<nick_or_id> — fetch one user (password never included)."""
    nick_or_id = request.match_info['nick_or_id']
    found = await User.find_one(_nick_or_id_lookup(nick_or_id))
    if not found:
        return build_error(404)
    return jsonify(request, dump_user_no_pass(found))
@app.register('/users/{nick_or_id}', methods=['PATCH'])
async def update_user(request):
    """PATCH /users/<nick_or_id> — partial update of a user.

    `nick` and `password` are treated as read-only here; password changes
    go through the dedicated /password endpoint.
    """
    nick_or_id = request.match_info['nick_or_id']
    payload = await request.json()
    if payload is None:
        return build_error(400, 'Request body must be json with Content-type: application/json')
    user = await User.find_one(_nick_or_id_lookup(nick_or_id))
    if not user:
        return build_error(404)
    # Define a custom schema from the default one to ignore read-only fields
    UserUpdateSchema = User.Schema.as_marshmallow_schema(params={
        'password': {'dump_only': True},
        'nick': {'dump_only': True}
    })()
    # with `strict`, marshmallow raise ValidationError if something is wrong
    schema = UserUpdateSchema(strict=True)
    try:
        data, _ = schema.load(payload)
        user.update(data)
        await user.commit()
    except ValidationError as ve:
        return build_error(400, ve.args[0])
    return jsonify(request, dump_user_no_pass(user))
@app.register('/users/{nick_or_id}', methods=['DELETE'])
async def delete_user(request):
    """DELETE /users/<nick_or_id> — remove the matching user document."""
    nick_or_id = request.match_info['nick_or_id']
    target = await User.find_one(_nick_or_id_lookup(nick_or_id))
    if not target:
        return build_error(404)
    try:
        await target.remove()
    except ValidationError as ve:
        return build_error(400, ve.args[0])
    return 'Ok'
@app.register('/users/{nick_or_id}/password', methods=['PUT'])
async def change_password_user(request):
    """PUT /users/<nick_or_id>/password — set a new password for the user."""
    nick_or_id = request.match_info['nick_or_id']
    payload = await request.json()
    if payload is None:
        return build_error(400, 'Request body must be json with Content-type: application/json')
    user = await User.find_one(_nick_or_id_lookup(nick_or_id))
    if not user:
        return build_error(404, 'Not found')
    # Use a field from our document to create a marshmallow schema
    # Note that we use `SchemaFromUmongo` to get unknown fields check on
    # deserialization and skip missing fields instead of returning None
    class ChangePasswordSchema(SchemaFromUmongo):
        password = User.schema.fields['password'].as_marshmallow_field(params={'required': True})

    # with `strict`, marshmallow raises a ValidationError if something is wrong
    schema = ChangePasswordSchema(strict=True)
    try:
        data, _ = schema.load(payload)
        user.password = data['password']
        await user.commit()
    except ValidationError as ve:
        return build_error(400, ve.args[0])
    return jsonify(request, dump_user_no_pass(user))
@app.register('/users', methods=['GET'])
async def list_users(request):
    """GET /users — paginated user listing (10 per page, `page` query arg)."""
    # NOTE(review): `page` is not validated; page <= 0 yields a negative
    # mongo skip — consider clamping to a minimum of 1.
    page = int(request.GET.get('page', 1))
    per_page = 10
    cursor = User.find(limit=per_page, skip=(page - 1) * per_page)
    return jsonify(request, {
        '_total': (await cursor.count()),
        '_page': page,
        '_per_page': per_page,
        '_items': [dump_user_no_pass(u) for u in (await cursor.to_list(per_page))]
    })
@app.register('/users', methods=['POST'])
async def create_user(request):
    """Create a new user from the JSON request body."""
    payload = await request.json()
    if payload is None:
        return build_error(400, 'Request body must be json with Content-type: application/json')
    try:
        new_user = User(**payload)
        await new_user.commit()
    except ValidationError as ve:
        return build_error(400, ve.args[0])
    return jsonify(request, dump_user_no_pass(new_user))
if __name__ == '__main__':
    # Script entry point: populate the DB, start the app's plugins,
    # then serve over HTTP on port 5000.
    import asyncio
    loop = asyncio.get_event_loop()
    # populate_db is presumably defined earlier in this file and seeds
    # example documents — confirm against the full module.
    loop.run_until_complete(populate_db())
    # Needed to bootstrap plugins
    loop.run_until_complete(app.start())
    from aiohttp import web
    web.run_app(app, port=5000)
| [
"logging.basicConfig",
"aiohttp.web.run_app",
"datetime.datetime",
"muffin.Application",
"json.JSONEncoder.default",
"motor.motor_asyncio.AsyncIOMotorClient",
"umongo.set_gettext",
"asyncio.get_event_loop",
"umongo.Instance",
"functools.partial",
"aiohttp.web.json_response",
"bson.ObjectId",
... | [((334, 374), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (353, 374), False, 'import logging\n'), ((383, 480), 'muffin.Application', 'muffin.Application', (['__name__'], {'PLUGINS': "('muffin_babel',)", 'BABEL_LOCALES_DIRS': "['translations']"}), "(__name__, PLUGINS=('muffin_babel',), BABEL_LOCALES_DIRS=\n ['translations'])\n", (401, 480), False, 'import muffin\n'), ((551, 563), 'umongo.Instance', 'Instance', (['db'], {}), '(db)\n', (559, 563), False, 'from umongo import Instance, Document, fields, ValidationError, set_gettext\n'), ((566, 599), 'umongo.set_gettext', 'set_gettext', (['app.ps.babel.gettext'], {}), '(app.ps.babel.gettext)\n', (577, 599), False, 'from umongo import Instance, Document, fields, ValidationError, set_gettext\n'), ((504, 524), 'motor.motor_asyncio.AsyncIOMotorClient', 'AsyncIOMotorClient', ([], {}), '()\n', (522, 524), False, 'from motor.motor_asyncio import AsyncIOMotorClient\n'), ((1181, 1235), 'functools.partial', 'partial', (['json.dumps'], {'cls': 'MongoJsonEncoder', 'indent': '(True)'}), '(json.dumps, cls=MongoJsonEncoder, indent=True)\n', (1188, 1235), False, 'from functools import partial\n'), ((1351, 1394), 'umongo.fields.StrField', 'fields.StrField', ([], {'required': '(True)', 'unique': '(True)'}), '(required=True, unique=True)\n', (1366, 1394), False, 'from umongo import Instance, Document, fields, ValidationError, set_gettext\n'), ((1411, 1428), 'umongo.fields.StrField', 'fields.StrField', ([], {}), '()\n', (1426, 1428), False, 'from umongo import Instance, Document, fields, ValidationError, set_gettext\n'), ((1444, 1461), 'umongo.fields.StrField', 'fields.StrField', ([], {}), '()\n', (1459, 1461), False, 'from umongo import Instance, Document, fields, ValidationError, set_gettext\n'), ((1477, 1499), 'umongo.fields.DateTimeField', 'fields.DateTimeField', ([], {}), '()\n', (1497, 1499), False, 'from umongo import Instance, Document, fields, ValidationError, 
set_gettext\n'), ((1515, 1532), 'umongo.fields.StrField', 'fields.StrField', ([], {}), '()\n', (1530, 1532), False, 'from umongo import Instance, Document, fields, ValidationError, set_gettext\n'), ((4029, 4075), 'aiohttp.web.json_response', 'json_response', (["{'message': msg}"], {'status': 'status'}), "({'message': msg}, status=status)\n", (4042, 4075), False, 'from aiohttp.web import json_response\n'), ((7831, 7855), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (7853, 7855), False, 'import asyncio\n'), ((8007, 8034), 'aiohttp.web.run_app', 'web.run_app', (['app'], {'port': '(5000)'}), '(app, port=5000)\n', (8018, 8034), False, 'from aiohttp import web\n'), ((1030, 1065), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (1054, 1065), False, 'import json\n'), ((3747, 3761), 'bson.ObjectId', 'ObjectId', (['data'], {}), '(data)\n', (3755, 3761), False, 'from bson import ObjectId\n'), ((1788, 1819), 'datetime.datetime', 'datetime.datetime', (['(1893)', '(12)', '(26)'], {}), '(1893, 12, 26)\n', (1805, 1819), False, 'import datetime\n'), ((1972, 2003), 'datetime.datetime', 'datetime.datetime', (['(1898)', '(11)', '(24)'], {}), '(1898, 11, 24)\n', (1989, 2003), False, 'import datetime\n'), ((2158, 2188), 'datetime.datetime', 'datetime.datetime', (['(1909)', '(6)', '(23)'], {}), '(1909, 6, 23)\n', (2175, 2188), False, 'import datetime\n'), ((2344, 2373), 'datetime.datetime', 'datetime.datetime', (['(1907)', '(7)', '(5)'], {}), '(1907, 7, 5)\n', (2361, 2373), False, 'import datetime\n'), ((2527, 2557), 'datetime.datetime', 'datetime.datetime', (['(1926)', '(8)', '(17)'], {}), '(1926, 8, 17)\n', (2544, 2557), False, 'import datetime\n'), ((2710, 2741), 'datetime.datetime', 'datetime.datetime', (['(1942)', '(12)', '(21)'], {}), '(1942, 12, 21)\n', (2727, 2741), False, 'import datetime\n'), ((2895, 2925), 'datetime.datetime', 'datetime.datetime', (['(1953)', '(6)', '(15)'], {}), '(1953, 6, 15)\n', (2912, 
2925), False, 'import datetime\n')] |
from bluebed import download
from bluebed import storage
import xmlrpc.client
def connect():
    """Return the anonymous DeepBlue user key and an XML-RPC proxy."""
    endpoint = "http://deepblue.mpi-inf.mpg.de/xmlrpc"
    server = xmlrpc.client.Server(endpoint, allow_none=True, encoding='UTF-8')
    return "anonymous_key", server
def get_t_cell_dhs(server, user_key):
    """List hg19 DNaseI peak experiments on T-cell-derived samples.

    Walks from the 'T cell' biosource to all related biosources, collects
    the samples annotated with any of them, then lists the matching
    DNaseI 'peaks' experiments.
    """
    _, biosources = server.get_biosource_related('T cell', user_key)
    biosource_names = server.extract_names(biosources)[1]
    _, sample_list = server.list_samples(biosource_names, {}, user_key)
    sample_ids = server.extract_ids(sample_list)[1]
    _, experiments = server.list_experiments(
        "hg19", "peaks", "DNaseI", None, sample_ids, None, None, user_key)
    return experiments
def main():
    """Download every T-cell DNaseI peak experiment into bluebed_dhs/."""
    user_key, server = connect()
    for exp_id, exp_name in get_t_cell_dhs(server, user_key):
        print('Downloading', exp_name)
        meta = download.experiment_metadata(exp_id, user_key, server)
        output_type = meta['extra_metadata']['output_type']
        if output_type != 'peaks':
            print('skipping {} due to output_type {}'.format(exp_id, output_type))
            continue
        bed = download.experiment(exp_id, user_key, server)
        out_dir = storage.ensure_out_dir(meta, base_dir='bluebed_dhs')
        storage.write_bed_and_meta(bed, meta, out_dir)
# NOTE(review): the message says 'bluebird' while the package is 'bluebed' —
# possible typo; also no call to main() is visible in this file. Confirm intent.
print('hello from bluebird.')
| [
"bluebed.download.experiment_metadata",
"bluebed.download.experiment",
"bluebed.storage.ensure_out_dir",
"bluebed.storage.write_bed_and_meta"
] | [((1123, 1177), 'bluebed.download.experiment_metadata', 'download.experiment_metadata', (['exp_id', 'user_key', 'server'], {}), '(exp_id, user_key, server)\n', (1151, 1177), False, 'from bluebed import download\n'), ((1387, 1432), 'bluebed.download.experiment', 'download.experiment', (['exp_id', 'user_key', 'server'], {}), '(exp_id, user_key, server)\n', (1406, 1432), False, 'from bluebed import download\n'), ((1451, 1503), 'bluebed.storage.ensure_out_dir', 'storage.ensure_out_dir', (['meta'], {'base_dir': '"""bluebed_dhs"""'}), "(meta, base_dir='bluebed_dhs')\n", (1473, 1503), False, 'from bluebed import storage\n'), ((1512, 1558), 'bluebed.storage.write_bed_and_meta', 'storage.write_bed_and_meta', (['bed', 'meta', 'out_dir'], {}), '(bed, meta, out_dir)\n', (1538, 1558), False, 'from bluebed import storage\n')] |
# Copyright 2014 SolidBuilds.com. All rights reserved
#
# Authors: <NAME> <<EMAIL>>
from flask import Blueprint, redirect, render_template
from flask import request, url_for
from flask_user import current_user, login_required, roles_required
from app import db
from app.models.user_models import UserProfileForm
# Blueprint for all book-related routes; templates resolve under ./templates.
book_blueprint = Blueprint('books', __name__, template_folder='templates')
@book_blueprint.route('/foo_x')
@login_required
def foo():
    """Debug endpoint: log a marker and render the foo page."""
    template = 'books/foo.html'
    print('in FOOOOOO!!!!!!')
    return render_template(template)
@book_blueprint.route('/all_books')
@login_required
def books():
    """List every book joined with its category, ordered by category.

    The two `description` columns are aliased so the template can tell
    the category description (c_description) from the book's
    (b_description).
    """
    # Removed commented-out debug code; keep the result rows in `rows`
    # instead of rebinding a name that shadows this function.
    rows = db.engine.execute(
        'SELECT Category.description AS c_description, '
        'Book.description AS b_description, * '
        'FROM Category INNER JOIN Book ON Category.rowID=Book.category_id '
        'ORDER BY c_description ASC')
    print('in all books')
    books = [dict(row) for row in rows]
    return render_template('books/all_books.html', books=books)
@book_blueprint.route('/seedDB')
@roles_required('admin')
def seedDB():
    """Wipe Book/Category and repopulate them with fixture rows (admin only)."""
    for row in db.engine.execute("SELECT * FROM users"):
        print(row['email'])
    db.engine.execute('DELETE FROM Book')
    db.engine.execute('DELETE FROM Category')
    # Insert the categories FIRST so the category_id values (1, 2) on the
    # books refer to rows that already exist (the original inserted books
    # before their categories).
    db.engine.execute('INSERT INTO Category (description) VALUES ("Horror")')
    db.engine.execute('INSERT INTO Category (description) VALUES ("Sociology")')
    book_inserts = [
        'INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","Frankenstein","1", "A horror story written by a romantic.","1")',
        'INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","The Turn of the Screw","2", "Another British horror story.","1")',
        'INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","The Protestant Work Ethic and The Spirit of Capitalism","3", "A classic early 20th C. sociology text","2")',
        'INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","Bowling Alone","4", "A classic late 20th C. sociology test","2")',
    ]
    for statement in book_inserts:
        db.engine.execute(statement)
    for row in db.engine.execute("SELECT * FROM Book"):
        print(row['title'])
    return '<h1>DB Seeded!</h1>'
@book_blueprint.route('/erase_DB')
@roles_required('admin')
def eraseDB():
    """Delete every Book and Category row (admin only)."""
    # The return values were previously bound to an unused local (sqlQ);
    # drop the pointless bindings.
    db.engine.execute('DELETE FROM Book')
    db.engine.execute('DELETE FROM Category')
    return '<h1>DB Erased!</h1>'
@book_blueprint.route('/addbook', methods={'GET','POST'})
@login_required
def addbook():
    """Render the add-book form (GET) or create a book from it (POST).

    The category is looked up by description and created on the fly when
    it does not exist yet.
    """
    if request.method == 'POST':
        author = request.form['author']
        title = request.form['title']
        isbn = request.form['isbn']
        description = request.form['description']
        category_field = request.form['category']
        # SECURITY: bind user-supplied values as parameters instead of
        # concatenating them into the SQL — the original string-built
        # statements were vulnerable to SQL injection.
        rows = db.engine.execute(
            "SELECT * FROM Category WHERE description = ?", (category_field,))
        rows = [dict(row) for row in rows]
        if len(rows) == 0:
            # Unknown category: create it, then fetch the generated id.
            db.engine.execute(
                "INSERT INTO Category (description) VALUES (?)", (category_field,))
            id_rows = db.engine.execute(
                "SELECT id FROM Category WHERE description = ?", (category_field,))
            id_rows = [dict(row) for row in id_rows]
            categoryID = id_rows[0]['id']
        else:
            categoryID = rows[0]['id']
        db.engine.execute(
            "INSERT INTO Book (author, title, isbn, description, category_id) "
            "VALUES (?, ?, ?, ?, ?)",
            (author, title, isbn, description, categoryID))
        return redirect(url_for('books.home_page'))
    categories = db.engine.execute('SELECT * FROM Category ORDER BY description ASC')
    return render_template('books/addbook.html', categories=categories)
@book_blueprint.route('/categories')
@login_required
def categories():
    """Show every category, alphabetically by description."""
    result = db.engine.execute('SELECT rowid, * FROM Category ORDER BY description ASC')
    category_rows = [dict(row) for row in result]
    for entry in category_rows:
        print(entry['id'])
    return render_template('books/categories.html', categories=category_rows)
@book_blueprint.route('/books_in_category/<categoryID>')
@login_required
def books_in_cat(categoryID):
    """List the books belonging to one category (addressed by rowid)."""
    # SECURITY: categoryID comes straight from the URL — bind it as a
    # parameter instead of concatenating it into the SQL (injection risk).
    cat_rows = db.engine.execute(
        "SELECT * FROM Category WHERE rowid = ?", (categoryID,))
    cat_rows = [dict(row) for row in cat_rows]
    categoryDescription = cat_rows[0]['description']
    book_rows = db.engine.execute(
        "SELECT * FROM Book WHERE category_id = ?", (categoryID,))
    books = [dict(row) for row in book_rows]
    for book in books:
        print('dddd')
    print('debug')
    return render_template('books/books_in_cat.html', books=books, categoryDescription=categoryDescription)
@book_blueprint.route('/sql', methods={'GET','POST'})
@roles_required('admin')
def sql():
    """Admin-only raw SQL console: run a statement and show its rows."""
    data = ""
    if request.method == 'POST':
        sqlField = request.form['sqlField']
        try:
            returnVar = db.engine.execute(sqlField)
            returnVar = [dict(row) for row in returnVar]
        # Catch Exception rather than a bare `except:`, which would also
        # swallow SystemExit/KeyboardInterrupt.
        except Exception:
            data = "An error occurred. . .look in the console"
        else:
            try:
                for row in returnVar:
                    print('')
                    print(type(row))
                    rowAsDict = dict(row)
                    print(type(rowAsDict))
                    data = data + "\n"
                    for key, value in rowAsDict.items():
                        print(key, ":", value)
                        data = data + key + ":" + str(value) + "\n"
            except Exception:
                data = "Data returned from sql was not iterable"
            return render_template('books/sql.html', data=data)
    return render_template('books/sql.html', data=data)
@book_blueprint.route('/tinker')
def tinker():
    """Scratch endpoint for experiments; returns a static confirmation."""
    message = '<h1>Tinker function executed, check console</h1>'
    return message
#Demos Jinja2 extends
@book_blueprint.route('/tink')
def tink():
    """Render the template demonstrating Jinja2 `extends`."""
    template_name = 'tink.html'
    return render_template(template_name)
# The Home page is accessible to anyone
@book_blueprint.route('/')
def home_page():
    """Public landing page — accessible without login."""
    template_name = 'books/index.html'
    return render_template(template_name)
# The Admin page requires an 'Admin' role.
@book_blueprint.route('/admin_books')
@roles_required('admin')  # only users holding the 'admin' role may enter
def admin_books():
    """Admin-only book management page."""
    template_name = 'books/admin_books.html'
    return render_template(template_name)
@book_blueprint.context_processor
def example():
    """Inject `myexample` into every template rendered by this blueprint."""
    return {'myexample': 'This is an example'}
| [
"flask.render_template",
"flask_user.roles_required",
"flask.url_for",
"app.db.engine.execute",
"flask.Blueprint"
] | [((334, 391), 'flask.Blueprint', 'Blueprint', (['"""books"""', '__name__'], {'template_folder': '"""templates"""'}), "('books', __name__, template_folder='templates')\n", (343, 391), False, 'from flask import Blueprint, redirect, render_template\n'), ((993, 1016), 'flask_user.roles_required', 'roles_required', (['"""admin"""'], {}), "('admin')\n", (1007, 1016), False, 'from flask_user import current_user, login_required, roles_required\n'), ((2358, 2381), 'flask_user.roles_required', 'roles_required', (['"""admin"""'], {}), "('admin')\n", (2372, 2381), False, 'from flask_user import current_user, login_required, roles_required\n'), ((5246, 5269), 'flask_user.roles_required', 'roles_required', (['"""admin"""'], {}), "('admin')\n", (5260, 5269), False, 'from flask_user import current_user, login_required, roles_required\n'), ((6632, 6655), 'flask_user.roles_required', 'roles_required', (['"""admin"""'], {}), "('admin')\n", (6646, 6655), False, 'from flask_user import current_user, login_required, roles_required\n'), ((494, 527), 'flask.render_template', 'render_template', (['"""books/foo.html"""'], {}), "('books/foo.html')\n", (509, 527), False, 'from flask import Blueprint, redirect, render_template\n'), ((606, 812), 'app.db.engine.execute', 'db.engine.execute', (['"""SELECT Category.description AS c_description, Book.description AS b_description, * FROM Category INNER JOIN Book ON Category.rowID=Book.category_id ORDER BY c_description ASC"""'], {}), "(\n 'SELECT Category.description AS c_description, Book.description AS b_description, * FROM Category INNER JOIN Book ON Category.rowID=Book.category_id ORDER BY c_description ASC'\n )\n", (623, 812), False, 'from app import db\n'), ((905, 957), 'flask.render_template', 'render_template', (['"""books/all_books.html"""'], {'books': 'books'}), "('books/all_books.html', books=books)\n", (920, 957), False, 'from flask import Blueprint, redirect, render_template\n'), ((1045, 1085), 'app.db.engine.execute', 
'db.engine.execute', (['"""SELECT * FROM users"""'], {}), "('SELECT * FROM users')\n", (1062, 1085), False, 'from app import db\n'), ((1151, 1188), 'app.db.engine.execute', 'db.engine.execute', (['"""DELETE FROM Book"""'], {}), "('DELETE FROM Book')\n", (1168, 1188), False, 'from app import db\n'), ((1202, 1243), 'app.db.engine.execute', 'db.engine.execute', (['"""DELETE FROM Category"""'], {}), "('DELETE FROM Category')\n", (1219, 1243), False, 'from app import db\n'), ((1257, 1432), 'app.db.engine.execute', 'db.engine.execute', (['"""INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","Frankenstein","1", "A horror story written by a romantic.","1")"""'], {}), '(\n \'INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","Frankenstein","1", "A horror story written by a romantic.","1")\'\n )\n', (1274, 1432), False, 'from app import db\n'), ((1436, 1612), 'app.db.engine.execute', 'db.engine.execute', (['"""INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","The Turn of the Screw","2", "Another British horror story.","1")"""'], {}), '(\n \'INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","The Turn of the Screw","2", "Another British horror story.","1")\'\n )\n', (1453, 1612), False, 'from app import db\n'), ((1616, 1834), 'app.db.engine.execute', 'db.engine.execute', (['"""INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","The Protestant Work Ethic and The Spirit of Capitalism","3", "A classic early 20th C. sociology text","2")"""'], {}), '(\n \'INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","The Protestant Work Ethic and The Spirit of Capitalism","3", "A classic early 20th C. 
sociology text","2")\'\n )\n', (1633, 1834), False, 'from app import db\n'), ((1838, 2014), 'app.db.engine.execute', 'db.engine.execute', (['"""INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","Bowling Alone","4", "A classic late 20th C. sociology test","2")"""'], {}), '(\n \'INSERT INTO Book (author,title,isbn, description, category_id) VALUES ("<NAME>","Bowling Alone","4", "A classic late 20th C. sociology test","2")\'\n )\n', (1855, 2014), False, 'from app import db\n'), ((2018, 2091), 'app.db.engine.execute', 'db.engine.execute', (['"""INSERT INTO Category (description) VALUES ("Horror")"""'], {}), '(\'INSERT INTO Category (description) VALUES ("Horror")\')\n', (2035, 2091), False, 'from app import db\n'), ((2105, 2181), 'app.db.engine.execute', 'db.engine.execute', (['"""INSERT INTO Category (description) VALUES ("Sociology")"""'], {}), '(\'INSERT INTO Category (description) VALUES ("Sociology")\')\n', (2122, 2181), False, 'from app import db\n'), ((2196, 2235), 'app.db.engine.execute', 'db.engine.execute', (['"""SELECT * FROM Book"""'], {}), "('SELECT * FROM Book')\n", (2213, 2235), False, 'from app import db\n'), ((2412, 2449), 'app.db.engine.execute', 'db.engine.execute', (['"""DELETE FROM Book"""'], {}), "('DELETE FROM Book')\n", (2429, 2449), False, 'from app import db\n'), ((2465, 2506), 'app.db.engine.execute', 'db.engine.execute', (['"""DELETE FROM Category"""'], {}), "('DELETE FROM Category')\n", (2482, 2506), False, 'from app import db\n'), ((4083, 4151), 'app.db.engine.execute', 'db.engine.execute', (['"""SELECT * FROM Category ORDER BY description ASC"""'], {}), "('SELECT * FROM Category ORDER BY description ASC')\n", (4100, 4151), False, 'from app import db\n'), ((4163, 4223), 'flask.render_template', 'render_template', (['"""books/addbook.html"""'], {'categories': 'categories'}), "('books/addbook.html', categories=categories)\n", (4178, 4223), False, 'from flask import Blueprint, redirect, render_template\n'), 
((4313, 4388), 'app.db.engine.execute', 'db.engine.execute', (['"""SELECT rowid, * FROM Category ORDER BY description ASC"""'], {}), "('SELECT rowid, * FROM Category ORDER BY description ASC')\n", (4330, 4388), False, 'from app import db\n'), ((4504, 4567), 'flask.render_template', 'render_template', (['"""books/categories.html"""'], {'categories': 'categories'}), "('books/categories.html', categories=categories)\n", (4519, 4567), False, 'from flask import Blueprint, redirect, render_template\n'), ((4751, 4773), 'app.db.engine.execute', 'db.engine.execute', (['sql'], {}), '(sql)\n', (4768, 4773), False, 'from app import db\n'), ((4954, 4976), 'app.db.engine.execute', 'db.engine.execute', (['sql'], {}), '(sql)\n', (4971, 4976), False, 'from app import db\n'), ((5093, 5194), 'flask.render_template', 'render_template', (['"""books/books_in_cat.html"""'], {'books': 'books', 'categoryDescription': 'categoryDescription'}), "('books/books_in_cat.html', books=books, categoryDescription\n =categoryDescription)\n", (5108, 5194), False, 'from flask import Blueprint, redirect, render_template\n'), ((6155, 6199), 'flask.render_template', 'render_template', (['"""books/sql.html"""'], {'data': 'data'}), "('books/sql.html', data=data)\n", (6170, 6199), False, 'from flask import Blueprint, redirect, render_template\n'), ((6387, 6415), 'flask.render_template', 'render_template', (['"""tink.html"""'], {}), "('tink.html')\n", (6402, 6415), False, 'from flask import Blueprint, redirect, render_template\n'), ((6512, 6547), 'flask.render_template', 'render_template', (['"""books/index.html"""'], {}), "('books/index.html')\n", (6527, 6547), False, 'from flask import Blueprint, redirect, render_template\n'), ((6724, 6765), 'flask.render_template', 'render_template', (['"""books/admin_books.html"""'], {}), "('books/admin_books.html')\n", (6739, 6765), False, 'from flask import Blueprint, redirect, render_template\n'), ((2986, 3008), 'app.db.engine.execute', 'db.engine.execute', (['sql'], 
{}), '(sql)\n', (3003, 3008), False, 'from app import db\n'), ((3988, 4010), 'app.db.engine.execute', 'db.engine.execute', (['sql'], {}), '(sql)\n', (4005, 4010), False, 'from app import db\n'), ((6099, 6143), 'flask.render_template', 'render_template', (['"""books/sql.html"""'], {'data': 'data'}), "('books/sql.html', data=data)\n", (6114, 6143), False, 'from flask import Blueprint, redirect, render_template\n'), ((3214, 3236), 'app.db.engine.execute', 'db.engine.execute', (['sql'], {}), '(sql)\n', (3231, 3236), False, 'from app import db\n'), ((3602, 3624), 'app.db.engine.execute', 'db.engine.execute', (['sql'], {}), '(sql)\n', (3619, 3624), False, 'from app import db\n'), ((4037, 4063), 'flask.url_for', 'url_for', (['"""books.home_page"""'], {}), "('books.home_page')\n", (4044, 4063), False, 'from flask import request, url_for\n'), ((5407, 5434), 'app.db.engine.execute', 'db.engine.execute', (['sqlField'], {}), '(sqlField)\n', (5424, 5434), False, 'from app import db\n')] |
import numpy as np
def trapezoidal_rule(f, a, b, tol=1e-8):
    """
    Composite trapezoidal rule with successive interval doubling.

    The trapezoidal rule is known to be very accurate for oscillatory
    integrals integrated over their period (spectral integration).

    Each refinement now reuses every previously computed sample: doubling
    the interval count only requires evaluating ``f`` at the midpoints of
    the old grid, via the classic relation
    ``T(h) = T(2h)/2 + h * sum(f(new midpoints))``.
    This removes the re-evaluation of the whole grid that the TODO in the
    original noted, roughly halving the total number of ``f`` calls.

    Parameters
    ----------
    f : callable
        Integrand; called with scalar arguments, may return complex values.
    a, b : float
        Integration limits.
    tol : float
        Convergence tolerance applied separately to the real and imaginary
        parts of the difference between successive refinements.

    Returns
    -------
    (result, re_err, im_err); (nan, nan, nan) when more than 100000
    points would be needed.
    """
    span = b - a
    res0 = 1e30
    # Coarsest estimate: just the two endpoints (one interval).
    res1 = 0.5 * span * (f(b) + f(a))
    intervals = 1
    delta_res = res0 - res1
    re_err = np.abs(np.real(delta_res))
    im_err = np.abs(np.imag(delta_res))
    while re_err > tol or im_err > tol:
        res0 = res1
        intervals *= 2
        h = span / intervals
        # The only new sample locations are the odd grid indices — the
        # midpoints of the previous (coarser) grid.
        midpoint_sum = 0
        for k in range(1, intervals, 2):
            midpoint_sum += f(a + k * h)
        res1 = 0.5 * res0 + h * midpoint_sum
        delta_res = res1 - res0
        re_err = np.abs(np.real(delta_res))
        im_err = np.abs(np.imag(delta_res))
        num = intervals + 1  # number of grid points, as in the original
        if num > 100000:
            print('Integral failed to converge with', num, 'points.')
            return np.nan, np.nan, np.nan
    return res1, re_err, im_err
"numpy.real",
"numpy.linspace",
"numpy.imag"
] | [((599, 617), 'numpy.real', 'np.real', (['delta_res'], {}), '(delta_res)\n', (606, 617), True, 'import numpy as np\n'), ((639, 657), 'numpy.imag', 'np.imag', (['delta_res'], {}), '(delta_res)\n', (646, 657), True, 'import numpy as np\n'), ((778, 804), 'numpy.linspace', 'np.linspace', (['a', 'b'], {'num': 'num'}), '(a, b, num=num)\n', (789, 804), True, 'import numpy as np\n'), ((1052, 1070), 'numpy.real', 'np.real', (['delta_res'], {}), '(delta_res)\n', (1059, 1070), True, 'import numpy as np\n'), ((1096, 1114), 'numpy.imag', 'np.imag', (['delta_res'], {}), '(delta_res)\n', (1103, 1114), True, 'import numpy as np\n')] |
from faunadb import query as q
from shop.fauna.client import FaunaClient
def get_orders(secret, after=None, before=None, size=5):
    """Return one page of documents from the 'orders' collection."""
    client = FaunaClient(secret=secret)
    order_page = q.paginate(
        q.documents(q.collection('orders')),
        size=size, after=after, before=before)
    return client.query(q.map_(lambda ref: q.get(ref), order_page))
def find_order(secret, order_ref):
    """Fetch one order plus its status history (via the
    'get_order_status_history' user-defined function)."""
    client = FaunaClient(secret=secret)
    bindings = {
        "order": q.get(q.ref(q.collection("orders"), order_ref)),
        "status_history": q.call("get_order_status_history",
                                q.select(["ref"], q.var("order")))
    }
    result_shape = {
        "ref": q.select(["ref"], q.var("order")),
        "data": q.merge(q.select(["data"], q.var("order")),
                        {"status_history": q.var("status_history")})
    }
    return client.query(q.let(bindings, result_shape))
def update_status(secret, order_ref, status):
    """Set an order's status, stamping the transition time server-side."""
    client = FaunaClient(secret=secret)
    target = q.ref(q.collection('orders'), order_ref)
    new_data = {"data": {
        "status": status,
        "statusAt": q.now()
    }}
    return client.query(q.update(target, new_data))
| [
"faunadb.query.now",
"shop.fauna.client.FaunaClient",
"faunadb.query.collection",
"faunadb.query.get",
"faunadb.query.var"
] | [((145, 171), 'shop.fauna.client.FaunaClient', 'FaunaClient', ([], {'secret': 'secret'}), '(secret=secret)\n', (156, 171), False, 'from shop.fauna.client import FaunaClient\n'), ((413, 439), 'shop.fauna.client.FaunaClient', 'FaunaClient', ([], {'secret': 'secret'}), '(secret=secret)\n', (424, 439), False, 'from shop.fauna.client import FaunaClient\n'), ((963, 989), 'shop.fauna.client.FaunaClient', 'FaunaClient', ([], {'secret': 'secret'}), '(secret=secret)\n', (974, 989), False, 'from shop.fauna.client import FaunaClient\n'), ((237, 247), 'faunadb.query.get', 'q.get', (['ref'], {}), '(ref)\n', (242, 247), True, 'from faunadb import query as q\n'), ((1051, 1073), 'faunadb.query.collection', 'q.collection', (['"""orders"""'], {}), "('orders')\n", (1063, 1073), True, 'from faunadb import query as q\n'), ((284, 306), 'faunadb.query.collection', 'q.collection', (['"""orders"""'], {}), "('orders')\n", (296, 306), True, 'from faunadb import query as q\n'), ((743, 757), 'faunadb.query.var', 'q.var', (['"""order"""'], {}), "('order')\n", (748, 757), True, 'from faunadb import query as q\n'), ((1172, 1179), 'faunadb.query.now', 'q.now', ([], {}), '()\n', (1177, 1179), True, 'from faunadb import query as q\n'), ((531, 553), 'faunadb.query.collection', 'q.collection', (['"""orders"""'], {}), "('orders')\n", (543, 553), True, 'from faunadb import query as q\n'), ((655, 669), 'faunadb.query.var', 'q.var', (['"""order"""'], {}), "('order')\n", (660, 669), True, 'from faunadb import query as q\n'), ((811, 825), 'faunadb.query.var', 'q.var', (['"""order"""'], {}), "('order')\n", (816, 825), True, 'from faunadb import query as q\n'), ((847, 870), 'faunadb.query.var', 'q.var', (['"""status_history"""'], {}), "('status_history')\n", (852, 870), True, 'from faunadb import query as q\n')] |
"""
January 13th 2020
Author T.Mizumoto
"""
#! python 3
# ver.x1.00
# Integral-Scale_function.py - this program calculate integral-scale and correlation.
import numpy as np
from scipy.integrate import simps
from scipy.stats import pearsonr
import pandas as pd
def fun_CrossCorr(data, index_basepoint=0):
    """Pearson cross-correlation of every column of `data` against one
    base column.

    Fixes: the comment above the original promised a default of 0 for
    `index_basepoint` but the signature had none — added (backward
    compatible); corrected the 'cross_corretion' local-name typo.

    Parameters
    ----------
    data : 2-D array, shape (samples, points)
        One measurement point per column.
    index_basepoint : int
        Column index of the reference point (default 0).

    Returns
    -------
    pandas.DataFrame with columns 'CrossCorrelation' and 'Pvalue',
    one row per column of `data`.
    """
    base = data[:, index_basepoint]
    cross_correlation = []
    p_values = []
    n_points = data.shape[1]
    for i in range(n_points):
        cc, p = pearsonr(base, data[:, i])
        cross_correlation.append(cc)
        p_values.append(p)
    df_CC = pd.DataFrame(columns=["CrossCorrelation", "Pvalue"])
    df_CC["CrossCorrelation"] = cross_correlation
    df_CC["Pvalue"] = p_values
    return df_CC
def fun_IntegralScale(correlation, distance):
    """Integrate the correlation curve up to its first zero crossing.

    The positive part of the curve is closed with a synthetic zero placed
    halfway between the first negative sample's abscissa and the next one,
    then integrated with Simpson's rule.
    """
    # Index of the first negative correlation value.
    negative_idx = np.where(correlation < 0)[0][0]
    corr_plus = list(correlation[:negative_idx])
    dis_plus = distance[:negative_idx]
    # Abscissa of the synthetic zero crossing.
    zero_point = (distance[negative_idx + 1] - distance[negative_idx]) / 2 + distance[negative_idx]
    corr_plus.append(0.0)
    dis_plus.append(zero_point)
    return simps(corr_plus, dis_plus)
if __name__ == "__main__":
    from graph import Graph
    import matplotlib.pyplot as plt
    # Load the measurements and the probe coordinates.
    data = np.loadtxt("HISTORY/z-traverse_2-1-0_MeasureData.txt")
    coord = np.loadtxt("HISTORY/z-traverse_2-1-0_Coordinate.txt")
    # Column boundaries of the nine traverses and their output labels.
    point = [0, 161, 322, 483, 644, 805, 966, 1127, 1288, 1449]
    name = ["X2-MVD", "X2-RMS1", "X2-RMS2", "X1-MVD", "X1-RMS1", "X1-RMS2", "X0-MVD", "X0-RMS1", "X0-RMS2"]
    IS_list = []
    for idx in range(9):
        start, end = point[idx], point[idx + 1]
        df_CC = fun_CrossCorr(data[:, start:end], 0)
        # z-traverse only: distances measured from the first probe position.
        z_axis = coord[start:end, 2]
        distance = [z - z_axis[0] for z in z_axis]
        IS = fun_IntegralScale(df_CC["CrossCorrelation"], distance)
        IS_list.append(IS)
        g = Graph()
        g.label = ["CrossCorrelation", "Pvalue"]
        g.line(distance, df_CC["CrossCorrelation"], 0)
        g.line(distance, df_CC["Pvalue"], 1)
        plt.legend(title="IS = " + str(IS))
        g.save_graph("graph/Z-traverse/" + name[idx])
    print(IS_list)
"numpy.where",
"scipy.integrate.simps",
"graph.Graph",
"scipy.stats.pearsonr",
"pandas.DataFrame",
"numpy.loadtxt"
] | [((660, 712), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['CrossCorrelation', 'Pvalue']"}), "(columns=['CrossCorrelation', 'Pvalue'])\n", (672, 712), True, 'import pandas as pd\n'), ((906, 931), 'numpy.where', 'np.where', (['(correlation < 0)'], {}), '(correlation < 0)\n', (914, 931), True, 'import numpy as np\n'), ((1270, 1296), 'scipy.integrate.simps', 'simps', (['corr_plus', 'dis_plus'], {}), '(corr_plus, dis_plus)\n', (1275, 1296), False, 'from scipy.integrate import simps\n'), ((1497, 1518), 'numpy.loadtxt', 'np.loadtxt', (['data_path'], {}), '(data_path)\n', (1507, 1518), True, 'import numpy as np\n'), ((1590, 1612), 'numpy.loadtxt', 'np.loadtxt', (['coord_path'], {}), '(coord_path)\n', (1600, 1612), True, 'import numpy as np\n'), ((565, 586), 'scipy.stats.pearsonr', 'pearsonr', (['alpha', 'line'], {}), '(alpha, line)\n', (573, 586), False, 'from scipy.stats import pearsonr\n'), ((2260, 2267), 'graph.Graph', 'Graph', ([], {}), '()\n', (2265, 2267), False, 'from graph import Graph\n')] |
#!/usr/bin/python
# encoding: utf-8
import sys
from subprocess import call
from workflow import Workflow, notify
from args import *
def main(wf):
    """Dispatch the first workflow argument to its action handler."""
    dispatch = {
        START_ARG: start_action,
        STOP_ARG: stop_action,
        BREAK_ARG: break_action,
    }
    dispatch[wf.args[0]]()
def start_action():
    """Notify the user, then launch the start-pomodoro AppleScript."""
    script = 'src/applescript/startPomo.scpt'
    notify.notify('Starting a pomodoro')
    run_script(script)
def stop_action():
    """Notify the user, then launch the stop AppleScript."""
    script = 'src/applescript/stopPomo.scpt'
    notify.notify('Stopping a pomodoro/break')
    run_script(script)
def break_action():
    """Notify the user, then launch the start-break AppleScript."""
    script = 'src/applescript/startBreak.scpt'
    notify.notify('Starting a break')
    run_script(script)
def run_script(filename):
    """Run an AppleScript file through `osascript` (blocking)."""
    command = ['osascript', filename]
    call(command)
if __name__ == '__main__':
    # `Workflow.run()` wraps `main` with exception catching, ARGV
    # normalization, magic arguments and the other helper behaviour.
    wf = Workflow()
    sys.exit(wf.run(main))
| [
"workflow.Workflow",
"workflow.notify.notify",
"subprocess.call"
] | [((355, 391), 'workflow.notify.notify', 'notify.notify', (['"""Starting a pomodoro"""'], {}), "('Starting a pomodoro')\n", (368, 391), False, 'from workflow import Workflow, notify\n'), ((465, 507), 'workflow.notify.notify', 'notify.notify', (['"""Stopping a pomodoro/break"""'], {}), "('Stopping a pomodoro/break')\n", (478, 507), False, 'from workflow import Workflow, notify\n'), ((581, 614), 'workflow.notify.notify', 'notify.notify', (['"""Starting a break"""'], {}), "('Starting a break')\n", (594, 614), False, 'from workflow import Workflow, notify\n'), ((696, 725), 'subprocess.call', 'call', (["['osascript', filename]"], {}), "(['osascript', filename])\n", (700, 725), False, 'from subprocess import call\n'), ((803, 813), 'workflow.Workflow', 'Workflow', ([], {}), '()\n', (811, 813), False, 'from workflow import Workflow, notify\n')] |
# Copyright (C) 2015-2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from opsrest.constants import\
REST_VERSION_PATH, OVSDB_SCHEMA_SYSTEM_URI, OVSDB_SCHEMA_CONFIG
from opsrest.exceptions import MethodNotAllowed, NotFound
from opsrest.patch import create_patch, apply_patch
from tornado import gen
class BaseController():
    """
    BaseController base controller class with generic
    CRUD operations.

    Subclasses override the CRUD coroutines below; the base
    implementations raise MethodNotAllowed so unsupported verbs are
    rejected uniformly.
    """
    def __init__(self, context=None):
        # REST path segment for this resource type; subclasses are expected
        # to set it (typically inside initialize()).
        self.base_uri_path = ""
        self.context = context
        self.initialize()
    def initialize(self):
        # Subclass hook invoked at the end of __init__.
        pass
    @gen.coroutine
    def create(self, data, current_user=None, query_args=None):
        """Create a resource. Base implementation rejects the verb."""
        raise MethodNotAllowed
    @gen.coroutine
    def update(self, item_id, data, current_user=None, query_args=None):
        """Replace the resource identified by item_id. Base rejects the verb."""
        raise MethodNotAllowed
    @gen.coroutine
    def delete(self, item_id, current_user=None, query_args=None):
        """Delete the resource identified by item_id. Base rejects the verb."""
        raise MethodNotAllowed
    @gen.coroutine
    def get(self, item_id, current_user=None, selector=None, query_args=None):
        """Return the resource identified by item_id. Base rejects the verb."""
        raise MethodNotAllowed
    @gen.coroutine
    def get_all(self, current_user=None, selector=None, query_args=None):
        """Return all resources of this type. Base rejects the verb."""
        raise MethodNotAllowed
    @gen.coroutine
    def create_uri(self, item_id):
        """Build the canonical REST URI for the item with the given id."""
        return REST_VERSION_PATH + OVSDB_SCHEMA_SYSTEM_URI + "/" +\
            self.base_uri_path + "/" + item_id
    @gen.coroutine
    def patch(self, item_id, data, current_user=None, query_args=None):
        """Apply a JSON patch (list of patch ops in `data`) to the resource."""
        try:
            # Get the resource's JSON to patch
            # NOTE(review): self.get and self.update are @gen.coroutine
            # functions, so calling them without `yield` returns a Future;
            # as written resource_json would never be None and exceptions
            # raised inside get() would not propagate here. Confirm whether
            # `yield` is missing or subclasses override these as plain
            # (non-coroutine) methods.
            resource_json = self.get(item_id, current_user,
                                     OVSDB_SCHEMA_CONFIG)
            if resource_json is None:
                raise NotFound
            # Create and verify patch
            (patch, needs_update) = create_patch(data)
            # Apply patch to the resource's JSON
            patched_resource = apply_patch(patch, resource_json)
            # Update resource only if needed, since a valid
            # patch can contain PATCH_OP_TEST operations
            # only, which do not modify the resource
            if needs_update:
                self.update(item_id, patched_resource, current_user)
            # In case the resource doesn't implement GET/PUT
        except MethodNotAllowed:
            raise MethodNotAllowed("PATCH not allowed on resource")
| [
"opsrest.patch.apply_patch",
"opsrest.exceptions.MethodNotAllowed",
"opsrest.patch.create_patch"
] | [((2335, 2353), 'opsrest.patch.create_patch', 'create_patch', (['data'], {}), '(data)\n', (2347, 2353), False, 'from opsrest.patch import create_patch, apply_patch\n'), ((2435, 2468), 'opsrest.patch.apply_patch', 'apply_patch', (['patch', 'resource_json'], {}), '(patch, resource_json)\n', (2446, 2468), False, 'from opsrest.patch import create_patch, apply_patch\n'), ((2847, 2896), 'opsrest.exceptions.MethodNotAllowed', 'MethodNotAllowed', (['"""PATCH not allowed on resource"""'], {}), "('PATCH not allowed on resource')\n", (2863, 2896), False, 'from opsrest.exceptions import MethodNotAllowed, NotFound\n')] |
import numpy as np
import matplotlib.pyplot as plt
from utils import get_state_vowel
class HopfieldNetwork:
    """
    Binary (0/1) Hopfield network trained with the Hebbian outer-product rule.
    """
    def __init__(self, patterns):
        """
        Initializes the network and learns the weight matrix.

        Args:
            patterns (np.array): 2-D array, one binary (0/1) pattern per row,
                to be memorized by the network.
        """
        self.num_units = patterns.shape[1]
        self.passes = 0
        # Random initial binary state for every unit.
        self.state_units = np.array([1 if 2 * np.random.random() - 1 >= 0 else 0 for _ in range(self.num_units)])
        # Hebbian rule on bipolar patterns B = 2P - 1: W = B^T B (this matrix
        # product already sums the outer products over all patterns).
        # Bug fix: the previous version re-added this full sum once per
        # stored pattern, scaling W and the energies by the pattern count.
        bipolar = 2 * patterns - 1
        self.W = np.dot(np.transpose(bipolar), bipolar).astype(float)
        np.fill_diagonal(self.W, 0)  # no self-connections
        self.energy = [-0.5 * np.dot(np.dot(self.state_units.T, self.W), self.state_units)]
    def _generate_sequence_units(self):
        """Returns a random permutation giving the unit-update order for one pass.

        Bug fix: np.random.choice(n, n) samples WITH replacement, so a pass
        could skip units and the convergence test could succeed while
        unvisited units were still unstable; a permutation visits every unit.
        """
        return np.random.permutation(self.num_units)
    def run(self):
        """Updates units asynchronously until a full pass produces no change."""
        no_update = True
        while True:
            for unit in self._generate_sequence_units():
                # A unit switches on iff its net input is non-negative.
                unit_activation = np.dot(self.W[unit, :], self.state_units)
                if unit_activation >= 0 and self.state_units[unit] == 0:
                    self.state_units[unit] = 1
                    no_update = False
                elif unit_activation < 0 and self.state_units[unit] == 1:
                    self.state_units[unit] = 0
                    no_update = False
                self.energy.append(-0.5 * np.dot(np.dot(self.state_units.T, self.W), self.state_units))
            self.passes += 1
            if no_update:
                break
            else:
                no_update = True
def main():
    """Builds a Hopfield net on the 5x5 vowel patterns, runs it, plots results."""
    np.random.seed(1234)
    vowels = ['A', 'E', 'I', 'O', 'U']
    patterns = np.array([get_state_vowel(v) for v in vowels])
    net = HopfieldNetwork(patterns)
    net.run()
    # Plot the five stored patterns and the network's final state.
    # (Deduplicated: the original repeated the subplot code once per vowel.)
    plt.figure(figsize=(6, 3), tight_layout=True)
    for i, vowel in enumerate(vowels):
        plt.subplot(2, 3, i + 1)
        plt.imshow(np.reshape(patterns[i, :], (5, 5)), cmap="Greys_r")
        plt.title(vowel)
    plt.subplot(2, 3, 6)
    plt.imshow(np.reshape(net.state_units, (5, 5)), cmap="Greys_r")
    plt.title("Output")
    # Plot energy over time
    plt.figure(figsize=(4, 2))
    plt.plot(net.energy)
    plt.title("Energy")
    plt.show()
if __name__ == "__main__":
    # Run the demo only when executed as a script.
    main()
| [
"numpy.reshape",
"numpy.random.choice",
"numpy.random.random",
"matplotlib.pyplot.plot",
"utils.get_state_vowel",
"numpy.fill_diagonal",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.dot",
"numpy.random.seed",
"matplotlib.pyplot.title",
"numpy.transpose",
"matplotlib.pyplot.subplot",
"... | [((1840, 1860), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (1854, 1860), True, 'import numpy as np\n'), ((2183, 2228), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)', 'tight_layout': '(True)'}), '(figsize=(6, 3), tight_layout=True)\n', (2193, 2228), True, 'import matplotlib.pyplot as plt\n'), ((2233, 2253), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (2244, 2253), True, 'import matplotlib.pyplot as plt\n'), ((2325, 2339), 'matplotlib.pyplot.title', 'plt.title', (['"""A"""'], {}), "('A')\n", (2334, 2339), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2364), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (2355, 2364), True, 'import matplotlib.pyplot as plt\n'), ((2436, 2450), 'matplotlib.pyplot.title', 'plt.title', (['"""E"""'], {}), "('E')\n", (2445, 2450), True, 'import matplotlib.pyplot as plt\n'), ((2455, 2475), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (2466, 2475), True, 'import matplotlib.pyplot as plt\n'), ((2547, 2561), 'matplotlib.pyplot.title', 'plt.title', (['"""I"""'], {}), "('I')\n", (2556, 2561), True, 'import matplotlib.pyplot as plt\n'), ((2566, 2586), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (2577, 2586), True, 'import matplotlib.pyplot as plt\n'), ((2658, 2672), 'matplotlib.pyplot.title', 'plt.title', (['"""O"""'], {}), "('O')\n", (2667, 2672), True, 'import matplotlib.pyplot as plt\n'), ((2677, 2697), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (2688, 2697), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2783), 'matplotlib.pyplot.title', 'plt.title', (['"""U"""'], {}), "('U')\n", (2778, 2783), True, 'import matplotlib.pyplot as plt\n'), ((2788, 2808), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (2799, 2808), True, 'import 
matplotlib.pyplot as plt\n'), ((2881, 2900), 'matplotlib.pyplot.title', 'plt.title', (['"""Output"""'], {}), "('Output')\n", (2890, 2900), True, 'import matplotlib.pyplot as plt\n'), ((2934, 2960), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 2)'}), '(figsize=(4, 2))\n', (2944, 2960), True, 'import matplotlib.pyplot as plt\n'), ((2965, 2985), 'matplotlib.pyplot.plot', 'plt.plot', (['net.energy'], {}), '(net.energy)\n', (2973, 2985), True, 'import matplotlib.pyplot as plt\n'), ((2990, 3009), 'matplotlib.pyplot.title', 'plt.title', (['"""Energy"""'], {}), "('Energy')\n", (2999, 3009), True, 'import matplotlib.pyplot as plt\n'), ((3014, 3024), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3022, 3024), True, 'import matplotlib.pyplot as plt\n'), ((544, 586), 'numpy.zeros', 'np.zeros', (['(self.num_units, self.num_units)'], {}), '((self.num_units, self.num_units))\n', (552, 586), True, 'import numpy as np\n'), ((711, 738), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.W', '(0)'], {}), '(self.W, 0)\n', (727, 738), True, 'import numpy as np\n'), ((969, 1017), 'numpy.random.choice', 'np.random.choice', (['self.num_units', 'self.num_units'], {}), '(self.num_units, self.num_units)\n', (985, 1017), True, 'import numpy as np\n'), ((2269, 2303), 'numpy.reshape', 'np.reshape', (['patterns[0, :]', '(5, 5)'], {}), '(patterns[0, :], (5, 5))\n', (2279, 2303), True, 'import numpy as np\n'), ((2380, 2414), 'numpy.reshape', 'np.reshape', (['patterns[1, :]', '(5, 5)'], {}), '(patterns[1, :], (5, 5))\n', (2390, 2414), True, 'import numpy as np\n'), ((2491, 2525), 'numpy.reshape', 'np.reshape', (['patterns[2, :]', '(5, 5)'], {}), '(patterns[2, :], (5, 5))\n', (2501, 2525), True, 'import numpy as np\n'), ((2602, 2636), 'numpy.reshape', 'np.reshape', (['patterns[3, :]', '(5, 5)'], {}), '(patterns[3, :], (5, 5))\n', (2612, 2636), True, 'import numpy as np\n'), ((2713, 2747), 'numpy.reshape', 'np.reshape', (['patterns[4, :]', '(5, 5)'], {}), '(patterns[4, 
:], (5, 5))\n', (2723, 2747), True, 'import numpy as np\n'), ((2824, 2859), 'numpy.reshape', 'np.reshape', (['net.state_units', '(5, 5)'], {}), '(net.state_units, (5, 5))\n', (2834, 2859), True, 'import numpy as np\n'), ((1886, 1906), 'utils.get_state_vowel', 'get_state_vowel', (['"""A"""'], {}), "('A')\n", (1901, 1906), False, 'from utils import get_state_vowel\n'), ((1933, 1953), 'utils.get_state_vowel', 'get_state_vowel', (['"""E"""'], {}), "('E')\n", (1948, 1953), False, 'from utils import get_state_vowel\n'), ((1980, 2000), 'utils.get_state_vowel', 'get_state_vowel', (['"""I"""'], {}), "('I')\n", (1995, 2000), False, 'from utils import get_state_vowel\n'), ((2027, 2047), 'utils.get_state_vowel', 'get_state_vowel', (['"""O"""'], {}), "('O')\n", (2042, 2047), False, 'from utils import get_state_vowel\n'), ((2074, 2094), 'utils.get_state_vowel', 'get_state_vowel', (['"""U"""'], {}), "('U')\n", (2089, 2094), False, 'from utils import get_state_vowel\n'), ((649, 679), 'numpy.transpose', 'np.transpose', (['(2 * patterns - 1)'], {}), '(2 * patterns - 1)\n', (661, 679), True, 'import numpy as np\n'), ((1231, 1272), 'numpy.dot', 'np.dot', (['self.W[unit, :]', 'self.state_units'], {}), '(self.W[unit, :], self.state_units)\n', (1237, 1272), True, 'import numpy as np\n'), ((776, 810), 'numpy.dot', 'np.dot', (['self.state_units.T', 'self.W'], {}), '(self.state_units.T, self.W)\n', (782, 810), True, 'import numpy as np\n'), ((1639, 1673), 'numpy.dot', 'np.dot', (['self.state_units.T', 'self.W'], {}), '(self.state_units.T, self.W)\n', (1645, 1673), True, 'import numpy as np\n'), ((459, 477), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (475, 477), True, 'import numpy as np\n')] |
# Load a local text file through NLTK's data loader and print its contents.
import nltk
# NOTE(review): absolute, user-specific path — this only works on the
# author's machine.
txt = nltk.data.load('/Users/ewanog/Dropbox/Work/ACAPS/nlp/text.txt')
print(txt)
"nltk.data.load"
] | [((19, 82), 'nltk.data.load', 'nltk.data.load', (['"""/Users/ewanog/Dropbox/Work/ACAPS/nlp/text.txt"""'], {}), "('/Users/ewanog/Dropbox/Work/ACAPS/nlp/text.txt')\n", (33, 82), False, 'import nltk\n')] |
#!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import unittest
import numpy as np
import onnx
from onnx import TensorProto, helper
from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type
from onnxruntime.quantization import QuantFormat, QuantType, quantize_static
class TestOpRelu(unittest.TestCase):
    """Static-quantization tests for a small Gemm -> Relu -> Gemm model.

    Verifies node counts, quantized tensor types, and numerical correctness
    after quantize_static, in both QOperator and QDQ formats, for u8 and s8
    activation types. With u8 activations Relu is fused into the preceding
    quantized op; with s8 it survives and needs extra Q/DQ nodes.
    """

    def input_feeds(self, n, name2shape):
        """Return a TestDataFeeds yielding n random input dicts shaped per name2shape."""
        input_data_list = []
        for _ in range(n):
            inputs = {}
            for name, shape in name2shape.items():
                inputs[name] = np.random.randint(-1, 2, shape).astype(np.float32)
            input_data_list.append(inputs)
        dr = TestDataFeeds(input_data_list)
        return dr

    def construct_model_gemm(self, output_model_path):
        """Build and save a float32 ONNX model: (input) -> Gemm -> Relu -> Gemm -> (output)."""
        input_name = "input"
        output_name = "output"
        initializers = []

        def make_gemm(input_name, weight_shape, weight_name, bias_shape, bias_name, output_name):
            # Random weight/bias tensors are registered as initializers;
            # transB=1, so weight_shape is (out_features, in_features).
            weight_data = np.random.normal(0, 0.1, weight_shape).astype(np.float32)
            initializers.append(onnx.numpy_helper.from_array(weight_data, name=weight_name))
            bias_data = np.random.normal(0, 0.1, bias_shape).astype(np.float32)
            initializers.append(onnx.numpy_helper.from_array(bias_data, name=bias_name))
            return onnx.helper.make_node(
                "Gemm",
                [input_name, weight_name, bias_name],
                [output_name],
                alpha=1.0,
                beta=1.0,
                transB=1,
            )

        # make gemm1 node
        gemm1_output_name = "gemm1_output"
        gemm1_node = make_gemm(
            input_name,
            [100, 10],
            "linear1.weight",
            [100],
            "linear1.bias",
            gemm1_output_name,
        )
        # make Relu
        relu_output = "relu_output"
        relu_node = onnx.helper.make_node("Relu", [gemm1_output_name], [relu_output])
        # make gemm2 node
        gemm2_node = make_gemm(
            relu_output,
            [10, 100],
            "linear2.weight",
            [10],
            "linear2.bias",
            output_name,
        )
        # make graph
        input_tensor = helper.make_tensor_value_info(input_name, TensorProto.FLOAT, [-1, 10])
        output_tensor = helper.make_tensor_value_info(output_name, TensorProto.FLOAT, [-1, 10])
        graph_name = "relu_test"
        graph = helper.make_graph(
            [gemm1_node, relu_node, gemm2_node],
            graph_name,
            [input_tensor],
            [output_tensor],
            initializer=initializers,
        )
        model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
        model.ir_version = onnx.IR_VERSION
        onnx.save(model, output_model_path)

    def static_quant_test(
        self,
        model_fp32_path,
        data_reader,
        activation_type,
        weight_type,
        extra_options=None,
    ):
        """Quantize in QOperator format; check node counts, qtypes and outputs."""
        if extra_options is None:
            # Bug fix: avoid a mutable default argument ({}).
            extra_options = {}
        activation_proto_qtype = TensorProto.UINT8 if activation_type == QuantType.QUInt8 else TensorProto.INT8
        activation_type_str = "u8" if (activation_type == QuantType.QUInt8) else "s8"
        weight_type_str = "u8" if (weight_type == QuantType.QUInt8) else "s8"
        model_int8_path = "relu_fp32.quant_{}{}.onnx".format(activation_type_str, weight_type_str)
        data_reader.rewind()
        quantize_static(
            model_fp32_path,
            model_int8_path,
            data_reader,
            quant_format=QuantFormat.QOperator,
            activation_type=activation_type,
            weight_type=weight_type,
            extra_options=extra_options,
        )
        # u8: Relu fused into QGemm; s8: Relu kept, one extra Q/DQ pair.
        qdq_count = 1 if activation_type == QuantType.QUInt8 else 2
        relu_count = 0 if activation_type == QuantType.QUInt8 else 1
        quant_nodes = {"QGemm": 2, "QuantizeLinear": qdq_count, "DequantizeLinear": qdq_count, "Relu": relu_count}
        check_op_type_count(self, model_int8_path, **quant_nodes)
        qnode_io_qtypes = {
            "QuantizeLinear": [
                ["i", 2, activation_proto_qtype],
                ["o", 0, activation_proto_qtype],
            ]
        }
        qnode_io_qtypes.update({"DequantizeLinear": [["i", 2, activation_proto_qtype]]})
        check_qtype_by_node_type(self, model_int8_path, qnode_io_qtypes)
        data_reader.rewind()
        check_model_correctness(self, model_fp32_path, model_int8_path, data_reader.get_next())

    def static_quant_test_qdq(
        self,
        model_fp32_path,
        data_reader,
        activation_type,
        weight_type,
        extra_options=None,
    ):
        """Quantize in QDQ format; check node counts, qtypes and outputs."""
        if extra_options is None:
            # Bug fix: avoid a mutable default argument ({}).
            extra_options = {}
        activation_proto_qtype = TensorProto.UINT8 if activation_type == QuantType.QUInt8 else TensorProto.INT8
        activation_type_str = "u8" if (activation_type == QuantType.QUInt8) else "s8"
        weight_type_str = "u8" if (weight_type == QuantType.QUInt8) else "s8"
        model_int8_path = "relu_fp32.quant_dqd_{}{}.onnx".format(activation_type_str, weight_type_str)
        data_reader.rewind()
        quantize_static(
            model_fp32_path,
            model_int8_path,
            data_reader,
            quant_format=QuantFormat.QDQ,
            activation_type=activation_type,
            weight_type=weight_type,
            extra_options=extra_options,
        )
        # u8: Relu folded away; s8: Relu kept plus one extra Q and one extra DQ.
        relu_count = 0 if activation_type == QuantType.QUInt8 else 1
        q_count = 3 if activation_type == QuantType.QUInt8 else 4
        dq_count = 7 if activation_type == QuantType.QUInt8 else 8
        quant_nodes = {"Gemm": 2, "QuantizeLinear": q_count, "DequantizeLinear": dq_count, "Relu": relu_count}
        check_op_type_count(self, model_int8_path, **quant_nodes)
        qnode_io_qtypes = {
            "QuantizeLinear": [
                ["i", 2, activation_proto_qtype],
                ["o", 0, activation_proto_qtype],
            ]
        }
        check_qtype_by_node_type(self, model_int8_path, qnode_io_qtypes)
        data_reader.rewind()
        check_model_correctness(self, model_fp32_path, model_int8_path, data_reader.get_next())

    def test_quantize_gemm(self):
        """u8/u8 quantization in both QOperator and QDQ formats."""
        np.random.seed(1)
        model_fp32_path = "relu_fp32.onnx"
        self.construct_model_gemm(model_fp32_path)
        data_reader = self.input_feeds(1, {"input": [5, 10]})
        self.static_quant_test(
            model_fp32_path,
            data_reader,
            activation_type=QuantType.QUInt8,
            weight_type=QuantType.QUInt8,
        )
        self.static_quant_test_qdq(
            model_fp32_path,
            data_reader,
            activation_type=QuantType.QUInt8,
            weight_type=QuantType.QUInt8,
        )

    def test_quantize_relu_s8s8(self):
        """s8/s8 quantization (symmetric activations) in both formats."""
        np.random.seed(1)
        model_fp32_path = "relu_fp32.onnx"
        self.construct_model_gemm(model_fp32_path)
        data_reader = self.input_feeds(1, {"input": [5, 10]})
        self.static_quant_test(
            model_fp32_path,
            data_reader,
            activation_type=QuantType.QInt8,
            weight_type=QuantType.QInt8,
            extra_options={"ActivationSymmetric": True},
        )
        self.static_quant_test_qdq(
            model_fp32_path,
            data_reader,
            activation_type=QuantType.QInt8,
            weight_type=QuantType.QInt8,
            extra_options={"ActivationSymmetric": True},
        )
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| [
"onnx.helper.make_graph",
"onnxruntime.quantization.quantize_static",
"numpy.random.normal",
"onnx.save",
"onnx.helper.make_node",
"onnx.numpy_helper.from_array",
"onnx.helper.make_tensor_value_info",
"numpy.random.randint",
"numpy.random.seed",
"op_test_utils.TestDataFeeds",
"unittest.main",
... | [((7923, 7938), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7936, 7938), False, 'import unittest\n'), ((984, 1014), 'op_test_utils.TestDataFeeds', 'TestDataFeeds', (['input_data_list'], {}), '(input_data_list)\n', (997, 1014), False, 'from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type\n'), ((2403, 2468), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Relu"""', '[gemm1_output_name]', '[relu_output]'], {}), "('Relu', [gemm1_output_name], [relu_output])\n", (2424, 2468), False, 'import onnx\n'), ((2732, 2802), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['input_name', 'TensorProto.FLOAT', '[-1, 10]'], {}), '(input_name, TensorProto.FLOAT, [-1, 10])\n', (2761, 2802), False, 'from onnx import TensorProto, helper\n'), ((2827, 2898), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['output_name', 'TensorProto.FLOAT', '[-1, 10]'], {}), '(output_name, TensorProto.FLOAT, [-1, 10])\n', (2856, 2898), False, 'from onnx import TensorProto, helper\n'), ((2948, 3078), 'onnx.helper.make_graph', 'helper.make_graph', (['[gemm1_node, relu_node, gemm2_node]', 'graph_name', '[input_tensor]', '[output_tensor]'], {'initializer': 'initializers'}), '([gemm1_node, relu_node, gemm2_node], graph_name, [\n input_tensor], [output_tensor], initializer=initializers)\n', (2965, 3078), False, 'from onnx import TensorProto, helper\n'), ((3283, 3318), 'onnx.save', 'onnx.save', (['model', 'output_model_path'], {}), '(model, output_model_path)\n', (3292, 3318), False, 'import onnx\n'), ((3899, 4094), 'onnxruntime.quantization.quantize_static', 'quantize_static', (['model_fp32_path', 'model_int8_path', 'data_reader'], {'quant_format': 'QuantFormat.QOperator', 'activation_type': 'activation_type', 'weight_type': 'weight_type', 'extra_options': 'extra_options'}), '(model_fp32_path, model_int8_path, data_reader, quant_format\n =QuantFormat.QOperator, 
activation_type=activation_type, weight_type=\n weight_type, extra_options=extra_options)\n', (3914, 4094), False, 'from onnxruntime.quantization import QuantFormat, QuantType, quantize_static\n'), ((4441, 4498), 'op_test_utils.check_op_type_count', 'check_op_type_count', (['self', 'model_int8_path'], {}), '(self, model_int8_path, **quant_nodes)\n', (4460, 4498), False, 'from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type\n'), ((4780, 4844), 'op_test_utils.check_qtype_by_node_type', 'check_qtype_by_node_type', (['self', 'model_int8_path', 'qnode_io_qtypes'], {}), '(self, model_int8_path, qnode_io_qtypes)\n', (4804, 4844), False, 'from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type\n'), ((5558, 5747), 'onnxruntime.quantization.quantize_static', 'quantize_static', (['model_fp32_path', 'model_int8_path', 'data_reader'], {'quant_format': 'QuantFormat.QDQ', 'activation_type': 'activation_type', 'weight_type': 'weight_type', 'extra_options': 'extra_options'}), '(model_fp32_path, model_int8_path, data_reader, quant_format\n =QuantFormat.QDQ, activation_type=activation_type, weight_type=\n weight_type, extra_options=extra_options)\n', (5573, 5747), False, 'from onnxruntime.quantization import QuantFormat, QuantType, quantize_static\n'), ((6155, 6212), 'op_test_utils.check_op_type_count', 'check_op_type_count', (['self', 'model_int8_path'], {}), '(self, model_int8_path, **quant_nodes)\n', (6174, 6212), False, 'from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type\n'), ((6405, 6469), 'op_test_utils.check_qtype_by_node_type', 'check_qtype_by_node_type', (['self', 'model_int8_path', 'qnode_io_qtypes'], {}), '(self, model_int8_path, qnode_io_qtypes)\n', (6429, 6469), False, 'from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_qtype_by_node_type\n'), ((6638, 
6655), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (6652, 6655), True, 'import numpy as np\n'), ((7233, 7250), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (7247, 7250), True, 'import numpy as np\n'), ((1834, 1952), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Gemm"""', '[input_name, weight_name, bias_name]', '[output_name]'], {'alpha': '(1.0)', 'beta': '(1.0)', 'transB': '(1)'}), "('Gemm', [input_name, weight_name, bias_name], [\n output_name], alpha=1.0, beta=1.0, transB=1)\n", (1855, 1952), False, 'import onnx\n'), ((1583, 1642), 'onnx.numpy_helper.from_array', 'onnx.numpy_helper.from_array', (['weight_data'], {'name': 'weight_name'}), '(weight_data, name=weight_name)\n', (1611, 1642), False, 'import onnx\n'), ((1757, 1812), 'onnx.numpy_helper.from_array', 'onnx.numpy_helper.from_array', (['bias_data'], {'name': 'bias_name'}), '(bias_data, name=bias_name)\n', (1785, 1812), False, 'import onnx\n'), ((1493, 1531), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', 'weight_shape'], {}), '(0, 0.1, weight_shape)\n', (1509, 1531), True, 'import numpy as np\n'), ((1669, 1705), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', 'bias_shape'], {}), '(0, 0.1, bias_shape)\n', (1685, 1705), True, 'import numpy as np\n'), ((3201, 3228), 'onnx.helper.make_opsetid', 'helper.make_opsetid', (['""""""', '(13)'], {}), "('', 13)\n", (3220, 3228), False, 'from onnx import TensorProto, helper\n'), ((873, 904), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(2)', 'shape'], {}), '(-1, 2, shape)\n', (890, 904), True, 'import numpy as np\n')] |
from model import *
from dataloader import *
from utils import *
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
import time
import gc
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch.nn as nn
import numpy as np
import warnings as wn
# Module-level configuration: dataset selection, loaders, and TensorBoard writer.
wn.filterwarnings('ignore')
#load either PAMAP2 or Opportunity Datasets
batch_size_train = 500 # PAM
batch_size_val = 300 # PAM
#batch_size_train = 10000 # OPP
#batch_size_val = 1 # OPP
# 1 = PAM, 0 = OPP
PAM_dataset = 1
if (PAM_dataset):
    # PAM Dataset
    train_dataset = Wearables_Dataset(0,dataset_name='PAM2',dataset_path='data/PAM2',train_dataset=True)
    val_dataset = Wearables_Dataset(0,dataset_name='PAM2',dataset_path='data/PAM2',train_dataset=False)
else:
    # Opportunity Dataset
    train_dataset = Wearables_Dataset(dataset_name='OPP',dataset_path='data/OPP',train_dataset=True)
    val_dataset = Wearables_Dataset(dataset_name='OPP',dataset_path='data/OPP',train_dataset=False)
# Get dataloaders
train_loader = DataLoader(dataset=train_dataset,
                        batch_size=batch_size_train,
                        num_workers=4,
                        shuffle=True)
# Validation loader keeps the sample order fixed across epochs.
val_loader = DataLoader(dataset=val_dataset,
                        batch_size=batch_size_val,
                        num_workers=4,
                        shuffle=False)
writer = SummaryWriter()
def init_weights(m):
    """Xavier-initialize the weights of (transposed) conv layers.

    Intended for use with ``nn.Module.apply``; any other layer type is
    left untouched.
    """
    if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
        torch.nn.init.xavier_uniform_(m.weight.data)
def _plot_pair(x, train_series, val_series, ylabel, title, legend, filename):
    """Plot a train/val curve pair against epoch index and save to filename."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(x, train_series)
    ax.plot(x, val_series)
    ax.set_xlabel('Number of Epochs')
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    ax.legend(legend)
    fig.savefig(filename)


def plot(train_loss, val_loss, train_acc, val_acc, train_f1, val_f1, dataset):
    """Save train-vs-val accuracy, loss, and F1 curves as PNG files.

    Deduplicated: the original repeated the same figure code three times.
    Filenames are 'train_val_<metric>_<dataset>.png'.
    """
    x = np.arange(len(train_loss))
    # train/val acc plots
    _plot_pair(x, train_acc, val_acc, 'Accuracy',
               'Training vs. Validation Accuracy',
               ['Training Acc', 'Val Acc'],
               'train_val_accuracy_' + dataset + '.png')
    # train/val loss plots
    _plot_pair(x, train_loss, val_loss, 'Cross Entropy Loss',
               'Training vs. Validation Loss',
               ['Training Loss', 'Val Loss'],
               'train_val_loss_' + dataset + '.png')
    # train/val f1 plots
    _plot_pair(x, train_f1, val_f1, 'F1 Score',
               'Training vs. Validation F1 Score',
               ['Train F1 Score', 'Val F1 Score'],
               'train_val_f1_' + dataset + '.png')
def train():
    """Train `psm` for `epochs` epochs, validating and checkpointing each epoch.

    Relies on module-level globals: epochs, train_loader, optimizer, psm,
    criterion, writer, use_gpu. At the end, saves the train/val curves via
    plot().
    """
    train_epoch_loss = []
    train_epoch_acc = []
    train_epoch_f1 = []
    val_epoch_loss = []
    val_epoch_acc = []
    val_epoch_f1 = []
    best_model = 'best_model_train'
    best_loss = float('inf')
    for epoch in tqdm(range(epochs)):
        train_loss_per_iter = []
        train_acc_per_iter = []
        train_f1_per_iter = []
        ts = time.time()
        for iter, (X, Y) in tqdm(enumerate(train_loader), total=len(train_loader)):
            optimizer.zero_grad()
            if use_gpu:
                inputs = X.cuda()
                labels = Y.long().cuda()
            else:
                inputs, labels = X, Y.long()
            # clear() comes from utils; presumably frees references/GPU
            # memory — confirm it does not delete tensors still in use.
            clear(X, Y)
            # Split channels into per-device sensor groups of width 9.
            inputs = torch.split(inputs, 9, 1)
            outputs = psm(inputs)
            # Labels are one-hot; torch.max(...)[1] recovers class indices.
            loss = criterion(outputs, torch.max(labels, 1)[1])
            clear(outputs)
            loss.backward()
            optimizer.step()
            clear(loss)
            # save loss per iteration
            train_loss_per_iter.append(loss.item())
            t_acc = compute_acc(outputs,labels)
            train_acc_per_iter.append(t_acc)
            micro_f1, macro_f1, weighted = calculate_f1(outputs, labels)
            train_f1_per_iter.append(weighted)
            # NOTE(review): scalars are logged once per iteration but tagged
            # with the epoch index, so all iterations of an epoch share one
            # x value — confirm this is intended.
            writer.add_scalar('Loss/train', loss.item(), epoch)
            writer.add_scalar('Accuracy/train', t_acc, epoch)
        (print("Finish epoch {}, time elapsed {}, train acc {}, train weighted f1 {}".format(epoch,
            time.time() - ts, np.mean(train_acc_per_iter), np.mean(train_f1_per_iter))))
        # calculate validation loss and accuracy
        val_loss, val_acc, val_f1 = val(epoch)
        print("Val loss {}, Val Acc {}, Val F1 {}".format(val_loss, val_acc, val_f1))
        # Early Stopping
        # NOTE(review): `loss` here is the last mini-batch's training loss,
        # not val_loss — confirm this is the intended checkpoint criterion.
        if loss < best_loss:
            best_loss = loss
            # TODO: Consider switching to state dict instead
            torch.save(psm, best_model)
        train_epoch_loss.append(np.mean(train_loss_per_iter))
        train_epoch_acc.append(np.mean(train_acc_per_iter))
        train_epoch_f1.append(np.mean(train_f1_per_iter))
        val_epoch_loss.append(val_loss)
        val_epoch_acc.append(val_acc)
        val_epoch_f1.append(val_f1)
        writer.add_scalar('Loss/val', val_loss, epoch)
        writer.add_scalar('Accuracy/val', val_acc, epoch)
    # plot val/training plot curves
    plot(train_epoch_loss, val_epoch_loss, train_epoch_acc, val_epoch_acc, train_epoch_f1, val_epoch_f1, 'shared')
def val(epoch):
    """Evaluate `psm` on val_loader.

    Returns (mean loss, mean accuracy, mean weighted F1) over all batches.
    The `epoch` argument is currently unused.
    """
    batch_loss = []
    batch_acc = []
    batch_f1 = []
    for iter, (X, Y) in tqdm(enumerate(val_loader), total=len(val_loader)):
        '''
        y -> Labels (Used for pix acc and IOU)
        tar -> One-hot encoded labels (used for loss)
        '''
        if use_gpu:
            inputs = X.cuda()
            labels = Y.long().cuda()
        else:
            inputs, labels = X, Y.long()
        clear(X, Y)
        # Split channels into per-device sensor groups of width 9.
        inputs = torch.split(inputs, 9, 1)
        outputs = psm(inputs)
        # save val loss/accuracy
        loss = criterion(outputs, torch.max(labels, 1)[1])
        batch_loss.append(loss.item())
        batch_acc.append(compute_acc(outputs,labels))
        micro_f1, macro_f1, weighted = calculate_f1(outputs, labels)
        batch_f1.append(weighted)
        clear(outputs, loss)
        # if iter % 20 == 0:
        #     print("iter: {}".format(iter))
    return np.mean(batch_loss), np.mean(batch_acc), np.mean(batch_f1)
if __name__ == "__main__":
    # Define model parameters
    epochs = 3
    criterion = nn.CrossEntropyLoss()
    # Sensors per wearable device; fr is the sampling frequency (Hz).
    sensors_per_device = 3
    fr = 100
    # Initialize model sensor model (senseHAR paper Figure 3/4)
    # Initialize encoder model A,B,C
    psm = PSM(12, sensors_per_device, fr, p=0.15)
    psm.apply(init_weights)
    params = psm.parameters()
    optimizer = optim.Adam(params, lr=1e-2)
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        psm = psm.cuda()
    #print("Init val loss: {}, Init val acc: {}, Init val iou: {}".format(val_loss, val_acc, val_iou))
    train()
| [
"torch.optim.Adam",
"torch.utils.tensorboard.SummaryWriter",
"numpy.mean",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.figure",
"time.time",
"warnings.filterwarnings"
] | [((282, 309), 'warnings.filterwarnings', 'wn.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (299, 309), True, 'import warnings as wn\n'), ((1375, 1390), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (1388, 1390), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1741, 1753), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1751, 1753), True, 'import matplotlib.pyplot as plt\n'), ((2100, 2112), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2110, 2112), True, 'import matplotlib.pyplot as plt\n'), ((2463, 2475), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2473, 2475), True, 'import matplotlib.pyplot as plt\n'), ((6338, 6359), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6357, 6359), True, 'import torch.nn as nn\n'), ((6625, 6652), 'torch.optim.Adam', 'optim.Adam', (['params'], {'lr': '(0.01)'}), '(params, lr=0.01)\n', (6635, 6652), True, 'import torch.optim as optim\n'), ((3154, 3165), 'time.time', 'time.time', ([], {}), '()\n', (3163, 3165), False, 'import time\n'), ((6186, 6205), 'numpy.mean', 'np.mean', (['batch_loss'], {}), '(batch_loss)\n', (6193, 6205), True, 'import numpy as np\n'), ((6207, 6225), 'numpy.mean', 'np.mean', (['batch_acc'], {}), '(batch_acc)\n', (6214, 6225), True, 'import numpy as np\n'), ((6227, 6244), 'numpy.mean', 'np.mean', (['batch_f1'], {}), '(batch_f1)\n', (6234, 6244), True, 'import numpy as np\n'), ((4745, 4773), 'numpy.mean', 'np.mean', (['train_loss_per_iter'], {}), '(train_loss_per_iter)\n', (4752, 4773), True, 'import numpy as np\n'), ((4806, 4833), 'numpy.mean', 'np.mean', (['train_acc_per_iter'], {}), '(train_acc_per_iter)\n', (4813, 4833), True, 'import numpy as np\n'), ((4865, 4891), 'numpy.mean', 'np.mean', (['train_f1_per_iter'], {}), '(train_f1_per_iter)\n', (4872, 4891), True, 'import numpy as np\n'), ((4287, 4314), 'numpy.mean', 'np.mean', (['train_acc_per_iter'], {}), '(train_acc_per_iter)\n', 
(4294, 4314), True, 'import numpy as np\n'), ((4316, 4342), 'numpy.mean', 'np.mean', (['train_f1_per_iter'], {}), '(train_f1_per_iter)\n', (4323, 4342), True, 'import numpy as np\n'), ((4269, 4280), 'time.time', 'time.time', ([], {}), '()\n', (4278, 4280), False, 'import time\n')] |
import logging
import logging.config
import os
import flask
import google.cloud.logging
import yaml
from flask_cors import CORS
from google.auth.credentials import AnonymousCredentials
from google.cloud import ndb
from bond_app import routes
from bond_app.json_exception_handler import JsonExceptionHandler
from bond_app.swagger_ui import swaggerui_blueprint, SWAGGER_URL
# Module-level NDB client, created once at import time and shared by the
# per-request contexts set up in ndb_wsgi_middleware below.
client = None
if os.environ.get('DATASTORE_EMULATOR_HOST'):
    # If we're running the datastore emulator, we should use anonymous credentials to connect to it.
    # The project should match the project given to the Datastore Emulator. See tests/datastore_emulator/run_emulator.sh
    client = ndb.Client(project="test", credentials=AnonymousCredentials())
else:
    # Otherwise, create a client grabbing credentials normally from cloud environment variables.
    client = ndb.Client()
def ndb_wsgi_middleware(wsgi_app):
    """Return *wsgi_app* wrapped so every request runs inside its own NDB client context."""
    def _with_ndb_context(environ, start_response):
        # A fresh context per request scopes NDB caching/transactions to the request.
        with client.context():
            return wsgi_app(environ, start_response)
    return _with_ndb_context
def setup_logging():
    """
    Configure application logging.

    When running on Google App Engine (GAE_APPLICATION is set), attach the
    Stackdriver logging handler first — it needs the project credentials that
    are only available in that environment.  Then load the app's custom
    logging configuration from log_config.yaml, falling back to a minimal
    default configuration if the file cannot be read.
    """
    default_log_level = logging.DEBUG
    if os.environ.get('GAE_APPLICATION'):
        # Connects the logger to the root logging handler; by default this
        # captures all logs at INFO level and higher.
        stackdriver_client = google.cloud.logging.Client()
        stackdriver_client.setup_logging(log_level=default_log_level)
    # Minimal fallback config, used when reading the YAML file fails.
    logging_config = {
        "version": 1,
        "disable_existing_loggers": False,
        "root": {"level": default_log_level},
    }
    log_config_file_path = 'log_config.yaml'
    try:
        with open(log_config_file_path, 'rt') as f:
            logging_config = yaml.safe_load(f.read())
        logging.debug("Successfully read Logging Config from: {}".format(log_config_file_path))
    except Exception:
        # TODO: How do we determine what specific exception types to handle here?
        logging.basicConfig(level=default_log_level)
        logging.exception("Error trying to configure logging with file: {}. Using default settings."
                          .format(log_config_file_path))
    logging.config.dictConfig(logging_config)
def create_app():
    """Build and return the configured Flask application."""
    application = flask.Flask(__name__)
    # API routes, plus the Swagger UI blueprint served under SWAGGER_URL.
    application.register_blueprint(routes.routes)
    application.register_blueprint(swaggerui_blueprint, url_prefix=SWAGGER_URL)
    # Allow cross-origin requests against the API.
    CORS(application)
    return application
# Logging setup/config should happen as early as possible so that we can log using our desired settings. If you want to
# log anything in this file, make sure you call `setup_logging()` first and then get the right logger as follows:
# logger = logging.getLogger(__name__)
setup_logging()
app = create_app()
app.wsgi_app = ndb_wsgi_middleware(app.wsgi_app)  # Wrap the app in middleware.
# Installs JSON-formatted exception handling on the app (side effect of construction).
handler = JsonExceptionHandler(app)
@app.after_request
def add_nosniff_content_type_header(response):
    """Attach security headers to every outgoing response."""
    # Stop browsers from MIME-sniffing responses, and forbid framing the app.
    security_headers = {
        "X-Content-Type-Options": "nosniff",
        "X-Frame-Options": "deny",
    }
    for header_name, header_value in security_headers.items():
        response.headers[header_name] = header_value
    return response
| [
"google.auth.credentials.AnonymousCredentials",
"logging.basicConfig",
"flask_cors.CORS",
"flask.Flask",
"logging.config.dictConfig",
"os.environ.get",
"google.cloud.ndb.Client",
"bond_app.json_exception_handler.JsonExceptionHandler"
] | [((392, 433), 'os.environ.get', 'os.environ.get', (['"""DATASTORE_EMULATOR_HOST"""'], {}), "('DATASTORE_EMULATOR_HOST')\n", (406, 433), False, 'import os\n'), ((3343, 3368), 'bond_app.json_exception_handler.JsonExceptionHandler', 'JsonExceptionHandler', (['app'], {}), '(app)\n', (3363, 3368), False, 'from bond_app.json_exception_handler import JsonExceptionHandler\n'), ((849, 861), 'google.cloud.ndb.Client', 'ndb.Client', ([], {}), '()\n', (859, 861), False, 'from google.cloud import ndb\n'), ((1482, 1515), 'os.environ.get', 'os.environ.get', (['"""GAE_APPLICATION"""'], {}), "('GAE_APPLICATION')\n", (1496, 1515), False, 'import os\n'), ((2648, 2689), 'logging.config.dictConfig', 'logging.config.dictConfig', (['logging_config'], {}), '(logging_config)\n', (2673, 2689), False, 'import logging\n'), ((2753, 2774), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (2764, 2774), False, 'import flask\n'), ((2905, 2920), 'flask_cors.CORS', 'CORS', (['flask_app'], {}), '(flask_app)\n', (2909, 2920), False, 'from flask_cors import CORS\n'), ((709, 731), 'google.auth.credentials.AnonymousCredentials', 'AnonymousCredentials', ([], {}), '()\n', (729, 731), False, 'from google.auth.credentials import AnonymousCredentials\n'), ((2439, 2483), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'default_log_level'}), '(level=default_log_level)\n', (2458, 2483), False, 'import logging\n')] |
import FWCore.ParameterSet.Config as cms
# CMSSW digitizer configuration: accumulates pileup vertex information
# from the 'generator' hits producer.
pileupVtxDigitizer = cms.PSet(
    accumulatorType = cms.string("PileupVertexAccumulator"),
    hitsProducer = cms.string('generator'),
    # Primary vertex source, with a fallback when the smeared product is absent.
    vtxTag = cms.InputTag("generatorSmeared"),
    vtxFallbackTag = cms.InputTag("generator"),
    makeDigiSimLinks = cms.untracked.bool(False),
    saveVtxTimes = cms.bool(False))
from Configuration.Eras.Modifier_phase2_timing_cff import phase2_timing
# Phase-2 timing era additionally records per-vertex times.
phase2_timing.toModify( pileupVtxDigitizer, saveVtxTimes = cms.bool(True) )
| [
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Config.bool",
"FWCore.ParameterSet.Config.InputTag"
] | [((95, 132), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""PileupVertexAccumulator"""'], {}), "('PileupVertexAccumulator')\n", (105, 132), True, 'import FWCore.ParameterSet.Config as cms\n'), ((153, 176), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""generator"""'], {}), "('generator')\n", (163, 176), True, 'import FWCore.ParameterSet.Config as cms\n'), ((191, 223), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""generatorSmeared"""'], {}), "('generatorSmeared')\n", (203, 223), True, 'import FWCore.ParameterSet.Config as cms\n'), ((246, 271), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""generator"""'], {}), "('generator')\n", (258, 271), True, 'import FWCore.ParameterSet.Config as cms\n'), ((296, 321), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(False)'], {}), '(False)\n', (314, 321), True, 'import FWCore.ParameterSet.Config as cms\n'), ((342, 357), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (350, 357), True, 'import FWCore.ParameterSet.Config as cms\n'), ((491, 505), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (499, 505), True, 'import FWCore.ParameterSet.Config as cms\n')] |
# -*- coding: utf-8 -*-
import pygame
import heapq as pq
import random
def explore(u, vis, adj, q):
    """Push every edge from node *u* to a not-yet-visited neighbour onto heap *q*.

    Edges are stored as [weight, u, v] so the heap pops the lightest edge first.
    """
    frontier = ([w, u, v] for v, w in adj[u] if not vis[v])
    for edge in frontier:
        pq.heappush(q, edge)
def prim(adj, return_edj=0):
    """Prim's minimum spanning tree over adjacency list *adj* (adj[u] = [[v, w], ...]).

    Returns the MST as an adjacency list, or as a list of [w, u, v] edges when
    *return_edj* is truthy.  Error codes (relied on by the unit tests):
        -1  empty graph
        -2  a node with no edges, or a falsy/empty edge entry
        -3  graph not connected
    """
    if not adj:
        return -1
    # Validate up front: every node needs at least one well-formed edge.
    for neighbours in adj:
        if not neighbours:
            return -2
        for edge in neighbours:
            if not edge:
                return -2
    node_count = len(adj)
    tree = [[] for _ in range(node_count)]
    tree_edj = []
    heap = []
    vis = [0] * node_count
    vis[0] = 1
    explore(0, vis, adj, heap)
    while heap:
        # w = weight, u = node already in the tree, v = candidate node.
        w, u, v = pq.heappop(heap)
        if vis[v]:
            continue
        vis[v] = 1
        tree[u].append([v, w])
        tree[v].append([u, w])
        tree_edj.append([w, u, v])
        explore(v, vis, adj, heap)
    # Any unvisited node means the graph had multiple components.
    if not all(vis):
        return -3
    return tree_edj if return_edj else tree
def unit_test_prim():
    """Smoke tests for prim(); prints an OK/FAIL line per scenario."""
    test1 = [
        [[1, 4], [7, 8]],
        [[0, 4], [2, 8], [7, 11]],
        [[8, 2], [5, 4], [3, 7], [1, 8]],
        [[2, 7], [4, 9], [5, 14]],
        [[3, 9], [5, 10]],
        [[6, 2], [2, 4], [4, 10], [3, 14]],
        [[7, 1], [5, 2], [8, 6]],
        [[6, 1], [8, 7], [0, 8], [1, 11]],
        [[2, 2], [6, 6], [7, 7]],
    ]
    ans1 = [
        [[1, 4], [7, 8]],
        [[0, 4]],
        [[5, 4], [8, 2], [3, 7]],
        [[2, 7], [4, 9]],
        [[3, 9]],
        [[6, 2], [2, 4]],
        [[7, 1], [5, 2]],
        [[0, 8], [6, 1]],
        [[2, 2]],
    ]
    mst1 = prim(test1)
    # Adjacency lists are order-insensitive: sort both sides before comparing.
    for expected, actual in zip(ans1, mst1):
        expected.sort()
        actual.sort()
    print("Unit test 1: OK -> regular solution" if ans1 == mst1
          else "Unit test 1: FAIL -> regular solution")
    # Empty graph -> error code -1.
    print("Unit test 2: OK -> empty tree" if prim([]) == -1
          else "Unit test 2: FAIL -> empty tree")
    # A node with no edges -> error code -2.
    test3 = [[], [[2, 5], [3, 1]], [[1, 5]], [[1, 1]]]
    print("Unit test 3: OK -> empty node" if prim(test3) == -2
          else "Unit test 3: FAIL -> empty node")
    # Two disjoint components -> error code -3.
    test4 = [[[1, 2]], [[0, 2]], [[3, 1]], [[2, 1]]]
    print("Unit test 4: OK -> not connected graph" if prim(test4) == -3
          else "Unit test 4: FAIL -> not connected graph")
    # A malformed (empty) edge entry -> error code -2.
    test5 = [
        [[1, 4], [7, 8]],
        [[0, 4], [2, 8], [7, 11]],
        [[8, 2], [5, 4], [3, 7], [1, 8]],
        [[2, 7], [], [5, 14]],
        [[3, 9]],
        [[6, 2], [2, 4], [4, 10], [3, 14]],
        [[7, 1], [5, 2], [8, 6]],
        [[6, 1], [8, 7], [0, 8], [1, 11]],
        [[2, 2], [6, 6], [7, 7]],
    ]
    print("Unit test 5: OK -> empty node" if prim(test5) == -2
          else "Unit test 5: Fail -> empty node")
def raiz(nodo, p):
    """Return the root representative of *nodo* in parent array *p* (-1 marks a root)."""
    # Iterative walk up the parent chain (same result as the recursive form).
    while p[nodo] != -1:
        nodo = p[nodo]
    return nodo
def juntar(u, v, p):
    """Union step: attach the root of *u*'s component under the root of *v*'s component.

    Callers are expected to have checked raiz(u, p) != raiz(v, p) first.
    """
    root_u = raiz(u, p)
    root_v = raiz(v, p)
    p[root_u] = root_v
def kruskal(edges, n, return_edj=0):
    """Kruskal's minimum spanning tree over *edges* for a graph with *n* nodes.

    :param edges: list of [w, u, v] entries; NOTE: sorted in place, as in the
                  original implementation (callers see the sorted list).
    :param n: number of nodes.
    :param return_edj: when truthy, return the MST as a [w, u, v] edge list
                       instead of an adjacency list.
    :return: MST adjacency list (or edge list), or an error code:
             -1 no nodes, -2 a malformed (too-short) edge, -3 not connected.
    """
    edges.sort()
    if n == 0:
        return -1
    for edge in edges:
        if len(edge) < 3:
            return -2
    # p is a union-find parent array; -1 marks a component root.
    p = [-1] * n
    tree = [[] for _ in range(n)]
    tree_edj = []
    for w, u, v in edges:
        # Accept the edge only if it joins two distinct components.
        if raiz(u, p) != raiz(v, p):
            tree[u].append([v, w])
            tree[v].append([u, w])
            tree_edj.append([w, u, v])
            juntar(u, v, p)
    # More than one remaining root means the graph was not connected.
    # (Fix: removed a stray debug `print(c)` that leaked into stdout here.)
    root_count = sum(1 for parent in p if parent == -1)
    if root_count > 1:
        return -3
    return tree_edj if return_edj else tree
def unit_test_kruskal():
    """Smoke tests for kruskal(); prints an OK/FAIL line per scenario."""
    test1 = [
        [1, 7, 6], [2, 8, 2], [2, 6, 5], [4, 0, 1],
        [4, 2, 5], [6, 8, 6], [7, 2, 3], [7, 7, 8],
        [8, 0, 7], [8, 1, 2], [9, 3, 4], [10, 5, 4],
        [11, 1, 7], [14, 3, 5],
    ]
    ans1 = [
        [[1, 4], [7, 8]],
        [[0, 4]],
        [[5, 4], [8, 2], [3, 7]],
        [[2, 7], [4, 9]],
        [[3, 9]],
        [[6, 2], [2, 4]],
        [[7, 1], [5, 2]],
        [[0, 8], [6, 1]],
        [[2, 2]],
    ]
    mst1 = kruskal(test1, 9)
    # Adjacency lists are order-insensitive: sort both sides before comparing.
    for expected, actual in zip(ans1, mst1):
        expected.sort()
        actual.sort()
    print("Unit test 1: OK -> regular solution" if ans1 == mst1
          else "Unit test 1: FAIL -> regular solution")
    # No nodes at all -> error code -1.
    print("Unit test 2: OK -> empty tree" if kruskal([], 0) == -1
          else "Unit test 2: FAIL -> empty tree")
    # A malformed (too short) edge -> error code -2.
    test3 = [[], [5, 1, 2], [1, 1, 3]]
    print("Unit test 3: OK -> empty node" if kruskal(test3, 4) == -2
          else "Unit test 3: FAIL -> empty node")
    # Two disjoint components -> error code -3.
    test4 = [[2, 0, 1], [1, 3, 2]]
    print("Unit test 4: OK -> not connected graph" if kruskal(test4, 4) == -3
          else "Unit test 4: FAIL -> not connected graph")
    # A malformed edge buried in an otherwise valid edge list -> -2.
    test5 = [
        [1, 7, 6], [2, 8, 2], [2, 6, 5], [4, 0, 1],
        [4, 2, 5], [6, 8], [7, 2, 3], [7, 7, 8],
        [8, 0, 7], [8, 1, 2], [9, 3, 4], [10, 5, 4],
        [11, 1, 7], [14, 3, 5],
    ]
    print("Unit test 5: OK -> empty node" if kruskal(test5, 9) == -2
          else "Unit test 5: Fail -> empty node")
def get_random_graph(nodes, max_edges, min_weight, max_weight):
    """Build a random connected undirected weighted graph.

    :param nodes: number of nodes (must be >= 2)
    :param max_edges: upper bound on the number of edges (must be >= nodes - 1)
    :param min_weight: inclusive lower bound for random edge weights
    :param max_weight: inclusive upper bound for random edge weights
    :return: (adjacency list, edge list of [w, u, v])
    :raises Exception: when the node/edge counts are invalid.
    """
    # Fix: removed the unreachable `return -1` / `return -2` statements that
    # followed these raises in the original.
    if nodes < 2:
        print()
        raise Exception("Please input more than 1 node")
    if max_edges < nodes - 1:
        print()
        raise Exception("Edges must be >= nodes-1")
    node_population = list(range(nodes))
    adj = [[] for _ in range(nodes)]
    edj = []
    # Union-find parent array tracking connected components.
    p = [-1] * nodes
    components = nodes
    # linked[u][v] marks an already-created edge (u < v), avoiding duplicates.
    linked = {k: {} for k in range(nodes)}
    while components <= max_edges:
        s = random.sample(node_population, 2)
        u = min(s)
        v = max(s)
        max_edges -= 1
        if not linked[u].get(v, 0):
            linked[u][v] = 1
            w = random.randint(min_weight, max_weight)
            adj[u].append([v, w])
            adj[v].append([u, w])
            edj.append([w, u, v])
            if raiz(u, p) != raiz(v, p):
                juntar(u, v, p)
                components -= 1
    # Chain any remaining component roots together so the graph is connected.
    raices = []
    for node_index, parent in enumerate(p):
        if parent == -1:
            raices.append(node_index)
    for i in range(1, len(raices)):
        w = random.randint(min_weight, max_weight)
        u = raices[i - 1]
        v = raices[i]
        adj[u].append([v, w])
        adj[v].append([u, w])
        edj.append([w, u, v])
    return adj, edj
# Window geometry and shared drawing state for the MST visualizer.
width=700
height=500
pygame.init()
window = pygame.display.set_mode((width, height))
window.fill((0, 0, 0))
# Color palette: MST edges are highlighted green, nodes drawn red.
mst_edge=(125,255,125)
node_color=(255, 0, 0)
red=(255, 0, 0)
white=(255,255,255)
edge_color=white
node_radius=10
class Edge:
    """Undirected weighted edge between two Node objects, with a draw color."""

    def __init__(self, node1, node2, weight=0, color=white):
        self.node1 = node1
        self.node2 = node2
        self.weight = weight
        self.color = color

    def pos1(self):
        """Screen position of the first endpoint."""
        return self.node1.pos

    def pos2(self):
        """Screen position of the second endpoint."""
        return self.node2.pos

    def __repr__(self):
        # Added for debuggability; backward-compatible with existing callers.
        return 'Edge({!r}, {!r}, weight={!r})'.format(self.node1, self.node2, self.weight)
class Node:
    """A draggable circle on screen; tracks its bounding box and incident edges."""

    def __init__(self, x, y, color, radius):
        self.pos = (x, y)
        self.x_boundary = (x - radius, x + radius)
        self.y_boundary = (y - radius, y + radius)
        self.color = color
        self.radius = radius
        self.edges = []

    def recalc_boundary(self):
        """Refresh the hit-test bounding box after self.pos has been moved."""
        x, y = self.pos
        self.x_boundary = (x - self.radius, x + self.radius)
        self.y_boundary = (y - self.radius, y + self.radius)

    def add_edge(self):
        # Placeholder kept for interface compatibility; callers append
        # directly to self.edges instead.
        return
def mouse_in_node():
    """Return (node, index) of the node under the mouse, or (None, None).

    Scans the whole list without breaking, mirroring the original: when
    nodes overlap, the last matching node wins.
    """
    mouse_x, mouse_y = pygame.mouse.get_pos()
    hit, hit_index = None, None
    for index, node in enumerate(nodes):
        if within(mouse_x, *node.x_boundary) and within(mouse_y, *node.y_boundary):
            hit, hit_index = node, index
    return hit, hit_index
def generate_random_graph():
    """Create a random 10-node graph and place its nodes at random positions.

    Returns (adjacency list, edge list, list of Node objects); each Edge is
    shared by both endpoint nodes.
    """
    adj, edj = get_random_graph(10, 16, 1, 10)
    node_list = []
    for _ in range(len(adj)):
        # Keep the x-then-y randint call order of the original.
        x = random.randint(0, width)
        y = random.randint(0, height)
        node_list.append(Node(x, y, node_color, node_radius))
    for w, u, v in edj:
        a, b = min(u, v), max(u, v)
        shared_edge = Edge(node_list[a], node_list[b], w)
        node_list[a].edges.append(shared_edge)
        node_list[b].edges.append(shared_edge)
    return adj, edj, node_list
# Interactive main loop.
# Controls: left-click empty space adds a node; left-drag moves a node;
# hold LSHIFT over a node, move, and release a key to draw a new edge;
# P runs Prim, K runs Kruskal (MST edges turn green), C clears colors,
# G regenerates a random graph.
adj,edj,nodes=generate_random_graph()
within = lambda x, low, high: low <= x <= high
# NOTE(review): `selected` and this initial `i` are never read afterwards.
selected = False
i=-1
selected_node=None
last_pos=None
drawing_edge=False
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            # NOTE(review): pygame.quit() without exiting the loop — subsequent
            # pygame calls in this iteration will raise; confirm intended.
            pygame.quit()
        elif event.type == pygame.MOUSEBUTTONDOWN and not selected_node and event.button == 1:
            pos = pygame.mouse.get_pos()
            selected_node,index=mouse_in_node()
            if not selected_node:
                # Clicked empty space: create a new node under the cursor.
                nodes.append(Node(pos[0], pos[1], red, 10))
        elif event.type==pygame.KEYDOWN:
            if event.key== pygame.K_LSHIFT:
                # Start drawing an edge from the node under the cursor.
                pos = pygame.mouse.get_pos()
                node1,index=mouse_in_node()
                if node1:
                    last_pos=pos
            #Run Prim
            if event.key==pygame.K_p:
                edj_prim=prim(adj,1)
                # Highlight every edge that belongs to the MST.
                for w,u,v in edj_prim:
                    u,v=min(u,v),max(u,v)
                    for edge in nodes[u].edges:
                        if edge.node1==nodes[u] and edge.node2==nodes[v] and edge.weight==w:
                            edge.color=mst_edge
            # Run Kruskal
            if event.key==pygame.K_k:
                edj_kruskal=kruskal(edj,len(adj),1)
                for w,u,v in edj_kruskal:
                    u,v=min(u,v),max(u,v)
                    for edge in nodes[u].edges:
                        if edge.node1==nodes[u] and edge.node2==nodes[v] and edge.weight==w:
                            edge.color=mst_edge
            if event.key==pygame.K_c:
                # Reset all edge colors (clear MST highlighting).
                for node in nodes:
                    for edge in node.edges:
                        edge.color=white
            if event.key==pygame.K_g:
                adj,edj,nodes=generate_random_graph()
        elif event.type==pygame.MOUSEMOTION and last_pos:
            # While an edge is being drawn, track the cursor for the preview line.
            current_pos = pygame.mouse.get_pos()
            drawing_edge=True
        elif event.type==pygame.KEYUP and drawing_edge:
            # Finish the edge on the node currently under the cursor (if any).
            node2,index=mouse_in_node()
            if node1 and node2:
                new_edge=Edge(node1,node2)
                node1.edges.append(new_edge)
                node2.edges.append(new_edge)
            last_pos=None
            drawing_edge=False
        elif event.type == pygame.MOUSEBUTTONUP:
            selected_node=None
    # Drag: the selected node follows the mouse.
    if selected_node:
        selected_node.pos = pygame.mouse.get_pos()
        selected_node.recalc_boundary()
    window.fill((0, 0, 0))
    if drawing_edge:
        # Preview line for the edge currently being drawn.
        pygame.draw.line(window, red, last_pos, current_pos, 1)
    for i,node in enumerate(nodes):
        pygame.draw.circle(
            window, node.color,
            node.pos,
            node.radius
        )
        for e,edge in enumerate(node.edges):
            pygame.draw.line(window, edge.color, edge.pos1(), edge.pos2(), 1)
    pygame.display.update()
"pygame.draw.circle",
"random.sample",
"pygame.init",
"pygame.draw.line",
"pygame.event.get",
"pygame.quit",
"pygame.display.set_mode",
"pygame.mouse.get_pos",
"heapq.heappop",
"heapq.heappush",
"pygame.display.update",
"random.randint"
] | [((6739, 6752), 'pygame.init', 'pygame.init', ([], {}), '()\n', (6750, 6752), False, 'import pygame\n'), ((6762, 6802), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height)'], {}), '((width, height))\n', (6785, 6802), False, 'import pygame\n'), ((7810, 7832), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (7830, 7832), False, 'import pygame\n'), ((8757, 8775), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (8773, 8775), False, 'import pygame\n'), ((11618, 11641), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (11639, 11641), False, 'import pygame\n'), ((627, 640), 'heapq.heappop', 'pq.heappop', (['q'], {}), '(q)\n', (637, 640), True, 'import heapq as pq\n'), ((5947, 5980), 'random.sample', 'random.sample', (['node_population', '(2)'], {}), '(node_population, 2)\n', (5960, 5980), False, 'import random\n'), ((6512, 6550), 'random.randint', 'random.randint', (['min_weight', 'max_weight'], {}), '(min_weight, max_weight)\n', (6526, 6550), False, 'import random\n'), ((8203, 8227), 'random.randint', 'random.randint', (['(0)', 'width'], {}), '(0, width)\n', (8217, 8227), False, 'import random\n'), ((8237, 8262), 'random.randint', 'random.randint', (['(0)', 'height'], {}), '(0, height)\n', (8251, 8262), False, 'import random\n'), ((11144, 11166), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (11164, 11166), False, 'import pygame\n'), ((11272, 11327), 'pygame.draw.line', 'pygame.draw.line', (['window', 'red', 'last_pos', 'current_pos', '(1)'], {}), '(window, red, last_pos, current_pos, 1)\n', (11288, 11327), False, 'import pygame\n'), ((11373, 11434), 'pygame.draw.circle', 'pygame.draw.circle', (['window', 'node.color', 'node.pos', 'node.radius'], {}), '(window, node.color, node.pos, node.radius)\n', (11391, 11434), False, 'import pygame\n'), ((162, 187), 'heapq.heappush', 'pq.heappush', (['q', '[w, u, v]'], {}), '(q, [w, u, v])\n', (173, 187), True, 'import heapq as pq\n'), 
((6124, 6162), 'random.randint', 'random.randint', (['min_weight', 'max_weight'], {}), '(min_weight, max_weight)\n', (6138, 6162), False, 'import random\n'), ((8836, 8849), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (8847, 8849), False, 'import pygame\n'), ((8963, 8985), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (8983, 8985), False, 'import pygame\n'), ((9273, 9295), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (9293, 9295), False, 'import pygame\n'), ((10578, 10600), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (10598, 10600), False, 'import pygame\n')] |
import os
import csv
import numpy as np
from pathlib import Path
from tqdm import tqdm
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
seed = 3535999445
def imdb(path=Path("data/aclImdb/")):
    """Load the ACL IMDB dataset as ((train_texts, train_labels), (test_texts, test_labels)).

    Results are pickled to <path>/train-test.p; the cache is returned when present.
    """
    import pickle
    cache_file = path / "train-test.p"
    try:
        return pickle.load(cache_file.open("rb"))
    except FileNotFoundError:
        # No cache yet -- fall through and read the raw text files.
        pass
    CLASSES = ["neg", "pos", "unsup"]
    def _read_split(split_dir):
        # Label index follows the position in CLASSES (neg=0, pos=1, unsup=2).
        texts, labels = [], []
        for idx, label in tqdm(enumerate(CLASSES)):
            for fname in tqdm((split_dir / label).glob('*.txt'), leave=False):
                texts.append(fname.read_text())
                labels.append(idx)
        return texts, np.asarray(labels)
    data = (_read_split(path / "train"), _read_split(path / "test"))
    pickle.dump(data, cache_file.open("wb"))
    return data
def _rocstories(path):
    """Parse a ROCStories cloze-test CSV.

    Returns 4 parallel lists:
        st:  story contexts (columns 1-4 joined with spaces)
        ct1: first candidate ending
        ct2: second candidate ending
        y:   0-based index of the correct ending (CSV stores it 1-based)
    """
    st, ct1, ct2, y = [], [], [], []
    with open(path, encoding='utf_8') as f:
        rows = list(csv.reader(f))
    for i, line in enumerate(tqdm(rows, ncols=80, leave=False)):
        # Row 0 is the header.
        if i > 0:
            st.append(' '.join(line[1:5]))
            ct1.append(line[5])
            ct2.append(line[6])
            y.append(int(line[-1]) - 1)
    return st, ct1, ct2, y
def rocstories(data_dir, n_train=1497, n_valid=374):
    """Load the ROCStories train/validation/test splits.

    n_train is accepted for API compatibility but unused: the split is driven
    entirely by n_valid (validation size) via train_test_split.
    Returns ((trX1, trX2, trX3, trY), (vaX1, vaX2, vaX3, vaY), (teX1, teX2, teX3)).
    """
    storys, comps1, comps2, ys = _rocstories(os.path.join(data_dir, 'cloze_test_val__spring2016 - cloze_test_ALL_val.csv'))
    teX1, teX2, teX3, _ = _rocstories(os.path.join(data_dir, 'cloze_test_test__spring2016 - cloze_test_ALL_test.csv'))
    tr_storys, va_storys, tr_comps1, va_comps1, tr_comps2, va_comps2, tr_ys, va_ys = train_test_split(
        storys, comps1, comps2, ys, test_size=n_valid, random_state=seed)
    # The split pieces are already parallel lists; copy them and convert labels.
    trX1, trX2, trX3 = list(tr_storys), list(tr_comps1), list(tr_comps2)
    vaX1, vaX2, vaX3 = list(va_storys), list(va_comps1), list(va_comps2)
    trY = np.asarray(tr_ys, dtype=np.int32)
    vaY = np.asarray(va_ys, dtype=np.int32)
    return (trX1, trX2, trX3, trY), (vaX1, vaX2, vaX3, vaY), (teX1, teX2, teX3)
| [
"pathlib.Path",
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.asarray",
"csv.reader"
] | [((210, 231), 'pathlib.Path', 'Path', (['"""data/aclImdb/"""'], {}), "('data/aclImdb/')\n", (214, 231), False, 'from pathlib import Path\n'), ((1932, 2018), 'sklearn.model_selection.train_test_split', 'train_test_split', (['storys', 'comps1', 'comps2', 'ys'], {'test_size': 'n_valid', 'random_state': 'seed'}), '(storys, comps1, comps2, ys, test_size=n_valid,\n random_state=seed)\n', (1948, 2018), False, 'from sklearn.model_selection import train_test_split\n'), ((2444, 2475), 'numpy.asarray', 'np.asarray', (['trY'], {'dtype': 'np.int32'}), '(trY, dtype=np.int32)\n', (2454, 2475), True, 'import numpy as np\n'), ((2486, 2517), 'numpy.asarray', 'np.asarray', (['vaY'], {'dtype': 'np.int32'}), '(vaY, dtype=np.int32)\n', (2496, 2517), True, 'import numpy as np\n'), ((1115, 1128), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1125, 1128), False, 'import csv\n'), ((1649, 1726), 'os.path.join', 'os.path.join', (['data_dir', '"""cloze_test_val__spring2016 - cloze_test_ALL_val.csv"""'], {}), "(data_dir, 'cloze_test_val__spring2016 - cloze_test_ALL_val.csv')\n", (1661, 1726), False, 'import os\n'), ((1766, 1845), 'os.path.join', 'os.path.join', (['data_dir', '"""cloze_test_test__spring2016 - cloze_test_ALL_test.csv"""'], {}), "(data_dir, 'cloze_test_test__spring2016 - cloze_test_ALL_test.csv')\n", (1778, 1845), False, 'import os\n'), ((689, 707), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (699, 707), True, 'import numpy as np\n')] |
import logging
import textwrap
from discord.ext import commands
from miyu_bot.bot.bot import D4DJBot
from miyu_bot.commands.common.fuzzy_matching import romanize, FuzzyMatcher
class Utility(commands.Cog):
    """Owner-only maintenance and debugging commands for the bot."""
    bot: D4DJBot
    def __init__(self, bot):
        self.bot = bot
        self.logger = logging.getLogger(__name__)
    # Debug helper: echo the romanized form of the given text.
    @commands.command(hidden=True)
    @commands.is_owner()
    async def romanize(self, ctx: commands.Context, *, arg: str):
        await ctx.send(romanize(arg))
    # Debug helper: fuzzy-match score between two romanized strings.
    @commands.command(hidden=True, ignore_extra=False)
    @commands.is_owner()
    async def similarity_score(self, ctx: commands.Context, source: str, target: str):
        await ctx.send(str(FuzzyMatcher().score(romanize(source), romanize(target))))
    @commands.command(hidden=True)
    @commands.is_owner()
    async def shutdown(self, ctx: commands.Context):
        await self.bot.logout()
    # SECURITY: eval of arbitrary text -- acceptable only because the command
    # is gated by @commands.is_owner().  Do not relax that check.
    @commands.command(name='eval', hidden=True)
    @commands.is_owner()
    async def eval_cmd(self, ctx: commands.Context, *, body: str):
        env = {
            'bot': self.bot,
            'ctx': ctx,
            'assets': self.bot.assets,
            'asset_filters': self.bot.asset_filters,
            **globals(),
        }
        # NOTE(review): bare attribute access with no effect -- possibly a
        # leftover used to force lazy initialization; confirm and remove.
        self.bot.asset_filters.cards.default_filter
        # Strip single-backtick wrapping if present.
        if body and body[0] == '`' and body[-1] == '`':
            body = body[1:-1]
        try:
            value = eval(body, env)
            if value:
                await ctx.send(str(value))
            else:
                await ctx.send('Done')
        except Exception as e:
            await ctx.send(f'```{e.__class__.__name__}: {e}\n```')
    # SECURITY: exec of arbitrary code -- owner-gated, same caveat as eval_cmd.
    @commands.command(name='exec', hidden=True)
    @commands.is_owner()
    async def exec_cmd(self, ctx: commands.Context, *, body: str):
        env = {
            'bot': self.bot,
            'ctx': ctx,
            'assets': self.bot.assets,
            'asset_filters': self.bot.asset_filters,
            **globals(),
        }
        # Strip ```python ... ```, ``` ... ```, or ` ... ` wrappers, in that order.
        if body and body[:9] == '```python' and body[-3:] == '```':
            body = body[9:-3]
        if body and body[:3] == '```' and body[-3:] == '```':
            body = body[3:-3]
        if body and body[:1] == '`' and body[-1:] == '`':
            body = body[1:-1]
        # Wrap the snippet in an async function so `await` works inside it.
        body = 'async def f():\n' + textwrap.indent(body, '    ')
        l = locals()
        exec(body, env, l)
        f = l['f']
        try:
            value = await f()
            if value:
                await ctx.send(str(value))
            else:
                await ctx.send('Done')
        except Exception as e:
            await ctx.send(f'```{e.__class__.__name__}: {e}\n```')
    @commands.command(name='invite',
                      aliases=[],
                      description='Sends the bot invite.',
                      help='!invite')
    async def invite(self, ctx: commands.Context):
        await ctx.send(
            'https://discord.com/api/oauth2/authorize?client_id=789314370999287808&permissions=388160&scope=bot')
def setup(bot):
    """discord.py extension entry point: attach the Utility cog to *bot*."""
    cog = Utility(bot)
    bot.add_cog(cog)
| [
"logging.getLogger",
"miyu_bot.commands.common.fuzzy_matching.romanize",
"textwrap.indent",
"miyu_bot.commands.common.fuzzy_matching.FuzzyMatcher",
"discord.ext.commands.is_owner",
"discord.ext.commands.command"
] | [((335, 364), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)'}), '(hidden=True)\n', (351, 364), False, 'from discord.ext import commands\n'), ((370, 389), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (387, 389), False, 'from discord.ext import commands\n'), ((500, 549), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)', 'ignore_extra': '(False)'}), '(hidden=True, ignore_extra=False)\n', (516, 549), False, 'from discord.ext import commands\n'), ((555, 574), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (572, 574), False, 'from discord.ext import commands\n'), ((754, 783), 'discord.ext.commands.command', 'commands.command', ([], {'hidden': '(True)'}), '(hidden=True)\n', (770, 783), False, 'from discord.ext import commands\n'), ((789, 808), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (806, 808), False, 'from discord.ext import commands\n'), ((900, 942), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""eval"""', 'hidden': '(True)'}), "(name='eval', hidden=True)\n", (916, 942), False, 'from discord.ext import commands\n'), ((948, 967), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (965, 967), False, 'from discord.ext import commands\n'), ((1647, 1689), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""exec"""', 'hidden': '(True)'}), "(name='exec', hidden=True)\n", (1663, 1689), False, 'from discord.ext import commands\n'), ((1695, 1714), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (1712, 1714), False, 'from discord.ext import commands\n'), ((2659, 2760), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""invite"""', 'aliases': '[]', 'description': '"""Sends the bot invite."""', 'help': '"""!invite"""'}), "(name='invite', aliases=[], description=\n 'Sends the bot invite.', help='!invite')\n", (2675, 2760), False, 'from 
discord.ext import commands\n'), ((301, 328), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (318, 328), False, 'import logging\n'), ((2294, 2321), 'textwrap.indent', 'textwrap.indent', (['body', '""" """'], {}), "(body, ' ')\n", (2309, 2321), False, 'import textwrap\n'), ((479, 492), 'miyu_bot.commands.common.fuzzy_matching.romanize', 'romanize', (['arg'], {}), '(arg)\n', (487, 492), False, 'from miyu_bot.commands.common.fuzzy_matching import romanize, FuzzyMatcher\n'), ((710, 726), 'miyu_bot.commands.common.fuzzy_matching.romanize', 'romanize', (['source'], {}), '(source)\n', (718, 726), False, 'from miyu_bot.commands.common.fuzzy_matching import romanize, FuzzyMatcher\n'), ((728, 744), 'miyu_bot.commands.common.fuzzy_matching.romanize', 'romanize', (['target'], {}), '(target)\n', (736, 744), False, 'from miyu_bot.commands.common.fuzzy_matching import romanize, FuzzyMatcher\n'), ((689, 703), 'miyu_bot.commands.common.fuzzy_matching.FuzzyMatcher', 'FuzzyMatcher', ([], {}), '()\n', (701, 703), False, 'from miyu_bot.commands.common.fuzzy_matching import romanize, FuzzyMatcher\n')] |
import gi
import ctypes as pyc
from ctypes import pythonapi
from gi.repository import GObject as GO
# Load libgobject and declare C signatures for the functions we call via ctypes.
pyc.cdll.LoadLibrary('libgobject-2.0.so')
lego = pyc.CDLL('libgobject-2.0.so')
# g_type_name(GType) -> const gchar* (bytes in Python, NULL -> None).
lego.g_type_name.restype = pyc.c_char_p
lego.g_type_name.argtypes = (pyc.c_ulonglong,)
# CPython C-API capsule accessors: name lookup and raw pointer extraction.
pythonapi.PyCapsule_GetName.restype = pyc.c_char_p
pythonapi.PyCapsule_GetName.argtypes = (pyc.py_object,)
pythonapi.PyCapsule_GetPointer.restype = pyc.c_void_p
pythonapi.PyCapsule_GetPointer.argtypes = (pyc.py_object, pyc.c_char_p)
###############################################################################
# GObject
###############################################################################
class _PyGObject_Functions(pyc.Structure):
    """ctypes mirror of PyGObject's C API struct (exposed via a PyCapsule).

    Field order must match the C struct layout exactly -- do not reorder.
    Only pygobject_new is actually called; the others are layout padding.
    """
    _fields_ = [
        ('pygobject_register_class',
         pyc.PYFUNCTYPE(pyc.c_void_p)),
        ('pygobject_register_wrapper',
         pyc.PYFUNCTYPE(pyc.c_void_p)),
        ('pygobject_lookup_class',
         pyc.PYFUNCTYPE(pyc.c_void_p)),
        # pygobject_new(GObject*) -> Python wrapper object.
        ('pygobject_new',
         pyc.PYFUNCTYPE(pyc.py_object, pyc.c_void_p)),
    ]
def capsule_name(capsule):
    """Return the name stored in a PyCapsule, via the CPython C API (bytes)."""
    raw_name = pythonapi.PyCapsule_GetName(capsule)
    return raw_name
def capsule_ptr(capsule):
    """Return the raw C pointer held by a PyCapsule (as an int address)."""
    # PyCapsule_GetPointer requires the capsule's exact name.
    return pythonapi.PyCapsule_GetPointer(capsule, capsule_name(capsule))
class _PyGO_CAPI(object):
    '''
    Static helper that creates a Python object from a GObject pointer,
    using PyGObject's C API (lazily resolved from gi's API capsule).
    '''
    # Cached _PyGObject_Functions view; resolved on first use.
    _api = None
    @classmethod
    def _set_api(cls):
        # Resolve the PyGObject C API struct from the capsule gi exposes.
        addr = capsule_ptr(gi._gobject._PyGObject_API)
        cls._api = _PyGObject_Functions.from_address(addr)
    @classmethod
    def to_object(cls, addr):
        """Wrap the GObject at address *addr* in a Python object."""
        cls._api or cls._set_api()
        return cls._api.pygobject_new(addr)
###############################################################################
# GType
###############################################################################
# Conversion categories: how a raw C value maps to Python (see from_int).
INT, ADDRESS, NONE, NOT_IMPLEMENTED = range(4)
# GTypes whose raw value is the value itself (wrap with ctype(value).value).
G_PY_INT = {
    (GO.TYPE_BOOLEAN, pyc.c_bool),
    (GO.TYPE_UNICHAR, pyc.c_ubyte),
    (GO.TYPE_UCHAR, pyc.c_ubyte),
    (GO.TYPE_CHAR, pyc.c_char),
    (GO.TYPE_INT, pyc.c_int),
    (GO.TYPE_UINT, pyc.c_uint),
    (GO.TYPE_FLAGS, pyc.c_uint),
}
# GTypes whose raw value is an address (read with ctype.from_address(value)).
G_PY_ADDRESS = {
    (GO.TYPE_LONG, pyc.c_long),
    (GO.TYPE_DOUBLE, pyc.c_double),
    (GO.TYPE_ULONG, pyc.c_ulong),
    (GO.TYPE_INT64, pyc.c_longlong),
    (GO.TYPE_UINT64, pyc.c_ulonglong),
    (GO.TYPE_ENUM, pyc.c_ulonglong),
    (GO.TYPE_FLOAT, pyc.c_float),
    (GO.TYPE_STRING, pyc.c_char_p),
    (GO.TYPE_POINTER, pyc.c_void_p),
    (GO.TYPE_OBJECT, pyc.c_void_p),
    (GO.TYPE_PYOBJECT, pyc.py_object),
}
# GTypes carrying no convertible value.
G_PY_NONE = {
    (GO.TYPE_NONE, None),
    (GO.TYPE_INVALID, None),
}
# GTypes we know about but do not convert yet.
G_PY_NOT_IMPLEMENTED = {
    (GO.TYPE_PARAM, None),
    (GO.TYPE_STRV, None),
    (GO.TYPE_VARIANT, None),
    (GO.TYPE_BOXED, None),
    (GO.TYPE_INTERFACE, None),
}
TYPES_G_PY = G_PY_INT | G_PY_ADDRESS | G_PY_NONE | G_PY_NOT_IMPLEMENTED
# Lookup table keyed by hash(GType) -> (GType, ctype, category).
TYPES_ID = {hash(gt): (gt, ct, INT) for gt, ct in G_PY_INT}
_u = TYPES_ID.update
_u({hash(gt): (gt, ct, ADDRESS) for gt, ct in G_PY_ADDRESS})
_u({hash(gt): (gt, ct, NONE) for gt, ct in G_PY_NONE})
_u({hash(gt): (gt, ct, NOT_IMPLEMENTED) for gt, ct in G_PY_NOT_IMPLEMENTED})
def gtype_name_of(gtype_id=0):
    """Return the registered name of a GType, or a falsy value when unknown.

    Wraps glib's gobject/gtype.c:g_type_name; see
    https://github.com/GNOME/glib/blob/master/gobject/gtype.c#L3787
    """
    raw = lego.g_type_name(hash(gtype_id))
    # NULL from C arrives as None; keep falsy results as-is.
    return raw.decode('utf-8') if raw else raw
def gtype_and_ctype_of(gtype_id=0):
    """Resolve *gtype_id* to a (GType, ctype, category) triple.

    Unknown ids fall back to a by-name lookup, inheriting the conversion
    category from the parent type.  May return (None, None, NOT_IMPLEMENTED).
    """
    fallback = (None, None, NOT_IMPLEMENTED)
    resolved = TYPES_ID.get(hash(gtype_id), fallback)
    if resolved[0]:
        return resolved
    name = gtype_name_of(gtype_id)
    if not name:
        return resolved
    # Registered but untabulated type: treat its value as a pointer and
    # borrow the parent's conversion category.
    gtype = GO.GType.from_name(name)
    parent_entry = TYPES_ID.get(hash(gtype.parent), fallback)
    return (gtype, pyc.c_void_p, parent_entry[2])
def from_int(value, gtype_id):
    """Convert a raw C value (int or address) for *gtype_id* into Python.

    Returns (py_value, gtype, ctype, category); falls back to returning
    *value* unchanged when the type cannot be resolved.
    Dispatch order matters: is_a() checks run before the category checks.
    """
    py_value = value
    types = gtype_and_ctype_of(gtype_id)
    gtype, ctype, ctg = types
    if gtype and ctype:
        if gtype.is_a(GO.TYPE_OBJECT):
            # GObject pointer: wrap via the PyGObject C API.
            py_value = _PyGO_CAPI.to_object(value)
        elif gtype.is_a(GO.TYPE_GTYPE):
            py_value = gtype
        elif gtype.is_a(GO.TYPE_STRING):
            # value is a char* address; decode the C string.
            py_value = ctype(value).value.decode('utf-8')
        elif ctg == INT:
            # The raw value IS the value; reinterpret through the ctype.
            py_value = ctype(value).value
        elif ctg == ADDRESS:
            # The raw value is an address; read the ctype stored there.
            py_value = ctype.from_address(value)
    return py_value, gtype, ctype, ctg
def c_to_py(value, gtype_id):
    """Convert a raw C value to its Python representation for *gtype_id*."""
    py_value, _gtype, _ctype, _category = from_int(value, gtype_id)
    return py_value
| [
"ctypes.cdll.LoadLibrary",
"gi.repository.GObject.GType.from_name",
"ctypes.pythonapi.PyCapsule_GetPointer",
"ctypes.pythonapi.PyCapsule_GetName",
"ctypes.PYFUNCTYPE",
"ctypes.CDLL"
] | [((100, 141), 'ctypes.cdll.LoadLibrary', 'pyc.cdll.LoadLibrary', (['"""libgobject-2.0.so"""'], {}), "('libgobject-2.0.so')\n", (120, 141), True, 'import ctypes as pyc\n'), ((149, 178), 'ctypes.CDLL', 'pyc.CDLL', (['"""libgobject-2.0.so"""'], {}), "('libgobject-2.0.so')\n", (157, 178), True, 'import ctypes as pyc\n'), ((1106, 1142), 'ctypes.pythonapi.PyCapsule_GetName', 'pythonapi.PyCapsule_GetName', (['capsule'], {}), '(capsule)\n', (1133, 1142), False, 'from ctypes import pythonapi\n'), ((1215, 1260), 'ctypes.pythonapi.PyCapsule_GetPointer', 'pythonapi.PyCapsule_GetPointer', (['capsule', 'name'], {}), '(capsule, name)\n', (1245, 1260), False, 'from ctypes import pythonapi\n'), ((781, 809), 'ctypes.PYFUNCTYPE', 'pyc.PYFUNCTYPE', (['pyc.c_void_p'], {}), '(pyc.c_void_p)\n', (795, 809), True, 'import ctypes as pyc\n'), ((863, 891), 'ctypes.PYFUNCTYPE', 'pyc.PYFUNCTYPE', (['pyc.c_void_p'], {}), '(pyc.c_void_p)\n', (877, 891), True, 'import ctypes as pyc\n'), ((941, 969), 'ctypes.PYFUNCTYPE', 'pyc.PYFUNCTYPE', (['pyc.c_void_p'], {}), '(pyc.c_void_p)\n', (955, 969), True, 'import ctypes as pyc\n'), ((1010, 1053), 'ctypes.PYFUNCTYPE', 'pyc.PYFUNCTYPE', (['pyc.py_object', 'pyc.c_void_p'], {}), '(pyc.py_object, pyc.c_void_p)\n', (1024, 1053), True, 'import ctypes as pyc\n'), ((3893, 3917), 'gi.repository.GObject.GType.from_name', 'GO.GType.from_name', (['name'], {}), '(name)\n', (3911, 3917), True, 'from gi.repository import GObject as GO\n')] |
from dataset.transform import crop, hflip, normalize, resize, blur, cutout
import math
import os
from PIL import Image
import random
from torch.utils.data import Dataset
from torchvision import transforms
class SemiDataset(Dataset):
    """Segmentation dataset supporting supervised and semi-supervised modes."""

    def __init__(self, name, root, mode, size, labeled_id_path=None, unlabeled_id_path=None, pseudo_mask_path=None):
        """
        :param name: dataset name, pascal or cityscapes
        :param root: root path of the dataset.
        :param mode: train: supervised learning only with labeled images, no unlabeled images are leveraged.
                     label: pseudo labeling the remaining unlabeled images.
                     semi_train: semi-supervised learning with both labeled and unlabeled images.
                     val: validation.
        :param size: crop size of training images.
        :param labeled_id_path: path of labeled image ids, needed in train or semi_train mode.
        :param unlabeled_id_path: path of unlabeled image ids, needed in semi_train or label mode.
        :param pseudo_mask_path: path of generated pseudo masks, needed in semi_train mode.
        """
        self.name = name
        self.root = root
        self.mode = mode
        self.size = size
        self.pseudo_mask_path = pseudo_mask_path

        if mode == 'semi_train':
            self.labeled_ids = self._read_ids(labeled_id_path)
            self.unlabeled_ids = self._read_ids(unlabeled_id_path)
            # Oversample the labeled pool so one epoch covers every unlabeled
            # image while labeled images keep reappearing.
            repeat = math.ceil(len(self.unlabeled_ids) / len(self.labeled_ids))
            self.ids = self.labeled_ids * repeat + self.unlabeled_ids
        else:
            if mode == 'val':
                id_path = 'dataset/splits/%s/val.txt' % name
            elif mode == 'label':
                id_path = unlabeled_id_path
            elif mode == 'train':
                id_path = labeled_id_path
            self.ids = self._read_ids(id_path)

    @staticmethod
    def _read_ids(path):
        # One "image_path mask_path" id per line.
        with open(path, 'r') as f:
            return f.read().splitlines()

    def __getitem__(self, item):
        id = self.ids[item]
        tokens = id.split(' ')
        img = Image.open(os.path.join(self.root, tokens[0]))

        if self.mode in ('val', 'label'):
            mask = Image.open(os.path.join(self.root, tokens[1]))
            img, mask = normalize(img, mask)
            return img, mask, id

        if self.mode == 'train' or (self.mode == 'semi_train' and id in self.labeled_ids):
            mask = Image.open(os.path.join(self.root, tokens[1]))
        else:
            # mode == 'semi_train' and the id corresponds to an unlabeled
            # image: read the generated pseudo mask instead.
            fname = os.path.basename(tokens[1])
            mask = Image.open(os.path.join(self.pseudo_mask_path, fname))

        # basic augmentation on all training images
        base_size = 400 if self.name == 'pascal' else 2048
        img, mask = resize(img, mask, base_size, (0.5, 2.0))
        img, mask = crop(img, mask, self.size)
        img, mask = hflip(img, mask, p=0.5)

        # strong augmentation on unlabeled images only
        if self.mode == 'semi_train' and id in self.unlabeled_ids:
            if random.random() < 0.8:
                img = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img)
            img = transforms.RandomGrayscale(p=0.2)(img)
            img = blur(img, p=0.5)
            img, mask = cutout(img, mask, p=0.5)

        img, mask = normalize(img, mask)
        return img, mask

    def __len__(self):
        return len(self.ids)
| [
"dataset.transform.hflip",
"dataset.transform.resize",
"torchvision.transforms.RandomGrayscale",
"os.path.join",
"dataset.transform.cutout",
"random.random",
"torchvision.transforms.ColorJitter",
"dataset.transform.normalize",
"dataset.transform.crop",
"dataset.transform.blur"
] | [((2887, 2927), 'dataset.transform.resize', 'resize', (['img', 'mask', 'base_size', '(0.5, 2.0)'], {}), '(img, mask, base_size, (0.5, 2.0))\n', (2893, 2927), False, 'from dataset.transform import crop, hflip, normalize, resize, blur, cutout\n'), ((2948, 2974), 'dataset.transform.crop', 'crop', (['img', 'mask', 'self.size'], {}), '(img, mask, self.size)\n', (2952, 2974), False, 'from dataset.transform import crop, hflip, normalize, resize, blur, cutout\n'), ((2995, 3018), 'dataset.transform.hflip', 'hflip', (['img', 'mask'], {'p': '(0.5)'}), '(img, mask, p=0.5)\n', (3000, 3018), False, 'from dataset.transform import crop, hflip, normalize, resize, blur, cutout\n'), ((3408, 3428), 'dataset.transform.normalize', 'normalize', (['img', 'mask'], {}), '(img, mask)\n', (3417, 3428), False, 'from dataset.transform import crop, hflip, normalize, resize, blur, cutout\n'), ((2316, 2336), 'dataset.transform.normalize', 'normalize', (['img', 'mask'], {}), '(img, mask)\n', (2325, 2336), False, 'from dataset.transform import crop, hflip, normalize, resize, blur, cutout\n'), ((3321, 3337), 'dataset.transform.blur', 'blur', (['img'], {'p': '(0.5)'}), '(img, p=0.5)\n', (3325, 3337), False, 'from dataset.transform import crop, hflip, normalize, resize, blur, cutout\n'), ((3362, 3386), 'dataset.transform.cutout', 'cutout', (['img', 'mask'], {'p': '(0.5)'}), '(img, mask, p=0.5)\n', (3368, 3386), False, 'from dataset.transform import crop, hflip, normalize, resize, blur, cutout\n'), ((2711, 2753), 'os.path.join', 'os.path.join', (['self.pseudo_mask_path', 'fname'], {}), '(self.pseudo_mask_path, fname)\n', (2723, 2753), False, 'import os\n'), ((3152, 3167), 'random.random', 'random.random', ([], {}), '()\n', (3165, 3167), False, 'import random\n'), ((3264, 3297), 'torchvision.transforms.RandomGrayscale', 'transforms.RandomGrayscale', ([], {'p': '(0.2)'}), '(p=0.2)\n', (3290, 3297), False, 'from torchvision import transforms\n'), ((3197, 3240), 'torchvision.transforms.ColorJitter', 
'transforms.ColorJitter', (['(0.5)', '(0.5)', '(0.5)', '(0.25)'], {}), '(0.5, 0.5, 0.5, 0.25)\n', (3219, 3240), False, 'from torchvision import transforms\n')] |
#!/usr/bin/env python
# encoding: utf-8
from six import with_metaclass
from functools import wraps
from webob import Request, Response, exc
import re
from pybald.util import camel_to_underscore
from routes import redirect_to
from pybald import context
import json
import random
import uuid
import logging
console = logging.getLogger(__name__)
controller_pattern = re.compile(r'(\w+)Controller')
class CSRFValidationFailure(exc.HTTPForbidden):
    """HTTP 403 error raised when a POST request fails CSRF token validation."""
    pass
def csrf_protected(action_func):
    """
    Decorator to add CSRF (cross-site request forgery) protection to POST
    requests on an action. To use, include this decorator and provide the
    token in any submitted forms.

    For example, in the controller, do:

        @action
        @csrf_protected
        def my_action(self, req):
            ...

    And in the template:

        <form action='my_action' method='post'>
        ${csrf_input}
        ...
        </form>

    :param action_func: the action method to wrap.
    :raises CSRFValidationFailure: (HTTP 403) when a POST arrives without a
        token, the session has no token stashed, or the tokens mismatch.
    """
    # name of the hidden form field carrying the token
    CSRF_TOKEN_POST_VARIABLE = '__csrf_token__'
    @wraps(action_func)
    def replacement(self, req):
        # only POSTs are validated; GETs just get a fresh token below
        if req.method == 'POST':
            try:
                if CSRF_TOKEN_POST_VARIABLE not in req.POST:
                    raise CSRFValidationFailure(
                        ("No %r value found in POST data. Please make sure "
                         "that a ${csrf_input} is used in the form template.")
                        % CSRF_TOKEN_POST_VARIABLE)
                if not self.session.stash.get("csrf_token"):
                    raise CSRFValidationFailure(
                        "CSRF validation failed: no validation token available "
                        "in this session.")
                provided_csrf_token = req.POST.get(CSRF_TOKEN_POST_VARIABLE)
                if provided_csrf_token != self.session.stash.get("csrf_token"):
                    raise CSRFValidationFailure(
                        "CSRF validation failed: token mismatch.")
                else:
                    # success! wipe out the used token
                    self.session.stash(csrf_token=None)
                    #del self.session.csrf_token
            except CSRFValidationFailure:
                # gentle mode, redirect to GET version of the page
                # return self._redirect_to(req.path_qs)
                raise
        # always stash a new token (single-use: each request issues its own)
        new_token = str(uuid.uuid4()).replace("-", "")
        self.session.stash(csrf_token=new_token)
        # csrf_input is exposed to templates as a ready-made hidden field
        self.csrf_input = ("<input type='hidden' name='%s' value='%s' />" % (
            CSRF_TOKEN_POST_VARIABLE, new_token))
        return action_func(self, req)
    return replacement
# a no-op placeholder
def noop_func(*pargs, **kargs):
    """Placeholder callable: accept any arguments, do nothing, return None."""
    return None
def get_template_name(instance, method_name):
    '''
    Defines the template id to match against.

    :param instance: the instance to generate a template for
    :param method_name: the method to combine with the instance class

    The template path is controller name + '/' + action name, except in the
    case of index. If the processed object carries an explicit
    ``template_id``, return that directly, short circuiting any other
    template name processing. (This form may be removed later; considered a
    candidate for deprecation.)
    '''
    explicit = getattr(instance, 'template_id', None)
    if explicit:
        return explicit
    # derive a default name from the controller class, e.g.
    # "MyThingController" -> "my_thing"
    try:
        match = controller_pattern.search(instance.__class__.__name__)
        root = camel_to_underscore(match.group(1))
    except AttributeError:
        # class name doesn't follow the *Controller convention
        root = ''
    return "/".join(part for part in (root, method_name) if part)
# action / method decorator
def action(method):
    '''
    Decorates methods that are WSGI apps to turn them into pybald-style actions.

    :param method: A method to turn into a pybald-style action.

    This decorator is usually used to take the method of a controller instance
    and add some syntactic sugar around it to allow the method to use WebOb
    Request and Response objects. It will work with any method that
    implements the WSGI spec.

    It allows actions to work with WebOb request / response objects and handles
    default behaviors, such as displaying the view when nothing is returned,
    or setting up a plain text Response if a string is returned. It also
    assigns instance variables from the ``pybald.extension`` environ variables
    that can be set from other parts of the WSGI pipeline.

    This decorator is optional but recommended for making working
    with requests and responses easier.
    '''
    # the default template name is the controller class + method name
    # the method name is pulled during decoration and stored for use
    # in template lookups
    template_name = method.__name__
    # special case where 'call' or 'index' use the base class name
    # for the template otherwise use the base name
    if template_name in ('index', '__call__'):
        template_name = ''
    @wraps(method)
    def action_wrapper(self, environ, start_response):
        # wrap the raw WSGI environ in a WebOb Request
        req = Request(environ)
        # add any url variables as members of the controller
        for varname, value in req.urlvars.items():
            # Set the controller object to contain the url variables
            # parsed from the dispatcher / router
            setattr(self, varname, value)
        # add the pybald extension dict to the controller
        # object
        for key, value in req.environ.setdefault('pybald.extension', {}).items():
            setattr(self, key, value)
        # TODO: fixme this is a hack
        setattr(self, 'request', req)
        setattr(self, 'request_url', req.url)
        # set pre/post/view to a no-op if they don't exist
        pre = getattr(self, '_pre', noop_func)
        post = getattr(self, '_post', noop_func)
        # set the template_id for this request
        self.template_id = get_template_name(self, template_name)
        # The response is either the controllers _pre code, whatever
        # is returned from the controller
        # or the view. So pre has precedence over
        # the return which has precedence over the view
        resp = (pre(req) or
                method(self, req) or
                context.render(template=self.template_id,
                               data=self.__dict__ or {}))
        # if the response is currently a string
        # wrap it in a response object
        if isinstance(resp, str) or isinstance(resp, bytes):
            resp = Response(body=resp, charset="utf-8")
        # run the controllers post code
        post(req, resp)
        # hand off to the response as a WSGI app
        return resp(environ, start_response)
    return action_wrapper
# def caching_pre(keys, method_name, prefix=''):
# '''Decorator for pybald _pre to return cached responses if available.'''
# if keys is None:
# keys = []
# def pre_wrapper(pre):
# def replacement(self, req):
# val = ":".join([prefix] + [str(getattr(self, k, '')) for
# k in keys] + [method_name])
# self.cache_key = base64.urlsafe_b64encode(hashlib.md5(val).digest())
# resp = project.mc.get(self.cache_key)
# if resp:
# return resp
# return pre(req)
# return replacement
# return pre_wrapper
# def caching_post(time=0):
# '''Decorator for pybald _post to cache/store responses.'''
# def post_wrapper(post):
# def replacement(self, req, resp):
# post(req, resp)
# # only cache 2XX or 4XX responses
# if (200 <= resp.status_code < 300) or (400 <= resp.status_code < 500):
# if 'X-Cache' not in resp.headers:
# resp.headerlist.append(('X-Cache', 'MISS'))
# project.mc.set(self.cache_key, resp, time)
# else:
# resp.headers['X-Cache'] = 'HIT'
# return replacement
# return post_wrapper
# regenerate a content_cache_prefix on every reload so that content will
# be force loaded after any full application restart
# This provides a way to cache static content for the duration of the
# application lifespan.
# The prefix is the hex form of a random 32-bit value, e.g. '0x1a2b3c4d'.
content_cache_prefix = hex(random.randrange(0, 2 ** 32 - 1))
# # memcache for actions
# def action_cached(prefix=content_cache_prefix, keys=None, time=0):
# '''
# Wrap actions and return pre-generated responses when appropriate.
# '''
# if keys is None:
# keys = []
# # def cached_wrapper(my_action_method):
# # @wraps(my_action_method)
# # def replacement(self, environ, start_response):
# # # bind newly wrapped methods to self
# # self._pre = caching_pre(keys,
# # my_action_method.__name__,
# # prefix=prefix)(self._pre
# # ).__get__(self, self.__class__)
# # self._post = caching_post(time)(self._post
# # ).__get__(self, self.__class__)
# # return my_action_method(self, environ, start_response)
# # # don't enable caching if requested
# # if project.DISABLE_STATIC_CONTENT_CACHE:
# # return my_action_method
# # return replacement
# # return cached_wrapper
class RegistryMount(type):
    '''
    A registry creating metaclass that keeps track of all defined classes that
    inherit from a base class using this metaclass.

    Lifted almost verbatim from:
    http://martyalchin.com/2008/jan/10/simple-plugin-framework/
    '''
    def __init__(cls, name, bases, attrs):
        if hasattr(cls, 'registry'):
            # a subclass of the mount point: record it
            cls.registry.append(cls)
        else:
            # first class processed (the mount point itself): install the
            # shared registry without recording the mount point in it
            cls.registry = context.controller_registry
        return super(RegistryMount, cls).__init__(name, bases, attrs)
class Controller(with_metaclass(RegistryMount, object)):
    '''Base Controller providing the registry mount point.

    Every controller defined in the project is tracked in the shared
    registry via the RegistryMount metaclass. A handful of convenience
    helpers are attached as well.
    '''
    def __init__(self, *pargs, **kargs):
        # any keyword arguments become instance attributes
        for attr_name, attr_value in kargs.items():
            setattr(self, attr_name, attr_value)
    def _pre(self, req):
        pass
    def _post(self, req, resp):
        pass
    def _redirect_to(self, *pargs, **kargs):
        '''Redirect the controller'''
        return redirect_to(*pargs, **kargs)
    def _not_found(self, text=None):
        '''Raise the 404 http_client_error exception.'''
        raise exc.HTTPNotFound(text)
    def _status(self, code):
        '''Raise an http_client_error exception using a specific code'''
        raise exc.status_map[int(code)]
    def _JSON(self, data, status=200):
        '''Return JSON object with the proper-ish headers.'''
        body = json.dumps(data)
        return Response(body=body,
                        status=status,
                        content_type="application/json",
                        charset='UTF-8')
# alias for backwards copatibility
BaseController = Controller
| [
"logging.getLogger",
"webob.Request",
"routes.redirect_to",
"random.randrange",
"re.compile",
"webob.Response",
"json.dumps",
"functools.wraps",
"uuid.uuid4",
"pybald.context.render",
"six.with_metaclass",
"webob.exc.HTTPNotFound"
] | [((317, 344), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (334, 344), False, 'import logging\n'), ((367, 397), 're.compile', 're.compile', (['"""(\\\\w+)Controller"""'], {}), "('(\\\\w+)Controller')\n", (377, 397), False, 'import re\n'), ((10009, 10046), 'six.with_metaclass', 'with_metaclass', (['RegistryMount', 'object'], {}), '(RegistryMount, object)\n', (10023, 10046), False, 'from six import with_metaclass\n'), ((969, 987), 'functools.wraps', 'wraps', (['action_func'], {}), '(action_func)\n', (974, 987), False, 'from functools import wraps\n'), ((5047, 5060), 'functools.wraps', 'wraps', (['method'], {}), '(method)\n', (5052, 5060), False, 'from functools import wraps\n'), ((8279, 8311), 'random.randrange', 'random.randrange', (['(0)', '(2 ** 32 - 1)'], {}), '(0, 2 ** 32 - 1)\n', (8295, 8311), False, 'import random\n'), ((5130, 5146), 'webob.Request', 'Request', (['environ'], {}), '(environ)\n', (5137, 5146), False, 'from webob import Request, Response, exc\n'), ((10565, 10593), 'routes.redirect_to', 'redirect_to', (['*pargs'], {}), '(*pargs, **kargs)\n', (10576, 10593), False, 'from routes import redirect_to\n'), ((10703, 10725), 'webob.exc.HTTPNotFound', 'exc.HTTPNotFound', (['text'], {}), '(text)\n', (10719, 10725), False, 'from webob import Request, Response, exc\n'), ((6309, 6376), 'pybald.context.render', 'context.render', ([], {'template': 'self.template_id', 'data': '(self.__dict__ or {})'}), '(template=self.template_id, data=self.__dict__ or {})\n', (6323, 6376), False, 'from pybald import context\n'), ((6574, 6610), 'webob.Response', 'Response', ([], {'body': 'resp', 'charset': '"""utf-8"""'}), "(body=resp, charset='utf-8')\n", (6582, 6610), False, 'from webob import Request, Response, exc\n'), ((10999, 11015), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (11009, 11015), False, 'import json\n'), ((2330, 2342), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2340, 2342), False, 'import uuid\n')] |
import json
import requests
from datetime import timedelta
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.http import urlencode
from django.views.decorators.csrf import csrf_exempt
import jwt
from allauth.socialaccount.models import SocialApp, SocialToken
from allauth.socialaccount.providers.oauth2.client import OAuth2Error
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .client import AppleOAuth2Client
from .provider import AppleProvider
class AppleOAuth2Adapter(OAuth2Adapter):
    # Adapter for "Sign In with Apple": Apple returns a signed JWT
    # (`id_token`) instead of a user-info endpoint, so the payload is
    # verified against Apple's published JWKS and stored as `user_data`.
    provider_id = AppleProvider.id
    access_token_url = "https://appleid.apple.com/auth/token"
    authorize_url = "https://appleid.apple.com/auth/authorize"
    public_key_url = "https://appleid.apple.com/auth/keys"
    def get_public_key(self, id_token):
        """
        Get the public key which matches the `kid` in the id_token header.
        """
        kid = jwt.get_unverified_header(id_token)["kid"]
        # NOTE(review): raises IndexError if no published key matches `kid`
        apple_public_key = [
            d
            for d in requests.get(self.public_key_url).json()["keys"]
            if d["kid"] == kid
        ][0]
        public_key = jwt.algorithms.RSAAlgorithm.from_jwk(
            json.dumps(apple_public_key)
        )
        return public_key
    def get_client_id(self, provider):
        # client_id may hold several comma-separated audiences; every one
        # of them is accepted during id_token validation below.
        app = SocialApp.objects.get(provider=provider.id)
        return [aud.strip() for aud in app.client_id.split(",")]
    def parse_token(self, data):
        # Build a SocialToken from the token-endpoint response; any JWT
        # failure is re-raised as OAuth2Error.
        try:
            token = SocialToken(token=data["access_token"])
            token.token_secret = data.get("refresh_token", "")
            public_key = self.get_public_key(data["id_token"])
            provider = self.get_provider()
            allowed_auds = self.get_client_id(provider)
            # NOTE(review): the `verify=True` kwarg is PyJWT 1.x style and
            # was removed in PyJWT 2.x — confirm the pinned PyJWT version.
            token.user_data = jwt.decode(
                data["id_token"],
                public_key,
                algorithms=["RS256"],
                verify=True,
                audience=allowed_auds,
            )
            expires_in = data.get(self.expires_in_key, None)
            if expires_in:
                token.expires_at = timezone.now() + timedelta(
                    seconds=int(expires_in)
                )
            return token
        except jwt.PyJWTError as e:
            raise OAuth2Error("Invalid id_token") from e
    def complete_login(self, request, app, token, **kwargs):
        # The decoded id_token payload doubles as the profile data.
        extra_data = token.user_data
        login = self.get_provider().sociallogin_from_response(
            request, extra_data
        )
        login.state["id_token"] = token.user_data
        return login
class AppleOAuth2ClientMixin:
    def get_client(self, request, app):
        """Rebuild the default OAuth2 client as an AppleOAuth2Client,
        copying over all of its configuration."""
        base = super().get_client(request, app)
        return AppleOAuth2Client(
            base.request,
            base.consumer_key,
            base.consumer_secret,
            base.access_token_method,
            base.access_token_url,
            base.callback_url,
            base.scope,
            key=base.key,
            cert=base.cert,
        )
class AppleOAuth2LoginView(AppleOAuth2ClientMixin, OAuth2LoginView):
    """
    Custom AppleOAuth2LoginView to return AppleOAuth2Client
    (the mixin supplies get_client; no further behavior is needed).
    """

    pass
class AppleOAuth2CallbackView(AppleOAuth2ClientMixin, OAuth2CallbackView):
    """
    Custom OAuth2CallbackView because `Sign In With Apple`:

    * returns AppleOAuth2Client
    * Apple requests callback by POST
    """

    def dispatch(self, request, *args, **kwargs):
        if request.method == "POST":
            # Apple delivers the authorization response via POST; bounce it
            # back to ourselves as a GET so the standard callback flow runs.
            query = urlencode({
                "code": request.POST.get("code"),
                "state": request.POST.get("state"),
            })
            url = request.build_absolute_uri(request.get_full_path())
            return HttpResponseRedirect("%s?%s" % (url, query))
        if request.method == "GET":
            return super().dispatch(request, *args, **kwargs)
# Module-level view callables to wire into the urlconf.
oauth2_login = AppleOAuth2LoginView.adapter_view(AppleOAuth2Adapter)
# Apple POSTs the auth response cross-site, so the callback must be
# exempt from Django's CSRF protection.
oauth2_callback = csrf_exempt(
    AppleOAuth2CallbackView.adapter_view(AppleOAuth2Adapter)
)
| [
"jwt.decode",
"django.utils.http.urlencode",
"json.dumps",
"jwt.get_unverified_header",
"requests.get",
"django.utils.timezone.now",
"allauth.socialaccount.models.SocialToken",
"allauth.socialaccount.models.SocialApp.objects.get",
"allauth.socialaccount.providers.oauth2.client.OAuth2Error"
] | [((1385, 1428), 'allauth.socialaccount.models.SocialApp.objects.get', 'SocialApp.objects.get', ([], {'provider': 'provider.id'}), '(provider=provider.id)\n', (1406, 1428), False, 'from allauth.socialaccount.models import SocialApp, SocialToken\n'), ((995, 1030), 'jwt.get_unverified_header', 'jwt.get_unverified_header', (['id_token'], {}), '(id_token)\n', (1020, 1030), False, 'import jwt\n'), ((1266, 1294), 'json.dumps', 'json.dumps', (['apple_public_key'], {}), '(apple_public_key)\n', (1276, 1294), False, 'import json\n'), ((1561, 1600), 'allauth.socialaccount.models.SocialToken', 'SocialToken', ([], {'token': "data['access_token']"}), "(token=data['access_token'])\n", (1572, 1600), False, 'from allauth.socialaccount.models import SocialApp, SocialToken\n'), ((1858, 1960), 'jwt.decode', 'jwt.decode', (["data['id_token']", 'public_key'], {'algorithms': "['RS256']", 'verify': '(True)', 'audience': 'allowed_auds'}), "(data['id_token'], public_key, algorithms=['RS256'], verify=True,\n audience=allowed_auds)\n", (1868, 1960), False, 'import jwt\n'), ((2344, 2375), 'allauth.socialaccount.providers.oauth2.client.OAuth2Error', 'OAuth2Error', (['"""Invalid id_token"""'], {}), "('Invalid id_token')\n", (2355, 2375), False, 'from allauth.socialaccount.providers.oauth2.client import OAuth2Error\n'), ((2175, 2189), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2187, 2189), False, 'from django.utils import timezone\n'), ((3892, 3909), 'django.utils.http.urlencode', 'urlencode', (['params'], {}), '(params)\n', (3901, 3909), False, 'from django.utils.http import urlencode\n'), ((1102, 1135), 'requests.get', 'requests.get', (['self.public_key_url'], {}), '(self.public_key_url)\n', (1114, 1135), False, 'import requests\n')] |
# Changed to the yolo-fastest architecture
# Changed ResidualBlock: from the original ->1x1->3x3-> to ->1x1->3x3->1x1->
# Changed make_residual_block: added the leading convolution layers
import tensorflow as tf
class DarkNetConv2D(tf.keras.layers.Layer):
    """Conv (or depthwise conv when groups != 1) + batch norm + leaky-relu.

    ``activation="linear"`` is implemented as leaky_relu with alpha=1,
    which is the identity function; any other value yields alpha=0.1.
    """
    def __init__(self, filters, kernel_size, strides, activation="leaky", groups=1):
        super(DarkNetConv2D, self).__init__()
        if groups == 1:
            self.conv = tf.keras.layers.Conv2D(filters=filters,
                                               kernel_size=kernel_size,
                                               strides=strides,
                                               padding="same")
        else:
            # depthwise path ignores `filters`: one filter per input channel
            self.conv = tf.keras.layers.DepthwiseConv2D(kernel_size=kernel_size,
                                                        strides=strides,
                                                        padding="same")
        self.bn = tf.keras.layers.BatchNormalization()
        # alpha=1 turns leaky_relu into the identity ("linear")
        self.activation = 1 if activation == "linear" else 0.1
    def call(self, inputs, training=None, **kwargs):
        out = self.conv(inputs)
        out = self.bn(out, training=training)
        return tf.nn.leaky_relu(out, alpha=self.activation)
class ResidualBlock(tf.keras.layers.Layer):
    """1x1 -> depthwise 3x3 -> linear 1x1 bottleneck with dropout and a
    skip connection (Yolo-Fastest variant of the darknet residual)."""
    def __init__(self, filters1, filters2):
        super(ResidualBlock, self).__init__()
        self.conv1 = DarkNetConv2D(filters=filters1, kernel_size=(1, 1), strides=1, activation="leaky")
        self.conv2 = DarkNetConv2D(filters=filters1, kernel_size=(3, 3), strides=1, activation="leaky", groups=filters1)
        self.conv3 = DarkNetConv2D(filters=filters2, kernel_size=(1, 1), strides=1, activation="linear")
        self.dropout1 = tf.keras.layers.Dropout(0.15)
    def call(self, inputs, training=None, **kwargs):
        out = inputs
        # run the bottleneck pipeline, then add the identity shortcut
        for layer in (self.conv1, self.conv2, self.conv3, self.dropout1):
            out = layer(out, training=training)
        return tf.keras.layers.add([out, inputs])
def make_residual_block(pre_conv_filters1, pre_conv_filters2, filters1, filters2, num_blocks, pre_conv_strides=1):
    """Build one backbone stage: a depthwise 3x3 conv (optionally strided
    for downsampling) and a linear 1x1 conv, followed by ``num_blocks``
    residual blocks."""
    stage = tf.keras.Sequential()
    stage.add(DarkNetConv2D(filters=pre_conv_filters1, kernel_size=(3, 3), strides=pre_conv_strides,
                            activation="leaky", groups=pre_conv_filters1))
    stage.add(DarkNetConv2D(filters=pre_conv_filters2, kernel_size=(1, 1), strides=1, activation="linear"))
    for _ in range(num_blocks):
        stage.add(ResidualBlock(filters1=filters1, filters2=filters2))
    return stage
class YoloFastestBackbone(tf.keras.Model):
    # Yolo-Fastest backbone: a strided stem followed by six residual stages.
    # call() returns two feature maps (deepest stage, and the stage before
    # it) used as the two YOLO detection scales.
    def __init__(self):
        super(YoloFastestBackbone, self).__init__()
        # stem: stride-2 3x3 conv + 1x1 conv
        self.conv1 = DarkNetConv2D(filters=8, kernel_size=(3, 3), strides=2, activation="leaky")
        self.conv2 = DarkNetConv2D(filters=8, kernel_size=(1, 1), strides=1, activation="leaky")
        self.block1 = make_residual_block(pre_conv_filters1=8, pre_conv_filters2=4, filters1=8, filters2=4, num_blocks=1)
        self.conv3 = DarkNetConv2D(filters=24, kernel_size=(1, 1), strides=1, activation="leaky")
        # pre_conv_strides=2 stages downsample the feature map
        self.block2 = make_residual_block(pre_conv_filters1=24, pre_conv_filters2=8, filters1=32, filters2=8, num_blocks=2, pre_conv_strides=2)
        self.conv4 = DarkNetConv2D(filters=32, kernel_size=(1, 1), strides=1, activation="leaky")
        self.block3 = make_residual_block(pre_conv_filters1=32, pre_conv_filters2=8, filters1=48, filters2=8, num_blocks=2, pre_conv_strides=2)
        self.conv5 = DarkNetConv2D(filters=48, kernel_size=(1, 1), strides=1, activation="leaky")
        self.block4 = make_residual_block(pre_conv_filters1=48, pre_conv_filters2=16, filters1=96, filters2=16, num_blocks=4)
        self.conv6 = DarkNetConv2D(filters=96, kernel_size=(1, 1), strides=1, activation="leaky")
        self.block5 = make_residual_block(pre_conv_filters1=96, pre_conv_filters2=24, filters1=136, filters2=24, num_blocks=4, pre_conv_strides=2)
        self.conv7 = DarkNetConv2D(filters=136, kernel_size=(1, 1), strides=1, activation="leaky")
        self.block6 = make_residual_block(pre_conv_filters1=136, pre_conv_filters2=48, filters1=224, filters2=48, num_blocks=5, pre_conv_strides=2)
    def call(self, inputs, training=None, **kwargs):
        x = self.conv1(inputs, training=training)
        x = self.conv2(x, training=training)
        x = self.block1(x, training=training)
        x = self.conv3(x, training=training)
        x = self.block2(x, training=training)
        x = self.conv4(x, training=training)
        x = self.block3(x, training=training)
        x = self.conv5(x, training=training)
        x = self.block4(x, training=training)
        x = self.conv6(x, training=training)
        x = self.block5(x, training=training)
        # output_1: intermediate scale; output_2: deepest scale
        output_1 = self.conv7(x, training=training)
        output_2 = self.block6(output_1, training=training)
        # print(output_1.shape, output_2.shape, output_3.shape)
        return output_2, output_1
class YOLOTail(tf.keras.layers.Layer):
    # Detection head for one output scale. conv1 produces a bottleneck
    # `branch` (returned so the caller can upsample and fuse it into the
    # next scale); the remaining convs plus a plain 1x1 conv produce the
    # raw prediction map (`stem`).
    def __init__(self, in_channels, out_channels):
        super(YOLOTail, self).__init__()
        self.conv1 = DarkNetConv2D(filters=in_channels, kernel_size=(1, 1), strides=1, activation="leaky")
        self.conv2 = DarkNetConv2D(filters=in_channels, kernel_size=(3, 3), strides=1, activation="leaky", groups=in_channels)
        self.conv3 = DarkNetConv2D(filters=in_channels, kernel_size=(3, 3), strides=1, activation="leaky", groups=in_channels)
        # NOTE(review): 4*in_channels/3 is a float expression; presumably
        # in_channels is divisible by 3 (96 and 136/... in this model) —
        # confirm Conv2D accepts the resulting filter count.
        self.conv4 = DarkNetConv2D(filters=4*in_channels/3, kernel_size=(1, 1), strides=1, activation="linear")
        self.conv5 = DarkNetConv2D(filters=4*in_channels/3, kernel_size=(3, 3), strides=1, activation="leaky", groups=int(4*in_channels/3))
        self.conv6 = DarkNetConv2D(filters=4*in_channels/3, kernel_size=(3, 3), strides=1, activation="leaky", groups=int(4*in_channels/3))
        self.conv7 = DarkNetConv2D(filters=4*in_channels/3, kernel_size=(1, 1), strides=1, activation="linear")
        # final prediction conv: no batch norm, no activation
        self.normal_conv = tf.keras.layers.Conv2D(filters=out_channels,
                                               kernel_size=(1, 1),
                                               strides=1,
                                               padding="same")
    def call(self, inputs, training=None, **kwargs):
        branch = self.conv1(inputs, training=training)
        x = self.conv2(branch, training=training)
        x = self.conv3(x, training=training)
        x = self.conv4(x, training=training)
        x = self.conv5(x, training=training)
        x = self.conv6(x, training=training)
        x = self.conv7(x, training=training)
        stem = self.normal_conv(x)
        return stem, branch
class YOLOV3(tf.keras.Model):
    # Full detector: Yolo-Fastest backbone + two YOLOTail heads. The first
    # head's branch is upsampled 2x and concatenated with the shallower
    # backbone feature before feeding the second head.
    def __init__(self, out_channels):
        super(YOLOV3, self).__init__()
        self.darknet = YoloFastestBackbone()
        self.tail_1 = YOLOTail(in_channels=96, out_channels=out_channels)
        self.upsampling_1 = self._make_upsampling(num_filter=256)
        self.tail_2 = YOLOTail(in_channels=96, out_channels=out_channels)
        #self.upsampling_2 = self._make_upsampling(num_filter=256)
        #self.tail_3 = YOLOTail(in_channels=256, out_channels=out_channels)
    def _make_upsampling(self, num_filter):
        # num_filter is currently unused: the conv that used it is
        # commented out, so this is just a 2x nearest-neighbor upsample.
        layer = tf.keras.Sequential()
        #layer.add(DarkNetConv2D(filters=num_filter, kernel_size=(1, 1), strides=1))
        layer.add(tf.keras.layers.UpSampling2D(size=(2, 2)))
        return layer
    def call(self, inputs, training=None, mask=None):
        x_1, x_2 = self.darknet(inputs, training=training)
        stem_1, branch_1 = self.tail_1(x_1, training=training)
        branch_1 = self.upsampling_1(branch_1, training=training)
        x_2 = tf.keras.layers.concatenate([branch_1, x_2])
        stem_2, _ = self.tail_2(x_2, training=training)
        #branch_2 = self.upsampling_2(branch_2, training=training)
        #x_3 = tf.keras.layers.concatenate([branch_1, x_3])
        #stem_3, _ = self.tail_3(x_3, training=training)
        return [stem_1, stem_2]
| [
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.Dropout",
"tensorflow.nn.leaky_relu",
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.concatenate",
"tensorflow.kera... | [((2250, 2271), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (2269, 2271), True, 'import tensorflow as tf\n'), ((952, 988), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (986, 988), True, 'import tensorflow as tf\n'), ((1242, 1284), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['x'], {'alpha': 'self.activation'}), '(x, alpha=self.activation)\n', (1258, 1284), True, 'import tensorflow as tf\n'), ((1790, 1819), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.15)'], {}), '(0.15)\n', (1813, 1819), True, 'import tensorflow as tf\n'), ((2074, 2106), 'tensorflow.keras.layers.add', 'tf.keras.layers.add', (['[x, inputs]'], {}), '([x, inputs])\n', (2093, 2106), True, 'import tensorflow as tf\n'), ((6110, 6205), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'out_channels', 'kernel_size': '(1, 1)', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=out_channels, kernel_size=(1, 1), strides=1,\n padding='same')\n", (6132, 6205), True, 'import tensorflow as tf\n'), ((7371, 7392), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (7390, 7392), True, 'import tensorflow as tf\n'), ((7817, 7861), 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[branch_1, x_2]'], {}), '([branch_1, x_2])\n', (7844, 7861), True, 'import tensorflow as tf\n'), ((360, 453), 'tensorflow.keras.layers.DepthwiseConv2D', 'tf.keras.layers.DepthwiseConv2D', ([], {'kernel_size': 'kernel_size', 'strides': 'strides', 'padding': '"""same"""'}), "(kernel_size=kernel_size, strides=strides,\n padding='same')\n", (391, 453), True, 'import tensorflow as tf\n'), ((600, 702), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 'filters', 'kernel_size': 'kernel_size', 'strides': 'strides', 'padding': '"""same"""'}), "(filters=filters, kernel_size=kernel_size, strides=\n strides, 
padding='same')\n", (622, 702), True, 'import tensorflow as tf\n'), ((7496, 7537), 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (7524, 7537), True, 'import tensorflow as tf\n')] |
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
'''
we create a profile class that will help us save information on whether the user is a
a user or a driver
'''
class Profile(models.Model):
    # One-to-one extension of the auth User: display name, avatar, sex, and
    # whether the account acts as a Driver or a Passenger.
    username=models.CharField(max_length=40)
    profile_image=models.ImageField(upload_to='profiles/')
    choices=(('Male','Male'),('Female','Female'))
    sex=models.CharField(_('sex'),max_length=30,blank=True,choices=choices)
    user_choices=(('Driver','Driver'),('Passenger','Passenger'))
    user_type=models.CharField(_('user type'),max_length=30,blank=True,choices=user_choices)
    user=models.OneToOneField(User,on_delete=models.CASCADE)
    def __str__(self):
        return self.username
    # NOTE(review): this signal handler is defined *inside* the model class;
    # @receiver connects the plain function at class-definition time so it
    # still fires, but it is conventionally a module-level function.
    @receiver(post_save, sender=User)
    def update_user_profile(sender, instance, created, **kwargs):
        # auto-create a Profile whenever a new User is created, and keep
        # the related profile saved on every User save
        if created:
            Profile.objects.create(user=instance)
        instance.profile.save()
    def delete_profile(self):
        # remove this profile row
        self.delete()
'''
We create a car model to save information about the car, as users may have preferences.
'''
class Venue(models.Model):
    """A named geographic location (with optional coordinates) owned by a profile."""
    name=models.CharField(max_length=255)
    latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
    longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
    user=models.ForeignKey(Profile,null=True)
    def __str__(self):
        return self.name
    def delete_venue(self):
        # Bug fix: this previously called self.save(), which persisted the
        # instance instead of removing it from the database.
        self.delete()
    @classmethod
    def search (cls,search_term):
        """Return venues whose name contains search_term (case-insensitive)."""
        locations=cls.objects.filter(name__icontains=search_term)
        return locations
class Car(models.Model):
    """A car registered by a user: brand, plate, capacity and current venue."""
    car_brand=models.CharField(max_length=30)
    Number_plate=models.CharField(max_length=40)
    seats_available=models.IntegerField()
    # the auth user this car belongs to
    users_car=models.ForeignKey(User,null=True)
    # venue the car is associated with
    location=models.ForeignKey(Venue,null=True)
    def __str__(self):
        return self.car_brand
'''
We create a driver model to save information about the driver and the car.
'''
'''
We add the car foreign key to the driver model to record that the car belongs to a
specific user; it also makes querying the database easier.
'''
class Driver(models.Model):
    """A ride offer: route endpoints plus links to the driver's profile and car."""
    start=models.CharField(max_length=40)
    destination=models.CharField(max_length=30)
    user=models.ForeignKey(Profile,null=True)
    car=models.ForeignKey(Car,null=True)
class Passenger(models.Model):
    """A passenger: identity details, reviews, current venue and contact info."""
    name=models.CharField(max_length=40)
    national_id=models.CharField(max_length=40)
    Reviews=models.CharField(max_length=40,blank=True)
    # the venue where the passenger currently is
    where_are_you=models.ForeignKey(Venue,null=True)
    user=models.ForeignKey(Profile,null=True)
    Phone_number=models.CharField(max_length=40,null=True)
| [
"django.db.models.OneToOneField",
"django.utils.translation.ugettext_lazy",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ImageField",
"django.db.models.DecimalField",
"django.dispatch.receiver",
"django.db.models.CharField"
] | [((396, 427), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (412, 427), False, 'from django.db import models\n'), ((446, 486), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""profiles/"""'}), "(upload_to='profiles/')\n", (463, 486), False, 'from django.db import models\n'), ((780, 832), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (800, 832), False, 'from django.db import models\n'), ((890, 922), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (898, 922), False, 'from django.dispatch import receiver\n'), ((1272, 1304), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1288, 1304), False, 'from django.db import models\n'), ((1320, 1394), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(9)', 'decimal_places': '(6)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=9, decimal_places=6, null=True, blank=True)\n', (1339, 1394), False, 'from django.db import models\n'), ((1411, 1485), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(9)', 'decimal_places': '(6)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=9, decimal_places=6, null=True, blank=True)\n', (1430, 1485), False, 'from django.db import models\n'), ((1495, 1532), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'null': '(True)'}), '(Profile, null=True)\n', (1512, 1532), False, 'from django.db import models\n'), ((1809, 1840), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (1825, 1840), False, 'from django.db import models\n'), ((1858, 1889), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (1874, 1889), False, 'from django.db import 
models\n'), ((1910, 1931), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1929, 1931), False, 'from django.db import models\n'), ((1946, 1980), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'null': '(True)'}), '(User, null=True)\n', (1963, 1980), False, 'from django.db import models\n'), ((1993, 2028), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Venue'], {'null': '(True)'}), '(Venue, null=True)\n', (2010, 2028), False, 'from django.db import models\n'), ((2335, 2366), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (2351, 2366), False, 'from django.db import models\n'), ((2383, 2414), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (2399, 2414), False, 'from django.db import models\n'), ((2424, 2461), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'null': '(True)'}), '(Profile, null=True)\n', (2441, 2461), False, 'from django.db import models\n'), ((2469, 2502), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Car'], {'null': '(True)'}), '(Car, null=True)\n', (2486, 2502), False, 'from django.db import models\n'), ((2544, 2575), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (2560, 2575), False, 'from django.db import models\n'), ((2592, 2623), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (2608, 2623), False, 'from django.db import models\n'), ((2636, 2679), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'blank': '(True)'}), '(max_length=40, blank=True)\n', (2652, 2679), False, 'from django.db import models\n'), ((2697, 2732), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Venue'], {'null': '(True)'}), '(Venue, null=True)\n', (2714, 2732), False, 'from django.db import models\n'), ((2741, 2778), 'django.db.models.ForeignKey', 
'models.ForeignKey', (['Profile'], {'null': '(True)'}), '(Profile, null=True)\n', (2758, 2778), False, 'from django.db import models\n'), ((2795, 2837), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'null': '(True)'}), '(max_length=40, null=True)\n', (2811, 2837), False, 'from django.db import models\n'), ((562, 570), 'django.utils.translation.ugettext_lazy', '_', (['"""sex"""'], {}), "('sex')\n", (563, 570), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((709, 723), 'django.utils.translation.ugettext_lazy', '_', (['"""user type"""'], {}), "('user type')\n", (710, 723), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import functools
from flask import g, request
from lib.jwt_utils import verify_jwt
def LoginRequired(view_func):
    """Decorator that validates the request's JWT before invoking the view.

    On a valid token, ``g.user_id`` is populated from the payload and the
    wrapped view is called; otherwise a token-error response dict is
    returned and the view never runs.
    """
    @functools.wraps(view_func)
    def wrapper(*args, **kwargs):
        # Default to "anonymous" until a valid token proves otherwise.
        g.user_id = None
        token = request.headers.get('token')
        payload = verify_jwt(token)
        if not payload:
            # Imported locally, as in the original (presumably to avoid an
            # import cycle at module load time — confirm).
            from utils.response_code import RET, ResponseData
            return ResponseData(RET.TOKENERROR).to_dict()
        g.user_id = payload.get('user_id')
        return view_func(*args, **kwargs)
    return wrapper
| [
"lib.jwt_utils.verify_jwt",
"flask.request.headers.get",
"functools.wraps",
"utils.response_code.ResponseData"
] | [((120, 146), 'functools.wraps', 'functools.wraps', (['view_func'], {}), '(view_func)\n', (135, 146), False, 'import functools\n'), ((224, 252), 'flask.request.headers.get', 'request.headers.get', (['"""token"""'], {}), "('token')\n", (243, 252), False, 'from flask import g, request\n'), ((271, 287), 'lib.jwt_utils.verify_jwt', 'verify_jwt', (['auth'], {}), '(auth)\n', (281, 287), False, 'from lib.jwt_utils import verify_jwt\n'), ((496, 524), 'utils.response_code.ResponseData', 'ResponseData', (['RET.TOKENERROR'], {}), '(RET.TOKENERROR)\n', (508, 524), False, 'from utils.response_code import RET, ResponseData\n')] |
import os
import subprocess
import smtplib
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
# HTML scaffolding for wrapping a plain-text fragment into a minimal HTML email body.
EMAIL_HTML_START = '<html><head></head><body><p>'
EMAIL_HTML_END = '</p></body></html>'
"""
Send Email with attachments and text
attachments should be a list of objects that can be attached to a MIMEMultipart obj
"""
def sendEmail(sender, receivers, subject, body, attachments, is_body_html=False):
    """Send an email with the given body and attachments via SMTP.

    :param sender: the sender's email address
    :param receivers: list of recipient email addresses
    :param subject: the subject line
    :param body: message body text
    :param attachments: list of MIME objects attachable to a MIMEMultipart
    :param is_body_html: when True, attach the body as HTML instead of plain text
    """
    assert type(attachments) is list
    assert type(receivers) is list

    msg = MIMEMultipart()
    text = MIMEText(body, 'html') if is_body_html else MIMEText(body)

    # attach elements to email
    msg.attach(text)
    for attachment in attachments:
        msg.attach(attachment)

    # me == the sender's email address
    # you == the recipient's email address
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = ','.join(receivers)

    try:
        smtpObj = smtplib.SMTP('smtp.bucknell.edu')
        smtpObj.sendmail(sender, receivers, msg.as_string())
        # TODO: log this message instead of printing to console
        print("Successfully sent email")
        smtpObj.quit()
    except (smtplib.SMTPException, OSError) as e:
        # Bug fix: the original caught the unqualified name `SMTPException`,
        # which was never imported, so any send failure raised NameError.
        # OSError additionally covers connection/DNS failures.
        # TODO: log this message instead of printing to console
        print("Error: unable to send email: {}".format(e))
"""
Installs highcharts-export-server if not already installed.
Returns 0 if successful, returns 1 if export server is not installed at the
expected path and could not be installed.
"""
# Expected install location of the export-server binary (relative to the
# working directory) and the npm package name used to install it.
eServerPath = "./node_modules/.bin/highcharts-export-server"
eServerName = "highcharts-export-server"
def prepareChartExport():
    # Install only when the binary is missing. os.system returns the shell
    # exit status, so any non-zero value means the npm install failed.
    if not os.path.isfile(eServerPath):
        if 0 != os.system("module load node && export ACCEPT_HIGHCHARTS_LICENSE=TRUE && npm install " + eServerName):
            raise ImportError("Could not install chart export server")
"""
Generates a PNG image from a JSON Object
Assumes highcharts-export-server is present in the working directory
param chartJSON: A JSON string representing the chart being exported.
returns: A PNG MIMEImage object
"""
def exportChart(chartJSON): # TODO: Handle errors
    """Render a Highcharts JSON configuration to a PNG MIMEImage.

    :param chartJSON: JSON string describing the chart to export
    """
    prepareChartExport()
    # Write chartJSON into chart.json. `with` guarantees the handle is
    # closed even if the write fails (the original leaked it on error).
    with open('chart.json', 'w') as fp:
        fp.write(chartJSON)
    # Run export server to create the chart.png file
    eServerCommand = "module load node && " + eServerPath + " -infile chart.json -outfile chart.png"
    subprocess.check_output(eServerCommand, shell=True)
    # Read chart.png back as a MIME image attachment
    with open('chart.png', 'rb') as fp:  # open in read-binary mode
        chartImage = MIMEImage(fp.read())
return chartImage
| [
"subprocess.check_output",
"smtplib.SMTP",
"os.path.isfile",
"email.mime.multipart.MIMEMultipart",
"os.system",
"email.mime.text.MIMEText"
] | [((555, 570), 'email.mime.multipart.MIMEMultipart', 'MIMEMultipart', ([], {}), '()\n', (568, 570), False, 'from email.mime.multipart import MIMEMultipart\n'), ((2442, 2493), 'subprocess.check_output', 'subprocess.check_output', (['eServerCommand'], {'shell': '(True)'}), '(eServerCommand, shell=True)\n', (2465, 2493), False, 'import subprocess\n'), ((582, 604), 'email.mime.text.MIMEText', 'MIMEText', (['body', '"""html"""'], {}), "(body, 'html')\n", (590, 604), False, 'from email.mime.text import MIMEText\n'), ((626, 640), 'email.mime.text.MIMEText', 'MIMEText', (['body'], {}), '(body)\n', (634, 640), False, 'from email.mime.text import MIMEText\n'), ((954, 987), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.bucknell.edu"""'], {}), "('smtp.bucknell.edu')\n", (966, 987), False, 'import smtplib\n'), ((1659, 1686), 'os.path.isfile', 'os.path.isfile', (['eServerPath'], {}), '(eServerPath)\n', (1673, 1686), False, 'import os\n'), ((1704, 1814), 'os.system', 'os.system', (["('module load node && export ACCEPT_HIGHCHARTS_LICENSE=TRUE && npm install ' +\n eServerName)"], {}), "(\n 'module load node && export ACCEPT_HIGHCHARTS_LICENSE=TRUE && npm install '\n + eServerName)\n", (1713, 1814), False, 'import os\n')] |
'''
@date: 31/03/2015
@author: <NAME>
Tests for generator
'''
import unittest
import numpy as np
import scipy.constants as constants
from PyHEADTAIL.trackers.longitudinal_tracking import RFSystems
import PyHEADTAIL.particles.generators as gf
from PyHEADTAIL.general.printers import SilentPrinter
class TestParticleGenerators(unittest.TestCase):
    '''Test class for the new ParticleGenerator (generator_functional.py)'''
    def setUp(self):
        # Fixed seed so the randomly generated beams are reproducible.
        np.random.seed(0)
        self.nparticles = 1000
        self.epsx = 0.5
        self.intensity = 1e11
        self.charge = constants.e
        self.mass = constants.m_p
        self.circumference = 99
        self.gamma = 27.1
        # Reference generator/beam with transverse (x) and longitudinal (z)
        # Gaussian distributions, shared by most tests below.
        self.generator = gf.ParticleGenerator(
            self.nparticles, self.intensity,
            self.charge, self.mass, self.circumference, self.gamma,
            distribution_x=gf.gaussian2D(0.5),alpha_x=-0.7, beta_x=4, D_x=0,
            distribution_z=gf.gaussian2D(3.0),
            printer=SilentPrinter())
        self.beam = self.generator.generate()
    def tearDown(self):
        # Nothing to clean up; fixtures are plain in-memory objects.
        pass
    def test_particles_length(self):
        '''Tests whether the coordinate arrays of the resulting beam
        have the correct length'''
        self.assertEqual(self.beam.x.size, self.nparticles,
                         'Length of x-beam coordinate array not correct')
    def test_particles_coordinates(self):
        '''Tests whether only the coordinates specified in the initializer
        are initialized in the beam (e.g. yp is not)
        '''
        with self.assertRaises(AttributeError):
            self.beam.yp
    def test_update_beam_with_existing_coords(self):
        '''Tests whether updating already existing coords produces
        beam coordinates of the correct size
        '''
        self.generator.update(self.beam)
        self.assertEqual(self.beam.x.size, self.nparticles,
                         'Updating existing coordinates leads to wrong' +
                         'coordinate lengths')
    def test_update_beam_with_new_coords(self):
        '''Tests whether adding new coordinates to the beam
        works as expected
        '''
        x_copy = self.beam.x.copy()
        longitudinal_generator = gf.ParticleGenerator(
            self.nparticles, self.intensity, self.charge,
            self.mass, self.circumference, self.gamma,
            distribution_z=gf.gaussian2D(3.0))
        longitudinal_generator.update(self.beam)
        self.assertEqual(self.beam.dp.size, self.nparticles,
                         'Updating the beam with new coordinates leads to' +
                         'faulty coordinates')
        # The pre-existing transverse coordinates must be left untouched.
        for n in range(self.nparticles):
            self.assertAlmostEqual(x_copy[n], self.beam.x[n],
                msg='Updating the beam with new coordinates invalidates' +
                    'existing coordinates')
    def test_distributions(self):
        '''Tests whether the specified distributions return the coords
        in the correct format (dimensions). If new distributions are added,
        add them to the test here!
        '''
        # Gaussian
        dist = gf.gaussian2D(0.1)
        self.distribution_testing_implementation(dist)
        # Uniform
        dist = gf.uniform2D(-2., 3.)
        self.distribution_testing_implementation(dist)
    def test_import_distribution(self):
        '''Tests whether import_distribution produces coordinate arrays of the
        correct size'''
        nparticles = 5
        coords = [np.linspace(-2, 2, nparticles),
                  np.linspace(-3, 3, nparticles)]
        import_generator = gf.ParticleGenerator(
            nparticles, 1e11, constants.e, constants.m_p, 100, 10,
            distribution_y=gf.import_distribution2D(coords))
        beam = import_generator.generate()
        self.assertEqual(len(beam.y), nparticles,
            'import_generator produces coords with the wrong length')
        self.assertEqual(len(beam.yp), nparticles,
            'import_generator produces coords with the wrong length')
    def test_rf_bucket_distribution(self):
        '''Tests the functionality of the rf-bucket matchor'''
        #SPS Q20 flattop
        nparticles = 100
        h1 = 4620
        h2 = 4*4620
        V1 = 10e6
        V2 = 1e6
        dphi1 = 0
        dphi2 = 0
        alpha = 0.00308
        p_increment = 0
        long_map = RFSystems(self.circumference, [h1, h2], [V1, V2],
                [dphi1, dphi2], [alpha], self.gamma, p_increment, charge=self.charge, mass=self.mass)
        bucket = long_map.get_bucket(gamma=self.gamma)
        # Generation must succeed without raising; the bunch itself is not inspected.
        bunch = gf.ParticleGenerator(
            nparticles, 1e11, constants.e, constants.m_p,
            self.circumference, self.gamma,
            distribution_z=gf.RF_bucket_distribution(
                bucket, epsn_z=0.002, printer=SilentPrinter())).generate()
    def test_cut_bucket_distribution(self):
        '''Tests functionality of the cut-bucket matchor '''
        nparticles = 100
        h1 = 4620
        h2 = 4*4620
        V1 = 10e6
        V2 = 1e6
        dphi1 = 0
        dphi2 = 0
        alpha = 0.00308
        p_increment = 0
        long_map = RFSystems(self.circumference, [h1, h2], [V1, V2],
                [dphi1, dphi2], [alpha], self.gamma, p_increment, charge=self.charge, mass=self.mass)
        bucket = long_map.get_bucket(gamma=self.gamma)
        is_accepted_fn = bucket.make_is_accepted(margin=0.)
        bunch = gf.ParticleGenerator(
            nparticles, 11, constants.e, constants.m_p,
            self.circumference, self.gamma,
            distribution_z=gf.cut_distribution(
                is_accepted=is_accepted_fn,
                distribution=gf.gaussian2D(0.01))).generate()
        self.assertEqual(nparticles, len(bunch.z),
                         'bucket_cut_distribution loses particles')
        self.assertTrue(np.sum(is_accepted_fn(bunch.z, bunch.dp)) == nparticles,
            'not all particles generated with the cut RF matcher' +
            ' lie inside the specified separatrix')
    def test_import_distribution_raises_error(self):
        '''Tests whether the generation fails when the number of particles
        and the size of the specified distribution list do not match
        '''
        nparticles = 10
        coords = [np.linspace(-2, 2, nparticles+1),
                  np.linspace(-3, 3, nparticles+1)]
        import_generator = gf.ParticleGenerator(
            nparticles, 1e11, constants.e, constants.m_p, 100, 10,
            distribution_y=gf.import_distribution2D(coords))
        with self.assertRaises(AssertionError):
            beam = import_generator.generate()
    def distribution_testing_implementation(self, distribution):
        '''Call this method with the distribution as a parameter.
        distribution(n_particles) should be a valid command
        '''
        distribution_size = 100
        X = distribution(distribution_size)
        x = X[0]
        p = X[1]
        self.assertEqual(x.size, distribution_size,
            'space-direction ([0]) of ' + str(distribution) +
            'has wrong dimension')
        self.assertEqual(p.size, distribution_size,
            'momentum-direction ([1]) of ' + str(distribution) +
            'has wrong dimension')
# Run the suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"PyHEADTAIL.trackers.longitudinal_tracking.RFSystems",
"PyHEADTAIL.general.printers.SilentPrinter",
"PyHEADTAIL.particles.generators.gaussian2D",
"PyHEADTAIL.particles.generators.import_distribution2D",
"PyHEADTAIL.particles.generators.uniform2D",
"numpy.linspace",
"numpy.random.seed",
"unittest.main"... | [((7430, 7445), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7443, 7445), False, 'import unittest\n'), ((457, 474), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (471, 474), True, 'import numpy as np\n'), ((3136, 3154), 'PyHEADTAIL.particles.generators.gaussian2D', 'gf.gaussian2D', (['(0.1)'], {}), '(0.1)\n', (3149, 3154), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((3244, 3267), 'PyHEADTAIL.particles.generators.uniform2D', 'gf.uniform2D', (['(-2.0)', '(3.0)'], {}), '(-2.0, 3.0)\n', (3256, 3267), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((4398, 4537), 'PyHEADTAIL.trackers.longitudinal_tracking.RFSystems', 'RFSystems', (['self.circumference', '[h1, h2]', '[V1, V2]', '[dphi1, dphi2]', '[alpha]', 'self.gamma', 'p_increment'], {'charge': 'self.charge', 'mass': 'self.mass'}), '(self.circumference, [h1, h2], [V1, V2], [dphi1, dphi2], [alpha],\n self.gamma, p_increment, charge=self.charge, mass=self.mass)\n', (4407, 4537), False, 'from PyHEADTAIL.trackers.longitudinal_tracking import RFSystems\n'), ((5197, 5336), 'PyHEADTAIL.trackers.longitudinal_tracking.RFSystems', 'RFSystems', (['self.circumference', '[h1, h2]', '[V1, V2]', '[dphi1, dphi2]', '[alpha]', 'self.gamma', 'p_increment'], {'charge': 'self.charge', 'mass': 'self.mass'}), '(self.circumference, [h1, h2], [V1, V2], [dphi1, dphi2], [alpha],\n self.gamma, p_increment, charge=self.charge, mass=self.mass)\n', (5206, 5336), False, 'from PyHEADTAIL.trackers.longitudinal_tracking import RFSystems\n'), ((3506, 3536), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', 'nparticles'], {}), '(-2, 2, nparticles)\n', (3517, 3536), True, 'import numpy as np\n'), ((3556, 3586), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'nparticles'], {}), '(-3, 3, nparticles)\n', (3567, 3586), True, 'import numpy as np\n'), ((6364, 6398), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', '(nparticles + 1)'], {}), '(-2, 2, nparticles + 1)\n', (6375, 6398), True, 
'import numpy as np\n'), ((6416, 6450), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(nparticles + 1)'], {}), '(-3, 3, nparticles + 1)\n', (6427, 6450), True, 'import numpy as np\n'), ((873, 891), 'PyHEADTAIL.particles.generators.gaussian2D', 'gf.gaussian2D', (['(0.5)'], {}), '(0.5)\n', (886, 891), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((950, 968), 'PyHEADTAIL.particles.generators.gaussian2D', 'gf.gaussian2D', (['(3.0)'], {}), '(3.0)\n', (963, 968), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((990, 1005), 'PyHEADTAIL.general.printers.SilentPrinter', 'SilentPrinter', ([], {}), '()\n', (1003, 1005), False, 'from PyHEADTAIL.general.printers import SilentPrinter\n'), ((2401, 2419), 'PyHEADTAIL.particles.generators.gaussian2D', 'gf.gaussian2D', (['(3.0)'], {}), '(3.0)\n', (2414, 2419), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((3739, 3771), 'PyHEADTAIL.particles.generators.import_distribution2D', 'gf.import_distribution2D', (['coords'], {}), '(coords)\n', (3763, 3771), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((6601, 6633), 'PyHEADTAIL.particles.generators.import_distribution2D', 'gf.import_distribution2D', (['coords'], {}), '(coords)\n', (6625, 6633), True, 'import PyHEADTAIL.particles.generators as gf\n'), ((4861, 4876), 'PyHEADTAIL.general.printers.SilentPrinter', 'SilentPrinter', ([], {}), '()\n', (4874, 4876), False, 'from PyHEADTAIL.general.printers import SilentPrinter\n'), ((5735, 5754), 'PyHEADTAIL.particles.generators.gaussian2D', 'gf.gaussian2D', (['(0.01)'], {}), '(0.01)\n', (5748, 5754), True, 'import PyHEADTAIL.particles.generators as gf\n')] |
from lib.utils.misc import detect_os
# Static description of the MOTChallenge datasets: release names, the default
# (Linux) directory layout, the train/val/test sequence splits and the public
# detectors available per split. get_mot_info() returns a copy of this dict
# with 'base_path' rewritten for the current operating system.
MOT_info = {
    "readme": "The sequences in MOT16 and MOT17 are the same, while the sequences in 2DMOT2015 are "
              "not all the same with those in MOT17. To handle this, we filter 2DMOT2015 dataset, "
              "i.e. those sequences that are contained in MOT17 will not be included in 2DMOT2015",
    'year': ['MOT15', 'MOT16', 'MOT17', 'MOT20'],
    # Default image/label directories (Linux layout); see get_mot_info().
    'base_path': {
        'MOT15': {
            'im': '/home/liuqk/Dataset/MOT/2DMOT2015',
            'label':'/home/liuqk/Dataset/MOT/2DMOT2015',
        },
        'MOT16': {
            'im': '/home/liuqk/Dataset/MOT/MOT17Det',
            'label':'/home/liuqk/Dataset/MOT/MOT16Labels',
        },
        'MOT17': {
            'im': '/home/liuqk/Dataset/MOT/MOT17Det',
            'label': '/home/liuqk/Dataset/MOT/MOT17Labels',
        },
        'MOT20': {
            'im': '/home/liuqk/Dataset/MOT/MOT20',
            'label': '/home/liuqk/Dataset/MOT/MOT20',
        },
        'MOT16-det-dpm-raw': '/home/liuqk/Dataset/MOT/MOT16-det-dpm-raw/',
    },
    # Sequence names per dataset and split.
    'sequences': {
        'MOT15': {
            'train': ['ETH-Bahnhof', 'ETH-Sunnyday', 'KITTI-13', 'KITTI-17', 'PETS09-S2L1', 'TUD-Campus',
                      'TUD-Stadtmitte'],
            'test': ['ADL-Rundle-1', 'ADL-Rundle-3', 'AVG-TownCentre', 'ETH-Crossing', 'ETH-Jelmoli', 'ETH-Linthescher',
                     'KITTI-16', 'KITTI-19', 'PETS09-S2L2', 'TUD-Crossing', 'Venice-1'],
            'val': []
        },
        'MOT16': {
            'train': ['MOT16-04', 'MOT16-11', 'MOT16-05', 'MOT16-13', 'MOT16-02'], #, 'MOT16-10', 'MOT16-09'],
            # Bug fix: 'MOT16-12' was listed twice in the original test split.
            'test': ['MOT16-12', 'MOT16-03', 'MOT16-01', 'MOT16-06', 'MOT16-07', 'MOT16-08', 'MOT16-14'],
            'val': ['MOT16-09', 'MOT16-10']
        },
        'MOT17': {
            'train': ['MOT17-04', 'MOT17-11', 'MOT17-05', 'MOT17-13', 'MOT17-02'],
            'test': ['MOT17-03', 'MOT17-01', 'MOT17-06', 'MOT17-07', 'MOT17-08', 'MOT17-12', 'MOT17-14'],
            'val': ['MOT17-10', 'MOT17-09']
        },
        'MOT20':{
            'train':['MOT20-01', 'MOT20-02', 'MOT20-03', 'MOT20-05'],
            'test': ['MOT20-04', 'MOT20-06', 'MOT20-07', 'MOT20-08'],
            'val': [],
        },
    },
    # Public detection sets available per dataset/split ('' = none/default).
    'detectors': {
        'MOT15': {
            'train': [''],
            'val': [''],
            'test': ['']
        },
        'MOT16': {
            'train': [''],
            'val': [''],
            'test': ['']
        },
        'MOT17': {
            'train': ['DPM'],
            'val': ['DPM'], #['SDP', 'FRCNN', 'DPM'],
            'test': ['DPM', 'SDP', 'FRCNN']
        },
        'MOT20': {
            'train': [''],
            'val': [''],
            'test': ['']
        },
    },
}
def get_mot_info():
    """Return a copy of MOT_info with 'base_path' adjusted for the current OS.

    Returns:
        dict: the MOT dataset description whose 'base_path' entry points to
        the image/label directories for the detected operating system.

    Raises:
        NotImplementedError: if detect_os() reports an unknown system.
    """
    mot_info = MOT_info.copy()
    # modify the path based on the OS
    operate_system = detect_os()
    if operate_system == 'MAC_OS_X':
        base_path = {
            'MOT15': {
                'im': '/Users/Qiankun/Learning/Dataset/MOT/2DMOT2015',
                'label': '/Users/Qiankun/Learning/Dataset/MOT/2DMOT2015',
            },
            'MOT16': {
                'im': '/Users/Qiankun/Learning/Dataset/MOT/MOT17Det',
                'label': '/Users/Qiankun/Learning/Dataset/MOT/MOT16Labels',
            },
            'MOT17': {
                'im': '/Users/Qiankun/Learning/Dataset/MOT/MOT17Det',
                'label': '/Users/Qiankun/Learning/Dataset/MOT/MOT17Labels',
            },
            'MOT20': {
                'im': '/Users/Qiankun/Learning/Dataset/MOT/MOT20',
                'label': '/Users/Qiankun/Learning/Dataset/MOT/MOT20',
            },
            'MOT16-det-dpm-raw': '/home/liuqk/Dataset/MOT/MOT16-det-dpm-raw/',
        }
    elif operate_system == 'WINDOWS':
        # Bug fix: raw strings keep the backslashes literal. The original
        # plain strings could not spell 'MOT\\2DMOT2015' because '\\2' is an
        # octal escape (chr(2)) in a non-raw literal, and so the MOT15 path
        # was written without its directory separator.
        base_path = {
            'MOT15': {
                'im': r'F:\Datasets\MOT\2DMOT2015',
                'label': r'F:\Datasets\MOT\2DMOT2015',
            },
            'MOT16': {
                'im': r'F:\Datasets\MOT\MOT16',
                'label': r'F:\Datasets\MOT\MOT16',
            },
            'MOT17': {
                'im': r'F:\Datasets\MOT\MOT17',
                'label': r'F:\Datasets\MOT\MOT17',
            },
            'MOT20': {
                'im': r'F:\Datasets\MOT\MOT20',
                'label': r'F:\Datasets\MOT\MOT20',
            },
            'MOT16-det-dpm-raw': '/home/liuqk/Dataset/MOT/MOT16-det-dpm-raw/',
        }
    elif operate_system == 'LINUX':
        base_path = {
            'MOT15': {
                'im': '/home/liuqk/Dataset/MOT/2DMOT2015',
                'label': '/home/liuqk/Dataset/MOT/2DMOT2015',
            },
            'MOT16': {
                # 'im': '/home/liuqk/Dataset/MOT/MOT17Det',
                # 'label': '/home/liuqk/Dataset/MOT/MOT16Labels',
                'im': '/home/liuqk/Dataset/MOT/MOT16',
                'label': '/home/liuqk/Dataset/MOT/MOT16',
            },
            'MOT17': {
                # 'im': '/home/liuqk/Dataset/MOT/MOT17Det',
                # 'label': '/home/liuqk/Dataset/MOT/MOT17Labels',
                'im': '/home/liuqk/Dataset/MOT/MOT17',
                'label': '/home/liuqk/Dataset/MOT/MOT17',
            },
            'MOT20': {
                'im': '/home/liuqk/Dataset/MOT/MOT20',
                'label': '/home/liuqk/Dataset/MOT/MOT20',
            },
            'MOT16-det-dpm-raw': '/home/liuqk/Dataset/MOT/MOT16-det-dpm-raw/',
        }
    else:
        # Typo fix in the error message ("Unkonwn" -> "Unknown").
        raise NotImplementedError('Unknown operating system {}'.format(operate_system))
    mot_info['base_path'] = base_path
    return mot_info
| [
"lib.utils.misc.detect_os"
] | [((2883, 2894), 'lib.utils.misc.detect_os', 'detect_os', ([], {}), '()\n', (2892, 2894), False, 'from lib.utils.misc import detect_os\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-10 20:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
def populate_new_fields(apps, schema_editor):
    """Back-fill the new versioning/audit columns on every TillClosure row.

    The identity is seeded from the primary key, and the audit timestamps
    and editor are copied from the legacy close_time/closed_by values.
    (schema_editor is unused but required by the RunPython signature.)
    """
    TillClosure = apps.get_model('cashup', 'TillClosure')
    for closure in TillClosure.objects.all():
        closure.identity = closure.pk
        closure.object_created_time = closure.close_time
        closure.updated_by = closure.closed_by
        closure.version_created_time = closure.close_time
        closure.save()
class Migration(migrations.Migration):
    """Add versioning/audit fields to TillClosure and back-fill them.

    Three phases: (1) add the new columns with permissive defaults so the
    schema change succeeds on existing rows, (2) populate them from legacy
    data via RunPython, (3) tighten the field definitions now that every
    row holds real values.
    """
    dependencies = [
        ('cashup', '0001_initial'),
    ]
    operations = [
        # Phase 1: add the new columns with temporary defaults.
        migrations.AddField(
            model_name='tillclosure',
            name='identity',
            field=models.PositiveIntegerField(default=0, editable=False, blank=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tillclosure',
            name='object_created_time',
            field=models.DateTimeField(default=django.utils.timezone.now, editable=False, blank=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tillclosure',
            name='updated_by',
            field=models.ForeignKey(editable=False, blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='updated_tillclosures', to='cashup.Personnel'),
        ),
        migrations.AddField(
            model_name='tillclosure',
            name='version_created_time',
            field=models.DateTimeField(default=django.utils.timezone.now, editable=False, blank=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tillclosure',
            name='version_number',
            field=models.PositiveIntegerField(default=1, editable=False, blank=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='tillclosure',
            name='version_superseded_time',
            field=models.DateTimeField(editable=False, blank=True, null=True),
        ),
        # Phase 2: copy legacy values into the new columns (no-op on reverse).
        migrations.RunPython(populate_new_fields, migrations.RunPython.noop),
        # Phase 3: remove the temporary defaults / null-ness now that all
        # rows have been populated.
        migrations.AlterField(
            model_name='tillclosure',
            name='identity',
            field=models.PositiveIntegerField(editable=False, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='tillclosure',
            name='object_created_time',
            field=models.DateTimeField(editable=False, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='tillclosure',
            name='updated_by',
            field=models.ForeignKey(editable=False, blank=True, on_delete=django.db.models.deletion.PROTECT, related_name='updated_tillclosures', to='cashup.Personnel'),
        ),
        migrations.AlterField(
            model_name='tillclosure',
            name='version_created_time',
            field=models.DateTimeField(editable=False, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='tillclosure',
            name='version_number',
            field=models.PositiveIntegerField(editable=False, blank=True),
            preserve_default=True,
        ),
    ]
| [
"django.db.models.DateTimeField",
"django.db.migrations.RunPython",
"django.db.models.PositiveIntegerField",
"django.db.models.ForeignKey"
] | [((2145, 2213), 'django.db.migrations.RunPython', 'migrations.RunPython', (['populate_new_fields', 'migrations.RunPython.noop'], {}), '(populate_new_fields, migrations.RunPython.noop)\n', (2165, 2213), False, 'from django.db import migrations, models\n'), ((781, 847), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)', 'editable': '(False)', 'blank': '(True)'}), '(default=0, editable=False, blank=True)\n', (808, 847), False, 'from django.db import migrations, models\n'), ((1021, 1108), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'editable': '(False)', 'blank': '(True)'}), '(default=django.utils.timezone.now, editable=False,\n blank=True)\n', (1041, 1108), False, 'from django.db import migrations, models\n'), ((1269, 1440), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'editable': '(False)', 'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""updated_tillclosures"""', 'to': '"""cashup.Personnel"""'}), "(editable=False, blank=True, null=True, on_delete=django.\n db.models.deletion.PROTECT, related_name='updated_tillclosures', to=\n 'cashup.Personnel')\n", (1286, 1440), False, 'from django.db import migrations, models\n'), ((1569, 1656), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now', 'editable': '(False)', 'blank': '(True)'}), '(default=django.utils.timezone.now, editable=False,\n blank=True)\n', (1589, 1656), False, 'from django.db import migrations, models\n'), ((1821, 1887), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(1)', 'editable': '(False)', 'blank': '(True)'}), '(default=1, editable=False, blank=True)\n', (1848, 1887), False, 'from django.db import migrations, models\n'), ((2065, 2124), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'editable': '(False)', 'blank': 
'(True)', 'null': '(True)'}), '(editable=False, blank=True, null=True)\n', (2085, 2124), False, 'from django.db import migrations, models\n'), ((2331, 2386), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'editable': '(False)', 'blank': '(True)'}), '(editable=False, blank=True)\n', (2358, 2386), False, 'from django.db import migrations, models\n'), ((2561, 2609), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'editable': '(False)', 'blank': '(True)'}), '(editable=False, blank=True)\n', (2581, 2609), False, 'from django.db import migrations, models\n'), ((2775, 2935), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'editable': '(False)', 'blank': '(True)', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""updated_tillclosures"""', 'to': '"""cashup.Personnel"""'}), "(editable=False, blank=True, on_delete=django.db.models.\n deletion.PROTECT, related_name='updated_tillclosures', to=\n 'cashup.Personnel')\n", (2792, 2935), False, 'from django.db import migrations, models\n'), ((3066, 3114), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'editable': '(False)', 'blank': '(True)'}), '(editable=False, blank=True)\n', (3086, 3114), False, 'from django.db import migrations, models\n'), ((3284, 3339), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'editable': '(False)', 'blank': '(True)'}), '(editable=False, blank=True)\n', (3311, 3339), False, 'from django.db import migrations, models\n')] |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: tile"""
import akg.tvm
import akg.topi
import akg.utils as utils
@utils.check_input_type(akg.tvm.tensor.Tensor, (list, tuple), (str, type(None)))
def Tile(data, multiples, target=utils.CCE):
    """
    Repeats the data in the specified dimensions according to the multiples.

    Args:
        data (tvm.tensor.Tensor): Tensor of type float16, float32.
        multiples (Union[list, tuple]): Elements must be int. The number of repetitions.

    Returns:
        tvm.tensor.Tensor, has the same dtype as data.

    Supported Platforms:
        'Ascend', 'GPU', 'CPU'
    """
    utils.check_supported_target(target)
    # Validate shape and dtype before delegating to the topi implementation.
    tensor_shape = []
    for dim in data.shape:
        tensor_shape.append(dim.value)
    utils.check_shape(tensor_shape)
    utils.ops_dtype_check(data.dtype, utils.DtypeForDavinci.ALL_TYPES)
    utils.check_int_list(multiples, "multiples")
    return akg.topi.tile(data, multiples)
"akg.utils.ops_dtype_check",
"akg.utils.check_shape",
"akg.utils.check_int_list",
"akg.utils.check_supported_target"
] | [((1206, 1242), 'akg.utils.check_supported_target', 'utils.check_supported_target', (['target'], {}), '(target)\n', (1234, 1242), True, 'import akg.utils as utils\n'), ((1312, 1336), 'akg.utils.check_shape', 'utils.check_shape', (['shape'], {}), '(shape)\n', (1329, 1336), True, 'import akg.utils as utils\n'), ((1341, 1402), 'akg.utils.ops_dtype_check', 'utils.ops_dtype_check', (['dtype', 'utils.DtypeForDavinci.ALL_TYPES'], {}), '(dtype, utils.DtypeForDavinci.ALL_TYPES)\n', (1362, 1402), True, 'import akg.utils as utils\n'), ((1407, 1451), 'akg.utils.check_int_list', 'utils.check_int_list', (['multiples', '"""multiples"""'], {}), "(multiples, 'multiples')\n", (1427, 1451), True, 'import akg.utils as utils\n')] |
##
# File: SchemaDefLoaderDbTests.py
# Author: <NAME>
# Date: 29-Mar-2018
# Version: 0.001
#
# Updates:
# 20-Jun-2018 jdw updates for new schema generation and data preparation tools
#
##
"""
Tests for creating and loading rdbms database (mysql) using PDBx/mmCIF data files
and external schema definition.
"""
__docformat__ = "restructuredtext en"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "Apache 2.0"
import logging
import os
import platform
import time
import unittest
from rcsb.db.define.SchemaDefAccess import SchemaDefAccess
from rcsb.db.mysql.Connection import Connection
from rcsb.db.mysql.MyDbUtil import MyDbQuery
from rcsb.db.mysql.SchemaDefLoader import SchemaDefLoader
from rcsb.db.sql.SqlGen import SqlGenAdmin
from rcsb.utils.repository.RepositoryProvider import RepositoryProvider
from rcsb.db.utils.SchemaProvider import SchemaProvider
from rcsb.utils.config.ConfigUtil import ConfigUtil
# Emit timestamped log records that identify the originating module and function.
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger()
# Resolve this test directory and the repository top-level directory (three
# levels up) so fixture, config, and cache paths can be built relative to them.
HERE = os.path.abspath(os.path.dirname(__file__))
TOPDIR = os.path.dirname(os.path.dirname(os.path.dirname(HERE)))
class SchemaDefLoaderDbTests(unittest.TestCase):
    """Round-trip tests that create MySQL table schemas from schema
    definitions and batch-load BIRD, chemical component, and PDBx
    repository content into them.
    """

    def __init__(self, methodName="runTest"):
        super(SchemaDefLoaderDbTests, self).__init__(methodName)
        self.__verbose = True

    def setUp(self):
        """Configure mock repository paths, configuration, and data providers."""
        # Provider types marked "optional" are only exercised on macOS hosts.
        self.__isMac = platform.system() == "Darwin"
        self.__excludeType = None if self.__isMac else "optional"
        self.__verbose = True
        #
        fileLimit = 100
        numProc = 2
        self.__cachePath = os.path.join(TOPDIR, "CACHE")
        self.__workPath = os.path.join(HERE, "test-output")
        mockTopPath = os.path.join(TOPDIR, "rcsb", "mock-data")
        configPath = os.path.join(TOPDIR, "rcsb", "db", "config", "exdb-config-example.yml")
        #
        configName = "site_info_configuration"
        self.__cfgOb = ConfigUtil(configPath=configPath, defaultSectionName=configName, mockTopPath=mockTopPath)
        self.__resourceName = "MYSQL_DB"
        #
        self.__schP = SchemaProvider(self.__cfgOb, self.__cachePath, useCache=True)
        self.__rpP = RepositoryProvider(cfgOb=self.__cfgOb, numProc=numProc, fileLimit=fileLimit, cachePath=self.__cachePath)
        #
        self.__startTime = time.time()
        logger.debug("Starting %s at %s", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()))

    def tearDown(self):
        endTime = time.time()
        logger.debug("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)

    def __makeLoader(self, sd, client):
        """Return a SchemaDefLoader configured identically for every load test."""
        return SchemaDefLoader(
            self.__cfgOb,
            schemaDefObj=sd,
            dbCon=client,
            cachePath=self.__cachePath,
            workPath=self.__workPath,
            cleanUp=False,
            warnings="error",
            verbose=self.__verbose,
            restoreUseStash=False,
            restoreUseGit=True,
            providerTypeExclude=self.__excludeType,
        )

    def __schemaCreate(self, schemaDefObj):
        """Create table schema using schema definition"""
        try:
            tableIdList = schemaDefObj.getSchemaIdList()
            sqlGen = SqlGenAdmin(self.__verbose)
            # Database creation first, then one CREATE TABLE per schema object.
            sqlL = sqlGen.createDatabaseSQL(schemaDefObj.getDatabaseName())
            for tableId in tableIdList:
                tableDefObj = schemaDefObj.getSchemaObject(tableId)
                sqlL.extend(sqlGen.createTableSQL(databaseName=schemaDefObj.getDatabaseName(), tableDefObj=tableDefObj))
            logger.debug("Schema creation SQL string\n %s\n\n", "\n".join(sqlL))
            with Connection(cfgOb=self.__cfgOb, resourceName=self.__resourceName) as client:
                myQ = MyDbQuery(dbcon=client, verbose=self.__verbose)
                #
                # Permit warnings to support "drop table if exists" for missing tables.
                #
                myQ.setWarning("ignore")
                ret = myQ.sqlCommand(sqlCommandList=sqlL)
                logger.debug("\n\n+INFO mysql server returns %r\n", ret)
                self.assertTrue(ret)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()

    # ------------- - -------------------------------------------------------------------------------------------
    def testSchemaCreate(self):
        """Create table schema for BIRD, chemical component, and PDBx data."""
        cD = self.__schP.makeSchemaDef("bird", dataTyping="SQL", saveSchema=True)
        sd = SchemaDefAccess(cD)
        self.__schemaCreate(sd)
        #
        cD = self.__schP.makeSchemaDef("chem_comp", dataTyping="SQL", saveSchema=True)
        sd = SchemaDefAccess(cD)
        self.__schemaCreate(sd)
        #
        # The pdbx schema is not optimized for mysql limitations; the stray
        # re-create of the stale chem_comp schema left behind when this
        # section was commented out has been removed.
        # cD = self.__schP.makeSchemaDef("pdbx", dataTyping="SQL", saveSchema=True)
        # sd = SchemaDefAccess(cD)
        # self.__schemaCreate(sd)

    def testLoadBirdReference(self):
        """Load BIRD and BIRD family reference data with batch-file loading."""
        try:
            cD = self.__schP.makeSchemaDef("bird", dataTyping="SQL", saveSchema=True)
            sd = SchemaDefAccess(cD)
            self.__schemaCreate(sd)
            inputPathList = self.__rpP.getLocatorObjList(contentType="bird")
            inputPathList.extend(self.__rpP.getLocatorObjList(contentType="bird_family"))
            #
            with Connection(cfgOb=self.__cfgOb, resourceName=self.__resourceName) as client:
                sdl = self.__makeLoader(sd, client)
                ok = sdl.load(inputPathList=inputPathList, loadType="batch-file")
                self.assertTrue(ok)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()

    def testReLoadBirdReference(self):
        """Verify reloading with delete options after an initial batch-file load."""
        try:
            cD = self.__schP.makeSchemaDef("bird", dataTyping="SQL", saveSchema=True)
            sd = SchemaDefAccess(cD)
            self.__schemaCreate(sd)
            inputPathList = self.__rpP.getLocatorObjList(contentType="bird")
            inputPathList.extend(self.__rpP.getLocatorObjList(contentType="bird_family"))
            #
            with Connection(cfgOb=self.__cfgOb, resourceName=self.__resourceName) as client:
                sdl = self.__makeLoader(sd, client)
                sdl.load(inputPathList=inputPathList, loadType="batch-file")
                #
                logger.debug("INFO BATCH FILE RELOAD TEST --------------------------------------------\n")
                ok = sdl.load(inputPathList=inputPathList, loadType="batch-file", deleteOpt="all")
                self.assertTrue(ok)
                #
                logger.debug("\n\n\n+INFO BATCH INSERT RELOAD TEST --------------------------------------------\n")
                ok = sdl.load(inputPathList=inputPathList, loadType="batch-file", deleteOpt="selected")
                self.assertTrue(ok)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()

    def testLoadChemCompReference(self):
        """Load chemical component reference data with batch-file loading."""
        try:
            cD = self.__schP.makeSchemaDef("chem_comp", dataTyping="SQL", saveSchema=True)
            sd = SchemaDefAccess(cD)
            self.__schemaCreate(sd)
            inputPathList = self.__rpP.getLocatorObjList(contentType="chem_comp")
            with Connection(cfgOb=self.__cfgOb, resourceName=self.__resourceName) as client:
                sdl = self.__makeLoader(sd, client)
                ok = sdl.load(inputPathList=inputPathList, loadType="batch-file")
                self.assertTrue(ok)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()

    @unittest.skip("Disable test - schema not optimized for mysql limitations")
    def testLoadPdbxFiles(self):
        """Load PDBx entry files with batch-insert loading (currently skipped)."""
        try:
            cD = self.__schP.makeSchemaDef("pdbx", dataTyping="SQL", saveSchema=True)
            sd = SchemaDefAccess(cD)
            self.__schemaCreate(sd)
            inputPathList = self.__rpP.getLocatorObjList(contentType="pdbx")
            logger.debug("Input path list %r", inputPathList)
            with Connection(cfgOb=self.__cfgOb, resourceName=self.__resourceName) as client:
                sdl = self.__makeLoader(sd, client)
                ok = sdl.load(inputPathList=inputPathList, loadType="batch-insert", deleteOpt="all")
                self.assertTrue(ok)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
            self.fail()
def createSchemaSuite():
    """Return a suite containing only the schema-creation test."""
    suite = unittest.TestSuite()
    suite.addTest(SchemaDefLoaderDbTests("testSchemaCreate"))
    return suite
def loadReferenceSuite():
    """Return a suite of the reference-data loading tests."""
    suite = unittest.TestSuite()
    for testName in ("testLoadBirdReference", "testReLoadBirdReference", "testLoadChemCompReference"):
        suite.addTest(SchemaDefLoaderDbTests(testName))
    # suite.addTest(SchemaDefLoaderDbTests("testLoadPdbxFiles"))
    return suite
if __name__ == "__main__":
mySuite = createSchemaSuite()
unittest.TextTestRunner(verbosity=2).run(mySuite)
mySuite = loadReferenceSuite()
unittest.TextTestRunner(verbosity=2).run(mySuite)
| [
"logging.basicConfig",
"unittest.TestSuite",
"logging.getLogger",
"time.localtime",
"rcsb.db.mysql.MyDbUtil.MyDbQuery",
"rcsb.db.utils.SchemaProvider.SchemaProvider",
"rcsb.db.mysql.SchemaDefLoader.SchemaDefLoader",
"os.path.join",
"rcsb.utils.repository.RepositoryProvider.RepositoryProvider",
"os... | [((937, 1056), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s')\n", (956, 1056), False, 'import logging\n'), ((1061, 1080), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1078, 1080), False, 'import logging\n'), ((1105, 1130), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1120, 1130), False, 'import os\n'), ((8704, 8778), 'unittest.skip', 'unittest.skip', (['"""Disable test - schema not optimized for mysql limitations"""'], {}), "('Disable test - schema not optimized for mysql limitations')\n", (8717, 8778), False, 'import unittest\n'), ((10026, 10046), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (10044, 10046), False, 'import unittest\n'), ((10184, 10204), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (10202, 10204), False, 'import unittest\n'), ((1173, 1194), 'os.path.dirname', 'os.path.dirname', (['HERE'], {}), '(HERE)\n', (1188, 1194), False, 'import os\n'), ((1641, 1670), 'os.path.join', 'os.path.join', (['TOPDIR', '"""CACHE"""'], {}), "(TOPDIR, 'CACHE')\n", (1653, 1670), False, 'import os\n'), ((1697, 1730), 'os.path.join', 'os.path.join', (['HERE', '"""test-output"""'], {}), "(HERE, 'test-output')\n", (1709, 1730), False, 'import os\n'), ((1753, 1794), 'os.path.join', 'os.path.join', (['TOPDIR', '"""rcsb"""', '"""mock-data"""'], {}), "(TOPDIR, 'rcsb', 'mock-data')\n", (1765, 1794), False, 'import os\n'), ((1816, 1887), 'os.path.join', 'os.path.join', (['TOPDIR', '"""rcsb"""', '"""db"""', '"""config"""', '"""exdb-config-example.yml"""'], {}), "(TOPDIR, 'rcsb', 'db', 'config', 'exdb-config-example.yml')\n", (1828, 1887), False, 'import os\n'), ((1968, 2061), 'rcsb.utils.config.ConfigUtil.ConfigUtil', 'ConfigUtil', ([], {'configPath': 'configPath', 
'defaultSectionName': 'configName', 'mockTopPath': 'mockTopPath'}), '(configPath=configPath, defaultSectionName=configName,\n mockTopPath=mockTopPath)\n', (1978, 2061), False, 'from rcsb.utils.config.ConfigUtil import ConfigUtil\n'), ((2131, 2192), 'rcsb.db.utils.SchemaProvider.SchemaProvider', 'SchemaProvider', (['self.__cfgOb', 'self.__cachePath'], {'useCache': '(True)'}), '(self.__cfgOb, self.__cachePath, useCache=True)\n', (2145, 2192), False, 'from rcsb.db.utils.SchemaProvider import SchemaProvider\n'), ((2214, 2322), 'rcsb.utils.repository.RepositoryProvider.RepositoryProvider', 'RepositoryProvider', ([], {'cfgOb': 'self.__cfgOb', 'numProc': 'numProc', 'fileLimit': 'fileLimit', 'cachePath': 'self.__cachePath'}), '(cfgOb=self.__cfgOb, numProc=numProc, fileLimit=fileLimit,\n cachePath=self.__cachePath)\n', (2232, 2322), False, 'from rcsb.utils.repository.RepositoryProvider import RepositoryProvider\n'), ((2366, 2377), 'time.time', 'time.time', ([], {}), '()\n', (2375, 2377), False, 'import time\n'), ((2528, 2539), 'time.time', 'time.time', ([], {}), '()\n', (2537, 2539), False, 'import time\n'), ((4228, 4247), 'rcsb.db.define.SchemaDefAccess.SchemaDefAccess', 'SchemaDefAccess', (['cD'], {}), '(cD)\n', (4243, 4247), False, 'from rcsb.db.define.SchemaDefAccess import SchemaDefAccess\n'), ((4390, 4409), 'rcsb.db.define.SchemaDefAccess.SchemaDefAccess', 'SchemaDefAccess', (['cD'], {}), '(cD)\n', (4405, 4409), False, 'from rcsb.db.define.SchemaDefAccess import SchemaDefAccess\n'), ((1434, 1451), 'platform.system', 'platform.system', ([], {}), '()\n', (1449, 1451), False, 'import platform\n'), ((2885, 2912), 'rcsb.db.sql.SqlGen.SqlGenAdmin', 'SqlGenAdmin', (['self.__verbose'], {}), '(self.__verbose)\n', (2896, 2912), False, 'from rcsb.db.sql.SqlGen import SqlGenAdmin\n'), ((4757, 4776), 'rcsb.db.define.SchemaDefAccess.SchemaDefAccess', 'SchemaDefAccess', (['cD'], {}), '(cD)\n', (4772, 4776), False, 'from rcsb.db.define.SchemaDefAccess import SchemaDefAccess\n'), 
((5989, 6008), 'rcsb.db.define.SchemaDefAccess.SchemaDefAccess', 'SchemaDefAccess', (['cD'], {}), '(cD)\n', (6004, 6008), False, 'from rcsb.db.define.SchemaDefAccess import SchemaDefAccess\n'), ((7721, 7740), 'rcsb.db.define.SchemaDefAccess.SchemaDefAccess', 'SchemaDefAccess', (['cD'], {}), '(cD)\n', (7736, 7740), False, 'from rcsb.db.define.SchemaDefAccess import SchemaDefAccess\n'), ((8928, 8947), 'rcsb.db.define.SchemaDefAccess.SchemaDefAccess', 'SchemaDefAccess', (['cD'], {}), '(cD)\n', (8943, 8947), False, 'from rcsb.db.define.SchemaDefAccess import SchemaDefAccess\n'), ((10591, 10627), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (10614, 10627), False, 'import unittest\n'), ((10680, 10716), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (10703, 10716), False, 'import unittest\n'), ((2466, 2482), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2480, 2482), False, 'import time\n'), ((2644, 2660), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2658, 2660), False, 'import time\n'), ((3317, 3381), 'rcsb.db.mysql.Connection.Connection', 'Connection', ([], {'cfgOb': 'self.__cfgOb', 'resourceName': 'self.__resourceName'}), '(cfgOb=self.__cfgOb, resourceName=self.__resourceName)\n', (3327, 3381), False, 'from rcsb.db.mysql.Connection import Connection\n'), ((3415, 3462), 'rcsb.db.mysql.MyDbUtil.MyDbQuery', 'MyDbQuery', ([], {'dbcon': 'client', 'verbose': 'self.__verbose'}), '(dbcon=client, verbose=self.__verbose)\n', (3424, 3462), False, 'from rcsb.db.mysql.MyDbUtil import MyDbQuery\n'), ((5012, 5076), 'rcsb.db.mysql.Connection.Connection', 'Connection', ([], {'cfgOb': 'self.__cfgOb', 'resourceName': 'self.__resourceName'}), '(cfgOb=self.__cfgOb, resourceName=self.__resourceName)\n', (5022, 5076), False, 'from rcsb.db.mysql.Connection import Connection\n'), ((5110, 5377), 'rcsb.db.mysql.SchemaDefLoader.SchemaDefLoader', 'SchemaDefLoader', 
(['self.__cfgOb'], {'schemaDefObj': 'sd', 'dbCon': 'client', 'cachePath': 'self.__cachePath', 'workPath': 'self.__workPath', 'cleanUp': '(False)', 'warnings': '"""error"""', 'verbose': 'self.__verbose', 'restoreUseStash': '(False)', 'restoreUseGit': '(True)', 'providerTypeExclude': 'self.__excludeType'}), "(self.__cfgOb, schemaDefObj=sd, dbCon=client, cachePath=self\n .__cachePath, workPath=self.__workPath, cleanUp=False, warnings='error',\n verbose=self.__verbose, restoreUseStash=False, restoreUseGit=True,\n providerTypeExclude=self.__excludeType)\n", (5125, 5377), False, 'from rcsb.db.mysql.SchemaDefLoader import SchemaDefLoader\n'), ((6244, 6308), 'rcsb.db.mysql.Connection.Connection', 'Connection', ([], {'cfgOb': 'self.__cfgOb', 'resourceName': 'self.__resourceName'}), '(cfgOb=self.__cfgOb, resourceName=self.__resourceName)\n', (6254, 6308), False, 'from rcsb.db.mysql.Connection import Connection\n'), ((6342, 6609), 'rcsb.db.mysql.SchemaDefLoader.SchemaDefLoader', 'SchemaDefLoader', (['self.__cfgOb'], {'schemaDefObj': 'sd', 'dbCon': 'client', 'cachePath': 'self.__cachePath', 'workPath': 'self.__workPath', 'cleanUp': '(False)', 'warnings': '"""error"""', 'verbose': 'self.__verbose', 'restoreUseStash': '(False)', 'restoreUseGit': '(True)', 'providerTypeExclude': 'self.__excludeType'}), "(self.__cfgOb, schemaDefObj=sd, dbCon=client, cachePath=self\n .__cachePath, workPath=self.__workPath, cleanUp=False, warnings='error',\n verbose=self.__verbose, restoreUseStash=False, restoreUseGit=True,\n providerTypeExclude=self.__excludeType)\n", (6357, 6609), False, 'from rcsb.db.mysql.SchemaDefLoader import SchemaDefLoader\n'), ((7877, 7941), 'rcsb.db.mysql.Connection.Connection', 'Connection', ([], {'cfgOb': 'self.__cfgOb', 'resourceName': 'self.__resourceName'}), '(cfgOb=self.__cfgOb, resourceName=self.__resourceName)\n', (7887, 7941), False, 'from rcsb.db.mysql.Connection import Connection\n'), ((7975, 8242), 'rcsb.db.mysql.SchemaDefLoader.SchemaDefLoader', 
'SchemaDefLoader', (['self.__cfgOb'], {'schemaDefObj': 'sd', 'dbCon': 'client', 'cachePath': 'self.__cachePath', 'workPath': 'self.__workPath', 'cleanUp': '(False)', 'warnings': '"""error"""', 'verbose': 'self.__verbose', 'restoreUseStash': '(False)', 'restoreUseGit': '(True)', 'providerTypeExclude': 'self.__excludeType'}), "(self.__cfgOb, schemaDefObj=sd, dbCon=client, cachePath=self\n .__cachePath, workPath=self.__workPath, cleanUp=False, warnings='error',\n verbose=self.__verbose, restoreUseStash=False, restoreUseGit=True,\n providerTypeExclude=self.__excludeType)\n", (7990, 8242), False, 'from rcsb.db.mysql.SchemaDefLoader import SchemaDefLoader\n'), ((9141, 9205), 'rcsb.db.mysql.Connection.Connection', 'Connection', ([], {'cfgOb': 'self.__cfgOb', 'resourceName': 'self.__resourceName'}), '(cfgOb=self.__cfgOb, resourceName=self.__resourceName)\n', (9151, 9205), False, 'from rcsb.db.mysql.Connection import Connection\n'), ((9239, 9506), 'rcsb.db.mysql.SchemaDefLoader.SchemaDefLoader', 'SchemaDefLoader', (['self.__cfgOb'], {'schemaDefObj': 'sd', 'dbCon': 'client', 'cachePath': 'self.__cachePath', 'workPath': 'self.__workPath', 'cleanUp': '(False)', 'warnings': '"""error"""', 'verbose': 'self.__verbose', 'restoreUseStash': '(False)', 'restoreUseGit': '(True)', 'providerTypeExclude': 'self.__excludeType'}), "(self.__cfgOb, schemaDefObj=sd, dbCon=client, cachePath=self\n .__cachePath, workPath=self.__workPath, cleanUp=False, warnings='error',\n verbose=self.__verbose, restoreUseStash=False, restoreUseGit=True,\n providerTypeExclude=self.__excludeType)\n", (9254, 9506), False, 'from rcsb.db.mysql.SchemaDefLoader import SchemaDefLoader\n')] |
import numpy as np
import matplotlib.pyplot as plt
def filter_rms_error(filter_object,
                     to_filter_data_lambda,
                     desired_filter_data_lambda,
                     dt=0.01,
                     start_time=0.0,
                     end_time=10.0,
                     skip_initial=0,
                     use_pressure_error=False,
                     abs_tol=2.0,
                     rel_tol=0.02,
                     generate_plot=False):
    """Calculates root-mean-square (RMS) error between data calculated
    by a filter and a reference function that nominally should yield
    equal data.

    Parameters
    ----------
    filter_object : object
        An object representing the filter being tested. It must have
        the following functions defined.
            filter_object(dt: float)
            filter_object.append(datum: float)
            filter_object.get_datum() -> float
    to_filter_data_lambda : lambda
        A function representing the data being fed to the filter. It
        should be of the form
            to_filter_lambda(time: np.array) -> np.array
    desired_filter_data_lambda : lambda
        A function representing output that the filter_object output
        should be nominally equal to. It should be of the form
            desired_filter_data_lambda(time: np.array) -> np.array
    start_time=0.0 : float
    end_time=10.0 : float
    dt=0.01 : float
        Represents a time interval in seconds of [start_time, end_time)
        with steps of dt between. Calculated as
        np.arange(start_time, end_time, dt).
    skip_initial=0 : int
        Ignores the first skip_initial data points when calculating
        error. This is useful when a filter has an initial transient
        before it starts returning useful data.
    use_pressure_error=False : bool
        Instead of calculating direct RMS error, this function will
        calculate a normalized error based on given tolerances. This is
        useful for ventilators trying to calculate pressure meeting
        ISO 80601-2-80:2018 (pressure-accuracy clause).  Default values
        for the tolerances are based on this standard.
    abs_tol=2.0 : float
        The design absolute tolerance when calculating pressure error,
        i.e. +/- abs_tol. Only used if use_pressure_error == True.
    rel_tol=0.02 : float
        The design relative tolerance when calculating pressure error,
        i.e. +/- rel_tol * desired_filter_data(t).
    generate_plot=False : bool
        If True, then a plot of the filter data and
        desired_filter_data_lambda with respect to time will be
        generated. Note that this should be false in non-interactive
        contexts.

    Returns
    -------
    error : float
        If use_pressure_error is False,
            This returns the RMS error between the filter output and
            the output of desired_filter_data_lambda.
        If use_pressure_error is True,
            This returns a normalized error between the filter output
            and the output of desired_filter_data_lambda. If error < 1,
            then the typical error is within the design tolerance. When
            testing, you can add a safety factor to the error by
            asserting that the error must be less than 1/safety_factor.
    """
    t = np.arange(start_time, end_time, dt)
    test_filter = filter_object(dt)
    to_filter_data = to_filter_data_lambda(t)
    desired_filtered_data = desired_filter_data_lambda(t)
    # Accumulate in a Python list and convert once: np.append inside the
    # loop copies the whole array each iteration (quadratic time).
    filtered = []
    for datum in to_filter_data:
        test_filter.append(datum)
        filtered.append(test_filter.get_datum())
    filtered_data = np.array(filtered)

    if generate_plot:
        _, axis = plt.subplots()
        axis.plot(t, to_filter_data, label="To Filter Data")
        axis.plot(t, filtered_data, label="Filtered Data")
        axis.plot(t, desired_filtered_data, label="Desired Filtered Data")
        axis.legend()
        plt.show()

    if not use_pressure_error:
        return _root_mean_square(
            (filtered_data - desired_filtered_data)[skip_initial:])
    else:
        return _pressure_error(filtered_data[skip_initial:],
                               desired_filtered_data[skip_initial:])
def _root_mean_square(np_array):
return np.sqrt(np.mean(np.square(np_array)))
def _pressure_error(calculated_pressure,
                    actual_pressure,
                    abs_tol=2.0,
                    rel_tol=0.02):
    """RMS of the pressure error normalized by the combined design tolerance.

    A result below 1 means the typical error fits within
    abs_tol + rel_tol * actual_pressure at each sample.
    """
    tolerance = np.full_like(actual_pressure, abs_tol) + rel_tol * actual_pressure
    normalized_error = (calculated_pressure - actual_pressure) / tolerance
    return _root_mean_square(normalized_error)
| [
"numpy.full_like",
"numpy.square",
"numpy.array",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((3339, 3374), 'numpy.arange', 'np.arange', (['start_time', 'end_time', 'dt'], {}), '(start_time, end_time, dt)\n', (3348, 3374), True, 'import numpy as np\n'), ((3477, 3489), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3485, 3489), True, 'import numpy as np\n'), ((3789, 3803), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3801, 3803), True, 'import matplotlib.pyplot as plt\n'), ((4029, 4039), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4037, 4039), True, 'import matplotlib.pyplot as plt\n'), ((4376, 4395), 'numpy.square', 'np.square', (['np_array'], {}), '(np_array)\n', (4385, 4395), True, 'import numpy as np\n'), ((4635, 4673), 'numpy.full_like', 'np.full_like', (['actual_pressure', 'abs_tol'], {}), '(actual_pressure, abs_tol)\n', (4647, 4673), True, 'import numpy as np\n')] |
import requests
from lxml import etree
import urllib3
#京东爬虫
class JdSpider:
    """Scraper for JD.com search results for a given keyword (e.g. an ISBN)."""

    def __init__(self):
        self.url_temp = "https://search.jd.com/Search?keyword={}"
        self.headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36"}

    # Send the request and return the decoded response body.
    def parse_url(self,url):
        # Certificate verification is disabled, so silence urllib3's warning.
        urllib3.disable_warnings()
        response = requests.get(url,headers=self.headers,verify=False)
        return response.content.decode()

    # Persist extracted records (not implemented yet).
    def save_content_list(self, content_list):
        pass

    @staticmethod
    def _first(values, prefix=""):
        """Return the first xpath result with an optional URL prefix, or None."""
        return prefix + values[0] if values else None

    # Extract one record per product from the search-results page.
    def get_content_list(self,html_str):
        html = etree.HTML(html_str)
        li_list = html.xpath("//div[@id='J_goodsList']//li")  # one <li> per product
        content_list = list()
        for li in li_list:
            book = dict()
            book["source"] = '京东'
            # title
            book["title"] = self._first(li.xpath(".//div[contains(@class,'p-name')]/a/em/text()"))
            # purchase link (scheme-relative in the page, so prepend "https:")
            book["link"] = self._first(li.xpath(".//div[@class='p-name']/a/@href"), 'https:')
            # image (lazy-loaded attribute, also scheme-relative)
            book["img"] = self._first(li.xpath(".//div[@class='p-img']/a/img/@data-lazy-img"), 'https:')
            # price
            book["price"] = self._first(li.xpath(".//div[@class='p-price']/strong/i/text()"))
            # seller/store
            book["store"] = self._first(li.xpath(".//a[@class='curr-shop hd-shopname']/@title"))
            content_list.append(book)
        return content_list

    def run(self,isbn="9787115428028"):
        """Fetch, parse, and save the search results for *isbn*."""
        # 1. build the url
        url = self.url_temp.format(isbn)
        # 2. send the request, get the response
        html_str = self.parse_url(url)
        # 3. extract the data
        content_list = self.get_content_list(html_str)
        # 4. save the data
        self.save_content_list(content_list)
        return content_list
if __name__ == '__main__':
    # Run a single search with the default ISBN.
    JdSpider().run()
"urllib3.disable_warnings",
"lxml.etree.HTML",
"requests.get"
] | [((388, 414), 'urllib3.disable_warnings', 'urllib3.disable_warnings', ([], {}), '()\n', (412, 414), False, 'import urllib3\n'), ((435, 488), 'requests.get', 'requests.get', (['url'], {'headers': 'self.headers', 'verify': '(False)'}), '(url, headers=self.headers, verify=False)\n', (447, 488), False, 'import requests\n'), ((702, 722), 'lxml.etree.HTML', 'etree.HTML', (['html_str'], {}), '(html_str)\n', (712, 722), False, 'from lxml import etree\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib.legend_handler import HandlerTuple
from scipy.integrate import quad, simps
from math import * #lets the user enter complicated functions easily, eg: exp(3*sin(x**2))
import pyinputplus as pyip # makes taking inputs more convenient for the user
from warnings import filterwarnings
# Suppress the matplotlib deprecation warning triggered by the
# ax.set_yscale("symlog", linthresh=...) calls used below; __import__ reaches
# matplotlib internals without introducing a top-level alias for them.
filterwarnings("ignore", category=__import__('matplotlib').cbook.mplDeprecation)
# Silence numpy's RankWarning so a poorly conditioned polynomial fit (used
# when drawing Simpson's rule parabolas) does not flood the console.
filterwarnings("ignore", category=np.RankWarning)
#This function is unique because it's the only one to graph many things on each axis, thus it is separate in order to not make special cases in the rest of the code
def Riemann(fig, formula, f, xmin, xmax, boolLogX, boolLogY, listGraph_n, listComp_n, lenContinuous, exact, errorBound):
continuous_x = np.linspace(xmin, xmax, lenContinuous)
continuous_y = f(continuous_x)
interval = xmax - xmin
listListGraph_xLeft = [np.linspace(xmin, xmax - (interval) / n, n) for n in listGraph_n]
listListGraph_yLeft = [f(list_x) for list_x in listListGraph_xLeft]
listListComp_xLeft = [np.linspace(xmin, xmax - (interval) / n, n) for n in listComp_n]
listListComp_yLeft = [f(list_x) for list_x in listListComp_xLeft]
listListGraph_xRight = [np.linspace(xmin + (interval) / n, xmax, n) for n in listGraph_n]
listListGraph_yRight = [f(list_x) for list_x in listListGraph_xRight]
listListComp_xRight = [np.linspace(xmin + (interval) / n, xmax, n) for n in listComp_n]
listListComp_yRight = [f(list_x) for list_x in listListComp_xRight]
listListGraph_x = [np.linspace(xmin, xmax - interval / n, n) + interval / (2 * n) for n in listGraph_n]
listListGraph_y = [(list_yLeft + list_yRight) / 2 for list_yLeft, list_yRight in zip(listListGraph_yLeft, listListGraph_yRight)]
listListComp_y = [(list_yLeft + list_yRight) / 2 for list_yLeft, list_yRight in zip(listListComp_yLeft, listListComp_yRight)]
listWidth = [interval / n for n in listGraph_n]
fig.suptitle(f"Study of the approximation of the integral of the function y = {formula} on the interval ({xmin}, {xmax}) "
f"by left and right Riemann sums and their average and of the quality of the approximations compared to the exact value")
#This fills the left side of the figure, filling it row by row.
nbCol = ceil(len(listGraph_n) / 5)
areaAxes = [fig.add_subplot(ceil(len(listGraph_n) / nbCol), 2 * nbCol, i) for i in range(1, 2 * len(listGraph_n)) if 0 <= i % (2 * nbCol) - 1 < nbCol]
for i, ax in enumerate(areaAxes):
ax.bar(listListGraph_xLeft[i], listListGraph_yLeft[i], align='edge', alpha=0.5, width=listWidth[i], label="Left sum",
color=list("#70db70" if y >= 0 else "#ff6666" for y in listListGraph_yLeft[i])) #green and red
ax.bar(listListGraph_x[i], listListGraph_y[i], align='center', alpha=0.5, width=listWidth[i], label="Average of left and right sums",
color=list("#071ba3" if y >= 0 else "#d8e30e" for y in listListGraph_y[i])) # blue and orange
ax.bar(listListGraph_xRight[i], listListGraph_yRight[i], align='edge', alpha=0.5, width=-listWidth[i], label="Right sum",
color=list("#6815a3" if y >= 0 else "#e08f0b" for y in listListGraph_yRight[i])) # purple and yellow
ax.plot(continuous_x, continuous_y)
if boolLogX:
ax.set_xscale('symlog', linthreshy=interval / (10 * max(len(list_) for list_ in listListGraph_x + listListComp_xRight)))
if boolLogY:
ax.set_yscale('symlog', linthreshy=absGetNonZeroMin(listListGraph_yLeft + listListComp_yLeft + listListGraph_yRight + listListComp_yRight))
ax.set_title(f"{listGraph_n[i]} rectangles")
ax.grid(True)
#stuff for the legend to display both colors for each barplot. See last answer at
#https://stackoverflow.com/questions/31478077/how-to-make-two-markers-share-the-same-label-in-the-legend-using-matplotlib
#for explanation, by user rouckas
legendpatches = ((patches.Patch(color=color1, alpha=0.5), patches.Patch(color=color2, alpha=0.5))
for color1, color2 in zip(("#70db70", "#071ba3", "#6815a3"), ("#ff6666", "#d8e30e", "#e08f0b")))
areaAxes[0].legend((legendpatches), ("Left sum", "Average of left and right sums", "Right sum"), handler_map={tuple: HandlerTuple(ndivide=None)}, fontsize=10)
errorBounder = np.vectorize(lambda x: x if abs(x) > errorBound else 0)
#the sorting here is to keep the implicit mapping between the lists of y values and the n values, which gets sorted later.
listDistLeft = errorBounder(np.fromiter(((list_y.mean() * (interval) - exact) for list_y in sorted(listListGraph_yLeft + listListComp_yLeft, key=len)), dtype=float))
listDistMid = errorBounder(np.fromiter(((list_y.mean() * (interval) - exact) for list_y in sorted(listListGraph_y + listListComp_y, key=len)), dtype=float))
listDistRight = errorBounder(np.fromiter(((list_y.mean() * (interval) - exact) for list_y in sorted(listListGraph_yRight + listListComp_yRight, key=len)), dtype=float))
accuracyAxes = [fig.add_subplot(2, 2, i) for i in (2, 4)]
if 0 in listDistLeft + listDistRight + listDistMid:
global exactMessage
exactMessage = True
if exact == 0:
titles = ("difference for each approximation compared to the exact value of the integral, 0",
"difference for each approximation compared to the exact value of the integral, 0, on a logarithmic scale")
else:
listDistLeft, listDistMid, listDistRight = map(lambda x: 100 * x / exact, (listDistLeft, listDistMid, listDistRight))
titles = (f"relative percentage error for each approximation compared to the exact integral: {niceStr(exact)}",
f"relative percentage error for each approximation compared to the exact integral: {niceStr(exact)}, on a logarithmic scale")
#sorted to avoid lines going back and forth because it wouldn't be monotonically increasing.
listTot_n = list(sorted(listGraph_n + listComp_n))
ax = accuracyAxes[0]
for listDist, color, label in zip((listDistLeft, listDistMid, listDistRight), ("#70db70", "#071ba3", "#6815a3"),
("Left sums", "Average of left and right sums", "Right sums")):
ax.plot(listTot_n, listDist, color=color, label=label)
for x, y in zip(listTot_n * 3, np.concatenate((listDistLeft, listDistMid, listDistRight))):
ax.text(x, y, niceStr(y))
ax.grid(True)
ax.set_title(titles[0])
ax.legend()
ax = accuracyAxes[1]
for listDist, color, label in zip((listDistLeft, listDistMid, listDistRight), ("#70db70", "#071ba3", "#6815a3"),
("Left sums", "Average of left and right sums", "Right sums")):
ax.plot(listTot_n, listDist, color=color, label=label)
ax.set_xscale("log")
ax.get_xaxis().set_tick_params(which='minor', size=0)
ax.get_xaxis().set_tick_params(which='minor', width=0)
ax.set_yscale("symlog", linthreshy=absGetNonZeroMin(np.concatenate((listDistLeft, listDistMid, listDistRight))) * 0.9)
good_ylim(ax, np.concatenate((listDistLeft, listDistMid, listDistRight))) # sets the y limits to something a bit cleaner
for x, y in zip(listTot_n * 3, np.concatenate((listDistLeft, listDistMid, listDistRight))):
ax.text(x, y, niceStr(y))
ax.set_title(titles[1])
ax.grid(True, which='major')
ax.legend()
class Midpoint:
    """Midpoint-rule approximation of the integral of f over [xmin, xmax].

    Each n in listGraph_n produces a graphed approximation; each n in
    listComp_n is computed only for the accuracy plots.
    """

    def __init__(self, f, xmin, xmax, listGraph_n, listComp_n):
        self.interval = xmax - xmin

        def midpoints(n):
            # n left edges of equal sub-intervals, shifted right by half a width
            return np.linspace(xmin, xmax - self.interval / n, n) + self.interval / (2 * n)

        self.listListGraph_x = [midpoints(n) for n in listGraph_n]
        self.listListGraph_y = [f(xs) for xs in self.listListGraph_x]
        self.listListComp_x = [midpoints(n) for n in listComp_n]
        self.listListComp_y = [f(xs) for xs in self.listListComp_x]
        self.listWidth = [self.interval / n for n in listGraph_n]

    def titleSpecs(self):
        return "some midpoint sums"

    def shapeName(self):
        return "rectangles"

    def listDist(self, exact):
        """Signed error of every midpoint sum, ordered by rectangle count."""
        ordered = sorted(self.listListGraph_y + self.listListComp_y, key=len)
        return np.fromiter((ys.mean() * self.interval - exact for ys in ordered), dtype=float)

    def graph(self, ax, i):
        """Draw the i-th midpoint bar chart on ax (green for y >= 0, red otherwise)."""
        ax.bar(self.listListGraph_x[i], self.listListGraph_y[i], alpha=0.5,
               width=self.listWidth[i],
               color=["#70db70" if y >= 0 else "#ff6666" for y in self.listListGraph_y[i]])
class Trapezoidal:
    """Trapezoidal-rule approximation of the integral of f over [xmin, xmax]."""

    def __init__(self, f, xmin, xmax, listGraph_n, listComp_n):
        self.interval = xmax - xmin
        # n trapezia require n + 1 sample points
        self.listListGraph_x = [np.linspace(xmin, xmax, n + 1) for n in listGraph_n]
        self.listListGraph_y = [f(xs) for xs in self.listListGraph_x]
        self.listListComp_x = [np.linspace(xmin, xmax, n + 1) for n in listComp_n]
        self.listListComp_y = [f(xs) for xs in self.listListComp_x]

    def titleSpecs(self):
        return "some trapezoidal sums"

    def shapeName(self):
        return "trapezia"

    def listDist(self, exact):
        """Signed error of every trapezoidal sum, ordered by point count."""
        def trap_sum(ys):
            # endpoints weighted 1/2: (sum - (first + last)/2) / n * width
            return (ys.sum() - (ys[0] + ys[-1]) / 2) / (len(ys) - 1) * (self.interval)
        ordered = sorted(self.listListGraph_y + self.listListComp_y, key=len)
        return np.fromiter((trap_sum(ys) - exact for ys in ordered), dtype=float)

    def graph(self, ax, i):
        """Draw the i-th trapezoidal approximation on ax."""
        xs = self.listListGraph_x[i]
        ys = self.listListGraph_y[i]
        ax.plot(xs, ys, color='#8b008b', linestyle='--')
        ax.fill_between(xs, ys, alpha=0.5, interpolate=True,
                        color=["#70db70" if y >= 0 else "#ff6666" for y in ys])
class Simpson:
    """Simpson's-rule approximation: each n counts fitted parabolas."""

    def __init__(self, f, xmin, xmax, listGraph_n, listComp_n):
        self.interval = xmax - xmin
        # n parabolic arcs need 2n + 1 sample points
        self.listListGraph_x = [np.linspace(xmin, xmax, 2 * n + 1) for n in listGraph_n]
        self.listListGraph_y = [f(xs) for xs in self.listListGraph_x]
        self.listListComp_x = [np.linspace(xmin, xmax, 2 * n + 1) for n in listComp_n]
        self.listListComp_y = [f(xs) for xs in self.listListComp_x]

    def titleSpecs(self):
        return "Simpson's rule"

    def shapeName(self):
        return "parabolas"

    def listDist(self, exact):
        """Signed error of every Simpson sum, ordered by point count."""
        xs_ordered = sorted(self.listListGraph_x + self.listListComp_x, key=len)
        ys_ordered = sorted(self.listListGraph_y + self.listListComp_y, key=len)
        return np.fromiter((simps(ys, xs) - exact
                             for xs, ys in zip(xs_ordered, ys_ordered)),
                            dtype=float)

    def graph(self, ax, i):
        """Plot the i-th piecewise-parabolic fit on ax.

        Splits the samples into parabola triples, fits each with a degree-2
        polynomial and evaluates it on the matching slice of continuous_x.
        """
        global continuous_x
        xs = self.listListGraph_x[i]
        ys = self.listListGraph_y[i]
        n_parab = (len(xs) - 1) // 2  # number of parabolas
        fitted_y = []
        for k in range(n_parab):
            seg_x = xs[2 * k:2 * k + 3]
            seg_y = ys[2 * k:2 * k + 3]
            coeffs = np.polyfit(seg_x, seg_y, 2)
            lo = len(continuous_x) * k // n_parab
            hi = len(continuous_x) * (k + 1) // n_parab
            fitted_y.extend(np.polyval(coeffs, continuous_x[lo:hi]))
        ax.plot(continuous_x, fitted_y, color='#8b008b', linestyle='--')
def firstDigit(num):
    """Return the first non-zero decimal digit in str(num), or None.

    Signs, decimal points and leading zeros are skipped; works for the
    scientific notation str() produces for very small/large floats.
    """
    for ch in str(num):
        if ch in '123456789':
            return int(ch)
    return None
def good_ylim(ax, values):
    """Clean up symlog y-limits on ax.

    The symlog scale can leave ugly limits; replace them with 0 and/or a
    digit times a power of ten (e.g. 9e-2) that encloses min/max of values.
    When all values are equal, the axes' current limits are kept.
    """
    mini, maxi = min(values), max(values)
    newBottom, newTop = ax.get_ylim()

    def bound(v):
        # smallest "(leading digit + 1) * 10**k" enclosing |v|, for v != 0
        return (firstDigit(abs(v)) + 1) * 10 ** floor(log10(abs(v)))

    if mini < 0 < maxi:
        newBottom, newTop = -bound(mini), bound(maxi)
    elif mini < maxi <= 0:
        newBottom, newTop = -bound(mini), 0
    elif 0 <= mini < maxi:
        newBottom, newTop = 0, bound(maxi)
    ax.set_ylim(newBottom, newTop)
def niceStr(val):
    """Render val compactly, avoiding an excessive number of digits."""
    if 100 < abs(val) < 1000000:
        # plain mid-range number: drop a few decimal digits
        decimals = max(0, 6 - floor(log10(abs(val))))
        return str(round(val, decimals))
    text = str(val)
    e_pos = text.find('e')
    if e_pos == -1:
        # non-scientific notation: simply truncate to at most 7 characters
        return text[:min(7, len(text))]
    # scientific notation: shorten the mantissa, keep the exponent intact
    return text[:min(7, e_pos)] + text[e_pos:]
def looper(func, check):
    """Call func() until check(result) passes, retrying on any exception.

    Used on the error-prone inputs (listGraph_n and listComp_n): check is
    expected to raise on invalid data, which triggers another prompt.
    """
    while True:
        try:
            candidate = func()
            if candidate is not None or True:
                pass
            if check(candidate):  # raises on invalid input
                return candidate
        except Exception as e:
            print("An error occured, so you will be asked for that input again, it is probably a typo, but just in case it isn't, here is the error message", e, sep='\n')
def getvalue(variable):
    """Return the value of *variable* from the global `tiers` table.

    Each tiers entry is (min_tier, default_expr, prompt_expr): below
    min_tier the default expression is evaluated, otherwise the
    interactive prompt expression runs instead.
    """
    global tier
    # renamed from `tuple`, which shadowed the builtin of the same name
    spec = tiers[variable]
    if tier < spec[0]:
        return eval(spec[1])
    return eval(spec[2])
def raiseEx(text): #to raise exceptions in lambda functions
    """Raise a generic Exception carrying *text*.

    `raise` is a statement, so the lambda validators passed to looper()
    delegate to this helper to signal invalid input.
    """
    raise Exception(text)
def absGetNonZeroMin(values):
    """Smallest non-zero absolute value in *values*, or 1 if all are zero.

    Used to set linthreshy on symlog scales, which must be strictly positive.
    """
    nonzero = [abs(v) for v in values if v]
    return min(nonzero) if nonzero else 1
def main():
    """Interactive driver: repeatedly ask for a function, an interval and a
    set of integration methods, then plot each approximation study, until
    the user enters 0 as the customization level.
    """
    print("If you want to stop the program (which is an infinite loop), enter 0 as a level of customization and the program will terminate")
    while True: #just a loop allowing to test many functions without quitting/restarting the program.
        global tier, tiers
        tier = pyip.inputInt("How much customization do you want ? 0: stop, 1: minimum, 2: average, 3: advanced : ", min=0, max=3)
        if tier == 0:
            break
        #concept: the first value is the default value, the second value is what is executed (with getvalue) to get the value.
        tiers = {"boologX": (2, 'False', """pyip.inputYesNo("Logarithmic x scale for graphing f(x) ? [y/n]", yesVal='y', noVal='n') == 'y'"""),
                 "boologY": (2, 'False', """pyip.inputYesNo("Logarithmic y scale for graphing f(x) ? [y/n]", yesVal='y', noVal='n') == 'y'"""),
                 "listGraph_n": (2, '[10, 100, 1000]', """list(map(int, input("what number of intervals/shapes would you like to study ? use comma separated values, "
                                                 "eg: 10, 100, 1000, 10000, spaces don't matter: ").split(',')))"""),
                 "listComp_n": (3, '[]', #the + sign on the next line is for proper string formatting: indented code without indented string.
                                """input('''Enter anything that evaluates to a regular python list of integers, such as [10, 100, 1000] or [3**i for i in range(2, 10)],\n''' +
                                   '''these will be added to the computations to display more points in the accuracy graphs:\n''')"""),
                 "lenContinuous": (3, '10000', """pyip.inputInt("How many values should be used to plot f(x) ? For graphing purposes only: ")""")}
        formula = input("f(x) = ")
        # NOTE(review): eval of raw user input — acceptable for a local,
        # interactive teaching tool, but never expose this to untrusted input.
        f = np.vectorize(lambda x: eval(formula))
        xmin, xmax = eval(input("Interval of integration: xmin, xmax = "))
        boolLogX = getvalue("boologX")
        boolLogY = getvalue("boologY")
        listGraph_n = looper(lambda: getvalue('listGraph_n'), lambda list_: True if isinstance(list_, list) and all(isinstance(x, int) for x in list_) else \
                      raiseEx("It should evaluate to a list of integers"))
        listComp_n = [] if tier < 3 else looper(lambda : (eval(eval(tiers['listComp_n'][2]))),
                       lambda list_: True if isinstance(list_, list) and all(isinstance(x, int) and x >= 1 for x in list_) else \
                       raiseEx("It should evaluate to a list of integers all >= 1")) #the first eval gets the comprehension, the second eval computes it.
        #these 3 are used to graph the function.
        global continuous_x #can be accessed by methods that need it, like simpson's rule
        lenContinuous = getvalue("lenContinuous")
        continuous_x = np.linspace(xmin, xmax, lenContinuous)
        continuous_y = f(continuous_x)
        dictMethod = {
            1: Riemann,
            2: Midpoint,
            3: Trapezoidal,
            4: Simpson,
        }
        exact, errorBound = quad(f, xmin, xmax)
        errorBounder = np.vectorize(lambda x: x if abs(x) > errorBound else 0)
        global exactMessage
        exactMessage = False
        numbers = looper(lambda: list(map(int, input("What methods would you like to use ? all methods called will be executed one after the other, the results will be displayed "
                                                     "at the end." + '\n' +
                                                     "1 for Riemann sums, 2 for midpoint rule, 3 for trapezoidal rule, 4 for Simpson's rule: ")
                                          .split(','))),
                         lambda values: True if all(isinstance(val, int) and 1 <= val <= 4 for val in values) else raiseEx("These should all be integers between 1 and 4"))
        for number in numbers:
            fig = plt.figure()
            if number == 1: # this function is a unique case.
                Riemann(fig, formula, f, xmin, xmax, boolLogX, boolLogY, listGraph_n, listComp_n, lenContinuous, exact, errorBound)
                fig.subplots_adjust(left=0.05, bottom=0.05, right=0.95, top=0.9, wspace=0.1, hspace=0.6)
                plt.draw()
                continue
            method = dictMethod[number](f, xmin, xmax, listGraph_n, listComp_n)
            fig.suptitle(f"Study of the approximation of the function y = {formula} on the interval ({xmin}, {xmax}) "
                         f"with {method.titleSpecs()} and of the quality of the approximations compared to the exact value")
            nbCol = ceil(len(listGraph_n) / 5)
            areaAxes = (fig.add_subplot(ceil(len(listGraph_n) / nbCol), 2 * nbCol, i) for i in
                        range(1, 2 * len(listGraph_n)) if 0 <= i % (2 * nbCol) - 1 < nbCol) # the left side of the figure, filled row by row.
            for i, ax in enumerate(areaAxes):
                method.graph(ax, i)
                ax.plot(continuous_x, continuous_y)
                if boolLogX:
                    ax.set_xscale('symlog', linthreshy=(xmax - xmin) / 10 * max(len(list_) for list_ in listGraph_n + listComp_n)) #shouldn't be visible, unless you're really
                    # picky about your graphs.
                if boolLogY:
                    ax.set_yscale('symlog', linthreshy=absGetNonZeroMin(method.listListGraph_y + method.listListComp_y))
                ax.set_title(f"{listGraph_n[i]} {method.shapeName()}")
                ax.grid(True)
            listDist = method.listDist(exact)
            accuracyAxes = [fig.add_subplot(2, 2, i) for i in (2, 4)]
            listDist = errorBounder(listDist)
            if 0 in listDist:
                exactMessage = True
            if exact == 0:
                titles = ("difference for each approximation compared to the exact value of the integral, 0",
                          "difference for each approximation compared to the exact value of the integral, 0, on a logarithmic scale")
            else:
                listDist = listDist * 100 / exact
                titles = (f"relative percentage error for each approximation compared to the exact integral: {niceStr(exact)}",
                          f"relative percentage error for each approximation compared to the exact integral: {niceStr(exact)}, on a logarithmic scale")
            #sorted for nicer graph: prevents line going back and forth by making it monotically increasing. The same sorting order is applied in each method
            listTot_n = list(sorted(listGraph_n + listComp_n))
            ax = accuracyAxes[0]
            ax.plot(listTot_n, listDist)
            for x, y in zip(listTot_n, listDist):
                ax.text(x, y, niceStr(y))
            ax.grid(True)
            ax.set_title(titles[0])
            ax = accuracyAxes[1]
            ax.plot(listTot_n, listDist)
            ax.set_xscale("log")
            ax.get_xaxis().set_tick_params(which='minor', size=0)
            ax.get_xaxis().set_tick_params(which='minor', width=0)
            ax.set_yscale("symlog", linthreshy=absGetNonZeroMin(listDist) * 0.9)
            good_ylim(ax, listDist) # sets the y limits to something a bit cleaner
            for x, y in zip(listTot_n, listDist):
                ax.text(x, y, niceStr(y))
            ax.set_title(titles[1])
            ax.grid(True, which='major')
            fig.subplots_adjust(left=0.05, bottom=0.05,right=0.95, top=0.9, wspace=0.1, hspace=0.5)
            plt.draw()
        if exactMessage:
            print(f"Some 0s are displayed in the accuracy check, however this does not mean necessarily mean the accuracy is perfect:\n"
                  f"the exact value is computed with a certain margin of error, here it is {niceStr(errorBound)}\n"
                  f"and any 0 displayed here means the inacurracy is less than this, and thus too small to be evaluated properly")
        plt.show()
# Script entry point: run the interactive loop only when executed directly.
if __name__ == '__main__':
    main()
| [
"matplotlib.legend_handler.HandlerTuple",
"numpy.polyfit",
"scipy.integrate.quad",
"scipy.integrate.simps",
"numpy.linspace",
"pyinputplus.inputInt",
"numpy.polyval",
"matplotlib.patches.Patch",
"numpy.concatenate",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.draw",
"warnings.filterwarnings... | [((710, 759), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {'category': 'np.RankWarning'}), "('ignore', category=np.RankWarning)\n", (724, 759), False, 'from warnings import filterwarnings\n'), ((1070, 1108), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'lenContinuous'], {}), '(xmin, xmax, lenContinuous)\n', (1081, 1108), True, 'import numpy as np\n'), ((1201, 1242), 'numpy.linspace', 'np.linspace', (['xmin', '(xmax - interval / n)', 'n'], {}), '(xmin, xmax - interval / n, n)\n', (1212, 1242), True, 'import numpy as np\n'), ((1367, 1408), 'numpy.linspace', 'np.linspace', (['xmin', '(xmax - interval / n)', 'n'], {}), '(xmin, xmax - interval / n, n)\n', (1378, 1408), True, 'import numpy as np\n'), ((1532, 1573), 'numpy.linspace', 'np.linspace', (['(xmin + interval / n)', 'xmax', 'n'], {}), '(xmin + interval / n, xmax, n)\n', (1543, 1573), True, 'import numpy as np\n'), ((1701, 1742), 'numpy.linspace', 'np.linspace', (['(xmin + interval / n)', 'xmax', 'n'], {}), '(xmin + interval / n, xmax, n)\n', (1712, 1742), True, 'import numpy as np\n'), ((6729, 6787), 'numpy.concatenate', 'np.concatenate', (['(listDistLeft, listDistMid, listDistRight)'], {}), '((listDistLeft, listDistMid, listDistRight))\n', (6743, 6787), True, 'import numpy as np\n'), ((7491, 7549), 'numpy.concatenate', 'np.concatenate', (['(listDistLeft, listDistMid, listDistRight)'], {}), '((listDistLeft, listDistMid, listDistRight))\n', (7505, 7549), True, 'import numpy as np\n'), ((7635, 7693), 'numpy.concatenate', 'np.concatenate', (['(listDistLeft, listDistMid, listDistRight)'], {}), '((listDistLeft, listDistMid, listDistRight))\n', (7649, 7693), True, 'import numpy as np\n'), ((14635, 14760), 'pyinputplus.inputInt', 'pyip.inputInt', (['"""How much customization do you want ? 0: stop, 1: minimum, 2: average, 3: advanced : """'], {'min': '(0)', 'max': '(3)'}), "(\n 'How much customization do you want ? 
0: stop, 1: minimum, 2: average, 3: advanced : '\n , min=0, max=3)\n", (14648, 14760), True, 'import pyinputplus as pyip\n'), ((17294, 17332), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'lenContinuous'], {}), '(xmin, xmax, lenContinuous)\n', (17305, 17332), True, 'import numpy as np\n'), ((17544, 17563), 'scipy.integrate.quad', 'quad', (['f', 'xmin', 'xmax'], {}), '(f, xmin, xmax)\n', (17548, 17563), False, 'from scipy.integrate import quad, simps\n'), ((22491, 22501), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22499, 22501), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1904), 'numpy.linspace', 'np.linspace', (['xmin', '(xmax - interval / n)', 'n'], {}), '(xmin, xmax - interval / n, n)\n', (1874, 1904), True, 'import numpy as np\n'), ((4314, 4352), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': 'color1', 'alpha': '(0.5)'}), '(color=color1, alpha=0.5)\n', (4327, 4352), False, 'from matplotlib import patches\n'), ((4354, 4392), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': 'color2', 'alpha': '(0.5)'}), '(color=color2, alpha=0.5)\n', (4367, 4392), False, 'from matplotlib import patches\n'), ((9093, 9123), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(n + 1)'], {}), '(xmin, xmax, n + 1)\n', (9104, 9123), True, 'import numpy as np\n'), ((9255, 9285), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(n + 1)'], {}), '(xmin, xmax, n + 1)\n', (9266, 9285), True, 'import numpy as np\n'), ((10299, 10333), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(2 * n + 1)'], {}), '(xmin, xmax, 2 * n + 1)\n', (10310, 10333), True, 'import numpy as np\n'), ((10488, 10522), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(2 * n + 1)'], {}), '(xmin, xmax, 2 * n + 1)\n', (10499, 10522), True, 'import numpy as np\n'), ((11708, 11737), 'numpy.polyfit', 'np.polyfit', (['x_data', 'y_data', '(2)'], {}), '(x_data, y_data, 2)\n', (11718, 11737), True, 'import numpy as np\n'), ((11867, 11891), 'numpy.polyval', 'np.polyval', 
(['poly', 'list_x'], {}), '(poly, list_x)\n', (11877, 11891), True, 'import numpy as np\n'), ((18399, 18411), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18409, 18411), True, 'import matplotlib.pyplot as plt\n'), ((22058, 22068), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (22066, 22068), True, 'import matplotlib.pyplot as plt\n'), ((4635, 4661), 'matplotlib.legend_handler.HandlerTuple', 'HandlerTuple', ([], {'ndivide': 'None'}), '(ndivide=None)\n', (4647, 4661), False, 'from matplotlib.legend_handler import HandlerTuple\n'), ((7969, 8015), 'numpy.linspace', 'np.linspace', (['xmin', '(xmax - self.interval / n)', 'n'], {}), '(xmin, xmax - self.interval / n, n)\n', (7980, 8015), True, 'import numpy as np\n'), ((8173, 8219), 'numpy.linspace', 'np.linspace', (['xmin', '(xmax - self.interval / n)', 'n'], {}), '(xmin, xmax - self.interval / n, n)\n', (8184, 8219), True, 'import numpy as np\n'), ((18731, 18741), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (18739, 18741), True, 'import matplotlib.pyplot as plt\n'), ((7405, 7463), 'numpy.concatenate', 'np.concatenate', (['(listDistLeft, listDistMid, listDistRight)'], {}), '((listDistLeft, listDistMid, listDistRight))\n', (7419, 7463), True, 'import numpy as np\n'), ((10781, 10802), 'scipy.integrate.simps', 'simps', (['list_y', 'list_x'], {}), '(list_y, list_x)\n', (10786, 10802), False, 'from scipy.integrate import quad, simps\n')] |
# coding: utf-8
import smtplib
# Three-line message body separated by bare newlines.
msg = "Line 1\nLine 2\nLine 3"
# Send through a mail server assumed to be listening on localhost
# (default SMTP port), then close the session with a clean QUIT.
server = smtplib.SMTP('localhost')
server.sendmail('<EMAIL>', '<EMAIL>', msg)
server.quit()
| [
"smtplib.SMTP"
] | [((73, 98), 'smtplib.SMTP', 'smtplib.SMTP', (['"""localhost"""'], {}), "('localhost')\n", (85, 98), False, 'import smtplib\n')] |
import os
import trimesh
import unittest
import pocketing
import numpy as np
def get_model(file_name):
    """
    Load a model from the models directory, expanding paths out.

    Parameters
    ------------
    file_name : str
      Name of a file inside `models`

    Returns
    ------------
    mesh : trimesh.Geometry
      Trimesh object or similar
    """
    here = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    model_path = os.path.abspath(os.path.join(here, '../models', file_name))
    return trimesh.load(model_path)
class PocketTest(unittest.TestCase):
    """Smoke tests for the pocketing toolpath generators."""

    def test_contour(self):
        """Contour-parallel paths from a polygon are all (n, 2) arrays."""
        poly = get_model('wrench.dxf').polygons_full[0]
        # generate tool paths
        toolpaths = pocketing.contour.contour_parallel(poly, .05)
        assert all(trimesh.util.is_shape(path, (-1, 2))
                   for path in toolpaths)

    def test_troch(self):
        """Trochoidal toolpath for a polygon is an (n, 2) array."""
        polygon = get_model('wrench.dxf').polygons_full[0]
        # radius chosen arbitrarily
        radius = .125
        # step at 10% of the tool radius
        step = radius * 0.10
        toolpath = pocketing.trochoidal.toolpath(
            polygon, step=step)
        assert trimesh.util.is_shape(toolpath, (-1, 2))

    def test_archimedian(self):
        """A simple archimedean spiral comes back as (n, 3, 2) arcs."""
        spiral = pocketing.spiral.archimedean(0.5, 2.0, 0.125)
        assert trimesh.util.is_shape(spiral, (-1, 3, 2))

    def test_helix(self):
        """3D helix: valid arc chain, correct heights, constant planar radius."""
        # derive dimensions from a tool radius
        tool_radius = 0.25
        radius = tool_radius * 1.2
        pitch = tool_radius * 0.3
        height = 2.0
        h = pocketing.spiral.helix(
            radius=radius,
            height=height,
            pitch=pitch,)
        # should be a chain of 3-point arcs
        check_arcs(h)
        # z must start at zero and finish at the requested height
        assert np.isclose(h[0][0][2], 0.0)
        assert np.isclose(h[-1][-1][2], height)
        # projected onto XY, every vertex sits on the requested radius
        flat_radii = np.linalg.norm(h.reshape((-1, 3))[:, :2], axis=1)
        assert np.allclose(flat_radii, radius)
def check_arcs(arcs):
    """Assert arcs is a stack of 2D or 3D 3-point arcs forming a connected chain."""
    assert trimesh.util.is_shape(arcs, (-1, 3, (3, 2)))
    # every arc must begin exactly where the previous one ended
    for prev, nxt in zip(arcs[:-1], arcs[1:]):
        assert np.allclose(prev[2], nxt[0])
# Run the unittest suite only when executed directly, not on import.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.allclose",
"pocketing.contour.contour_parallel",
"numpy.isclose",
"os.path.join",
"pocketing.spiral.archimedean",
"pocketing.trochoidal.toolpath",
"unittest.main",
"pocketing.spiral.helix",
"trimesh.util.is_shape",
"os.path.expanduser"
] | [((2261, 2305), 'trimesh.util.is_shape', 'trimesh.util.is_shape', (['arcs', '(-1, 3, (3, 2))'], {}), '(arcs, (-1, 3, (3, 2)))\n', (2282, 2305), False, 'import trimesh\n'), ((2473, 2488), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2486, 2488), False, 'import unittest\n'), ((732, 778), 'pocketing.contour.contour_parallel', 'pocketing.contour.contour_parallel', (['poly', '(0.05)'], {}), '(poly, 0.05)\n', (766, 778), False, 'import pocketing\n'), ((1155, 1204), 'pocketing.trochoidal.toolpath', 'pocketing.trochoidal.toolpath', (['polygon'], {'step': 'step'}), '(polygon, step=step)\n', (1184, 1204), False, 'import pocketing\n'), ((1234, 1274), 'trimesh.util.is_shape', 'trimesh.util.is_shape', (['toolpath', '(-1, 2)'], {}), '(toolpath, (-1, 2))\n', (1255, 1274), False, 'import trimesh\n'), ((1379, 1424), 'pocketing.spiral.archimedean', 'pocketing.spiral.archimedean', (['(0.5)', '(2.0)', '(0.125)'], {}), '(0.5, 2.0, 0.125)\n', (1407, 1424), False, 'import pocketing\n'), ((1440, 1481), 'trimesh.util.is_shape', 'trimesh.util.is_shape', (['spiral', '(-1, 3, 2)'], {}), '(spiral, (-1, 3, 2))\n', (1461, 1481), False, 'import trimesh\n'), ((1733, 1798), 'pocketing.spiral.helix', 'pocketing.spiral.helix', ([], {'radius': 'radius', 'height': 'height', 'pitch': 'pitch'}), '(radius=radius, height=height, pitch=pitch)\n', (1755, 1798), False, 'import pocketing\n'), ((1958, 1985), 'numpy.isclose', 'np.isclose', (['h[0][0][2]', '(0.0)'], {}), '(h[0][0][2], 0.0)\n', (1968, 1985), True, 'import numpy as np\n'), ((2001, 2033), 'numpy.isclose', 'np.isclose', (['h[-1][-1][2]', 'height'], {}), '(h[-1][-1][2], height)\n', (2011, 2033), True, 'import numpy as np\n'), ((2156, 2182), 'numpy.allclose', 'np.allclose', (['radii', 'radius'], {}), '(radii, radius)\n', (2167, 2182), True, 'import numpy as np\n'), ((2416, 2439), 'numpy.allclose', 'np.allclose', (['a[2]', 'b[0]'], {}), '(a[2], b[0])\n', (2427, 2439), True, 'import numpy as np\n'), ((414, 442), 'os.path.expanduser', 
'os.path.expanduser', (['__file__'], {}), '(__file__)\n', (432, 442), False, 'import os\n'), ((494, 535), 'os.path.join', 'os.path.join', (['pwd', '"""../models"""', 'file_name'], {}), "(pwd, '../models', file_name)\n", (506, 535), False, 'import os\n'), ((798, 831), 'trimesh.util.is_shape', 'trimesh.util.is_shape', (['i', '(-1, 2)'], {}), '(i, (-1, 2))\n', (819, 831), False, 'import trimesh\n')] |
# -*- coding: utf-8 -*-
"""
@File: evaluator.py
@Description: This is an application for evaluating clustering performance
on 20 newsgroup data.
@Author: <NAME>
@EMail: <EMAIL>
@Created_on: 04/05/2017
@python_version: 3.5
===============================================================================
"""
import os
import logging
# Set logging
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(name)s [%(levelname)s] %(message)s',)
log = logging.getLogger("ClusteringEvaluation")
# Global variables
# Label files: line i of TRUE_LABEL is the gold topic of document i,
# line i of PREDICTED_LABEL is its predicted cluster id ("0".."19").
TRUE_LABEL = os.path.join(os.getcwd(),
                          "output",
                          "clusters",
                          "PatentEmbedding.rclass")
PREDICTED_LABEL = os.path.join(os.getcwd(),
                               "output",
                               "clusters",
                               "PatentCluster")
# Predicted cluster id -> 20-newsgroup topic name.
# NOTE(review): clusters "5" and "6" both map to sci.crypt while
# talk.religion.misc never appears as a value — confirm this is intended.
SUBCLUSTER_MAP = {"0": "rec.sport.hockey",
                  "1": "rec.sport.baseball",
                  "2": "comp.sys.ibm.pc.hardware",
                  "3": "comp.windows.x",
                  "4": "misc.forsale",
                  "5": "sci.crypt",
                  "6": "sci.crypt",
                  "7": "talk.politics.guns",
                  "8": "comp.os.ms-windows.misc",
                  "9": "comp.sys.mac.hardware",
                  "10": "rec.autos",
                  "11": "rec.motorcycles",
                  "12": "soc.religion.christian",
                  "13": "sci.med",
                  "14": "talk.politics.mideast",
                  "15": "comp.graphics",
                  "16": "alt.atheism",
                  "17": "sci.electronics",
                  "18": "sci.space",
                  "19": "talk.politics.misc"}
# Topic name -> coarse super-cluster id (comp=0, rec=1, sci=2, forsale=3,
# politics=4, religion=5).
SUPERCLUSTER_MAP = {"comp.sys.ibm.pc.hardware": 0,
                    "comp.windows.x": 0,
                    "comp.os.ms-windows.misc": 0,
                    "comp.sys.mac.hardware": 0,
                    "comp.graphics": 0,
                    "rec.sport.hockey": 1,
                    "rec.sport.baseball": 1,
                    "rec.autos": 1,
                    "rec.motorcycles": 1,
                    "sci.crypt": 2,
                    "sci.med": 2,
                    "sci.space": 2,
                    "sci.electronics": 2,
                    "misc.forsale": 3,
                    "talk.politics.guns": 4,
                    "talk.politics.mideast": 4,
                    "talk.politics.misc": 4,
                    "soc.religion.christian": 5,
                    "alt.atheism": 5,
                    "talk.religion.misc": 5}
def main():
    """Compute and log purity statistics for the 20-newsgroup clustering.

    Reads one gold label per line from TRUE_LABEL and one predicted
    cluster id per line from PREDICTED_LABEL (aligned by line index),
    then logs per-cluster, overall and combined purity for both the 20
    sub-clusters and the 6 super-clusters.
    """
    log.info("Performing clustering evaluation")
    # Per-cluster triplets: [<True Predictions>, <Total Predictions>, <Purity>]
    subCluster = {str(i): [0, 0, 0] for i in range(20)}
    superCluster = {str(i): [0, 0, 0] for i in range(20)}
    with open(TRUE_LABEL, "r") as t, open(PREDICTED_LABEL, "r") as p:
        trueLabels = [line.strip() for line in t]
        predictedLabels = [line.strip() for line in p]
    # Count true and total predictions per cluster. zip() also guards
    # against label files of unequal length (the original indexed loop
    # raised IndexError in that case).
    for trueLabel, predictedLabel in zip(trueLabels, predictedLabels):
        if trueLabel == SUBCLUSTER_MAP[predictedLabel]:
            subCluster[predictedLabel][0] += 1
        subCluster[predictedLabel][1] += 1
        if (SUPERCLUSTER_MAP[trueLabel] ==
                SUPERCLUSTER_MAP[SUBCLUSTER_MAP[predictedLabel]]):
            superCluster[predictedLabel][0] += 1
        superCluster[predictedLabel][1] += 1
    # Purity per cluster; guarded so an empty cluster no longer raises
    # ZeroDivisionError as the original unconditional division did.
    for stats in (subCluster, superCluster):
        for triplet in stats.values():
            triplet[2] = _purity(triplet[0], triplet[1])
    _log_per_cluster("Sub-cluster evaluation: ", subCluster)
    _log_per_cluster("Super-cluster evaluation: ", superCluster)
    # Overall statistics for sub-clusters and super-clusters
    overallStatistics = {
        "SubCluster": _overall(subCluster),
        "SuperCluster": _overall(superCluster),
    }
    _log_overall("Overall statistics for sub-clusters: ",
                 overallStatistics["SubCluster"])
    _log_overall("Overall statistics for super-clusters: ",
                 overallStatistics["SuperCluster"])
    # Combined overall statistics for both sub-clusters and super-clusters
    truePredictions = sum(s[0] for s in overallStatistics.values())
    totalPredictions = sum(s[1] for s in overallStatistics.values())
    log.info("Combined overall statistics for both sub-clusters and super-clusters: ")
    log.info("True Predictions\tTotal Predictions\tPurity")
    log.info("%d\t\t%d\t\t\t%f",
             truePredictions,
             totalPredictions,
             _purity(truePredictions, totalPredictions))


def _purity(true_predictions, total_predictions):
    """Fraction of correct predictions; 0.0 for an empty cluster."""
    if total_predictions == 0:
        return 0.0
    return float(true_predictions) / total_predictions


def _log_per_cluster(header, stats):
    """Log the [true, total, purity] triplet of each of the 20 clusters."""
    log.info(header)
    log.info("True Predictions\tTotal Predictions\tPurity")
    for i in range(20):
        triplet = stats[str(i)]
        log.info("%d\t\t\t%d\t\t\t%f", triplet[0], triplet[1], triplet[2])


def _overall(stats):
    """Aggregate per-cluster triplets into a single [true, total, purity]."""
    true_predictions = sum(triplet[0] for triplet in stats.values())
    total_predictions = sum(triplet[1] for triplet in stats.values())
    return [true_predictions, total_predictions,
            _purity(true_predictions, total_predictions)]


def _log_overall(header, triplet):
    """Log one aggregated [true, total, purity] triplet."""
    log.info(header)
    log.info("True Predictions\tTotal Predictions\tPurity")
    log.info("%d\t\t%d\t\t\t%f", triplet[0], triplet[1], triplet[2])
# Run the evaluation only when executed directly, not on import.
if __name__ == "__main__":
    main()
| [
"logging.basicConfig",
"logging.getLogger",
"os.getcwd"
] | [((401, 504), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(name)s [%(levelname)s] %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(name)s [%(levelname)s] %(message)s')\n", (420, 504), False, 'import logging\n'), ((527, 568), 'logging.getLogger', 'logging.getLogger', (['"""ClusteringEvaluation"""'], {}), "('ClusteringEvaluation')\n", (544, 568), False, 'import logging\n'), ((616, 627), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (625, 627), False, 'import os\n'), ((786, 797), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (795, 797), False, 'import os\n')] |
import bisect
from collections import deque
from copy import deepcopy
from fractions import Fraction
from functools import reduce
import heapq as hq
import io
from itertools import combinations, permutations
import math
from math import factorial
import re
import sys
sys.setrecursionlimit(10000)
#from numba import njit
import numpy as np
# Sample stdin payload for local testing: "N M" on the first line, then
# M groups of a count line followed by a values line.
_INPUT_1 = """\
2 2
2
1 2
2
1 2
"""
# Same shape as _INPUT_1, but the second group's values are reversed.
_INPUT_2 = """\
2 2
2
1 2
2
2 1
"""
def solve():
    """Read M groups (a count line then a values line) from stdin and print them."""
    _, group_count = (int(tok) for tok in input().split())
    groups = []
    for _ in range(group_count):
        int(input())  # per-group element count: consumed but unused
        groups.append([int(tok) for tok in input().split()])
    print(groups)
# Local-test harness: when not running as the judge's Main.py, feed each
# defined _INPUT_* constant to solve() through a fake stdin; otherwise
# read the real stdin once.
if __file__ != './Main.py':
    if '_INPUT_1' in globals():
        sys.stdin = io.StringIO(_INPUT_1)
        solve()
    if '_INPUT_2' in globals():
        sys.stdin = io.StringIO(_INPUT_2)
        solve()
    # _INPUT_3 is not defined above; the globals() check keeps this safe
    if '_INPUT_3' in globals():
        sys.stdin = io.StringIO(_INPUT_3)
        solve()
else:
    solve()
| [
"sys.setrecursionlimit",
"io.StringIO"
] | [((268, 296), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), '(10000)\n', (289, 296), False, 'import sys\n'), ((696, 717), 'io.StringIO', 'io.StringIO', (['_INPUT_1'], {}), '(_INPUT_1)\n', (707, 717), False, 'import io\n'), ((786, 807), 'io.StringIO', 'io.StringIO', (['_INPUT_2'], {}), '(_INPUT_2)\n', (797, 807), False, 'import io\n'), ((876, 897), 'io.StringIO', 'io.StringIO', (['_INPUT_3'], {}), '(_INPUT_3)\n', (887, 897), False, 'import io\n')] |
# from django.core.cache import cache
from django.contrib.auth.models import User
from django.db.models.signals import post_delete, post_save, pre_delete
from django.dispatch import receiver
from . import tasks, models
from network.management.commands.update_dendrogram import \
call_update_dendrogram
from network.tasks.analysis.network import update_organism_network
# Note: post save experiment actions are relegated to tasks executed from
# forms and management commands.
@receiver(post_save, sender=models.Follow)
def follow_post_save_update(sender, instance, created, **kwargs):
if created:
lock = tasks.locks.UserRecUpdateQueueLock(
instance.following, instance.followed)
if lock.add():
(tasks.recommendations
.update_user_recommendations
.si(instance.following.pk, instance.followed.pk).delay())
models.Activity.objects.get_or_create(
user=instance.following,
followed_user=instance.followed,
)
@receiver(pre_delete, sender=models.Follow)
def follow_pre_delete_update(sender, instance, **kwargs):
lock = tasks.locks.UserRecUpdateQueueLock(
instance.following, instance.followed)
if lock.add():
(tasks.recommendations
.update_user_recommendations
.si(instance.following.pk, instance.followed.pk).delay())
try:
models.Activity.objects.filter(
user=instance.following,
followed_user=instance.followed,
).delete()
except models.Activity.DoesNotExist:
pass
@receiver(post_save, sender=models.Favorite)
def favorite_post_save_update(sender, instance, created, **kwargs):
if created:
lock = tasks.locks.ExpRecUpdateQueueLock(instance.experiment)
if lock.add():
(tasks.recommendations
.update_experiment_recommendations
.si(instance.experiment.pk).delay())
models.Activity.objects.get_or_create(
user=instance.user,
favorited_experiment=instance.experiment,
)
@receiver(pre_delete, sender=models.Favorite)
def favorite_pre_delete_update(sender, instance, **kwargs):
lock = tasks.locks.ExpRecUpdateQueueLock(instance.experiment)
if lock.add():
(tasks.recommendations
.update_experiment_recommendations
.si(instance.experiment.pk).delay())
try:
models.Activity.objects.filter(
user=instance.user,
favorited_experiment=instance.experiment,
).delete()
except models.Activity.DoesNotExist:
pass
@receiver(pre_delete, sender=User)
def user_pre_delete(sender, instance, **kwargs):
my_user = models.MyUser.objects.get(user=instance)
my_user.delete()
@receiver(post_delete, sender=models.Experiment)
def experiment_post_delete(sender, instance, **kwargs):
# if instance.project.name in ['ENCODE']:
# update_organism_network.si(
# instance.organism.pk,
# instance.experiment_type.pk,
# ).delay()
# call_update_dendrogram.si(
# instance.organism.pk,
# instance.experiment_type.pk,
# ).delay()
for my_user in models.MyUser.objects.filter(experiment=instance):
update_organism_network.si(
instance.organism.pk,
instance.experiment_type.pk,
my_user_pk=my_user.pk,
).delay()
call_update_dendrogram.si(
instance.organism.pk,
instance.experiment_type.pk,
my_user_pk=my_user.pk,
).delay()
| [
"django.dispatch.receiver",
"network.management.commands.update_dendrogram.call_update_dendrogram.si",
"network.tasks.analysis.network.update_organism_network.si"
] | [((485, 526), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'models.Follow'}), '(post_save, sender=models.Follow)\n', (493, 526), False, 'from django.dispatch import receiver\n'), ((1035, 1077), 'django.dispatch.receiver', 'receiver', (['pre_delete'], {'sender': 'models.Follow'}), '(pre_delete, sender=models.Follow)\n', (1043, 1077), False, 'from django.dispatch import receiver\n'), ((1603, 1646), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'models.Favorite'}), '(post_save, sender=models.Favorite)\n', (1611, 1646), False, 'from django.dispatch import receiver\n'), ((2114, 2158), 'django.dispatch.receiver', 'receiver', (['pre_delete'], {'sender': 'models.Favorite'}), '(pre_delete, sender=models.Favorite)\n', (2122, 2158), False, 'from django.dispatch import receiver\n'), ((2647, 2680), 'django.dispatch.receiver', 'receiver', (['pre_delete'], {'sender': 'User'}), '(pre_delete, sender=User)\n', (2655, 2680), False, 'from django.dispatch import receiver\n'), ((2809, 2856), 'django.dispatch.receiver', 'receiver', (['post_delete'], {'sender': 'models.Experiment'}), '(post_delete, sender=models.Experiment)\n', (2817, 2856), False, 'from django.dispatch import receiver\n'), ((3314, 3419), 'network.tasks.analysis.network.update_organism_network.si', 'update_organism_network.si', (['instance.organism.pk', 'instance.experiment_type.pk'], {'my_user_pk': 'my_user.pk'}), '(instance.organism.pk, instance.experiment_type.\n pk, my_user_pk=my_user.pk)\n', (3340, 3419), False, 'from network.tasks.analysis.network import update_organism_network\n'), ((3478, 3581), 'network.management.commands.update_dendrogram.call_update_dendrogram.si', 'call_update_dendrogram.si', (['instance.organism.pk', 'instance.experiment_type.pk'], {'my_user_pk': 'my_user.pk'}), '(instance.organism.pk, instance.experiment_type.pk,\n my_user_pk=my_user.pk)\n', (3503, 3581), False, 'from network.management.commands.update_dendrogram import 
call_update_dendrogram\n')] |
import torch
from torch import nn
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Attention(nn.Module):
"""
Attention Network.
"""
def __init__(self, encoder_dim, decoder_dim, attention_dim):
"""
:param encoder_dim: feature size of encoded images
:param decoder_dim: size of decoder's RNN
:param attention_dim: size of the attention network
"""
super(Attention, self).__init__()
self.encoder_att = nn.Linear(encoder_dim, attention_dim) # linear layer to transform encoded image
self.decoder_att = nn.Linear(decoder_dim, attention_dim) # linear layer to transform decoder's output
self.full_att = nn.Linear(attention_dim, 1) # linear layer to calculate values to be softmax-ed
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1) # softmax layer to calculate weights
def forward(self, encoder_out, decoder_hidden):
"""
Forward propagation.
:param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)
:param decoder_hidden: previous decoder output, a tensor of dimension (batch_size, decoder_dim)
:return: attention weighted encoding, weights
"""
att1 = self.encoder_att(encoder_out) # (batch_size, num_pixels, attention_dim)
att2 = self.decoder_att(decoder_hidden) # (batch_size, attention_dim)
att = self.full_att(self.relu(att1 + att2.unsqueeze(1))).squeeze(2) # (batch_size, num_pixels)
alpha = self.softmax(att) # (batch_size, num_pixels)
attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(dim=1) # (batch_size, encoder_dim)
return attention_weighted_encoding, alpha | [
"torch.nn.Softmax",
"torch.nn.ReLU",
"torch.cuda.is_available",
"torch.nn.Linear"
] | [((67, 92), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (90, 92), False, 'import torch\n'), ((501, 538), 'torch.nn.Linear', 'nn.Linear', (['encoder_dim', 'attention_dim'], {}), '(encoder_dim, attention_dim)\n', (510, 538), False, 'from torch import nn\n'), ((609, 646), 'torch.nn.Linear', 'nn.Linear', (['decoder_dim', 'attention_dim'], {}), '(decoder_dim, attention_dim)\n', (618, 646), False, 'from torch import nn\n'), ((717, 744), 'torch.nn.Linear', 'nn.Linear', (['attention_dim', '(1)'], {}), '(attention_dim, 1)\n', (726, 744), False, 'from torch import nn\n'), ((818, 827), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (825, 827), False, 'from torch import nn\n'), ((851, 868), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (861, 868), False, 'from torch import nn\n')] |
import gym
import random
import numpy as np
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from statistics import median, mean
from collections import Counter
LR = 1e-3
env = gym.make('CartPole-v0')
env.reset()
goal_steps = 500
score_requirement = 50
initial_games = 10000
def some_random_games_first():
for _ in range(5):
for t in range(200):
env.reset()
env.render()
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
break
some_random_games_first()
def generate_traning_data():
training_data = []
scores = []
accepted_scores = []
for _ in range(initial_games):
score = 0
game_memory = []
prev_observation = []
for _ in range(goal_steps):
action = random.randrange(0, 2)
observation, reward, done, info = env.step(action)
if len(prev_observation) > 0:
game_memory.append([prev_observation, action])
prev_observation = observation
score += reward
if done:
break
if score > score_requirement:
accepted_scores.append(score)
for data in game_memory:
if data[1] == 1:
output = [0, 1]
elif data[1] == 0:
output = [1, 0]
training_data.append([data[0], output])
env.reset()
scores.append(score)
training_data_save = np.array(training_data)
np.save('saved.npy', training_data_save)
print('Avg score: ', mean(accepted_scores))
print('Median score: ', median(accepted_scores))
print(Counter(accepted_scores))
return training_data
def neural_network_model(input_size):
network = input_data(shape=[None, input_size, 1], name='input')
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 256, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 128, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 2, activation='softmax')
network = regression(
network,
optimizer='adam',
learning_rate=LR,
loss='categorical_crossentropy',
name='targets')
model = tflearn.DNN(network, tensorboard_dir='log')
return model
def train_model(training_data, model=False):
x = np.array([i[0] for i in training_data]).reshape(-1, len(training_data[0][0]), 1)
y = [i[1] for i in training_data]
if not model:
model = neural_network_model(input_size=len(x[0]))
model.fit(
{'input': x},
{'targets': y},
n_epoch=5,
snapshot_step=500,
show_metric=True,
run_id='openai_learning'
)
return model
training_data = generate_traning_data()
model = train_model(training_data)
scores = []
choices = []
for each_game in range(10):
score = 0
game_memory = []
prev_obs = []
env.reset()
for _ in range(goal_steps):
env.render()
if len(prev_obs) == 0:
action = random.randrange(0, 2)
else:
action = np.argmax(model.predict(prev_obs.reshape(-1, len(prev_obs), 1))[0])
choices.append(action)
new_observation, reward, done, info = env.step(action)
prev_obs = new_observation
game_memory.append([new_observation, action])
score += reward
if done:
break
scores.append(score)
print('Avg score:', sum(scores) / len(scores))
print('choice 1:{} choice 0:{}'.format(choices.count(1) / len(choices), choices.count(0) / len(choices)))
print(score_requirement)
env.close()
| [
"statistics.mean",
"tflearn.layers.core.dropout",
"tflearn.layers.core.fully_connected",
"random.randrange",
"tflearn.DNN",
"statistics.median",
"numpy.array",
"collections.Counter",
"tflearn.layers.core.input_data",
"tflearn.layers.estimator.regression",
"gym.make",
"numpy.save"
] | [((261, 284), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (269, 284), False, 'import gym\n'), ((1632, 1655), 'numpy.array', 'np.array', (['training_data'], {}), '(training_data)\n', (1640, 1655), True, 'import numpy as np\n'), ((1660, 1700), 'numpy.save', 'np.save', (['"""saved.npy"""', 'training_data_save'], {}), "('saved.npy', training_data_save)\n", (1667, 1700), True, 'import numpy as np\n'), ((1919, 1972), 'tflearn.layers.core.input_data', 'input_data', ([], {'shape': '[None, input_size, 1]', 'name': '"""input"""'}), "(shape=[None, input_size, 1], name='input')\n", (1929, 1972), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((1987, 2035), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(128)'], {'activation': '"""relu"""'}), "(network, 128, activation='relu')\n", (2002, 2035), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2050, 2071), 'tflearn.layers.core.dropout', 'dropout', (['network', '(0.8)'], {}), '(network, 0.8)\n', (2057, 2071), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2086, 2134), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(256)'], {'activation': '"""relu"""'}), "(network, 256, activation='relu')\n", (2101, 2134), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2149, 2170), 'tflearn.layers.core.dropout', 'dropout', (['network', '(0.8)'], {}), '(network, 0.8)\n', (2156, 2170), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2185, 2233), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(512)'], {'activation': '"""relu"""'}), "(network, 512, activation='relu')\n", (2200, 2233), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2248, 2269), 'tflearn.layers.core.dropout', 'dropout', (['network', '(0.8)'], {}), '(network, 0.8)\n', 
(2255, 2269), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2284, 2332), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(256)'], {'activation': '"""relu"""'}), "(network, 256, activation='relu')\n", (2299, 2332), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2347, 2368), 'tflearn.layers.core.dropout', 'dropout', (['network', '(0.8)'], {}), '(network, 0.8)\n', (2354, 2368), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2383, 2431), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(128)'], {'activation': '"""relu"""'}), "(network, 128, activation='relu')\n", (2398, 2431), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2446, 2467), 'tflearn.layers.core.dropout', 'dropout', (['network', '(0.8)'], {}), '(network, 0.8)\n', (2453, 2467), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2482, 2531), 'tflearn.layers.core.fully_connected', 'fully_connected', (['network', '(2)'], {'activation': '"""softmax"""'}), "(network, 2, activation='softmax')\n", (2497, 2531), False, 'from tflearn.layers.core import input_data, dropout, fully_connected\n'), ((2546, 2655), 'tflearn.layers.estimator.regression', 'regression', (['network'], {'optimizer': '"""adam"""', 'learning_rate': 'LR', 'loss': '"""categorical_crossentropy"""', 'name': '"""targets"""'}), "(network, optimizer='adam', learning_rate=LR, loss=\n 'categorical_crossentropy', name='targets')\n", (2556, 2655), False, 'from tflearn.layers.estimator import regression\n'), ((2704, 2747), 'tflearn.DNN', 'tflearn.DNN', (['network'], {'tensorboard_dir': '"""log"""'}), "(network, tensorboard_dir='log')\n", (2715, 2747), False, 'import tflearn\n'), ((1727, 1748), 'statistics.mean', 'mean', (['accepted_scores'], {}), '(accepted_scores)\n', (1731, 1748), False, 'from statistics import median, mean\n'), 
((1778, 1801), 'statistics.median', 'median', (['accepted_scores'], {}), '(accepted_scores)\n', (1784, 1801), False, 'from statistics import median, mean\n'), ((1813, 1837), 'collections.Counter', 'Counter', (['accepted_scores'], {}), '(accepted_scores)\n', (1820, 1837), False, 'from collections import Counter\n'), ((937, 959), 'random.randrange', 'random.randrange', (['(0)', '(2)'], {}), '(0, 2)\n', (953, 959), False, 'import random\n'), ((2821, 2860), 'numpy.array', 'np.array', (['[i[0] for i in training_data]'], {}), '([i[0] for i in training_data])\n', (2829, 2860), True, 'import numpy as np\n'), ((3512, 3534), 'random.randrange', 'random.randrange', (['(0)', '(2)'], {}), '(0, 2)\n', (3528, 3534), False, 'import random\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import mptt.fields
import tree_app.ltreefield
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.RunSQL(
"""
CREATE EXTENSION ltree;
"""
),
migrations.CreateModel(
name='Ltree',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('path', tree_app.ltreefield.LtreeField()),
('type', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Mptt',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=20)),
('lft', models.PositiveIntegerField(editable=False, db_index=True)),
('rght', models.PositiveIntegerField(editable=False, db_index=True)),
('tree_id', models.PositiveIntegerField(editable=False, db_index=True)),
('level', models.PositiveIntegerField(editable=False, db_index=True)),
('parent', mptt.fields.TreeForeignKey(related_name='children', blank=True, to='tree_app.Mptt', null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Raw',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=20)),
('parent', models.ForeignKey(related_name='children', blank=True, to='tree_app.Raw', null=True)),
],
),
]
| [
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.CharField",
"django.db.migrations.RunSQL"
] | [((249, 323), 'django.db.migrations.RunSQL', 'migrations.RunSQL', (['"""\n CREATE EXTENSION ltree;\n """'], {}), '("""\n CREATE EXTENSION ltree;\n """)\n', (266, 323), False, 'from django.db import migrations, models\n'), ((449, 542), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (465, 542), False, 'from django.db import migrations, models\n'), ((626, 657), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (642, 657), False, 'from django.db import migrations, models\n'), ((787, 880), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (803, 880), False, 'from django.db import migrations, models\n'), ((904, 935), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (920, 935), False, 'from django.db import migrations, models\n'), ((962, 1020), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'editable': '(False)', 'db_index': '(True)'}), '(editable=False, db_index=True)\n', (989, 1020), False, 'from django.db import migrations, models\n'), ((1048, 1106), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'editable': '(False)', 'db_index': '(True)'}), '(editable=False, db_index=True)\n', (1075, 1106), False, 'from django.db import migrations, models\n'), ((1137, 1195), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'editable': '(False)', 'db_index': '(True)'}), '(editable=False, db_index=True)\n', (1164, 1195), False, 'from django.db import migrations, models\n'), ((1224, 1282), 
'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'editable': '(False)', 'db_index': '(True)'}), '(editable=False, db_index=True)\n', (1251, 1282), False, 'from django.db import migrations, models\n'), ((1607, 1700), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (1623, 1700), False, 'from django.db import migrations, models\n'), ((1724, 1755), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)'}), '(max_length=20)\n', (1740, 1755), False, 'from django.db import migrations, models\n'), ((1785, 1873), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'related_name': '"""children"""', 'blank': '(True)', 'to': '"""tree_app.Raw"""', 'null': '(True)'}), "(related_name='children', blank=True, to='tree_app.Raw',\n null=True)\n", (1802, 1873), False, 'from django.db import migrations, models\n')] |
from application.core.entity.account import Account
from application.core.port.create_account_port import CreateAccountPort
class AccountFactory(CreateAccountPort):
def create_account(self, payload: dict) -> Account:
return Account(**payload)
| [
"application.core.entity.account.Account"
] | [((239, 257), 'application.core.entity.account.Account', 'Account', ([], {}), '(**payload)\n', (246, 257), False, 'from application.core.entity.account import Account\n')] |
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2021
""" Module to assist in the data collection about the google cloud run service revision instance entity """
import os
from ....log import logger
from instana.collector.helpers.base import BaseHelper
from ....util import DictionaryOfStan
class InstanceEntityHelper(BaseHelper):
""" This class helps in collecting data about the google cloud run service revision instance entity """
def collect_metrics(self, **kwargs):
"""
Collect and return metrics data (and optionally snapshot data) for this task
@return: list - with one plugin entity
"""
plugins = []
plugin_data = dict()
instance_metadata = kwargs.get('instance_metadata', {})
project_metadata = kwargs.get('project_metadata', {})
try:
plugin_data["name"] = "com.instana.plugin.gcp.run.revision.instance"
plugin_data["entityId"] = instance_metadata.get("id")
plugin_data["data"] = DictionaryOfStan()
plugin_data["data"]["runtime"] = "python"
plugin_data["data"]["region"] = instance_metadata.get("region").split("/")[-1]
plugin_data["data"]["service"] = self.collector.service
plugin_data["data"]["configuration"] = self.collector.configuration
plugin_data["data"]["revision"] = self.collector.revision
plugin_data["data"]["instanceId"] = plugin_data["entityId"]
plugin_data["data"]["port"] = os.getenv("PORT", "")
plugin_data["data"]["numericProjectId"] = project_metadata.get("numericProjectId")
plugin_data["data"]["projectId"] = project_metadata.get("projectId")
except Exception:
logger.debug("collect_service_revision_entity_metrics: ", exc_info=True)
finally:
plugins.append(plugin_data)
return plugins
| [
"os.getenv"
] | [((1518, 1539), 'os.getenv', 'os.getenv', (['"""PORT"""', '""""""'], {}), "('PORT', '')\n", (1527, 1539), False, 'import os\n')] |
from django.shortcuts import render, redirect, get_object_or_404
from .forms import UrlForm
from .models import Url
from django.contrib import messages
def index(req):
if(req.method == "POST"):
form = UrlForm(req.POST)
if form.is_valid():
url, created = Url.objects.get_or_create(long_url = form.cleaned_data["long_url"])
path = req.build_absolute_uri()
full_short_url = path + url.short_url
messages.success(req, full_short_url)
return redirect('/')
else:
form = UrlForm()
return render(req, 'urls/index.html', {'form': form })
def redirect_user(req, short_url):
url = get_object_or_404(Url, short_url=short_url)
long_url = url.long_url
return redirect(long_url)
| [
"django.shortcuts.render",
"django.shortcuts.redirect",
"django.shortcuts.get_object_or_404",
"django.contrib.messages.success"
] | [((587, 633), 'django.shortcuts.render', 'render', (['req', '"""urls/index.html"""', "{'form': form}"], {}), "(req, 'urls/index.html', {'form': form})\n", (593, 633), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((683, 726), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Url'], {'short_url': 'short_url'}), '(Url, short_url=short_url)\n', (700, 726), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((766, 784), 'django.shortcuts.redirect', 'redirect', (['long_url'], {}), '(long_url)\n', (774, 784), False, 'from django.shortcuts import render, redirect, get_object_or_404\n'), ((464, 501), 'django.contrib.messages.success', 'messages.success', (['req', 'full_short_url'], {}), '(req, full_short_url)\n', (480, 501), False, 'from django.contrib import messages\n'), ((521, 534), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (529, 534), False, 'from django.shortcuts import render, redirect, get_object_or_404\n')] |
from django.shortcuts import render
def post_login_view(request):
user = request.user
return render(request, 'users/post-login.html', {
'user': user,
})
| [
"django.shortcuts.render"
] | [((104, 160), 'django.shortcuts.render', 'render', (['request', '"""users/post-login.html"""', "{'user': user}"], {}), "(request, 'users/post-login.html', {'user': user})\n", (110, 160), False, 'from django.shortcuts import render\n')] |
# 3rd party import
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import dtypes
# stdlib import
# module import
import model_utils
def UQP_nce_loss(model, user_idxs, query_word_idxs, product_idxs, word_idxs):
"""
Args:
model: (BasicModel)
user_idxs: (tf.int32 with Shape: [batch_size, 1])
query_word_idxs: (tf.int32 with Shape: [batch_size, max_query_length])
product_idxs: (tf.int32 with Shape: [batch_size, 1])
word_idxs: (tf,int32 with Shape: [batch_size, 1])
Return:
UQP loss: (tf.float) See paper: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. 2017.
Learning a Hierarchical Embedding Model for Personalized ProductSearch. In Proceedings of SIGIR ’17
"""
batch_size = array_ops.shape(word_idxs)[0]#get batch_size
loss = None
regularization_terms = []
#word prediction loss
uw_loss, uw_embs = single_nce_loss(model,user_idxs, model.user_emb, word_idxs, model.word_emb,
model.word_bias, model.vocab_size, model.vocab_distribute)
pw_loss, pw_embs = single_nce_loss(model,product_idxs, model.product_emb, word_idxs, model.word_emb,
model.word_bias, model.vocab_size, model.vocab_distribute)
loss = uw_loss + pw_loss
regularization_terms += uw_embs + pw_embs
# get query vector
query_vec, qw_embs = model_utils.get_query_embedding(model, query_word_idxs, None)
regularization_terms += qw_embs
#product prediction loss
uqr_loss, uqr_embs = pair_search_loss(model, query_vec, user_idxs, model.user_emb, product_idxs, model.product_emb,
model.product_bias, model.product_size, model.product_distribute)
regularization_terms += uqr_embs
loss += uqr_loss
# L2 regularization
if model.hparams.L2_lambda > 0:
l2_loss = tf.nn.l2_loss(regularization_terms[0])
for i in range(1,len(regularization_terms)):
l2_loss += tf.nn.l2_loss(regularization_terms[i])
loss += model.hparams.L2_lambda * l2_loss
return loss / math_ops.cast(batch_size, dtypes.float32)
def pair_search_loss(model, query_vec, example_idxs, example_emb, label_idxs, label_emb, label_bias, label_size, label_distribution):
batch_size = array_ops.shape(example_idxs)[0]#get batch_size
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(tf.cast(label_idxs,dtype=tf.int64),[batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=model.negative_sample,
unique=False,
range_max=label_size,
distortion=0.75,
unigrams=label_distribution))
#get example embeddings [batch_size, embed_size]
example_vec = tf.nn.embedding_lookup(example_emb, example_idxs) * (1-model.Wu) + query_vec * model.Wu
#get label embeddings and bias [batch_size, embed_size], [batch_size, 1]
true_w = tf.nn.embedding_lookup(label_emb, label_idxs)
true_b = tf.nn.embedding_lookup(label_bias, label_idxs)
#get sampled embeddings and bias [num_sampled, embed_size], [num_sampled, 1]
sampled_w = tf.nn.embedding_lookup(label_emb, sampled_ids)
sampled_b = tf.nn.embedding_lookup(label_bias, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.multiply(example_vec, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
# We replicate sampled noise lables for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [model.negative_sample])
sampled_logits = tf.matmul(example_vec, sampled_w, transpose_b=True) + sampled_b_vec
return nce_loss(model, true_logits, sampled_logits), [example_vec, true_w, sampled_w]
def single_nce_loss(model, example_idxs, example_emb, label_idxs, label_emb, label_bias, label_size, label_distribution):
batch_size = array_ops.shape(example_idxs)[0]#get batch_size
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(tf.cast(label_idxs,dtype=tf.int64),[batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=model.negative_sample,
unique=False,
range_max=label_size,
distortion=0.75,
unigrams=label_distribution))
#get example embeddings [batch_size, embed_size]
example_vec = tf.nn.embedding_lookup(example_emb, example_idxs)
#get label embeddings and bias [batch_size, embed_size], [batch_size, 1]
true_w = tf.nn.embedding_lookup(label_emb, label_idxs)
true_b = tf.nn.embedding_lookup(label_bias, label_idxs)
#get sampled embeddings and bias [num_sampled, embed_size], [num_sampled, 1]
sampled_w = tf.nn.embedding_lookup(label_emb, sampled_ids)
sampled_b = tf.nn.embedding_lookup(label_bias, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.multiply(example_vec, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
# We replicate sampled noise lables for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [model.negative_sample])
sampled_logits = tf.matmul(example_vec, sampled_w, transpose_b=True) + sampled_b_vec
return nce_loss(model, true_logits, sampled_logits), [example_vec, true_w, sampled_w]
#return model.nce_loss(true_logits, true_logits)
def nce_loss(model, true_logits, sampled_logits):
"Build the graph for the NCE loss."
# cross-entropy(logits, labels)
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
logits=true_logits, labels=tf.ones_like(true_logits))
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
logits=sampled_logits, labels=tf.zeros_like(sampled_logits))
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) + tf.reduce_sum(sampled_xent))
return nce_loss_tensor
| [
"tensorflow.nn.embedding_lookup",
"tensorflow.reduce_sum",
"tensorflow.multiply",
"tensorflow.nn.l2_loss",
"tensorflow.python.ops.array_ops.shape",
"model_utils.get_query_embedding",
"tensorflow.nn.fixed_unigram_candidate_sampler",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.zeros_like"... | [((1414, 1475), 'model_utils.get_query_embedding', 'model_utils.get_query_embedding', (['model', 'query_word_idxs', 'None'], {}), '(model, query_word_idxs, None)\n', (1445, 1475), False, 'import model_utils\n'), ((2464, 2668), 'tensorflow.nn.fixed_unigram_candidate_sampler', 'tf.nn.fixed_unigram_candidate_sampler', ([], {'true_classes': 'labels_matrix', 'num_true': '(1)', 'num_sampled': 'model.negative_sample', 'unique': '(False)', 'range_max': 'label_size', 'distortion': '(0.75)', 'unigrams': 'label_distribution'}), '(true_classes=labels_matrix, num_true=\n 1, num_sampled=model.negative_sample, unique=False, range_max=\n label_size, distortion=0.75, unigrams=label_distribution)\n', (2501, 2668), True, 'import tensorflow as tf\n'), ((2921, 2966), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['label_emb', 'label_idxs'], {}), '(label_emb, label_idxs)\n', (2943, 2966), True, 'import tensorflow as tf\n'), ((2977, 3023), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['label_bias', 'label_idxs'], {}), '(label_bias, label_idxs)\n', (2999, 3023), True, 'import tensorflow as tf\n'), ((3116, 3162), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['label_emb', 'sampled_ids'], {}), '(label_emb, sampled_ids)\n', (3138, 3162), True, 'import tensorflow as tf\n'), ((3176, 3223), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['label_bias', 'sampled_ids'], {}), '(label_bias, sampled_ids)\n', (3198, 3223), True, 'import tensorflow as tf\n'), ((3483, 3529), 'tensorflow.reshape', 'tf.reshape', (['sampled_b', '[model.negative_sample]'], {}), '(sampled_b, [model.negative_sample])\n', (3493, 3529), True, 'import tensorflow as tf\n'), ((4070, 4274), 'tensorflow.nn.fixed_unigram_candidate_sampler', 'tf.nn.fixed_unigram_candidate_sampler', ([], {'true_classes': 'labels_matrix', 'num_true': '(1)', 'num_sampled': 'model.negative_sample', 'unique': '(False)', 'range_max': 'label_size', 'distortion': 
'(0.75)', 'unigrams': 'label_distribution'}), '(true_classes=labels_matrix, num_true=\n 1, num_sampled=model.negative_sample, unique=False, range_max=\n label_size, distortion=0.75, unigrams=label_distribution)\n', (4107, 4274), True, 'import tensorflow as tf\n'), ((4354, 4403), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['example_emb', 'example_idxs'], {}), '(example_emb, example_idxs)\n', (4376, 4403), True, 'import tensorflow as tf\n'), ((4489, 4534), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['label_emb', 'label_idxs'], {}), '(label_emb, label_idxs)\n', (4511, 4534), True, 'import tensorflow as tf\n'), ((4545, 4591), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['label_bias', 'label_idxs'], {}), '(label_bias, label_idxs)\n', (4567, 4591), True, 'import tensorflow as tf\n'), ((4684, 4730), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['label_emb', 'sampled_ids'], {}), '(label_emb, sampled_ids)\n', (4706, 4730), True, 'import tensorflow as tf\n'), ((4744, 4791), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['label_bias', 'sampled_ids'], {}), '(label_bias, sampled_ids)\n', (4766, 4791), True, 'import tensorflow as tf\n'), ((5051, 5097), 'tensorflow.reshape', 'tf.reshape', (['sampled_b', '[model.negative_sample]'], {}), '(sampled_b, [model.negative_sample])\n', (5061, 5097), True, 'import tensorflow as tf\n'), ((867, 893), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['word_idxs'], {}), '(word_idxs)\n', (882, 893), False, 'from tensorflow.python.ops import array_ops\n'), ((1844, 1882), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['regularization_terms[0]'], {}), '(regularization_terms[0])\n', (1857, 1882), True, 'import tensorflow as tf\n'), ((2043, 2084), 'tensorflow.python.ops.math_ops.cast', 'math_ops.cast', (['batch_size', 'dtypes.float32'], {}), '(batch_size, dtypes.float32)\n', (2056, 2084), False, 'from tensorflow.python.ops import math_ops\n'), ((2235, 2264), 
'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['example_idxs'], {}), '(example_idxs)\n', (2250, 2264), False, 'from tensorflow.python.ops import array_ops\n'), ((2367, 2402), 'tensorflow.cast', 'tf.cast', (['label_idxs'], {'dtype': 'tf.int64'}), '(label_idxs, dtype=tf.int64)\n', (2374, 2402), True, 'import tensorflow as tf\n'), ((3548, 3599), 'tensorflow.matmul', 'tf.matmul', (['example_vec', 'sampled_w'], {'transpose_b': '(True)'}), '(example_vec, sampled_w, transpose_b=True)\n', (3557, 3599), True, 'import tensorflow as tf\n'), ((3841, 3870), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['example_idxs'], {}), '(example_idxs)\n', (3856, 3870), False, 'from tensorflow.python.ops import array_ops\n'), ((3973, 4008), 'tensorflow.cast', 'tf.cast', (['label_idxs'], {'dtype': 'tf.int64'}), '(label_idxs, dtype=tf.int64)\n', (3980, 4008), True, 'import tensorflow as tf\n'), ((5116, 5167), 'tensorflow.matmul', 'tf.matmul', (['example_vec', 'sampled_w'], {'transpose_b': '(True)'}), '(example_vec, sampled_w, transpose_b=True)\n', (5125, 5167), True, 'import tensorflow as tf\n'), ((5801, 5825), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['true_xent'], {}), '(true_xent)\n', (5814, 5825), True, 'import tensorflow as tf\n'), ((5828, 5855), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['sampled_xent'], {}), '(sampled_xent)\n', (5841, 5855), True, 'import tensorflow as tf\n'), ((1944, 1982), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['regularization_terms[i]'], {}), '(regularization_terms[i])\n', (1957, 1982), True, 'import tensorflow as tf\n'), ((2748, 2797), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['example_emb', 'example_idxs'], {}), '(example_emb, example_idxs)\n', (2770, 2797), True, 'import tensorflow as tf\n'), ((3286, 3318), 'tensorflow.multiply', 'tf.multiply', (['example_vec', 'true_w'], {}), '(example_vec, true_w)\n', (3297, 3318), True, 'import tensorflow as tf\n'), ((4854, 4886), 'tensorflow.multiply', 'tf.multiply', 
(['example_vec', 'true_w'], {}), '(example_vec, true_w)\n', (4865, 4886), True, 'import tensorflow as tf\n'), ((5528, 5553), 'tensorflow.ones_like', 'tf.ones_like', (['true_logits'], {}), '(true_logits)\n', (5540, 5553), True, 'import tensorflow as tf\n'), ((5645, 5674), 'tensorflow.zeros_like', 'tf.zeros_like', (['sampled_logits'], {}), '(sampled_logits)\n', (5658, 5674), True, 'import tensorflow as tf\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
sns.set(style='ticks', context='paper', palette='colorblind')

# cmocean provides perceptually uniform oceanographic colormaps; fall back to
# matplotlib's built-in colormaps when it is not installed.
try:
    import cmocean.cm as cmo
    cmocean_flag = True
except ImportError:
    # Only catch ImportError: a bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit and hide unrelated bugs in cmocean itself.
    cmocean_flag = False
class pltClass:
    """Bare container handed back by the plotting helpers.

    Instances carry figure/axes (and sometimes colorbar) handles as plain
    attributes so callers can customise the plot after the fact.
    """

    def __init__(self):
        self.__info__ = 'Python qc package plt class'
def float_ncep_inair(sdn, flt, ncep, ax=None, legend=True):
    """Plot float in-air pO2 against the NCEP reference time series.

    Args:
        sdn: matplotlib date numbers for the x axis.
        flt: float-measured pO2 values (mbar).
        ncep: NCEP reanalysis pO2 values (mbar).
        ax: existing axes to draw into; a new figure is created when None.
        legend: whether to draw a legend (lower-left).

    Returns:
        pltClass holder with ``fig`` and ``axes`` attributes.
    """
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()

    for series, series_name in ((flt, 'Float'), (ncep, 'NCEP')):
        ax.plot(sdn, series, linewidth=2, label=series_name)
    if legend:
        ax.legend(loc=3)

    # Major ticks every 4 months, minor ticks monthly, 'Mon YYYY' labels.
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=4))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))
    ax.xaxis.set_minor_locator(mdates.MonthLocator())
    ax.set_ylabel('pO$_2$ (mbar)')
    for label in ax.get_xticklabels():
        label.set_rotation(45)

    holder = pltClass()
    holder.fig = fig
    holder.axes = [ax]
    return holder
def float_woa_surface(sdn, flt, woa, ax=None, legend=True):
    """Plot float surface O2 saturation against the WOA18 climatology.

    Args:
        sdn: matplotlib date numbers for the x axis.
        flt: float-measured O2 saturation (%).
        woa: WOA18 climatological O2 saturation (%).
        ax: existing axes to draw into; a new figure is created when None.
        legend: whether to draw a legend (lower-left).

    Returns:
        pltClass holder with ``fig`` and ``axes`` attributes.
    """
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()

    for series, series_name in ((flt, 'Float'), (woa, 'WOA18')):
        ax.plot(sdn, series, linewidth=2, label=series_name)
    if legend:
        ax.legend(loc=3)

    # Major ticks every 4 months, minor ticks monthly, 'Mon YYYY' labels.
    ax.xaxis.set_major_locator(mdates.MonthLocator(interval=4))
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))
    ax.xaxis.set_minor_locator(mdates.MonthLocator())
    ax.set_ylabel('O$_2$ Saturation %')
    for label in ax.get_xticklabels():
        label.set_rotation(45)

    holder = pltClass()
    holder.fig = fig
    holder.axes = [ax]
    return holder
def gains(sdn, gains, inair=True, ax=None, legend=True):
    """Plot oxygen gain factors over time with their mean and a unity line.

    Args:
        sdn: matplotlib date numbers for the x axis.
        gains: per-profile gain values (may contain NaN).
        inair: unused; kept for backward compatibility with existing callers.
        ax: existing axes to draw into; a new figure is created when None.
        legend: whether to draw a legend (lower-left).

    Returns:
        pltClass holder with ``fig`` and ``axes`` attributes.
    """
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()
    # compute the NaN-tolerant mean once -- it is both drawn and used in the label
    mean_gain = np.nanmean(gains)
    ax.plot(sdn, gains, 'o', markeredgewidth=0.5, markersize=5, markeredgecolor='grey', zorder=3, label='Gains')
    ax.axhline(mean_gain, color='k', linestyle='--', label='Mean = {:.2f}'.format(mean_gain), zorder=2)
    # unity reference line: gain of 1.0 means no sensor drift
    ax.axhline(1.0, color='k', linestyle='-', linewidth=0.5, label=None, zorder=1)
    if legend:
        ax.legend(loc=3)
    # Major ticks every 4 months, minor ticks monthly, 'Mon YYYY' labels.
    mhr = mdates.MonthLocator(interval=4)
    mihr = mdates.MonthLocator()
    fmt = mdates.DateFormatter('%b %Y')
    ax.xaxis.set_major_locator(mhr)
    ax.xaxis.set_major_formatter(fmt)
    ax.xaxis.set_minor_locator(mihr)
    ax.set_ylabel('O$_2$ Gain (unitless)')
    for tick in ax.get_xticklabels():
        tick.set_rotation(45)
    g = pltClass()
    g.fig = fig
    g.axes = [ax]
    return g
def gainplot(sdn, float_data, ref_data, gainvals, ref):
    """Stacked two-panel plot: reference comparison on top, gains below.

    Args:
        sdn: matplotlib date numbers.
        float_data: float-measured values.
        ref_data: reference (NCEP or WOA) values.
        gainvals: computed gain factors.
        ref: 'NCEP' (in-air comparison) or 'WOA' (surface-saturation comparison).

    Returns:
        pltClass holder with ``fig`` and ``axes`` attributes.
    """
    fig, axes = plt.subplots(2, 1, sharex=True)
    if ref == 'NCEP':
        float_ncep_inair(sdn, float_data, ref_data, ax=axes[0])
        gains(sdn, gainvals, inair=False, ax=axes[1])
    elif ref == 'WOA':
        float_woa_surface(sdn, float_data, ref_data, ax=axes[0])
        gains(sdn, gainvals, inair=False, ax=axes[1])

    holder = pltClass()
    holder.fig = fig
    holder.axes = axes
    return holder
def var_cscatter(df, varname='DOXY', cmap=None, ax=None, ylim=(0,2000), clabel=None, vmin=None, vmax=None, **kwargs):
    """Time/pressure scatter of one profile variable, colored by its value.

    Args:
        df: DataFrame with at least SDN (matplotlib dates), PRES and `varname`
            columns -- assumed from the float-profile readers; TODO confirm.
        varname: column to color by (default 'DOXY').
        cmap: colormap override; defaults per-variable (cmocean if available).
        ax: existing axes to draw into; a new figure is created when None.
        ylim: pressure range to display, (shallow, deep) in dbar.
        clabel: colorbar label override; defaults per-variable.
        vmin, vmax: color limits; derived from the data when None.
        **kwargs: forwarded to ``ax.scatter``.

    Returns:
        pltClass holder with ``fig``, ``axes`` and ``cb`` attributes.
    """
    # define colormaps
    if cmocean_flag:
        color_maps = dict(
            TEMP=cmo.thermal,
            TEMP_ADJUSTED=cmo.thermal,
            PSAL=cmo.haline,
            PSAL_ADJUSTED=cmo.haline,
            PDEN=cmo.dense,
            CHLA=cmo.algae,
            CHLA_ADJUSTED=cmo.algae,
            BBP700=cmo.matter,
            BBP700_ADJUSTED=cmo.matter,
            DOXY=cmo.ice,
            DOXY_ADJUSTED=cmo.ice,
            DOWNWELLING_IRRADIANCE=cmo.solar,
        )
    else:
        color_maps = dict(
            TEMP=plt.cm.inferno,
            TEMP_ADJUSTED=plt.cm.inferno,
            PSAL=plt.cm.viridis,
            PSAL_ADJUSTED=plt.cm.viridis,
            PDEN=plt.cm.cividis,
            CHLA=plt.cm.YlGn,
            CHLA_ADJUSTED=plt.cm.YlGn,
            BBP700=plt.cm.pink_r,
            BBP700_ADJUSTED=plt.cm.pink_r,
            DOXY=plt.cm.YlGnBu_r,
            DOXY_ADJUSTED=plt.cm.YlGnBu_r,
            DOWNWELLING_IRRADIANCE=plt.cm.magma,
        )
    if clabel is None:
        # raw strings for the LaTeX labels: '\m' / '\mu' are invalid escape
        # sequences and raise SyntaxWarning on recent Python versions
        var_units = dict(
            TEMP='Temperature ({}C)'.format(chr(176)),
            TEMP_ADJUSTED='Temperature ({}C)'.format(chr(176)),
            PSAL='Practical Salinity',
            PSAL_ADJUSTED='Practical Salinity',
            PDEN='Potential Density (kg m${-3}$)',
            CHLA='Chlorophyll (mg m$^{-3}$',
            CHLA_ADJUSTED='Chlorophyll (mg m$^{-3}$',
            BBP700=r'$\mathsf{b_{bp}}$ (m$^{-1}$)',
            BBP700_ADJUSTED=r'$\mathsf{b_{bp}}$ (m$^{-1}$)',
            DOXY=r'Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
            DOXY_ADJUSTED=r'Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
            DOWNWELLING_IRRADIANCE='Downwelling Irradiance (W m$^{-2}$)',
        )
        clabel = var_units[varname]
    if cmap is None:
        cmap = color_maps[varname]
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.get_figure()
    # drop points well below the display range so autoscaled colors ignore them
    df = df.loc[df.PRES < ylim[1]+50]
    if vmin is None:
        # NOTE(review): 1.05*min only pads downward for negative minima;
        # for positive-valued variables it clips the lowest 5% -- confirm intended
        vmin = 1.05*df[varname].min()
    if vmax is None:
        vmax = 0.95*df[varname].max()
    im = ax.scatter(df.SDN, df.PRES, c=df[varname], s=50, cmap=cmap, vmin=vmin, vmax=vmax, **kwargs)
    cb = plt.colorbar(im, ax=ax)
    cb.set_label(clabel)
    ax.set_ylim(ylim)
    ax.invert_yaxis()
    ax.set_ylabel('Depth (dbar)')
    # widen the figure for a time-series aspect ratio
    w, h = fig.get_figwidth(), fig.get_figheight()
    fig.set_size_inches(w*2, h)
    # Major ticks every 4 months, minor ticks monthly, 'Mon YYYY' labels.
    mhr = mdates.MonthLocator(interval=4)
    mihr = mdates.MonthLocator()
    fmt = mdates.DateFormatter('%b %Y')
    ax.xaxis.set_major_locator(mhr)
    ax.xaxis.set_major_formatter(fmt)
    ax.xaxis.set_minor_locator(mihr)
    g = pltClass()
    g.fig = fig
    g.axes = [ax]
    g.cb = cb
    return g
def profiles(df, varlist=['DOXY'], Ncycle=1, Nprof=np.inf, zvar='PRES', xlabels=None, ylabel=None, axes=None, ylim=None, **kwargs):
    """Plot vertical profiles of one or more variables for a range of cycles.

    Args:
        df: DataFrame with CYCLE, SDN, `zvar` and each variable in `varlist`.
            NOTE(review): rows deeper than 1.1*ylim are set to NaN IN PLACE,
            mutating the caller's DataFrame -- confirm intended.
        varlist: variables to plot, one subplot each. (Mutable default is
            safe here: the list is only read, never mutated.)
        Ncycle: 1-based first cycle to plot.
        Nprof: number of consecutive profiles to overlay (clipped to the
            number of available cycles).
        zvar: vertical coordinate, 'PRES' or 'PDEN'.
        xlabels, ylabel: axis label overrides; sensible defaults otherwise.
        axes: existing axes (list or single) to draw into.
        ylim: vertical display range; derived from `zvar` when None.
        **kwargs: forwarded to ``plot``; a 'color' entry disables the
            grey-scale cycle shading.

    Returns:
        pltClass holder with ``fig`` and ``axes`` attributes.
    """
    if xlabels is None:
        var_units = dict(
            TEMP='Temperature ({}C)'.format(chr(176)),
            TEMP_ADJUSTED='Temperature ({}C)'.format(chr(176)),
            PSAL='Practical Salinity',
            PSAL_ADJUSTED='Practical Salinity',
            PDEN='Potential Density (kg m$^{-3}$)',
            CHLA='Chlorophyll (mg m$^{-3}$',
            CHLA_ADJUSTED='Chlorophyll (mg m$^{-3}$',
            BBP700='$\mathsf{b_{bp}}$ (m$^{-1}$)',
            BBP700_ADJUSTED='$\mathsf{b_{bp}}$ (m$^{-1}$)',
            CDOM='CDOM (mg m$^{-3}$)',
            CDOM_ADJUSTED='CDOM (mg m$^{-3}$)',
            DOXY='Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
            DOXY_ADJUSTED='Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
            DOWNWELLING_IRRADIANCE='Downwelling Irradiance (W m$^{-2}$)',
        )
        # variables without a known unit get an empty x label
        xlabels = [var_units[v] if v in var_units.keys() else '' for v in varlist]
    # grey-scale colormap used to shade profiles by cycle number
    cm = plt.cm.gray_r
    if axes is None:
        fig, axes = plt.subplots(1, len(varlist), sharey=True)
        if len(varlist) == 1:
            axes = [axes]
    elif len(varlist) > 1:
        fig = axes[0].get_figure()
    else:
        # a single bare Axes was passed in -- normalize to a one-element list
        fig = axes.get_figure()
        axes = [axes]
    if ylim is None:
        if zvar == 'PRES':
            ylim=(0,2000)
            if ylabel is None:
                ylabel = 'Pressure (dbar)'
        elif zvar == 'PDEN':
            ylim = (df.PDEN.min(), df.PDEN.max())
            if ylabel is None:
                ylabel = 'Density (kg m$^{-3}$)'
    # blank out rows well below the display range (mutates the caller's df)
    df.loc[df[zvar] > ylim[1]*1.1] = np.nan
    CYCNUM = df.CYCLE.unique()
    greyflag = False
    if not 'color' in kwargs.keys():
        greyflag = True
    else:
        c = kwargs.pop('color')
    if Nprof > CYCNUM.shape[0]:
        Nprof = CYCNUM.shape[0]
    for i,v in enumerate(varlist):
        for n in range(Nprof):
            # NOTE(review): first index is Ncycle-2; for Ncycle=1 this is -1,
            # which wraps to the LAST cycle -- confirm the offset is intended
            subset_df = df.loc[df.CYCLE == CYCNUM[Ncycle-1 + n-1]]
            if greyflag:
                # later cycles plot darker (0.25..1.0 along the gray_r ramp)
                c = cm(0.75*(CYCNUM[Ncycle-1 + n-1]/CYCNUM[-1])+0.25)
            axes[i].plot(subset_df[v], subset_df[zvar], color=c, **kwargs)
        axes[i].set_ylim(ylim[::-1])
        axes[i].set_xlabel(xlabels[i])
    # title uses the date of the first requested cycle
    subset_df = df.loc[df.CYCLE == CYCNUM[Ncycle-1]]
    date = mdates.num2date(subset_df.SDN.iloc[0]).strftime('%d %b, %Y')
    axes[0].set_ylabel(ylabel)
    if Nprof != 1:
        axes[0].set_title('Cyc. {:d}-{:d}, {}'.format(int(CYCNUM[Ncycle-1]), int(CYCNUM[Ncycle-1+Nprof-1]), date))
    else:
        axes[0].set_title('Cyc. {:d}, {}'.format(int(CYCNUM[Ncycle-1]), date))
    # scale figure width with the number of subplots
    w, h = fig.get_figwidth(), fig.get_figheight()
    fig.set_size_inches(w*len(varlist)/3, h)
    g = pltClass()
    g.fig = fig
    g.axes = axes
    return g
def qc_profiles(df, varlist=['DOXY'], Ncycle=1, Nprof=np.inf, zvar='PRES', xlabels=None, ylabel=None, axes=None, ylim=None, **kwargs):
    """Plot profiles with points colored by their Argo-style QC flag group.

    Flag groups: 1/2/5 -> 'Good' (green), 3 -> 'Probably Bad' (yellow),
    4 -> 'Bad' (red), 8 -> 'Interpolated' (blue). Each variable `v` is
    expected to have a companion column named ``v + '_QC'``.

    Args:
        df: DataFrame with CYCLE, SDN, `zvar`, each variable in `varlist`
            and its *_QC column. NOTE(review): rows deeper than 1.1*ylim are
            set to NaN IN PLACE, mutating the caller's DataFrame.
        varlist: variables to plot, one subplot each. (Mutable default is
            safe here: the list is only read, never mutated.)
        Ncycle: 1-based first cycle to plot.
        Nprof: number of consecutive profiles to overlay (clipped).
        zvar: vertical coordinate, 'PRES' or 'PDEN'.
        xlabels, ylabel, axes, ylim: see ``profiles``.
        **kwargs: forwarded to ``plot``.

    Returns:
        pltClass holder with ``fig`` and ``axes`` attributes.
    """
    if xlabels is None:
        var_units = dict(
            TEMP='Temperature ({}C)'.format(chr(176)),
            TEMP_ADJUSTED='Temperature ({}C)'.format(chr(176)),
            PSAL='Practical Salinity',
            PSAL_ADJUSTED='Practical Salinity',
            PDEN='Potential Density (kg m$^{-3}$)',
            CHLA='Chlorophyll (mg m$^{-3}$',
            CHLA_ADJUSTED='Chlorophyll (mg m$^{-3}$',
            BBP700='$\mathsf{b_{bp}}$ (m$^{-1}$)',
            BBP700_ADJUSTED='$\mathsf{b_{bp}}$ (m$^{-1}$)',
            CDOM='CDOM (mg m$^{-3}$)',
            CDOM_ADJUSTED='CDOM (mg m$^{-3}$)',
            DOXY='Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
            DOXY_ADJUSTED='Diss. Oxygen ($\mathregular{\mu}$mol kg$^{-1}$)',
            DOWNWELLING_IRRADIANCE='Downwelling Irradiance (W m$^{-2}$)',
        )
        # NOTE(review): unlike profiles(), an unknown variable raises KeyError
        # here instead of falling back to '' -- confirm the inconsistency
        xlabels = [var_units[v] for v in varlist]
    if axes is None:
        fig, axes = plt.subplots(1, len(varlist), sharey=True)
        if len(varlist) == 1:
            axes = [axes]
    elif len(varlist) > 1:
        fig = axes[0].get_figure()
    else:
        # a single bare Axes was passed in -- normalize to a one-element list
        fig = axes.get_figure()
        axes = [axes]
    if ylim is None:
        if zvar == 'PRES':
            ylim=(0,2000)
            if ylabel is None:
                ylabel = 'Pressure (dbar)'
        elif zvar == 'PDEN':
            ylim = (df.PDEN.min(), df.PDEN.max())
            if ylabel is None:
                ylabel = 'Density (kg m$^{-3}$)'
    # blank out rows well below the display range (mutates the caller's df)
    df.loc[df[zvar] > ylim[1]*1.1] = np.nan
    CYCNUM = df.CYCLE.unique()
    if Nprof > CYCNUM.shape[0]:
        Nprof = CYCNUM.shape[0]
    # QC flag groupings and their marker face colors
    groups = {'Good':[1,2,5], 'Probably Bad':[3], 'Bad':[4], 'Interpolated':[8]}
    colors = {'Good':'green', 'Probably Bad':'yellow', 'Bad':'red', 'Interpolated':'blue'}
    for i,v in enumerate(varlist):
        vqc = v + '_QC'
        for n in range(Nprof):
            # NOTE(review): same Ncycle-2 starting offset as profiles();
            # for Ncycle=1 index -1 wraps to the LAST cycle -- confirm
            subset_df = df.loc[df.CYCLE == CYCNUM[Ncycle-1 + n-1]]
            for k,f in groups.items():
                flag_subset_df = subset_df[subset_df[vqc].isin(f)]
                axes[i].plot(flag_subset_df[v], flag_subset_df[zvar], 'o', markeredgewidth=0.1, markeredgecolor='k', markerfacecolor=colors[k], **kwargs)
        axes[i].set_ylim(ylim[::-1])
        axes[i].set_xlabel(xlabels[i])
    # title uses the date of the first requested cycle
    subset_df = df.loc[df.CYCLE == CYCNUM[Ncycle-1]]
    date = mdates.num2date(subset_df.SDN.iloc[0]).strftime('%d %b, %Y')
    axes[0].set_ylabel(ylabel)
    if Nprof != 1:
        axes[0].set_title('Cyc. {:d}-{:d}, {}'.format(int(CYCNUM[Ncycle-1]), int(CYCNUM[Ncycle-1+Nprof-1]), date))
    else:
        axes[0].set_title('Cyc. {:d}, {}'.format(int(CYCNUM[Ncycle-1]), date))
    # scale figure width with the number of subplots
    w, h = fig.get_figwidth(), fig.get_figheight()
    fig.set_size_inches(w*len(varlist)/3, h)
    g = pltClass()
    g.fig = fig
    g.axes = axes
    return g
| [
"matplotlib.dates.num2date",
"seaborn.set",
"matplotlib.dates.MonthLocator",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.colorbar",
"numpy.nanmean",
"matplotlib.pyplot.subplots"
] | [((109, 170), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'context': '"""paper"""', 'palette': '"""colorblind"""'}), "(style='ticks', context='paper', palette='colorblind')\n", (116, 170), True, 'import seaborn as sns\n'), ((666, 697), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(4)'}), '(interval=4)\n', (685, 697), True, 'import matplotlib.dates as mdates\n'), ((709, 730), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (728, 730), True, 'import matplotlib.dates as mdates\n'), ((742, 771), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b %Y"""'], {}), "('%b %Y')\n", (762, 771), True, 'import matplotlib.dates as mdates\n'), ((1366, 1397), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(4)'}), '(interval=4)\n', (1385, 1397), True, 'import matplotlib.dates as mdates\n'), ((1409, 1430), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (1428, 1430), True, 'import matplotlib.dates as mdates\n'), ((1442, 1471), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b %Y"""'], {}), "('%b %Y')\n", (1462, 1471), True, 'import matplotlib.dates as mdates\n'), ((2283, 2314), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(4)'}), '(interval=4)\n', (2302, 2314), True, 'import matplotlib.dates as mdates\n'), ((2326, 2347), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (2345, 2347), True, 'import matplotlib.dates as mdates\n'), ((2359, 2388), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b %Y"""'], {}), "('%b %Y')\n", (2379, 2388), True, 'import matplotlib.dates as mdates\n'), ((2762, 2793), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)'}), '(2, 1, sharex=True)\n', (2774, 2793), True, 'import matplotlib.pyplot as plt\n'), ((5488, 5511), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax'}), '(im, 
ax=ax)\n', (5500, 5511), True, 'import matplotlib.pyplot as plt\n'), ((5712, 5743), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'interval': '(4)'}), '(interval=4)\n', (5731, 5743), True, 'import matplotlib.dates as mdates\n'), ((5755, 5776), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (5774, 5776), True, 'import matplotlib.dates as mdates\n'), ((5788, 5817), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b %Y"""'], {}), "('%b %Y')\n", (5808, 5817), True, 'import matplotlib.dates as mdates\n'), ((457, 471), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (469, 471), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1171), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1169, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1859, 1873), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1871, 1873), True, 'import matplotlib.pyplot as plt\n'), ((2043, 2060), 'numpy.nanmean', 'np.nanmean', (['gains'], {}), '(gains)\n', (2053, 2060), True, 'import numpy as np\n'), ((5163, 5177), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5175, 5177), True, 'import matplotlib.pyplot as plt\n'), ((8390, 8428), 'matplotlib.dates.num2date', 'mdates.num2date', (['subset_df.SDN.iloc[0]'], {}), '(subset_df.SDN.iloc[0])\n', (8405, 8428), True, 'import matplotlib.dates as mdates\n'), ((11361, 11399), 'matplotlib.dates.num2date', 'mdates.num2date', (['subset_df.SDN.iloc[0]'], {}), '(subset_df.SDN.iloc[0])\n', (11376, 11399), True, 'import matplotlib.dates as mdates\n'), ((2118, 2135), 'numpy.nanmean', 'np.nanmean', (['gains'], {}), '(gains)\n', (2128, 2135), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
P-spline versions of Whittaker functions
----------------------------------------
pybaselines contains penalized spline (P-spline) versions of all of the
Whittaker-smoothing-based algorithms implemented in pybaselines. The reason
for doing so was that P-splines offer additional user flexibility when choosing
parameters for fitting and more easily work for unequally spaced data. This example
will examine the relationship of `lam` versus the number of data points when fitting
a baseline with the :func:`.arpls` function and its P-spline version,
:func:`.pspline_arpls`.
Note that the exact optimal `lam` values reported in this example are not of significant
use since they depend on many other factors such as the baseline curvature, noise, peaks,
etc.; however, the examined trends can be used to simplify the process of selecting `lam`
values for fitting new datasets.
"""
# sphinx_gallery_thumbnail_number = 2
from itertools import cycle
import matplotlib.pyplot as plt
import numpy as np
from pybaselines import spline, whittaker
# local import with setup code
from example_helpers import make_data, optimize_lam
# %%
# The baseline for this example is an exponentially decaying baseline, shown below.
# Other baseline types could be examined, similar to the
# :ref:`Whittaker lam vs data size example <sphx_glr_examples_whittaker_plot_lam_vs_data_size.py>`,
# which should give similar results.
plt.plot(make_data(1000, bkg_type='exponential')[0])

# %%
# For each function, the optimal `lam` value will be calculated for data sizes
# ranging from 500 to 20000 points. Further, the intercept and slope of the linear fit
# of the log of the data size, N, and the log of the `lam` value will be reported.
# The number of knots for the P-spline version is fixed at the default, 100 (the effect
# of the number of knots versus optimal `lam` is shown in another
# :ref:`example <sphx_glr_examples_spline_plot_lam_vs_num_knots.py>`).
print('Function, intercept & slope of log(N) vs log(lam) fit')
print('-' * 60)
show_plots = False  # for debugging
# six sizes, logarithmically spaced between 500 and 20000 points
num_points = np.logspace(np.log10(500), np.log10(20000), 6, dtype=int)
symbols = cycle(['o', 's'])
_, ax = plt.subplots()
# legend collects (line, marker) handle pairs and labels for both functions
legend = [[], []]
for i, func in enumerate((whittaker.arpls, spline.pspline_arpls)):
    func_name = func.__name__
    best_lams = np.empty_like(num_points, float)
    # seed each optimization with the previous size's optimum to converge faster
    min_lam = None
    for j, num_x in enumerate(num_points):
        y, baseline = make_data(num_x, bkg_type='exponential')
        # use a slightly lower tolerance to speed up the calculation
        min_lam = optimize_lam(y, baseline, func, min_lam, tol=1e-2, max_iter=50)
        best_lams[j] = min_lam
        if show_plots:
            plt.figure(num=num_x)
            if i == 0:
                plt.plot(y)
                plt.plot(baseline)
            plt.plot(func(y, lam=10**min_lam)[0], '--')
    # linear fit of log10(N) vs log10(lam); coeffs = (intercept, slope)
    fit = np.polynomial.polynomial.Polynomial.fit(np.log10(num_points), best_lams, 1)
    coeffs = fit.convert().coef
    print(f'{func_name:<16} {coeffs}')
    line = 10**fit(np.log10(num_points))
    handle_1 = ax.plot(num_points, line, label=func_name)[0]
    handle_2 = ax.plot(num_points, 10**best_lams, next(symbols))[0]
    legend[0].append((handle_1, handle_2))
    legend[1].append(func_name)
ax.loglog()
ax.legend(*legend)
ax.set_xlabel('Input Array Size, N')
ax.set_ylabel('Optimal lam Value')

plt.show()
# %%
# The results shown above demonstrate that the slope of the `lam` vs data
# size best fit line is much smaller for the P-spline based version of arpls.
# This means that once the number of knots is fixed for a particular baseline,
# the required `lam` value should be much less affected by a change in the
# number of data points (assuming the curvature of the data does not change).
#
# The above results are particularly useful when processing very large datasets.
# A `lam` value greater than ~1e14 typically causes numerical issues that can cause
# the solver to fail. Most Whittaker-smoothing-based algorithms reach that `lam`
# cutoff when the number of points is around ~20,000-500,000 (depends on the exact
# algorithm). Since the P-spline versions do not experience such a large increase in
# the required `lam`, they are more suited to fit those larger datasets. Additionally,
# the required `lam` value for the P-spline versions can be lowered simply by reducing
# the number of knots.
#
# It should be addressed that a similar result could be obtained using the regular
# Whittaker-smoothing-based version by truncating the number of points to a fixed
# value. That, however, would require additional processing steps to smooth out the
# resulting baseline after interpolating back to the original data size. Thus, the
# P-spline versions require less user-intervention to achieve the same result.
| [
"itertools.cycle",
"numpy.log10",
"example_helpers.make_data",
"matplotlib.pyplot.plot",
"example_helpers.optimize_lam",
"matplotlib.pyplot.figure",
"numpy.empty_like",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((2171, 2188), 'itertools.cycle', 'cycle', (["['o', 's']"], {}), "(['o', 's'])\n", (2176, 2188), False, 'from itertools import cycle\n'), ((2197, 2211), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2209, 2211), True, 'import matplotlib.pyplot as plt\n'), ((3388, 3398), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3396, 3398), True, 'import matplotlib.pyplot as plt\n'), ((2115, 2128), 'numpy.log10', 'np.log10', (['(500)'], {}), '(500)\n', (2123, 2128), True, 'import numpy as np\n'), ((2130, 2145), 'numpy.log10', 'np.log10', (['(20000)'], {}), '(20000)\n', (2138, 2145), True, 'import numpy as np\n'), ((2343, 2375), 'numpy.empty_like', 'np.empty_like', (['num_points', 'float'], {}), '(num_points, float)\n', (2356, 2375), True, 'import numpy as np\n'), ((1450, 1489), 'example_helpers.make_data', 'make_data', (['(1000)'], {'bkg_type': '"""exponential"""'}), "(1000, bkg_type='exponential')\n", (1459, 1489), False, 'from example_helpers import make_data, optimize_lam\n'), ((2460, 2500), 'example_helpers.make_data', 'make_data', (['num_x'], {'bkg_type': '"""exponential"""'}), "(num_x, bkg_type='exponential')\n", (2469, 2500), False, 'from example_helpers import make_data, optimize_lam\n'), ((2588, 2651), 'example_helpers.optimize_lam', 'optimize_lam', (['y', 'baseline', 'func', 'min_lam'], {'tol': '(0.01)', 'max_iter': '(50)'}), '(y, baseline, func, min_lam, tol=0.01, max_iter=50)\n', (2600, 2651), False, 'from example_helpers import make_data, optimize_lam\n'), ((2930, 2950), 'numpy.log10', 'np.log10', (['num_points'], {}), '(num_points)\n', (2938, 2950), True, 'import numpy as np\n'), ((2719, 2740), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'num_x'}), '(num=num_x)\n', (2729, 2740), True, 'import matplotlib.pyplot as plt\n'), ((2804, 2822), 'matplotlib.pyplot.plot', 'plt.plot', (['baseline'], {}), '(baseline)\n', (2812, 2822), True, 'import matplotlib.pyplot as plt\n'), ((3056, 3076), 'numpy.log10', 'np.log10', 
(['num_points'], {}), '(num_points)\n', (3064, 3076), True, 'import numpy as np\n'), ((2780, 2791), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {}), '(y)\n', (2788, 2791), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2021-07-08
@author: cook
"""
from astropy.io import fits
from astropy.table import Table
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# =============================================================================
# Define variables
# =============================================================================
# Directory holding the reference copies of the reduced files; presumably
# matching file names exist in the current working directory -- TODO confirm
path = '/data/spirou/data/minidata2/reduced/2020-08-31/'
# =============================================================================
# Define functions
# =============================================================================
def diff_image(imagepath, imagename):
    """Diff one file against its reference copy, extension by extension.

    Opens ``imagepath/imagename`` (reference) and ``imagename`` (current
    directory), prints an astropy diff report per HDU, and displays an image
    of the pixel difference when it is non-zero.

    :param imagepath: directory containing the reference copy of the file
    :param imagename: file name, opened both under imagepath and locally
    :return: None
    """
    # narrow the original bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) and close the files we do open
    try:
        hdu1 = fits.open(os.path.join(imagepath, imagename))
    except Exception:
        print('Skipping {0} [non-fits]'.format(imagename))
        return
    try:
        hdu2 = fits.open(imagename)
    except Exception:
        hdu1.close()
        print('Skipping {0} [non-fits]'.format(imagename))
        return
    try:
        for extnum in range(len(hdu1)):
            # get name
            name = '{0}[{1}]'.format(imagename, extnum)
            print('=' * 50)
            print(name)
            print('=' * 50)
            if extnum >= len(hdu2):
                print('\tEXTENSION {0} MISSING HDU2'.format(extnum))
                continue
            # deal with image hdu
            if isinstance(hdu1[extnum], fits.ImageHDU):
                imdiff = fits.diff.ImageDataDiff(hdu1[extnum].data, hdu2[extnum].data)
                print(imdiff.report())
                diff = hdu1[extnum].data - hdu2[extnum].data
                # only pop up a plot when the images actually differ
                if np.nansum(diff) != 0:
                    fig, frame = plt.subplots(ncols=1, nrows=1)
                    pos = frame.imshow(diff, aspect='auto', origin='lower')
                    frame.set(title=name)
                    fig.colorbar(pos, ax=frame)
                    plt.show()
                    plt.close()
            elif isinstance(hdu1[extnum], fits.BinTableHDU):
                imdiff = fits.diff.TableDataDiff(hdu1[extnum].data, hdu2[extnum].data)
                print(imdiff.report())
            else:
                print('\tSkipping (not ImageHDU or BinHDU)')
    finally:
        # close both files so looping over many files does not leak handles
        hdu1.close()
        hdu2.close()
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
files = np.array(os.listdir('.'))
last_modified = []
# get last modified for all files
for filename in files:
last_modified.append(os.path.getmtime(filename))
# sort by last modified
sortmask = np.argsort(last_modified)
files = files[sortmask]
# diff images in order
for filename in files:
diff_image(path, filename)
# =============================================================================
# End of code
# =============================================================================
| [
"astropy.io.fits.diff.ImageDataDiff",
"os.listdir",
"os.path.join",
"astropy.io.fits.diff.TableDataDiff",
"numpy.argsort",
"matplotlib.pyplot.close",
"astropy.io.fits.open",
"os.path.getmtime",
"numpy.nansum",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((2454, 2479), 'numpy.argsort', 'np.argsort', (['last_modified'], {}), '(last_modified)\n', (2464, 2479), True, 'import numpy as np\n'), ((809, 829), 'astropy.io.fits.open', 'fits.open', (['imagename'], {}), '(imagename)\n', (818, 829), False, 'from astropy.io import fits\n'), ((2249, 2264), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (2259, 2264), False, 'import os\n'), ((758, 792), 'os.path.join', 'os.path.join', (['imagepath', 'imagename'], {}), '(imagepath, imagename)\n', (770, 792), False, 'import os\n'), ((1316, 1377), 'astropy.io.fits.diff.ImageDataDiff', 'fits.diff.ImageDataDiff', (['hdu1[extnum].data', 'hdu2[extnum].data'], {}), '(hdu1[extnum].data, hdu2[extnum].data)\n', (1339, 1377), False, 'from astropy.io import fits\n'), ((2383, 2409), 'os.path.getmtime', 'os.path.getmtime', (['filename'], {}), '(filename)\n', (2399, 2409), False, 'import os\n'), ((1486, 1501), 'numpy.nansum', 'np.nansum', (['diff'], {}), '(diff)\n', (1495, 1501), True, 'import numpy as np\n'), ((1537, 1567), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'nrows': '(1)'}), '(ncols=1, nrows=1)\n', (1549, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1738, 1748), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1746, 1748), True, 'import matplotlib.pyplot as plt\n'), ((1765, 1776), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1774, 1776), True, 'import matplotlib.pyplot as plt\n'), ((1855, 1916), 'astropy.io.fits.diff.TableDataDiff', 'fits.diff.TableDataDiff', (['hdu1[extnum].data', 'hdu2[extnum].data'], {}), '(hdu1[extnum].data, hdu2[extnum].data)\n', (1878, 1916), False, 'from astropy.io import fits\n')] |
"""
FAILS-- can't grid and pack in same parent container (here, root window)
"""
from tkinter import *
from grid2 import gridbox, packbox
root = Tk()
frm = Frame(root)
frm.pack() # this works
gridbox(frm) # gridbox must have its own parent in which to grid
packbox(root)
Button(root, text='Quit', command=root.quit).pack()
mainloop()
| [
"grid2.gridbox",
"grid2.packbox"
] | [((205, 217), 'grid2.gridbox', 'gridbox', (['frm'], {}), '(frm)\n', (212, 217), False, 'from grid2 import gridbox, packbox\n'), ((279, 292), 'grid2.packbox', 'packbox', (['root'], {}), '(root)\n', (286, 292), False, 'from grid2 import gridbox, packbox\n')] |
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Rectangle,Circle
import mpl_toolkits.mplot3d.art3d as art3d
fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
ax = plt.axes(projection='3d')
# cylinder center (x, y), base height z, and extents (dx is the diameter,
# dz the height; dy is unused for a circular cylinder)
x,y,z = 10,0,0
dx,dy,dz = 12,12,10
# derive the radius once instead of repeating the magic number 6
radius = dx / 2
p = Circle((x,y),radius=radius,color='b')
p2 = Circle((x,y),radius=radius,color='b')
ax.add_patch(p)
ax.add_patch(p2)
# bottom and top caps of the cylinder, lifted to z and z+dz
art3d.pathpatch_2d_to_3d(p, z=z, zdir="z")
art3d.pathpatch_2d_to_3d(p2, z=z+dz, zdir="z")
us = np.linspace(0, 2 * np.pi, 32)
zs = np.linspace(z, z + dz, 2)
us, zs = np.meshgrid(us, zs)
# lateral surface centered on (x, y) -- the original hard-coded radius 6 at
# the origin, so the wall did not line up with the caps drawn at x=10
xs = x + radius * np.cos(us)
ys = y + radius * np.sin(us)
ax.plot_surface(xs, ys, zs, color='g')
plt.show()
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.axes",
"mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid",
"matplotlib.patches.Circle",
"matplotlib.pyplot.show"
] | [((192, 204), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (202, 204), True, 'import matplotlib.pyplot as plt\n'), ((257, 282), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (265, 282), True, 'import matplotlib.pyplot as plt\n'), ((325, 365), 'matplotlib.patches.Circle', 'Circle', (['(x, y)'], {'radius': '(dx / 2)', 'color': '"""b"""'}), "((x, y), radius=dx / 2, color='b')\n", (331, 365), False, 'from matplotlib.patches import Rectangle, Circle\n'), ((366, 406), 'matplotlib.patches.Circle', 'Circle', (['(x, y)'], {'radius': '(dx / 2)', 'color': '"""b"""'}), "((x, y), radius=dx / 2, color='b')\n", (372, 406), False, 'from matplotlib.patches import Rectangle, Circle\n'), ((435, 477), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['p'], {'z': 'z', 'zdir': '"""z"""'}), "(p, z=z, zdir='z')\n", (459, 477), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((478, 526), 'mpl_toolkits.mplot3d.art3d.pathpatch_2d_to_3d', 'art3d.pathpatch_2d_to_3d', (['p2'], {'z': '(z + dz)', 'zdir': '"""z"""'}), "(p2, z=z + dz, zdir='z')\n", (502, 526), True, 'import mpl_toolkits.mplot3d.art3d as art3d\n'), ((531, 560), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(32)'], {}), '(0, 2 * np.pi, 32)\n', (542, 560), True, 'import numpy as np\n'), ((567, 588), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(2)'], {}), '(0, 10, 2)\n', (578, 588), True, 'import numpy as np\n'), ((600, 619), 'numpy.meshgrid', 'np.meshgrid', (['us', 'zs'], {}), '(us, zs)\n', (611, 619), True, 'import numpy as np\n'), ((705, 715), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (713, 715), True, 'import matplotlib.pyplot as plt\n'), ((631, 641), 'numpy.cos', 'np.cos', (['us'], {}), '(us)\n', (637, 641), True, 'import numpy as np\n'), ((652, 662), 'numpy.sin', 'np.sin', (['us'], {}), '(us)\n', (658, 662), True, 'import numpy as np\n')] |
#
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse
import os
import subprocess
import sys
# Absolute directory containing this script.
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
# Root of the Android source tree (two levels above this script).
ANDROID_DIR = os.path.realpath(os.path.join(THIS_DIR, '../..'))
class ArgParser(argparse.ArgumentParser):
    """Command-line parser for the libc++ test build driver."""

    def __init__(self):
        super(ArgParser, self).__init__()
        # (flag, add_argument keyword arguments) in declaration order.
        specs = (
            ('--compiler', dict(choices=('clang', 'gcc'), default='clang')),
            ('--bitness', dict(choices=(32, 64), type=int, default=32)),
            ('--host', dict(action='store_true')),
        )
        for flag, kwargs in specs:
            self.add_argument(flag, **kwargs)
def gen_test_config(bitness, compiler, host):
    """Write buildcmds/testconfig.mk describing how one test binary is built.

    bitness: 32 or 64, becomes LOCAL_MULTILIB.
    compiler: 'clang' or 'gcc', controls LOCAL_CLANG and the CXX wrapper.
    host: True emits a host-executable rule, False a device one.
    """
    testconfig_mk_path = os.path.join(THIS_DIR, 'buildcmds/testconfig.mk')
    with open(testconfig_mk_path, 'w') as test_config:
        def emit(line):
            # One makefile line per call.
            print(line, file=test_config)

        if compiler == 'clang':
            emit('LOCAL_CLANG := true')
        elif compiler == 'gcc':
            emit('LOCAL_CLANG := false')

        if bitness == 32:
            emit('LOCAL_MULTILIB := 32')
        elif bitness == 64:
            emit('LOCAL_MULTILIB := 64')

        if compiler == 'clang':
            emit('LOCAL_CXX := $(LOCAL_PATH)/buildcmdscc $(CLANG_CXX)')
        else:
            prefix = 'HOST_' if host else 'TARGET_'
            emit('LOCAL_CXX := $(LOCAL_PATH)/buildcmdscc '
                 '$($(LOCAL_2ND_ARCH_VAR_PREFIX){}CXX)'.format(prefix))

        if host:
            emit('include $(BUILD_HOST_EXECUTABLE)')
        else:
            emit('include $(BUILD_EXECUTABLE)')
def mmm(path):
    """Build every module of the Android.mk under *path* (one-shot make)."""
    makefile = os.path.join(path, 'Android.mk')
    main_mk = 'build/core/main.mk'
    # Copy the environment and point the build system at a single makefile.
    build_env = dict(os.environ)
    build_env['ONE_SHOT_MAKEFILE'] = makefile
    build_env['LIBCXX_TESTING'] = 'true'
    subprocess.check_call(
        ['make', '-C', ANDROID_DIR, '-f', main_mk, 'all_modules'],
        env=build_env)
def gen_build_cmds(bitness, compiler, host):
    """Regenerate the test makefile config, then build the buildcmds module."""
    gen_test_config(bitness, compiler, host)
    buildcmds_dir = os.path.join(THIS_DIR, 'buildcmds')
    mmm(buildcmds_dir)
def main():
    """Build the libc++ tests, then run them under LLVM's lit driver."""
    args, extra_lit_args = ArgParser().parse_known_args()
    gen_build_cmds(args.bitness, args.compiler, args.host)
    lit_script = os.path.join(ANDROID_DIR, 'external/llvm/utils/lit/lit.py')
    test_dir = os.path.join(THIS_DIR, 'test')
    mode = 'host' if args.host else 'device'
    # Unrecognized arguments are forwarded straight to lit.
    lit_flags = ['-sv', '--param=android_mode=' + mode] + extra_lit_args
    command = ['python', lit_script] + lit_flags + [test_dir]
    sys.exit(subprocess.call(command))
# Script entry point.
if __name__ == '__main__':
    main()
| [
"os.path.realpath",
"os.path.join",
"subprocess.call",
"subprocess.check_call"
] | [((727, 753), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (743, 753), False, 'import os\n'), ((786, 817), 'os.path.join', 'os.path.join', (['THIS_DIR', '"""../.."""'], {}), "(THIS_DIR, '../..')\n", (798, 817), False, 'import os\n'), ((1247, 1296), 'os.path.join', 'os.path.join', (['THIS_DIR', '"""buildcmds/testconfig.mk"""'], {}), "(THIS_DIR, 'buildcmds/testconfig.mk')\n", (1259, 1296), False, 'import os\n'), ((2345, 2377), 'os.path.join', 'os.path.join', (['path', '"""Android.mk"""'], {}), "(path, 'Android.mk')\n", (2357, 2377), False, 'import os\n'), ((2588, 2623), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'env': 'env'}), '(cmd, env=env)\n', (2609, 2623), False, 'import subprocess\n'), ((2842, 2901), 'os.path.join', 'os.path.join', (['ANDROID_DIR', '"""external/llvm/utils/lit/lit.py"""'], {}), "(ANDROID_DIR, 'external/llvm/utils/lit/lit.py')\n", (2854, 2901), False, 'import os\n'), ((3085, 3115), 'os.path.join', 'os.path.join', (['THIS_DIR', '"""test"""'], {}), "(THIS_DIR, 'test')\n", (3097, 3115), False, 'import os\n'), ((2724, 2759), 'os.path.join', 'os.path.join', (['THIS_DIR', '"""buildcmds"""'], {}), "(THIS_DIR, 'buildcmds')\n", (2736, 2759), False, 'import os\n'), ((3238, 3258), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (3253, 3258), False, 'import subprocess\n')] |
# -*- coding: utf-8 -*-
# --author-- lanhua.zhou
""" reference文件操作集合 """
import os
import shutil
import logging
import maya.cmds as cmds
import maya.mel as mm
import pymel.core as pm
import zfused_maya.core.filefunc as filefunc
logger = logging.getLogger(__name__)
def publish_file(files, src, dst):
    """Upload each file, preserving its path relative to *src* under *dst*."""
    for _src_file in files:
        # Path of the file relative to the source root.
        _relative = _src_file.split(src)[-1]
        if _relative.startswith("/"):
            _relative = _relative[1:]
        _target = os.path.join(dst, _relative)
        # Upload through the zfused file service.
        filefunc.publish_file(_src_file, _target)
def local_file(files, src, dst):
    """Copy each file into the local *dst* tree, keeping its path below *src*."""
    for _src_file in files:
        # Path of the file relative to the source root.
        _relative = _src_file.split(src)[-1]
        if _relative.startswith("/"):
            _relative = _relative[1:]
        _target = os.path.join(dst, _relative)
        # Create the destination directory on demand before copying.
        _target_dir = os.path.dirname(_target)
        if not os.path.isdir(_target_dir):
            os.makedirs(_target_dir)
        shutil.copy(_src_file, _target)
def change_node_path(nodes, src, dst):
    """Repoint each reference node from the *src* root to the *dst* root."""
    for _ref_node in nodes:
        # Resolved (copy-number-free) path the reference currently loads.
        _current_path = cmds.referenceQuery(_ref_node, f = True, wcn = True)
        _relative = _current_path.split(src)[-1]
        if _relative.startswith("/"):
            _relative = _relative[1:]
        _new_path = "%s/%s" % (dst, _relative)
        # Reload the reference from its new location.
        cmds.file(_new_path, loadReference = _ref_node, options = "v=0;")
def nodes(is_local = True):
    """Return the scene's reference nodes carrying a local ``is_local`` attribute.

    Nodes without an ``is_local`` attribute, or whose attribute equals the
    string "false", are skipped.

    NOTE(review): the ``is_local`` parameter is unused by the body — confirm
    whether the filter was meant to depend on it.

    :rtype: list
    """
    _file_nodes = cmds.ls(type = "reference")
    _result_nodes = []
    for _file_node in _file_nodes:
        # Only consider nodes that expose the custom is_local attribute.
        _has_attr = cmds.objExists("{}.is_local".format(_file_node))
        if not _has_attr:
            continue
        # The attribute is stored as a string; "false" marks non-local refs.
        _is_local = cmds.getAttr("{}.is_local".format(_file_node))
        if _is_local == "false":
            continue
        _result_nodes.append(_file_node)
    return _result_nodes
def files():
    """Return the resolved file path of every local reference node.

    :rtype: list
    """
    _paths = []
    for _ref_node in nodes():
        # Resolved (copy-number-free) path loaded by this reference node.
        _paths.append(cmds.referenceQuery(_ref_node, f = True, wcn = True))
    return _paths
def paths(files):
    """Group the given file paths by shared ancestor directories.

    Each element of the returned list is a set of directory paths common to
    one group of the input files; files whose ancestor-directory sets are
    disjoint end up in separate groups.

    :param files: iterable of file path strings (shadows the module-level
        ``files()`` function; name kept for backward compatibility)
    :rtype: list of sets
    """
    def _ancestors(path):
        # Return every ancestor directory of *path*, nearest first.
        _list = []
        def _walk_up(_path, _list):
            _parent = os.path.dirname(_path)
            if _parent != _path:
                _list.append(_parent)
                _walk_up(_parent, _list)
        _walk_up(path, _list)
        return _list
    def _ancestor_sets(_files):
        # One set of ancestor directories per input file.
        # (The original also built an unused dict here; removed.)
        return [set(_ancestors(_f)) for _f in _files]
    def _group(set_list, value):
        # Intersect the first set with every candidate; candidates sharing
        # no ancestor with it are collected and grouped recursively.
        _first = set_list[0]
        value.append(_first)
        _disjoint = []
        for _candidate in set_list:
            _common = _first & _candidate
            if not _common:
                _disjoint.append(_candidate)
                continue
            # Narrow the current group to the directories shared so far.
            value[len(value) - 1] = _common
        if _disjoint:
            _group(_disjoint, value)
    _set_list = _ancestor_sets(files)
    if not _set_list:
        return []
    _value = []
    _group(_set_list, _value)
    return _value
def import_all_references(except_namespaces = []):
    """Import the contents of every reference in the scene, recursively.

    References whose namespace appears in ``except_namespaces`` are left
    untouched. Returns True once no loadable reference remains.

    NOTE(review): the mutable default ``except_namespaces = []`` is shared
    between calls; it is only read here, but confirm before ever mutating it.
    """
    done = False
    # Keep importing until a pass finds no references left to pull in.
    while (done == False or (len(pm.listReferences()) != 0)):
        refs = pm.listReferences()
        # Filter out references belonging to the excluded namespaces.
        pro = []
        if except_namespaces:
            for ref in refs:
                if ref.namespace not in except_namespaces:
                    pro.append(ref)
            if pro:
                refs = pro
        sn = len(refs)  # references considered this pass
        en = 0          # references found unloaded this pass
        for ref in refs:
            if ref.isLoaded():
                done = False
                ref.importContents()
            else:
                en += 1
                done = True
        # Every remaining reference is unloaded: nothing more to import.
        if sn == en:
            return True
    return True
def remove_all_namespaces():
    """Merge every user namespace into the root namespace and delete it.

    The builtin ``:UI`` and ``:shared`` namespaces are preserved. Failures
    on individual namespaces are logged and skipped.
    """
    allNameSpace = cmds.namespaceInfo(":", recurse = True, listOnlyNamespaces = True, absoluteName = True)
    for whole_ns in allNameSpace :
        if whole_ns != ":UI" and whole_ns != ":shared":
            # Strip the absolute-path prefix to get the leaf namespace name.
            ns = whole_ns.split(':')[-1]
            try :
                # Move the namespace's contents to root, then remove it.
                pm.namespace(mv=[ns,':'],f=1)
                if ns in pm.namespaceInfo(lon=1):
                    pm.namespace(rm=ns)
                logger.info('namespace "{}" removed succeed'.format(ns))
            except Exception as e:
                logger.warning('namespace "{}" removed error'.format(ns))
"logging.getLogger",
"maya.cmds.ls",
"os.makedirs",
"pymel.core.namespace",
"pymel.core.listReferences",
"os.path.join",
"pymel.core.namespaceInfo",
"os.path.dirname",
"maya.cmds.namespaceInfo",
"os.path.isdir",
"shutil.copy",
"maya.cmds.file",
"zfused_maya.core.filefunc.publish_file",
"ma... | [((243, 270), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (260, 270), False, 'import logging\n'), ((2238, 2263), 'maya.cmds.ls', 'cmds.ls', ([], {'type': '"""reference"""'}), "(type='reference')\n", (2245, 2263), True, 'import maya.cmds as cmds\n'), ((4844, 4930), 'maya.cmds.namespaceInfo', 'cmds.namespaceInfo', (['""":"""'], {'recurse': '(True)', 'listOnlyNamespaces': '(True)', 'absoluteName': '(True)'}), "(':', recurse=True, listOnlyNamespaces=True, absoluteName\n =True)\n", (4862, 4930), True, 'import maya.cmds as cmds\n'), ((565, 596), 'os.path.join', 'os.path.join', (['dst', '_extend_file'], {}), '(dst, _extend_file)\n', (577, 596), False, 'import os\n'), ((652, 694), 'zfused_maya.core.filefunc.publish_file', 'filefunc.publish_file', (['_file', '_backup_file'], {}), '(_file, _backup_file)\n', (673, 694), True, 'import zfused_maya.core.filefunc as filefunc\n'), ((995, 1026), 'os.path.join', 'os.path.join', (['dst', '_extend_file'], {}), '(dst, _extend_file)\n', (1007, 1026), False, 'import os\n'), ((1160, 1188), 'os.path.dirname', 'os.path.dirname', (['_local_file'], {}), '(_local_file)\n', (1175, 1188), False, 'import os\n'), ((1285, 1316), 'shutil.copy', 'shutil.copy', (['_file', '_local_file'], {}), '(_file, _local_file)\n', (1296, 1316), False, 'import shutil\n'), ((1522, 1571), 'maya.cmds.referenceQuery', 'cmds.referenceQuery', (['_file_node'], {'f': '(True)', 'wcn': '(True)'}), '(_file_node, f=True, wcn=True)\n', (1541, 1571), True, 'import maya.cmds as cmds\n'), ((2066, 2138), 'maya.cmds.file', 'cmds.file', (['_new_file_text_path'], {'loadReference': '_file_node', 'options': '"""v=0;"""'}), "(_new_file_text_path, loadReference=_file_node, options='v=0;')\n", (2075, 2138), True, 'import maya.cmds as cmds\n'), ((2813, 2862), 'maya.cmds.referenceQuery', 'cmds.referenceQuery', (['_file_node'], {'f': '(True)', 'wcn': '(True)'}), '(_file_node, f=True, wcn=True)\n', (2832, 2862), True, 'import maya.cmds as cmds\n'), 
((4213, 4232), 'pymel.core.listReferences', 'pm.listReferences', ([], {}), '()\n', (4230, 4232), True, 'import pymel.core as pm\n'), ((1204, 1229), 'os.path.isdir', 'os.path.isdir', (['_local_dir'], {}), '(_local_dir)\n', (1217, 1229), False, 'import os\n'), ((1243, 1266), 'os.makedirs', 'os.makedirs', (['_local_dir'], {}), '(_local_dir)\n', (1254, 1266), False, 'import os\n'), ((3124, 3146), 'os.path.dirname', 'os.path.dirname', (['_path'], {}), '(_path)\n', (3139, 3146), False, 'import os\n'), ((4169, 4188), 'pymel.core.listReferences', 'pm.listReferences', ([], {}), '()\n', (4186, 4188), True, 'import pymel.core as pm\n'), ((5098, 5129), 'pymel.core.namespace', 'pm.namespace', ([], {'mv': "[ns, ':']", 'f': '(1)'}), "(mv=[ns, ':'], f=1)\n", (5110, 5129), True, 'import pymel.core as pm\n'), ((5153, 5176), 'pymel.core.namespaceInfo', 'pm.namespaceInfo', ([], {'lon': '(1)'}), '(lon=1)\n', (5169, 5176), True, 'import pymel.core as pm\n'), ((5198, 5217), 'pymel.core.namespace', 'pm.namespace', ([], {'rm': 'ns'}), '(rm=ns)\n', (5210, 5217), True, 'import pymel.core as pm\n')] |
import os
import ast
import sys
import math
import time
import string
import hashlib
import tempfile
import subprocess
from operator import itemgetter
from contextlib import contextmanager
from getpass import getpass
import random; random = random.SystemRandom()
import sdb.subprocess_compat as subprocess
from sdb.util import force_bytes
from sdb.clipboard import set_clipboard_once, ClipboardException
from sdb.diceware import WORDS
from sdb import gpg_agent
def encode(records):
    """Serialize records as one repr() per line, UTF-8 encoded."""
    lines = [repr(record) for record in records]
    return ('\n'.join(lines) + '\n').encode('utf-8')
def decode(str):
    """Parse the byte serialization produced by encode() back into records.

    (The parameter shadows the builtin ``str``; the name is kept for
    backward compatibility with keyword callers.)
    """
    text = str.decode('utf-8')
    return [ast.literal_eval(line) for line in text.split('\n') if line]
# Character pools for generated passwords, from most to least restrictive.
CASE_ALPHABET = string.ascii_letters
ALPHANUMERIC = CASE_ALPHABET + string.digits
EVERYTHING = ALPHANUMERIC + string.punctuation


def gen_password(choices=ALPHANUMERIC, length=10):
    """Return a random password of *length* characters drawn from *choices*."""
    picks = [random.choice(choices) for _ in range(length)]
    return ''.join(picks)
def requirements_satisfied(requirements, str):
    """Return True when every character of *requirements* occurs in *str*.

    (The second parameter shadows the builtin ``str``; the name is kept for
    backward compatibility with keyword callers.)
    """
    # Generator form short-circuits on the first missing requirement
    # instead of materializing the whole membership list.
    return all(i in str for i in requirements)
def gen_password_require(requirements, choices=ALPHANUMERIC, length=10):
    """Generate a password of *length* containing every requirement character.

    Raises Exception when the requirements cannot fit in the password or are
    not all drawn from *choices*.
    """
    feasible = (len(requirements) <= length
                and requirements_satisfied(requirements, choices))
    if not feasible:
        raise Exception(
            "That's impossible, you can't make a password containing %r with only %r!" % (
                requirements, choices))
    # Rejection-sample until a candidate happens to contain all requirements.
    while True:
        candidate = gen_password(choices, length)
        if requirements_satisfied(requirements, candidate):
            return candidate
def gen_password_entropy(entropy, choices=ALPHANUMERIC):
    """Generate a password with at least *entropy* bits, sizing it as needed."""
    bits_per_char = math.log(len(choices), 2)
    required_length = int(math.ceil(entropy / bits_per_char))
    return gen_password(choices=choices, length=required_length)
def match(needle, haystack):
    """Score how well *needle* fuzzily matches *haystack* (0 = no match).

    The needle's characters must appear in order within the haystack. Any
    match scores at least 1; each matched character adds
    1 / (position of the previous match + 1).
    """
    n = len(haystack)
    score = 1
    pos = 0
    prev_pos = 0
    for ch in needle:
        # Advance to the next occurrence of ch in the haystack.
        while pos < n and haystack[pos] != ch:
            pos += 1
        if pos >= n:
            return 0
        score += 1 / (prev_pos + 1.)
        prev_pos = pos
        pos += 1
    return score
def record_score(term, records):
    """Fuzzy-match *term* against a record's domain, username and notes."""
    haystack = records[0] + records[1] + records[3]
    return match(term, haystack)
def search(term, records):
    """Return the records fuzzily matching *term*, best matches first."""
    scored = [(record_score(term, rec), rec) for rec in records]
    # Drop non-matches (score 0), then order by descending score.
    matching = [pair for pair in scored if pair[0]]
    matching.sort(key=itemgetter(0), reverse=True)
    return [rec for _, rec in matching]
def is_unique_list(lst):
    """Return True when *lst* contains no duplicate elements."""
    return len(set(lst)) == len(lst)


def disambiguate(records):
    """Return the shortest per-record projection that tells records apart.

    Tries domain only, then domain+username, then domain+username+notes;
    falls back to the full records if even that is ambiguous.
    """
    projections = (itemgetter(0),
                   itemgetter(0, 1),
                   itemgetter(0, 1, 3))
    for project in projections:
        projected = [project(rec) for rec in records]
        if is_unique_list(projected):
            return projected
    # just in case none were unique
    return records
class GPGException(Exception):
    """Base class for errors reported by the gpg subprocess."""
    pass


class IncorrectPasswordException(GPGException):
    """The supplied passphrase did not decrypt the file."""
    pass


class InvalidEncryptedFileException(GPGException):
    """The input was not recognizable OpenPGP data."""
    pass


class FileCorruptionException(GPGException):
    """The encrypted file is damaged (CRC/zlib/packet errors)."""
    pass


def gpg_exception_factory(returncode, message):
    """Map a gpg exit status and its stderr output to a specific exception.

    Only returncode 2 carries distinguishable error classes; anything else
    (or an unrecognized message) yields a generic Exception.
    """
    if returncode == 2:
        if b'decryption failed: bad key' in message:
            return IncorrectPasswordException(message)
        if b'CRC error;' in message:
            return FileCorruptionException(message)
        if b'fatal: zlib inflate problem: invalid distance' in message:
            return FileCorruptionException(message)
        if b'decryption failed: invalid packet' in message:
            return FileCorruptionException(message)
        # BUG FIX: the original tested the bytes literal itself (always
        # truthy) instead of membership in *message*, so every returncode-2
        # error was misreported as InvalidEncryptedFileException and the
        # generic fallback below was unreachable.
        if b'no valid OpenPGP data found' in message:
            return InvalidEncryptedFileException(message)
    # "unkown" typo in the original message fixed as well.
    return Exception("unknown error", returncode, message)
def dencrypt(command, pw, data):
    """Run *command* (a gpg invocation), feeding it the passphrase then *data*.

    The passphrase is written first, newline-terminated, matching gpg's
    ``--passphrase-fd 0`` protocol. Raises an exception built by
    gpg_exception_factory on a non-zero exit status.
    """
    if '\n' in pw:
        raise Exception('Newlines not allowed in passwords')
    process = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdin = process.stdin
    stdin.write(force_bytes(pw))
    stdin.write(b'\n')
    stdin.write(data)
    stdout_data, stderr_data = process.communicate()
    if process.returncode != 0:
        raise gpg_exception_factory(process.returncode, stderr_data)
    return stdout_data
def encrypt(pw, data):
    """Symmetrically encrypt *data* with *pw* (ASCII-armored AES via gpg)."""
    command = [
        'gpg', '-c',
        '--passphrase-fd', '0',
        '--batch',
        '--armor',
        '--cipher-algo', 'AES',
        '--digest-algo', 'SHA256',
    ]
    return dencrypt(command, pw, data)
def decrypt(pw, data):
    """Decrypt gpg-encrypted *data* using the passphrase *pw*."""
    command = ['gpg', '-d', '--passphrase-fd', '0', '--batch']
    return dencrypt(command, pw, data)
def get_tmp_file(filename):
    """Return the hidden '.<name>.tmp' sibling path used for atomic writes."""
    directory, name = os.path.split(filename)
    hidden = '.' + name.lstrip('.') + '.tmp'
    return os.path.join(directory, hidden)
def get_backup_file(filename):
    """Return the hidden '.<name>.bak' sibling path used for backups."""
    directory, name = os.path.split(filename)
    hidden = '.' + name.lstrip('.') + '.bak'
    return os.path.join(directory, hidden)
@contextmanager
def atomic_replace(filename):
    """Context manager that atomically replaces *filename* with what is
    written to the yielded file object; on exception nothing is changed.

    A backup of the previous content is written to get_backup_file(filename)
    whenever the content actually changes. Writing nothing (blank content)
    is rejected.

    ::
        with atomic_replace(filename) as f:
            f.write('asdf')
        with atomic_replace(filename) as f:
            f.write('asdf')
            raise Exception
        # nothing happens to the file
    """
    tmpfile_name = get_tmp_file(filename)
    # O_EXCL: refuse to clobber a leftover temp file from a crashed run.
    fd = os.open(tmpfile_name, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o600)
    try:
        f = os.fdopen(fd, "w+b")
        yield f
        # Force the new content to disk before we consider renaming.
        f.flush()
        os.fsync(fd)  # fdatasync? I don't know
        f.seek(0)
        new_content = f.read()
        if not new_content:
            raise Exception("I don't think you want to blank this file...")
        # Read the current content (absent file counts as empty).
        try:
            with open(filename, 'rb') as current_f:
                current_content = current_f.read()
        except IOError:
            current_content = b''
        # Keep a backup of the old content only when it actually changed.
        if current_content != new_content:
            with open(get_backup_file(filename), 'w+b') as backup_file:
                backup_file.write(current_content)
    except:
        # NOTE(review): bare except deliberately also catches the
        # generator-close path; the temp file is removed and the original
        # exception re-raised.
        # If there was an exception, remove the temporary file and reraise
        os.unlink(tmpfile_name)
        raise
    else:
        # No exception, rename the temp file over the original
        os.rename(tmpfile_name, filename)
    finally:
        f.close()
def edit_in_editor(current):
    """Open *current* in $EDITOR (default vim) and return the edited text.

    The temporary file is overwritten with zeros before it is deleted so
    private data is not left on disk.
    """
    EDITOR = os.environ.get('EDITOR', 'vim')
    with tempfile.NamedTemporaryFile(mode='w+') as f:
        try:
            f.write(current)
            f.flush()
            # Block until the user's editor exits.
            subprocess.call([EDITOR, f.name])
            f.seek(0)
            return f.read()
        finally:
            # don't leave potentially private data lying around
            f.write('0' * os.path.getsize(f.name))
            f.flush()
def pretty_record(record):
    """Format a record as 'username@domain', appending notes when present."""
    domain, username = record[0], record[1]
    text = '%s@%s' % (username, domain)
    notes = record[3]
    if notes:
        text = text + ': ' + notes
    return text
class InteractiveSession(object):
    """Interactive password-database session bound to one encrypted file.

    Prompts on *input*/*output* (defaulting to the terminal), caches the
    master password (optionally via gpg-agent), and exposes the CLI actions
    (add/show/edit/delete/raw).
    """
    def __init__(self, args, output=sys.stdout, input=sys.stdin, password=None):
        self.args = args
        self.file = args.file
        self.output = output
        self.input = input
        # gpg-agent is optional; a missing agent environment raises KeyError.
        try:
            self.gpg_agent = gpg_agent.GpgAgent()
        except KeyError:
            self.gpg_agent = None
        # Agent cache key derived from the database path, not its content.
        self.gpg_agent_password_id = 'sdb_m:{file_fingerprint}'.format(
            file_fingerprint=hashlib.md5(force_bytes(self.file)).hexdigest()
        )
        self.password = password
        if not self.password:
            self.password = self.get_master_password()
    def get_master_password(self, error=None):
        """Return the cached master password, prompting for it if needed.

        *error* is shown to the user when a previous attempt failed.
        """
        if self.password:
            return self.password
        if self.input == sys.stdin:
            if self.gpg_agent:
                # pinentry requires a non-empty error string.
                error = error or 'X'
                self.password = self.gpg_agent.get_passphrase(
                    self.gpg_agent_password_id,
                    prompt='Master password',
                    error=error
                )
            else:
                if error:
                    self.output.write('Error: {error}, try again: '.format(error=error))
                self.password = getpass()
        else:
            # Non-tty input (tests/pipes): read the password as a plain line.
            self.output.write('Password: ')
            self.output.flush()
            self.password = self.input.readline().rstrip('\n')
        return self.password
    def clear_master_password(self):
        """Forget the cached password, locally and in gpg-agent."""
        self.password = None
        if self.gpg_agent:
            self.gpg_agent.clear_passphrase(self.gpg_agent_password_id)
    def prompt(self, prompt='', required=True, password=False):
        """Prompt until a line is entered; hide input when *password* is set."""
        while True:
            if password and self.input == sys.stdin:
                line = getpass(prompt)
            else:
                self.output.write(prompt)
                self.output.flush()
                line = self.input.readline().rstrip('\n')
            if not required or line:
                return line
    def get_record(self, domain=None):
        """Interactively build a new (domain, username, password, notes) tuple."""
        domain = domain or self.prompt('Domain: ')
        username = self.prompt('Username: ')
        # Blank password means: generate a 128-bit random one.
        password = self.prompt(
            'Password [blank to generate]: ',
            required=False,
            password=True
        ) or gen_password_entropy(128)
        notes = self.prompt('Notes: ', required=False)
        return (domain, username, password, notes)
    def edit_record(self, record):
        """Interactively edit *record*, returning the updated tuple."""
        new_record = list(record)
        new_record[0] = self.prompt('Name [%s]: ' % record[0], required=False) or record[0]
        new_record[1] = self.prompt('Username [%s]: ' % record[1], required=False) or record[1]
        # 'g' regenerates the password; blank keeps the current one.
        pw = self.prompt('Password []/g: ', required=False, password=True) or record[2]
        if pw == 'g':
            new_record[2] = gen_password_entropy(128)
        elif pw:
            new_record[2] = pw
        self.output.write("Notes: %s\n" % record[3])
        edit = self.prompt('Edit? [n]: ', required=False) or 'n'
        if edit[0] == 'y':
            new_record[3] = edit_in_editor(record[3])
        return tuple(new_record)
    def find_record(self, query, records):
        """Fuzzy-find one record for *query*, asking the user on ambiguity."""
        possibilities = search(query, records)
        if len(possibilities) > 1:
            choices = disambiguate(possibilities)
            for i, choice in enumerate(choices):
                self.output.write('%s) %s\n' % (i, choice))
            choice = self.prompt('Which did you mean? [0]: ', required=False) or 0
            return possibilities[int(choice)]
        else:
            return possibilities[0]
    def read_records(self, error=None):
        """Decrypt and decode the database; retry on a wrong password.

        Returns an empty list when the database file does not exist yet.
        """
        try:
            with open(self.file, 'rb') as f:
                password = self.get_master_password(error)
                try:
                    return decode(decrypt(password, f.read()))
                except IncorrectPasswordException:
                    self.clear_master_password()
                    return self.read_records(error='Incorrect password')
                except:
                    # Any other failure: drop the cached password and re-raise.
                    self.clear_master_password()
                    raise
        except IOError:
            return []
    def add_action(self):
        """Append a newly prompted record to the database."""
        record = self.get_record(self.args.domain or self.prompt('Domain: '))
        def add(records):
            return records + [record]
        self.edit_transaction(add)
    def show_action(self, clipboard=10):
        """Display a record; copy username then password to the clipboard.

        With a falsy *clipboard* the password is returned instead of copied.
        """
        record = self.find_record(self.args.domain or self.prompt("Domain: "), self.read_records())
        self.output.write(pretty_record(record))
        self.output.write("\n")
        if clipboard:
            try:
                self.output.write("username in clipboard\n")
                set_clipboard_once(record[1])
                self.output.write("password in clipboard\n")
                set_clipboard_once(record[2])
            except ClipboardException as e:
                # Fall back to printing the password when no clipboard exists.
                self.output.write("couldn't set clipboard: %s\n" % e.output.split('\n')[0])
                self.output.write(record[2])
                self.output.write("\n")
        else:
            return record[2]
    def edit_transaction(self, callback):
        """Atomically rewrite the database as callback(current_records).

        The re-encrypted file is read back and verified before the atomic
        rename commits it.
        """
        with atomic_replace(self.file) as out:
            records = callback(self.read_records())
            assert isinstance(records, list)
            if not is_unique_list(records):
                raise Exception("You have two identical records. I don't think you want this.")
            out.write(encrypt(self.password, encode(records)))
            out.seek(0)
            # Round-trip check: what we wrote must decrypt to what we meant.
            assert records == decode(decrypt(self.password, out.read()))
    def edit_action(self):
        """Interactively edit one record selected by domain."""
        def edit(records):
            record = self.find_record(self.args.domain or self.prompt('Domain: '), records)
            new_record = self.edit_record(record)
            for i, choice in enumerate(records):
                if choice == record:
                    records[i] = tuple(new_record)
            return records
        self.edit_transaction(edit)
    def delete_action(self):
        """Delete one record selected by domain, after confirmation."""
        def delete(records):
            record = self.find_record(self.args.domain or self.prompt('Domain: '), records)
            self.output.write(pretty_record(record))
            self.output.write('\n')
            confirm = self.prompt('Really? [n]: ', required=False) or 'n'
            if confirm[0] == 'y':
                for i, choice in enumerate(records):
                    if choice == record:
                        del records[i]
            else:
                self.output.write("Ok, cancelled\n")
            return records
        self.edit_transaction(delete)
    def raw_action(self):
        """Dump the decrypted database bytes to the output stream."""
        try:
            # PY3
            output = self.output.buffer
        except AttributeError:
            output = self.output
        output.write(encode(self.read_records()))
| [
"os.open",
"sdb.subprocess_compat.call",
"os.fsync",
"sdb.clipboard.set_clipboard_once",
"operator.itemgetter",
"sdb.util.force_bytes",
"sdb.gpg_agent.GpgAgent",
"getpass.getpass",
"os.path.split",
"os.unlink",
"tempfile.NamedTemporaryFile",
"os.path.getsize",
"random.choice",
"os.rename",... | [((241, 262), 'random.SystemRandom', 'random.SystemRandom', ([], {}), '()\n', (260, 262), False, 'import random\n'), ((4054, 4154), 'sdb.subprocess_compat.Popen', 'subprocess.Popen', (['command'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n', (4070, 4154), True, 'import sdb.subprocess_compat as subprocess\n'), ((4865, 4888), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (4878, 4888), False, 'import os\n'), ((5028, 5051), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (5041, 5051), False, 'import os\n'), ((5474, 5536), 'os.open', 'os.open', (['tmpfile_name', '(os.O_CREAT | os.O_EXCL | os.O_RDWR)', '(384)'], {}), '(tmpfile_name, os.O_CREAT | os.O_EXCL | os.O_RDWR, 384)\n', (5481, 5536), False, 'import os\n'), ((6479, 6510), 'os.environ.get', 'os.environ.get', (['"""EDITOR"""', '"""vim"""'], {}), "('EDITOR', 'vim')\n", (6493, 6510), False, 'import os\n'), ((2714, 2727), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (2724, 2727), False, 'from operator import itemgetter\n'), ((2744, 2760), 'operator.itemgetter', 'itemgetter', (['(0)', '(1)'], {}), '(0, 1)\n', (2754, 2760), False, 'from operator import itemgetter\n'), ((2777, 2796), 'operator.itemgetter', 'itemgetter', (['(0)', '(1)', '(3)'], {}), '(0, 1, 3)\n', (2787, 2796), False, 'from operator import itemgetter\n'), ((4210, 4225), 'sdb.util.force_bytes', 'force_bytes', (['pw'], {}), '(pw)\n', (4221, 4225), False, 'from sdb.util import force_bytes\n'), ((5560, 5580), 'os.fdopen', 'os.fdopen', (['fd', '"""w+b"""'], {}), "(fd, 'w+b')\n", (5569, 5580), False, 'import os\n'), ((5623, 5635), 'os.fsync', 'os.fsync', (['fd'], {}), '(fd)\n', (5631, 5635), False, 'import os\n'), ((6370, 6403), 'os.rename', 'os.rename', (['tmpfile_name', 'filename'], {}), '(tmpfile_name, filename)\n', (6379, 6403), False, 'import os\n'), 
((6520, 6558), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w+"""'}), "(mode='w+')\n", (6547, 6558), False, 'import tempfile\n'), ((984, 1006), 'random.choice', 'random.choice', (['choices'], {}), '(choices)\n', (997, 1006), False, 'import random\n'), ((2496, 2509), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (2506, 2509), False, 'from operator import itemgetter\n'), ((2542, 2555), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (2552, 2555), False, 'from operator import itemgetter\n'), ((6251, 6274), 'os.unlink', 'os.unlink', (['tmpfile_name'], {}), '(tmpfile_name)\n', (6260, 6274), False, 'import os\n'), ((6641, 6674), 'sdb.subprocess_compat.call', 'subprocess.call', (['[EDITOR, f.name]'], {}), '([EDITOR, f.name])\n', (6656, 6674), True, 'import sdb.subprocess_compat as subprocess\n'), ((7281, 7301), 'sdb.gpg_agent.GpgAgent', 'gpg_agent.GpgAgent', ([], {}), '()\n', (7299, 7301), False, 'from sdb import gpg_agent\n'), ((738, 760), 'ast.literal_eval', 'ast.literal_eval', (['line'], {}), '(line)\n', (754, 760), False, 'import ast\n'), ((8224, 8233), 'getpass.getpass', 'getpass', ([], {}), '()\n', (8231, 8233), False, 'from getpass import getpass\n'), ((8743, 8758), 'getpass.getpass', 'getpass', (['prompt'], {}), '(prompt)\n', (8750, 8758), False, 'from getpass import getpass\n'), ((11662, 11691), 'sdb.clipboard.set_clipboard_once', 'set_clipboard_once', (['record[1]'], {}), '(record[1])\n', (11680, 11691), False, 'from sdb.clipboard import set_clipboard_once, ClipboardException\n'), ((11769, 11798), 'sdb.clipboard.set_clipboard_once', 'set_clipboard_once', (['record[2]'], {}), '(record[2])\n', (11787, 11798), False, 'from sdb.clipboard import set_clipboard_once, ClipboardException\n'), ((6832, 6855), 'os.path.getsize', 'os.path.getsize', (['f.name'], {}), '(f.name)\n', (6847, 6855), False, 'import os\n'), ((7475, 7497), 'sdb.util.force_bytes', 'force_bytes', (['self.file'], {}), '(self.file)\n', (7486, 
7497), False, 'from sdb.util import force_bytes\n')] |
# Copyright 2021-present, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from torch.optim import SGD
import torch
import torchvision
from argparse import Namespace
from utils.conf import get_device
def extract_features(self, x):
    """Run *x* through a ResNet-style backbone and return flat feature vectors.

    NOTE: presumably *self* is a torchvision-style ResNet exposing
    conv1/bn1/relu/maxpool/layer1..layer4/avgpool — confirm at call sites.
    """
    stages = (self.conv1, self.bn1, self.relu, self.maxpool,
              self.layer1, self.layer2, self.layer3, self.layer4,
              self.avgpool)
    out = x
    for stage in stages:
        out = stage(out)
    # Collapse everything after the batch dimension into one feature axis.
    return torch.flatten(out, 1)
class ContinualModel(nn.Module):
    """
    Continual learning model.

    Base class: wraps a backbone, its loss, an SGD optimizer and the data
    transform; subclasses implement observe(). Labels >= 1000 are treated
    throughout as the "unsupervised/unlabeled" marker.
    """
    NAME = None           # identifier of the concrete method
    COMPATIBILITY = []    # supported continual-learning settings
    def __init__(self, backbone: nn.Module, loss: nn.Module,
                args: Namespace, transform: torchvision.transforms) -> None:
        super(ContinualModel, self).__init__()
        self.net = backbone
        self.loss = loss
        self.args = args
        self.transform = transform
        self.opt = SGD(self.net.parameters(), lr=self.args.lr)
        self.device = get_device()
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Computes a forward pass.
        :param x: batch of inputs
        :param task_label: some models require the task label
        :return: the result of the computation
        """
        return self.net(x)
    def observe(self, inputs: torch.Tensor, labels: torch.Tensor,
                not_aug_inputs: torch.Tensor) -> float:
        """
        Compute a training step over a given batch of examples.
        :param inputs: batch of examples
        :param labels: ground-truth labels
        :param kwargs: some methods could require additional parameters
        :return: the value of the loss function
        """
        pass
    @staticmethod
    def discard_unsupervised_labels(inputs, labels, not_aug_inputs):
        # Keep only examples with real labels (< 1000).
        mask = labels < 1000
        return inputs[mask], labels[mask], not_aug_inputs[mask]
    @staticmethod
    def discard_supervised_labels(inputs, labels, not_aug_inputs):
        # Keep only examples flagged as unlabeled (>= 1000).
        mask = labels >= 1000
        return inputs[mask], labels[mask], not_aug_inputs[mask]
    def guess_notaug_weighted(self, inputs, labels, not_aug_inputs):
        """Assign pseudo-labels to unlabeled examples using a distance-weighted
        vote over buffer features; drop unlabeled examples when the buffer is
        empty.

        NOTE(review): relies on self.buffer / self.feature_extractor /
        self.norm_transform / self.eye being set by the subclass.
        """
        mask = labels > 999
        if not self.buffer.is_empty():
            if mask.sum():
                with torch.no_grad():
                    cur_feats = extract_features(self.feature_extractor,
                        torch.stack([self.norm_transform(ee) for ee in not_aug_inputs]))
                _, buf_labels, buf_feats = self.buffer.get_data(100)
                buf_feats = buf_feats.unsqueeze(1)
                # Negative squared distance buffer->current, softmax-normalized
                # per current example across the buffer.
                dists = - (buf_feats - cur_feats).pow(2).sum(2)
                soft_dists = torch.softmax((dists - dists.mean(0)) / dists.std(0), dim=0)
                lab = self.eye[buf_labels].unsqueeze(1) * soft_dists.unsqueeze(2)
                labels[mask] = lab.mean(0).max(dim=1)[1][mask]
            assert (labels < 999).all()
        else:
            # No buffer yet: drop the unlabeled examples entirely.
            not_aug_inputs = not_aug_inputs[labels < 999]
            inputs = inputs[labels < 999]
            labels = labels[labels < 999]
        if inputs.shape[0]:
            with torch.no_grad():
                cur_feats = extract_features(self.feature_extractor,
                    torch.stack([self.norm_transform(ee) for ee in not_aug_inputs]))
        else:
            cur_feats = inputs
        return inputs, labels, not_aug_inputs, cur_feats
    def pseudo_label(self, inputs, labels, not_aug_inputs, conf=5.5):
        """Pseudo-label unlabeled examples with the net's own confident
        predictions (top-1/top-2 margin > *conf* within the current task's
        class slice); remaining unlabeled examples are dropped.
        """
        self.net.eval()
        with torch.no_grad():
            psi_outputs = self.net(inputs)
            # Margin between the two highest logits of the current task.
            confs = psi_outputs[:, self.cpt * self.task: self.cpt * (self.task+1)].topk(2, axis=1)[0]
            confs = confs[:, 0] - confs[:, 1]
            conf_thresh = conf
            confidence_mask = confs > conf_thresh #torch.zeros_like(labels).bool()
            _, psi_labels = torch.max(psi_outputs.data[:, self.cpt * self.task: self.cpt * (self.task+1)], 1)
            # Offset class indices back into the global label space.
            psi_labels += self.cpt * self.task
        out_labels = labels.clone()
        if confidence_mask.sum():
            out_labels[(labels > 999) & confidence_mask] = psi_labels[(labels > 999) & confidence_mask]
        self.net.train()
        return self.drop_unlabeled(inputs, out_labels, not_aug_inputs)[:-1]
    def guess_notaug(self, labels, not_aug_inputs):
        """k-NN pseudo-labeling against class means.

        NOTE(review): the extract_features() result on the first line is
        discarded, and *labels* is overwritten wholesale rather than only at
        unlabeled positions — this looks buggy; confirm intended behavior.
        """
        if (labels > 999).sum():
            extract_features(self.feature_extractor, not_aug_inputs)
            feats = self.buffer.logits.unsqueeze(1)
            labels = self.eye[self.buffer.labels[(self.class_means - feats).pow(2).sum(2).topk(
                self.args.k, largest=False)[1]].mode()[0]]
        return labels
    def drop_unlabeled(self, inputs, labels, not_aug_inputs):
        """Drop examples marked unlabeled (>= 1000) and return the filtered
        batch plus features of the survivors (when a feature extractor is set).
        """
        not_aug_inputs = not_aug_inputs[labels < 1000]
        inputs = inputs[labels < 1000]
        labels = labels[labels < 1000]
        if inputs.shape[0] and hasattr(self, 'feature_extractor'):
            with torch.no_grad():
                cur_feats = extract_features(self.feature_extractor,
                    torch.stack([self.norm_transform(ee) for ee in not_aug_inputs]))
        else:
            cur_feats = inputs
        return inputs, labels, not_aug_inputs, cur_feats
| [
"torch.no_grad",
"torch.max",
"torch.flatten",
"utils.conf.get_device"
] | [((603, 622), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (616, 622), False, 'import torch\n'), ((1143, 1155), 'utils.conf.get_device', 'get_device', ([], {}), '()\n', (1153, 1155), False, 'from utils.conf import get_device\n'), ((3714, 3729), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3727, 3729), False, 'import torch\n'), ((4077, 4164), 'torch.max', 'torch.max', (['psi_outputs.data[:, self.cpt * self.task:self.cpt * (self.task + 1)]', '(1)'], {}), '(psi_outputs.data[:, self.cpt * self.task:self.cpt * (self.task + \n 1)], 1)\n', (4086, 4164), False, 'import torch\n'), ((5163, 5178), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5176, 5178), False, 'import torch\n'), ((2416, 2431), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2429, 2431), False, 'import torch\n'), ((3292, 3307), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3305, 3307), False, 'import torch\n')] |
import argparse
import datetime
import sys
import threading
import time
import matplotlib.pyplot as plt
import numpy
import yaml
from .__about__ import __copyright__, __version__
from .main import (
cooldown,
measure_temp,
measure_core_frequency,
measure_ambient_temperature,
test,
)
def _get_version_text():
    """Build the two-line version banner shown by ``--version``."""
    info = sys.version_info
    banner = "stressberry {} [Python {}.{}.{}]".format(
        __version__, info.major, info.minor, info.micro
    )
    return banner + "\n" + __copyright__
def _get_parser_run():
    """Build the command-line argument parser for the ``run`` command."""
    parser = argparse.ArgumentParser(
        description="Run stress test for the Raspberry Pi."
    )
    parser.add_argument(
        "--version", "-v", action="version", version=_get_version_text()
    )
    parser.add_argument(
        "-n", "--name",
        type=str, default="stressberry data",
        help="name the data set (default: 'stressberry data')",
    )
    parser.add_argument(
        "-t", "--temperature-file",
        type=str, default=None,
        help="temperature file e.g /sys/class/thermal/thermal_zone0/temp (default: vcgencmd)",
    )
    parser.add_argument(
        "-d", "--duration",
        type=int, default=300,
        help="stress test duration in seconds (default: 300)",
    )
    parser.add_argument(
        "-i", "--idle",
        type=int, default=150,
        help="idle time in seconds at start and end of stress test (default: 150)",
    )
    parser.add_argument(
        "--cooldown",
        type=int, default=60,
        help="poll interval seconds to check for stable temperature (default: 60)",
    )
    parser.add_argument(
        "-c", "--cores",
        type=int, default=None,
        help="number of CPU cores to stress (default: all)",
    )
    parser.add_argument(
        "-f", "--frequency-file",
        type=str, default=None,
        help="CPU core frequency file e.g. /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq (default: vcgencmd)",
    )
    parser.add_argument(
        "-a", "--ambient",
        type=str, nargs=2, default=None,
        help="measure ambient temperature. Sensor Type [11|22|2302] <GPIO Number> e.g. 2302 26",
    )
    # positional: output file is opened for writing at parse time
    parser.add_argument("outfile", type=argparse.FileType("w"), help="output data file")
    return parser
def run(argv=None):
    """Entry point for the ``run`` command.

    Waits for a stable baseline temperature, launches the stress test in a
    background thread, samples temperature/frequency (and optionally an
    ambient sensor) while it runs, then dumps the collected series to a
    YAML file.
    """
    parser = _get_parser_run()
    args = parser.parse_args(argv)
    # Cool down first
    print("Awaiting stable baseline temperature...")
    cooldown(interval=args.cooldown, filename=args.temperature_file)
    # Start the stress test in another thread
    t = threading.Thread(
        target=lambda: test(args.duration, args.idle, args.cores), args=()
    )
    t.start()
    times = []
    temps = []
    freqs = []
    ambient = []
    # Sample until the stress-test thread finishes (idle + stress + idle).
    while t.is_alive():
        times.append(time.time())
        temps.append(measure_temp(args.temperature_file))
        freqs.append(measure_core_frequency(args.frequency_file))
        if args.ambient:
            ambient_temperature = measure_ambient_temperature(
                sensor_type=args.ambient[0], pin=args.ambient[1]
            )
            if ambient_temperature is None:
                # Reading the sensor can return None if it times out.
                # If never had a good result, probably configuration error
                # Else use last known value if available or worst case set to zero
                if not ambient:
                    message = "Could not read ambient temperature sensor {} on pin {}".format(
                        args.ambient[0], args.ambient[1]
                    )
                else:
                    message = "WARN - Could not read ambient temperature, using last good value"
                print(message)
                # Fall back to the most recent non-None reading, else 0.
                ambient_temperature = next(
                    (temp for temp in reversed(ambient) if temp is not None), 0
                )
            ambient.append(ambient_temperature)
            delta_t = temps[-1] - ambient[-1]
            print(
                "Temperature (current | ambient | ΔT): {:4.1f}°C | {:4.1f}°C | {:4.1f}°C - Frequency: {:4.0f}MHz".format(
                    temps[-1], ambient[-1], delta_t, freqs[-1]
                )
            )
        else:
            print(
                "Current temperature: {:4.1f}°C - Frequency: {:4.0f}MHz".format(
                    temps[-1], freqs[-1]
                )
            )
        # Choose the sample interval such that we have a respectable number of
        # data points
        t.join(2.0)
    # normalize times
    time0 = times[0]
    times = [tm - time0 for tm in times]
    args.outfile.write(
        "# This file was created by stressberry v{} on {}\n".format(
            __version__, datetime.datetime.now()
        )
    )
    yaml.dump(
        {
            "name": args.name,
            "time": times,
            "temperature": temps,
            "cpu frequency": freqs,
            "ambient": ambient,
        },
        args.outfile,
    )
    return
def plot(argv=None):
    """Entry point for the ``plot`` command: render recorded data files.

    Series are drawn so that the one with the lowest terminal temperature
    is plotted last (on top, and last in the legend).  Core frequency is
    overlaid on a secondary axis when exactly one input file is given
    together with ``--frequency``.
    """
    parser = _get_parser_plot()
    args = parser.parse_args(argv)
    data = [yaml.load(f, Loader=yaml.SafeLoader) for f in args.infiles]
    # sort the data such that the data series with the lowest terminal
    # temperature is plotted last (and appears in the legend last)
    terminal_temps = [d["temperature"][-1] for d in data]
    order = [i[0] for i in sorted(enumerate(terminal_temps), key=lambda x: x[1])]
    # actually plot it
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)
    for k in order[::-1]:
        if args.delta_t:
            temperature_data = numpy.subtract(
                data[k]["temperature"], data[k]["ambient"]
            )
        else:
            temperature_data = data[k]["temperature"]
        ax1.plot(
            data[k]["time"], temperature_data, label=data[k]["name"], lw=args.line_width
        )
    ax1.grid()
    if not args.hide_legend:
        ax1.legend(loc="upper left", bbox_to_anchor=(1.03, 1.0), borderaxespad=0)
    if args.delta_t:
        plot_yaxis_label = "Δ temperature °C (over ambient)"
    else:
        plot_yaxis_label = "temperature °C"
    ax1.set_xlabel("time (s)")
    ax1.set_ylabel(plot_yaxis_label)
    ax1.set_xlim([data[-1]["time"][0], data[-1]["time"][-1]])
    if args.temp_lims:
        ax1.set_ylim(*args.temp_lims)
    # Only plot frequencies when using a single input file
    if len(data) == 1 and args.frequency:
        ax2 = plt.twinx()
        ax2.set_ylabel("core frequency (MHz)")
        if args.freq_lims:
            ax2.set_ylim(*args.freq_lims)
        try:
            for k in order[::-1]:
                ax2.plot(
                    data[k]["time"],
                    data[k]["cpu frequency"],
                    label=data[k]["name"],
                    color="C1",
                    alpha=0.9,
                    lw=args.line_width,
                )
            ax1.set_zorder(ax2.get_zorder() + 1)  # put ax1 plot in front of ax2
            ax1.patch.set_visible(False)  # hide the 'canvas'
        # BUG FIX: the original read ``except KeyError():`` -- an exception
        # *instance*, which raises ``TypeError: catching classes that do
        # not inherit from BaseException is not allowed`` at handling time.
        # The except clause must name the exception class.
        except KeyError:
            print("Source data does not contain CPU frequency data.")
    if args.outfile is not None:
        plt.savefig(
            args.outfile,
            transparent=args.transparent,
            bbox_inches="tight",
            dpi=args.dpi,
        )
    else:
        plt.show()
    return
def _get_parser_plot():
    """Build the command-line argument parser for the ``plot`` command."""
    parser = argparse.ArgumentParser(description="Plot stress test data.")
    parser.add_argument(
        "--version", "-v", action="version", version=_get_version_text()
    )
    parser.add_argument(
        "infiles",
        nargs="+", type=argparse.FileType("r"),
        help="input YAML file(s) (default: stdin)",
    )
    parser.add_argument(
        "-o", "--outfile",
        help=(
            "if specified, the plot is written to this file "
            "(default: show on screen)"
        ),
    )
    parser.add_argument(
        "-t", "--temp-lims",
        type=float, nargs=2, default=None,
        help="limits for the temperature (default: data limits)",
    )
    parser.add_argument(
        "-d", "--dpi",
        type=int, default=None,
        help="image resolution in dots per inch when written to file",
    )
    parser.add_argument(
        "-f", "--frequency",
        action="store_true",
        help="plot CPU core frequency (single input files only)",
    )
    parser.add_argument(
        "-l", "--freq-lims",
        type=float, nargs=2, default=None,
        help="limits for the frequency scale (default: data limits)",
    )
    parser.add_argument("--hide-legend", help="do not draw legend", action="store_true")
    parser.add_argument(
        "--not-transparent",
        dest="transparent", action="store_false", default=True,
        help="do not make images transparent",
    )
    parser.add_argument(
        "-lw", "--line-width", type=float, default=None, help="line width"
    )
    parser.add_argument(
        "--delta-t",
        action="store_true", default=False,
        help="Use Delta-T (core - ambient) temperature instead of CPU core temperature",
    )
    return parser
| [
"argparse.FileType",
"matplotlib.pyplot.savefig",
"argparse.ArgumentParser",
"yaml.dump",
"matplotlib.pyplot.twinx",
"yaml.load",
"numpy.subtract",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"time.time",
"matplotlib.pyplot.show"
] | [((665, 741), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run stress test for the Raspberry Pi."""'}), "(description='Run stress test for the Raspberry Pi.')\n", (688, 741), False, 'import argparse\n'), ((4974, 5103), 'yaml.dump', 'yaml.dump', (["{'name': args.name, 'time': times, 'temperature': temps, 'cpu frequency':\n freqs, 'ambient': ambient}", 'args.outfile'], {}), "({'name': args.name, 'time': times, 'temperature': temps,\n 'cpu frequency': freqs, 'ambient': ambient}, args.outfile)\n", (4983, 5103), False, 'import yaml\n'), ((5680, 5692), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5690, 5692), True, 'import matplotlib.pyplot as plt\n'), ((7613, 7674), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot stress test data."""'}), "(description='Plot stress test data.')\n", (7636, 7674), False, 'import argparse\n'), ((5308, 5344), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.SafeLoader'}), '(f, Loader=yaml.SafeLoader)\n', (5317, 5344), False, 'import yaml\n'), ((6654, 6665), 'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (6663, 6665), True, 'import matplotlib.pyplot as plt\n'), ((7384, 7478), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.outfile'], {'transparent': 'args.transparent', 'bbox_inches': '"""tight"""', 'dpi': 'args.dpi'}), "(args.outfile, transparent=args.transparent, bbox_inches='tight',\n dpi=args.dpi)\n", (7395, 7478), True, 'import matplotlib.pyplot as plt\n'), ((7552, 7562), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7560, 7562), True, 'import matplotlib.pyplot as plt\n'), ((2438, 2460), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (2455, 2460), False, 'import argparse\n'), ((3014, 3025), 'time.time', 'time.time', ([], {}), '()\n', (3023, 3025), False, 'import time\n'), ((4930, 4953), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4951, 4953), False, 'import datetime\n'), 
((5810, 5868), 'numpy.subtract', 'numpy.subtract', (["data[k]['temperature']", "data[k]['ambient']"], {}), "(data[k]['temperature'], data[k]['ambient'])\n", (5824, 5868), False, 'import numpy\n'), ((7855, 7877), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (7872, 7877), False, 'import argparse\n')] |
import random
import logging
from math import sqrt
from rec.dataset.dataset import Dataset
from rec.recommender.base import SessionAwareRecommender
from collections import defaultdict, Counter
import tqdm
class SessionKnnRecommender(SessionAwareRecommender):
    """Session-based k-nearest-neighbours recommender (SKNN).

    Candidate items are scored by aggregating the set similarity between
    the current session and its ``k`` most similar training sessions;
    ties are broken by global item popularity.
    """

    def __init__(self, k=100, sample_size=1000, similarity='cosine', sampling='recent'):
        """
        :param k: number of nearest neighbour sessions to use
        :param sample_size: max number of candidate sessions to score
        :param similarity: one of 'cosine', 'jaccard', 'sorensen_dice'
        :param sampling: candidate pre-selection: 'random', 'recent', 'common'
        """
        super(SessionKnnRecommender, self).__init__()
        self.logger = logging.getLogger(self.__class__.__name__)
        self.k = k
        self.sample_size = sample_size
        self.similarity = similarity
        assert self.similarity in ['cosine', 'jaccard', 'sorensen_dice']
        self._similarity_func = getattr(SetSimilarities, self.similarity)
        self.sampling = sampling
        assert self.sampling in ['random', 'recent', 'common']
        self._sampling_func = getattr(self, f'_sampling_{self.sampling}')
        # indexes (built in fit)
        self.item_session_map = defaultdict(set)  # item -> set of session ids
        self.session_item_map = dict()  # session id -> set of item ids
        self.session_ts = dict()  # session id -> session start timestamp
        self.items_distribution = dict()  # item -> global click count

    def fit(self, train_dataset: Dataset, valid_data=None, valid_measures=None):
        """Build the lookup indexes from the training dataset."""
        assert isinstance(train_dataset, Dataset)
        self.logger.info('Creating recommender indexes...')
        self.create_item_session_maps(train_dataset)
        self.create_items_distribution(train_dataset)
        self.logger.debug("Model preparation completed.")

    def create_items_distribution(self, train_dataset):
        """Compute a global popularity count for every item (0 if unseen)."""
        counter = {train_dataset.items_idx_to_id[k]: v for k, v in Counter(train_dataset.items_idx_distrib).items()}
        for item in train_dataset.items.keys():
            if item not in counter:
                counter[item] = 0
        self.items_distribution = counter

    def predict_single_session(self, session, n=10):
        """Return the top-``n`` recommended items for ``session``."""
        neighbours = self.nearest_neighbours(session)
        result = self.session_items_rank(neighbours)
        # primary key: aggregated neighbour score; tie-break: popularity
        result.sort(key=lambda x: (x[1], self.items_distribution[x[0]]), reverse=True)
        return [item[0] for item in result][:min(n, len(result))]

    def nearest_neighbours(self, session):
        """Return (session_id, similarity) pairs of the k closest sessions."""
        sessions = self.possible_neighbours(session)
        items = session.clicked_items_set()
        rank = [(other, self.session_similarity(items, other)) for other in sessions]
        rank.sort(key=lambda x: x[1], reverse=True)
        return rank[0:min(self.k, len(rank))]

    def create_item_session_maps(self, train_dataset: Dataset):
        """Index sessions by item; record each session's item set and timestamp."""
        for session in tqdm.tqdm(train_dataset.all_sessions_list()):
            item_set = session.clicked_items_set()
            self.session_item_map[session.id] = item_set
            self.session_ts[session.id] = session.timestamp_start
            for item in item_set:
                self.item_session_map[item].add(session.id)
        # freeze the defaultdict so later missing-key lookups raise instead
        # of silently inserting empty entries
        self.item_session_map.default_factory = None

    def session_items_rank(self, neighbours):
        """Score every item occurring in the neighbour sessions."""
        items = self.items_from_sessions(neighbours)
        return [(item, self.item_rank(item, neighbours)) for item in items]

    def item_rank(self, item, neighbours):
        """Sum the similarities of the neighbour sessions containing ``item``."""
        item_sessions = self.item_session_map[item]
        return sum([x[1] for x in neighbours if x[0] in item_sessions])

    def items_from_sessions(self, sessions):
        """Union of the item sets of the given (session_id, score) pairs."""
        items = set()
        for session in sessions:
            items |= self.session_item_map[session[0]]
        return items

    def possible_neighbours(self, session):
        """
        to gain some performance sample possible nearest neighbours
        session which have at least one common viewed item
        :param session: Session
        :return: Set
        """
        items = session.clicked_items_set()
        common_sessions = set()
        for item in items:
            if item in self.item_session_map:
                common_sessions |= self.item_session_map[item]
        return self._sampling_func(common_sessions, session)

    def session_similarity(self, this_items, other_session):
        """
        calculate similarity between two sessions
        using method specified in class constructor
        :param this_items: Set
        :param other_session: Session
        :return: similarity score
        """
        other_items = self.session_item_map[other_session]
        return self._similarity_func(this_items, other_items)

    def _sampling_random(self, sessions, session):
        """
        sample candidate sessions uniformly at random
        :param sessions: Set
        :param session: Session (unused; kept for a uniform signature)
        :return: list of at most self.sample_size session ids
        """
        sample_size = min(self.sample_size, len(sessions))
        # BUG FIX: random.sample() requires a sequence; passing a set was
        # deprecated in Python 3.9 and raises TypeError since 3.11.
        return random.sample(list(sessions), sample_size)

    def _sampling_recent(self, sessions, session):
        """
        get most recent sessions based on session timestamp
        :param session: Session
        :param sessions: Set
        :return: set of self.sample_size most recent sessions
        """
        rank = [(sid, self.session_ts[sid]) for sid in sessions]
        rank.sort(key=lambda x: x[1], reverse=True)
        result = [x[0] for x in rank]
        sample_size = min(self.sample_size, len(sessions))
        return result[:sample_size]

    def _sampling_common(self, sessions, session):
        """
        get sessions with most common items set
        :param session: Session
        :param sessions: Set
        :return: list of at most self.sample_size session ids
        """
        current_items = session.clicked_items_set()  # hoisted out of the loop
        rank = [(ses, len(self.session_item_map[ses] & current_items)) for ses in sessions]
        # BUG FIX: sort descending so the sessions sharing the MOST items
        # come first; the previous ascending sort kept the least-overlapping
        # sessions, contradicting the documented intent.
        rank.sort(key=lambda x: x[1], reverse=True)
        result = [x[0] for x in rank]
        sample_size = min(self.sample_size, len(sessions))
        return result[:sample_size]

    def __str__(self):
        return 'SKNN'
class SetSimilarities:
    """Pairwise similarity measures between two sets of items."""

    @staticmethod
    def cosine(first, second):
        """Set cosine similarity: overlap / (sqrt(|A|) * sqrt(|B|))."""
        overlap = len(first & second)
        return overlap / (sqrt(len(first)) * sqrt(len(second)))

    @staticmethod
    def jaccard(first, second):
        """Jaccard index: overlap divided by the size of the union."""
        overlap = len(first & second)
        return overlap / (len(first) + len(second) - overlap)

    @staticmethod
    def sorensen_dice(first, second):
        """Overlap divided by the summed sizes of both sets."""
        return len(first & second) / (len(first) + len(second))
"logging.getLogger",
"random.sample",
"math.sqrt",
"collections.Counter",
"collections.defaultdict"
] | [((428, 470), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (445, 470), False, 'import logging\n'), ((933, 949), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (944, 949), False, 'from collections import defaultdict, Counter\n'), ((4412, 4448), 'random.sample', 'random.sample', (['sessions', 'sample_size'], {}), '(sessions, sample_size)\n', (4425, 4448), False, 'import random\n'), ((5699, 5714), 'math.sqrt', 'sqrt', (['len_first'], {}), '(len_first)\n', (5703, 5714), False, 'from math import sqrt\n'), ((5717, 5733), 'math.sqrt', 'sqrt', (['len_second'], {}), '(len_second)\n', (5721, 5733), False, 'from math import sqrt\n'), ((1544, 1584), 'collections.Counter', 'Counter', (['train_dataset.items_idx_distrib'], {}), '(train_dataset.items_idx_distrib)\n', (1551, 1584), False, 'from collections import defaultdict, Counter\n')] |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""RDF Values are responsible for serialization."""
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import object
import functools
import urllib.parse
import urllib.request, urllib.parse, urllib.error
import binascii
import posixpath
import rdflib
from pyaff4 import registry
from pyaff4 import utils
# pylint: disable=protected-access
class Memoize(object):
    """Decorator factory memoizing a method on its positional arguments.

    The cache lives on the wrapped function itself (``memo_pad``), keyed by
    the argument tuple *excluding* ``self``, and is emptied once it exceeds
    100 entries to bound its size.
    """

    def __call__(self, func):
        func.memo_pad = {}

        @functools.wraps(func)
        def wrapper(instance, *args):
            cache = func.memo_pad
            if len(cache) > 100:
                cache.clear()
            if args not in cache:
                cache[args] = func(instance, *args)
            return cache[args]

        return wrapper
class RDFValue(object):
    """Base class for RDF values; subclasses define (de)serialization."""

    datatype = ""

    def __init__(self, initializer=None):
        self.Set(initializer)

    def Set(self, string):
        raise NotImplementedError

    def SerializeToString(self):
        """Serializes to a sequence of bytes."""
        return ""

    def UnSerializeFromString(self, string):
        """Unserializes from bytes."""
        raise NotImplementedError

    def GetRaptorTerm(self):
        """Return this value as an rdflib literal term."""
        serialized = self.SerializeToString()
        return rdflib.Literal(serialized, datatype=self.datatype)

    def __bytes__(self):
        return self.SerializeToString()

    def __eq__(self, other):
        return utils.SmartStr(self) == utils.SmartStr(other)

    def __req__(self, other):
        # Reflected comparison delegates to the symmetric __eq__.
        return self.__eq__(other)

    def __hash__(self):
        return hash(self.SerializeToString())
class RDFBytes(RDFValue):
    """A raw byte string, serialized as hex (xsd:hexBinary)."""

    value = b""
    datatype = rdflib.XSD.hexBinary

    def SerializeToString(self):
        return binascii.hexlify(self.value)

    def UnSerializeFromString(self, string):
        self.Set(binascii.unhexlify(string))

    def Set(self, data):
        self.value = data

    def __eq__(self, other):
        if isinstance(other, RDFBytes):
            return self.value == other.value
        # BUG FIX: the original fell off the end and returned None for
        # foreign types; NotImplemented lets Python try the reflected
        # operation instead.
        return NotImplemented

    # BUG FIX: defining __eq__ sets __hash__ to None in Python 3, which made
    # instances unhashable; keep the base-class hash of the serialized form.
    __hash__ = RDFValue.__hash__
class XSDString(RDFValue):
    """A unicode string value (xsd:string)."""

    datatype = rdflib.XSD.string

    def Set(self, data):
        self.value = utils.SmartUnicode(data)

    def SerializeToString(self):
        return utils.SmartStr(self.value)

    def UnSerializeFromString(self, string):
        decoded = utils.SmartUnicode(string)
        self.Set(decoded)

    def __str__(self):
        return self.value
@functools.total_ordering
class XSDInteger(RDFValue):
    """An integer value (xsd:integer); orders and adds like an int."""

    datatype = rdflib.XSD.integer

    def Set(self, data):
        self.value = int(data)

    def SerializeToString(self):
        return utils.SmartStr(self.value)

    def UnSerializeFromString(self, string):
        self.Set(int(string))

    def __eq__(self, other):
        other_value = other.value if isinstance(other, XSDInteger) else other
        return self.value == other_value

    def __lt__(self, o):
        return self.value < o

    def __cmp__(self, o):
        return self.value - o

    def __add__(self, o):
        return self.value + o

    def __int__(self):
        return self.value

    def __long__(self):
        return int(self.value)

    def __str__(self):
        return str(self.value)
class RDFHash(XSDString):
    """Base class for hash values; ``value`` is the hex-encoded digest."""

    def __eq__(self, other):
        # Same hash type: compare digests directly; otherwise fall back to
        # a string comparison of the serialized forms.
        if isinstance(other, RDFHash) and self.datatype == other.datatype:
            return self.value == other.value
        return utils.SmartStr(self.value) == utils.SmartStr(other)

    def __ne__(self, other):
        return not self == other

    def digest(self):
        """Return the raw (binary) digest bytes."""
        return binascii.unhexlify(self.value)
# Concrete hash types: each only fixes the AFF4 datatype URI; the value is
# the hex-encoded digest (see RDFHash).
class SHA512Hash(RDFHash):
    datatype = rdflib.URIRef("http://aff4.org/Schema#SHA512")
class SHA256Hash(RDFHash):
    datatype = rdflib.URIRef("http://aff4.org/Schema#SHA256")
class SHA1Hash(RDFHash):
    datatype = rdflib.URIRef("http://aff4.org/Schema#SHA1")
class Blake2bHash(RDFHash):
    datatype = rdflib.URIRef("http://aff4.org/Schema#Blake2b")
class MD5Hash(RDFHash):
    datatype = rdflib.URIRef("http://aff4.org/Schema#MD5")
class SHA512BlockMapHash(RDFHash):
    datatype = rdflib.URIRef("http://aff4.org/Schema#blockMapHashSHA512")
class URN(RDFValue):
    """Represent a URN.

    According to RFC1738 URLs must be encoded in ASCII. Therefore the
    internal representation of a URN is bytes. When creating the URN
    from other forms (e.g. filenames), we assume UTF8 encoding if the
    filename is a unicode string.
    """
    # The encoded URN as a unicode string.
    value = None
    original_filename = None
    @classmethod
    def FromFileName(cls, filename):
        """Parse the URN from filename.
        Filename may be a unicode string, in which case it will be
        UTF8 encoded into the URN. URNs are always ASCII.
        """
        result = cls("file:%s" % urllib.request.pathname2url(filename))
        result.original_filename = filename
        return result
    @classmethod
    def NewURNFromFilename(cls, filename):
        # Backwards-compatible alias for FromFileName.
        return cls.FromFileName(filename)
    def ToFilename(self):
        # For file: urls we exactly reverse the conversion applied in
        # FromFileName.
        if self.value.startswith("file:"):
            return urllib.request.url2pathname(self.value[5:])
        components = self.Parse()
        if components.scheme == "file":
            return components.path
        # NOTE(review): implicitly returns None for non-file schemes.
    def GetRaptorTerm(self):
        # URNs map to rdflib URI references, not literals.
        return rdflib.URIRef(self.value)
    def SerializeToString(self):
        components = self.Parse()
        return utils.SmartStr(urllib.parse.urlunparse(components))
    def UnSerializeFromString(self, string):
        utils.AssertStr(string)
        self.Set(utils.SmartUnicode(string))
        # Returns self for chaining (unlike the other RDFValue subclasses).
        return self
    def Set(self, data):
        if data is None:
            return
        elif isinstance(data, URN):
            self.value = data.value
        else:
            utils.AssertUnicode(data)
            self.value = data
    def Parse(self):
        return self._Parse(self.value)
    # URL parsing seems to be slow in Python so we cache it as much as possible.
    @Memoize()
    def _Parse(self, value):
        # NOTE: the Memoize cache keys only on ``value`` (self is excluded),
        # so results are shared across URN instances.
        components = urllib.parse.urlparse(value)
        # dont normalise path for http URI's
        if components.scheme and not components.scheme == "http":
            normalized_path = posixpath.normpath(components.path)
            if normalized_path == ".":
                normalized_path = ""
            components = components._replace(path=normalized_path)
        if not components.scheme:
            # For file:// URNs, we need to parse them from a filename.
            components = components._replace(
                netloc="",
                path=urllib.request.pathname2url(value),
                scheme="file")
            self.original_filename = value
        return components
    def Scheme(self):
        components = self.Parse()
        return components.scheme
    def Append(self, component, quote=True):
        # Append a path component, normalizing the resulting path.
        components = self.Parse()
        if quote:
            component = urllib.parse.quote(component)
        # Work around usual posixpath.join bug.
        component = component.lstrip("/")
        new_path = posixpath.normpath(posixpath.join(
            "/", components.path, component))
        components = components._replace(path=new_path)
        return URN(urllib.parse.urlunparse(components))
    def RelativePath(self, urn):
        # Return the suffix of ``urn`` below this URN, or None if it is
        # not a prefix match.
        urn_value = str(urn)
        if urn_value.startswith(self.value):
            return urn_value[len(self.value):]
    def __str__(self):
        return self.value
    def __lt__(self, other):
        return self.value < utils.SmartUnicode(other)
    def __repr__(self):
        return "<%s>" % self.value
def AssertURN(urn):
    """Raise TypeError unless ``urn`` is a URN instance.

    (The original module defined this function twice, identically; the
    duplicate definition has been removed.)
    """
    if not isinstance(urn, URN):
        raise TypeError("Expecting a URN.")
# Register the deserializers for each RDF datatype URI. Both the current
# aff4.org and the legacy afflib.org namespaces map to the same hash types.
registry.RDF_TYPE_MAP.update({
    rdflib.XSD.hexBinary: RDFBytes,
    rdflib.XSD.string: XSDString,
    rdflib.XSD.integer: XSDInteger,
    rdflib.XSD.int: XSDInteger,
    rdflib.XSD.long: XSDInteger,
    rdflib.URIRef("http://aff4.org/Schema#SHA512"): SHA512Hash,
    rdflib.URIRef("http://aff4.org/Schema#SHA256"): SHA256Hash,
    rdflib.URIRef("http://aff4.org/Schema#SHA1"): SHA1Hash,
    rdflib.URIRef("http://aff4.org/Schema#MD5"): MD5Hash,
    rdflib.URIRef("http://aff4.org/Schema#Blake2b"): Blake2bHash,
    rdflib.URIRef("http://aff4.org/Schema#blockMapHashSHA512"): SHA512BlockMapHash,
    rdflib.URIRef("http://afflib.org/2009/aff4#SHA512"): SHA512Hash,
    rdflib.URIRef("http://afflib.org/2009/aff4#SHA256"): SHA256Hash,
    rdflib.URIRef("http://afflib.org/2009/aff4#SHA1"): SHA1Hash,
    rdflib.URIRef("http://afflib.org/2009/aff4#MD5"): MD5Hash,
    rdflib.URIRef("http://afflib.org/2009/aff4#blockMapHashSHA512"): SHA512BlockMapHash
})
| [
"posixpath.join",
"pyaff4.utils.SmartStr",
"binascii.hexlify",
"functools.wraps",
"builtins.str",
"future.standard_library.install_aliases",
"pyaff4.utils.AssertUnicode",
"posixpath.normpath",
"pyaff4.utils.AssertStr",
"pyaff4.utils.SmartUnicode",
"rdflib.URIRef",
"binascii.unhexlify"
] | [((723, 757), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (755, 757), False, 'from future import standard_library\n'), ((4338, 4384), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#SHA512"""'], {}), "('http://aff4.org/Schema#SHA512')\n", (4351, 4384), False, 'import rdflib\n'), ((4429, 4475), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#SHA256"""'], {}), "('http://aff4.org/Schema#SHA256')\n", (4442, 4475), False, 'import rdflib\n'), ((4518, 4562), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#SHA1"""'], {}), "('http://aff4.org/Schema#SHA1')\n", (4531, 4562), False, 'import rdflib\n'), ((4608, 4655), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#Blake2b"""'], {}), "('http://aff4.org/Schema#Blake2b')\n", (4621, 4655), False, 'import rdflib\n'), ((4697, 4740), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#MD5"""'], {}), "('http://aff4.org/Schema#MD5')\n", (4710, 4740), False, 'import rdflib\n'), ((4793, 4851), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#blockMapHashSHA512"""'], {}), "('http://aff4.org/Schema#blockMapHashSHA512')\n", (4806, 4851), False, 'import rdflib\n'), ((1122, 1140), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (1137, 1140), False, 'import functools\n'), ((2414, 2442), 'binascii.hexlify', 'binascii.hexlify', (['self.value'], {}), '(self.value)\n', (2430, 2442), False, 'import binascii\n'), ((2840, 2866), 'pyaff4.utils.SmartStr', 'utils.SmartStr', (['self.value'], {}), '(self.value)\n', (2854, 2866), False, 'from pyaff4 import utils\n'), ((3005, 3029), 'pyaff4.utils.SmartUnicode', 'utils.SmartUnicode', (['data'], {}), '(data)\n', (3023, 3029), False, 'from pyaff4 import utils\n'), ((3219, 3245), 'pyaff4.utils.SmartStr', 'utils.SmartStr', (['self.value'], {}), '(self.value)\n', (3233, 3245), False, 'from pyaff4 import utils\n'), ((3846, 3861), 'builtins.str', 'str', 
(['self.value'], {}), '(self.value)\n', (3849, 3861), False, 'from builtins import str\n'), ((4263, 4293), 'binascii.unhexlify', 'binascii.unhexlify', (['self.value'], {}), '(self.value)\n', (4281, 4293), False, 'import binascii\n'), ((6098, 6123), 'rdflib.URIRef', 'rdflib.URIRef', (['self.value'], {}), '(self.value)\n', (6111, 6123), False, 'import rdflib\n'), ((6313, 6336), 'pyaff4.utils.AssertStr', 'utils.AssertStr', (['string'], {}), '(string)\n', (6328, 6336), False, 'from pyaff4 import utils\n'), ((8123, 8131), 'builtins.str', 'str', (['urn'], {}), '(urn)\n', (8126, 8131), False, 'from builtins import str\n'), ((8824, 8870), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#SHA512"""'], {}), "('http://aff4.org/Schema#SHA512')\n", (8837, 8870), False, 'import rdflib\n'), ((8888, 8934), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#SHA256"""'], {}), "('http://aff4.org/Schema#SHA256')\n", (8901, 8934), False, 'import rdflib\n'), ((8952, 8996), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#SHA1"""'], {}), "('http://aff4.org/Schema#SHA1')\n", (8965, 8996), False, 'import rdflib\n'), ((9012, 9055), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#MD5"""'], {}), "('http://aff4.org/Schema#MD5')\n", (9025, 9055), False, 'import rdflib\n'), ((9070, 9117), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#Blake2b"""'], {}), "('http://aff4.org/Schema#Blake2b')\n", (9083, 9117), False, 'import rdflib\n'), ((9136, 9194), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://aff4.org/Schema#blockMapHashSHA512"""'], {}), "('http://aff4.org/Schema#blockMapHashSHA512')\n", (9149, 9194), False, 'import rdflib\n'), ((9220, 9271), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://afflib.org/2009/aff4#SHA512"""'], {}), "('http://afflib.org/2009/aff4#SHA512')\n", (9233, 9271), False, 'import rdflib\n'), ((9289, 9340), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://afflib.org/2009/aff4#SHA256"""'], {}), 
"('http://afflib.org/2009/aff4#SHA256')\n", (9302, 9340), False, 'import rdflib\n'), ((9358, 9407), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://afflib.org/2009/aff4#SHA1"""'], {}), "('http://afflib.org/2009/aff4#SHA1')\n", (9371, 9407), False, 'import rdflib\n'), ((9423, 9471), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://afflib.org/2009/aff4#MD5"""'], {}), "('http://afflib.org/2009/aff4#MD5')\n", (9436, 9471), False, 'import rdflib\n'), ((9486, 9549), 'rdflib.URIRef', 'rdflib.URIRef', (['"""http://afflib.org/2009/aff4#blockMapHashSHA512"""'], {}), "('http://afflib.org/2009/aff4#blockMapHashSHA512')\n", (9499, 9549), False, 'import rdflib\n'), ((2076, 2096), 'pyaff4.utils.SmartStr', 'utils.SmartStr', (['self'], {}), '(self)\n', (2090, 2096), False, 'from pyaff4 import utils\n'), ((2100, 2121), 'pyaff4.utils.SmartStr', 'utils.SmartStr', (['other'], {}), '(other)\n', (2114, 2121), False, 'from pyaff4 import utils\n'), ((2168, 2188), 'pyaff4.utils.SmartStr', 'utils.SmartStr', (['self'], {}), '(self)\n', (2182, 2188), False, 'from pyaff4 import utils\n'), ((2192, 2213), 'pyaff4.utils.SmartStr', 'utils.SmartStr', (['other'], {}), '(other)\n', (2206, 2213), False, 'from pyaff4 import utils\n'), ((2506, 2532), 'binascii.unhexlify', 'binascii.unhexlify', (['string'], {}), '(string)\n', (2524, 2532), False, 'import binascii\n'), ((2930, 2956), 'pyaff4.utils.SmartUnicode', 'utils.SmartUnicode', (['string'], {}), '(string)\n', (2948, 2956), False, 'from pyaff4 import utils\n'), ((4110, 4136), 'pyaff4.utils.SmartStr', 'utils.SmartStr', (['self.value'], {}), '(self.value)\n', (4124, 4136), False, 'from pyaff4 import utils\n'), ((4140, 4161), 'pyaff4.utils.SmartStr', 'utils.SmartStr', (['other'], {}), '(other)\n', (4154, 4161), False, 'from pyaff4 import utils\n'), ((6354, 6380), 'pyaff4.utils.SmartUnicode', 'utils.SmartUnicode', (['string'], {}), '(string)\n', (6372, 6380), False, 'from pyaff4 import utils\n'), ((7006, 7041), 'posixpath.normpath', 'posixpath.normpath', 
(['components.path'], {}), '(components.path)\n', (7024, 7041), False, 'import posixpath\n'), ((7893, 7940), 'posixpath.join', 'posixpath.join', (['"""/"""', 'components.path', 'component'], {}), "('/', components.path, component)\n", (7907, 7940), False, 'import posixpath\n'), ((8332, 8357), 'pyaff4.utils.SmartUnicode', 'utils.SmartUnicode', (['other'], {}), '(other)\n', (8350, 8357), False, 'from pyaff4 import utils\n'), ((6571, 6596), 'pyaff4.utils.AssertUnicode', 'utils.AssertUnicode', (['data'], {}), '(data)\n', (6590, 6596), False, 'from pyaff4 import utils\n')] |
#!/usr/bin/env python3
import json
import argparse
import os
from collections import OrderedDict
from utils import normalize
from utils import exact_match_score, regex_match_score, get_rank
from utils import slugify, aggregate, aggregate_ans
from utils import Tokenizer
from multiprocessing import Pool as ProcessPool
# import numpy as np
import pickle as pk
import sys
import time
import numpy as np
ENCODING = "utf-8"
# Normalization statistics (mean/std) for document-score and answer-score
# features.  NOTE(review): values look precomputed from a training corpus --
# confirm their provenance before changing them.
DOC_MEAN = 8.5142
DOC_STD = 2.8324
# ANS_MEAN=86486
# ANS_STD=256258
ANS_MEAN = 11588614
ANS_STD = 98865053
# Accumulates the 0-based rank of the first correct answer per record
# (appended to in process_record).
all_corr_rank = []
# def process_record(data_line_, prediction_line_, neg_gap_, feature_dir_, record_dir_, match_fn):
def process_record(data_line_, prediction_line_, neg_gap_, feature_dir_, record_dir_, match_fn, all_doc_scores,
all_ans_scores, z_scores):
missing_count_ = 0
total_count_ = 0
stop_count_ = 0
data = json.loads(data_line_)
question = data['question']
q_id = slugify(question)
q_path = os.path.join(feature_dir_, '%s.json' % q_id)
n_q = [0 for _ in Tokenizer.FEAT]
if os.path.exists(q_path):
q_data = open(q_path, encoding=ENCODING).read()
record = json.loads(q_data)
q_ner = record['ner']
q_pos = record['pos']
for feat in q_ner + q_pos:
n_q[Tokenizer.FEAT_DICT[feat]] += 1
else:
print('question feature file %s not exist!' % q_path)
sys.stdout.flush()
missing_count_ += 1
return missing_count_, total_count_, stop_count_
answer = [normalize(a) for a in data['answer']]
prediction = json.loads(prediction_line_)
# MAKE SURE REVERSE IS TRUE
ranked_prediction = sorted(prediction, key=lambda k: k['doc_score'], reverse=True)
correct_rank = get_rank(prediction, answer, match_fn)
if correct_rank > 150:
# if correct_rank < 50 or correct_rank > 150:
return missing_count_, total_count_, stop_count_
all_corr_rank.append(correct_rank - 1)
all_n_p = []
all_n_a = []
all_p_scores = []
all_a_scores = []
all_probs = []
all_spans = []
repeats = 0
for i, entry in enumerate(ranked_prediction):
doc_id = entry['doc_id']
start = int(entry['start'])
end = int(entry['end'])
doc_score = entry['doc_score']
ans_score = entry['span_score']
prob = entry['prob']
span = entry['span']
# RESTRICT TO MAX 1000000000
# print("Threshold 1000000")
# ans_score=min(ans_score, 1000000) #restrict to max of million
if span in all_spans:
repeats += 1
all_spans.append(span)
################Calculate sample z score (t statistic) for answer score
if all_a_scores == [] or len(
all_a_scores) == 1: # dont use a_zscore feature at the beginning or if we only have 1
a_zscore = 0
else: # Take the sample mean of the previous ones, take zscore of the current with respect to that
# sample_mean = np.mean(all_a_scores + [ans_score])
sample_mean = np.mean(all_a_scores)
# sample_std = np.std(all_a_scores + [ans_score])
sample_std = np.std(all_a_scores)
# if sample_std != 0:
a_zscore = (ans_score - sample_mean) / sample_std
# else:
# a_zscore = 0
z_scores.append(a_zscore)
# THESE ARE FOR STATISTISTICS OVER ENTIRE DATA SET, IGNORE
all_doc_scores.append(doc_score)
all_ans_scores.append(ans_score)
corr_doc_score = (doc_score - DOC_MEAN) / DOC_STD
corr_ans_mean_score = (np.mean(all_a_scores + [ans_score]) - ANS_MEAN) / ANS_STD
all_probs.append(prob)
###############
p_pos = dict()
p_ner = dict()
feat_file = os.path.join(feature_dir_, '%s.json' % doc_id)
if os.path.exists(feat_file):
record = json.load(open(feat_file))
p_ner[doc_id] = record['ner']
p_pos[doc_id] = record['pos']
n_p = [0 for _ in Tokenizer.FEAT]
n_a = [0 for _ in Tokenizer.FEAT]
for feat in p_ner[doc_id] + p_pos[doc_id]:
n_p[Tokenizer.FEAT_DICT[feat]] += 1
for feat in p_ner[doc_id][start:end + 1] + p_pos[doc_id][start:end + 1]:
n_a[Tokenizer.FEAT_DICT[feat]] += 1
all_n_p.append(n_p)
all_n_a.append(n_a)
all_p_scores.append(doc_score)
all_a_scores.append(ans_score)
f_np = aggregate(all_n_p)
f_na = aggregate(all_n_a)
f_sp = aggregate(all_p_scores)
f_sa = aggregate_ans(all_a_scores)
record = OrderedDict()
# sp, nq, np, na, ha
record['sp'] = f_sp
record['nq'] = list(map(float, n_q))
record['np'] = f_np
record['na'] = f_na
record['sa'] = f_sa
record['a_zscore'] = a_zscore
record['corr_doc_score'] = corr_doc_score
record['i'] = i
record['prob_avg'] = sum(all_probs) / len(all_probs)
record['prob'] = prob
record['repeats'] = repeats
record['ans_avg'] = corr_ans_mean_score
if i + 1 == correct_rank:
# if i + 1 >= correct_rank:
record['stop'] = 1
stop_count_ += 1
write_record = True
# if i % neg_gap_ ==0:
# write_record = True
# else:
# write_record = False
should_return = True
# if i + 1 - correct_rank > 30:
# should_return = True
# else:
# should_return = False
else:
should_return = False
if i % neg_gap_ == 0:
record['stop'] = 0
write_record = True
else:
write_record = False
if write_record:
record_path = os.path.join(record_dir_, '%s_%s.pkl' % (q_id, doc_id))
with open(record_path, 'wb') as f:
pk.dump(record, f)
total_count_ += 1
if should_return:
return missing_count_, total_count_, stop_count_
return missing_count_, total_count_, stop_count_
if __name__ == '__main__':
# unzip trec.tgz to trec
# below is an example run, take 114.5s(on mac mini 2012), generated 15571 records, 7291 of them are stop labels
# python prepare_data.py -p CuratedTrec-test-lstm.preds.txt -a CuratedTrec-test.txt -f trec -r records
#
all_doc_scores = []
all_ans_scores = []
z_scores = []
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--prediction_file',
help='prediction file, e.g. CuratedTrec-test-lstm.preds_train.txt')
parser.add_argument('-a', '--answer_file', help='data set with labels, e.g. CuratedTrec-test_train.txt')
parser.add_argument('-nm', '--no_multiprocess', action='store_true', help='default to use multiprocessing')
parser.add_argument('-ns', '--negative_scale', type=int, default=10, help='scale factor for negative samples')
parser.add_argument('-r', '--record_dir', default=None, help='dir to save generated records data set')
parser.add_argument('-f', '--feature_dir', default=None,
help='dir that contains json features files, unzip squad.tgz or trec.tgz to get that dir')
parser.add_argument('-rg', '--regex', action='store_true', help='default to use exact match')
args = parser.parse_args()
match_func = regex_match_score if args.regex else exact_match_score
missing_count = 0
total_count = 0
stop_count = 0
answer_file = args.answer_file
prediction_file = args.prediction_file
record_dir = args.record_dir
if not os.path.exists(record_dir):
os.makedirs(record_dir)
feature_dir = args.feature_dir
if not os.path.exists(feature_dir):
print('feature_dir does not exist!')
exit(-1)
s = time.time()
if args.no_multiprocess:
for data_line, prediction_line in zip(open(answer_file, encoding=ENCODING),
open(prediction_file, encoding=ENCODING)):
# missing, total, stop = process_record(data_line, prediction_line, args.negative_scale,
# feature_dir, record_dir, match_func)
missing, total, stop = process_record(data_line, prediction_line, args.negative_scale,
feature_dir, record_dir, match_func, all_doc_scores, all_ans_scores,
z_scores)
missing_count += missing
stop_count += stop
total_count += total
print('processed %d records...' % total_count)
sys.stdout.flush()
else:
print('using multiprocessing...')
result_handles = []
async_pool = ProcessPool()
for data_line, prediction_line in zip(open(answer_file, encoding=ENCODING),
open(prediction_file, encoding=ENCODING)):
param = (data_line, prediction_line, args.negative_scale,
feature_dir, record_dir, match_func)
handle = async_pool.apply_async(process_record, param)
result_handles.append(handle)
for result in result_handles:
missing, total, stop = result.get()
missing_count += missing
stop_count += stop
total_count += total
print('processed %d records, stop: %d' % (total_count, stop_count))
sys.stdout.flush()
e = time.time()
print('%d records' % total_count)
print('%d stop labels' % stop_count)
print('%d docs not found' % missing_count)
print('took %.4f s' % (e - s))
# all_ans_scores = list(map(lambda x: min([x, 1000000]), all_ans_scores))
doc_mean = np.mean(all_doc_scores)
ans_mean = np.mean(all_ans_scores)
doc_std = np.std(all_doc_scores)
ans_std = np.std(all_ans_scores)
z_std = np.std(z_scores)
z_mean = np.mean(z_scores)
print("Doc Mean {}".format(doc_mean))
print("Doc Std {}".format(doc_std))
print("Ans Mean {}".format(ans_mean))
print("Ans Std {}".format(ans_std))
print("Doc Max {}".format(max(all_doc_scores)))
print("Ans Max {}".format(max(all_ans_scores)))
print("Z Std {}".format(z_std))
print("Z Max {}".format(max(z_scores)))
print("Z Mean {}".format(z_mean))
print(len(all_corr_rank))
print("i Std {}".format(np.std(all_corr_rank)))
print("i Mean {}".format(np.mean(all_corr_rank)))
| [
"os.path.exists",
"json.loads",
"numpy.mean",
"collections.OrderedDict",
"pickle.dump",
"utils.slugify",
"argparse.ArgumentParser",
"utils.normalize",
"utils.aggregate",
"utils.aggregate_ans",
"os.makedirs",
"os.path.join",
"utils.get_rank",
"multiprocessing.Pool",
"numpy.std",
"sys.st... | [((885, 907), 'json.loads', 'json.loads', (['data_line_'], {}), '(data_line_)\n', (895, 907), False, 'import json\n'), ((951, 968), 'utils.slugify', 'slugify', (['question'], {}), '(question)\n', (958, 968), False, 'from utils import slugify, aggregate, aggregate_ans\n'), ((982, 1026), 'os.path.join', 'os.path.join', (['feature_dir_', "('%s.json' % q_id)"], {}), "(feature_dir_, '%s.json' % q_id)\n", (994, 1026), False, 'import os\n'), ((1072, 1094), 'os.path.exists', 'os.path.exists', (['q_path'], {}), '(q_path)\n', (1086, 1094), False, 'import os\n'), ((1585, 1613), 'json.loads', 'json.loads', (['prediction_line_'], {}), '(prediction_line_)\n', (1595, 1613), False, 'import json\n'), ((1752, 1790), 'utils.get_rank', 'get_rank', (['prediction', 'answer', 'match_fn'], {}), '(prediction, answer, match_fn)\n', (1760, 1790), False, 'from utils import exact_match_score, regex_match_score, get_rank\n'), ((6700, 6725), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6723, 6725), False, 'import argparse\n'), ((8080, 8091), 'time.time', 'time.time', ([], {}), '()\n', (8089, 8091), False, 'import time\n'), ((9798, 9809), 'time.time', 'time.time', ([], {}), '()\n', (9807, 9809), False, 'import time\n'), ((10064, 10087), 'numpy.mean', 'np.mean', (['all_doc_scores'], {}), '(all_doc_scores)\n', (10071, 10087), True, 'import numpy as np\n'), ((10103, 10126), 'numpy.mean', 'np.mean', (['all_ans_scores'], {}), '(all_ans_scores)\n', (10110, 10126), True, 'import numpy as np\n'), ((10141, 10163), 'numpy.std', 'np.std', (['all_doc_scores'], {}), '(all_doc_scores)\n', (10147, 10163), True, 'import numpy as np\n'), ((10178, 10200), 'numpy.std', 'np.std', (['all_ans_scores'], {}), '(all_ans_scores)\n', (10184, 10200), True, 'import numpy as np\n'), ((10213, 10229), 'numpy.std', 'np.std', (['z_scores'], {}), '(z_scores)\n', (10219, 10229), True, 'import numpy as np\n'), ((10243, 10260), 'numpy.mean', 'np.mean', (['z_scores'], {}), '(z_scores)\n', 
(10250, 10260), True, 'import numpy as np\n'), ((1169, 1187), 'json.loads', 'json.loads', (['q_data'], {}), '(q_data)\n', (1179, 1187), False, 'import json\n'), ((1411, 1429), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1427, 1429), False, 'import sys\n'), ((1530, 1542), 'utils.normalize', 'normalize', (['a'], {}), '(a)\n', (1539, 1542), False, 'from utils import normalize\n'), ((3887, 3933), 'os.path.join', 'os.path.join', (['feature_dir_', "('%s.json' % doc_id)"], {}), "(feature_dir_, '%s.json' % doc_id)\n", (3899, 3933), False, 'import os\n'), ((3945, 3970), 'os.path.exists', 'os.path.exists', (['feat_file'], {}), '(feat_file)\n', (3959, 3970), False, 'import os\n'), ((4569, 4587), 'utils.aggregate', 'aggregate', (['all_n_p'], {}), '(all_n_p)\n', (4578, 4587), False, 'from utils import slugify, aggregate, aggregate_ans\n'), ((4603, 4621), 'utils.aggregate', 'aggregate', (['all_n_a'], {}), '(all_n_a)\n', (4612, 4621), False, 'from utils import slugify, aggregate, aggregate_ans\n'), ((4637, 4660), 'utils.aggregate', 'aggregate', (['all_p_scores'], {}), '(all_p_scores)\n', (4646, 4660), False, 'from utils import slugify, aggregate, aggregate_ans\n'), ((4676, 4703), 'utils.aggregate_ans', 'aggregate_ans', (['all_a_scores'], {}), '(all_a_scores)\n', (4689, 4703), False, 'from utils import slugify, aggregate, aggregate_ans\n'), ((4722, 4735), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4733, 4735), False, 'from collections import OrderedDict\n'), ((7875, 7901), 'os.path.exists', 'os.path.exists', (['record_dir'], {}), '(record_dir)\n', (7889, 7901), False, 'import os\n'), ((7911, 7934), 'os.makedirs', 'os.makedirs', (['record_dir'], {}), '(record_dir)\n', (7922, 7934), False, 'import os\n'), ((7981, 8008), 'os.path.exists', 'os.path.exists', (['feature_dir'], {}), '(feature_dir)\n', (7995, 8008), False, 'import os\n'), ((9067, 9080), 'multiprocessing.Pool', 'ProcessPool', ([], {}), '()\n', (9078, 9080), True, 'from multiprocessing 
import Pool as ProcessPool\n'), ((3107, 3128), 'numpy.mean', 'np.mean', (['all_a_scores'], {}), '(all_a_scores)\n', (3114, 3128), True, 'import numpy as np\n'), ((3227, 3247), 'numpy.std', 'np.std', (['all_a_scores'], {}), '(all_a_scores)\n', (3233, 3247), True, 'import numpy as np\n'), ((6025, 6080), 'os.path.join', 'os.path.join', (['record_dir_', "('%s_%s.pkl' % (q_id, doc_id))"], {}), "(record_dir_, '%s_%s.pkl' % (q_id, doc_id))\n", (6037, 6080), False, 'import os\n'), ((8947, 8965), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8963, 8965), False, 'import sys\n'), ((9770, 9788), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9786, 9788), False, 'import sys\n'), ((10707, 10728), 'numpy.std', 'np.std', (['all_corr_rank'], {}), '(all_corr_rank)\n', (10713, 10728), True, 'import numpy as np\n'), ((10760, 10782), 'numpy.mean', 'np.mean', (['all_corr_rank'], {}), '(all_corr_rank)\n', (10767, 10782), True, 'import numpy as np\n'), ((3706, 3741), 'numpy.mean', 'np.mean', (['(all_a_scores + [ans_score])'], {}), '(all_a_scores + [ans_score])\n', (3713, 3741), True, 'import numpy as np\n'), ((6144, 6162), 'pickle.dump', 'pk.dump', (['record', 'f'], {}), '(record, f)\n', (6151, 6162), True, 'import pickle as pk\n')] |
# -*- encoding: UTF-8 -*-
from naoqi import ALProxy
IP = '127.0.0.1'
PORT = 49340
motion_proxy = ALProxy("ALMotion",IP,PORT)
motion_proxy.openHand('LHand')
motion_proxy.openHand('RHand')
| [
"naoqi.ALProxy"
] | [((100, 129), 'naoqi.ALProxy', 'ALProxy', (['"""ALMotion"""', 'IP', 'PORT'], {}), "('ALMotion', IP, PORT)\n", (107, 129), False, 'from naoqi import ALProxy\n')] |
from usefull import read_db
def test_read_csv_db_simple():
'''
page msg parent choice end
1 1. Mi sembra che 0 False False
2 ...se ti trovassi 1 True False
'''
assert read_db('db_simple.csv')[0]['page'] == 1
assert read_db('db_simple.csv')[0]['msg'][-3:] == 'che'
assert read_db('db_simple.csv')[1]['msg'][:9] == '...se ti '
assert read_db('db_simple.csv')[0]['end'] == False
def test_read_real_db():
assert len(read_db('db.csv')) == 167
| [
"usefull.read_db"
] | [((487, 504), 'usefull.read_db', 'read_db', (['"""db.csv"""'], {}), "('db.csv')\n", (494, 504), False, 'from usefull import read_db\n'), ((225, 249), 'usefull.read_db', 'read_db', (['"""db_simple.csv"""'], {}), "('db_simple.csv')\n", (232, 249), False, 'from usefull import read_db\n'), ((402, 426), 'usefull.read_db', 'read_db', (['"""db_simple.csv"""'], {}), "('db_simple.csv')\n", (409, 426), False, 'from usefull import read_db\n'), ((277, 301), 'usefull.read_db', 'read_db', (['"""db_simple.csv"""'], {}), "('db_simple.csv')\n", (284, 301), False, 'from usefull import read_db\n'), ((337, 361), 'usefull.read_db', 'read_db', (['"""db_simple.csv"""'], {}), "('db_simple.csv')\n", (344, 361), False, 'from usefull import read_db\n')] |
#!/usr/bin/env python3
import layers
import cv2
from os.path import join
import numpy as np
# import tensorflow as tf
import Augmentor
vw = 320
vh = 320
class Augment:
def __init__(self):
self.w = 2 * 640
self.h = 2 * 480
self.canvas = np.zeros((self.h, self.w, 3), dtype=np.uint8)
def update(self, _im):
# sc = .4
# h, w = (int(sc * _im.shape[0]), int(sc * _im.shape[1]))
vh = _im.shape[0]
vw = _im.shape[1]
# im = cv2.resize(_im, (w, h))
self.canvas[100:(100 + vh), :vw, :] = _im
cv2.putText(self.canvas, "Original", (0, 50), cv2.FONT_HERSHEY_COMPLEX, 1.0, (255, 255, 255))
cv2.putText(self.canvas, "Distorted", (0, 150 + vh), cv2.FONT_HERSHEY_COMPLEX, 1.0, (255, 255, 255))
self.canvas[(200 + vh):(200 + 2 * vh), :vw, :] = _im
cv2.imshow("Image Augment", self.canvas)
cv2.waitKey(0)
if __name__ == "__main__":
data_root = "/mnt/4102422c-af52-4b55-988f-df7544b35598/dataset/KITTI/KITTI_Odometry/"
seq = "14"
vo_fn = data_root + "dataset/poses/" + seq.zfill(2) + ".txt"
im_dir = data_root + "dataset/sequences/" + seq.zfill(2)
aux_dir = "/home/handuo/projects/paper/image_base/downloads"
i = 0
gui = Augment()
# with tf.Session() as sess:
ims = []
p = Augmentor.Pipeline(join(im_dir, "image_0/"), output_directory="../../../output", save_format="JPEG")
# print("Has %s samples." % (len(p.augmentor_images)))
p.zoom(probability=0.3, min_factor=0.9, max_factor=1.2)
p.skew(probability=0.75, magnitude=0.3)
# p.random_erasing(probability=0.5, rectangle_area=0.3)
p.multi_erasing(probability=0.5, max_x_axis=0.3, max_y_axis=0.15, max_num=4)
# p.rotate(probability=0.5, max_left_rotation=6, max_right_rotation=6)
p.sample(10)
| [
"os.path.join",
"cv2.imshow",
"cv2.putText",
"numpy.zeros",
"cv2.waitKey"
] | [((268, 313), 'numpy.zeros', 'np.zeros', (['(self.h, self.w, 3)'], {'dtype': 'np.uint8'}), '((self.h, self.w, 3), dtype=np.uint8)\n', (276, 313), True, 'import numpy as np\n'), ((576, 673), 'cv2.putText', 'cv2.putText', (['self.canvas', '"""Original"""', '(0, 50)', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)', '(255, 255, 255)'], {}), "(self.canvas, 'Original', (0, 50), cv2.FONT_HERSHEY_COMPLEX, 1.0,\n (255, 255, 255))\n", (587, 673), False, 'import cv2\n'), ((678, 783), 'cv2.putText', 'cv2.putText', (['self.canvas', '"""Distorted"""', '(0, 150 + vh)', 'cv2.FONT_HERSHEY_COMPLEX', '(1.0)', '(255, 255, 255)'], {}), "(self.canvas, 'Distorted', (0, 150 + vh), cv2.\n FONT_HERSHEY_COMPLEX, 1.0, (255, 255, 255))\n", (689, 783), False, 'import cv2\n'), ((850, 890), 'cv2.imshow', 'cv2.imshow', (['"""Image Augment"""', 'self.canvas'], {}), "('Image Augment', self.canvas)\n", (860, 890), False, 'import cv2\n'), ((899, 913), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (910, 913), False, 'import cv2\n'), ((1345, 1369), 'os.path.join', 'join', (['im_dir', '"""image_0/"""'], {}), "(im_dir, 'image_0/')\n", (1349, 1369), False, 'from os.path import join\n')] |
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##
# Errorcodes
# @Since: 22-MAR-2019
# @Author: <NAME>
# @Version: 20190322.0 - JBE - Initial
import logging
import supporting.errorcode as err
# Returncode, ErrorCode, ErrorMessage, Resolution, Area, Severity
OK = err.Errorcode(0, '0', 'No errors encountered.', 'No action needed.', 'Result', logging.INFO)
IGNORE = err.Errorcode(-1, 'GEN-0001', 'Ignored', 'No action needed.', 'Result', logging.WARNING)
##
# General errors, mostly related to environment and/or privileges
LOGDIR_NOTSET = err.Errorcode(1, 'ENV-0001', 'LOGDIR not defined', 'Set the environment variable', 'Environment',
logging.FATAL)
LOGDIR_NF = err.Errorcode(1, 'ENV-0002', 'LOGDIR not found', 'Set the environment variable to an existing directory',
'Environment', logging.FATAL)
LOGDIR_NW = err.Errorcode(1, 'ENV-0003', 'Cannot write to LOGDIR',
'Set the environment variable to a writeable directory', 'Environment', logging.FATAL)
FILE_NF = err.Errorcode(1, 'ENV-0004', 'Cannot find file', 'Check the file\'s path and permissions', 'Environment',
logging.ERROR)
FILE_NW = err.Errorcode(1, 'ENV-0005', 'Cannot write to file', 'Check the file\'s path and permissions', 'Environment',
logging.ERROR)
##
# General build-deploy-run related errors. Return code is 2
DEPLOYLIST_NF = err.Errorcode(2, 'GENDEPLOY-0001', 'Deploylist not found', 'Check config directory and file name.',
'DatabaseArtifact', logging.FATAL)
COMMAND_FAILED = err.Errorcode(2, 'GENRUN-0001', 'Command failed', 'Check command output', 'executeCommand',
logging.ERROR)
# Database artifact errors. Return code will be 10
NO_DEPLOYLIST = err.Errorcode(10, 'DBDEPLOY-0001', 'No dbdeploylist defined', 'Set the environment variable',
'DatabaseArtifact', logging.FATAL)
SOURCESQLDIR_NOTSET = err.Errorcode(10, 'DBDEPLOY-0002', 'SourceSqlDir is not defined', 'Set the environment variable',
'DatabaseArtifact', logging.ERROR)
TARGETSQLDIR_NOTSET = err.Errorcode(10, 'DBDEPLOY-0003', 'TargetSqlDir is not defined', 'Set the environment variable',
'DatabaseArtifact', logging.ERROR)
SQLFILE_NF = err.Errorcode(10, 'DBDEPLOY-0004', 'SQL file not found', 'Check the deploy file content',
'DatabaseArtifact', logging.ERROR)
##
# Directory based errors
DIRECTORY_NF = err.Errorcode(10, 'DIRDEPLOY-0001', 'Directory not found', 'Check the deploy file content',
'DirectoryArtifact', logging.ERROR)
##
# Database deploy errors
SQLPLUS_ERROR = err.Errorcode(10, 'DBDEPLOY-0005', 'sqlplus return an error.', 'Check the log output', 'DatabaseDeploy',
logging.ERROR)
# Informatica artifact errors. Return code will be 20
NOT_IMPLEMENTED = err.Errorcode(20, 'INFACICD-0001',
'Result unknown. Function may not have been implemented completely',
'Ask your developer to implement the logic completely.', 'InformaticaArtifact',
logging.WARNING)
INFACMD_FAILED = err.Errorcode(20, 'INFACICD-0002', 'infacmd command failed.',
'Check the log and/or ask your administrator.', 'InformaticaArtifact', logging.ERROR)
INFACMD_LIST_CONN_FAILED = err.Errorcode(20, 'INFACICD-0003', 'infacmd failed to list connections.',
'Check the error message.', 'ListConnections', logging.ERROR)
INFACMD_LIST_CONN_OPTIONS_FAILED = err.Errorcode(20, 'INFACICD-0004', 'infacmd failed to list connection options.',
'Check the error message.', 'ListConnectionOptions', logging.ERROR)
INFACMD_NOCONNECTIONNAME = err.Errorcode(20, 'INFACICD-0005', 'No connection name provided.',
'Provide a connection name for which you want the options to be listed.',
'ListConnectionOptions', logging.ERROR)
INFACMD_EXPORT_CONN_FAILED = err.Errorcode(20, 'INFACICD-0006', 'Export Connections failed.',
'Check the error message.', 'ExportConnections', logging.ERROR)
INFACMD_IMPORT_CONN_FAILED = err.Errorcode(20, 'INFACICD-0007', 'Import Connections failed.',
'Check the error message.', 'ImportConnections', logging.ERROR)
INVALID_DEPLOY_TYPE = err.Errorcode(20, 'INFACICD-0008', 'INTERNAL ERROR: Invalid deploy type.',
'Internal error: Contact the development team.', 'InformaticaApp', logging.ERROR)
# Informatica run errors. Return code will be 30
INFACMD_NOPROFILE = err.Errorcode(30, 'INFARUN-0001', 'No profile name provided.',
'Provide the complete path of the profile to be executed.', 'RunProfile',
logging.ERROR)
INFACMD_PROFILE_FAILED = err.Errorcode(30, 'INFARUN-0002', 'infacmd run profile command failed.',
'Check the log and/or ask your administrator.', 'RunProfile', logging.ERROR)
INFACMD_NOSCORECARD = err.Errorcode(30, 'INFARUN-0003', 'No scorecard name provided.',
'Provide the complete path of the scorecard to be executed.', 'RunScorecard',
logging.ERROR)
INFACMD_SCORECARD_FAILED = err.Errorcode(30, 'INFARUN-0004', 'infacmd run scorecard command failed.',
'Check the log and/or ask your administrator.', 'RunScorecard', logging.ERROR)
INFACMD_NOMAPPING = err.Errorcode(30, 'INFARUN-0005', 'No mapping provided', 'Provide the mapping you want to run.',
'RunMapping', logging.ERROR)
INFACMD_NOAPPFORMAPPING = err.Errorcode(30, 'INFARUN-0006', 'Application of the mapping was not provided',
'Provide the application that contains the mapping you want to run.',
'RunMapping', logging.ERROR)
INFACMD_MAPPING_FAILED = err.Errorcode(30, 'INFARUN-0007', 'infacmd run mapping command failed.',
'Check the log and/or ask your administrator.', 'RunMapping', logging.ERROR)
INFACMD_NOPROJECT = err.Errorcode(30, 'INFARUN-0008', 'No project name provided.',
'Provide a name for the project to be created.', 'CreateProject', logging.ERROR)
INFACMD_NOFOLDER = err.Errorcode(30, 'INFARUN-0009', 'No project and/or folder name provided.',
'Provide the project and a name for the folder to be created.', 'CreateFolder',
logging.ERROR)
INFACMD_CREATE_FOLDER_FAILED = err.Errorcode(30, 'INFARUN-0010', 'Folder could not be created.',
'Check the error message.', 'CreateFolder', logging.ERROR)
INFACMD_CREATE_PROJECT_FAILED = err.Errorcode(30, 'INFARUN-0011', 'Project could not be created.',
'Check the error message.', 'CreateProject', logging.ERROR)
INFACMD_DELETE_PROJECT_FAILED = err.Errorcode(30, 'INFARUN-0012', 'Project could not be removed.',
'Check the error message.', 'DeleteProject', logging.ERROR)
INFACMD_DELETE_FOLDER_FAILED = err.Errorcode(30, 'INFARUN-0012', 'Folder could not be removed.',
'Check the error message.', 'DeleteFolder', logging.ERROR)
INFACMD_NOWORKFLOW = err.Errorcode(30, 'INFARUN-0013', 'No application, workflow and/or wait provided',
'You need to specify the application name, workflow and whether to wait (true) or not (false).',
'RunWorkflow', logging.ERROR)
INFACMD_WORKFLOW_FAILED = err.Errorcode(30, 'INFARUN-0014', 'Workflow failed.', 'Check the error message and logs.',
'RunWorkflow', logging.ERROR)
##
# Manage Security errors
INFACMD_NOUSERNAME = err.Errorcode(30, 'INFASEC-0001', 'No user name, password and/or full name provided.',
'Provide a name, password, and full name for the user to be created.', 'CreateUser',
logging.ERROR)
INFACMD_NOUSERNAME_DELETION = err.Errorcode(30, 'INFASEC-0002', 'No user name provided.',
'Provide the username to be deleted.', 'DeleteUser', logging.ERROR)
INFACMD_NOEXPORTFILENAME = err.Errorcode(30, 'INFASEC-0003', 'No export file name provided.',
'Provide the name for the export file.', 'ExportUsersAndGroups', logging.ERROR)
INFACMD_NOIMPORTFILENAME = err.Errorcode(30, 'INFASEC-0004', 'No import file name provided.',
'Provide the name of the file to be imported.', 'ImportUsersAndGroups',
logging.ERROR)
INFACMD_CREATE_USER_FAILED = err.Errorcode(30, 'INFASEC-0005', 'User creation failed.', 'Check the error message.',
'CreateUser', logging.ERROR)
INFACMD_DELETE_USER_FAILED = err.Errorcode(30, 'INFASEC-0006', 'User deletion failed.', 'Check the error message.',
'DeleteUser', logging.ERROR)
INFACMD_CREATE_GROUP_FAILED = err.Errorcode(30, 'INFASEC-0007', 'Group creation failed.', 'Check the error message.',
'CreateGroup', logging.ERROR)
INFACMD_DELETE_GROUP_FAILED = err.Errorcode(30, 'INFASEC-0018', 'Group deletion failed.', 'Check the error message.',
'DeleteGroup', logging.ERROR)
INFACMD_EXPORT_USRGRP_FAILED = err.Errorcode(30, 'INFASEC-0019', 'Users and groups export failed.',
'Check the error message.', 'ExportUsersAndGroups', logging.ERROR)
INFACMD_IMPORT_USRGRP_FAILED = err.Errorcode(30, 'INFASEC-0020', 'Users and groups import failed.',
'Check the error message.', 'ImportUsersAndGroups', logging.ERROR)
INFACMD_ADD_PERM_FAILED = err.Errorcode(30, 'INFASEC-0021', 'Permissions could not be added.',
'Check the error message.', 'AddPermissions', logging.ERROR)
INFACMD_REMOVE_PERM_FAILED = err.Errorcode(30, 'INFASEC-0023', 'Permissions could not be remvoed.',
'Check the error message.', 'RemovePermissions', logging.ERROR)
INFACMD_SET_CONN_FAILED = err.Errorcode(30, 'INFASEC-0024', 'Permissions could not be set.', 'Check the error message.',
'SetPermissions', logging.ERROR)
INFACMD_NOGROUPNAME = err.Errorcode(30, 'INFASEC-0025', 'No group name provided.',
'Provide a name for the group to be created.', 'CreateGroup', logging.ERROR)
INFACMD_NOGROUPNAME_DELETION = err.Errorcode(30, 'INFASEC-0026', 'No group name provided.',
'Provide the name of the group to be deleted.', 'DeleteGroup',
logging.ERROR)
##
# Scheduler artifact errors
INVALID_SCHEDULER_ENTRY_TYPE = err.Errorcode(40, 'SCHDEPLOY-0001', 'Invalid scheduler entry type.',
'Provide a valid scheduler entry type, eg. dags, jobascode, plugin. Check schedulerConstants.py for more.',
'SchedulerArtifact', logging.ERROR)
SCHEDULERFILE_NF = err.Errorcode(41, 'SCHDEPLOY-0002', 'Scheduler file not found.',
'Provide a valid scheduler file. Check the scheduler deploy list.',
'SchedulerArtifact', logging.ERROR)
| [
"supporting.errorcode.Errorcode"
] | [((1341, 1437), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(0)', '"""0"""', '"""No errors encountered."""', '"""No action needed."""', '"""Result"""', 'logging.INFO'], {}), "(0, '0', 'No errors encountered.', 'No action needed.',\n 'Result', logging.INFO)\n", (1354, 1437), True, 'import supporting.errorcode as err\n'), ((1443, 1535), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(-1)', '"""GEN-0001"""', '"""Ignored"""', '"""No action needed."""', '"""Result"""', 'logging.WARNING'], {}), "(-1, 'GEN-0001', 'Ignored', 'No action needed.', 'Result',\n logging.WARNING)\n", (1456, 1535), True, 'import supporting.errorcode as err\n'), ((1617, 1733), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(1)', '"""ENV-0001"""', '"""LOGDIR not defined"""', '"""Set the environment variable"""', '"""Environment"""', 'logging.FATAL'], {}), "(1, 'ENV-0001', 'LOGDIR not defined',\n 'Set the environment variable', 'Environment', logging.FATAL)\n", (1630, 1733), True, 'import supporting.errorcode as err\n'), ((1772, 1915), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(1)', '"""ENV-0002"""', '"""LOGDIR not found"""', '"""Set the environment variable to an existing directory"""', '"""Environment"""', 'logging.FATAL'], {}), "(1, 'ENV-0002', 'LOGDIR not found',\n 'Set the environment variable to an existing directory', 'Environment',\n logging.FATAL)\n", (1785, 1915), True, 'import supporting.errorcode as err\n'), ((1946, 2095), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(1)', '"""ENV-0003"""', '"""Cannot write to LOGDIR"""', '"""Set the environment variable to a writeable directory"""', '"""Environment"""', 'logging.FATAL'], {}), "(1, 'ENV-0003', 'Cannot write to LOGDIR',\n 'Set the environment variable to a writeable directory', 'Environment',\n logging.FATAL)\n", (1959, 2095), True, 'import supporting.errorcode as err\n'), ((2124, 2247), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(1)', '"""ENV-0004"""', '"""Cannot find file"""', 
'"""Check the file\'s path and permissions"""', '"""Environment"""', 'logging.ERROR'], {}), '(1, \'ENV-0004\', \'Cannot find file\',\n "Check the file\'s path and permissions", \'Environment\', logging.ERROR)\n', (2137, 2247), True, 'import supporting.errorcode as err\n'), ((2279, 2406), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(1)', '"""ENV-0005"""', '"""Cannot write to file"""', '"""Check the file\'s path and permissions"""', '"""Environment"""', 'logging.ERROR'], {}), '(1, \'ENV-0005\', \'Cannot write to file\',\n "Check the file\'s path and permissions", \'Environment\', logging.ERROR)\n', (2292, 2406), True, 'import supporting.errorcode as err\n'), ((2507, 2645), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(2)', '"""GENDEPLOY-0001"""', '"""Deploylist not found"""', '"""Check config directory and file name."""', '"""DatabaseArtifact"""', 'logging.FATAL'], {}), "(2, 'GENDEPLOY-0001', 'Deploylist not found',\n 'Check config directory and file name.', 'DatabaseArtifact', logging.FATAL)\n", (2520, 2645), True, 'import supporting.errorcode as err\n'), ((2689, 2799), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(2)', '"""GENRUN-0001"""', '"""Command failed"""', '"""Check command output"""', '"""executeCommand"""', 'logging.ERROR'], {}), "(2, 'GENRUN-0001', 'Command failed', 'Check command output',\n 'executeCommand', logging.ERROR)\n", (2702, 2799), True, 'import supporting.errorcode as err\n'), ((2895, 3027), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(10)', '"""DBDEPLOY-0001"""', '"""No dbdeploylist defined"""', '"""Set the environment variable"""', '"""DatabaseArtifact"""', 'logging.FATAL'], {}), "(10, 'DBDEPLOY-0001', 'No dbdeploylist defined',\n 'Set the environment variable', 'DatabaseArtifact', logging.FATAL)\n", (2908, 3027), True, 'import supporting.errorcode as err\n'), ((3076, 3212), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(10)', '"""DBDEPLOY-0002"""', '"""SourceSqlDir is not defined"""', '"""Set the 
environment variable"""', '"""DatabaseArtifact"""', 'logging.ERROR'], {}), "(10, 'DBDEPLOY-0002', 'SourceSqlDir is not defined',\n 'Set the environment variable', 'DatabaseArtifact', logging.ERROR)\n", (3089, 3212), True, 'import supporting.errorcode as err\n'), ((3267, 3403), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(10)', '"""DBDEPLOY-0003"""', '"""TargetSqlDir is not defined"""', '"""Set the environment variable"""', '"""DatabaseArtifact"""', 'logging.ERROR'], {}), "(10, 'DBDEPLOY-0003', 'TargetSqlDir is not defined',\n 'Set the environment variable', 'DatabaseArtifact', logging.ERROR)\n", (3280, 3403), True, 'import supporting.errorcode as err\n'), ((3449, 3577), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(10)', '"""DBDEPLOY-0004"""', '"""SQL file not found"""', '"""Check the deploy file content"""', '"""DatabaseArtifact"""', 'logging.ERROR'], {}), "(10, 'DBDEPLOY-0004', 'SQL file not found',\n 'Check the deploy file content', 'DatabaseArtifact', logging.ERROR)\n", (3462, 3577), True, 'import supporting.errorcode as err\n'), ((3645, 3776), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(10)', '"""DIRDEPLOY-0001"""', '"""Directory not found"""', '"""Check the deploy file content"""', '"""DirectoryArtifact"""', 'logging.ERROR'], {}), "(10, 'DIRDEPLOY-0001', 'Directory not found',\n 'Check the deploy file content', 'DirectoryArtifact', logging.ERROR)\n", (3658, 3776), True, 'import supporting.errorcode as err\n'), ((3847, 3970), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(10)', '"""DBDEPLOY-0005"""', '"""sqlplus return an error."""', '"""Check the log output"""', '"""DatabaseDeploy"""', 'logging.ERROR'], {}), "(10, 'DBDEPLOY-0005', 'sqlplus return an error.',\n 'Check the log output', 'DatabaseDeploy', logging.ERROR)\n", (3860, 3970), True, 'import supporting.errorcode as err\n'), ((4070, 4282), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(20)', '"""INFACICD-0001"""', '"""Result unknown. 
Function may not have been implemented completely"""', '"""Ask your developer to implement the logic completely."""', '"""InformaticaArtifact"""', 'logging.WARNING'], {}), "(20, 'INFACICD-0001',\n 'Result unknown. Function may not have been implemented completely',\n 'Ask your developer to implement the logic completely.',\n 'InformaticaArtifact', logging.WARNING)\n", (4083, 4282), True, 'import supporting.errorcode as err\n'), ((4384, 4539), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(20)', '"""INFACICD-0002"""', '"""infacmd command failed."""', '"""Check the log and/or ask your administrator."""', '"""InformaticaArtifact"""', 'logging.ERROR'], {}), "(20, 'INFACICD-0002', 'infacmd command failed.',\n 'Check the log and/or ask your administrator.', 'InformaticaArtifact',\n logging.ERROR)\n", (4397, 4539), True, 'import supporting.errorcode as err\n'), ((4590, 4729), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(20)', '"""INFACICD-0003"""', '"""infacmd failed to list connections."""', '"""Check the error message."""', '"""ListConnections"""', 'logging.ERROR'], {}), "(20, 'INFACICD-0003', 'infacmd failed to list connections.',\n 'Check the error message.', 'ListConnections', logging.ERROR)\n", (4603, 4729), True, 'import supporting.errorcode as err\n'), ((4802, 4958), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(20)', '"""INFACICD-0004"""', '"""infacmd failed to list connection options."""', '"""Check the error message."""', '"""ListConnectionOptions"""', 'logging.ERROR'], {}), "(20, 'INFACICD-0004',\n 'infacmd failed to list connection options.',\n 'Check the error message.', 'ListConnectionOptions', logging.ERROR)\n", (4815, 4958), True, 'import supporting.errorcode as err\n'), ((5027, 5215), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(20)', '"""INFACICD-0005"""', '"""No connection name provided."""', '"""Provide a connection name for which you want the options to be listed."""', '"""ListConnectionOptions"""', 
'logging.ERROR'], {}), "(20, 'INFACICD-0005', 'No connection name provided.',\n 'Provide a connection name for which you want the options to be listed.',\n 'ListConnectionOptions', logging.ERROR)\n", (5040, 5215), True, 'import supporting.errorcode as err\n'), ((5319, 5451), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(20)', '"""INFACICD-0006"""', '"""Export Connections failed."""', '"""Check the error message."""', '"""ExportConnections"""', 'logging.ERROR'], {}), "(20, 'INFACICD-0006', 'Export Connections failed.',\n 'Check the error message.', 'ExportConnections', logging.ERROR)\n", (5332, 5451), True, 'import supporting.errorcode as err\n'), ((5520, 5652), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(20)', '"""INFACICD-0007"""', '"""Import Connections failed."""', '"""Check the error message."""', '"""ImportConnections"""', 'logging.ERROR'], {}), "(20, 'INFACICD-0007', 'Import Connections failed.',\n 'Check the error message.', 'ImportConnections', logging.ERROR)\n", (5533, 5652), True, 'import supporting.errorcode as err\n'), ((5714, 5878), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(20)', '"""INFACICD-0008"""', '"""INTERNAL ERROR: Invalid deploy type."""', '"""Internal error: Contact the development team."""', '"""InformaticaApp"""', 'logging.ERROR'], {}), "(20, 'INFACICD-0008', 'INTERNAL ERROR: Invalid deploy type.',\n 'Internal error: Contact the development team.', 'InformaticaApp',\n logging.ERROR)\n", (5727, 5878), True, 'import supporting.errorcode as err\n'), ((5977, 6136), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0001"""', '"""No profile name provided."""', '"""Provide the complete path of the profile to be executed."""', '"""RunProfile"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0001', 'No profile name provided.',\n 'Provide the complete path of the profile to be executed.',\n 'RunProfile', logging.ERROR)\n", (5990, 6136), True, 'import supporting.errorcode as err\n'), ((6222, 6380), 
'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0002"""', '"""infacmd run profile command failed."""', '"""Check the log and/or ask your administrator."""', '"""RunProfile"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0002', 'infacmd run profile command failed.',\n 'Check the log and/or ask your administrator.', 'RunProfile', logging.ERROR\n )\n", (6235, 6380), True, 'import supporting.errorcode as err\n'), ((6433, 6598), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0003"""', '"""No scorecard name provided."""', '"""Provide the complete path of the scorecard to be executed."""', '"""RunScorecard"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0003', 'No scorecard name provided.',\n 'Provide the complete path of the scorecard to be executed.',\n 'RunScorecard', logging.ERROR)\n", (6446, 6598), True, 'import supporting.errorcode as err\n'), ((6690, 6852), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0004"""', '"""infacmd run scorecard command failed."""', '"""Check the log and/or ask your administrator."""', '"""RunScorecard"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0004', 'infacmd run scorecard command failed.',\n 'Check the log and/or ask your administrator.', 'RunScorecard', logging\n .ERROR)\n", (6703, 6852), True, 'import supporting.errorcode as err\n'), ((6905, 7034), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0005"""', '"""No mapping provided"""', '"""Provide the mapping you want to run."""', '"""RunMapping"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0005', 'No mapping provided',\n 'Provide the mapping you want to run.', 'RunMapping', logging.ERROR)\n", (6918, 7034), True, 'import supporting.errorcode as err\n'), ((7091, 7282), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0006"""', '"""Application of the mapping was not provided"""', '"""Provide the application that contains the mapping you want to run."""', '"""RunMapping"""', 
'logging.ERROR'], {}), "(30, 'INFARUN-0006',\n 'Application of the mapping was not provided',\n 'Provide the application that contains the mapping you want to run.',\n 'RunMapping', logging.ERROR)\n", (7104, 7282), True, 'import supporting.errorcode as err\n'), ((7376, 7534), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0007"""', '"""infacmd run mapping command failed."""', '"""Check the log and/or ask your administrator."""', '"""RunMapping"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0007', 'infacmd run mapping command failed.',\n 'Check the log and/or ask your administrator.', 'RunMapping', logging.ERROR\n )\n", (7389, 7534), True, 'import supporting.errorcode as err\n'), ((7585, 7736), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0008"""', '"""No project name provided."""', '"""Provide a name for the project to be created."""', '"""CreateProject"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0008', 'No project name provided.',\n 'Provide a name for the project to be created.', 'CreateProject',\n logging.ERROR)\n", (7598, 7736), True, 'import supporting.errorcode as err\n'), ((7782, 7961), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0009"""', '"""No project and/or folder name provided."""', '"""Provide the project and a name for the folder to be created."""', '"""CreateFolder"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0009', 'No project and/or folder name provided.',\n 'Provide the project and a name for the folder to be created.',\n 'CreateFolder', logging.ERROR)\n", (7795, 7961), True, 'import supporting.errorcode as err\n'), ((8051, 8179), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0010"""', '"""Folder could not be created."""', '"""Check the error message."""', '"""CreateFolder"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0010', 'Folder could not be created.',\n 'Check the error message.', 'CreateFolder', logging.ERROR)\n", (8064, 8179), True, 'import 
supporting.errorcode as err\n'), ((8253, 8383), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0011"""', '"""Project could not be created."""', '"""Check the error message."""', '"""CreateProject"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0011', 'Project could not be created.',\n 'Check the error message.', 'CreateProject', logging.ERROR)\n", (8266, 8383), True, 'import supporting.errorcode as err\n'), ((8458, 8588), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0012"""', '"""Project could not be removed."""', '"""Check the error message."""', '"""DeleteProject"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0012', 'Project could not be removed.',\n 'Check the error message.', 'DeleteProject', logging.ERROR)\n", (8471, 8588), True, 'import supporting.errorcode as err\n'), ((8662, 8790), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0012"""', '"""Folder could not be removed."""', '"""Check the error message."""', '"""DeleteFolder"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0012', 'Folder could not be removed.',\n 'Check the error message.', 'DeleteFolder', logging.ERROR)\n", (8675, 8790), True, 'import supporting.errorcode as err\n'), ((8853, 9075), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0013"""', '"""No application, workflow and/or wait provided"""', '"""You need to specify the application name, workflow and whether to wait (true) or not (false)."""', '"""RunWorkflow"""', 'logging.ERROR'], {}), "(30, 'INFARUN-0013',\n 'No application, workflow and/or wait provided',\n 'You need to specify the application name, workflow and whether to wait (true) or not (false).'\n , 'RunWorkflow', logging.ERROR)\n", (8866, 9075), True, 'import supporting.errorcode as err\n'), ((9159, 9283), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFARUN-0014"""', '"""Workflow failed."""', '"""Check the error message and logs."""', '"""RunWorkflow"""', 
'logging.ERROR'], {}), "(30, 'INFARUN-0014', 'Workflow failed.',\n 'Check the error message and logs.', 'RunWorkflow', logging.ERROR)\n", (9172, 9283), True, 'import supporting.errorcode as err\n'), ((9370, 9568), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0001"""', '"""No user name, password and/or full name provided."""', '"""Provide a name, password, and full name for the user to be created."""', '"""CreateUser"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0001',\n 'No user name, password and/or full name provided.',\n 'Provide a name, password, and full name for the user to be created.',\n 'CreateUser', logging.ERROR)\n", (9383, 9568), True, 'import supporting.errorcode as err\n'), ((9657, 9788), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0002"""', '"""No user name provided."""', '"""Provide the username to be deleted."""', '"""DeleteUser"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0002', 'No user name provided.',\n 'Provide the username to be deleted.', 'DeleteUser', logging.ERROR)\n", (9670, 9788), True, 'import supporting.errorcode as err\n'), ((9856, 10010), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0003"""', '"""No export file name provided."""', '"""Provide the name for the export file."""', '"""ExportUsersAndGroups"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0003', 'No export file name provided.',\n 'Provide the name for the export file.', 'ExportUsersAndGroups',\n logging.ERROR)\n", (9869, 10010), True, 'import supporting.errorcode as err\n'), ((10071, 10232), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0004"""', '"""No import file name provided."""', '"""Provide the name of the file to be imported."""', '"""ImportUsersAndGroups"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0004', 'No import file name provided.',\n 'Provide the name of the file to be imported.', 'ImportUsersAndGroups',\n logging.ERROR)\n", (10084, 10232), True, 'import 
supporting.errorcode as err\n'), ((10336, 10455), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0005"""', '"""User creation failed."""', '"""Check the error message."""', '"""CreateUser"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0005', 'User creation failed.',\n 'Check the error message.', 'CreateUser', logging.ERROR)\n", (10349, 10455), True, 'import supporting.errorcode as err\n'), ((10524, 10643), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0006"""', '"""User deletion failed."""', '"""Check the error message."""', '"""DeleteUser"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0006', 'User deletion failed.',\n 'Check the error message.', 'DeleteUser', logging.ERROR)\n", (10537, 10643), True, 'import supporting.errorcode as err\n'), ((10713, 10834), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0007"""', '"""Group creation failed."""', '"""Check the error message."""', '"""CreateGroup"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0007', 'Group creation failed.',\n 'Check the error message.', 'CreateGroup', logging.ERROR)\n", (10726, 10834), True, 'import supporting.errorcode as err\n'), ((10905, 11026), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0018"""', '"""Group deletion failed."""', '"""Check the error message."""', '"""DeleteGroup"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0018', 'Group deletion failed.',\n 'Check the error message.', 'DeleteGroup', logging.ERROR)\n", (10918, 11026), True, 'import supporting.errorcode as err\n'), ((11098, 11237), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0019"""', '"""Users and groups export failed."""', '"""Check the error message."""', '"""ExportUsersAndGroups"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0019', 'Users and groups export failed.',\n 'Check the error message.', 'ExportUsersAndGroups', logging.ERROR)\n", (11111, 11237), True, 'import supporting.errorcode as err\n'), ((11310, 
11449), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0020"""', '"""Users and groups import failed."""', '"""Check the error message."""', '"""ImportUsersAndGroups"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0020', 'Users and groups import failed.',\n 'Check the error message.', 'ImportUsersAndGroups', logging.ERROR)\n", (11323, 11449), True, 'import supporting.errorcode as err\n'), ((11517, 11650), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0021"""', '"""Permissions could not be added."""', '"""Check the error message."""', '"""AddPermissions"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0021', 'Permissions could not be added.',\n 'Check the error message.', 'AddPermissions', logging.ERROR)\n", (11530, 11650), True, 'import supporting.errorcode as err\n'), ((11716, 11854), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0023"""', '"""Permissions could not be remvoed."""', '"""Check the error message."""', '"""RemovePermissions"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0023', 'Permissions could not be remvoed.',\n 'Check the error message.', 'RemovePermissions', logging.ERROR)\n", (11729, 11854), True, 'import supporting.errorcode as err\n'), ((11920, 12051), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0024"""', '"""Permissions could not be set."""', '"""Check the error message."""', '"""SetPermissions"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0024', 'Permissions could not be set.',\n 'Check the error message.', 'SetPermissions', logging.ERROR)\n", (11933, 12051), True, 'import supporting.errorcode as err\n'), ((12110, 12256), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0025"""', '"""No group name provided."""', '"""Provide a name for the group to be created."""', '"""CreateGroup"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0025', 'No group name provided.',\n 'Provide a name for the group to be created.', 'CreateGroup', 
logging.ERROR\n )\n", (12123, 12256), True, 'import supporting.errorcode as err\n'), ((12315, 12462), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(30)', '"""INFASEC-0026"""', '"""No group name provided."""', '"""Provide the name of the group to be deleted."""', '"""DeleteGroup"""', 'logging.ERROR'], {}), "(30, 'INFASEC-0026', 'No group name provided.',\n 'Provide the name of the group to be deleted.', 'DeleteGroup', logging.\n ERROR)\n", (12328, 12462), True, 'import supporting.errorcode as err\n'), ((12607, 12828), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(40)', '"""SCHDEPLOY-0001"""', '"""Invalid scheduler entry type."""', '"""Provide a valid scheduler entry type, eg. dags, jobascode, plugin. Check schedulerConstants.py for more."""', '"""SchedulerArtifact"""', 'logging.ERROR'], {}), "(40, 'SCHDEPLOY-0001', 'Invalid scheduler entry type.',\n 'Provide a valid scheduler entry type, eg. dags, jobascode, plugin. Check schedulerConstants.py for more.'\n , 'SchedulerArtifact', logging.ERROR)\n", (12620, 12828), True, 'import supporting.errorcode as err\n'), ((12929, 13105), 'supporting.errorcode.Errorcode', 'err.Errorcode', (['(41)', '"""SCHDEPLOY-0002"""', '"""Scheduler file not found."""', '"""Provide a valid scheduler file. Check the scheduler deploy list."""', '"""SchedulerArtifact"""', 'logging.ERROR'], {}), "(41, 'SCHDEPLOY-0002', 'Scheduler file not found.',\n 'Provide a valid scheduler file. Check the scheduler deploy list.',\n 'SchedulerArtifact', logging.ERROR)\n", (12942, 13105), True, 'import supporting.errorcode as err\n')] |
"""
player.py - the Player class
------------------------
"""
import TicTacToe.tictactoe as tictactoe
class Player:
""""
Defines a TicTacToe game with an AI opponent.
"""
def __init__(self):
self._winners = tictactoe.winners()
self._state = set()
def play_field(self, field_number):
"""
Alters the state for the player to add the played field_number.
"""
self._state.add(field_number)
def reduce_winners(self, opponent_state):
"""
Recalculates the remaining set of winners based on the opponent_state.
"""
self._winners = tictactoe.reduce_winners(self._winners, opponent_state)
@property
def winners(self):
"""
Returns the set of winners for the player.
"""
return self._winners
@property
def state(self):
"""
Returns the state, i.e. the set of field_numbers played,
for the player.
"""
return self._state
@property
def is_winner(self):
"""
Returns True, if the player has a winning combination and False
otherwise.
"""
return tictactoe.check_win(self._state)
| [
"TicTacToe.tictactoe.reduce_winners",
"TicTacToe.tictactoe.winners",
"TicTacToe.tictactoe.check_win"
] | [((245, 264), 'TicTacToe.tictactoe.winners', 'tictactoe.winners', ([], {}), '()\n', (262, 264), True, 'import TicTacToe.tictactoe as tictactoe\n'), ((650, 705), 'TicTacToe.tictactoe.reduce_winners', 'tictactoe.reduce_winners', (['self._winners', 'opponent_state'], {}), '(self._winners, opponent_state)\n', (674, 705), True, 'import TicTacToe.tictactoe as tictactoe\n'), ((1214, 1246), 'TicTacToe.tictactoe.check_win', 'tictactoe.check_win', (['self._state'], {}), '(self._state)\n', (1233, 1246), True, 'import TicTacToe.tictactoe as tictactoe\n')] |
from qtpy import QtWidgets, QtCore
from pyqtgraph.widgets.SpinBox import SpinBox
from pyqtgraph.parametertree.parameterTypes.basetypes import WidgetParameterItem
from pymodaq.daq_utils.daq_utils import scroll_log, scroll_linear
import numpy as np
class SliderSpinBox(QtWidgets.QWidget):
def __init__(self, *args, subtype='lin', **kwargs):
super().__init__()
self.subtype = subtype
self.initUI(*args, **kwargs)
self.valueChanged = self.spinbox.valueChanged # (value) for compatibility with QSpinBox
self.sigValueChanged = self.spinbox.sigValueChanged # (self)
self.sigValueChanging = self.spinbox.sigValueChanging # (self, value) sent immediately; no delay.
self.sigChanged = self.spinbox.sigValueChanged
@property
def opts(self):
return self.spinbox.opts
@opts.setter
def opts(self, **opts):
self.setOpts(**opts)
def setOpts(self, **opts):
self.spinbox.setOpts(**opts)
if 'visible' in opts:
self.slider.setVisible(opts['visible'])
def insert_widget(self ,widget, row=0):
self.vlayout.insertWidget(row, widget)
def initUI(self, *args, **kwargs):
"""
Init the User Interface.
"""
self.vlayout = QtWidgets.QVBoxLayout()
self.slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.slider.setMinimumWidth(50)
self.slider.setMinimum(0)
self.slider.setMaximum(100)
if 'value' in kwargs:
value = kwargs.pop('value')
else:
if 'bounds' in kwargs:
value = kwargs['bounds'][0]
else:
value = 1
self.spinbox = SpinBox(parent=None, value=value, **kwargs)
self.vlayout.addWidget(self.slider)
self.vlayout.addWidget(self.spinbox)
self.vlayout.setSpacing(0)
self.vlayout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.vlayout)
self.slider.valueChanged.connect(self.update_spinbox)
self.spinbox.valueChanged.connect(self.update_slide)
def update_spinbox(self, val):
"""
val is a percentage [0-100] used in order to set the spinbox value between its min and max
"""
min_val = float(self.opts['bounds'][0])
max_val = float(self.opts['bounds'][1])
if self.subtype == 'log':
val_out = scroll_log(val, min_val, max_val)
else:
val_out = scroll_linear(val, min_val, max_val)
try:
self.slider.valueChanged.disconnect(self.update_spinbox)
self.spinbox.valueChanged.disconnect(self.update_slide)
except Exception:
pass
self.spinbox.setValue(val_out)
self.slider.valueChanged.connect(self.update_spinbox)
self.spinbox.valueChanged.connect(self.update_slide)
def update_slide(self, val):
"""
val is the spinbox value between its min and max
"""
min_val = float(self.opts['bounds'][0])
max_val = float(self.opts['bounds'][1])
try:
self.slider.valueChanged.disconnect(self.update_spinbox)
self.spinbox.valueChanged.disconnect(self.update_slide)
except Exception:
pass
if self.subtype == 'linear':
value = int((val - min_val) / (max_val - min_val) * 100)
else:
value = int((np.log10(val) - np.log10(min_val)) / (np.log10(max_val) - np.log10(min_val)) * 100)
self.slider.setValue(value)
self.slider.valueChanged.connect(self.update_spinbox)
self.spinbox.valueChanged.connect(self.update_slide)
def setValue(self, val):
self.spinbox.setValue(val)
def value(self):
return self.spinbox.value()
class SliderParameterItem(WidgetParameterItem):
"""Registered parameter type which displays a QLineEdit"""
def makeWidget(self):
opts = self.param.opts
defs = {
'value': 0, 'min': None, 'max': None,
'step': 1.0, 'dec': False,
'siPrefix': False, 'suffix': '', 'decimals': 12,
}
if 'subtype' not in opts:
opts['subtype'] = 'linear'
defs['bounds'] = [0., self.param.value()] # max value set to default value when no max given
if 'limits' not in opts:
if 'min' in opts:
defs['bounds'][0] = opts['min']
if 'max' in opts:
defs['bounds'][1] = opts['max']
else:
defs['bounds'] = opts['limits']
w = SliderSpinBox(subtype=opts['subtype'], bounds=defs['bounds'], value=defs['value'])
self.setSizeHint(1, QtCore.QSize(50, 50))
return w | [
"numpy.log10",
"qtpy.QtWidgets.QVBoxLayout",
"pymodaq.daq_utils.daq_utils.scroll_log",
"qtpy.QtCore.QSize",
"pymodaq.daq_utils.daq_utils.scroll_linear",
"pyqtgraph.widgets.SpinBox.SpinBox",
"qtpy.QtWidgets.QSlider"
] | [((1283, 1306), 'qtpy.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (1304, 1306), False, 'from qtpy import QtWidgets, QtCore\n'), ((1329, 1368), 'qtpy.QtWidgets.QSlider', 'QtWidgets.QSlider', (['QtCore.Qt.Horizontal'], {}), '(QtCore.Qt.Horizontal)\n', (1346, 1368), False, 'from qtpy import QtWidgets, QtCore\n'), ((1709, 1752), 'pyqtgraph.widgets.SpinBox.SpinBox', 'SpinBox', ([], {'parent': 'None', 'value': 'value'}), '(parent=None, value=value, **kwargs)\n', (1716, 1752), False, 'from pyqtgraph.widgets.SpinBox import SpinBox\n'), ((2402, 2435), 'pymodaq.daq_utils.daq_utils.scroll_log', 'scroll_log', (['val', 'min_val', 'max_val'], {}), '(val, min_val, max_val)\n', (2412, 2435), False, 'from pymodaq.daq_utils.daq_utils import scroll_log, scroll_linear\n'), ((2472, 2508), 'pymodaq.daq_utils.daq_utils.scroll_linear', 'scroll_linear', (['val', 'min_val', 'max_val'], {}), '(val, min_val, max_val)\n', (2485, 2508), False, 'from pymodaq.daq_utils.daq_utils import scroll_log, scroll_linear\n'), ((4675, 4695), 'qtpy.QtCore.QSize', 'QtCore.QSize', (['(50)', '(50)'], {}), '(50, 50)\n', (4687, 4695), False, 'from qtpy import QtWidgets, QtCore\n'), ((3415, 3428), 'numpy.log10', 'np.log10', (['val'], {}), '(val)\n', (3423, 3428), True, 'import numpy as np\n'), ((3431, 3448), 'numpy.log10', 'np.log10', (['min_val'], {}), '(min_val)\n', (3439, 3448), True, 'import numpy as np\n'), ((3453, 3470), 'numpy.log10', 'np.log10', (['max_val'], {}), '(max_val)\n', (3461, 3470), True, 'import numpy as np\n'), ((3473, 3490), 'numpy.log10', 'np.log10', (['min_val'], {}), '(min_val)\n', (3481, 3490), True, 'import numpy as np\n')] |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import logging as log
from mo.utils.error import Error
from mo.utils.utils import refer_to_faq_msg
def tf_slice_infer(node):
input = node.in_node(0)
begin = node.in_node(1)
size = node.in_node(2)
output = node.out_node()
if input.value is None or begin.value is None or size.value is None:
return
if begin.value.size > 1 or size.value.size > 1:
log.error("Slice operation doesn't support parameters (begin, size) with size more then 1")
log.error(" Begin : {}".format(begin.value))
log.error(" Size : {}".format(size.value))
return
# if the 'size' value is equal to -1 then all remaining elements in dimension are included in the slice.
# refer to TensorFlow documentation for more details
if size.value.item() == -1:
size.value = np.array(input.shape[0] - begin.value.item())
output.value = input.value[begin.value.item():(begin.value.item() + size.value.item())]
output.shape = np.array(output.value.shape, dtype=np.int64)
def tf_strided_slice_infer(node):
begin_id = node.in_node(1).value
end_id = node.in_node(2).value
stride = node.in_node(3).value
shape = node.in_node(0).shape
if shape is None or any([x < 0 for x in shape]):
return
convert_negative_indices(begin_id, shape)
convert_negative_indices(end_id, shape)
test_bit = lambda val, offset: ((1 << offset) & val != 0)
slice_idx = []
shrink_axis_mask = []
ellipsis_mask = []
new_axis_mask = []
dims = len(begin_id)
for idx in range(dims):
l = begin_id[idx] if not test_bit(node.begin_mask, idx) else 0
r = end_id[idx] if not test_bit(node.end_mask, idx) else shape[idx]
# Check shrink_axis_mask
shrink_axis_mask.append(test_bit(node.shrink_axis_mask, idx))
if shrink_axis_mask[idx]:
l, r = l, l + 1
# Check new_axis_mask
new_axis_mask.append(test_bit(node.new_axis_mask, idx))
if new_axis_mask[idx]:
slice_idx.append(np.newaxis)
# Check ellipsis_mask
ellipsis_mask.append(test_bit(node.ellipsis_mask, idx))
if ellipsis_mask[idx]:
shrink_axis_mask[idx] = False
l, r = 0, shape[idx]
slice_idx.append(slice(l, r, stride[idx]))
value = node.in_node(0).value if node.in_node(0).value is not None else np.zeros(shape)
value = value[slice_idx]
for idx, flag in reversed(list(enumerate(shrink_axis_mask))):
if flag:
value = np.squeeze(value, idx)
node['slices'] = np.array(slice_idx)
node['shrink_axis_mask'] = np.array(shrink_axis_mask)
node.out_node().value = np.array(value) if node.in_node(0).value is not None else None
node.out_node().shape = np.array(value.shape)
def convert_negative_indices(indices: np.array, shape: np.array):
for ind, value in enumerate(indices):
if value < 0:
indices[ind] += shape[ind]
def caffe_slice_infer(node):
"""
Slices an input layer to multiple output layers along a given dimension
with given slice indices
Parameters
----------
node
"""
top_shape = node.in_node(0).shape
slice_axis = node.axis
bottom_slice_axis = node.in_node(0).shape[node.axis]
if len(node.slice_point) == 0:
new_shape = np.array(top_shape, dtype=np.int64)
new_shape[slice_axis] = bottom_slice_axis / len(node.out_nodes())
for i in range(0, len(node.out_nodes())):
node.out_node(i).shape = new_shape
return
assert (len(node.slice_point) == len(node.out_nodes()) - 1)
prev = 0
slices = []
for slice_point in node.slice_point:
if slice_point <= prev:
raise Error('Check failed for the layer {}. Slice points should be ordered in increasing manner. '.format(node.id) +
'Current slice point {} is not greater than the previous slice point {}. '.format(slice_point, prev) +
'Please verify your model correctness')
slices.append(slice_point - prev)
prev = slice_point
slices.append(bottom_slice_axis - prev)
if sum(slices) != bottom_slice_axis:
raise Error('Check failed for the layer {}. Sum of slices points {} does not equal '.format(node.id, sum(slices)) +
'to the value of input blob shape by the given slice axis {}'.format(bottom_slice_axis))
for i in range(len(node.out_nodes())):
new_shape = np.array(top_shape, dtype=np.int64)
new_shape[slice_axis] = slices[i]
node.out_node(i).shape = new_shape
def mxnet_slice_axis_infer(node):
    """Shape inference for MXNet slice_axis: normalizes node.offset/node.dim
    and writes the resulting shape to every output node (note that all
    outputs receive the SAME shape array object)."""
    in_shape = node.in_node(0).shape
    slice_axis = node.axis
    new_shape = np.array(in_shape, dtype=np.int64)
    new_shape[slice_axis] = new_shape[slice_axis] / len(node.out_nodes())
    axis_size = in_shape[slice_axis]
    # Negative offsets count from the end of the axis.
    if node.offset < 0:
        node.offset += axis_size
    # Falsy dim means "slice to the end"; a negative dim also counts from
    # the end of the axis.
    if not node.dim:
        node.dim = axis_size
    elif node.dim < 0:
        node.dim += axis_size
    input_dim = in_shape.size
    # From here on node.dim holds the slice LENGTH (end - offset).
    node.dim = (node.dim - node.offset)
    if node.dim > in_shape[slice_axis]:
        raise Error(
            '{0} node dimension value is bigger than the corresponding value in the input shape {1}. ' +
            '\nIn particular {2} is bigger than {3}. The Model Optimizer does not support this case. ' +
            '\nTo overcome, try to edit the original model "end" property of the {0} layer.',
            node.name, ','.join(str(i) for i in in_shape), str(node.dim), str(in_shape[slice_axis])
        )
    for i in range(0, input_dim):
        if i == slice_axis:
            new_shape[i] = node.dim
        else:
            new_shape[i] = in_shape[i]
    for i in range(0, len(node.out_nodes())):
        node.out_node(i)['shape'] = new_shape
| [
"numpy.array",
"numpy.zeros",
"logging.error",
"numpy.squeeze"
] | [((1582, 1626), 'numpy.array', 'np.array', (['output.value.shape'], {'dtype': 'np.int64'}), '(output.value.shape, dtype=np.int64)\n', (1590, 1626), True, 'import numpy as np\n'), ((3177, 3196), 'numpy.array', 'np.array', (['slice_idx'], {}), '(slice_idx)\n', (3185, 3196), True, 'import numpy as np\n'), ((3228, 3254), 'numpy.array', 'np.array', (['shrink_axis_mask'], {}), '(shrink_axis_mask)\n', (3236, 3254), True, 'import numpy as np\n'), ((3375, 3396), 'numpy.array', 'np.array', (['value.shape'], {}), '(value.shape)\n', (3383, 3396), True, 'import numpy as np\n'), ((5335, 5369), 'numpy.array', 'np.array', (['in_shape'], {'dtype': 'np.int64'}), '(in_shape, dtype=np.int64)\n', (5343, 5369), True, 'import numpy as np\n'), ((991, 1092), 'logging.error', 'log.error', (['"""Slice operation doesn\'t support parameters (begin, size) with size more then 1"""'], {}), '(\n "Slice operation doesn\'t support parameters (begin, size) with size more then 1"\n )\n', (1000, 1092), True, 'import logging as log\n'), ((2983, 2998), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2991, 2998), True, 'import numpy as np\n'), ((3284, 3299), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (3292, 3299), True, 'import numpy as np\n'), ((3937, 3972), 'numpy.array', 'np.array', (['top_shape'], {'dtype': 'np.int64'}), '(top_shape, dtype=np.int64)\n', (3945, 3972), True, 'import numpy as np\n'), ((5097, 5132), 'numpy.array', 'np.array', (['top_shape'], {'dtype': 'np.int64'}), '(top_shape, dtype=np.int64)\n', (5105, 5132), True, 'import numpy as np\n'), ((3132, 3154), 'numpy.squeeze', 'np.squeeze', (['value', 'idx'], {}), '(value, idx)\n', (3142, 3154), True, 'import numpy as np\n')] |
#!/usr/bin/python3
"""Starts a Flask web application.
The application listens on 0.0.0.0, port 5000.
Routes:
/hbnb_filters: HBnB HTML filters page.
"""
from models import storage
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route("/hbnb_filters", strict_slashes=False)
def hbnb_filters():
    """Displays the main HBnB filters HTML page.

    Renders 10-hbnb_filters.html with every State and Amenity object
    fetched from storage.
    """
    states = storage.all("State")
    amenities = storage.all("Amenity")
    return render_template("10-hbnb_filters.html",
                           states=states, amenities=amenities)
@app.teardown_appcontext
def teardown(exc):
    """Remove the current SQLAlchemy session.

    :param exc: exception that ended the app context, if any (unused).
    """
    storage.close()
if __name__ == "__main__":
app.run(host="0.0.0.0")
| [
"models.storage.all",
"flask.render_template",
"models.storage.close",
"flask.Flask"
] | [((248, 263), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (253, 263), False, 'from flask import Flask\n'), ((401, 421), 'models.storage.all', 'storage.all', (['"""State"""'], {}), "('State')\n", (412, 421), False, 'from models import storage\n'), ((438, 460), 'models.storage.all', 'storage.all', (['"""Amenity"""'], {}), "('Amenity')\n", (449, 460), False, 'from models import storage\n'), ((472, 547), 'flask.render_template', 'render_template', (['"""10-hbnb_filters.html"""'], {'states': 'states', 'amenities': 'amenities'}), "('10-hbnb_filters.html', states=states, amenities=amenities)\n", (487, 547), False, 'from flask import render_template\n'), ((674, 689), 'models.storage.close', 'storage.close', ([], {}), '()\n', (687, 689), False, 'from models import storage\n')] |
# -*- coding: utf-8 -*-
"""setup.py: setuptools control."""
import re
from setuptools import find_packages, setup
# Read the version out of the package source so it is defined in exactly one
# place. The original left the file handle open (resource leak) and raised an
# opaque AttributeError when the pattern was missing; both are fixed here.
with open('src/tav/cmd.py') as _source:
    _match = re.search(r"^__version__\s*=\s*'(.*)'", _source.read(), re.M)
if _match is None:
    raise RuntimeError("__version__ not found in src/tav/cmd.py")
version = _match.group(1)

setup(
    name='tav',
    version=version,
    description='TBD',
    long_description='TBD',
    author='Mudox',
    author_email='<EMAIL>',
    url='https://github.com/mudox/tav',
    package_dir={'': 'src'},
    packages=find_packages('src'),
    install_requires=[
        'libtmux',
        'ruamel.yaml',
    ],
    package_data={
        '': ['resources/*'],
    },
    scripts=[
        'asset/script/tav',
    ],
    entry_points={
        "console_scripts": ['tav-core = tav.cmd:run']
    })
| [
"setuptools.find_packages"
] | [((459, 479), 'setuptools.find_packages', 'find_packages', (['"""src"""'], {}), "('src')\n", (472, 479), False, 'from setuptools import find_packages, setup\n')] |
#!/usr/bin/python;
import sys
import ast
import json
import math as m
import numpy as np
# from scipy.interpolate import interp1d
# from scipy.optimize import fsolve
# Version Controller
sTitle = 'DNVGL RP F103 Cathodic protection of submarine pipelines'
sVersion = 'Version 1.0.0'
# Define constants
pi = m.pi
e = m.e
# Decimal places used when formatting results for the JSON output.
precision = 2
fFormat = "{:.{}f}"
separate = "--------------------"
# Table 6-2 of DNVGL-RP-F103: mean current demand lookup.
# Columns: [max. temperature degC, value used when burial == 1, value used
# when burial == 2]; DNVGLRPF113 selects the first row whose temperature
# exceeds tAmb.
Table62 = np.array([
    [25, 0.050, 0.020],
    [50, 0.060, 0.030],
    [80, 0.075, 0.040],
    [120, 0.100, 0.060],
    [200, 0.130, 0.080],
])
# Table 6-3 of DNVGL-RP-F103: anode material properties.
# Columns: [material, max. temperature, EC0 and capacity e used when
# burial == 1, EC0 and e used otherwise]. Mixed row types make numpy store
# everything as strings, hence the float() casts in DNVGLRPF113.
Table63 = np.array([
    ['Al-Zn-In', 30, -1.050, 2000, -1.000, 1500],
    ['Al-Zn-In', 60, -1.050, 1500, -1.000, 680],
    ['Al-Zn-In', 80, -1.000, 720, -1.000, 320],
    ['Zn', 30, -1.030, 780, -0.980, 750],
    ['Zn', 50, -1.050, 2000, -0.750, 580],
])
# Table A-1 of DNVGL-RP-F103: line pipe coating breakdown constants.
# Columns 3 and 4 are the breakdown constants a and b used by DNVGLRPF113.
TableA1 = np.array([
    ['Glass fibre reincored alphat enamel', True, 70, 0.01, 0.0003],
    ['FBE', True, 90, 0.030, 0.0003],
    ['FBE', False, 90, 0.030, 0.0010],
    ['3-layer FBE/PE', True, 80, 0.001, 0.00003],
    ['3-layer FBE/PE', False, 80, 0.001, 0.00003],
    ['3-layer FBE/PP', True, 110, 0.001, 0.00003],
    ['3-layer FBE/PP', False, 80, 0.001, 0.00003],
    ['FBE/PP Thermally insulating coating', False, 140, 0.0003, 0.00001],
    ['FBE/PU Thermally insulating coating', False, 70, 0.01, 0.003],
    ['Polychloroprene', False, 90, 0.01, 0.001],
])
# Table A-2 of DNVGL-RP-F103: field joint coating breakdown constants.
# Columns 3 and 4 are the breakdown constants a and b used by DNVGLRPF113.
TableA2 = np.array([
    ['none', '4E(1) moulded PU on top bare steel (with primer)', 70, 0.30, 0.030],
    ['1D Adhesive Tape or 2A(1)/2A-(2) HSS (PE/PP backing) with mastic adhesive', '4E(2) moulded PU on top 1D or 2A(1)/2A(2)', 70, 0.10, 0.010],
    ['2B(1) HSS (backing + adhesive in PE with LE primer)', 'none', 70, 0.03, 0.003],
    ['2B(1) HSS (backing + adhesive in PE with LE primer)', '4E(2) moulded PU on 0.03 0.003 top 2B(1)', 70, 0.03, 0.003],
    ['2C (1) HSS (backing + adhesive in PP, LE primer)', 'none', 110, 0.03, 0.003],
    ['2C (1) HSS (backing + adhesive in PP, LE primer)', '4E(2) moulded PU on top 2B(1)', 110, 0.03, 0.003],
    ['3A FBE', 'none', 90, 0.10, 0.010],
    ['3A FBE', '4E(2) moulded PU on top', 90, 0.03, 0.003],
    ['2B(2) FBE with PE HSS', 'none', 70, 0.01, 0.0003],
    ['2B(2) FBE with PE HSS', '4E(2) moulded PU on top FBE + PE HSS', 70, 0.01, 0.0003],
    ['5D(1) and 5E FBE with PE applied as flame spraying or tape, respectively', 'none', 70, 0.01, 0.0003],
    ['2C(2) FBE with PP HSS', 'none', 140, 0.01, 0.0003],
    ['5A/B/C(1) FBE, PP adhesive and PP (wrapped, flame sprayed or moulded)', 'none', 140, 0.01, 0.0003],
    ['NA', '5C(1) Moulded PE on top FBE with PE adhesive', 70, 0.01, 0.0003],
    ['NA', '5C(2) Moulded PP on top FBE with PP adhesive', 140, 0.01, 0.0003],
    ['8A polychloroprene', 'none', 90, 0.03, 0.001],
])
# Eq. 3 of [1]: final current demand for a coated area.
def Icf(Ac, fcf, icm, k):
    """Return the final current demand Ac * fcf * icm * k (Eq. 3 of [1])."""
    return Ac * fcf * icm * k
def fcf(a, b, t):
    """Final coating breakdown factor (Eq. 4 of [1]): a + b * t.

    :param a: constant part of the coating breakdown factor
    :param b: yearly increase of the coating breakdown factor
    :param t: design life in years
    :return: a + b * t

    Bug fix: the original body multiplied ``b`` by the module-level global
    ``tf`` instead of the ``t`` argument, silently ignoring the caller's
    design life.
    """
    return a + b * t
# Eq. 5 of [1]: minimum required net anode mass.
def M(Icm, tf, u, e):
    """Return the required anode mass (Icm * tf * 8760) / (u * e), Eq. 5 of [1].

    8760 converts the design life tf from years to hours.
    """
    return Icm * tf * 8760 / (u * e)
def DNVGLRPF113(D, lPL, lFJ, tAmb, tAno, tf, rhoSea, aGap, aThk, aDen, aMat, coatLP, coatFJ, nJoints, burial, u=0.8):
    """Size bracelet anodes for a pipeline section per DNVGL-RP-F103 ("[1]").

    Looks up the mean current demand (Table 6-2), anode properties
    (Table 6-3) and coating breakdown constants (Tables A-1/A-2), then
    computes the mean/final current demand, the minimum anode mass/volume
    and the required anode length by mass and by exposed surface area.

    :param D: outer diameter, m
    :param lPL: length of one pipe joint, m
    :param lFJ: length of a field joint, m
    :param tAmb: ambient temperature, degC (Table 6-2 row selector)
    :param tAno: anode temperature, degC (Table 6-3 row selector)
    :param tf: design life, years
    :param rhoSea: labelled "Seawater density, kg/cu.m" in the JSON output,
        but used in the 0.315*rho resistance formula below --
        NOTE(review): confirm whether this should be seawater resistivity.
    :param aGap: anode gap, m
    :param aThk: anode thickness, m
    :param aDen: anode density, kg/cu.m
    :param aMat: falsy selects 'Al-Zn-In', otherwise 'Zn'
    :param coatLP: row index into TableA1 (line pipe coating)
    :param coatFJ: row index into TableA2 (field joint coating)
    :param nJoints: number of pipe joints protected by one anode
    :param burial: 1 for non-buried, 2 for buried pipeline
    :param u: anode utilisation factor
    :return: [resultRaw, resultJson]; resultJson has 'input', 'output' and
        'report' keys ready for JSON serialisation.
    """
    k = 1.0
    EA0 = -0.85
    # determine mean current demand from Table 6-2 of [1]
    icm = [x for x in Table62 if x[0] > tAmb][0][burial]
    # print(icm)
    if aMat == False:
        aMaterial = 'Al-Zn-In'
    else:
        aMaterial = 'Zn'
    # determine anode properties from Table 6-3 of [1]
    anode = [x for x in Table63 if (x[0] == aMaterial and float(x[1]) >= float(tAno)) ][0]
    if burial == 1:
        EC0 = float(anode[2])
        e = float(anode[3])
    else:
        EC0 = float(anode[4])
        e = float(anode[5])
    # print(anode)
    # print(EC0)
    # print(e)
    # determine coating breakdown factor from Table A-1 of [1]
    coatingPL = TableA1[coatLP]
    aPL = float(coatingPL[3])
    bPL = float(coatingPL[4])
    # print(coatingPL)
    # print(aPL)
    # print(bPL)
    # determine field joint coating breakdown factor from Table A-2 of [1]
    coatingFJ = TableA2[coatFJ]
    aFJ = float(coatingFJ[3])
    bFJ = float(coatingFJ[4])
    # print(coatingFJ)
    # print(aFJ)
    # print(bFJ)
    # determine coating area
    Acl = pi*D*(lPL)*nJoints
    AclPL = pi*D*(lPL-2*lFJ)*nJoints
    AclFJ = pi*D*(2*lFJ)*nJoints
    # print(AclPL)
    # print(AclFJ)
    # print(Acl)
    #determine mean coating breakdown factor, Eq 2 of [1]
    fcmPL = aPL + 0.5*bPL*tf
    fcmFJ = aFJ + 0.5*bFJ*tf
    # print(fcmPL)
    # print(fcmFJ)
    #determine mean current demand, Eq 1 of [1]
    IcmPL = AclPL*fcmPL*icm*k
    IcmFJ = AclFJ*fcmFJ*icm*k
    Icm = IcmPL + IcmFJ
    # print(IcmPL)
    # print(IcmFJ)
    # print(Icm)
    #determine final coating breakdown factor, Eq 4 of [1]
    fcfPL = aPL + bPL*tf
    fcfFJ = aFJ + bFJ*tf
    # print(fcfPL)
    # print(fcfFJ)
    #determine final coating breakdown factor, Eq 3 of [1]
    IcfPL = AclPL*fcfPL*icm*k
    IcfFJ = AclFJ*fcfFJ*icm*k
    Icf = IcfPL + IcfFJ
    # print(IcfPL)
    # print(IcfFJ)
    # print(Icf)
    #determine minimun required anode mass, Eq. 5 of [1]
    reqM = (Icm*tf*8760)/(0.80*e)
    reqV = reqM/aDen
    # print('required anode mass',reqM)
    # print('required anode volume', reqV)
    # Bracelet cross-section area: annulus between D and D + 2*aThk minus
    # the two longitudinal gaps.
    unitV = (0.25*pi*((D + 2*aThk)**2) - 0.25*pi*(D**2) - 2*aGap*aThk)
    massLength = reqV/unitV
    # print('required anode length by mass', massLength)
    deltaE = EC0 - EA0
    reqA = (0.315*rhoSea*Icf/deltaE)**2
    unitA = pi*(D+2*(1-u)*aThk) - 2*aGap
    areaLength = reqA/unitA
    # print('required anode length by area', areaLength)
    input = [D, lPL, lFJ, tAmb, tAno, tf, rhoSea, aGap, aThk, aDen, aMat, coatLP, coatFJ, nJoints, burial]
    # output = [icm, anode, coatingPL, coatingFJ, reqM, reqV, massLength, areaLength]
    output = [icm, reqM, reqA, massLength, areaLength]
    report = []
    resultRaw = [input, output, report]
    inputJson = {
        'Outer diameter, m':D,
        'Length of pipeline, m':lPL,
        'Length of field joint':lFJ,
        'Ambient temperature, degC':tAmb,
        'Design life, year':tf,
        'Seawater density, kg/cu.m':rhoSea,
    }
    outPutJson = {
        'No of joints, N:':nJoints,
        'Mean current demand, A/Sq.m.': fFormat.format(icm, precision ),
        'Min. required anode mass, kg':fFormat.format(reqM, precision ),
        'Min. required surface area, Sq.m':fFormat.format(reqA, precision ),
        'Min. required length by anode mass, m':fFormat.format(massLength, precision ),
        'Min. required length by anode area, m':fFormat.format(areaLength, precision ),
    }
    resultJson = {'input':inputJson, 'output':outPutJson, 'report':report}
    result = [resultRaw, resultJson]
    return result;
# Default input set, overwritten from sys.argv in the __main__ block below.
# Units follow the inputJson labels inside DNVGLRPF113.
D = 273.05E-03  # outer diameter, m
lPL = 12  # length of one pipe joint, m
lFJ = 0.30  # length of a field joint, m
tAmb = 30  # ambient temperature, degC
tAno = 30  # anode temperature, degC (Table63 row selector)
tf = 30  # design life, years
rhoSea = 1  # labelled "Seawater density, kg/cu.m" -- used like a resistivity; verify units
aGap = 25E-03  # anode gap, m
aThk = 50E-03  # anode thickness, m
aDen = 2700  # anode density, kg/cu.m
aMat = 0  # 0 -> 'Al-Zn-In', otherwise 'Zn'
coatLP = 0  # row index into TableA1 (line pipe coating)
coatFJ = 0  # row index into TableA2 (field joint coating)
spaceMin = 10  # smallest nJoints value evaluated by the loop below
spaceMax = 10  # largest nJoints value evaluated by the loop below
burial = 1 # 1 for non burial and 2 for burial
if __name__ == "__main__":
    # Positional CLI arguments, same order as the DNVGLRPF113 parameters.
    D = float(sys.argv[1])
    lPL = float(sys.argv[2])
    lFJ = float(sys.argv[3])
    tAmb = float(sys.argv[4])
    tAno = float(sys.argv[5])
    tf = float(sys.argv[6])
    rhoSea = float(sys.argv[7])
    aGap = float(sys.argv[8])
    aThk = float(sys.argv[9])
    aDen = float(sys.argv[10])
    aMat = int(sys.argv[11])
    coatLP = int(sys.argv[12])
    coatFJ = int(sys.argv[13])
    spaceMin = int(sys.argv[14])
    spaceMax = int(sys.argv[15])
    burial = int(sys.argv[16])
# NOTE(review): the loop below sits at module level, OUTSIDE the __main__
# guard, so it also runs (with the default inputs above) when this module is
# merely imported -- confirm that is intended.
resultJson = []
for nJoints in range(spaceMin, spaceMax + 1):
        result = DNVGLRPF113(D, lPL, lFJ, tAmb, tAno, tf, rhoSea, aGap, aThk, aDen, aMat, coatLP, coatFJ, nJoints, burial)
        resultJson.append(result[1])
print (json.dumps(resultJson))
| [
"numpy.array",
"json.dumps"
] | [((402, 508), 'numpy.array', 'np.array', (['[[25, 0.05, 0.02], [50, 0.06, 0.03], [80, 0.075, 0.04], [120, 0.1, 0.06], [\n 200, 0.13, 0.08]]'], {}), '([[25, 0.05, 0.02], [50, 0.06, 0.03], [80, 0.075, 0.04], [120, 0.1,\n 0.06], [200, 0.13, 0.08]])\n', (410, 508), True, 'import numpy as np\n'), ((550, 767), 'numpy.array', 'np.array', (["[['Al-Zn-In', 30, -1.05, 2000, -1.0, 1500], ['Al-Zn-In', 60, -1.05, 1500, -\n 1.0, 680], ['Al-Zn-In', 80, -1.0, 720, -1.0, 320], ['Zn', 30, -1.03, \n 780, -0.98, 750], ['Zn', 50, -1.05, 2000, -0.75, 580]]"], {}), "([['Al-Zn-In', 30, -1.05, 2000, -1.0, 1500], ['Al-Zn-In', 60, -1.05,\n 1500, -1.0, 680], ['Al-Zn-In', 80, -1.0, 720, -1.0, 320], ['Zn', 30, -\n 1.03, 780, -0.98, 750], ['Zn', 50, -1.05, 2000, -0.75, 580]])\n", (558, 767), True, 'import numpy as np\n'), ((814, 1345), 'numpy.array', 'np.array', (["[['Glass fibre reincored alphat enamel', True, 70, 0.01, 0.0003], ['FBE', \n True, 90, 0.03, 0.0003], ['FBE', False, 90, 0.03, 0.001], [\n '3-layer FBE/PE', True, 80, 0.001, 3e-05], ['3-layer FBE/PE', False, 80,\n 0.001, 3e-05], ['3-layer FBE/PP', True, 110, 0.001, 3e-05], [\n '3-layer FBE/PP', False, 80, 0.001, 3e-05], [\n 'FBE/PP Thermally insulating coating', False, 140, 0.0003, 1e-05], [\n 'FBE/PU Thermally insulating coating', False, 70, 0.01, 0.003], [\n 'Polychloroprene', False, 90, 0.01, 0.001]]"], {}), "([['Glass fibre reincored alphat enamel', True, 70, 0.01, 0.0003],\n ['FBE', True, 90, 0.03, 0.0003], ['FBE', False, 90, 0.03, 0.001], [\n '3-layer FBE/PE', True, 80, 0.001, 3e-05], ['3-layer FBE/PE', False, 80,\n 0.001, 3e-05], ['3-layer FBE/PP', True, 110, 0.001, 3e-05], [\n '3-layer FBE/PP', False, 80, 0.001, 3e-05], [\n 'FBE/PP Thermally insulating coating', False, 140, 0.0003, 1e-05], [\n 'FBE/PU Thermally insulating coating', False, 70, 0.01, 0.003], [\n 'Polychloroprene', False, 90, 0.01, 0.001]])\n", (822, 1345), True, 'import numpy as np\n'), ((1380, 2775), 'numpy.array', 'np.array', (["[['none', '4E(1) moulded PU 
on top bare steel (with primer)', 70, 0.3, 0.03\n ], [\n '1D Adhesive Tape or 2A(1)/2A-(2) HSS (PE/PP backing) with mastic adhesive'\n , '4E(2) moulded PU on top 1D or 2A(1)/2A(2)', 70, 0.1, 0.01], [\n '2B(1) HSS (backing + adhesive in PE with LE primer)', 'none', 70, 0.03,\n 0.003], ['2B(1) HSS (backing + adhesive in PE with LE primer)',\n '4E(2) moulded PU on 0.03 0.003 top 2B(1)', 70, 0.03, 0.003], [\n '2C (1) HSS (backing + adhesive in PP, LE primer)', 'none', 110, 0.03, \n 0.003], ['2C (1) HSS (backing + adhesive in PP, LE primer)',\n '4E(2) moulded PU on top 2B(1)', 110, 0.03, 0.003], ['3A FBE', 'none', \n 90, 0.1, 0.01], ['3A FBE', '4E(2) moulded PU on top', 90, 0.03, 0.003],\n ['2B(2) FBE with PE HSS', 'none', 70, 0.01, 0.0003], [\n '2B(2) FBE with PE HSS', '4E(2) moulded PU on top FBE + PE HSS', 70, \n 0.01, 0.0003], [\n '5D(1) and 5E FBE with PE applied as flame spraying or tape, respectively',\n 'none', 70, 0.01, 0.0003], ['2C(2) FBE with PP HSS', 'none', 140, 0.01,\n 0.0003], [\n '5A/B/C(1) FBE, PP adhesive and PP (wrapped, flame sprayed or moulded)',\n 'none', 140, 0.01, 0.0003], ['NA',\n '5C(1) Moulded PE on top FBE with PE adhesive', 70, 0.01, 0.0003], [\n 'NA', '5C(2) Moulded PP on top FBE with PP adhesive', 140, 0.01, 0.0003\n ], ['8A polychloroprene', 'none', 90, 0.03, 0.001]]"], {}), "([['none', '4E(1) moulded PU on top bare steel (with primer)', 70, \n 0.3, 0.03], [\n '1D Adhesive Tape or 2A(1)/2A-(2) HSS (PE/PP backing) with mastic adhesive'\n , '4E(2) moulded PU on top 1D or 2A(1)/2A(2)', 70, 0.1, 0.01], [\n '2B(1) HSS (backing + adhesive in PE with LE primer)', 'none', 70, 0.03,\n 0.003], ['2B(1) HSS (backing + adhesive in PE with LE primer)',\n '4E(2) moulded PU on 0.03 0.003 top 2B(1)', 70, 0.03, 0.003], [\n '2C (1) HSS (backing + adhesive in PP, LE primer)', 'none', 110, 0.03, \n 0.003], ['2C (1) HSS (backing + adhesive in PP, LE primer)',\n '4E(2) moulded PU on top 2B(1)', 110, 0.03, 0.003], ['3A FBE', 'none', \n 90, 0.1, 0.01], ['3A 
FBE', '4E(2) moulded PU on top', 90, 0.03, 0.003],\n ['2B(2) FBE with PE HSS', 'none', 70, 0.01, 0.0003], [\n '2B(2) FBE with PE HSS', '4E(2) moulded PU on top FBE + PE HSS', 70, \n 0.01, 0.0003], [\n '5D(1) and 5E FBE with PE applied as flame spraying or tape, respectively',\n 'none', 70, 0.01, 0.0003], ['2C(2) FBE with PP HSS', 'none', 140, 0.01,\n 0.0003], [\n '5A/B/C(1) FBE, PP adhesive and PP (wrapped, flame sprayed or moulded)',\n 'none', 140, 0.01, 0.0003], ['NA',\n '5C(1) Moulded PE on top FBE with PE adhesive', 70, 0.01, 0.0003], [\n 'NA', '5C(2) Moulded PP on top FBE with PP adhesive', 140, 0.01, 0.0003\n ], ['8A polychloroprene', 'none', 90, 0.03, 0.001]])\n", (1388, 2775), True, 'import numpy as np\n'), ((7782, 7804), 'json.dumps', 'json.dumps', (['resultJson'], {}), '(resultJson)\n', (7792, 7804), False, 'import json\n')] |
from django.db import models
from djangae import patches # noqa
class DeferIterationMarker(models.Model):
    """
    Marker to keep track of sharded defer
    iteration tasks
    """
    # Set to True when all shards have been deferred
    is_ready = models.BooleanField(default=False)
    # Total number of shards, and how many of them have completed.
    shard_count = models.PositiveIntegerField(default=0)
    shards_complete = models.PositiveIntegerField(default=0)
    # Presumably: delete this marker row once iteration finishes --
    # verify against the task-runner code that consumes it.
    delete_on_completion = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True)
    # Names of the per-item callback and the finalize callback (semantics
    # defined by the consuming task code, not visible here).
    callback_name = models.CharField(max_length=100)
    finalize_name = models.CharField(max_length=100)
    class Meta:
        app_label = "djangae"
    @property
    def is_finished(self):
        # Finished = every shard has been deferred AND every shard completed.
        return self.is_ready and self.shard_count == self.shards_complete
    def __unicode__(self):
        # NOTE(review): Python 2-style __unicode__; under Python 3 Django
        # looks for __str__, so this repr may never be used -- confirm the
        # target Python version.
        return "Background Task (%s -> %s) at %s" % (
            self.callback_name,
            self.finalize_name,
            self.created
        )
| [
"django.db.models.DateTimeField",
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django.db.models.BooleanField"
] | [((265, 299), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (284, 299), False, 'from django.db import models\n'), ((319, 357), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (346, 357), False, 'from django.db import models\n'), ((380, 418), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (407, 418), False, 'from django.db import models\n'), ((447, 480), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (466, 480), False, 'from django.db import models\n'), ((496, 535), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (516, 535), False, 'from django.db import models\n'), ((556, 588), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (572, 588), False, 'from django.db import models\n'), ((609, 641), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (625, 641), False, 'from django.db import models\n')] |
from setuptools import setup
from setuptools import find_packages
# setuptools configuration for the `darwin` package ("Machine Learning with
# Genetic Algorithms"); packages are discovered automatically via
# find_packages().
setup(name='darwin',
      version='0.1',
      description='Machine Learning with Genetic Algorithms',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/WillBux/darwin',
      license='MIT',
      install_requires=['tqdm>=4.19.4',
                        'numpy>=1.13.3',
                        'pandas>=0.23.4',
                        'scikit-learn>=0.19.0'],
      classifiers=("Programming Language :: Python :: 3",
                   "License :: OSI Approved :: MIT License",
                   "Operating System :: OS Independent"),
      packages=find_packages())
| [
"setuptools.find_packages"
] | [((656, 671), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (669, 671), False, 'from setuptools import find_packages\n')] |
"""
Utilities for bounding box manipulation and GIoU.
"""
from x2paddle import torch2paddle
import paddle
def box_area(boxes):
    """
    Computes the area of a set of bounding boxes, which are specified by
    their (x1, y1, x2, y2) coordinates.

    Arguments:
        boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format
    Returns:
        area (Tensor[N]): area of each box (width * height)
    """
    widths = boxes[:, 2] - boxes[:, 0]
    heights = boxes[:, 3] - boxes[:, 1]
    return widths * heights
def box_cxcywh_to_xyxy(x):
    # Convert boxes from (cx, cy, w, h) center format to (x0, y0, x1, y1)
    # corner format along the last axis.
    x_c, y_c, w, h = x.unbind(-1)
    b = [x_c - 0.5 * w, y_c - 0.5 * h, x_c + 0.5 * w, y_c + 0.5 * h]
    # NOTE(review): `paddle.stacks` is not a documented Paddle API (the
    # public name is `paddle.stack`) -- confirm this resolves at runtime.
    return paddle.stacks(b, axis=-1)
def box_xyxy_to_cxcywh(x):
    # Convert boxes from (x0, y0, x1, y1) corner format to (cx, cy, w, h)
    # center format along the last axis.
    x0, y0, x1, y1 = x.unbind(-1)
    b = [(x0 + x1) / 2, (y0 + y1) / 2, x1 - x0, y1 - y0]
    # NOTE(review): `paddle.stacks` is not a documented Paddle API (the
    # public name is `paddle.stack`) -- confirm this resolves at runtime.
    return paddle.stacks(b, axis=-1)
def box_iou(boxes1, boxes2):
    """Pairwise IoU for boxes in (x0, y0, x1, y1) format.

    Returns an [N, M] IoU matrix together with the matching [N, M] union
    areas (the union is reused by generalized_box_iou).
    """
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)
    # Intersection rectangle per pair: max of top-left corners, min of
    # bottom-right corners, broadcast over all N x M combinations.
    lt = torch2paddle.max(boxes1[:, None, :2], boxes2[:, :2])
    rb = torch2paddle.min(boxes1[:, None, 2:], boxes2[:, 2:])
    # clamp(min=0) zeroes the size of disjoint pairs.
    wh = (rb - lt).clamp(min=0)
    inter = wh[:, :, 0] * wh[:, :, 1]
    union = area1[:, None] + area2 - inter
    iou = inter / union
    return iou, union
def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/
    The boxes should be in [x0, y0, x1, y1] format
    Returns a [N, M] pairwise matrix, where N = len(boxes1)
    and M = len(boxes2)
    """
    # Degenerate (inverted) boxes would produce NaN / inf results.
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    iou, union = box_iou(boxes1, boxes2)
    # Smallest axis-aligned box enclosing each pair.
    lt = torch2paddle.min(boxes1[:, None, :2], boxes2[:, :2])
    rb = torch2paddle.max(boxes1[:, None, 2:], boxes2[:, 2:])
    wh = (rb - lt).clamp(min=0)
    area = wh[:, :, 0] * wh[:, :, 1]
    # GIoU = IoU - (enclosing area - union) / enclosing area.
    return iou - (area - union) / area
def masks_to_boxes(masks):
    """Compute the bounding boxes around the provided masks
    The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
    Returns a [N, 4] tensors, with the boxes in xyxy format
    """
    if masks.numel() == 0:
        # No masks: return an empty [0, 4] tensor.
        return paddle.zeros((0, 4)).requires_grad_(False)
    h, w = masks.shape[-2:]
    # Per-pixel coordinate grids; each mask is reduced to its min/max
    # foreground coordinates along these grids.
    y = paddle.arange(0, h, dtype='float32').requires_grad_(False)
    x = paddle.arange(0, w, dtype='float32').requires_grad_(False)
    y, x = paddle.meshgrid(y, x)
    x_mask = masks * x.unsqueeze(0)
    x_max = x_mask.flatten(1).max(-1)[0]
    # masked_fill pushes background pixels to a huge value so min() only
    # sees foreground coordinates.
    x_min = x_mask.masked_fill(~masks.bool(), 100000000.0).flatten(1).min(-1)[0
        ]
    y_mask = masks * y.unsqueeze(0)
    y_max = y_mask.flatten(1).max(-1)[0]
    y_min = y_mask.masked_fill(~masks.bool(), 100000000.0).flatten(1).min(-1)[0
        ]
    # NOTE(review): `paddle.stacks` is not a documented Paddle API (the
    # public name is `paddle.stack`) -- confirm this resolves at runtime.
    return paddle.stacks([x_min, y_min, x_max, y_max], 1)
| [
"x2paddle.torch2paddle.max",
"paddle.stacks",
"x2paddle.torch2paddle.min",
"paddle.meshgrid",
"paddle.arange",
"paddle.zeros"
] | [((678, 703), 'paddle.stacks', 'paddle.stacks', (['b'], {'axis': '(-1)'}), '(b, axis=-1)\n', (691, 703), False, 'import paddle\n'), ((835, 860), 'paddle.stacks', 'paddle.stacks', (['b'], {'axis': '(-1)'}), '(b, axis=-1)\n', (848, 860), False, 'import paddle\n'), ((959, 1011), 'x2paddle.torch2paddle.max', 'torch2paddle.max', (['boxes1[:, None, :2]', 'boxes2[:, :2]'], {}), '(boxes1[:, None, :2], boxes2[:, :2])\n', (975, 1011), False, 'from x2paddle import torch2paddle\n'), ((1021, 1073), 'x2paddle.torch2paddle.min', 'torch2paddle.min', (['boxes1[:, None, 2:]', 'boxes2[:, 2:]'], {}), '(boxes1[:, None, 2:], boxes2[:, 2:])\n', (1037, 1073), False, 'from x2paddle import torch2paddle\n'), ((1631, 1683), 'x2paddle.torch2paddle.min', 'torch2paddle.min', (['boxes1[:, None, :2]', 'boxes2[:, :2]'], {}), '(boxes1[:, None, :2], boxes2[:, :2])\n', (1647, 1683), False, 'from x2paddle import torch2paddle\n'), ((1693, 1745), 'x2paddle.torch2paddle.max', 'torch2paddle.max', (['boxes1[:, None, 2:]', 'boxes2[:, 2:]'], {}), '(boxes1[:, None, 2:], boxes2[:, 2:])\n', (1709, 1745), False, 'from x2paddle import torch2paddle\n'), ((2382, 2403), 'paddle.meshgrid', 'paddle.meshgrid', (['y', 'x'], {}), '(y, x)\n', (2397, 2403), False, 'import paddle\n'), ((2749, 2795), 'paddle.stacks', 'paddle.stacks', (['[x_min, y_min, x_max, y_max]', '(1)'], {}), '([x_min, y_min, x_max, y_max], 1)\n', (2762, 2795), False, 'import paddle\n'), ((2245, 2281), 'paddle.arange', 'paddle.arange', (['(0)', 'h'], {'dtype': '"""float32"""'}), "(0, h, dtype='float32')\n", (2258, 2281), False, 'import paddle\n'), ((2312, 2348), 'paddle.arange', 'paddle.arange', (['(0)', 'w'], {'dtype': '"""float32"""'}), "(0, w, dtype='float32')\n", (2325, 2348), False, 'import paddle\n'), ((2166, 2186), 'paddle.zeros', 'paddle.zeros', (['(0, 4)'], {}), '((0, 4))\n', (2178, 2186), False, 'import paddle\n')] |
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import typing
import discord
from discord.ext import commands
import libs as lib
class NijiruAlter(commands.Cog, name='煮汁代替定例会予約'):
    """Cog accepting reservations for the substitute regular meeting.

    The cog name and all chat replies are user-facing and therefore stay
    in Japanese; only the developer comments are in English.
    """
    def __init__(self, bot):
        self.bot = bot
        # Accepted sandbox URL prefixes; a tuple so str.startswith() can
        # check both in one call.
        self.SB_URL = (
            "http://scp-jp-sandbox3.wikidot.com/",
            "http://scp-jp-sandbox2.wikidot.com/")
    @commands.command(aliases=['reserv'], enabled=False)
    async def reservation(self, ctx, title, url=None):
        """Reserve a regular-meeting slot for *title* hosted at *url*.

        Currently disabled via enabled=False on the command decorator.
        """
        if url is None:
            await ctx.send("引数が不足しています.")
            return
        if url.startswith(self.SB_URL):
            # TODO (translated): split on whitespace, separate the URL from
            # the rest, and use the first remaining token as the title!
            await ctx.send(f"title : **{title}** {url} にて\n{ctx.message.created_at}に予約を受け付けました.")
        else:
            print("False")
def setup(bot):
    """discord.py extension entry point: register this cog on the bot."""
    bot.add_cog(NijiruAlter(bot))
| [
"discord.ext.commands.command"
] | [((380, 431), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['reserv']", 'enabled': '(False)'}), "(aliases=['reserv'], enabled=False)\n", (396, 431), False, 'from discord.ext import commands\n')] |
import logging
import os
import errno
def make_sure_path_exists(path):
    """Create *path* (including intermediate directories) if it is missing.

    Uses ``os.makedirs(..., exist_ok=True)``, the race-free replacement for
    the manual EEXIST check: it succeeds when another process creates the
    directory concurrently, and still raises for any other OSError (e.g.
    permission denied). Unlike the old EEXIST swallow, it also raises when
    *path* exists but is a regular file, which surfaces a real error.
    """
    os.makedirs(path, exist_ok=True)
def initialize_logger(output_dir, console_log_level, info_log="info.log", warn_err_log="warn_error.log", all_log="all.log", verbose=True):
    """
    Configure the root logger with a console handler and up to three file
    handlers inside ``output_dir`` (created if missing). All handlers share
    one record format; pass a falsy value for any argument to skip that
    handler.

    :param output_dir: directory that receives the log files
    :param console_log_level: level for the console handler; falsy disables it
    :param info_log: file name for INFO-and-above records; falsy disables it
    :param warn_err_log: file name for WARNING-and-above records; falsy disables it
    :param all_log: file name for DEBUG-and-above records; falsy disables it
    :param verbose: if True, prints to stdout which handlers have been set
    """
    # datefmt='%Y.%m.%d %H:%M:%S'
    make_sure_path_exists(output_dir)
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    if console_log_level:
        _attach_handler(logger, logging.StreamHandler(), console_log_level)
        if verbose:
            print("console_log_level is", str(console_log_level))
    if info_log:
        handler = logging.FileHandler(os.path.join(output_dir, info_log), "w", encoding=None)
        _attach_handler(logger, handler, logging.INFO)
        if verbose:
            print("info_log is", info_log)
    if warn_err_log:
        # delay=True postpones opening the file until the first record;
        # the original passed the string "true", which only worked because
        # any non-empty string is truthy.
        handler = logging.FileHandler(os.path.join(output_dir, warn_err_log), "w", encoding=None, delay=True)
        _attach_handler(logger, handler, logging.WARNING)
        if verbose:
            print("warn_err_log is", warn_err_log)
    if all_log:
        _attach_handler(logger, logging.FileHandler(os.path.join(output_dir, all_log), "w"), logging.DEBUG)
        if verbose:
            print("all_log is", all_log)
    if verbose:
        print("Logging to:", output_dir)


# Record format shared by every handler installed above.
_LOG_FORMAT = '%(process)d: %(levelname)s [%(module)s]:%(message)s'


def _attach_handler(logger, handler, level):
    """Set *level* and the shared format on *handler*, then attach it."""
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(_LOG_FORMAT))
    logger.addHandler(handler)
| [
"logging.getLogger",
"logging.StreamHandler",
"os.makedirs",
"logging.Formatter",
"os.path.join"
] | [((746, 765), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (763, 765), False, 'import logging\n'), ((90, 107), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (101, 107), False, 'import os\n'), ((897, 920), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (918, 920), False, 'import logging\n'), ((985, 1057), 'logging.Formatter', 'logging.Formatter', (['"""%(process)d: %(levelname)s [%(module)s]:%(message)s"""'], {}), "('%(process)d: %(levelname)s [%(module)s]:%(message)s')\n", (1002, 1057), False, 'import logging\n'), ((1444, 1516), 'logging.Formatter', 'logging.Formatter', (['"""%(process)d: %(levelname)s [%(module)s]:%(message)s"""'], {}), "('%(process)d: %(levelname)s [%(module)s]:%(message)s')\n", (1461, 1516), False, 'import logging\n'), ((1908, 1980), 'logging.Formatter', 'logging.Formatter', (['"""%(process)d: %(levelname)s [%(module)s]:%(message)s"""'], {}), "('%(process)d: %(levelname)s [%(module)s]:%(message)s')\n", (1925, 1980), False, 'import logging\n'), ((2337, 2409), 'logging.Formatter', 'logging.Formatter', (['"""%(process)d: %(levelname)s [%(module)s]:%(message)s"""'], {}), "('%(process)d: %(levelname)s [%(module)s]:%(message)s')\n", (2354, 2409), False, 'import logging\n'), ((1329, 1363), 'os.path.join', 'os.path.join', (['output_dir', 'info_log'], {}), '(output_dir, info_log)\n', (1341, 1363), False, 'import os\n'), ((1772, 1810), 'os.path.join', 'os.path.join', (['output_dir', 'warn_err_log'], {}), '(output_dir, warn_err_log)\n', (1784, 1810), False, 'import os\n'), ((2237, 2270), 'os.path.join', 'os.path.join', (['output_dir', 'all_log'], {}), '(output_dir, all_log)\n', (2249, 2270), False, 'import os\n')] |
import sys
import can
import logging
import struct
import re
import paho.mqtt.client as mqtt
from binascii import unhexlify, hexlify
from flask import Flask, render_template, send_from_directory
from werkzeug.serving import run_simple
from logging.handlers import TimedRotatingFileHandler
from config import Config
httpApp = Flask(__name__)
@httpApp.route('/css/<path:path>')
def send_css(path):
    """Serve static CSS assets from the local 'static' directory."""
    return send_from_directory('static', path)
@httpApp.route("/")
def hello():
    """Serve the main page rendered from templates/index.html."""
    return render_template("index.html")
def on_mqtt_message(bus, client, userdata, mqtt_message):
    """paho-mqtt message callback (with the CAN bus bound as 1st argument).

    Parses hub / Leiste (strip) / Dose (outlet) / command from the topic via
    the configured regex and forwards 'power' commands to the CAN bus.
    """
    logging.debug("received MQTT message")
    #only for debugging, loopback
    #bus = can.interface.Bus(Config.canbus_interface, bustype=Config.canbus_type)
    match = re.search(Config.mqtt_cmd_topic_iterators_regex, mqtt_message.topic)
    if match:
        # Topic groups: 1 = hub id, 2 = Leiste, 3 = Dose, 4 = command name.
        hub = int(match.group(1))
        leiste = int(match.group(2))
        dose = int(match.group(3))
        cmd = match.group(4)
        if cmd == 'power':
            handle_mqtt_power_message(bus, hub, leiste, dose, mqtt_message.payload)
        else:
            logging.error("Unknown MQTT Command '%s'" % cmd)
def handle_mqtt_power_message(bus, hub, leiste, dose, payload):
    """Translate an MQTT 'power' command into a CAN switching frame.

    :param bus: CAN bus instance to send on
    :param hub: power hub id (bits 12-19 of the arbitration id)
    :param leiste: power strip number (low bits of the arbitration id)
    :param dose: outlet number 1..6
    :param payload: MQTT payload; only 'ON'/'OFF' trigger a frame.
        NOTE(review): paho-mqtt delivers payloads as bytes under Python 3,
        so comparing against the str 'ON'/'OFF' may never match -- verify
        the payload is decoded upstream.
    """
    logging.debug("Power Dose %s on Leiste %s for hub %s" %
                  (dose, leiste, hub))
    # 0x02 = "leave outlet unchanged"; it stays when the payload is neither
    # 'ON' nor 'OFF', in which case no frame is sent at all.
    data = 0x02
    if payload == 'ON':
        data = 0x01
    elif payload == 'OFF':
        data = 0x00
    if not data == 0x02:
        arbitration_id = 0x01F00000
        arbitration_id = arbitration_id + (hub << 12) + 0x30 + leiste
        # Build six outlet bytes: every outlet gets 0x02 (unchanged) except
        # the addressed one, which gets the new state. `payload` is reused
        # here as the integer CAN payload.
        payload = 0x0000
        for i in range(6-dose):
            payload = payload + (0x02 << i * 8)
        payload = payload + (data << (6 - dose) * 8)
        for i in range(dose-1):
            payload = payload + (0x02 << (5 - i) * 8)
        send_can_message(bus, arbitration_id, long_to_bytes(payload))
def long_to_bytes(val):
    """Encode *val* as an 8-byte big-endian bytearray (low 64 bits only)."""
    return bytearray((val >> (8 * i)) & 0xFF for i in range(7, -1, -1))
def send_can_message(bus, id, data):
    """Send an extended-id CAN frame on *bus*.

    :param bus: python-can bus instance
    :param id: 29-bit arbitration id (parameter name shadows the builtin)
    :param data: frame payload as bytes/bytearray
    """
    logging.debug("Sending CAN message with arbitration id %s and data %s" %
                  (format(id, '#04x'), hexlify(data)))
    bus.send(can.Message(extended_id=True, arbitration_id=id, data=data))
def on_can_message(mqtt_client, can_message):
    """Dispatch an incoming CAN frame by message type (bits 24-31 of the id).

    Only type 0x04 ("announcement" / local event) is handled; any other
    type is logged as an error.
    """
    logging.debug("received CAN message")
    arbitration_id = can_message.arbitration_id
    data = can_message.data
    logging.debug("arbitration_id: %s" % format(arbitration_id, '#10x'))
    message_type = (arbitration_id & 0xFF000000) >> 24
    if message_type == 0x04:
        logging.debug("Message Type: announcement")
        handle_local_event_message(mqtt_client, arbitration_id, data)
    else:
        logging.error("Unknown Message Type '%s'" % format(message_type, '#04x'))
def handle_local_event_message(mqtt_client, arbitration_id, data):
node_type = (arbitration_id & 0x00F00000) >> 20
if node_type == 0x0: # Bridge
logging.debug("Node Type: Bridge")
elif node_type == 0x1: # Basis
logging.debug("Node Type: Basis")
elif node_type == 0xF: # Power-Hub
logging.debug("Node Type: Power-Hub")
handle_power_hub_message(mqtt_client, arbitration_id, data)
else:
logging.error("Unknown Node Type '%s'" % format(node_type, '#03x'))
def handle_power_hub_message(mqtt_client, arbitration_id, datanode_id, ):
node_id = (arbitration_id & 0x000FF000) >> 12
event_id = (arbitration_id & 0x00000FFF) >> 0
leiste_id = (arbitration_id & 0x0000000F) >> 0
logging.debug("Event ID: %s" % format(event_id, '#04x'))
if event_id == 0x01:
logging.debug('Sensor: start up - message ignored')
return
if event_id == 0x02:
logging.debug('Sensor: keep alive - message ignored')
return
if event_id == 0x20:
logging.debug('Sensor: Fuse - message ignored.')
return
if event_id <= 0x30 or event_id > 0x34:
logging.warn('Not mapped Sensor "%s"' % format(event_id, '#04x'))
return
data = struct.unpack(">q", data)[0]
logging.debug("CAN Payload: %s" % format(data, '#02x'))
min_amp = (data & 0xFF00000000000000) >> 56
max_amp = (data & 0x00FF000000000000) >> 48
logging.debug("min amp %s" % min_amp)
logging.debug("max amp %s" % max_amp)
dose = []
dose.append(int((data & 0x0000FF0000000000) >> 40))
dose.append(int((data & 0x000000FF00000000) >> 32))
dose.append(int((data & 0x00000000FF000000) >> 24))
dose.append(int((data & 0x0000000000FF0000) >> 16))
dose.append(int((data & 0x000000000000FF00) >> 8))
dose.append(int((data & 0x00000000000000FF) >> 0))
for i in range(6):
topic = create_mqtt_stat_topic(node_id, leiste_id, i + 1)
payload = payload_from_power_msg(dose[i])
if payload:
send_mqtt_message(mqtt_client, topic, payload)
def payload_from_power_msg(data):
if data == 0x00:
return 'OFF'
elif data == 0x01:
return 'ON'
elif data == 0x02:
return None
else:
logging.error('Invalid payload %s' % data)
return None
def create_mqtt_stat_topic(steckdosen_id, node_id, dosen_id):
return Config.mqtt_topic_template % (steckdosen_id, node_id, dosen_id, "stat/power")
def send_mqtt_message(mqtt_client, topic, payload):
logging.debug("Sending MQTT Message '%s' to topic '%s'" % (payload, topic))
try:
result = mqtt_client.publish(topic, payload)
if not (result[0] == mqtt.MQTT_ERR_SUCCESS):
logging.error("Error publishing message \"%s\" to topic \"%s\". Return code %s: %s" % (
payload, topic, str(result[0]), mqtt.error_string(result[0]
)))
except BaseException as e:
logging.error(
"Error relaying message {%s} '%s'. Error: {%s}" % (format(payload, '#2x'), topic, e))
def start():
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
if Config.log_file:
logger = logging.getLogger()
handler = TimedRotatingFileHandler(
Config.log_file, when="midnight", interval=1, backupCount=5)
logger.addHandler(handler)
logging.info("Starting CAN bus")
if not Config.canbus_type:
logging.error("No can interface specified. Valid interfaces are: %s" %
can.interface.can.VALID_INTERFACES)
sys.exit(1)
try:
bus = can.interface.Bus(Config.canbus_interface, bustype=Config.canbus_type)
can_buffer = can.BufferedReader()
notifier = can.Notifier(bus, [can_buffer], timeout=0.1)
except BaseException as e:
logging.error("CAN bus error: %s" % e)
sys.exit(1)
logging.info(
"Starting MQTT (" + Config.mqtt_broker + ":" + Config.mqtt_broker_port + ")"
)
mqtt_client = mqtt.Client(client_id=Config.mqtt_client_id,
protocol=mqtt.MQTTv31)
mqtt_client.on_message = lambda client, userdata, mqtt_message: on_mqtt_message(bus, client, userdata, mqtt_message)
try:
mqtt_errno = mqtt_client.connect(Config.mqtt_broker, Config.mqtt_broker_port, 60)
if mqtt_errno != 0:
logging.error("Failed to connect to MQTT " +
mqtt.error_string(mqtt_errno))
raise Exception(mqtt.error_string(mqtt_errno))
mqtt_client.loop_start()
except BaseException as e:
logging.error("MQTT error: %s" % e)
bus.shutdown()
notifier.stop()
return
try:
for i in range(1, Config.mqtt_topic_iterator1_max+1):
for j in range(1, Config.mqtt_topic_iterator2_max+1):
for k in range(1, Config.mqtt_topic_iterator3_max + 1):
subscription_topic = Config.mqtt_topic_template % (i, j, k, "cmd/power")
logging.info("Adding MQTT subscription to '%s'" % subscription_topic)
mqtt_client.subscribe(subscription_topic)
except BaseException as e:
logging.error("Error adding subscribtion \"%s\": %s" %
(Config.mqtt_topic_template, e))
return
if Config.http_port:
logging.info("Starting web server")
run_simple('localhost', Config.http_port, httpApp, use_reloader=True,
extra_files=["static/main.css", "templates/index.html"])
logging.info("Starting main loop")
try:
while True:
message = can_buffer.get_message()
if message is not None:
on_can_message(mqtt_client, message)
except KeyboardInterrupt:
bus.shutdown()
notifier.stop()
mqtt_client.loop_stop()
mqtt_client.disconnect()
return
if __name__ == '__main__':
start()
| [
"flask.render_template",
"logging.getLogger",
"logging.debug",
"flask.Flask",
"can.interface.Bus",
"can.Message",
"binascii.hexlify",
"werkzeug.serving.run_simple",
"sys.exit",
"logging.info",
"logging.error",
"re.search",
"flask.send_from_directory",
"paho.mqtt.client.Client",
"logging.... | [((327, 342), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'from flask import Flask, render_template, send_from_directory\n'), ((411, 446), 'flask.send_from_directory', 'send_from_directory', (['"""static"""', 'path'], {}), "('static', path)\n", (430, 446), False, 'from flask import Flask, render_template, send_from_directory\n'), ((492, 521), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (507, 521), False, 'from flask import Flask, render_template, send_from_directory\n'), ((585, 623), 'logging.debug', 'logging.debug', (['"""received MQTT message"""'], {}), "('received MQTT message')\n", (598, 623), False, 'import logging\n'), ((754, 822), 're.search', 're.search', (['Config.mqtt_cmd_topic_iterators_regex', 'mqtt_message.topic'], {}), '(Config.mqtt_cmd_topic_iterators_regex, mqtt_message.topic)\n', (763, 822), False, 'import re\n'), ((1228, 1304), 'logging.debug', 'logging.debug', (["('Power Dose %s on Leiste %s for hub %s' % (dose, leiste, hub))"], {}), "('Power Dose %s on Leiste %s for hub %s' % (dose, leiste, hub))\n", (1241, 1304), False, 'import logging\n'), ((2325, 2362), 'logging.debug', 'logging.debug', (['"""received CAN message"""'], {}), "('received CAN message')\n", (2338, 2362), False, 'import logging\n'), ((4259, 4296), 'logging.debug', 'logging.debug', (["('min amp %s' % min_amp)"], {}), "('min amp %s' % min_amp)\n", (4272, 4296), False, 'import logging\n'), ((4301, 4338), 'logging.debug', 'logging.debug', (["('max amp %s' % max_amp)"], {}), "('max amp %s' % max_amp)\n", (4314, 4338), False, 'import logging\n'), ((5363, 5438), 'logging.debug', 'logging.debug', (['("Sending MQTT Message \'%s\' to topic \'%s\'" % (payload, topic))'], {}), '("Sending MQTT Message \'%s\' to topic \'%s\'" % (payload, topic))\n', (5376, 5438), False, 'import logging\n'), ((5959, 6014), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT', 'level': 
'logging.DEBUG'}), '(format=FORMAT, level=logging.DEBUG)\n', (5978, 6014), False, 'import logging\n'), ((6234, 6266), 'logging.info', 'logging.info', (['"""Starting CAN bus"""'], {}), "('Starting CAN bus')\n", (6246, 6266), False, 'import logging\n'), ((6759, 6854), 'logging.info', 'logging.info', (["('Starting MQTT (' + Config.mqtt_broker + ':' + Config.mqtt_broker_port + ')')"], {}), "('Starting MQTT (' + Config.mqtt_broker + ':' + Config.\n mqtt_broker_port + ')')\n", (6771, 6854), False, 'import logging\n'), ((6883, 6950), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {'client_id': 'Config.mqtt_client_id', 'protocol': 'mqtt.MQTTv31'}), '(client_id=Config.mqtt_client_id, protocol=mqtt.MQTTv31)\n', (6894, 6950), True, 'import paho.mqtt.client as mqtt\n'), ((8418, 8452), 'logging.info', 'logging.info', (['"""Starting main loop"""'], {}), "('Starting main loop')\n", (8430, 8452), False, 'import logging\n'), ((2213, 2272), 'can.Message', 'can.Message', ([], {'extended_id': '(True)', 'arbitration_id': 'id', 'data': 'data'}), '(extended_id=True, arbitration_id=id, data=data)\n', (2224, 2272), False, 'import can\n'), ((2607, 2650), 'logging.debug', 'logging.debug', (['"""Message Type: announcement"""'], {}), "('Message Type: announcement')\n", (2620, 2650), False, 'import logging\n'), ((2977, 3011), 'logging.debug', 'logging.debug', (['"""Node Type: Bridge"""'], {}), "('Node Type: Bridge')\n", (2990, 3011), False, 'import logging\n'), ((3653, 3704), 'logging.debug', 'logging.debug', (['"""Sensor: start up - message ignored"""'], {}), "('Sensor: start up - message ignored')\n", (3666, 3704), False, 'import logging\n'), ((3754, 3807), 'logging.debug', 'logging.debug', (['"""Sensor: keep alive - message ignored"""'], {}), "('Sensor: keep alive - message ignored')\n", (3767, 3807), False, 'import logging\n'), ((3857, 3905), 'logging.debug', 'logging.debug', (['"""Sensor: Fuse - message ignored."""'], {}), "('Sensor: Fuse - message ignored.')\n", (3870, 3905), False, 
'import logging\n'), ((4067, 4092), 'struct.unpack', 'struct.unpack', (['""">q"""', 'data'], {}), "('>q', data)\n", (4080, 4092), False, 'import struct\n'), ((6057, 6076), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6074, 6076), False, 'import logging\n'), ((6095, 6184), 'logging.handlers.TimedRotatingFileHandler', 'TimedRotatingFileHandler', (['Config.log_file'], {'when': '"""midnight"""', 'interval': '(1)', 'backupCount': '(5)'}), "(Config.log_file, when='midnight', interval=1,\n backupCount=5)\n", (6119, 6184), False, 'from logging.handlers import TimedRotatingFileHandler\n'), ((6306, 6417), 'logging.error', 'logging.error', (["('No can interface specified. Valid interfaces are: %s' % can.interface.can\n .VALID_INTERFACES)"], {}), "('No can interface specified. Valid interfaces are: %s' % can.\n interface.can.VALID_INTERFACES)\n", (6319, 6417), False, 'import logging\n'), ((6443, 6454), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6451, 6454), False, 'import sys\n'), ((6479, 6549), 'can.interface.Bus', 'can.interface.Bus', (['Config.canbus_interface'], {'bustype': 'Config.canbus_type'}), '(Config.canbus_interface, bustype=Config.canbus_type)\n', (6496, 6549), False, 'import can\n'), ((6571, 6591), 'can.BufferedReader', 'can.BufferedReader', ([], {}), '()\n', (6589, 6591), False, 'import can\n'), ((6611, 6655), 'can.Notifier', 'can.Notifier', (['bus', '[can_buffer]'], {'timeout': '(0.1)'}), '(bus, [can_buffer], timeout=0.1)\n', (6623, 6655), False, 'import can\n'), ((8223, 8258), 'logging.info', 'logging.info', (['"""Starting web server"""'], {}), "('Starting web server')\n", (8235, 8258), False, 'import logging\n'), ((8267, 8397), 'werkzeug.serving.run_simple', 'run_simple', (['"""localhost"""', 'Config.http_port', 'httpApp'], {'use_reloader': '(True)', 'extra_files': "['static/main.css', 'templates/index.html']"}), "('localhost', Config.http_port, httpApp, use_reloader=True,\n extra_files=['static/main.css', 'templates/index.html'])\n", 
(8277, 8397), False, 'from werkzeug.serving import run_simple\n'), ((1110, 1158), 'logging.error', 'logging.error', (['("Unknown MQTT Command \'%s\'" % cmd)'], {}), '("Unknown MQTT Command \'%s\'" % cmd)\n', (1123, 1158), False, 'import logging\n'), ((3056, 3089), 'logging.debug', 'logging.debug', (['"""Node Type: Basis"""'], {}), "('Node Type: Basis')\n", (3069, 3089), False, 'import logging\n'), ((6695, 6733), 'logging.error', 'logging.error', (["('CAN bus error: %s' % e)"], {}), "('CAN bus error: %s' % e)\n", (6708, 6733), False, 'import logging\n'), ((6742, 6753), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6750, 6753), False, 'import sys\n'), ((7471, 7506), 'logging.error', 'logging.error', (["('MQTT error: %s' % e)"], {}), "('MQTT error: %s' % e)\n", (7484, 7506), False, 'import logging\n'), ((8064, 8154), 'logging.error', 'logging.error', (['(\'Error adding subscribtion "%s": %s\' % (Config.mqtt_topic_template, e))'], {}), '(\'Error adding subscribtion "%s": %s\' % (Config.\n mqtt_topic_template, e))\n', (8077, 8154), False, 'import logging\n'), ((2183, 2196), 'binascii.hexlify', 'hexlify', (['data'], {}), '(data)\n', (2190, 2196), False, 'from binascii import unhexlify, hexlify\n'), ((3138, 3175), 'logging.debug', 'logging.debug', (['"""Node Type: Power-Hub"""'], {}), "('Node Type: Power-Hub')\n", (3151, 3175), False, 'import logging\n'), ((5090, 5132), 'logging.error', 'logging.error', (["('Invalid payload %s' % data)"], {}), "('Invalid payload %s' % data)\n", (5103, 5132), False, 'import logging\n'), ((7367, 7396), 'paho.mqtt.client.error_string', 'mqtt.error_string', (['mqtt_errno'], {}), '(mqtt_errno)\n', (7384, 7396), True, 'import paho.mqtt.client as mqtt\n'), ((7308, 7337), 'paho.mqtt.client.error_string', 'mqtt.error_string', (['mqtt_errno'], {}), '(mqtt_errno)\n', (7325, 7337), True, 'import paho.mqtt.client as mqtt\n'), ((7893, 7962), 'logging.info', 'logging.info', (['("Adding MQTT subscription to \'%s\'" % subscription_topic)'], {}), 
'("Adding MQTT subscription to \'%s\'" % subscription_topic)\n', (7905, 7962), False, 'import logging\n'), ((5703, 5731), 'paho.mqtt.client.error_string', 'mqtt.error_string', (['result[0]'], {}), '(result[0])\n', (5720, 5731), True, 'import paho.mqtt.client as mqtt\n')] |
# <NAME>
# 2017A7PS0112P
from gui import Gui
Gui().loop() | [
"gui.Gui"
] | [((47, 52), 'gui.Gui', 'Gui', ([], {}), '()\n', (50, 52), False, 'from gui import Gui\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from conda.common.io import attach_stderr_handler, captured
from logging import DEBUG, NOTSET, WARN, getLogger
def test_attach_stderr_handler():
name = 'abbacadabba'
logr = getLogger(name)
assert len(logr.handlers) == 0
assert logr.level is NOTSET
debug_message = "debug message 1329-485"
with captured() as c:
attach_stderr_handler(WARN, name)
logr.warn('test message')
logr.debug(debug_message)
assert len(logr.handlers) == 1
assert logr.handlers[0].name == 'stderr'
assert logr.handlers[0].level is NOTSET
assert logr.level is WARN
assert c.stdout == ''
assert 'test message' in c.stderr
assert debug_message not in c.stderr
# round two, with debug
with captured() as c:
attach_stderr_handler(DEBUG, name)
logr.warn('test message')
logr.debug(debug_message)
logr.info('info message')
assert len(logr.handlers) == 1
assert logr.handlers[0].name == 'stderr'
assert logr.handlers[0].level is NOTSET
assert logr.level is DEBUG
assert c.stdout == ''
assert 'test message' in c.stderr
assert debug_message in c.stderr
| [
"logging.getLogger",
"conda.common.io.attach_stderr_handler",
"conda.common.io.captured"
] | [((291, 306), 'logging.getLogger', 'getLogger', (['name'], {}), '(name)\n', (300, 306), False, 'from logging import DEBUG, NOTSET, WARN, getLogger\n'), ((430, 440), 'conda.common.io.captured', 'captured', ([], {}), '()\n', (438, 440), False, 'from conda.common.io import attach_stderr_handler, captured\n'), ((455, 488), 'conda.common.io.attach_stderr_handler', 'attach_stderr_handler', (['WARN', 'name'], {}), '(WARN, name)\n', (476, 488), False, 'from conda.common.io import attach_stderr_handler, captured\n'), ((855, 865), 'conda.common.io.captured', 'captured', ([], {}), '()\n', (863, 865), False, 'from conda.common.io import attach_stderr_handler, captured\n'), ((880, 914), 'conda.common.io.attach_stderr_handler', 'attach_stderr_handler', (['DEBUG', 'name'], {}), '(DEBUG, name)\n', (901, 914), False, 'from conda.common.io import attach_stderr_handler, captured\n')] |
#!/usr/bin/env python3
import argparse
import sys
import csv
import pickle
from common import (
read_comments,
read_comments_csv,
cv_score,
text_preprocess,
print_scores,
)
from nltk.corpus import stopwords
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectKBest
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import ComplementNB
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
parser = argparse.ArgumentParser(
description="Classify comments sentiments about Brexit. All comments should be categorized as positive/neutral/negative (1/0/-1)"
)
parser.add_argument("test_file", metavar="TEST_FILE", help="Filename with test data")
parser.add_argument(
"-t",
"--train",
metavar="TRAIN_FILE",
dest="train_file",
help="Name of file with training data",
)
parser.add_argument(
"-p",
"--print_classes",
action="store_true",
help="Print real and predicted classes of test file",
)
parser.add_argument(
"-c",
"--classifier",
default="nb",
choices=["nb", "svc"],
help="Chsose classifier during training. nb - Complement Naive Bayes, svc - LinearSVC",
)
parser.add_argument(
"-s", "--save", action="store_true", help="Overwrite current classifier",
)
parser.add_argument(
"-n", "--n_of_best", default=5000, type=int, help="Number of best feautures"
)
parser.add_argument(
"--csv",
metavar="DELIMETER",
type=str,
help="Input data is a csv type. DELIMETER - char between data columns",
)
args = parser.parse_args()
######################################################################
# Main program
print("Loading test data from file: {}".format(args.test_file))
try:
if args.csv:
X_test, y_test = read_comments_csv(args.test_file, args.csv)
else:
X_test, y_test = read_comments(args.test_file)
except Exception as e:
print("Bad data format in file")
print(e)
exit()
if args.train_file:
print("Loading train data from file: {}".format(args.train_file))
try:
if args.csv:
X_train, y_train = read_comments_csv(args.train_file, args.csv)
else:
X_train, y_train = read_comments(args.train_file)
except Exception as e:
print("Bad data format in file")
print(e)
exit()
print("Data loaded")
print("Extracting features from data")
if args.train_file:
tfid_vect = TfidfVectorizer(
ngram_range=(1, 3),
analyzer="word",
sublinear_tf=True,
min_df=1,
stop_words=stopwords.words("english"),
)
f_classif = SelectKBest(k=args.n_of_best)
feautures_extractor = make_pipeline(tfid_vect, f_classif)
X_train = text_preprocess(X_train)
feautures_train = feautures_extractor.fit_transform(X_train, y_train)
if args.save:
pickle.dump(feautures_extractor, open("feautures_extractor.pkl", "wb"))
print("feautures_extractor saved (overwriten) into file.")
else:
try:
feautures_extractor = pickle.load(open("feautures_extractor.pkl", "rb"))
except Exception as e:
print("Cannot load vectorizer file")
print(e)
exit()
print("feautures_extractor succesfully loaded from file")
X_test = text_preprocess(X_test)
feautures_test = feautures_extractor.transform(X_test)
print("Features extracted")
if args.train_file:
print("Training classifier...")
if args.classifier == "nb":
classifier = ComplementNB(alpha=0.001, class_prior=None, fit_prior=True, norm=False)
else:
classifier = LinearSVC(C=3, penalty="l2", dual=True, loss="hinge", random_state=22)
classifier = classifier.fit(feautures_train, y_train)
print("Classifier trained")
cv_score(feautures_train, y_train, classifier)
if args.save:
pickle.dump(classifier, open("classifier.pkl", "wb"))
print("Classifier saved (overwriten) into file.")
else:
try:
classifier = pickle.load(open("classifier.pkl", "rb"))
except Exception as e:
print("Cannot load classifier file")
print(e)
exit()
print("Classifier succesfully loaded from file")
print("Prediction process started")
y_predicted = classifier.predict(feautures_test)
print("Prediction scores")
print_scores(y_test, y_predicted)
if args.print_classes:
for i in range(len(y_predicted)):
print("{:>4} {:>2} {:>2} {}".format(i, y_test[i], y_predicted[i], args.test_file))
| [
"sklearn.naive_bayes.ComplementNB",
"nltk.corpus.stopwords.words",
"argparse.ArgumentParser",
"common.cv_score",
"sklearn.svm.LinearSVC",
"common.text_preprocess",
"common.print_scores",
"sklearn.feature_selection.SelectKBest",
"sklearn.pipeline.make_pipeline",
"common.read_comments",
"common.re... | [((508, 672), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Classify comments sentiments about Brexit. All comments should be categorized as positive/neutral/negative (1/0/-1)"""'}), "(description=\n 'Classify comments sentiments about Brexit. All comments should be categorized as positive/neutral/negative (1/0/-1)'\n )\n", (531, 672), False, 'import argparse\n'), ((3308, 3331), 'common.text_preprocess', 'text_preprocess', (['X_test'], {}), '(X_test)\n', (3323, 3331), False, 'from common import read_comments, read_comments_csv, cv_score, text_preprocess, print_scores\n'), ((4333, 4366), 'common.print_scores', 'print_scores', (['y_test', 'y_predicted'], {}), '(y_test, y_predicted)\n', (4345, 4366), False, 'from common import read_comments, read_comments_csv, cv_score, text_preprocess, print_scores\n'), ((2658, 2687), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'k': 'args.n_of_best'}), '(k=args.n_of_best)\n', (2669, 2687), False, 'from sklearn.feature_selection import SelectKBest\n'), ((2715, 2750), 'sklearn.pipeline.make_pipeline', 'make_pipeline', (['tfid_vect', 'f_classif'], {}), '(tfid_vect, f_classif)\n', (2728, 2750), False, 'from sklearn.pipeline import make_pipeline\n'), ((2766, 2790), 'common.text_preprocess', 'text_preprocess', (['X_train'], {}), '(X_train)\n', (2781, 2790), False, 'from common import read_comments, read_comments_csv, cv_score, text_preprocess, print_scores\n'), ((3798, 3844), 'common.cv_score', 'cv_score', (['feautures_train', 'y_train', 'classifier'], {}), '(feautures_train, y_train, classifier)\n', (3806, 3844), False, 'from common import read_comments, read_comments_csv, cv_score, text_preprocess, print_scores\n'), ((1809, 1852), 'common.read_comments_csv', 'read_comments_csv', (['args.test_file', 'args.csv'], {}), '(args.test_file, args.csv)\n', (1826, 1852), False, 'from common import read_comments, read_comments_csv, cv_score, text_preprocess, print_scores\n'), 
((1888, 1917), 'common.read_comments', 'read_comments', (['args.test_file'], {}), '(args.test_file)\n', (1901, 1917), False, 'from common import read_comments, read_comments_csv, cv_score, text_preprocess, print_scores\n'), ((3527, 3598), 'sklearn.naive_bayes.ComplementNB', 'ComplementNB', ([], {'alpha': '(0.001)', 'class_prior': 'None', 'fit_prior': '(True)', 'norm': '(False)'}), '(alpha=0.001, class_prior=None, fit_prior=True, norm=False)\n', (3539, 3598), False, 'from sklearn.naive_bayes import ComplementNB\n'), ((3630, 3700), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(3)', 'penalty': '"""l2"""', 'dual': '(True)', 'loss': '"""hinge"""', 'random_state': '(22)'}), "(C=3, penalty='l2', dual=True, loss='hinge', random_state=22)\n", (3639, 3700), False, 'from sklearn.svm import LinearSVC\n'), ((2154, 2198), 'common.read_comments_csv', 'read_comments_csv', (['args.train_file', 'args.csv'], {}), '(args.train_file, args.csv)\n', (2171, 2198), False, 'from common import read_comments, read_comments_csv, cv_score, text_preprocess, print_scores\n'), ((2244, 2274), 'common.read_comments', 'read_comments', (['args.train_file'], {}), '(args.train_file)\n', (2257, 2274), False, 'from common import read_comments, read_comments_csv, cv_score, text_preprocess, print_scores\n'), ((2608, 2634), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2623, 2634), False, 'from nltk.corpus import stopwords\n')] |