code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
from django.db import models
# Create your models here.
class Categories(models.Model):
catagorie=models.CharField(max_length=100)
class SubCatagories(models.Model):
#question = models.ForeignKey(Question, on_delete=models.CASCADE)
subCatagories=models.CharField(max_length=100)
class Products(models.Model):
#question = models.ForeignKey(Question, on_delete=models.CASCADE)
products=models.CharField(max_length=100)
|
[
"django.db.models.CharField"
] |
[((103, 135), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (119, 135), False, 'from django.db import models\n'), ((259, 291), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (275, 291), False, 'from django.db import models\n'), ((405, 437), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (421, 437), False, 'from django.db import models\n')]
|
from sys import stdin
n, x = map(int, stdin.readline().split())
li = [int(c) for c in stdin.readline().split()]
li.sort()
res = 0
i = 0
j = n - 1
while i <= j:
if li[i] + li[j] > x:
j -= 1
else:
i += 1
j -= 1
res += 1
print(res)
|
[
"sys.stdin.readline"
] |
[((41, 57), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (55, 57), False, 'from sys import stdin\n'), ((90, 106), 'sys.stdin.readline', 'stdin.readline', ([], {}), '()\n', (104, 106), False, 'from sys import stdin\n')]
|
#!/usr/bin/env python
"""
A/B timeit test: dict of dicts init.
Output:
exists = False:
speedup seconds option
15% 0.780859 in else
11% 0.821429 defaultdict
10% 0.825422 not in
3% 0.890609 get
0% 0.918161 setdefault
-83% 1.683932 try
exists = True:
speedup seconds option
21% 0.619301 defaultdict
19% 0.634981 try
13% 0.679612 not in
13% 0.681775 in else
5% 0.743055 get
0% 0.779458 setdefault
Result:
* If you want to control when to avoid auto-init on read
(e.g. after explicit delete of k1),
then use "in else" option:
if k1 in d:
d[k1][k2] = v
else:
d[k1] = {k2: v}
* If it fits code better, "not in" option is almost as good:
if k1 not in d:
d[k1] = {}
d[k1][k2] = v
* But if you are OK with auto-init in all cases,
then "defaultdict" is the best option - both fast and DRY:
from collections import defaultdict
d = defaultdict(dict)
d[k1][k2] = v
* While it looks like minus one lookup,
"get" option has almost no effect:
vs = d.get(k1)
if vs is None:
d[k1] = {k2: v}
else:
vs[k2] = v
* Never use "try" option:
it is very slow when k1 does not exist,
and slower than defaultdict when k1 exists:
try:
d[k1][k2] = v
except KeyError:
d[k1] = {k2: v}
* "setdefault" option creates new dict each time,
so it is very bad both for memory and speed:
d.setdefault(k1, {})[k2] = v
Copyright (C) 2017 by <NAME> <<EMAIL>>
MIT License, see http://opensource.org/licenses/MIT
"""
### import
import gc
import time
### config
envs = [
'exists = False',
'exists = True',
]
k1 = 'k1'
k2 = 'k2'
v = 'v'
d1 = {'k' + str(i): {} for i in xrange(10**6)}
defaults = dict(
init_once='pass',
init_each='pass',
repeat=10**6,
)
tests = [
dict(
name='setdefault',
init_once='d = d1.copy()',
init_each='if not exists: del d[k1]',
measure='d.setdefault(k1, {})[k2] = v',
),
dict(
name='defaultdict',
init_once='''
from collections import defaultdict
d = defaultdict(dict, d1)
''',
init_each='if not exists: del d[k1]',
measure='d[k1][k2] = v',
),
dict(
name='not in',
init_once='d = d1.copy()',
init_each='if not exists: del d[k1]',
measure='''
if k1 not in d:
d[k1] = {}
d[k1][k2] = v
''',
),
dict(
name='in else',
init_once='d = d1.copy()',
init_each='if not exists: del d[k1]',
measure='''
if k1 in d:
d[k1][k2] = v
else:
d[k1] = {k2: v}
''',
),
dict(
name='get',
init_once='d = d1.copy()',
init_each='if not exists: del d[k1]',
measure='''
vs = d.get(k1)
if vs is None:
d[k1] = {k2: v}
else:
vs[k2] = v
''',
),
dict(
name='try',
init_once='d = d1.copy()',
init_each='if not exists: del d[k1]',
measure='''
try:
d[k1][k2] = v
except KeyError:
d[k1] = {k2: v}
''',
),
]
### main
def main():
gc.disable()
for env in envs:
print('\n{}:'.format(env))
exec(env)
results = []
base_seconds = None
for test in tests:
init_once = compile(test.get('init_once') or defaults['init_once'], '<string>', 'exec')
init_each = compile(test.get('init_each') or defaults['init_each'], '<string>', 'exec')
measure = compile(test['measure'], '<string>', 'exec')
repeat = test.get('repeat') or defaults['repeat']
exec(init_once)
seconds = 0
for _ in xrange(repeat):
exec(init_each)
start = time.time()
exec(measure)
seconds += time.time() - start
results.append((seconds, test['name']))
if base_seconds is None:
base_seconds = seconds
print('speedup seconds option')
for seconds, name in sorted(results):
print('{:6d}% {:.6f} {}'.format(
int(round(100 * (base_seconds - seconds) / base_seconds)),
seconds,
name,
))
if __name__ == '__main__':
main()
|
[
"gc.disable",
"time.time"
] |
[((3070, 3082), 'gc.disable', 'gc.disable', ([], {}), '()\n', (3080, 3082), False, 'import gc\n'), ((3712, 3723), 'time.time', 'time.time', ([], {}), '()\n', (3721, 3723), False, 'import time\n'), ((3781, 3792), 'time.time', 'time.time', ([], {}), '()\n', (3790, 3792), False, 'import time\n')]
|
import fastai
from fastai.vision import *
from fastai.callbacks import *
from fastai.utils.mem import *
from torchvision.models import vgg16_bn
from skimage.measure import compare_ssim
def gram_matrix(x):
n,c,h,w = x.size()
x = x.view(n, c, -1)
return (x @ x.transpose(1,2))/(c*h*w)
class VGG16FeatureLoss(nn.Module):
# create loss from VGG16 pretrained model and gram matrix
def __init__(self, lyrs_wgts):
super().__init__()
# create vgg16 instance
self.model = vgg16_bn(True).features.cuda().eval()
requires_grad(self.model, False)
# get layers with relu
blocks = [i-1 for i,o in enumerate(children(self.model)) if isinstance(o,nn.MaxPool2d)]
self.loss_features = [self.model[i] for i in blocks[2:5]]
self.hooks = hook_outputs(self.loss_features, detach=False)
self.wgts = lyrs_wgts
self.metric_names = ['LAD',] + [f'feat_{i}' for i in range(len(blocks[2:5]))
] + [f'gram_{i}' for i in range(len(blocks[2:5]))]
def make_features(self, x, clone=False):
self.model(x)
return [(o.clone() if clone else o) for o in self.hooks.stored]
def forward(self, input, target):
out_feat = self.make_features(target, clone=True)
in_feat = self.make_features(input)
# base l1 loss
self.feat_losses = [F.l1_loss(input,target)]
# feature loss
self.feat_losses += [F.l1_loss(f_in, f_out)*w
for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
# gram matrix loss
self.feat_losses += [F.l1_loss(gram_matrix(f_in), gram_matrix(f_out))*w**2 * 5e3
for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
self.metrics = dict(zip(self.metric_names, self.feat_losses))
return sum(self.feat_losses)
def __del__(self):
self.hooks.remove()
|
[
"torchvision.models.vgg16_bn"
] |
[((520, 534), 'torchvision.models.vgg16_bn', 'vgg16_bn', (['(True)'], {}), '(True)\n', (528, 534), False, 'from torchvision.models import vgg16_bn\n')]
|
#!/usr/bin/env python
#
# Um simples jogo de adivinhação com dicas.
#
# <NAME>
# @VinihJunior
# <EMAIL>
from random import randint
while True:
print("************************************************")
print("* *")
print("* Adivinhe qual é o ANIMAL \o/ *")
print("* *")
print("************************************************")
print("\nDescubra qual é o animal: ")
lista_principal = []
lista_animais = open("lista-animais.txt")
lista = (lista_animais.read() )
lista = lista.split()
for line in lista:
line = line.lower()
lista_principal.append(line)
lista_animais.close()
end = len(lista_principal)
secret = randint(0, end - 1)
animal = (lista_principal[secret])
comp = len(animal)
print(animal)
print("\n* Que tem", (comp - 1), "letras \n",
"\n* E começa com a letra", animal[0], "\n")
resp = input("\n* Digite sua resposta: ")
if resp == animal: # compara a resposta com o nome sorteado.
print("\nVocê ACERTOU!! \o/ é", animal, "\n")
voltar = input("Deseja jogar novamente? (y/n): ")
if voltar not in ("y" or "Y"):
break
else:
print("\nVocê errou :(\n")
voltar = input("Deseja jogar novamente? (y/n): ")
if voltar not in ("y" or "Y"):
break
|
[
"random.randint"
] |
[((790, 809), 'random.randint', 'randint', (['(0)', '(end - 1)'], {}), '(0, end - 1)\n', (797, 809), False, 'from random import randint\n')]
|
# importing necessary packages
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
import os
import argparse
# command line arguments
parser = argparse.ArgumentParser()
# argument for delta
parser.add_argument('--delta', type=int, default=0, help='Value of delta while computing mfcc features')
# argument for components
parser.add_argument('--components', type = int, default = 4, help = 'How much number of components')
# arguement for energy coefficient
parser.add_argument('--coefficient', type = str, default = 'Yes', help = 'Enter False to not take energy coefficients')
args = parser.parse_args()
# delta value
delta = args.delta
# Number of components
components = args.components
# Coefficient
if(args.coefficient == 'Yes'):
coefficient = True
else:
coefficient = False
print("Delta is: ", delta)
print("Number of components are: ", components)
print("Energy coefficients are included: ", coefficient)
# loading encoder and Scaler
if(coefficient == True):
file_scalar = ("./Scalar/delta_" + str(delta) + "_with_coefficients_" + ".pkl")
print(True)
else:
file_scalar = ("./Scalar/delta_" + str(delta) + "_without_coefficients_" + ".pkl")
file_encoder = ("./labelEncoder/delta_" + str(delta) + "" + ".pkl")
# Load scalar and label encoder objects
scaler = joblib.load(file_scalar)
lb = joblib.load(file_encoder)
# Load data file
timit_testdf = pd.read_hdf("./features_for_PER/timit_test_delta_" + str(delta) + ".hdf")
print("Test data loaded")
# encoding labels
timit_testdf['labels_lb'] = lb.transform(timit_testdf['labels'])
# Take features and label encoded labels
test = timit_testdf.copy()
test = test[['features', 'labels_lb', 'id']]
# Get unique phonemes
unique_labels = np.unique(test.labels_lb)
# print("unique labels are: ", unique_labels)
# Get test feature set
features_test = np.array(test['features'].tolist())
# Filter the co-efficients based on energy coefficients inclusion
if(coefficient == False):
if(delta == 0):
features_test = features_test[:,1:]
elif(delta == 1):
features_test = np.delete(features_test,[0, 13], axis = 1)
else:
features_test = np.delete(features_test, [0, 13, 26], axis = 1)
# print('features shape' + str(features_test.shape))
# Make predictions
for i in unique_labels:
if(coefficient == True):
directory = "./models_updated/delta_" + str(delta) + "_with_energy_coefficients" + "/" + str(components)
else:
directory = "./models_updated/delta_" + str(delta) + "_without_energy_coefficients" + "/" + str(components)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + "/" + str(i) + ".pkl"
model = joblib.load(filename)
log_prob = model.score_samples(scaler.transform(features_test))
col_name = str(i)
test[col_name] = log_prob
# Get predictions by using argmax
result = test.copy()
result = result.drop(['features', 'labels_lb', 'id'], axis = 1)
# Make predictions
test['predict'] = (result.idxmax(axis = 1))
test['predict'] = test['predict'].astype(int)
# Make groundtruth and prediction files
final = test.copy()
final = final[['id', 'labels_lb', 'predict']]
# final.head()
final.id = final.id.astype(str)
final.id = "sent_" + (final.id)
# final.head()
uniqueid = np.unique(final.id)
# uniqueid
# File for storing ground truth labels
gt = open("./files_for_WER_computation/groundTruth/groundTruth.txt", "w")
# File for storing predicted labels
pred = open("./files_for_WER_computation/predicted/predict_delta_" + str(delta) + "_components_" + str(components) + "_coefficient_" + str(coefficient) + ".txt", "w")
for i in uniqueid:
# print("sentence id is: ", i)
df = final[final.id==i]
gt.write(str(i))
pred.write(str(i))
for j in df.index.values:
gt.write(" " + str(df.labels_lb[j]))
pred.write(" " + str(df.predict[j]))
gt.write("\n")
pred.write("\n")
gt.close()
pred.close()
|
[
"os.makedirs",
"argparse.ArgumentParser",
"os.path.exists",
"sklearn.externals.joblib.load",
"numpy.delete",
"numpy.unique"
] |
[((368, 393), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (391, 393), False, 'import argparse\n'), ((1522, 1546), 'sklearn.externals.joblib.load', 'joblib.load', (['file_scalar'], {}), '(file_scalar)\n', (1533, 1546), False, 'from sklearn.externals import joblib\n'), ((1552, 1577), 'sklearn.externals.joblib.load', 'joblib.load', (['file_encoder'], {}), '(file_encoder)\n', (1563, 1577), False, 'from sklearn.externals import joblib\n'), ((1949, 1974), 'numpy.unique', 'np.unique', (['test.labels_lb'], {}), '(test.labels_lb)\n', (1958, 1974), True, 'import numpy as np\n'), ((3509, 3528), 'numpy.unique', 'np.unique', (['final.id'], {}), '(final.id)\n', (3518, 3528), True, 'import numpy as np\n'), ((2922, 2943), 'sklearn.externals.joblib.load', 'joblib.load', (['filename'], {}), '(filename)\n', (2933, 2943), False, 'from sklearn.externals import joblib\n'), ((2802, 2827), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (2816, 2827), False, 'import os\n'), ((2837, 2859), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (2848, 2859), False, 'import os\n'), ((2301, 2342), 'numpy.delete', 'np.delete', (['features_test', '[0, 13]'], {'axis': '(1)'}), '(features_test, [0, 13], axis=1)\n', (2310, 2342), True, 'import numpy as np\n'), ((2378, 2423), 'numpy.delete', 'np.delete', (['features_test', '[0, 13, 26]'], {'axis': '(1)'}), '(features_test, [0, 13, 26], axis=1)\n', (2387, 2423), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import pytest
import ckan.model as model
import ckan.lib.search as search
import ckan.tests.factories as factories
from ckan.lib.create_test_data import CreateTestData
@pytest.mark.usefixtures("clean_db", "clean_index")
class TestTagQuery(object):
def create_test_data(self):
factories.Dataset(tags=[{"name": "russian"}, {"name": "tolstoy"}])
factories.Dataset(tags=[{"name": "Flexible \u30a1"}])
def test_good_search_query(self):
self.create_test_data()
result = search.query_for(model.Tag).run(query=u"ru")
assert result["count"] == 1, result
assert "russian" in result["results"]
result = search.query_for(model.Tag).run(query=u"s")
assert result["count"] == 2, result
assert "russian" in result["results"]
assert "tolstoy" in result["results"]
def test_good_search_queries(self):
self.create_test_data()
result = search.query_for(model.Tag).run(query=[u"ru", u"s"])
assert result["count"] == 1, result
assert "russian" in result["results"], result
def test_bad_search_query(self):
self.create_test_data()
result = search.query_for(model.Tag).run(query=u"asdf")
assert result["count"] == 0, result
def test_search_with_capital_letter_in_tagname(self):
self.create_test_data()
"""
Asserts that it doesn't matter if the tagname has capital letters in it.
"""
result = search.query_for(model.Tag).run(query=u"lexible")
assert u"Flexible \u30a1" in result["results"]
def test_search_with_capital_letter_in_search_query(self):
self.create_test_data()
"""
Asserts that search works with a capital letter in the search query.
"""
result = search.query_for(model.Tag).run(query=u"Flexible")
assert u"Flexible \u30a1" in result["results"]
def test_search_with_unicode_in_search_query(self):
self.create_test_data()
"""
Asserts that search works with a unicode character above \u00ff.
"""
result = search.query_for(model.Tag).run(query=u" \u30a1")
assert u"Flexible \u30a1" in result["results"]
def test_search_is_case_insensitive(self):
self.create_test_data()
result = search.query_for(model.Tag).run(query=u"flexible")
assert u"Flexible \u30a1" in result["results"]
def test_good_search_fields(self):
self.create_test_data()
result = search.query_for(model.Tag).run(fields={"tags": u"ru"})
assert result["count"] == 1, result
assert "russian" in result["results"], result
result = search.query_for(model.Tag).run(fields={"tags": u"s"})
assert result["count"] == 2, result
assert "russian" in result["results"], result
assert "tolstoy" in result["results"], result
def test_bad_search_fields(self):
self.create_test_data()
result = search.query_for(model.Tag).run(fields={"tags": u"asdf"})
assert result["count"] == 0, result
@pytest.fixture
def resources_for_search():
pkg1 = factories.Dataset(name="pkg1")
pkg2 = factories.Dataset()
factories.Resource(
url=TestResourceQuery.ab,
description="This is site ab.",
alt_url="alt_1",
format="Excel spreadsheet",
hash="xyz-123",
package_id=pkg1["id"],
)
factories.Resource(
url=TestResourceQuery.cd,
description="This is site cd.",
alt_url="alt_2",
format="Office spreadsheet",
hash="qwe-456",
package_id=pkg1["id"],
)
factories.Resource(
url=TestResourceQuery.cd,
description="This is site cd.",
alt_url="alt_1",
package_id=pkg2["id"],
)
factories.Resource(
url=TestResourceQuery.ef, description="This is site ef.", package_id=pkg2["id"]
)
factories.Resource(
url=TestResourceQuery.ef, description="This is site gh.", package_id=pkg2["id"]
)
factories.Resource(
url=TestResourceQuery.ef, description="This is site ij.", package_id=pkg2["id"]
)
@pytest.mark.usefixtures("clean_db", "clean_index", "resources_for_search")
class TestResourceQuery(object):
ab = "http://site.com/a/b.txt"
cd = "http://site.com/c/d.txt"
ef = "http://site.com/e/f.txt"
def res_search(
self, query="", fields={}, terms=[], options=search.QueryOptions()
):
result = search.query_for(model.Resource).run(
query=query, fields=fields, terms=terms, options=options
)
resources = [
model.Session.query(model.Resource).get(resource_id)
for resource_id in result["results"]
]
urls = set([resource.url for resource in resources])
return urls
def test_search_url(self):
fields = {"url": "site.com"}
result = search.query_for(model.Resource).run(fields=fields)
assert result["count"] == 6
resources = [
model.Session.query(model.Resource).get(resource_id)
for resource_id in result["results"]
]
urls = set([resource.url for resource in resources])
assert set([self.ab, self.cd, self.ef]) == urls
def test_search_url_2(self):
urls = self.res_search(fields={"url": "a/b"})
assert set([self.ab]) == urls, urls
def test_search_url_multiple_words(self):
fields = {"url": "e f"}
urls = self.res_search(fields=fields)
assert {self.ef} == urls
def test_search_url_none(self):
urls = self.res_search(fields={"url": "nothing"})
assert set() == urls, urls
def test_search_description(self):
urls = self.res_search(fields={"description": "cd"})
assert set([self.cd]) == urls, urls
def test_search_format(self):
urls = self.res_search(fields={"format": "excel"})
assert set([self.ab]) == urls, urls
def test_search_format_2(self):
urls = self.res_search(fields={"format": "sheet"})
assert set([self.ab, self.cd]) == urls, urls
def test_search_hash_complete(self):
urls = self.res_search(fields={"hash": "xyz-123"})
assert set([self.ab]) == urls, urls
def test_search_hash_partial(self):
urls = self.res_search(fields={"hash": "xyz"})
assert set([self.ab]) == urls, urls
def test_search_hash_partial_but_not_initial(self):
urls = self.res_search(fields={"hash": "123"})
assert set() == urls, urls
def test_search_several_fields(self):
urls = self.res_search(fields={"description": "ab", "format": "sheet"})
assert set([self.ab]) == urls, urls
def test_search_all_fields(self):
fields = {"url": "a/b"}
options = search.QueryOptions(all_fields=True)
result = search.query_for(model.Resource).run(
fields=fields, options=options
)
assert result["count"] == 1, result
res_dict = result["results"][0]
assert isinstance(res_dict, dict)
res_keys = set(res_dict.keys())
expected_res_keys = set(model.Resource.get_columns())
expected_res_keys.update(
["id", "package_id", "position"]
)
assert res_keys == expected_res_keys
pkg1 = model.Package.by_name(u"pkg1")
ab = [r for r in pkg1.resources if r.url == self.ab][0]
assert res_dict["id"] == ab.id
assert res_dict["package_id"] == pkg1.id
assert res_dict["url"] == ab.url
assert res_dict["description"] == ab.description
assert res_dict["format"] == ab.format
assert res_dict["hash"] == ab.hash
assert res_dict["position"] == 0
def test_pagination(self):
# large search
options = search.QueryOptions(order_by="id")
fields = {"url": "site"}
all_results = search.query_for(model.Resource).run(
fields=fields, options=options
)
all_resources = all_results["results"]
all_resource_count = all_results["count"]
assert all_resource_count >= 6, all_results
# limit
options = search.QueryOptions(order_by="id")
options.limit = 2
result = search.query_for(model.Resource).run(
fields=fields, options=options
)
resources = result["results"]
count = result["count"]
assert len(resources) == 2, resources
assert count == all_resource_count, (count, all_resource_count)
assert resources == all_resources[:2], "%r, %r" % (
resources,
all_resources,
)
# offset
options = search.QueryOptions(order_by="id")
options.limit = 2
options.offset = 2
result = search.query_for(model.Resource).run(
fields=fields, options=options
)
resources = result["results"]
assert len(resources) == 2, resources
assert resources == all_resources[2:4]
# larger offset
options = search.QueryOptions(order_by="id")
options.limit = 2
options.offset = 4
result = search.query_for(model.Resource).run(
fields=fields, options=options
)
resources = result["results"]
assert len(resources) == 2, resources
assert resources == all_resources[4:6]
def test_extra_info(self):
fields = {"alt_url": "alt_1"}
result = search.query_for(model.Resource).run(fields=fields)
assert result["count"] == 2, result
fields = {"alt_url": "alt_2"}
result = search.query_for(model.Resource).run(fields=fields)
assert result["count"] == 1, result
def test_convert_legacy_params_to_solr():
convert = search.convert_legacy_parameters_to_solr
assert convert({"title": "bob"}) == {"q": "title:bob"}
assert convert({"title": "bob", "fl": "name"}) == {
"q": "title:bob",
"fl": "name",
}
assert convert({"title": "<NAME>ins"}) == {
"q": 'title:"bob perkins"'
}
assert convert({"q": "high+wages"}) == {"q": "high wages"}
assert convert({"q": "high+wages summary"}) == {
"q": "high wages summary"
}
assert convert({"title": "high+wages"}) == {"q": 'title:"high wages"'}
assert convert({"title": "bob", "all_fields": 1}) == {
"q": "title:bob",
"fl": "*",
}
with pytest.raises(search.SearchError):
convert({"title": "bob", "all_fields": "non-boolean"})
assert convert({"q": "bob", "order_by": "name"}) == {
"q": "bob",
"sort": "name asc",
}
assert convert({"q": "bob", "offset": "0", "limit": "10"}) == {
"q": "bob",
"start": "0",
"rows": "10",
}
assert convert({"tags": ["russian", "tolstoy"]}) == {
"q": 'tags:"russian" tags:"tolstoy"'
}
assert convert({"tags": ["russian", "multi word"]}) == {
"q": 'tags:"russian" tags:"multi word"'
}
assert convert({"tags": ["with CAPITALS"]}) == {
"q": 'tags:"with CAPITALS"'
}
assert convert({"tags": [u"with greek omega \u03a9"]}) == {
"q": u'tags:"with greek omega \u03a9"'
}
assert convert({"tags": ["tolstoy"]}) == {"q": 'tags:"tolstoy"'}
assert convert({"tags": "tolstoy"}) == {"q": 'tags:"tolstoy"'}
assert convert({"tags": "more than one tolstoy"}) == {
"q": 'tags:"more than one tolstoy"'
}
assert convert({"tags": u"with greek omega \u03a9"}) == {
"q": u'tags:"with greek omega \u03a9"'
}
assert convert({"title": "Seymour: An Introduction"}) == {
"q": r'title:"Seymour\: An Introduction"'
}
assert convert({"title": "Pop!"}) == {"q": r"title:Pop\!"}
with pytest.raises(search.SearchError):
convert({"tags": {"tolstoy": 1}})
@pytest.mark.usefixtures("clean_db", "clean_index")
class TestPackageQuery:
def test_all_records_by_shared_notes(self):
pkg1 = factories.Dataset(notes="shared")
pkg2 = factories.Dataset(notes="shared")
pkg3 = factories.Dataset(notes="shared")
result = search.query_for(model.Package).run({"q": "shared"})
assert {pkg1["name"], pkg2["name"], pkg3["name"]} == set(result["results"])
def test_single_by_name(self):
factories.Dataset(name="first")
factories.Dataset(name="second")
result = search.query_for(model.Package).run({"q": u"first"})
assert result["results"] == ["first"]
def test_name_multiple_results(self):
factories.Dataset(name="first-record")
factories.Dataset(name="second-record")
factories.Dataset(name="third-dataset")
result = search.query_for(model.Package).run({"q": u"record"})
assert set(result["results"]) == {"first-record", "second-record"}
def test_title_token(self):
pkg1 = factories.Dataset(title="first record")
pkg2 = factories.Dataset(title="second record")
factories.Dataset(title="third dataset")
result = search.query_for(model.Package).run({"q": u"title:record"})
assert set(result["results"]) == {pkg1["name"], pkg2["name"]}
def test_not_real_license(self):
factories.Dataset()
result = search.query_for(model.Package).run(
{"q": u'license:"OKD::Other (PublicsDomain)"'}
)
assert result["count"] == 0, result
def test_quotation(self):
pkg1 = factories.Dataset(title="Government Expenditure")
factories.Dataset(title="Government Extra Expenditure")
# multiple words quoted
result = search.query_for(model.Package).run(
{"q": u'"Government Expenditure"'}
)
assert [pkg1["name"]] == result["results"]
# multiple words quoted wrong order
result = search.query_for(model.Package).run(
{"q": u'"Expenditure Government"'}
)
assert result["results"] == []
def test_tags_field_split_word(self):
pkg1 = factories.Dataset(tags=[{"name": "split todo"}])
result = search.query_for(model.Package).run({"q": u"todo split"})
assert result["results"] == [pkg1["name"]]
def test_tags_field_with_capitals(self):
pkg1 = factories.Dataset(tags=[{"name": "capitals"}])
result = search.query_for(model.Package).run({"q": u"CAPITALS"})
assert result["results"] == [pkg1["name"]]
def dont_test_tags_field_with_basic_unicode(self):
pkg1 = factories.Dataset(tags=[{"name": "greek omega \u03a9"}])
result = search.query_for(model.Package).run(
{"q": u"greek omega \u03a9"}
)
assert result["results"] == [pkg1["name"]]
def test_tags_token_simple(self):
pkg1 = factories.Dataset(tags=[{"name": "country-sweden"}])
result = search.query_for(model.Package).run(
{"q": u"tags:country-sweden"}
)
assert result["results"] == [pkg1["name"]]
def test_tags_token_with_multi_word_tag(self):
pkg1 = factories.Dataset(tags=[{"name": "todo split"}])
result = search.query_for(model.Package).run(
{"q": u'tags:"todo split"'}
)
assert result["results"] == [pkg1["name"]]
def test_tags_token_multiple(self):
pkg1 = factories.Dataset(tags=[{"name": "country-sweden"}, {"name": "format-pdf"}])
result = search.query_for(model.Package).run(
{"q": u"tags:country-sweden tags:format-pdf"}
)
assert result["results"] == [pkg1["name"]]
result = search.query_for(model.Package).run(
{"q": u'tags:"todo split" tags:war'}
)
def test_tags_token_with_punctuation(self):
pkg1 = factories.Dataset(tags=[{"name": "surprise."}])
result = search.query_for(model.Package).run(
{"q": u'tags:"surprise."'}
)
assert result["results"] == [pkg1["name"]]
def test_overall(self):
CreateTestData.create()
query = search.query_for(model.Package)
assert query.run({"q": "annakarenina"})["count"] == 1
assert query.run({"q": "warandpeace"})["count"] == 1
assert query.run({"q": ""})["count"] == 2
assert query.run({"q": "Tolstoy"})["count"] == 1
assert query.run({"q": "title:Novel"})["count"] == 1
assert query.run({"q": "title:peace"})["count"] == 0
assert query.run({"q": "name:warandpeace"})["count"] == 1
assert query.run({"q": "groups:david"})["count"] == 2
assert query.run({"q": "groups:roger"})["count"] == 1
assert query.run({"q": "groups:lenny"})["count"] == 0
assert query.run({"q": 'tags:"russian"'})["count"] == 2
assert query.run({"q": 'tags:"Flexible \u30a1"'})["count"] == 2
assert query.run({"q": "Flexible \u30a1"})["count"] == 2
assert query.run({"q": "Flexible"})["count"] == 2
assert query.run({"q": "flexible"})["count"] == 2
|
[
"ckan.model.Session.query",
"ckan.lib.search.query_for",
"ckan.lib.search.QueryOptions",
"ckan.lib.create_test_data.CreateTestData.create",
"ckan.model.Package.by_name",
"ckan.model.Resource.get_columns",
"pytest.raises",
"ckan.tests.factories.Dataset",
"pytest.mark.usefixtures",
"ckan.tests.factories.Resource"
] |
[((196, 246), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""clean_db"""', '"""clean_index"""'], {}), "('clean_db', 'clean_index')\n", (219, 246), False, 'import pytest\n'), ((4169, 4243), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""clean_db"""', '"""clean_index"""', '"""resources_for_search"""'], {}), "('clean_db', 'clean_index', 'resources_for_search')\n", (4192, 4243), False, 'import pytest\n'), ((11863, 11913), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""clean_db"""', '"""clean_index"""'], {}), "('clean_db', 'clean_index')\n", (11886, 11913), False, 'import pytest\n'), ((3148, 3178), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'name': '"""pkg1"""'}), "(name='pkg1')\n", (3165, 3178), True, 'import ckan.tests.factories as factories\n'), ((3190, 3209), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {}), '()\n', (3207, 3209), True, 'import ckan.tests.factories as factories\n'), ((3214, 3383), 'ckan.tests.factories.Resource', 'factories.Resource', ([], {'url': 'TestResourceQuery.ab', 'description': '"""This is site ab."""', 'alt_url': '"""alt_1"""', 'format': '"""Excel spreadsheet"""', 'hash': '"""xyz-123"""', 'package_id': "pkg1['id']"}), "(url=TestResourceQuery.ab, description='This is site ab.',\n alt_url='alt_1', format='Excel spreadsheet', hash='xyz-123', package_id\n =pkg1['id'])\n", (3232, 3383), True, 'import ckan.tests.factories as factories\n'), ((3434, 3603), 'ckan.tests.factories.Resource', 'factories.Resource', ([], {'url': 'TestResourceQuery.cd', 'description': '"""This is site cd."""', 'alt_url': '"""alt_2"""', 'format': '"""Office spreadsheet"""', 'hash': '"""qwe-456"""', 'package_id': "pkg1['id']"}), "(url=TestResourceQuery.cd, description='This is site cd.',\n alt_url='alt_2', format='Office spreadsheet', hash='qwe-456',\n package_id=pkg1['id'])\n", (3452, 3603), True, 'import ckan.tests.factories as factories\n'), ((3656, 3776), 'ckan.tests.factories.Resource', 'factories.Resource', ([], {'url': 'TestResourceQuery.cd', 'description': '"""This is site cd."""', 'alt_url': '"""alt_1"""', 'package_id': "pkg2['id']"}), "(url=TestResourceQuery.cd, description='This is site cd.',\n alt_url='alt_1', package_id=pkg2['id'])\n", (3674, 3776), True, 'import ckan.tests.factories as factories\n'), ((3816, 3919), 'ckan.tests.factories.Resource', 'factories.Resource', ([], {'url': 'TestResourceQuery.ef', 'description': '"""This is site ef."""', 'package_id': "pkg2['id']"}), "(url=TestResourceQuery.ef, description='This is site ef.',\n package_id=pkg2['id'])\n", (3834, 3919), True, 'import ckan.tests.factories as factories\n'), ((3934, 4037), 'ckan.tests.factories.Resource', 'factories.Resource', ([], {'url': 'TestResourceQuery.ef', 'description': '"""This is site gh."""', 'package_id': "pkg2['id']"}), "(url=TestResourceQuery.ef, description='This is site gh.',\n package_id=pkg2['id'])\n", (3952, 4037), True, 'import ckan.tests.factories as factories\n'), ((4052, 4155), 'ckan.tests.factories.Resource', 'factories.Resource', ([], {'url': 'TestResourceQuery.ef', 'description': '"""This is site ij."""', 'package_id': "pkg2['id']"}), "(url=TestResourceQuery.ef, description='This is site ij.',\n package_id=pkg2['id'])\n", (4070, 4155), True, 'import ckan.tests.factories as factories\n'), ((315, 381), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'tags': "[{'name': 'russian'}, {'name': 'tolstoy'}]"}), "(tags=[{'name': 'russian'}, {'name': 'tolstoy'}])\n", (332, 381), True, 'import ckan.tests.factories as factories\n'), ((390, 438), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'tags': "[{'name': 'Flexible ァ'}]"}), "(tags=[{'name': 'Flexible ァ'}])\n", (407, 438), True, 'import ckan.tests.factories as factories\n'), ((4456, 4477), 'ckan.lib.search.QueryOptions', 'search.QueryOptions', ([], {}), '()\n', (4475, 4477), True, 'import ckan.lib.search as search\n'), ((6823, 6859), 'ckan.lib.search.QueryOptions', 'search.QueryOptions', ([], {'all_fields': '(True)'}), '(all_fields=True)\n', (6842, 6859), True, 'import ckan.lib.search as search\n'), ((7345, 7375), 'ckan.model.Package.by_name', 'model.Package.by_name', (['u"""pkg1"""'], {}), "(u'pkg1')\n", (7366, 7375), True, 'import ckan.model as model\n'), ((7830, 7864), 'ckan.lib.search.QueryOptions', 'search.QueryOptions', ([], {'order_by': '"""id"""'}), "(order_by='id')\n", (7849, 7864), True, 'import ckan.lib.search as search\n'), ((8195, 8229), 'ckan.lib.search.QueryOptions', 'search.QueryOptions', ([], {'order_by': '"""id"""'}), "(order_by='id')\n", (8214, 8229), True, 'import ckan.lib.search as search\n'), ((8708, 8742), 'ckan.lib.search.QueryOptions', 'search.QueryOptions', ([], {'order_by': '"""id"""'}), "(order_by='id')\n", (8727, 8742), True, 'import ckan.lib.search as search\n'), ((9078, 9112), 'ckan.lib.search.QueryOptions', 'search.QueryOptions', ([], {'order_by': '"""id"""'}), "(order_by='id')\n", (9097, 9112), True, 'import ckan.lib.search as search\n'), ((10447, 10480), 'pytest.raises', 'pytest.raises', (['search.SearchError'], {}), '(search.SearchError)\n', (10460, 10480), False, 'import pytest\n'), ((11783, 11816), 'pytest.raises', 'pytest.raises', (['search.SearchError'], {}), '(search.SearchError)\n', (11796, 11816), False, 'import pytest\n'), ((12001, 12034), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'notes': '"""shared"""'}), "(notes='shared')\n", (12018, 12034), True, 'import ckan.tests.factories as factories\n'), ((12050, 12083), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'notes': '"""shared"""'}), "(notes='shared')\n", (12067, 12083), True, 'import ckan.tests.factories as factories\n'), ((12099, 12132), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'notes': '"""shared"""'}), "(notes='shared')\n", (12116, 12132), True, 'import ckan.tests.factories as factories\n'), ((12331, 12362), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'name': '"""first"""'}), "(name='first')\n", (12348, 12362), True, 'import ckan.tests.factories as factories\n'), ((12371, 12403), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'name': '"""second"""'}), "(name='second')\n", (12388, 12403), True, 'import ckan.tests.factories as factories\n'), ((12572, 12610), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'name': '"""first-record"""'}), "(name='first-record')\n", (12589, 12610), True, 'import ckan.tests.factories as factories\n'), ((12619, 12658), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'name': '"""second-record"""'}), "(name='second-record')\n", (12636, 12658), True, 'import ckan.tests.factories as factories\n'), ((12667, 12706), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'name': '"""third-dataset"""'}), "(name='third-dataset')\n", (12684, 12706), True, 'import ckan.tests.factories as factories\n'), ((12901, 12940), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'title': '"""first record"""'}), "(title='first record')\n", (12918, 12940), True, 'import ckan.tests.factories as factories\n'), ((12956, 12996), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'title': '"""second record"""'}), "(title='second record')\n", (12973, 12996), True, 'import ckan.tests.factories as factories\n'), ((13005, 13045), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'title': '"""third dataset"""'}), "(title='third dataset')\n", (13022, 13045), True, 'import ckan.tests.factories as factories\n'), ((13240, 13259), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {}), '()\n', (13257, 13259), True, 'import ckan.tests.factories as factories\n'), ((13473, 13522), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'title': '"""Government Expenditure"""'}), "(title='Government Expenditure')\n", (13490, 13522), True, 'import ckan.tests.factories as factories\n'), ((13531, 13586), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'title': '"""Government Extra Expenditure"""'}), "(title='Government Extra Expenditure')\n", (13548, 13586), True, 'import ckan.tests.factories as factories\n'), ((14034, 14082), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'tags': "[{'name': 'split todo'}]"}), "(tags=[{'name': 'split todo'}])\n", (14051, 14082), True, 'import ckan.tests.factories as factories\n'), ((14270, 14316), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'tags': "[{'name': 'capitals'}]"}), "(tags=[{'name': 'capitals'}])\n", (14287, 14316), True, 'import ckan.tests.factories as factories\n'), ((14512, 14563), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'tags': "[{'name': 'greek omega Ω'}]"}), "(tags=[{'name': 'greek omega Ω'}])\n", (14529, 14563), True, 'import ckan.tests.factories as factories\n'), ((14779, 14831), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'tags': "[{'name': 'country-sweden'}]"}), "(tags=[{'name': 'country-sweden'}])\n", (14796, 14831), True, 'import ckan.tests.factories as factories\n'), ((15056, 15104), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'tags': "[{'name': 'todo split'}]"}), "(tags=[{'name': 'todo split'}])\n", (15073, 15104), True, 'import ckan.tests.factories as factories\n'), ((15316, 15392), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'tags': "[{'name': 'country-sweden'}, {'name': 'format-pdf'}]"}), "(tags=[{'name': 'country-sweden'}, {'name': 'format-pdf'}])\n", (15333, 15392), True, 'import ckan.tests.factories as factories\n'), ((15743, 15790), 'ckan.tests.factories.Dataset', 'factories.Dataset', ([], {'tags': "[{'name': 'surprise.'}]"}), "(tags=[{'name': 'surprise.'}])\n", (15760, 15790), True, 'import ckan.tests.factories as factories\n'), ((15982, 16005), 'ckan.lib.create_test_data.CreateTestData.create', 'CreateTestData.create', ([], {}), '()\n', (16003, 16005), False, 'from ckan.lib.create_test_data import CreateTestData\n'), ((16022, 16053), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (16038, 16053), True, 'import ckan.lib.search as search\n'), ((7166, 7194), 'ckan.model.Resource.get_columns', 'model.Resource.get_columns', ([], {}), '()\n', (7192, 7194), True, 'import ckan.model as model\n'), ((532, 559), 'ckan.lib.search.query_for', 'search.query_for', (['model.Tag'], {}), '(model.Tag)\n', (548, 559), True, 'import ckan.lib.search as search\n'), ((685, 712), 'ckan.lib.search.query_for', 'search.query_for', (['model.Tag'], {}), '(model.Tag)\n', (701, 712), True, 'import ckan.lib.search as search\n'), ((955, 982), 'ckan.lib.search.query_for', 'search.query_for', (['model.Tag'], {}), '(model.Tag)\n', (971, 982), True, 'import ckan.lib.search as search\n'), ((1193, 1220), 'ckan.lib.search.query_for', 'search.query_for', (['model.Tag'], {}), '(model.Tag)\n', (1209, 1220), True, 'import ckan.lib.search as search\n'), ((1497, 1524), 'ckan.lib.search.query_for', 'search.query_for', (['model.Tag'], {}), '(model.Tag)\n', (1513, 1524), True, 'import ckan.lib.search as search\n'), ((1816, 1843), 'ckan.lib.search.query_for', 'search.query_for', (['model.Tag'], {}), '(model.Tag)\n', (1832, 1843), True, 'import ckan.lib.search as search\n'), ((2125, 2152), 'ckan.lib.search.query_for', 'search.query_for', (['model.Tag'], {}), '(model.Tag)\n', (2141, 2152), True, 'import ckan.lib.search as search\n'), ((2327, 2354), 'ckan.lib.search.query_for', 'search.query_for', (['model.Tag'], {}), '(model.Tag)\n', (2343, 2354), True, 'import ckan.lib.search as search\n'), ((2522, 2549), 'ckan.lib.search.query_for', 'search.query_for', (['model.Tag'], {}), '(model.Tag)\n', (2538, 2549), True, 'import ckan.lib.search as search\n'), ((2694, 2721), 'ckan.lib.search.query_for', 'search.query_for', (['model.Tag'], {}), '(model.Tag)\n', (2710, 2721), True, 'import ckan.lib.search as search\n'), ((2989, 3016), 'ckan.lib.search.query_for', 'search.query_for', (['model.Tag'], {}), '(model.Tag)\n', (3005, 3016), True, 'import ckan.lib.search as search\n'), ((4502, 4534), 'ckan.lib.search.query_for', 'search.query_for', (['model.Resource'], {}), '(model.Resource)\n', (4518, 4534), True, 'import ckan.lib.search as search\n'), ((4932, 4964), 'ckan.lib.search.query_for', 'search.query_for', (['model.Resource'], {}), '(model.Resource)\n', (4948, 4964), True, 'import ckan.lib.search as search\n'), ((6877, 6909), 'ckan.lib.search.query_for', 'search.query_for', (['model.Resource'], {}), '(model.Resource)\n', (6893, 6909), True, 'import ckan.lib.search as search\n'), ((7920, 7952), 'ckan.lib.search.query_for', 'search.query_for', (['model.Resource'], {}), '(model.Resource)\n', (7936, 7952), True, 'import ckan.lib.search as search\n'), ((8273, 8305), 'ckan.lib.search.query_for', 'search.query_for', (['model.Resource'], {}), '(model.Resource)\n', (8289, 8305), True, 'import ckan.lib.search as search\n'), ((8813, 8845), 'ckan.lib.search.query_for', 'search.query_for', (['model.Resource'], {}), '(model.Resource)\n', (8829, 8845), True, 'import ckan.lib.search as search\n'), ((9183, 9215), 'ckan.lib.search.query_for', 'search.query_for', (['model.Resource'], {}), '(model.Resource)\n', (9199, 9215), True, 'import ckan.lib.search as search\n'), ((9492, 9524), 'ckan.lib.search.query_for', 'search.query_for', (['model.Resource'], {}), '(model.Resource)\n', (9508, 9524), True, 'import ckan.lib.search as search\n'), ((9644, 9676), 'ckan.lib.search.query_for', 'search.query_for', (['model.Resource'], {}), '(model.Resource)\n', (9660, 9676), True, 'import ckan.lib.search as search\n'), ((12150, 12181), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (12166, 12181), True, 'import ckan.lib.search as search\n'), ((12422, 12453), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (12438, 12453), True, 'import ckan.lib.search as search\n'), ((12724, 12755), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (12740, 12755), True, 'import ckan.lib.search as search\n'), ((13064, 13095), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (13080, 13095), True, 'import ckan.lib.search as search\n'), ((13277, 13308), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (13293, 13308), True, 'import ckan.lib.search as search\n'), ((13636, 13667), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (13652, 13667), True, 'import ckan.lib.search as search\n'), ((13843, 13874), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (13859, 13874), True, 'import ckan.lib.search as search\n'), ((14100, 14131), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (14116, 14131), True, 'import ckan.lib.search as search\n'), ((14334, 14365), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (14350, 14365), True, 'import ckan.lib.search as search\n'), ((14586, 14617), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (14602, 14617), True, 'import ckan.lib.search as search\n'), ((14849, 14880), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (14865, 14880), True, 'import ckan.lib.search as search\n'), ((15122, 15153), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (15138, 15153), True, 'import ckan.lib.search as search\n'), ((15410, 15441), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (15426, 15441), True, 'import ckan.lib.search as search\n'), ((15583, 15614), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (15599, 15614), True, 'import ckan.lib.search as search\n'), ((15808, 15839), 'ckan.lib.search.query_for', 'search.query_for', (['model.Package'], {}), '(model.Package)\n', (15824, 15839), True, 'import ckan.lib.search as search\n'), ((4653, 4688), 'ckan.model.Session.query', 'model.Session.query', (['model.Resource'], {}), '(model.Resource)\n', (4672, 4688), True, 'import ckan.model as model\n'), ((5054, 5089), 'ckan.model.Session.query', 'model.Session.query', (['model.Resource'], {}), '(model.Resource)\n', (5073, 5089), True, 'import ckan.model as model\n')]
|
# -*- coding: utf-8 -*-
from fire.api.model.punkttyper import GeometriObjekt, PunktInformation
__author__ = "Septima"
__date__ = "2019-12-02"
__copyright__ = "(C) 2019 by Septima"
import os
from datetime import datetime
from typing import List, Dict
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtGui import QIcon
from qgis.core import (
QgsProcessing,
QgsFeatureSink,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterString,
QgsProcessingParameterBoolean,
QgsProcessingParameterEnum,
QgsWkbTypes,
QgsFeature,
QgsField,
QgsFields,
QgsProcessingFeedback,
QgsGeometry,
QgsPoint,
QgsProject,
)
from qgis.PyQt.QtCore import Qt, QVariant, QDateTime, QTime
try:
from fire.api import FireDb
except:
FireDb = None
from fire.api.model import Geometry, Observation, Punkt, Koordinat
from .datetime_widget import DateTimeWidget
from .ui.nullable_datetime_wrapper import NullableDateTimeWrapper
import processing
class ImportObservationerByLocationAlgorithm(QgsProcessingAlgorithm):
OUTPUT = "OUTPUT"
INPUT = "INPUT"
OBSERVATION_TYPE = "OBSERVATION_TYPE"
APPLY_THEME = "APPLY_THEME"
FROM_DATE = "FROM_DATE"
TO_DATE = "TO_DATE"
def __init__(self, settings):
QgsProcessingAlgorithm.__init__(self)
self.settings = settings
def initAlgorithm(self, config):
self.addParameter(
QgsProcessingParameterFeatureSource(
self.INPUT,
self.tr("Importér observationer indenfor (within)"),
[QgsProcessing.TypeVectorPolygon],
)
)
self.OBSERVATION_TYPES = [
(1, self.tr("Koteforskel opmålt geometrisk")),
(2, self.tr("Koteforskel opmålt trigonometrisk")),
]
o = QgsProcessingParameterEnum(
name=self.OBSERVATION_TYPE,
description=self.tr("Observationstype"),
options=[x[1] for x in self.OBSERVATION_TYPES],
allowMultiple=True,
defaultValue=[0, 1],
)
o.setMetadata({"widget_wrapper": {"useCheckBoxes": True, "columns": 2}})
self.addParameter(o)
param = QgsProcessingParameterString(
name=self.FROM_DATE, description="Fra Dato", optional=True
)
param.setMetadata({"widget_wrapper": {"class": NullableDateTimeWrapper}})
self.addParameter(param)
param = QgsProcessingParameterString(
name=self.TO_DATE, description="Til Dato", optional=True
)
param.setMetadata({"widget_wrapper": {"class": NullableDateTimeWrapper}})
self.addParameter(param)
self.addParameter(
QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr("Observationer"))
)
self.addParameter(
QgsProcessingParameterBoolean(
self.APPLY_THEME,
self.tr("Anvend standard fikspunktregister-symbologi"),
defaultValue=True,
)
)
def processAlgorithm(self, parameters, context, feedback: QgsProcessingFeedback):
# Input / Output
source = self.parameterAsSource(parameters, self.INPUT, context)
(sink, dest_id) = self.create_output_sink(
parameters, context, source.sourceCrs()
)
# Filter parameters
observation_type_indices = self.parameterAsEnums(
parameters, self.OBSERVATION_TYPE, context
)
observation_types = list(
map(lambda i: self.OBSERVATION_TYPES[i][0], observation_type_indices)
)
from_date = None
from_date_string = self.parameterAsString(parameters, self.FROM_DATE, context)
if from_date_string:
from_date = datetime.fromisoformat(from_date_string)
to_date = None
to_date_string = self.parameterAsString(parameters, self.TO_DATE, context)
if to_date_string:
to_date = datetime.fromisoformat(to_date_string)
fire_connection_string = self.settings.value("fire_connection_string")
fireDb = FireDb(fire_connection_string, debug=True)
features = list(source.getFeatures())
total_num_features = len(features)
total_num_features_processed = 0
# for current, feature in enumerate(features):
for feature in features:
if feedback.isCanceled():
return {}
wkt = feature.geometry().asWkt().upper()
geometry = Geometry(wkt)
observations = fireDb.hent_observationer_naer_geometri(
geometri=geometry, afstand=0, tidfra=from_date, tidtil=to_date
)
pid_list = self.get_pids_from_observations(observations)
geometriobjekter = self.get_geometriobjekter_from_pids(fireDb, pid_list)
idents = self.get_idents_from_pids(fireDb, pid_list)
feedback.setProgressText(
"Fandt {antal} observationer".format(antal=len(observations))
)
feedback.setProgressText(
"Fandt {antal} geometriobjekter".format(antal=len(geometriobjekter))
)
feedback.setProgressText("Fandt {antal} idents".format(antal=len(idents)))
for current, observation in enumerate(observations):
observation_type_id = observation.observationstypeid
if observation_type_id in observation_types:
feature = self.create_feature_from_observation(
observation, geometriobjekter, idents, feedback
)
if feature:
sink.addFeature(feature, QgsFeatureSink.FastInsert)
total_num_features_processed = total_num_features_processed + 1
feedback.setProgress(total_num_features_processed / total_num_features)
if feedback.isCanceled():
return {}
apply_theme = self.parameterAsBool(parameters, self.APPLY_THEME, context)
if apply_theme:
style_file = os.path.join(
os.path.dirname(__file__), "..", "styles", "observation.qml"
)
alg_params = {"INPUT": dest_id, "STYLE": style_file}
processing.run(
"qgis:setstyleforvectorlayer",
alg_params,
context=context,
feedback=feedback,
is_child_algorithm=True,
)
return {self.OUTPUT: dest_id}
def create_output_sink(self, parameters, context, crs):
fields = QgsFields()
fields.append(QgsField("observation_id", QVariant.String))
fields.append(QgsField("observation_type_id", QVariant.Double))
fields.append(QgsField("fikspunkt1_uuid", QVariant.String))
fields.append(QgsField("fikspunkt1_ident", QVariant.String))
fields.append(QgsField("fikspunkt2_uuid", QVariant.String))
fields.append(QgsField("fikspunkt2_ident", QVariant.String))
fields.append(QgsField("registrering_fra", QVariant.DateTime))
fields.append(QgsField("registrering_fra_iso", QVariant.String))
fields.append(QgsField("koteforskel", QVariant.Double))
fields.append(QgsField("nivellementslaengde", QVariant.Double))
fields.append(QgsField("antal_opstillinger", QVariant.Double))
fields.append(QgsField("afstandsafhaengig_varians", QVariant.Double))
fields.append(QgsField("afstandsuafhaengig_varians", QVariant.Double))
fields.append(QgsField("Praecisionsnivellement", QVariant.Double))
(sink, dest_id) = self.parameterAsSink(
parameters, self.OUTPUT, context, fields, QgsWkbTypes.LineString, crs
)
return (sink, dest_id)
def get_pids_from_observations(self, observations: List[Observation]):
pid_list = []
for o in observations:
op_id = o.opstillingspunktid
if op_id not in pid_list: # Point not already found
pid_list.append(op_id)
sp_id = o.sigtepunktid
if sp_id not in pid_list: # Point not already found
pid_list.append(sp_id)
return pid_list
def get_geometriobjekter_from_pids(self, fireDb, pid_list):
# return dict of {punktid: geometriobjekt}
# Get geometriobjekter
gos: List[GeometriObjekt] = (
fireDb.session.query(GeometriObjekt)
.filter(
GeometriObjekt.punktid.in_(pid_list),
GeometriObjekt._registreringtil == None,
)
.all()
)
go_by_pid = {}
for go in gos:
go_by_pid[go.punktid] = go
return go_by_pid
def get_idents_from_pids(self, fireDb, pid_list):
# return dict of {punktid: ident: string}
# GI(346)->GNSS(343)->landsnr(342)->refgeo_id(344)->uuid
info_type_list = [346, 343, 342, 344]
infos: List[PunktInformation] = (
fireDb.session.query(PunktInformation)
.filter(
PunktInformation.punktid.in_(pid_list),
PunktInformation.infotypeid.in_(info_type_list),
)
.order_by(PunktInformation.punktid, PunktInformation.infotypeid)
.all()
)
ident_by_pid = {}
if len(infos) > 0:
current_index = 0
while current_index is not None:
current_info: PunktInformation = infos[current_index]
current_pid = current_info.punktid
ident = self.get_index_ident(current_index, infos)
ident_by_pid[current_pid] = ident
current_index = self.next_index(current_index, infos)
return ident_by_pid
def get_index_ident(self, current_index, infos: List[PunktInformation]):
current_pid = infos[current_index].punktid
best_info = infos[current_index]
best_info_weight = self.get_info_weight(best_info)
inc = 1
while (
current_index + inc < len(infos)
and infos[current_index + inc].punktid == current_pid
):
current_info = infos[current_index + inc]
current_info_weight = self.get_info_weight(current_info)
if current_info_weight > best_info_weight:
best_info = current_info
best_info_weight = current_info_weight
inc = inc + 1
return self.get_ident_text(best_info)
def get_info_weight(self, info: PunktInformation):
if info.infotypeid == 346:
return 4
elif info.infotypeid == 343:
return 3
elif info.infotypeid == 342:
return 2
elif info.infotypeid == 344:
return 1
def get_ident_text(self, info: PunktInformation):
if info.infotypeid == 346:
return "GI:" + info.tekst
elif info.infotypeid == 343:
return "GNSS:" + info.tekst
elif info.infotypeid == 342:
return "landsnr:" + info.tekst
elif info.infotypeid == 344:
return "refgeo_id:" + info.tekst
def next_index(self, current_index, infos: List[PunktInformation]):
current_pid = infos[current_index].punktid
inc = 1
while (
current_index + inc < len(infos)
and infos[current_index + inc].punktid == current_pid
):
inc = inc + 1
if current_index + inc < len(infos):
return current_index + inc
else:
return None
def create_feature_from_observation(
self,
observation: Observation,
geometriobjekter: Dict[str, GeometriObjekt],
idents: Dict[str, str],
feedback: QgsProcessingFeedback,
):
observation_id = observation.objektid
fikspunkt1_id = observation.opstillingspunktid
fikspunkt1_ident = "uuid:" + fikspunkt1_id
if fikspunkt1_id in idents:
fikspunkt1_ident = idents[fikspunkt1_id]
fikspunkt2_id = observation.sigtepunktid
fikspunkt2_ident = "uuid:" + fikspunkt2_id
if fikspunkt2_id in idents:
fikspunkt2_ident = idents[fikspunkt2_id]
geometriobjekt1 = geometriobjekter[fikspunkt1_id]
geometriobjekt2 = geometriobjekter[fikspunkt2_id]
line_geometry = self.create_line_geometry_from_geometriobjekter(
geometriobjekt1, geometriobjekt2, feedback
)
if line_geometry:
# create the feature
fet = QgsFeature()
fet.setGeometry(line_geometry)
# Felter, der skal gemmes på feature:
# [QgsField("observation_id", QVariant.String),
# QgsField("observation_type_id", QVariant.Double)
# QgsField("fikspunkt1_id", QVariant.String),
# QgsField("fikspunkt1_ident", QVariant.String),
# QgsField("fikspunkt2_id", QVariant.String),
# QgsField("fikspunkt2_ident", QVariant.String),
# QgsField("registrering_fra", QVariant.DateTime),
# QgsField("registrering_fra_iso", QVariant.String),
# QgsField("koteforskel", QVariant.Double),
# QgsField("nivellementslaengde", QVariant.Double),
# QgsField("antal_opstillinger", QVariant.Double), Value3
# QgsField("afstandsafhaengig_varians", QVariant.Double), (value5 for id=1, value4 for id=2)
# QgsField("afstandsuafhaengig_varians", QVariant.Double), (value6 for id=1, value5 for id=2)
# QgsField("Praecisionsnivellement", QVariant.Double)], (value7 for id=1, 0 for id=2)
observation_type_id = observation.observationstypeid
registrering_fra = QDateTime(observation.registreringfra)
registrering_fra_iso = registrering_fra.toString(Qt.ISODate)
koteforskel = observation.value1
nivellementslaengde = observation.value2
antal_opstillinger = observation.value3
if observation_type_id == 1:
afstandsafhaengig_varians = observation.value5
afstandsuafhaengig_varians = observation.value6
Praecisionsnivellement = observation.value7
elif observation_type_id == 2:
afstandsafhaengig_varians = observation.value4
afstandsuafhaengig_varians = observation.value5
Praecisionsnivellement = 0
else:
# Observationstypeid > 2
feedback.setProgressText(
"observation_type_id > 2 for observation med id = {id}. Springes over".format(
id=observation_id
)
)
return None
# create the feature
feature = QgsFeature()
feature.setGeometry(line_geometry)
feature.setAttributes(
[
observation_id,
observation_type_id,
fikspunkt1_id,
fikspunkt1_ident,
fikspunkt2_id,
fikspunkt2_ident,
registrering_fra,
registrering_fra_iso,
koteforskel,
nivellementslaengde,
antal_opstillinger,
afstandsafhaengig_varians,
afstandsuafhaengig_varians,
Praecisionsnivellement,
]
)
return feature
else:
# A geometry could not be established
feedback.setProgressText(
"En liniegeometri kunne IKKE opettes for observation med id = {id}".format(
id=observation_id
)
)
return None
def create_line_geometry_from_geometriobjekter(
self,
geometriobjekt1: GeometriObjekt,
geometriobjekt2: GeometriObjekt,
feedback: QgsProcessingFeedback,
):
if geometriobjekt1 and geometriobjekt2:
wkt1 = geometriobjekt1.geometri.wkt
g1 = QgsPoint()
g1.fromWkt(wkt1)
wkt2 = geometriobjekt2.geometri.wkt
g2 = QgsPoint()
g2.fromWkt(wkt2)
geom = QgsGeometry.fromPolyline([g1, g2])
return geom
else:
return None
def name(self):
return "fire-import-observations-location"
def displayName(self):
return "Importér observationer fra FIRE ud fra placering"
def group(self):
return ""
def groupId(self):
return ""
def flags(self):
return QgsProcessingAlgorithm.FlagNoThreading
def tr(self, string):
return QCoreApplication.translate("Processing", string)
def createInstance(self):
return ImportObservationerByLocationAlgorithm(self.settings)
def canExecute(self):
if FireDb is None:
return (
False,
"Dette plugin er afhængigt af API'et til Fikspunktregistret. Se venligst https://github.com/Septima/fire-qgis#installation",
)
fire_connection_string = self.settings.value("fire_connection_string")
if fire_connection_string is None:
conf_message = "Kan ikke finde konfigurationsfil. Se venligst https://github.com/Kortforsyningen/fire-cli#konfigurationsfil for format og placering af konfigurationsfil"
return False, conf_message
else:
try:
fireDb = FireDb(fire_connection_string)
fireDb.hent_observationstyper()
return True, "OK"
except Exception as ex:
str_ex = str(ex)
fire_connection_file_path = self.settings.value(
"fire_connection_file_path"
)
return (
False,
"Fejl i forbindelse til Fikspunktregistret. Se venligst https://github.com/Kortforsyningen/fire-cli#konfigurationsfil for format og indhold af konfigurationsfil. Exception:["
+ str_ex
+ "] Konfigurationsfil:["
+ fire_connection_file_path
+ "]",
)
def shortHelpString(self):
help_string = "Importerer observationer fra Fikstpunktregistret, hvor\n- enten p1 eller p2 er indeholdt i forespørgselsgeometrien,\n- observationstype er som ønsket og\n- registrering-fra ligger indenfor dato-interval (Optionelt)\n\n"
conf_message = ""
fire_connection_string = self.settings.value("fire_connection_string")
if fire_connection_string is None:
conf_message = "Fejl i konfigurationsfil eller kan ikke finde konfigurationsfil. Se venligst https://github.com/Kortforsyningen/fire-cli#konfigurationsfil"
else:
fire_connection_file_path = self.settings.value("fire_connection_file_path")
conf_message = "Konfigurationsfil: " + fire_connection_file_path
return self.tr(help_string + conf_message)
def icon(self):
icon_path = os.path.join(os.path.dirname(__file__), "ui", "fire-export.png")
return QIcon(icon_path)
|
[
"qgis.core.QgsPoint",
"fire.api.model.punkttyper.GeometriObjekt.punktid.in_",
"PyQt5.QtGui.QIcon",
"qgis.core.QgsGeometry.fromPolyline",
"datetime.datetime.fromisoformat",
"fire.api.model.punkttyper.PunktInformation.punktid.in_",
"processing.run",
"os.path.dirname",
"qgis.core.QgsProcessingAlgorithm.__init__",
"fire.api.FireDb",
"qgis.core.QgsFeature",
"qgis.PyQt.QtCore.QDateTime",
"qgis.core.QgsFields",
"fire.api.model.punkttyper.PunktInformation.infotypeid.in_",
"PyQt5.QtCore.QCoreApplication.translate",
"qgis.core.QgsField",
"fire.api.model.Geometry",
"qgis.core.QgsProcessingParameterString"
] |
[((1342, 1379), 'qgis.core.QgsProcessingAlgorithm.__init__', 'QgsProcessingAlgorithm.__init__', (['self'], {}), '(self)\n', (1373, 1379), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((2263, 2355), 'qgis.core.QgsProcessingParameterString', 'QgsProcessingParameterString', ([], {'name': 'self.FROM_DATE', 'description': '"""Fra Dato"""', 'optional': '(True)'}), "(name=self.FROM_DATE, description='Fra Dato',\n optional=True)\n", (2291, 2355), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((2506, 2596), 'qgis.core.QgsProcessingParameterString', 'QgsProcessingParameterString', ([], {'name': 'self.TO_DATE', 'description': '"""Til Dato"""', 'optional': '(True)'}), "(name=self.TO_DATE, description='Til Dato',\n optional=True)\n", (2534, 2596), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((4164, 4206), 'fire.api.FireDb', 'FireDb', (['fire_connection_string'], {'debug': '(True)'}), '(fire_connection_string, debug=True)\n', (4170, 4206), False, 'from fire.api import FireDb\n'), ((6650, 6661), 'qgis.core.QgsFields', 'QgsFields', ([], {}), '()\n', (6659, 6661), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((16895, 16943), 'PyQt5.QtCore.QCoreApplication.translate', 'QCoreApplication.translate', (['"""Processing"""', 'string'], {}), "('Processing', string)\n", (16921, 16943), False, 'from PyQt5.QtCore import QCoreApplication\n'), ((19377, 19393), 'PyQt5.QtGui.QIcon', 'QIcon', (['icon_path'], {}), '(icon_path)\n', (19382, 19393), False, 'from PyQt5.QtGui import QIcon\n'), ((3831, 3871), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['from_date_string'], {}), '(from_date_string)\n', (3853, 3871), False, 'from datetime import datetime\n'), ((4028, 4066), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['to_date_string'], {}), '(to_date_string)\n', (4050, 4066), False, 'from datetime import datetime\n'), ((4566, 4579), 'fire.api.model.Geometry', 'Geometry', (['wkt'], {}), '(wkt)\n', (4574, 4579), False, 'from fire.api.model import Geometry, Observation, Punkt, Koordinat\n'), ((6319, 6441), 'processing.run', 'processing.run', (['"""qgis:setstyleforvectorlayer"""', 'alg_params'], {'context': 'context', 'feedback': 'feedback', 'is_child_algorithm': '(True)'}), "('qgis:setstyleforvectorlayer', alg_params, context=context,\n feedback=feedback, is_child_algorithm=True)\n", (6333, 6441), False, 'import processing\n'), ((6684, 6727), 'qgis.core.QgsField', 'QgsField', (['"""observation_id"""', 'QVariant.String'], {}), "('observation_id', QVariant.String)\n", (6692, 6727), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((6751, 6799), 'qgis.core.QgsField', 'QgsField', (['"""observation_type_id"""', 'QVariant.Double'], {}), "('observation_type_id', QVariant.Double)\n", (6759, 6799), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((6823, 6867), 'qgis.core.QgsField', 'QgsField', (['"""fikspunkt1_uuid"""', 'QVariant.String'], {}), "('fikspunkt1_uuid', QVariant.String)\n", (6831, 6867), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((6891, 6936), 'qgis.core.QgsField', 'QgsField', (['"""fikspunkt1_ident"""', 'QVariant.String'], {}), "('fikspunkt1_ident', QVariant.String)\n", (6899, 6936), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((6960, 7004), 'qgis.core.QgsField', 'QgsField', (['"""fikspunkt2_uuid"""', 'QVariant.String'], {}), "('fikspunkt2_uuid', QVariant.String)\n", (6968, 7004), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((7028, 7073), 'qgis.core.QgsField', 'QgsField', (['"""fikspunkt2_ident"""', 'QVariant.String'], {}), "('fikspunkt2_ident', QVariant.String)\n", (7036, 7073), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((7097, 7144), 'qgis.core.QgsField', 'QgsField', (['"""registrering_fra"""', 'QVariant.DateTime'], {}), "('registrering_fra', QVariant.DateTime)\n", (7105, 7144), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((7168, 7217), 'qgis.core.QgsField', 'QgsField', (['"""registrering_fra_iso"""', 'QVariant.String'], {}), "('registrering_fra_iso', QVariant.String)\n", (7176, 7217), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((7241, 7281), 'qgis.core.QgsField', 'QgsField', (['"""koteforskel"""', 'QVariant.Double'], {}), "('koteforskel', QVariant.Double)\n", (7249, 7281), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((7305, 7353), 'qgis.core.QgsField', 'QgsField', (['"""nivellementslaengde"""', 'QVariant.Double'], {}), "('nivellementslaengde', QVariant.Double)\n", (7313, 7353), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((7377, 7424), 'qgis.core.QgsField', 'QgsField', (['"""antal_opstillinger"""', 'QVariant.Double'], {}), "('antal_opstillinger', QVariant.Double)\n", (7385, 7424), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((7448, 7502), 'qgis.core.QgsField', 'QgsField', (['"""afstandsafhaengig_varians"""', 'QVariant.Double'], {}), "('afstandsafhaengig_varians', QVariant.Double)\n", (7456, 7502), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((7526, 7581), 'qgis.core.QgsField', 'QgsField', (['"""afstandsuafhaengig_varians"""', 'QVariant.Double'], {}), "('afstandsuafhaengig_varians', QVariant.Double)\n", (7534, 7581), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((7605, 7656), 'qgis.core.QgsField', 'QgsField', (['"""Praecisionsnivellement"""', 'QVariant.Double'], {}), "('Praecisionsnivellement', QVariant.Double)\n", (7613, 7656), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((12621, 12633), 'qgis.core.QgsFeature', 'QgsFeature', ([], {}), '()\n', (12631, 12633), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((13870, 13908), 'qgis.PyQt.QtCore.QDateTime', 'QDateTime', (['observation.registreringfra'], {}), '(observation.registreringfra)\n', (13879, 13908), False, 'from qgis.PyQt.QtCore import Qt, QVariant, QDateTime, QTime\n'), ((14940, 14952), 'qgis.core.QgsFeature', 'QgsFeature', ([], {}), '()\n', (14950, 14952), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((16268, 16278), 'qgis.core.QgsPoint', 'QgsPoint', ([], {}), '()\n', (16276, 16278), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((16373, 16383), 'qgis.core.QgsPoint', 'QgsPoint', ([], {}), '()\n', (16381, 16383), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((16432, 16466), 'qgis.core.QgsGeometry.fromPolyline', 'QgsGeometry.fromPolyline', (['[g1, g2]'], {}), '([g1, g2])\n', (16456, 16466), False, 'from qgis.core import QgsProcessing, QgsFeatureSink, QgsProcessingAlgorithm, QgsProcessingParameterFeatureSource, QgsProcessingParameterFeatureSink, QgsProcessingParameterString, QgsProcessingParameterBoolean, QgsProcessingParameterEnum, QgsWkbTypes, QgsFeature, QgsField, QgsFields, QgsProcessingFeedback, QgsGeometry, QgsPoint, QgsProject\n'), ((19310, 19335), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (19325, 19335), False, 'import os\n'), ((6167, 6192), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6182, 6192), False, 'import os\n'), ((17696, 17726), 'fire.api.FireDb', 'FireDb', (['fire_connection_string'], {}), '(fire_connection_string)\n', (17702, 17726), False, 'from fire.api import FireDb\n'), ((8540, 8576), 'fire.api.model.punkttyper.GeometriObjekt.punktid.in_', 'GeometriObjekt.punktid.in_', (['pid_list'], {}), '(pid_list)\n', (8566, 8576), False, 'from fire.api.model.punkttyper import GeometriObjekt, PunktInformation\n'), ((9135, 9173), 'fire.api.model.punkttyper.PunktInformation.punktid.in_', 'PunktInformation.punktid.in_', (['pid_list'], {}), '(pid_list)\n', (9163, 9173), False, 'from fire.api.model.punkttyper import GeometriObjekt, PunktInformation\n'), ((9191, 9238), 'fire.api.model.punkttyper.PunktInformation.infotypeid.in_', 'PunktInformation.infotypeid.in_', (['info_type_list'], {}), '(info_type_list)\n', (9222, 9238), False, 'from fire.api.model.punkttyper import GeometriObjekt, PunktInformation\n')]
|
import scipy as sp
import scipy.optimize
from . import legops
import tensorflow as tf
import numpy as np
import numpy.random as npr
from . import constructions
def fit_model_family(ts,xs,model_family,p_init,maxiter=100,use_tqdm_notebook=False):
'''
Fits a custom LEG model
Input:
- ts: list of timestamp-vectors: nsamp x [ragged]
- xs: list of observations: nsamp x [ragged] x n
- model_family: model family to fit
- p_init: -- initial conditions for the parameter vector of the model family
- [optional] maxiter -- max number of iters to use in BFGS
- [optional] use_tqdm_notebook -- whether to make an update bar with tqdm
Output: dictionary with lots of keys. See supplementary.pdf for details. Important keys are:
- message (result of optimization)
- params (a dictionary with keys for each parameter of a LEG model)
- nats (the negative log likelihood divided by the number of observations)
'''
# store initial values
N,R,B,Lambda=model_family.p2NRBL(p_init)
initial_params=dict(N=N.numpy(),R=R.numpy(),B=B.numpy(),Lambda=Lambda.numpy())
# process dedups
time_info=[constructions.dedup_ts(tf.convert_to_tensor(x,dtype=tf.float64)) for x in ts]
xs=[tf.convert_to_tensor(x,dtype=tf.float64) for x in xs]
n=xs[0].shape[1]
nobs = np.sum([np.prod(x.shape) for x in xs])
# functions for scipy.optimize
nats=[]
def func(p):
Ls=0
for x,(sub_ts,sub_idxs) in zip(xs,time_info):
Ls+= model_family.log_likelihood(sub_ts,x,sub_idxs,p)
loss=-Ls.numpy()/nobs
nats.append(loss)
return loss
def jac(p):
gs=0
for x,(sub_ts,sub_idxs) in zip(xs,time_info):
gs+= model_family.informant(sub_ts,x,sub_idxs,p)
return -gs/nobs
# get an initial loss
func(p_init)
# fit it
if use_tqdm_notebook:
import tqdm.notebook
with tqdm.notebook.tqdm() as t:
def callback(*args,**kwargs):
t.update(len(nats))
t.set_description(f"nats={nats[-1]:.2f}")
result=sp.optimize.minimize(func,p_init,jac=jac,options=dict(maxiter=maxiter),callback=callback)
else:
result=sp.optimize.minimize(func,p_init,jac=jac,options=dict(maxiter=maxiter))
# supplement loss dictionary with some stuff of interest
result['nats']=nats
# store initial params
result['initial_params']=initial_params
# store final params:
N,R,B,Lambda=model_family.p2NRBL(result['x'])
result['params']=dict(N=N.numpy(),R=R.numpy(),B=B.numpy(),Lambda=Lambda.numpy())
# we call the parameters "p" not "x"
result['p']=result['x']
del result['x']
# done
return result
def fit(ts,xs,ell=None,N=None,R=None,B=None,Lambda=None,maxiter=100,use_tqdm_notebook=False):
'''
fit the LEG model with rank ell
Input:
- ts: list of timestamp-vectors: nsamp x [ragged]
- xs: list of observations: nsamp x [ragged] x n
- ell: order of the LEG model to fit
- [optional] N,R,B,Lambda -- initial conditions
- [optional] maxiter -- max number of iters to use in BFGS
- [optional] use_tqdm_notebook -- whether to make an update bar with tqdm
Output: dictionary with lots of keys. See supplementary.pdf for details. Important keys are:
- message (result of optimization)
- params (a dictionary with keys for each parameter of a LEG model)
- nats (the negative log likelihood divided by the number of observations)
'''
mf =LEGFamily(ell,xs[0].shape[1])
p_init=mf.get_initial_guess(ts,xs,N=N,R=R,B=B,Lambda=Lambda)
return fit_model_family(ts,xs,mf,p_init,use_tqdm_notebook=use_tqdm_notebook)
r'''
_ _ __ _ _ _
_ __ ___ ___ __| | ___| | / _| __ _ _ __ ___ (_) (_) ___ ___
| '_ ` _ \ / _ \ / _` |/ _ \ | | |_ / _` | '_ ` _ \| | | |/ _ \/ __|
| | | | | | (_) | (_| | __/ | | _| (_| | | | | | | | | | __/\__ \
|_| |_| |_|\___/ \__,_|\___|_| |_| \__,_|_| |_| |_|_|_|_|\___||___/
'''
class LEGFamily:
def __init__(self,ell,n):
self.ell=ell
self.n=n
msk=np.tril(np.ones((self.ell,self.ell)))
self.N_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
msk=np.tril(np.ones((self.ell,self.ell)),k=-1)
self.R_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
msk=np.tril(np.ones((self.n,self.n)))
self.Lambda_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
self.psize = self.N_idxs.shape[0]+self.R_idxs.shape[0]+self.ell*self.n+self.Lambda_idxs.shape[0]
def p2NRBL(self,p):
i=0
# N!
sz=self.N_idxs.shape[0]
N=tf.scatter_nd(self.N_idxs,p[i:i+sz],(self.ell,self.ell))
i+=sz
# R!
sz=self.R_idxs.shape[0]
R=tf.scatter_nd(self.R_idxs,p[i:i+sz],(self.ell,self.ell))
i+=sz
# B!
sz=self.ell*self.n; B = tf.reshape(p[i:i+sz],(self.n,self.ell)); i+=sz
# Lambda!
sz=self.Lambda_idxs.shape[0]
Lambda=tf.scatter_nd(self.Lambda_idxs,p[i:i+sz],(self.n,self.n))
i+=sz
return N,R,B,Lambda
@tf.function(autograph=False)
def informant(self,ts,x,idxs,p):
'''
gradient of log likelihood w.r.t. p
'''
with tf.GradientTape() as g:
g.watch(p)
N,R,B,Lambda = self.p2NRBL(p)
nats = legops.leg_log_likelihood_tensorflow(ts,x,idxs,N,R,B,Lambda)
return g.gradient(nats,p)
@tf.function(autograph=False)
def log_likelihood(self,ts,x,idxs,p):
'''
log likelihood
'''
N,R,B,Lambda = self.p2NRBL(p)
return legops.leg_log_likelihood_tensorflow(ts,x,idxs,N,R,B,Lambda)
def get_initial_guess(self,ts,xs,N=None,R=None,B=None,Lambda=None):
# make up values when nothing is provided
if N is None:
N=np.eye(self.ell)
if R is None:
R=npr.randn(self.ell,self.ell)*.2
R=.5*(R-R.T)
if B is None:
B=np.ones((self.n,self.ell))
B=.5*B/np.sqrt(np.sum(B**2,axis=1,keepdims=True))
if Lambda is None:
Lambda = .1*np.eye(self.n)
# make 'em nice for us
N = tf.linalg.cholesky(N@tf.transpose(N))
R = (R-tf.transpose(R))
Lambda = tf.linalg.cholesky(Lambda@tf.transpose(Lambda))
# put it all together
pN=tf.gather_nd(N,self.N_idxs)
pR=tf.gather_nd(R,self.R_idxs)
pB=tf.reshape(B,(self.n*self.ell,))
pL=tf.gather_nd(Lambda,self.Lambda_idxs)
return tf.concat([pN,pR,pB,pL],axis=0)
class CeleriteFamily(LEGFamily):
def __init__(self,nblocks,n):
self.nblocks=nblocks
self.ell=nblocks*2
self.n=n
msk=np.eye(self.ell,dtype=np.bool) + np.diag(np.tile([True,False],self.nblocks)[:-1],-1)
self.N_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
msk = np.diag(np.tile([True,False],self.nblocks)[:-1],-1)
self.R_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
msk=np.tril(np.ones((self.n,self.n)))
self.Lambda_idxs = tf.convert_to_tensor(np.c_[np.where(msk)])
self.psize = self.N_idxs.shape[0]+self.R_idxs.shape[0]+self.ell*self.n+self.Lambda_idxs.shape[0]
def get_initial_guess(self,ts,xs):
N=np.eye(self.ell)
R=npr.randn(self.ell,self.ell)*.2
B=np.ones((self.n,self.ell))
B=.5*B/np.sqrt(np.sum(B**2,axis=1,keepdims=True))
Lambda = .1*np.eye(self.n)
N = tf.linalg.cholesky(N@tf.transpose(N))
R = (R-tf.transpose(R))
Lambda = tf.linalg.cholesky(Lambda@tf.transpose(Lambda))
# put it all together
pN=tf.gather_nd(N,self.N_idxs)
pR=tf.gather_nd(R,self.R_idxs)
pB=tf.reshape(B,(self.n*self.ell,))
pL=tf.gather_nd(Lambda,self.Lambda_idxs)
return tf.concat([pN,pR,pB,pL],axis=0)
|
[
"numpy.sum",
"numpy.random.randn",
"tensorflow.gather_nd",
"tensorflow.convert_to_tensor",
"tensorflow.reshape",
"tensorflow.concat",
"numpy.ones",
"tensorflow.transpose",
"numpy.where",
"tensorflow.GradientTape",
"numpy.tile",
"tensorflow.function",
"numpy.eye",
"tensorflow.scatter_nd",
"numpy.prod"
] |
[((5309, 5337), 'tensorflow.function', 'tf.function', ([], {'autograph': '(False)'}), '(autograph=False)\n', (5320, 5337), True, 'import tensorflow as tf\n'), ((5665, 5693), 'tensorflow.function', 'tf.function', ([], {'autograph': '(False)'}), '(autograph=False)\n', (5676, 5693), True, 'import tensorflow as tf\n'), ((1251, 1292), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.float64'}), '(x, dtype=tf.float64)\n', (1271, 1292), True, 'import tensorflow as tf\n'), ((4816, 4877), 'tensorflow.scatter_nd', 'tf.scatter_nd', (['self.N_idxs', 'p[i:i + sz]', '(self.ell, self.ell)'], {}), '(self.N_idxs, p[i:i + sz], (self.ell, self.ell))\n', (4829, 4877), True, 'import tensorflow as tf\n'), ((4951, 5012), 'tensorflow.scatter_nd', 'tf.scatter_nd', (['self.R_idxs', 'p[i:i + sz]', '(self.ell, self.ell)'], {}), '(self.R_idxs, p[i:i + sz], (self.ell, self.ell))\n', (4964, 5012), True, 'import tensorflow as tf\n'), ((5068, 5111), 'tensorflow.reshape', 'tf.reshape', (['p[i:i + sz]', '(self.n, self.ell)'], {}), '(p[i:i + sz], (self.n, self.ell))\n', (5078, 5111), True, 'import tensorflow as tf\n'), ((5194, 5256), 'tensorflow.scatter_nd', 'tf.scatter_nd', (['self.Lambda_idxs', 'p[i:i + sz]', '(self.n, self.n)'], {}), '(self.Lambda_idxs, p[i:i + sz], (self.n, self.n))\n', (5207, 5256), True, 'import tensorflow as tf\n'), ((6578, 6606), 'tensorflow.gather_nd', 'tf.gather_nd', (['N', 'self.N_idxs'], {}), '(N, self.N_idxs)\n', (6590, 6606), True, 'import tensorflow as tf\n'), ((6617, 6645), 'tensorflow.gather_nd', 'tf.gather_nd', (['R', 'self.R_idxs'], {}), '(R, self.R_idxs)\n', (6629, 6645), True, 'import tensorflow as tf\n'), ((6656, 6691), 'tensorflow.reshape', 'tf.reshape', (['B', '(self.n * self.ell,)'], {}), '(B, (self.n * self.ell,))\n', (6666, 6691), True, 'import tensorflow as tf\n'), ((6700, 6738), 'tensorflow.gather_nd', 'tf.gather_nd', (['Lambda', 'self.Lambda_idxs'], {}), '(Lambda, self.Lambda_idxs)\n', (6712, 6738), True, 'import tensorflow as tf\n'), ((6753, 6788), 'tensorflow.concat', 'tf.concat', (['[pN, pR, pB, pL]'], {'axis': '(0)'}), '([pN, pR, pB, pL], axis=0)\n', (6762, 6788), True, 'import tensorflow as tf\n'), ((7498, 7514), 'numpy.eye', 'np.eye', (['self.ell'], {}), '(self.ell)\n', (7504, 7514), True, 'import numpy as np\n'), ((7567, 7594), 'numpy.ones', 'np.ones', (['(self.n, self.ell)'], {}), '((self.n, self.ell))\n', (7574, 7594), True, 'import numpy as np\n'), ((7876, 7904), 'tensorflow.gather_nd', 'tf.gather_nd', (['N', 'self.N_idxs'], {}), '(N, self.N_idxs)\n', (7888, 7904), True, 'import tensorflow as tf\n'), ((7915, 7943), 'tensorflow.gather_nd', 'tf.gather_nd', (['R', 'self.R_idxs'], {}), '(R, self.R_idxs)\n', (7927, 7943), True, 'import tensorflow as tf\n'), ((7954, 7989), 'tensorflow.reshape', 'tf.reshape', (['B', '(self.n * self.ell,)'], {}), '(B, (self.n * self.ell,))\n', (7964, 7989), True, 'import tensorflow as tf\n'), ((7998, 8036), 'tensorflow.gather_nd', 'tf.gather_nd', (['Lambda', 'self.Lambda_idxs'], {}), '(Lambda, self.Lambda_idxs)\n', (8010, 8036), True, 'import tensorflow as tf\n'), ((8051, 8086), 'tensorflow.concat', 'tf.concat', (['[pN, pR, pB, pL]'], {'axis': '(0)'}), '([pN, pR, pB, pL], axis=0)\n', (8060, 8086), True, 'import tensorflow as tf\n'), ((1188, 1229), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.float64'}), '(x, dtype=tf.float64)\n', (1208, 1229), True, 'import tensorflow as tf\n'), ((1345, 1361), 'numpy.prod', 'np.prod', (['x.shape'], {}), '(x.shape)\n', (1352, 1361), True, 'import numpy as np\n'), ((4272, 4301), 'numpy.ones', 'np.ones', (['(self.ell, self.ell)'], {}), '((self.ell, self.ell))\n', (4279, 4301), True, 'import numpy as np\n'), ((4388, 4417), 'numpy.ones', 'np.ones', (['(self.ell, self.ell)'], {}), '((self.ell, self.ell))\n', (4395, 4417), True, 'import numpy as np\n'), ((4509, 4534), 'numpy.ones', 'np.ones', (['(self.n, self.n)'], {}), '((self.n, self.n))\n', (4516, 4534), True, 'import numpy as np\n'), ((5456, 5473), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5471, 5473), True, 'import tensorflow as tf\n'), ((6056, 6072), 'numpy.eye', 'np.eye', (['self.ell'], {}), '(self.ell)\n', (6062, 6072), True, 'import numpy as np\n'), ((6202, 6229), 'numpy.ones', 'np.ones', (['(self.n, self.ell)'], {}), '((self.n, self.ell))\n', (6209, 6229), True, 'import numpy as np\n'), ((6454, 6469), 'tensorflow.transpose', 'tf.transpose', (['R'], {}), '(R)\n', (6466, 6469), True, 'import tensorflow as tf\n'), ((6939, 6970), 'numpy.eye', 'np.eye', (['self.ell'], {'dtype': 'np.bool'}), '(self.ell, dtype=np.bool)\n', (6945, 6970), True, 'import numpy as np\n'), ((7242, 7267), 'numpy.ones', 'np.ones', (['(self.n, self.n)'], {}), '((self.n, self.n))\n', (7249, 7267), True, 'import numpy as np\n'), ((7525, 7554), 'numpy.random.randn', 'npr.randn', (['self.ell', 'self.ell'], {}), '(self.ell, self.ell)\n', (7534, 7554), True, 'import numpy.random as npr\n'), ((7672, 7686), 'numpy.eye', 'np.eye', (['self.n'], {}), '(self.n)\n', (7678, 7686), True, 'import numpy as np\n'), ((7752, 7767), 'tensorflow.transpose', 'tf.transpose', (['R'], {}), '(R)\n', (7764, 7767), True, 'import tensorflow as tf\n'), ((4351, 4364), 'numpy.where', 'np.where', (['msk'], {}), '(msk)\n', (4359, 4364), True, 'import numpy as np\n'), ((4472, 4485), 'numpy.where', 'np.where', (['msk'], {}), '(msk)\n', (4480, 4485), True, 'import numpy as np\n'), ((4589, 4602), 'numpy.where', 'np.where', (['msk'], {}), '(msk)\n', (4597, 4602), True, 'import numpy as np\n'), ((6109, 6138), 'numpy.random.randn', 'npr.randn', (['self.ell', 'self.ell'], {}), '(self.ell, self.ell)\n', (6118, 6138), True, 'import numpy.random as npr\n'), ((6342, 6356), 'numpy.eye', 'np.eye', (['self.n'], {}), '(self.n)\n', (6348, 6356), True, 'import numpy as np\n'), ((6422, 6437), 'tensorflow.transpose', 'tf.transpose', (['N'], {}), '(N)\n', (6434, 6437), True, 'import tensorflow as tf\n'), ((6514, 6534), 'tensorflow.transpose', 'tf.transpose', (['Lambda'], {}), '(Lambda)\n', (6526, 6534), True, 'import tensorflow as tf\n'), ((7073, 7086), 'numpy.where', 'np.where', (['msk'], {}), '(msk)\n', (7081, 7086), True, 'import numpy as np\n'), ((7112, 7148), 'numpy.tile', 'np.tile', (['[True, False]', 'self.nblocks'], {}), '([True, False], self.nblocks)\n', (7119, 7148), True, 'import numpy as np\n'), ((7205, 7218), 'numpy.where', 'np.where', (['msk'], {}), '(msk)\n', (7213, 7218), True, 'import numpy as np\n'), ((7322, 7335), 'numpy.where', 'np.where', (['msk'], {}), '(msk)\n', (7330, 7335), True, 'import numpy as np\n'), ((7617, 7654), 'numpy.sum', 'np.sum', (['(B ** 2)'], {'axis': '(1)', 'keepdims': '(True)'}), '(B ** 2, axis=1, keepdims=True)\n', (7623, 7654), True, 'import numpy as np\n'), ((7720, 7735), 'tensorflow.transpose', 'tf.transpose', (['N'], {}), '(N)\n', (7732, 7735), True, 'import tensorflow as tf\n'), ((7812, 7832), 'tensorflow.transpose', 'tf.transpose', (['Lambda'], {}), '(Lambda)\n', (7824, 7832), True, 'import tensorflow as tf\n'), ((6256, 6293), 'numpy.sum', 'np.sum', (['(B ** 2)'], {'axis': '(1)', 'keepdims': '(True)'}), '(B ** 2, axis=1, keepdims=True)\n', (6262, 6293), True, 'import numpy as np\n'), ((6980, 7016), 'numpy.tile', 'np.tile', (['[True, False]', 'self.nblocks'], {}), '([True, False], self.nblocks)\n', (6987, 7016), True, 'import numpy as np\n')]
|
from django.urls import path
from . import views
app_name = 'zeus'
urlpatterns = [
path('token', views.token, name='token'),
]
|
[
"django.urls.path"
] |
[((88, 128), 'django.urls.path', 'path', (['"""token"""', 'views.token'], {'name': '"""token"""'}), "('token', views.token, name='token')\n", (92, 128), False, 'from django.urls import path\n')]
|
"""Module defines NorimDb class"""
from os import path, SEEK_END
from .exceptions import *
import pybinn
from .docid import DocId
class NorimDb:
"""NorimDb class"""
def __init__(self, dir_path):
if not path.isdir(dir_path):
raise DbError(ERR_PATH, path=dir_path)
self._sys = {
'_sys': {'size': 0}
}
self._sys_file = NorimDb._open(path.join(dir_path, "sys.ndb"))
self._data_file = NorimDb._open(path.join(dir_path, "data.ndb"))
self._sys_file.seek(0, SEEK_END)
file_size = self._sys_file.tell()
if file_size > 0:
self._sys_file.seek(0)
self._sys = pybinn.load(self._sys_file)
self._opened = True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def get_collection(self, name):
return Collection(name, self)
def close(self):
self._sys_file.close()
self._data_file.close()
self._opened = False
def _sync(self):
self._sys_file.seek(0)
pybinn.dump(self._sys, self._sys_file)
print(pybinn.dumps(self._sys))
print(self._sys)
@staticmethod
def _open(file_path):
if path.isfile(file_path):
return open(file_path, 'r+b')
return open(file_path, 'w+b')
class Collection:
"""Collection class"""
def __init__(self, name: str, db: NorimDb):
if name[0] == '_':
raise DbError(ERR_COL_NAME, name=name)
self._collection = db._sys.get(name, {
'sys': {'size': 0, 'count': 0},
'keys': {}
})
db._sys[name] = self._collection
self._name = name
self._db = db
def add(self, dict_value: dict):
if not self._db._opened:
raise DbError(ERR_DB_CLOSED)
if not isinstance(dict_value, dict):
raise DbError(ERR_DOC_TYPE)
if '_id' not in dict_value:
dict_value['_id'] = DocId()
if dict_value['_id'] in self._collection['keys']:
raise DbError(ERR_COL_KEY, key=dict_value['_id'], collection=self._name)
self._collection['sys']['count'] += 1
self._collection['keys'][dict_value['_id']] = {
'offset': self._db._data_file.tell(),
'size': 0
}
pybinn.dump(dict_value, self._db._data_file)
self._db._sync()
def get(self, doc_id):
if not self._db._opened:
raise DbError(ERR_DB_CLOSED)
if doc_id not in self._collection['keys']:
return None
offset = self._collection['keys'][doc_id]['offset']
self._db._data_file.seek(offset)
return pybinn.load(self._db._data_file)
|
[
"pybinn.dump",
"os.path.isdir",
"os.path.isfile",
"pybinn.dumps",
"pybinn.load",
"os.path.join"
] |
[((1123, 1161), 'pybinn.dump', 'pybinn.dump', (['self._sys', 'self._sys_file'], {}), '(self._sys, self._sys_file)\n', (1134, 1161), False, 'import pybinn\n'), ((1282, 1304), 'os.path.isfile', 'path.isfile', (['file_path'], {}), '(file_path)\n', (1293, 1304), False, 'from os import path, SEEK_END\n'), ((2385, 2429), 'pybinn.dump', 'pybinn.dump', (['dict_value', 'self._db._data_file'], {}), '(dict_value, self._db._data_file)\n', (2396, 2429), False, 'import pybinn\n'), ((2748, 2780), 'pybinn.load', 'pybinn.load', (['self._db._data_file'], {}), '(self._db._data_file)\n', (2759, 2780), False, 'import pybinn\n'), ((223, 243), 'os.path.isdir', 'path.isdir', (['dir_path'], {}), '(dir_path)\n', (233, 243), False, 'from os import path, SEEK_END\n'), ((401, 431), 'os.path.join', 'path.join', (['dir_path', '"""sys.ndb"""'], {}), "(dir_path, 'sys.ndb')\n", (410, 431), False, 'from os import path, SEEK_END\n'), ((473, 504), 'os.path.join', 'path.join', (['dir_path', '"""data.ndb"""'], {}), "(dir_path, 'data.ndb')\n", (482, 504), False, 'from os import path, SEEK_END\n'), ((676, 703), 'pybinn.load', 'pybinn.load', (['self._sys_file'], {}), '(self._sys_file)\n', (687, 703), False, 'import pybinn\n'), ((1176, 1199), 'pybinn.dumps', 'pybinn.dumps', (['self._sys'], {}), '(self._sys)\n', (1188, 1199), False, 'import pybinn\n')]
|
import io
import pulsar
import fastavro
class DictAVRO(dict):
"""``DictAVRO`` provides dictionary class compatible with the Pulsar AVRO "record" interface.
The class is based on regular Python dictionary (``dict``).
The actual "record" classes should be based on the ``DictAVRO`` and either:
- set ``SCHEMA`` class variable to the parsed AVRO schema,
- use ``set_schema`` class method to set parsed AVRO schema.
"""
_schema = None
@classmethod
def schema(cls) -> str:
"""Class method providing AVRO schema related to the class.
Returns:
AVRO schema associated with the class.
"""
if cls._schema is None:
if hasattr(cls, "SCHEMA") and not cls.SCHEMA is None:
cls._schema = cls.SCHEMA
if cls._schema is None:
raise ValueError(
"AVRO schema (e.g. from fastavro.schema.load_schema()) must be provided as SCHEMA attribute")
return cls._schema
@classmethod
def set_schema(cls, schema: str):
"""Sets AVRO schema for all derived classes.
Args:
schema (str): parsed AVRO schema
"""
cls._schema = schema
class DictAvroSchema(pulsar.schema.AvroSchema):
"""``DictAvroSchema`` provides AVRO schema class compatible with the Pulsar AVRO interface.
"""
def __init__(self, record_cls):
"""
Args:
record_cls (class): Class used as a record to write/read Pulsar AVRO messages.
Should be derived from :class:`DictAVRO`
"""
if not issubclass(record_cls, DictAVRO):
raise TypeError(
'Invalid record type {} - record should be derived from DictAVRO class'.format(record_cls.__name__))
super().__init__(record_cls)
def encode(self, obj):
"""Encodes the given object. Used internally by the Pulsar client.
Overrides base implementation in order to allow usage of ``DictAVRO`` based objects.
Args:
obj: AVRO record object to be encoded.
Should be the object of the same class that was used to
initialize the current instance of the class:`DictAVRO`,
i.e. class derivated from :class:`DictAVRO`.
"""
self._validate_object_type(obj)
buffer = io.BytesIO()
fastavro.schemaless_writer(buffer, self._schema, obj)
return buffer.getvalue()
if __name__ == '__main__':
import sys
import json
import datetime
import time
from pprint import pp
WAIT_SECONDS = 3
PULSAR_SERVICE_URL = "pulsar://localhost:6650"
TOPIC = "try"
AVRO_SCHEMA = fastavro.schema.load_schema(sys.argv[1]) if len(sys.argv) > 1 else fastavro.schema.parse_schema(json.loads(
"""{
"type" : "record",
"name" : "Segment",
"namespace" : "try",
"fields" : [ {
"name" : "id",
"type" : "long"
}, {
"name" : "name",
"type" : "string"
}, {
"name" : "when",
"type" : {
"type" : "long",
"logicalType" : "timestamp-millis"
}
}, {
"name" : "direction",
"type" : {
"type" : "enum",
"name" : "CardinalDirection",
"symbols" : [ "north", "south", "east", "west" ]
}
}, {
"name" : "length",
"type" : [ "null", "long" ]
} ]
}
"""))
def send():
class Segment(DictAVRO):
SCHEMA = AVRO_SCHEMA
pulsar_client = pulsar.Client(PULSAR_SERVICE_URL)
producer = pulsar_client.create_producer(topic=TOPIC, schema=DictAvroSchema(Segment))
try:
segment = Segment(
id=99,
name = "<NAME>",
when = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc),
direction = "north",
length = 12345,
)
producer.send(segment)
pp(segment)
finally:
producer.close()
pulsar_client.close()
def receive():
class Segment(DictAVRO):
pass
Segment.set_schema(AVRO_SCHEMA)
pulsar_client = pulsar.Client(PULSAR_SERVICE_URL)
consumer = pulsar_client.subscribe(TOPIC, subscription_name="try", consumer_type=pulsar.ConsumerType.Shared,
schema=DictAvroSchema(Segment))
try:
while True:
msg = consumer.receive(WAIT_SECONDS * 1000)
segment = msg.value()
pp(segment)
consumer.acknowledge(msg)
except Exception as e:
if str(e) == 'Pulsar error: TimeOut':
print("END OF DATA")
else:
raise
finally:
consumer.close()
pulsar_client.close()
send()
time.sleep(WAIT_SECONDS)
receive()
|
[
"io.BytesIO",
"pulsar.Client",
"json.loads",
"pprint.pp",
"time.sleep",
"datetime.datetime.utcnow",
"fastavro.schemaless_writer",
"fastavro.schema.load_schema"
] |
[((4829, 4853), 'time.sleep', 'time.sleep', (['WAIT_SECONDS'], {}), '(WAIT_SECONDS)\n', (4839, 4853), False, 'import time\n'), ((2375, 2387), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2385, 2387), False, 'import io\n'), ((2396, 2449), 'fastavro.schemaless_writer', 'fastavro.schemaless_writer', (['buffer', 'self._schema', 'obj'], {}), '(buffer, self._schema, obj)\n', (2422, 2449), False, 'import fastavro\n'), ((2714, 2754), 'fastavro.schema.load_schema', 'fastavro.schema.load_schema', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (2741, 2754), False, 'import fastavro\n'), ((3473, 3506), 'pulsar.Client', 'pulsar.Client', (['PULSAR_SERVICE_URL'], {}), '(PULSAR_SERVICE_URL)\n', (3486, 3506), False, 'import pulsar\n'), ((4144, 4177), 'pulsar.Client', 'pulsar.Client', (['PULSAR_SERVICE_URL'], {}), '(PULSAR_SERVICE_URL)\n', (4157, 4177), False, 'import pulsar\n'), ((2810, 3363), 'json.loads', 'json.loads', (['"""{\n "type" : "record",\n "name" : "Segment",\n "namespace" : "try",\n "fields" : [ {\n "name" : "id",\n "type" : "long"\n }, {\n "name" : "name",\n "type" : "string"\n }, {\n "name" : "when",\n "type" : {\n "type" : "long",\n "logicalType" : "timestamp-millis"\n }\n }, {\n "name" : "direction",\n "type" : {\n "type" : "enum",\n "name" : "CardinalDirection",\n "symbols" : [ "north", "south", "east", "west" ]\n }\n }, {\n "name" : "length",\n "type" : [ "null", "long" ]\n } ]\n}\n"""'], {}), '(\n """{\n "type" : "record",\n "name" : "Segment",\n "namespace" : "try",\n "fields" : [ {\n "name" : "id",\n "type" : "long"\n }, {\n "name" : "name",\n "type" : "string"\n }, {\n "name" : "when",\n "type" : {\n "type" : "long",\n "logicalType" : "timestamp-millis"\n }\n }, {\n "name" : "direction",\n "type" : {\n "type" : "enum",\n "name" : "CardinalDirection",\n "symbols" : [ "north", "south", "east", "west" ]\n }\n }, {\n "name" : "length",\n "type" : [ "null", "long" ]\n } ]\n}\n"""\n )\n', (2820, 3363), False, 'import json\n'), ((3917, 3928), 'pprint.pp', 'pp', (['segment'], {}), '(segment)\n', (3919, 3928), False, 'from pprint import pp\n'), ((4521, 4532), 'pprint.pp', 'pp', (['segment'], {}), '(segment)\n', (4523, 4532), False, 'from pprint import pp\n'), ((3724, 3750), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3748, 3750), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
"""Map views"""
import json
from django.conf import settings
from django.views.generic import DetailView
from mspray.apps.main.mixins import SiteNameMixin
from mspray.apps.main.models import Location
from mspray.apps.main.query import get_location_qs
from mspray.apps.main.serializers.target_area import (
GeoTargetAreaSerializer,
TargetAreaQuerySerializer,
TargetAreaSerializer,
count_duplicates,
get_duplicates,
)
from mspray.apps.main.utils import get_location_dict, parse_spray_date
from mspray.apps.main.views.target_area import (
TargetAreaHouseholdsViewSet,
TargetAreaViewSet,
)
class MapView(SiteNameMixin, DetailView):
"""Map View"""
template_name = "map.html"
model = Location
slug_field = "pk"
def get_queryset(self):
return get_location_qs(super(MapView, self).get_queryset())
def get_context_data(self, **kwargs):
context = super(MapView, self).get_context_data(**kwargs)
serializer_class = (
TargetAreaQuerySerializer
if settings.SITE_NAME == "namibia"
else TargetAreaSerializer
)
location = context["object"]
if location.level == "RHC":
location = get_location_qs(
Location.objects.filter(pk=location.pk), "RHC"
).first()
serializer = serializer_class(
location, context={"request": self.request}
)
context["target_data"] = serializer.data
spray_date = parse_spray_date(self.request)
if spray_date:
context["spray_date"] = spray_date
if settings.MSPRAY_SPATIAL_QUERIES or context["object"].geom:
response = TargetAreaViewSet.as_view({"get": "retrieve"})(
self.request, pk=context["object"].pk, format="geojson"
)
response.render()
context["not_sprayable_value"] = getattr(
settings, "NOT_SPRAYABLE_VALUE", "noteligible"
)
context["ta_geojson"] = response.content.decode()
bgeom = settings.HH_BUFFER and settings.OSM_SUBMISSIONS
if self.object.level in ["district", "RHC"]:
data = GeoTargetAreaSerializer(
get_location_qs(
self.object.location_set.all(), self.object.level
),
many=True,
context={"request": self.request},
).data
context["hh_geojson"] = json.dumps(data)
else:
loc = context["object"]
hhview = TargetAreaHouseholdsViewSet.as_view(
{"get": "retrieve"}
)
response = hhview(
self.request,
pk=loc.pk,
bgeom=bgeom,
spray_date=spray_date,
format="geojson",
)
response.render()
context["hh_geojson"] = response.content.decode()
sprayed_duplicates = list(
get_duplicates(loc, True, spray_date)
)
not_sprayed_duplicates = list(
get_duplicates(loc, False, spray_date)
)
context["sprayed_duplicates_data"] = json.dumps(
sprayed_duplicates
)
context["sprayed_duplicates"] = count_duplicates(
loc, True, spray_date
)
context["not_sprayed_duplicates_data"] = json.dumps(
not_sprayed_duplicates
)
context["not_sprayed_duplicates"] = count_duplicates(
loc, False
)
context["districts"] = (
Location.objects.filter(parent=None)
.values_list("id", "code", "name")
.order_by("name")
)
context.update({"map_menu": True})
context.update(get_location_dict(self.object.pk))
context["not_sprayed_reasons"] = json.dumps(
settings.MSPRAY_UNSPRAYED_REASON_OTHER
)
return context
|
[
"mspray.apps.main.utils.get_location_dict",
"mspray.apps.main.serializers.target_area.get_duplicates",
"json.dumps",
"mspray.apps.main.models.Location.objects.filter",
"mspray.apps.main.views.target_area.TargetAreaHouseholdsViewSet.as_view",
"mspray.apps.main.views.target_area.TargetAreaViewSet.as_view",
"mspray.apps.main.utils.parse_spray_date",
"mspray.apps.main.serializers.target_area.count_duplicates"
] |
[((1520, 1550), 'mspray.apps.main.utils.parse_spray_date', 'parse_spray_date', (['self.request'], {}), '(self.request)\n', (1536, 1550), False, 'from mspray.apps.main.utils import get_location_dict, parse_spray_date\n'), ((4108, 4158), 'json.dumps', 'json.dumps', (['settings.MSPRAY_UNSPRAYED_REASON_OTHER'], {}), '(settings.MSPRAY_UNSPRAYED_REASON_OTHER)\n', (4118, 4158), False, 'import json\n'), ((4032, 4065), 'mspray.apps.main.utils.get_location_dict', 'get_location_dict', (['self.object.pk'], {}), '(self.object.pk)\n', (4049, 4065), False, 'from mspray.apps.main.utils import get_location_dict, parse_spray_date\n'), ((1714, 1760), 'mspray.apps.main.views.target_area.TargetAreaViewSet.as_view', 'TargetAreaViewSet.as_view', (["{'get': 'retrieve'}"], {}), "({'get': 'retrieve'})\n", (1739, 1760), False, 'from mspray.apps.main.views.target_area import TargetAreaHouseholdsViewSet, TargetAreaViewSet\n'), ((2528, 2544), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2538, 2544), False, 'import json\n'), ((2628, 2684), 'mspray.apps.main.views.target_area.TargetAreaHouseholdsViewSet.as_view', 'TargetAreaHouseholdsViewSet.as_view', (["{'get': 'retrieve'}"], {}), "({'get': 'retrieve'})\n", (2663, 2684), False, 'from mspray.apps.main.views.target_area import TargetAreaHouseholdsViewSet, TargetAreaViewSet\n'), ((3351, 3381), 'json.dumps', 'json.dumps', (['sprayed_duplicates'], {}), '(sprayed_duplicates)\n', (3361, 3381), False, 'import json\n'), ((3468, 3507), 'mspray.apps.main.serializers.target_area.count_duplicates', 'count_duplicates', (['loc', '(True)', 'spray_date'], {}), '(loc, True, spray_date)\n', (3484, 3507), False, 'from mspray.apps.main.serializers.target_area import GeoTargetAreaSerializer, TargetAreaQuerySerializer, TargetAreaSerializer, count_duplicates, get_duplicates\n'), ((3603, 3637), 'json.dumps', 'json.dumps', (['not_sprayed_duplicates'], {}), '(not_sprayed_duplicates)\n', (3613, 3637), False, 'import json\n'), ((3728, 3756), 'mspray.apps.main.serializers.target_area.count_duplicates', 'count_duplicates', (['loc', '(False)'], {}), '(loc, False)\n', (3744, 3756), False, 'from mspray.apps.main.serializers.target_area import GeoTargetAreaSerializer, TargetAreaQuerySerializer, TargetAreaSerializer, count_duplicates, get_duplicates\n'), ((3118, 3155), 'mspray.apps.main.serializers.target_area.get_duplicates', 'get_duplicates', (['loc', '(True)', 'spray_date'], {}), '(loc, True, spray_date)\n', (3132, 3155), False, 'from mspray.apps.main.serializers.target_area import GeoTargetAreaSerializer, TargetAreaQuerySerializer, TargetAreaSerializer, count_duplicates, get_duplicates\n'), ((3241, 3279), 'mspray.apps.main.serializers.target_area.get_duplicates', 'get_duplicates', (['loc', '(False)', 'spray_date'], {}), '(loc, False, spray_date)\n', (3255, 3279), False, 'from mspray.apps.main.serializers.target_area import GeoTargetAreaSerializer, TargetAreaQuerySerializer, TargetAreaSerializer, count_duplicates, get_duplicates\n'), ((1276, 1315), 'mspray.apps.main.models.Location.objects.filter', 'Location.objects.filter', ([], {'pk': 'location.pk'}), '(pk=location.pk)\n', (1299, 1315), False, 'from mspray.apps.main.models import Location\n'), ((3841, 3877), 'mspray.apps.main.models.Location.objects.filter', 'Location.objects.filter', ([], {'parent': 'None'}), '(parent=None)\n', (3864, 3877), False, 'from mspray.apps.main.models import Location\n')]
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Encode and decode the model config. for JDD."""
from copy import deepcopy
import numpy as np
from vega.search_space.codec import Codec
from vega.core.common.class_factory import ClassType, ClassFactory
@ClassFactory.register(ClassType.CODEC)
class JDDCodec(Codec):
"""Codec of the JDD search space."""
def __init__(self, search_space=None, **kwargs):
"""Construct the SRCodec class.
:param codec_name: name of the codec
:type codec_name: str
:param search_space: Search space of the codec
:type search_space: dictionary
"S_" means that the shrink RDB blcock with 1x1 convolution .
"G_" means that the RDB block with channel shuffle and group convolution.
"C_" means that the contextual RDB block with recursive layer.
first number: the number of convolutional layers in a block
second number: the growth rate of dense connected in a block
third number: the number of output channel in a block
"""
super(JDDCodec, self).__init__(search_space, **kwargs)
self.func_type, self.func_prob = self.get_choices()
self.func_type_num = len(self.func_type)
def get_choices(self):
"""Get search space information.
:return: the configs of the blocks
:rtype: lists
"""
channel_types = ['16', '32', '48']
channel_prob = [1, 0.5, 0.2]
block_types = ['R']
block_prob = [1]
model_type = self.search_space['modules'][0]
channel_types = self.search_space[model_type]['channel_types']
channel_prob = self.search_space[model_type]['channel_prob']
block_types = self.search_space[model_type]['block_types']
block_prob = self.search_space[model_type]['block_prob']
func_type = []
func_prob = []
for b_id in range(len(block_types)):
for chin_id in range(len(channel_types)):
for chout_id in range(len(channel_types)):
func_type.append(block_types[b_id] + '_' + channel_types[chin_id] + '_' + channel_types[chout_id])
func_prob.append(block_prob[b_id] * channel_prob[chin_id] * channel_prob[chout_id])
func_prob = np.cumsum(np.asarray(func_prob) / sum(func_prob))
return func_type, func_prob
def decode(self, indiv):
"""Add the network structure to config.
:param indiv: an individual which contains network architecture code
:type indiv: individual class
:return: config of model structure
:rtype: dictionary
"""
indiv_cfg = deepcopy(self.search_space)
model = indiv_cfg['modules'][0]
indiv_cfg[model]['code'] = indiv.gene.tolist()
indiv_cfg[model]['architecture'] = indiv.active_net_list()
return indiv_cfg
|
[
"vega.core.common.class_factory.ClassFactory.register",
"copy.deepcopy",
"numpy.asarray"
] |
[((646, 684), 'vega.core.common.class_factory.ClassFactory.register', 'ClassFactory.register', (['ClassType.CODEC'], {}), '(ClassType.CODEC)\n', (667, 684), False, 'from vega.core.common.class_factory import ClassType, ClassFactory\n'), ((3053, 3080), 'copy.deepcopy', 'deepcopy', (['self.search_space'], {}), '(self.search_space)\n', (3061, 3080), False, 'from copy import deepcopy\n'), ((2681, 2702), 'numpy.asarray', 'np.asarray', (['func_prob'], {}), '(func_prob)\n', (2691, 2702), True, 'import numpy as np\n')]
|
# Testing CSSCrypt
import CSSCrypt
shiftKey = '3453465'
CSSCrypt = CSSCrypt.encryption()
encMsg = CSSCrypt.encrypt('My Secret Message', shiftKey)
print (encMsg)
print(CSSCrypt.decrypt(encMsg, shiftKey))
|
[
"CSSCrypt.encryption",
"CSSCrypt.decrypt",
"CSSCrypt.encrypt"
] |
[((68, 89), 'CSSCrypt.encryption', 'CSSCrypt.encryption', ([], {}), '()\n', (87, 89), False, 'import CSSCrypt\n'), ((99, 146), 'CSSCrypt.encrypt', 'CSSCrypt.encrypt', (['"""My Secret Message"""', 'shiftKey'], {}), "('My Secret Message', shiftKey)\n", (115, 146), False, 'import CSSCrypt\n'), ((168, 202), 'CSSCrypt.decrypt', 'CSSCrypt.decrypt', (['encMsg', 'shiftKey'], {}), '(encMsg, shiftKey)\n', (184, 202), False, 'import CSSCrypt\n')]
|
from __future__ import annotations
import enum
from typing import Union, TYPE_CHECKING
from ravendb.http.request_executor import ClusterRequestExecutor
from ravendb.http.topology import Topology
from ravendb.serverwide.operations.common import (
GetBuildNumberOperation,
ServerOperation,
VoidServerOperation,
ServerWideOperation,
)
from ravendb.tools.utils import CaseInsensitiveDict
if TYPE_CHECKING:
from ravendb.documents import DocumentStore
from ravendb.documents.operations import OperationIdResult, Operation
class ConnectionStringType(enum.Enum):
NONE = "NONE"
RAVEN = "RAVEN"
SQL = "SQL"
OLAP = "OLAP"
class ServerOperationExecutor:
def __init__(self, store: DocumentStore):
if store is None:
raise ValueError("Store cannot be None")
request_executor = self.create_request_executor(store)
if request_executor is None:
raise ValueError("Request Executor cannot be None")
self.__store = store
self.__request_executor = request_executor
self.__initial_request_executor = None
self.__node_tag = None
self.__cache = CaseInsensitiveDict()
# todo: register events
# todo: if node tag is null add after_close_listener
def send(
self,
operation: Union[VoidServerOperation, ServerOperation],
):
if isinstance(operation, VoidServerOperation):
command = operation.get_command(self.__request_executor.conventions)
self.__request_executor.execute_command(command)
elif isinstance(operation, ServerOperation):
command = operation.get_command(self.__request_executor.conventions)
self.__request_executor.execute_command(command)
return command.result
def send_async(self, operation: ServerOperation[OperationIdResult]) -> Operation:
command = operation.get_command(self.__request_executor.conventions)
self.__request_executor.execute_command(command)
return ServerWideOperation(
self.__request_executor,
self.__request_executor.conventions,
command.result.operation_id,
command.selected_node_tag if command.selected_node_tag else command.result.operation_node_tag,
)
def close(self) -> None:
if self.__node_tag is not None:
return
if self.__request_executor is not None:
self.__request_executor.close()
cache = self.__cache
if cache is not None:
for key, value in cache.items():
request_executor = value._request_executor
if request_executor is not None:
request_executor.close()
cache.clear()
def __get_topology(self, request_executor: ClusterRequestExecutor) -> Topology:
topology: Topology = None
try:
topology = request_executor.topology
if topology is None:
# a bit rude way to make sure that topology was refreshed
# but it handles a case when first topology update failed
operation = GetBuildNumberOperation()
command = operation.get_command(request_executor.conventions)
request_executor.execute_command(command)
topology = request_executor.topology
except:
pass
if topology is None:
raise RuntimeError("Could not fetch the topology")
return topology
@staticmethod
def create_request_executor(store: DocumentStore) -> ClusterRequestExecutor:
return (
ClusterRequestExecutor.create_for_single_node(
store.urls[0],
store.thread_pool_executor,
store.conventions,
store.certificate_path,
store.trust_store_path,
)
if store.conventions.disable_topology_updates
else ClusterRequestExecutor.create_without_database_name(
store.urls,
store.thread_pool_executor,
store.conventions,
store.certificate_path,
store.trust_store_path,
)
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return
|
[
"ravendb.http.request_executor.ClusterRequestExecutor.create_without_database_name",
"ravendb.tools.utils.CaseInsensitiveDict",
"ravendb.http.request_executor.ClusterRequestExecutor.create_for_single_node",
"ravendb.serverwide.operations.common.GetBuildNumberOperation",
"ravendb.serverwide.operations.common.ServerWideOperation"
] |
[((1160, 1181), 'ravendb.tools.utils.CaseInsensitiveDict', 'CaseInsensitiveDict', ([], {}), '()\n', (1179, 1181), False, 'from ravendb.tools.utils import CaseInsensitiveDict\n'), ((2042, 2256), 'ravendb.serverwide.operations.common.ServerWideOperation', 'ServerWideOperation', (['self.__request_executor', 'self.__request_executor.conventions', 'command.result.operation_id', '(command.selected_node_tag if command.selected_node_tag else command.result\n .operation_node_tag)'], {}), '(self.__request_executor, self.__request_executor.\n conventions, command.result.operation_id, command.selected_node_tag if\n command.selected_node_tag else command.result.operation_node_tag)\n', (2061, 2256), False, 'from ravendb.serverwide.operations.common import GetBuildNumberOperation, ServerOperation, VoidServerOperation, ServerWideOperation\n'), ((3662, 3827), 'ravendb.http.request_executor.ClusterRequestExecutor.create_for_single_node', 'ClusterRequestExecutor.create_for_single_node', (['store.urls[0]', 'store.thread_pool_executor', 'store.conventions', 'store.certificate_path', 'store.trust_store_path'], {}), '(store.urls[0], store.\n thread_pool_executor, store.conventions, store.certificate_path, store.\n trust_store_path)\n', (3707, 3827), False, 'from ravendb.http.request_executor import ClusterRequestExecutor\n'), ((3988, 4156), 'ravendb.http.request_executor.ClusterRequestExecutor.create_without_database_name', 'ClusterRequestExecutor.create_without_database_name', (['store.urls', 'store.thread_pool_executor', 'store.conventions', 'store.certificate_path', 'store.trust_store_path'], {}), '(store.urls, store.\n thread_pool_executor, store.conventions, store.certificate_path, store.\n trust_store_path)\n', (4039, 4156), False, 'from ravendb.http.request_executor import ClusterRequestExecutor\n'), ((3165, 3190), 'ravendb.serverwide.operations.common.GetBuildNumberOperation', 'GetBuildNumberOperation', ([], {}), '()\n', (3188, 3190), False, 'from ravendb.serverwide.operations.common import GetBuildNumberOperation, ServerOperation, VoidServerOperation, ServerWideOperation\n')]
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Wed Sep 4 08:34:31 2013
# by: The Resource Compiler for PyQt (Qt v5.1.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x00\xf9\
\x69\
\x6d\x70\x6f\x72\x74\x20\x51\x74\x51\x75\x69\x63\x6b\x20\x32\x2e\
\x30\x0a\x0a\x52\x65\x63\x74\x61\x6e\x67\x6c\x65\x20\x7b\x0a\x20\
\x20\x20\x20\x77\x69\x64\x74\x68\x3a\x20\x33\x36\x30\x0a\x20\x20\
\x20\x20\x68\x65\x69\x67\x68\x74\x3a\x20\x33\x36\x30\x0a\x20\x20\
\x20\x20\x54\x65\x78\x74\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x61\x6e\x63\x68\x6f\x72\x73\x2e\x63\x65\x6e\x74\x65\x72\x49\
\x6e\x3a\x20\x70\x61\x72\x65\x6e\x74\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x74\x65\x78\x74\x3a\x20\x22\x48\x65\x6c\x6c\x6f\x20\x57\
\x6f\x72\x6c\x64\x22\x0a\x20\x20\x20\x20\x7d\x0a\x20\x20\x20\x20\
\x4d\x6f\x75\x73\x65\x41\x72\x65\x61\x20\x7b\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x61\x6e\x63\x68\x6f\x72\x73\x2e\x66\x69\x6c\x6c\
\x3a\x20\x70\x61\x72\x65\x6e\x74\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x6f\x6e\x43\x6c\x69\x63\x6b\x65\x64\x3a\x20\x7b\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x51\x74\x2e\x71\x75\x69\
\x74\x28\x29\x3b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x20\
\x20\x20\x20\x7d\x0a\x7d\x0a\x0a\
"
qt_resource_name = b"\
\x00\x09\
\x03\x32\x8d\xbc\
\x00\x68\
\x00\x65\x00\x6c\x00\x6c\x00\x6f\x00\x2e\x00\x71\x00\x6d\x00\x6c\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
[
"PyQt5.QtCore.qUnregisterResourceData",
"PyQt5.QtCore.qRegisterResourceData"
] |
[((1597, 1688), 'PyQt5.QtCore.qRegisterResourceData', 'QtCore.qRegisterResourceData', (['(1)', 'qt_resource_struct', 'qt_resource_name', 'qt_resource_data'], {}), '(1, qt_resource_struct, qt_resource_name,\n qt_resource_data)\n', (1625, 1688), False, 'from PyQt5 import QtCore\n'), ((1718, 1811), 'PyQt5.QtCore.qUnregisterResourceData', 'QtCore.qUnregisterResourceData', (['(1)', 'qt_resource_struct', 'qt_resource_name', 'qt_resource_data'], {}), '(1, qt_resource_struct, qt_resource_name,\n qt_resource_data)\n', (1748, 1811), False, 'from PyQt5 import QtCore\n')]
|
from collections import defaultdict
import networkx as nx
class Node:
"""Class representing a node in the KB.
"""
def __init__(self, kb, name, data, watches=[]):
super().__setattr__('_kb', kb)
super().__setattr__('_name', name)
nx.set_node_attributes(self._kb.G, {self._name: data})
self._watches = defaultdict(list)
for watch in watches:
self._watches[watch[0]].append(watch[1])
def __repr__(self):
return self._name
def __eq__(self, comparator):
return self._name == str(comparator)
def __ne__(self, comparator):
return self._name != str(comparator)
def __getattr__(self, key):
try:
return self._kb.G.nodes(data=True)[self._name][key]
except KeyError as e:
return None
def __setattr__(self, key, value):
attrs = self._kb.G.nodes(data=True)[self._name]
prev_val = attrs.get(key, None)
attrs.update({key: value})
nx.set_node_attributes(self._kb.G, {self._name: attrs})
for watch_fn in self._watches.get(key, []):
watch_fn(self, prev_val)
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
return self.__setattr__(key, value)
@property
def attrs(self):
"""Returns attributes of the node stored in the KB
"""
attributes = self._kb.G.nodes(data=True)[self._name]
try:
del attributes['_watches']
del attributes['_new_neighbor_fn']
except:
pass
return attributes
@property
def neighbors(self):
"""Returns the node's neighbors, in the format of tuples:
[(neighbor_name, [{'pred': predicate aka edge_relation}])]
"""
return self._kb.neighbors(self._name)
def watch(self, attribute, fn):
"""Execute user-defined function when the value of attribute changes.
Function takes two args: `node` which has access to all
its own attributes, including neighbors and edges, and the second
arg is the previous value of the attribute that changed.
:returns int: id of the watch
:Example:
>>> kb.store('node(node1)')
>>> node = kb.node('node1')
>>> node.grains = 3
>>> print(node.grains)
3
>>> node.watch('grains', lambda x: print('grains changed to ' + x.grains))
('grains', 0)
>>> node.grains += 1
grains changed to 4
"""
self._watches[attribute].append(fn)
return (attribute, len(self._watches) - 1)
def remove_watch(self, attribute_or_watch_id):
"""Stop watching `attribute_or_watch_id`.
If it is a string, delete all watches for that attribute.
If it is a tuple of (attribute, watch_id): delete that specific watch.
"""
if isinstance(attribute_or_watch_id, tuple):
self._watches[attribute_or_watch_id[0]].pop(attribute_or_watch_id[1])
else:
self._watches[attribute_or_watch_id] = []
def watch_for_new_neighbor(self, fn):
"""Execute `fn` when node receives a new neighbor."""
self.__setattr__('_new_neighbor_fn', fn)
|
[
"collections.defaultdict",
"networkx.set_node_attributes"
] |
[((266, 320), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self._kb.G', '{self._name: data}'], {}), '(self._kb.G, {self._name: data})\n', (288, 320), True, 'import networkx as nx\n'), ((345, 362), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (356, 362), False, 'from collections import defaultdict\n'), ((1012, 1067), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self._kb.G', '{self._name: attrs}'], {}), '(self._kb.G, {self._name: attrs})\n', (1034, 1067), True, 'import networkx as nx\n')]
|
import ctypes, ctypes.util
import sys, os, threading, time
sys.path.append(os.pardir)
import sdl2
#from sdl2 import *
def timer_callback_fn(interval, param):
print("HI")
return interval
def timer_test():
resolution = 60
cb = sdl2.SDL_TimerCallback(timer_callback_fn)
print(type(cb))
t1 = sdl2.SDL_AddTimer(resolution, cb, None)
print("Waiting Timer...")
time.sleep(1)
print("Timer Done.")
sdl2.SDL_RemoveTimer(t1)
def main():
sdl2.sdl2_load(ctypes.util.find_library('SDL2')) # '/usr/local/lib/libSDL2.dylib'
sdl2.SDL_Init(sdl2.SDL_INIT_EVERYTHING)
thr = threading.Thread(target = timer_test, name = "thr")
thr.start()
print("Waiting Thread...")
thr.join()
print("Thread Done.")
sdl2.SDL_Quit()
if __name__ == '__main__':
main()
|
[
"sys.path.append",
"threading.Thread",
"sdl2.SDL_Init",
"time.sleep",
"sdl2.SDL_RemoveTimer",
"sdl2.SDL_AddTimer",
"sdl2.SDL_TimerCallback",
"sdl2.SDL_Quit",
"ctypes.util.find_library"
] |
[((59, 85), 'sys.path.append', 'sys.path.append', (['os.pardir'], {}), '(os.pardir)\n', (74, 85), False, 'import sys, os, threading, time\n'), ((244, 285), 'sdl2.SDL_TimerCallback', 'sdl2.SDL_TimerCallback', (['timer_callback_fn'], {}), '(timer_callback_fn)\n', (266, 285), False, 'import sdl2\n'), ((315, 354), 'sdl2.SDL_AddTimer', 'sdl2.SDL_AddTimer', (['resolution', 'cb', 'None'], {}), '(resolution, cb, None)\n', (332, 354), False, 'import sdl2\n'), ((389, 402), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (399, 402), False, 'import sys, os, threading, time\n'), ((432, 456), 'sdl2.SDL_RemoveTimer', 'sdl2.SDL_RemoveTimer', (['t1'], {}), '(t1)\n', (452, 456), False, 'import sdl2\n'), ((560, 599), 'sdl2.SDL_Init', 'sdl2.SDL_Init', (['sdl2.SDL_INIT_EVERYTHING'], {}), '(sdl2.SDL_INIT_EVERYTHING)\n', (573, 599), False, 'import sdl2\n'), ((611, 658), 'threading.Thread', 'threading.Thread', ([], {'target': 'timer_test', 'name': '"""thr"""'}), "(target=timer_test, name='thr')\n", (627, 658), False, 'import sys, os, threading, time\n'), ((756, 771), 'sdl2.SDL_Quit', 'sdl2.SDL_Quit', ([], {}), '()\n', (769, 771), False, 'import sdl2\n'), ((489, 521), 'ctypes.util.find_library', 'ctypes.util.find_library', (['"""SDL2"""'], {}), "('SDL2')\n", (513, 521), False, 'import ctypes, ctypes.util\n')]
|
#Create a script that uses countries_by_area.txt file as data sourcea and prints out the top 5 most densely populated countries
import pandas
data = pandas.read_csv("countries_by_area.txt")
data["density"] = data["population_2013"] / data["area_sqkm"]
data = data.sort_values(by="density", ascending=False)
for index, row in data[:5].iterrows():
print(row["country"])
|
[
"pandas.read_csv"
] |
[((155, 195), 'pandas.read_csv', 'pandas.read_csv', (['"""countries_by_area.txt"""'], {}), "('countries_by_area.txt')\n", (170, 195), False, 'import pandas\n')]
|
import eisoil.core.pluginmanager as pm
from crpc.configrpc import ConfigRPC
def setup():
# setup config keys
xmlrpc = pm.getService('xmlrpc')
xmlrpc.registerXMLRPC('configrpc', ConfigRPC(), '/amconfig') # handlerObj, endpoint
|
[
"crpc.configrpc.ConfigRPC",
"eisoil.core.pluginmanager.getService"
] |
[((128, 151), 'eisoil.core.pluginmanager.getService', 'pm.getService', (['"""xmlrpc"""'], {}), "('xmlrpc')\n", (141, 151), True, 'import eisoil.core.pluginmanager as pm\n'), ((191, 202), 'crpc.configrpc.ConfigRPC', 'ConfigRPC', ([], {}), '()\n', (200, 202), False, 'from crpc.configrpc import ConfigRPC\n')]
|
#!/usr/bin/env python3
import re
import time
import sys
import requests
from bs4 import BeautifulSoup
def name_and_class(tag_name, class_name):
return lambda e: e.name == tag_name and e.has_attr('class') and class_name in e['class']
def find_search_result_pages(url):
'Return a list of URLs of the pages of search results'
r = requests.get(url)
if not r.status_code == 200:
print('Could not get search results', file=sys.stderr)
return None
soup = BeautifulSoup(r.text, "html.parser")
divs = soup.find_all(name_and_class('div', 'pagination'))
if not divs:
print("Didn't find any pages of search results",
file=sys.stderr)
div = divs[0]
links = div.find_all('a')
numbers = [link for link in links if re.match(r'^[0-9]+$', link.text)]
last = numbers[-1]
return [make_github_page_url(n) for n in range(1, int(last.text)+1)]
def make_github_page_url(number):
url = ''.join([
'https://github.com/search?',
'p={n}'.format(n=number),
'&q="curated+list"',
'&type=Repositories',
'&utf8=%E2%9C%93',
])
return url
def find_repo_elements(soup):
'Find repo <li> elements in GitHub search result soup'
return soup.find_all(name_and_class('li', 'repo-list-item'))
def make_repo_dicts(repo_elements):
'Make repo dictionaries out of repo <li> soups'
dicts = [make_repo_dict(r) for r in repo_elements]
return [d for d in dicts if d is not None]
def make_repo_dict(repo_element):
'Make a repo dictionary out of a single repo <li> soup'
# Find name and URL
h3 = repo_element.find('h3')
if not h3:
print('No <h3> element found in <li> of search result',
file=sys.stderr)
return None
a = h3.find('a')
if not h3:
print('No <a> element found in <li><h3> of search result',
file=sys.stderr)
return None
name = a.text
url = a['href']
# Find description
paras = repo_element.find_all(name_and_class('p', 'repo-list-description'))
if not paras:
print('No description <p> element found in <li> of search result',
file=sys.stderr)
return None
p = paras[0]
desc = p.text.strip()
return {
'name': name,
'url': 'https://github.com{u}'.format(u=url),
'description': desc,
}
def repo_dicts_from_search(url):
pages = find_search_result_pages(url)
pages.reverse()
reqs = []
print('Got search result pages, making {0} requests...'
.format(len(pages), file=sys.stderr))
i = 0
while pages:
i += 1
page = pages.pop()
print('... making request {0}'.format(i), file=sys.stderr)
while True:
r = requests.get(page)
if r.status_code == 200:
reqs.append(r)
time.sleep(5)
break
elif r.status_code == 429:
print('GitHub server tired of us, waiting 60 seconds', file=sys.stderr)
time.sleep(60)
else:
print('... request {0} FAILED: {1}'.format(i+1, r.status_code), file=sys.stderr)
break
print('Got search result requests, making soup...', file=sys.stderr)
soups = [
BeautifulSoup(r.text, "html.parser") for r in reqs
if r.status_code == 200
]
print('Making repo elements...', file=sys.stderr)
repo_elements = [find_repo_elements(soup) for soup in soups]
repo_dicts = [make_repo_dicts(r) for r in repo_elements]
dicts = []
for rd in repo_dicts:
dicts += rd
return sorted(dicts, key=lambda d: d['name'])
def print_repo(dictionary):
u = dictionary['url']
n = dictionary['name']
d = dictionary['description']
return '* [{n}]({u}): {d}'.format(u=u, n=n, d=d)
if __name__ == '__main__':
url = 'https://github.com/search?q=%22curated+list%22&type=Repositories&utf8=%E2%9C%93'
repos = repo_dicts_from_search(url)
lines = []
with open('head.md') as head:
lines = [line.strip() for line in head.readlines()]
for repo in repos:
lines.append(print_repo(repo))
with open('README.md', 'w') as fh:
for line in lines:
print(line, file=fh)
|
[
"bs4.BeautifulSoup",
"re.match",
"requests.get",
"time.sleep"
] |
[((343, 360), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (355, 360), False, 'import requests\n'), ((489, 525), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""html.parser"""'], {}), "(r.text, 'html.parser')\n", (502, 525), False, 'from bs4 import BeautifulSoup\n'), ((3301, 3337), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""html.parser"""'], {}), "(r.text, 'html.parser')\n", (3314, 3337), False, 'from bs4 import BeautifulSoup\n'), ((780, 811), 're.match', 're.match', (['"""^[0-9]+$"""', 'link.text'], {}), "('^[0-9]+$', link.text)\n", (788, 811), False, 'import re\n'), ((2771, 2789), 'requests.get', 'requests.get', (['page'], {}), '(page)\n', (2783, 2789), False, 'import requests\n'), ((2874, 2887), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2884, 2887), False, 'import time\n'), ((3053, 3067), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (3063, 3067), False, 'import time\n')]
|
# Create your views here.
from rest_framework import viewsets
from biolabs.core import models as core_models
from biolabs.core.serializers import LaboratorySerializer
class LaboratoryViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows labs to be viewed or edited.
"""
queryset = core_models.Laboratory.objects.filter(is_moderated=True)
serializer_class = LaboratorySerializer
|
[
"biolabs.core.models.Laboratory.objects.filter"
] |
[((308, 364), 'biolabs.core.models.Laboratory.objects.filter', 'core_models.Laboratory.objects.filter', ([], {'is_moderated': '(True)'}), '(is_moderated=True)\n', (345, 364), True, 'from biolabs.core import models as core_models\n')]
|
from pvector import PVector
WIDTH = 400
HEIGHT = 400
class Ball():
def __init__(self, x, y, v_x, v_y, radius, color):
self.position = PVector(x, y)
self.radius = radius
self.color = color
self. velocity = PVector(v_x, v_y)
def show(self, screen):
screen.draw.filled_circle((self.position.x, self.position.y), self.radius, self.color)
def move(self):
self.position.add(self.velocity)
if (self.position.x > WIDTH - self.radius) or (self.position.x < self.radius):
self.velocity.x *= -1
if (self.position.y > HEIGHT - self.radius) or (self.position.y < self.radius):
self.velocity.y *= -1
|
[
"pvector.PVector"
] |
[((153, 166), 'pvector.PVector', 'PVector', (['x', 'y'], {}), '(x, y)\n', (160, 166), False, 'from pvector import PVector\n'), ((248, 265), 'pvector.PVector', 'PVector', (['v_x', 'v_y'], {}), '(v_x, v_y)\n', (255, 265), False, 'from pvector import PVector\n')]
|
from django.template import Library
from django.utils.encoding import force_text
register = Library()
def force_text_filter(obj):
return force_text(obj)
register.filter('force_text', force_text_filter)
|
[
"django.template.Library",
"django.utils.encoding.force_text"
] |
[((93, 102), 'django.template.Library', 'Library', ([], {}), '()\n', (100, 102), False, 'from django.template import Library\n'), ((144, 159), 'django.utils.encoding.force_text', 'force_text', (['obj'], {}), '(obj)\n', (154, 159), False, 'from django.utils.encoding import force_text\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-08-23 20:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('issue_order', '0014_auto_20180819_2108'),
]
operations = [
migrations.CreateModel(
name='Route',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('system', models.CharField(choices=[('jixun', '\u5409\u8bafCC\u7ebf'), ('postal', '\u90ae\u653fBC\u7ebf'), ('yunda', '\u97f5\u8fbeCC\u7ebf')], db_index=True, max_length=32)),
('code', models.CharField(db_index=True, max_length=64)),
('name', models.CharField(max_length=64)),
],
),
migrations.RemoveField(
model_name='courierorder',
name='system',
),
migrations.AddField(
model_name='courierorder',
name='route',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.PROTECT, to='issue_order.Route'),
preserve_default=False,
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField"
] |
[((882, 946), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""courierorder"""', 'name': '"""system"""'}), "(model_name='courierorder', name='system')\n", (904, 946), False, 'from django.db import migrations, models\n'), ((1095, 1196), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '(0)', 'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""issue_order.Route"""'}), "(default=0, on_delete=django.db.models.deletion.PROTECT,\n to='issue_order.Route')\n", (1112, 1196), False, 'from django.db import migrations, models\n'), ((431, 524), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (447, 524), False, 'from django.db import migrations, models\n'), ((550, 671), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('jixun', '吉讯CC线'), ('postal', '邮政BC线'), ('yunda', '韵达CC线')]", 'db_index': '(True)', 'max_length': '(32)'}), "(choices=[('jixun', '吉讯CC线'), ('postal', '邮政BC线'), ('yunda',\n '韵达CC线')], db_index=True, max_length=32)\n", (566, 671), False, 'from django.db import migrations, models\n'), ((740, 786), 'django.db.models.CharField', 'models.CharField', ([], {'db_index': '(True)', 'max_length': '(64)'}), '(db_index=True, max_length=64)\n', (756, 786), False, 'from django.db import migrations, models\n'), ((814, 845), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (830, 845), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.db import models, migrations
logging.basicConfig(format="%(asctime)-15s %(message)s")
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
BULK_SIZE = 2500
def move_metadata(apps, schema_editor):
IEDocument = apps.get_model('corpus', 'IEDocument')
IEDocumentMetadata = apps.get_model('corpus', 'IEDocumentMetadata')
documents = IEDocument.objects.all()
total = documents.count()
objects_to_create = []
logger.info("Creating missing documents metadata objects")
for i, document in enumerate(documents.iterator()):
if i % BULK_SIZE == 0:
logger.info("Created {} out of {}".format(i, total))
if objects_to_create:
IEDocumentMetadata.objects.bulk_create(objects_to_create)
objects_to_create = []
objects_to_create.append(IEDocumentMetadata(
title=document.title,
url=document.url,
items=document.metadata,
document_tmp=document
))
if objects_to_create:
logger.info("Created {} out of {}".format(i+1, total))
IEDocumentMetadata.objects.bulk_create(objects_to_create)
logger.info("Updating documents to point to their metadata objects")
doc_mtds = IEDocumentMetadata.objects.filter(document_tmp__metadata_fk__isnull=True)
total = doc_mtds.count()
for i, doc_mtd in enumerate(doc_mtds):
if i % BULK_SIZE == 0:
logger.info("Updated {} out of {}".format(i, total))
IEDocument.objects.filter(pk=doc_mtd.document_tmp_id).update(metadata_fk=doc_mtd.id)
logger.info("Updated {} out of {}".format(total, total))
class Migration(migrations.Migration):
dependencies = [
('corpus', '0013_create_metadata_model'),
]
operations = [
migrations.RunPython(move_metadata),
]
|
[
"django.db.migrations.RunPython",
"logging.getLogger",
"logging.basicConfig"
] |
[((123, 180), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)-15s %(message)s"""'}), "(format='%(asctime)-15s %(message)s')\n", (142, 180), False, 'import logging\n'), ((190, 217), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (207, 217), False, 'import logging\n'), ((1884, 1919), 'django.db.migrations.RunPython', 'migrations.RunPython', (['move_metadata'], {}), '(move_metadata)\n', (1904, 1919), False, 'from django.db import models, migrations\n')]
|
import pytest
from rest_framework.test import APIClient
from tests.factories import accounts
@pytest.fixture
def api_client():
api = APIClient()
return api
@pytest.fixture
def superuser():
return accounts.superuser()
|
[
"rest_framework.test.APIClient",
"tests.factories.accounts.superuser"
] |
[((140, 151), 'rest_framework.test.APIClient', 'APIClient', ([], {}), '()\n', (149, 151), False, 'from rest_framework.test import APIClient\n'), ((213, 233), 'tests.factories.accounts.superuser', 'accounts.superuser', ([], {}), '()\n', (231, 233), False, 'from tests.factories import accounts\n')]
|
"""
Errors in cosmic shear measurement can lead to a multiplicative factor
scaling the observed shear spectra.
This module scales the measured C_ell to account for that difference,
assuming model values of the multplicative factor m, either per bin or for all bins.
"""
from __future__ import print_function
from builtins import range
from cosmosis.datablock import names, option_section
import sys
warning_note_displayed = False
def setup(options):
# This is an option - can set m_per_bin = T to get
# a different m for each tomographic bin, or F to get
# one global value
m_per_bin = options.get_bool(option_section, "m_per_bin", True)
cl_section = options.get_string(
option_section, "cl_section", default=names.shear_cl)
cross_section = options.get_string(
option_section, "cross_section", default="galaxy_shear_cl")
cal_section = options.get_string(
option_section, "cal_section", default=names.shear_calibration_parameters)
verbose = options.get_bool(option_section, "verbose", False)
print()
print("The shear_m_bias module will use calibration values from {} and look for ".format(cal_section))
print("shear-shear spectra in {} and position-shear in {}".format(cl_section, cross_section))
return m_per_bin, cl_section, cal_section, cross_section, verbose
def get_nbins(block, section):
if block.has_value(section, "nbin_a"):
n_a = block[section, "nbin_a"]
n_b = block[section, "nbin_b"]
else:
n_a = block[section, "nbin"]
n_b = n_a
return n_a, n_b
def calibrate_section(block, section, m_a, m_b, verbose):
n_a = len(m_a)
n_b = len(m_b)
for i in range(n_a):
for j in range(n_b):
# Get existing C_ell
cl_name = "bin_{}_{}".format(i + 1, j + 1)
if block.has_value(section, cl_name):
if verbose:
print("Calibrating {} bin {} {} by (1+{}) * (1+{}) = {}".format(section, i + 1, j + 1, m_a[i], m_b[j], (1 + m_a[i]) * (1 + m_b[j])))
block[section, cl_name] *= (1 + m_a[i]) * (1 + m_b[j])
elif verbose:
print("No {} bin {} {} to calibrate".format(section, i + 1, j + 1))
def calibrate_shear_shear(block, section, cal_section, m_per_bin, verbose):
nbin_a, nbin_b = get_nbins(block, section)
if m_per_bin:
m = [block[cal_section, "m{}".format(i + 1)] for i in range(nbin_a)]
else:
m0 = block[cal_section, "m0"]
m = [m0 for i in range(nbin_a)]
calibrate_section(block, section, m, m, verbose)
def calibrate_position_shear(block, section, cal_section, m_per_bin, verbose):
nbin_a, nbin_b = get_nbins(block, section)
m_a = [0.0 for i in range(nbin_a)]
if m_per_bin:
m_b = [block[cal_section, "m{}".format(i + 1)] for i in range(nbin_b)]
else:
m0 = block[cal_section, "m0"]
m_b = [m0 for i in range(nbin_b)]
calibrate_section(block, section, m_a, m_b, verbose)
def execute(block, config):
m_per_bin, cl_section, cal_section, cross_section, verbose = config
do_auto = block.has_section(cl_section)
do_cross = block.has_section(cross_section)
if do_auto:
calibrate_shear_shear(
block, cl_section, cal_section, m_per_bin, verbose)
if do_cross:
calibrate_position_shear(block, cross_section,
cal_section, m_per_bin, verbose)
if (not do_auto) and (not do_cross):
sys.stderr.write("ERROR: The shear bias calibration module could not find either a section {} or a {} to calibrate.\n".format(
cl_section, cross_section))
sys.stderr.write("The module therefore has nothing to do and considers this an error. You may need to either change settings in the module or the precedng pipeline, or remove the module altogether\n")
return 1
global warning_note_displayed
if not warning_note_displayed:
warning_note_displayed = True
if not do_auto:
sys.stderr.write(
"Note: No shear-shear section {} was found to calibrate. I did calibrate position-shear in {}.\n".format(cl_section, cross_section))
elif not do_cross:
sys.stderr.write(
"Note: No position-shear section {} was found to calibrate. I did calibrate shear-shear in {}.\n".format(cross_section, cl_section))
return 0
|
[
"sys.stderr.write",
"builtins.range"
] |
[((1690, 1700), 'builtins.range', 'range', (['n_a'], {}), '(n_a)\n', (1695, 1700), False, 'from builtins import range\n'), ((1719, 1729), 'builtins.range', 'range', (['n_b'], {}), '(n_b)\n', (1724, 1729), False, 'from builtins import range\n'), ((3673, 3887), 'sys.stderr.write', 'sys.stderr.write', (['"""The module therefore has nothing to do and considers this an error. You may need to either change settings in the module or the precedng pipeline, or remove the module altogether\n"""'], {}), '(\n """The module therefore has nothing to do and considers this an error. You may need to either change settings in the module or the precedng pipeline, or remove the module altogether\n"""\n )\n', (3689, 3887), False, 'import sys\n'), ((2745, 2758), 'builtins.range', 'range', (['nbin_a'], {}), '(nbin_a)\n', (2750, 2758), False, 'from builtins import range\n'), ((2437, 2450), 'builtins.range', 'range', (['nbin_a'], {}), '(nbin_a)\n', (2442, 2450), False, 'from builtins import range\n'), ((2525, 2538), 'builtins.range', 'range', (['nbin_a'], {}), '(nbin_a)\n', (2530, 2538), False, 'from builtins import range\n'), ((2842, 2855), 'builtins.range', 'range', (['nbin_b'], {}), '(nbin_b)\n', (2847, 2855), False, 'from builtins import range\n'), ((2932, 2945), 'builtins.range', 'range', (['nbin_b'], {}), '(nbin_b)\n', (2937, 2945), False, 'from builtins import range\n')]
|
import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from SNN import SNN
import time
import os
from tensorboardX import SummaryWriter
from nettalk import Nettalk
from gesture import Gesture
import argparse
parser = argparse.ArgumentParser(description='train.py')
parser.add_argument('-gpu', type=int, default=3)
parser.add_argument('-seed', type=int, default=3154)
parser.add_argument('-num_epoch', type=int, default=100)
parser.add_argument('-layers', type=int, default=3)
parser.add_argument('-interval', type=int, default=20, help='interval of loss print during training')
parser.add_argument('-bp_mark', type=int)
parser.add_argument('-hidden_size', type=int, default=500)
parser.add_argument('-alpha', type=float, default=0.1)
parser.add_argument('-task', type=str, default='MNIST', choices=['MNIST', 'NETTalk', 'DVSGesture'])
parser.add_argument('-energy', action='store_true')
parser.add_argument('-sbp', action='store_true')
parser.add_argument('-tensorboard', action='store_true')
opt = parser.parse_args()
torch.cuda.set_device(opt.gpu)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
torch.backends.cudnn.deterministic = True
test_scores = []
train_scores = []
if opt.task == 'MNIST':
if opt.tensorboard:
writer = SummaryWriter(comment = '-Mni')
hyperparams = [100, 784, 10, 1e-3, 20, 'MNIST']
train_dataset = dsets.MNIST(root = './data/mnist', train = True, transform = transforms.ToTensor(), download = True)
test_dataset = dsets.MNIST(root = './data/mnist', train = False, transform = transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset = train_dataset, batch_size = hyperparams[0], shuffle = True)
test_loader = torch.utils.data.DataLoader(dataset = test_dataset, batch_size = hyperparams[0], shuffle = False)
elif opt.task == 'NETTalk':
if opt.tensorboard:
writer = SummaryWriter(comment = '-Net')
hyperparams = [5, 189, 26, 1e-3, 20, 'NETTalk']
train_dataset = Nettalk('train', transform=transforms.ToTensor())
test_dataset = Nettalk('test', transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset = train_dataset, batch_size = hyperparams[0], shuffle = True)
test_loader = torch.utils.data.DataLoader(dataset = test_dataset, batch_size = hyperparams[0], shuffle = False)
elif opt.task == 'DVSGesture':
if opt.tensorboard:
writer = SummaryWriter(comment = '-Ges')
hyperparams = [16, 1024, 11, 1e-4, 20, 'DVSGesture']
train_dataset = Gesture('train', transform=transforms.ToTensor())
test_dataset = Gesture('test', transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset = train_dataset, batch_size = hyperparams[0], shuffle = True)
test_loader = torch.utils.data.DataLoader(dataset = test_dataset, batch_size = hyperparams[0], shuffle = False)
print('Dataset: ' + opt.task)
print('Random Seed: {}'.format(opt.seed))
print('Alpha: {}'.format(opt.alpha))
print('Length of Training Dataset: {}'.format(len(train_dataset)))
print('Length of Test Dataset: {}'.format(len(test_dataset)))
print('Build Model')
model = SNN(hyperparams, opt.hidden_size, opt.layers, opt.sbp, opt.bp_mark)
model.cuda()
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=hyperparams[3])
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)
cossim = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
sigmoid = torch.nn.Sigmoid()
def train(epoch):
model.train()
print('Train Epoch ' + str(epoch + 1))
start_time = time.time()
total_loss = 0
for i, (images, labels) in enumerate(train_loader):
if images.size()[0] == hyperparams[0]:
optimizer.zero_grad()
images = Variable(images.cuda())
if opt.task == 'MNIST':
one_hot = torch.zeros(hyperparams[0], hyperparams[2]).scatter(1, labels.unsqueeze(1), 1)
labels = Variable(one_hot.cuda())
elif opt.task == 'NETTalk':
labels = labels.float()
labels = Variable(labels.cuda())
elif opt.task == 'DVSGesture':
labels = labels.float()
labels = Variable(labels.cuda())
outputs, e_loss = model(images, labels)
c_loss = loss_function(outputs, labels)
loss = c_loss + e_loss * opt.alpha if opt.energy else c_loss
total_loss += float(loss)
loss.backward(retain_graph = True)
optimizer.step()
if (i + 1) % (len(train_dataset) // (hyperparams[0] * opt.interval)) == 0:
print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.6f, Time: %.2f' % (epoch + 1, opt.num_epoch, i + 1, len(train_dataset) // hyperparams[0], total_loss / (hyperparams[0] * opt.interval), time.time() - start_time))
xs = epoch * opt.interval + ((i + 1) // (len(train_dataset) // (hyperparams[0] * opt.interval)))
if opt.tensorboard:
writer.add_scalar('loss_train', total_loss / (hyperparams[0] * opt.interval), xs)
writer.add_scalar('time_train', time.time() - start_time, xs)
start_time = time.time()
total_loss = 0
scheduler.step()
def eval(epoch, if_test):
model.eval()
correct = 0
total = 0
if if_test:
print('Test Epoch ' + str(epoch + 1))
loader = test_loader
test_or_train = 'test'
else:
loader = train_loader
test_or_train = 'train'
if opt.task == 'MNIST':
for i, (images, labels) in enumerate(loader):
images = Variable(images.cuda())
labels = Variable(labels.cuda())
outputs, _ = model(images)
total += labels.size(0)
pred = outputs.max(1)[1]
correct += (pred == labels).sum()
correct = correct.item()
elif opt.task == 'NETTalk':
for i, (images, labels) in enumerate(loader):
images = Variable(images.cuda())
labels = Variable(labels.cuda())
outputs, _ = model(images, labels)
total += 1
if outputs.max() >= 0.05:
pos = []
for label in range(26):
if (labels[0, label] != 0) or (outputs[0, label] != 0):
pos.append(label)
tem_out = torch.zeros((1, len(pos)))
tem_lab = torch.zeros((1, len(pos)))
for label in range(len(pos)):
tem_out[0, label] = outputs[0, pos[label]]
tem_lab[0, label] = labels[0, pos[label]]
correct += cossim(tem_out, tem_lab)
else:
correct += 0
elif opt.task == 'DVSGesture':
for i, (images, labels) in enumerate(loader):
images = Variable(images.cuda())
labels = Variable(labels.cuda())
outputs, _ = model(images, labels)
total += labels.size(0)
pred = outputs.max(1)[1]
t_label = labels.max(1)[1]
correct += (pred == t_label).sum()
correct = correct.item()
acc = 100.0 * correct / total
print(test_or_train + ' correct: %d accuracy: %.2f%%' % (correct, acc))
if opt.tensorboard:
writer.add_scalar('acc_' + test_or_train, acc, epoch + 1)
if if_test:
test_scores.append(acc)
else:
train_scores.append(acc)
def main():
for epoch in range(opt.num_epoch):
train(epoch)
if (epoch + 1) % 1 == 0:
eval(epoch, if_test = True)
if (epoch + 1) % 20 == 0:
print('Best Test Accuracy in %d: %.2f%%' % (epoch + 1, max(test_scores)))
avg = (test_scores[-1] + test_scores[-2] + test_scores[-3] + test_scores[-4] + test_scores[-5] + test_scores[-6] + test_scores[-7] + test_scores[-8] + test_scores[-9] + test_scores[-10]) / 10
print('Average of Last Ten Test Accuracy : %.2f%%' % (avg))
if opt.tensorboard:
writer.close()
if __name__ == '__main__':
main()
|
[
"torch.nn.MSELoss",
"tensorboardX.SummaryWriter",
"torch.optim.lr_scheduler.StepLR",
"argparse.ArgumentParser",
"SNN.SNN",
"torch.utils.data.DataLoader",
"torch.manual_seed",
"torch.zeros",
"time.time",
"torchvision.transforms.ToTensor",
"torch.cuda.manual_seed_all",
"torch.nn.CosineSimilarity",
"torch.cuda.set_device",
"torch.nn.Sigmoid"
] |
[((315, 362), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train.py"""'}), "(description='train.py')\n", (338, 362), False, 'import argparse\n'), ((1119, 1149), 'torch.cuda.set_device', 'torch.cuda.set_device', (['opt.gpu'], {}), '(opt.gpu)\n', (1140, 1149), False, 'import torch\n'), ((1150, 1177), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (1167, 1177), False, 'import torch\n'), ((1178, 1214), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['opt.seed'], {}), '(opt.seed)\n', (1204, 1214), False, 'import torch\n'), ((3226, 3293), 'SNN.SNN', 'SNN', (['hyperparams', 'opt.hidden_size', 'opt.layers', 'opt.sbp', 'opt.bp_mark'], {}), '(hyperparams, opt.hidden_size, opt.layers, opt.sbp, opt.bp_mark)\n', (3229, 3293), False, 'from SNN import SNN\n'), ((3323, 3335), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3333, 3335), True, 'import torch.nn as nn\n'), ((3416, 3483), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(50)', 'gamma': '(0.1)'}), '(optimizer, step_size=50, gamma=0.1)\n', (3447, 3483), False, 'import torch\n'), ((3493, 3536), 'torch.nn.CosineSimilarity', 'torch.nn.CosineSimilarity', ([], {'dim': '(1)', 'eps': '(1e-06)'}), '(dim=1, eps=1e-06)\n', (3518, 3536), False, 'import torch\n'), ((3546, 3564), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (3562, 3564), False, 'import torch\n'), ((1687, 1783), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'hyperparams[0]', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=hyperparams[0\n ], shuffle=True)\n', (1714, 1783), False, 'import torch\n'), ((1803, 1898), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'hyperparams[0]', 'shuffle': '(False)'}), '(dataset=test_dataset, batch_size=hyperparams[0],\n shuffle=False)\n', (1830, 1898), False, 'import torch\n'), ((3662, 3673), 'time.time', 'time.time', ([], {}), '()\n', (3671, 3673), False, 'import time\n'), ((1359, 1388), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'comment': '"""-Mni"""'}), "(comment='-Mni')\n", (1372, 1388), False, 'from tensorboardX import SummaryWriter\n'), ((2211, 2307), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'hyperparams[0]', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=hyperparams[0\n ], shuffle=True)\n', (2238, 2307), False, 'import torch\n'), ((2327, 2422), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'hyperparams[0]', 'shuffle': '(False)'}), '(dataset=test_dataset, batch_size=hyperparams[0],\n shuffle=False)\n', (2354, 2422), False, 'import torch\n'), ((1524, 1545), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1543, 1545), True, 'import torchvision.transforms as transforms\n'), ((1645, 1666), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1664, 1666), True, 'import torchvision.transforms as transforms\n'), ((1970, 1999), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'comment': '"""-Net"""'}), "(comment='-Net')\n", (1983, 1999), False, 'from tensorboardX import SummaryWriter\n'), ((2743, 2839), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_dataset', 'batch_size': 'hyperparams[0]', 'shuffle': '(True)'}), '(dataset=train_dataset, batch_size=hyperparams[0\n ], shuffle=True)\n', (2770, 2839), False, 'import torch\n'), ((2859, 2954), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'test_dataset', 'batch_size': 'hyperparams[0]', 'shuffle': '(False)'}), '(dataset=test_dataset, batch_size=hyperparams[0],\n shuffle=False)\n', (2886, 2954), False, 'import torch\n'), ((2101, 2122), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2120, 2122), True, 'import torchvision.transforms as transforms\n'), ((2169, 2190), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2188, 2190), True, 'import torchvision.transforms as transforms\n'), ((2497, 2526), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'comment': '"""-Ges"""'}), "(comment='-Ges')\n", (2510, 2526), False, 'from tensorboardX import SummaryWriter\n'), ((5295, 5306), 'time.time', 'time.time', ([], {}), '()\n', (5304, 5306), False, 'import time\n'), ((2633, 2654), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2652, 2654), True, 'import torchvision.transforms as transforms\n'), ((2701, 2722), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2720, 2722), True, 'import torchvision.transforms as transforms\n'), ((3937, 3980), 'torch.zeros', 'torch.zeros', (['hyperparams[0]', 'hyperparams[2]'], {}), '(hyperparams[0], hyperparams[2])\n', (3948, 3980), False, 'import torch\n'), ((5236, 5247), 'time.time', 'time.time', ([], {}), '()\n', (5245, 5247), False, 'import time\n'), ((4906, 4917), 'time.time', 'time.time', ([], {}), '()\n', (4915, 4917), False, 'import time\n')]
|
import csv
import subprocess
from itertools import product
import textacy
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from textacy.text_utils import detect_language
from src.utils import preprocess
if __name__ == '__main__':
EMOTION_DATAPATH = 'data/processed/emotions_full.csv'
raw_data = []
with open(EMOTION_DATAPATH) as data_file:
reader = csv.reader(data_file, quoting=csv.QUOTE_MINIMAL)
reader.__next__()
for i, line in enumerate(reader):
preprocessed_line = preprocess(line[1])
if detect_language(preprocessed_line) == 'en':
doc = textacy.Doc(preprocessed_line, lang='en_core_web_lg')
raw_data.append((doc, line[2]))
texts, labels = zip(*raw_data)
label_encoder = LabelEncoder()
encoded_labels = label_encoder.fit_transform(labels)
x_train, x_test, y_train, y_test = \
train_test_split(texts, encoded_labels, shuffle=True, stratify=encoded_labels,
random_state=42, test_size=0.2)
MODELS_TEST_RESULTS = 'reports/tune_test_scores.csv'
FASTTEXT_INPUT_FILE = 'data/processed/fasttext_input.txt'
FASTTEXT_TEST_FILE = 'data/processed/fasttext_test.txt'
FASTTEXT_FULL_FILE = 'data/processed/fasttext_full.txt'
MODEL_PATH = 'models/emotion_classification/fasttext/model'
label_prefix = '__label__'
with open(FASTTEXT_INPUT_FILE, 'w') as input_file:
for x, y in zip(x_train, y_train):
input_file.write(' , '.join([label_prefix + str(y), x.text]) + '\n')
with open(FASTTEXT_TEST_FILE, 'w') as input_file:
for x, y in zip(x_test, y_test):
input_file.write(x.text.replace('\n', '') + '\n')
tested_dims = [200, 300, 500]
tested_lrs = [0.1, 0.01, 0.01]
tested_epochs = [10, 20, 50]
tested_min_counts = [1]
lr_update_rates = [100, 100000, 1000000]
negs = [5, 50, 100]
word_ngrams = 1
combinations = product(tested_dims, tested_lrs, tested_epochs, tested_min_counts, lr_update_rates, negs)
thread = str(12)
best_params = None
best_score = 0
n_combinations = len(tested_dims) * len(tested_lrs) * len(tested_epochs) * len(tested_min_counts) * \
len(lr_update_rates) * len(negs)
for i, (dim, lr, epoch, min_count, lr_update_rate, neg) in enumerate(combinations):
print("%d / %d" % (i, n_combinations))
subprocess.call(['./fastText-0.1.0/fasttext', 'supervised', '-input', FASTTEXT_INPUT_FILE,
'-output', MODEL_PATH, '-dim', str(dim), '-lr', str(lr), '-epoch', str(epoch),
'-label', label_prefix, '-wordNgrams', str(word_ngrams), '-minCount', str(min_count),
'-thread', thread, '-lrUpdateRate', str(lr_update_rate), '-neg', str(neg)])
test_preds = subprocess.check_output(['./fastText-0.1.0/fasttext', 'predict', MODEL_PATH + '.bin',
FASTTEXT_TEST_FILE])
preds = [int(pred[-1]) for pred in test_preds.decode("utf-8").split('\n') if pred != '']
score = f1_score(y_test, preds, average='micro')
accuracy = accuracy_score(y_test, preds)
if best_score < score:
best_score = score
best_params = {"dim": dim, "lr": lr, "epochs": epoch, "min_count": min_count,
"lr_update_rate": lr_update_rate, "neg": neg, "accuracy": accuracy}
with open(MODELS_TEST_RESULTS, "a") as test_scores_table:
writer = csv.writer(test_scores_table, quoting=csv.QUOTE_MINIMAL)
writer.writerow(["FT", best_score, '', str(best_params)])
|
[
"csv.reader",
"csv.writer",
"sklearn.model_selection.train_test_split",
"subprocess.check_output",
"sklearn.metrics.accuracy_score",
"src.utils.preprocess",
"textacy.Doc",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.f1_score",
"itertools.product",
"textacy.text_utils.detect_language"
] |
[((911, 925), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (923, 925), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1032, 1147), 'sklearn.model_selection.train_test_split', 'train_test_split', (['texts', 'encoded_labels'], {'shuffle': '(True)', 'stratify': 'encoded_labels', 'random_state': '(42)', 'test_size': '(0.2)'}), '(texts, encoded_labels, shuffle=True, stratify=\n encoded_labels, random_state=42, test_size=0.2)\n', (1048, 1147), False, 'from sklearn.model_selection import train_test_split\n'), ((2080, 2173), 'itertools.product', 'product', (['tested_dims', 'tested_lrs', 'tested_epochs', 'tested_min_counts', 'lr_update_rates', 'negs'], {}), '(tested_dims, tested_lrs, tested_epochs, tested_min_counts,\n lr_update_rates, negs)\n', (2087, 2173), False, 'from itertools import product\n'), ((503, 551), 'csv.reader', 'csv.reader', (['data_file'], {'quoting': 'csv.QUOTE_MINIMAL'}), '(data_file, quoting=csv.QUOTE_MINIMAL)\n', (513, 551), False, 'import csv\n'), ((2966, 3076), 'subprocess.check_output', 'subprocess.check_output', (["['./fastText-0.1.0/fasttext', 'predict', MODEL_PATH + '.bin',\n FASTTEXT_TEST_FILE]"], {}), "(['./fastText-0.1.0/fasttext', 'predict', MODEL_PATH +\n '.bin', FASTTEXT_TEST_FILE])\n", (2989, 3076), False, 'import subprocess\n'), ((3232, 3272), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'preds'], {'average': '"""micro"""'}), "(y_test, preds, average='micro')\n", (3240, 3272), False, 'from sklearn.metrics import f1_score\n'), ((3292, 3321), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'preds'], {}), '(y_test, preds)\n', (3306, 3321), False, 'from sklearn.metrics import accuracy_score\n'), ((3649, 3705), 'csv.writer', 'csv.writer', (['test_scores_table'], {'quoting': 'csv.QUOTE_MINIMAL'}), '(test_scores_table, quoting=csv.QUOTE_MINIMAL)\n', (3659, 3705), False, 'import csv\n'), ((652, 671), 'src.utils.preprocess', 'preprocess', (['line[1]'], {}), '(line[1])\n', (662, 671), False, 'from src.utils import preprocess\n'), ((687, 721), 'textacy.text_utils.detect_language', 'detect_language', (['preprocessed_line'], {}), '(preprocessed_line)\n', (702, 721), False, 'from textacy.text_utils import detect_language\n'), ((753, 806), 'textacy.Doc', 'textacy.Doc', (['preprocessed_line'], {'lang': '"""en_core_web_lg"""'}), "(preprocessed_line, lang='en_core_web_lg')\n", (764, 806), False, 'import textacy\n')]
|
from __future__ import absolute_import, division, print_function
import torch
import warnings
from tqdm import tqdm
import pathlib
from scipy import linalg
import tensorflow as tf
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def check_or_download_inception(inception_path):
''' Checks if the path to the inception file is valid, or downloads
the file if it is not present. '''
INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
if inception_path is None:
inception_path = '/tmp'
inception_path = pathlib.Path(inception_path)
model_file = inception_path / 'classify_image_graph_def.pb'
if not model_file.exists():
print("Downloading Inception model")
from urllib import request
import tarfile
fn, _ = request.urlretrieve(INCEPTION_URL)
with tarfile.open(fn, mode='r') as f:
f.extract('classify_image_graph_def.pb', str(model_file.parent))
return str(model_file)
def create_inception_graph(pth):
"""Creates a graph from saved GraphDef file."""
# Creates graph from saved graph_def.pb.
with tf.io.gfile.GFile(pth, 'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='FID_Inception_Net')
def calculate_activation_statistics(images,
sess,
batch_size=50,
verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 255.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the incption model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the incption model.
"""
act = get_activations(images, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
# code for handling inception net derived from
# https://github.com/openai/improved-gan/blob/master/inception_score/model.py
def _get_inception_layer(sess):
"""Prepares inception net for batched usage and returns pool_3 layer. """
layername = 'FID_Inception_Net/pool_3:0'
pool3 = sess.graph.get_tensor_by_name(layername)
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
if shape._dims != []:
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.__dict__['_shape_val'] = tf.TensorShape(new_shape)
return pool3
# -------------------------------------------------------------------------------
def get_activations(images, sess, batch_size=200, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 256.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the disposable hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- A numpy array of dimension (num images, 2048) that contains the
activations of the given tensor when feeding inception with the query tensor.
"""
inception_layer = _get_inception_layer(sess)
n_images = images.shape[0]
if batch_size > n_images:
print(
"warning: batch size is bigger than the data size. setting batch size to data size"
)
batch_size = n_images
n_batches = n_images // batch_size
pred_arr = np.empty((n_images, 2048))
for i in tqdm(range(n_batches)):
if verbose:
print("\rPropagating batch %d/%d" % (i + 1, n_batches),
end="",
flush=True)
start = i * batch_size
if start + batch_size < n_images:
end = start + batch_size
else:
end = n_images
batch = images[start:end]
pred = sess.run(inception_layer,
{'FID_Inception_Net/ExpandDims:0': batch})
pred_arr[start:end] = pred.reshape(batch_size, -1)
if verbose:
print(" done")
return pred_arr
# -------------------------------------------------------------------------------
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by <NAME>.
Params:
-- mu1 : Numpy array containing the activations of the pool_3 layer of the
inception net ( like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations of the pool_3 layer, precalcualted
on an representive data set.
-- sigma1: The covariance matrix over activations of the pool_3 layer for
generated samples.
-- sigma2: The covariance matrix over activations of the pool_3 layer,
precalcualted on an representive data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"
diff = mu1 - mu2
# product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps
warnings.warn(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(
sigma2) - 2 * tr_covmean
def pt_to_np(imgs):
'''normalizes pytorch image in [-1, 1] to [0, 255]'''
normalized = [((img / 2 + 0.5) * 255).clamp(0, 255) for img in imgs]
return np.array([img.permute(1, 2, 0).numpy() for img in normalized])
def compute_fid_given_images(fake_images, real_images):
'''requires that the image batches are numpy format, normalized to 0, 255'''
inception_path = check_or_download_inception(None)
create_inception_graph(inception_path)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if isinstance(fake_images, tuple):
m1, s1 = fake_images
else:
m1, s1 = calculate_activation_statistics(fake_images, sess)
if isinstance(real_images, tuple):
m2, s2 = real_images
else:
m2, s2 = calculate_activation_statistics(real_images, sess)
return calculate_frechet_distance(m1, s1, m2, s2)
def compute_fid_given_path(path):
with np.load(path) as data:
fake_imgs = data['fake']
real_imgs = data['real']
return compute_fid_given_images(fake_imgs, real_imgs)
def load_from_path(source):
root = '/data/vision/torralba/ganprojects/placesgan/tracer/utils/fid_stats/'
path = os.path.join(root, f'{source}_stats.npz')
if os.path.exists(path):
print('Loading statistics from ', path)
with np.load(path) as data:
return data['m'], data['s']
else:
print("Stats not found in path", path)
exit()
def compute_fid(source1, source2):
if isinstance(source1, str):
source1 = load_from_path(source1)
if isinstance(source1, torch.Tensor):
source1 = pt_to_np(source1)
if isinstance(source2, str):
source2 = load_from_path(source2)
if isinstance(source2, torch.Tensor):
source2 = pt_to_np(source2)
return compute_fid_given_images(source1, source2)
if __name__ == '__main__':
import argparse
from PIL import Image
from torchvision import transforms
parser = argparse.ArgumentParser()
parser.add_argument('--source')
parser.add_argument('--target')
args = parser.parse_args()
transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
images1 = []
for file_name in tqdm(os.listdir(args.source)):
if file_name.lower().endswith(('.png', 'jpeg', '.jpg')):
path = os.path.join(args.source, file_name)
images1.append(transform(Image.open(path).convert('RGB')))
images1 = torch.stack(images1)
images2 = []
for file_name in tqdm(os.listdir(args.source)):
if file_name.lower().endswith(('.png', 'jpeg', '.jpg')):
path = os.path.join(args.source, file_name)
images2.append(transform(Image.open(path).convert('RGB')))
images2 = torch.stack(images2)
result = compute_fid(images1, images2)
print(result)
with open('fid_results.txt', 'a+') as f:
f.write(args.source + args.target + ':\n')
f.write(str(result) + '\n')
|
[
"numpy.trace",
"numpy.load",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.empty",
"pathlib.Path",
"numpy.mean",
"torchvision.transforms.Normalize",
"os.path.join",
"numpy.atleast_2d",
"numpy.eye",
"os.path.exists",
"tensorflow.TensorShape",
"numpy.isfinite",
"torchvision.transforms.ToTensor",
"tarfile.open",
"tensorflow.compat.v1.GraphDef",
"numpy.cov",
"numpy.diagonal",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"urllib.request.urlretrieve",
"tensorflow.import_graph_def",
"os.listdir",
"torchvision.transforms.Resize",
"numpy.iscomplexobj",
"torch.stack",
"PIL.Image.open",
"warnings.warn",
"numpy.atleast_1d",
"tensorflow.io.gfile.GFile"
] |
[((600, 628), 'pathlib.Path', 'pathlib.Path', (['inception_path'], {}), '(inception_path)\n', (612, 628), False, 'import pathlib\n'), ((2414, 2434), 'numpy.mean', 'np.mean', (['act'], {'axis': '(0)'}), '(act, axis=0)\n', (2421, 2434), True, 'import numpy as np\n'), ((2447, 2472), 'numpy.cov', 'np.cov', (['act'], {'rowvar': '(False)'}), '(act, rowvar=False)\n', (2453, 2472), True, 'import numpy as np\n'), ((4578, 4604), 'numpy.empty', 'np.empty', (['(n_images, 2048)'], {}), '((n_images, 2048))\n', (4586, 4604), True, 'import numpy as np\n'), ((6259, 6277), 'numpy.atleast_1d', 'np.atleast_1d', (['mu1'], {}), '(mu1)\n', (6272, 6277), True, 'import numpy as np\n'), ((6288, 6306), 'numpy.atleast_1d', 'np.atleast_1d', (['mu2'], {}), '(mu2)\n', (6301, 6306), True, 'import numpy as np\n'), ((6321, 6342), 'numpy.atleast_2d', 'np.atleast_2d', (['sigma1'], {}), '(sigma1)\n', (6334, 6342), True, 'import numpy as np\n'), ((6356, 6377), 'numpy.atleast_2d', 'np.atleast_2d', (['sigma2'], {}), '(sigma2)\n', (6369, 6377), True, 'import numpy as np\n'), ((7049, 7073), 'numpy.iscomplexobj', 'np.iscomplexobj', (['covmean'], {}), '(covmean)\n', (7064, 7073), True, 'import numpy as np\n'), ((7303, 7320), 'numpy.trace', 'np.trace', (['covmean'], {}), '(covmean)\n', (7311, 7320), True, 'import numpy as np\n'), ((8651, 8692), 'os.path.join', 'os.path.join', (['root', 'f"""{source}_stats.npz"""'], {}), "(root, f'{source}_stats.npz')\n", (8663, 8692), False, 'import os\n'), ((8700, 8720), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8714, 8720), False, 'import os\n'), ((9447, 9472), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9470, 9472), False, 'import argparse\n'), ((10031, 10051), 'torch.stack', 'torch.stack', (['images1'], {}), '(images1)\n', (10042, 10051), False, 'import torch\n'), ((10328, 10348), 'torch.stack', 'torch.stack', (['images2'], {}), '(images2)\n', (10339, 10348), False, 'import torch\n'), ((844, 878), 'urllib.request.urlretrieve', 'request.urlretrieve', (['INCEPTION_URL'], {}), '(INCEPTION_URL)\n', (863, 878), False, 'from urllib import request\n'), ((1170, 1198), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['pth', '"""rb"""'], {}), "(pth, 'rb')\n", (1187, 1198), True, 'import tensorflow as tf\n'), ((1225, 1248), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ([], {}), '()\n', (1246, 1248), True, 'import tensorflow as tf\n'), ((1305, 1361), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '"""FID_Inception_Net"""'}), "(graph_def, name='FID_Inception_Net')\n", (1324, 1361), True, 'import tensorflow as tf\n'), ((6844, 6862), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (6857, 6862), False, 'import warnings\n'), ((7885, 7897), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7895, 7897), True, 'import tensorflow as tf\n'), ((8382, 8395), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (8389, 8395), True, 'import numpy as np\n'), ((9799, 9822), 'os.listdir', 'os.listdir', (['args.source'], {}), '(args.source)\n', (9809, 9822), False, 'import os\n'), ((10096, 10119), 'os.listdir', 'os.listdir', (['args.source'], {}), '(args.source)\n', (10106, 10119), False, 'import os\n'), ((892, 918), 'tarfile.open', 'tarfile.open', (['fn'], {'mode': '"""r"""'}), "(fn, mode='r')\n", (904, 918), False, 'import tarfile\n'), ((6880, 6903), 'numpy.eye', 'np.eye', (['sigma1.shape[0]'], {}), '(sigma1.shape[0])\n', (6886, 6903), True, 'import numpy as np\n'), ((7369, 7385), 'numpy.trace', 'np.trace', (['sigma2'], {}), '(sigma2)\n', (7377, 7385), True, 'import numpy as np\n'), ((7924, 7957), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7955, 7957), True, 'import tensorflow as tf\n'), ((8783, 8796), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (8790, 8796), True, 'import numpy as np\n'), ((9622, 9651), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (9639, 9651), False, 'from torchvision import transforms\n'), ((9661, 9682), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (9680, 9682), False, 'from torchvision import transforms\n'), ((9692, 9746), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (9712, 9746), False, 'from torchvision import transforms\n'), ((9909, 9945), 'os.path.join', 'os.path.join', (['args.source', 'file_name'], {}), '(args.source, file_name)\n', (9921, 9945), False, 'import os\n'), ((10206, 10242), 'os.path.join', 'os.path.join', (['args.source', 'file_name'], {}), '(args.source, file_name)\n', (10218, 10242), False, 'import os\n'), ((3333, 3358), 'tensorflow.TensorShape', 'tf.TensorShape', (['new_shape'], {}), '(new_shape)\n', (3347, 3358), True, 'import tensorflow as tf\n'), ((6704, 6724), 'numpy.isfinite', 'np.isfinite', (['covmean'], {}), '(covmean)\n', (6715, 6724), True, 'import numpy as np\n'), ((7167, 7187), 'numpy.abs', 'np.abs', (['covmean.imag'], {}), '(covmean.imag)\n', (7173, 7187), True, 'import numpy as np\n'), ((7350, 7366), 'numpy.trace', 'np.trace', (['sigma1'], {}), '(sigma1)\n', (7358, 7366), True, 'import numpy as np\n'), ((7102, 7122), 'numpy.diagonal', 'np.diagonal', (['covmean'], {}), '(covmean)\n', (7113, 7122), True, 'import numpy as np\n'), ((9983, 9999), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (9993, 9999), False, 'from PIL import Image\n'), ((10280, 10296), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (10290, 10296), False, 'from PIL import Image\n')]
|
import ast
import json
import pickle
import ujson
import collections
import numpy as np
from chord_labels import parse_chord
from progressbar import ProgressBar, Bar, Percentage, AdaptiveETA, Counter
print("Opening files")
with open('dataset_chords.json', 'r') as values:
formatted_chords = ujson.load(values)
with open('dataset_chroma.pickle', 'rb') as chroma:
formatted_chroma = pickle.load(chroma)
print("Files Opened\n")
blank = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
blank12 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
values = collections.OrderedDict()
cleaned_chroma = []
cleaned_chords = []
final_chroma = {}
final_chords = {}
key_binary_pairs = {}
def slice_vals(chroma_vals, chord_vals, slice_size):
num_slices = int(len(chroma_vals)/slice_size)
sliced_chroma = []
sliced_chords = []
for i in range(num_slices):
sliced_chroma.append(chroma_vals[i*slice_size:(i+1)*100])
sliced_chords.append(chord_vals[i*100:(i+1)*100])
remaining_chroma = chroma_vals[num_slices*100:]
remaining_chords = chord_vals[num_slices*100:]
for i in range(100-len(remaining_chroma)):
remaining_chroma.append(blank)
remaining_chords.append(blank12)
if len(remaining_chroma) > 0:
sliced_chroma.append(remaining_chroma)
sliced_chords.append(remaining_chords)
del remaining_chords
del remaining_chroma
return sliced_chroma, sliced_chords
with open('file_ids.txt', 'r') as idList:
print("--CLEANING FILES: 890 TODO--\n")
progress_bar = ProgressBar(widgets=['PROCESSED: ', Counter(), '/890 ', Bar('>'), Percentage(), ' --- ', AdaptiveETA()], maxval=891)
progress_bar.start()
for i, id in enumerate(idList):
progress_bar.update(value=i)
id = int(id.strip('\n'))
chord_iter = iter(formatted_chords[str(id)].keys())
curr_chord = next(chord_iter)
curr_chord_tuple = ast.literal_eval(curr_chord)
in_chord = False
cleaned_chroma = []
cleaned_chords = []
chord_nums = 0
for i, time in enumerate(formatted_chroma[id].keys()):
if curr_chord_tuple[0] <= time <= curr_chord_tuple[1] and formatted_chords[str(id)][curr_chord] != 'X':
curr_chord_binary = parse_chord(formatted_chords[str(id)][curr_chord]).tones_binary
print(curr_chord_binary)
cleaned_chords.append(curr_chord_binary)
cleaned_chroma.append(formatted_chroma[id][time])
key_binary_pairs[tuple(curr_chord_binary)] = formatted_chords[str(id)][curr_chord]
chord_nums += 1
in_chord = True
elif in_chord:
try:
in_chord = False
cleaned_chords.append(blank12)
cleaned_chroma.append(formatted_chroma[id][time])
curr_chord = next(chord_iter)
curr_chord_tuple = ast.literal_eval(curr_chord)
except StopIteration:
pass
else:
cleaned_chords.append(blank12)
cleaned_chroma.append(formatted_chroma[id][time])
if time > curr_chord_tuple[1]:
try:
in_chord = False
cleaned_chords.append(blank12)
cleaned_chroma.append(formatted_chroma[id][time])
curr_chord = next(chord_iter)
curr_chord_tuple = ast.literal_eval(curr_chord)
except StopIteration:
pass
sliced = slice_vals(cleaned_chroma, cleaned_chords, 100)
final_chroma[int(id)] = sliced[0]
final_chords[int(id)] = sliced[1]
del sliced
key_binary_pairs[tuple(blank12)] = 'None'
print('\n')
print("<------------------------------------------------------->")
print("<------------------COUNTING KEYS------------------------>")
print("<------------------------------------------------------->")
hold_x = []
hold_y = []
print(len(final_chroma[12]))
with open("file_ids_subset.txt", 'r') as idFile:
for id in idFile:
id = int(id.strip('\n'))
for thing1 in final_chroma[id]:
hold_x.append(thing1)
for thing2 in final_chords[id]:
hold_y.append(thing2)
# samples x 100 x 24
print(hold_x[0][99][23])
cleaned_x = np.array(hold_x)
cleaned_y = np.array(hold_y)
# format in [file id][chroma (0) or chord (1)][slice num to look at (per 100)][index within slice]
print(cleaned_x.shape)
print(cleaned_y.shape)
print("NUM OBJECTS: " + str(len(final_chords)))
# with open("cleaned_chroma.pickle", 'wb') as file:
# dill.dump(cleaned_chroma, file, protocol=pickle.HIGHEST_PROTOCOL)
# del cleaned_chroma
#
# with open("cleaned_chords.pickle", 'wb') as file:
# dill.dump(cleaned_chords, file, protocol=pickle.HIGHEST_PROTOCOL)
# del cleaned_chords
print("saving chroma")
with open("cleaned_x.json", 'w') as file:
ujson.dump(hold_x, file)
print("saving chords")
with open("cleaned_y.json", 'w') as file:
ujson.dump(hold_y, file)
print("saving pairs")
with open("key_binary_pairs.json", 'w') as file:
ujson.dump(key_binary_pairs, file)
print("DONE SAVING")
|
[
"ast.literal_eval",
"progressbar.Counter",
"ujson.dump",
"ujson.load",
"progressbar.Bar",
"progressbar.Percentage",
"progressbar.AdaptiveETA",
"pickle.load",
"numpy.array",
"collections.OrderedDict"
] |
[((576, 601), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (599, 601), False, 'import collections\n'), ((4438, 4454), 'numpy.array', 'np.array', (['hold_x'], {}), '(hold_x)\n', (4446, 4454), True, 'import numpy as np\n'), ((4467, 4483), 'numpy.array', 'np.array', (['hold_y'], {}), '(hold_y)\n', (4475, 4483), True, 'import numpy as np\n'), ((297, 315), 'ujson.load', 'ujson.load', (['values'], {}), '(values)\n', (307, 315), False, 'import ujson\n'), ((392, 411), 'pickle.load', 'pickle.load', (['chroma'], {}), '(chroma)\n', (403, 411), False, 'import pickle\n'), ((5051, 5075), 'ujson.dump', 'ujson.dump', (['hold_x', 'file'], {}), '(hold_x, file)\n', (5061, 5075), False, 'import ujson\n'), ((5147, 5171), 'ujson.dump', 'ujson.dump', (['hold_y', 'file'], {}), '(hold_y, file)\n', (5157, 5171), False, 'import ujson\n'), ((5249, 5283), 'ujson.dump', 'ujson.dump', (['key_binary_pairs', 'file'], {}), '(key_binary_pairs, file)\n', (5259, 5283), False, 'import ujson\n'), ((1944, 1972), 'ast.literal_eval', 'ast.literal_eval', (['curr_chord'], {}), '(curr_chord)\n', (1960, 1972), False, 'import ast\n'), ((1604, 1613), 'progressbar.Counter', 'Counter', ([], {}), '()\n', (1611, 1613), False, 'from progressbar import ProgressBar, Bar, Percentage, AdaptiveETA, Counter\n'), ((1626, 1634), 'progressbar.Bar', 'Bar', (['""">"""'], {}), "('>')\n", (1629, 1634), False, 'from progressbar import ProgressBar, Bar, Percentage, AdaptiveETA, Counter\n'), ((1636, 1648), 'progressbar.Percentage', 'Percentage', ([], {}), '()\n', (1646, 1648), False, 'from progressbar import ProgressBar, Bar, Percentage, AdaptiveETA, Counter\n'), ((1659, 1672), 'progressbar.AdaptiveETA', 'AdaptiveETA', ([], {}), '()\n', (1670, 1672), False, 'from progressbar import ProgressBar, Bar, Percentage, AdaptiveETA, Counter\n'), ((2980, 3008), 'ast.literal_eval', 'ast.literal_eval', (['curr_chord'], {}), '(curr_chord)\n', (2996, 3008), False, 'import ast\n'), ((3542, 3570), 'ast.literal_eval', 'ast.literal_eval', (['curr_chord'], {}), '(curr_chord)\n', (3558, 3570), False, 'import ast\n')]
|
#!/usr/bin/env python3
import math
def calc_sqr_distance(a, b):
vx = a[0] - b[0]
vy = a[1] - b[1]
return vx * vx + vy * vy
def find_nearest_distance(uv, max_size, random_points):
xf, xi = math.modf(uv[0])
yf, yi = math.modf(uv[1])
min_sqr_distance = float("inf")
for y_offset in [-1, 0, 1]:
for x_offset in [-1, 0, 1]:
x = int((xi + x_offset + max_size) % max_size)
y = int((yi + y_offset + max_size) % max_size)
idx = int(x * max_size + y)
p = random_points[idx]
other = (x_offset + xi + p[0],
y_offset + yi + p[1])
sqr_distance = calc_sqr_distance(uv, other)
if sqr_distance < min_sqr_distance:
min_sqr_distance = sqr_distance
return math.sqrt(min_sqr_distance)
if __name__ == "__main__":
import argparse
import random
from random_point import gen_random_points
from uv import gen_uv
parser = argparse.ArgumentParser()
parser.add_argument("size", type=int, help="size of a texture. power of 2")
parser.add_argument("-r", "--random_seed", type=int, help="random seed")
parser.add_argument("-s", "--scale_factor", type=float, default=1.0, help="scale factor")
args = parser.parse_args()
scale_factor = args.scale_factor
random.seed(args.random_seed)
random_points = gen_random_points(int(scale_factor*scale_factor), 1.0, random)
uvs = gen_uv(args.size, args.size, scale_factor)
for uv in uvs:
nearest_distance = find_nearest_distance(uv, scale_factor, random_points)
print(nearest_distance)
|
[
"argparse.ArgumentParser",
"math.sqrt",
"math.modf",
"random.seed",
"uv.gen_uv"
] |
[((208, 224), 'math.modf', 'math.modf', (['uv[0]'], {}), '(uv[0])\n', (217, 224), False, 'import math\n'), ((238, 254), 'math.modf', 'math.modf', (['uv[1]'], {}), '(uv[1])\n', (247, 254), False, 'import math\n'), ((838, 865), 'math.sqrt', 'math.sqrt', (['min_sqr_distance'], {}), '(min_sqr_distance)\n', (847, 865), False, 'import math\n'), ((1019, 1044), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1042, 1044), False, 'import argparse\n'), ((1374, 1403), 'random.seed', 'random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (1385, 1403), False, 'import random\n'), ((1502, 1544), 'uv.gen_uv', 'gen_uv', (['args.size', 'args.size', 'scale_factor'], {}), '(args.size, args.size, scale_factor)\n', (1508, 1544), False, 'from uv import gen_uv\n')]
|
"""
automatic_questioner
--------------------
Module which serves as a interactor between the possible database with the
described structure and which contains information about functions and
variables of other packages.
Scheme of the db
----------------
# {'function_name':
# {'variables':
# {'variable_name':
# {'question_info':
# {'qtype': ['simple_input', 'confirmation_question',
# 'selection_options', 'selection_list_options'],
# 'question_spec': 'question_spec'},
# 'default': default}},
########
# 'descendants': [{'agg_description':
# {variable_name:
# {'variable_value': 'function_name'}
# },
# 'agg_name': 'aggregated_parameter_name'}]
# }}
######## OR
# 'descendants': [{'agg_description': 'function_name'
# 'agg_name': 'aggregated_parameter_name'}]
# }}
#TODO: checker 1 function with list of functions and dicts of dicts
"""
from tui_questioner import general_questioner
def check_quest_info(db):
"""Function which carry out the automatic checking of the database of
function and variables.
Parameters
----------
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
Returns
-------
check: boolean
returns the correctness of the database.
path: list
path of the possible error.
message: str
message of the error if it exists.
"""
## 0. Initial preset variables needed
# Function to compare lists
def equality_elements_list(a, b):
a = a.keys() if type(a) == dict else a
b = b.keys() if type(b) == dict else b
c = a[-1::-1]
return a == b or c == b
# List of elements available in some dicts at some levels
first_level = ['descendants', 'variables']
desc_2_level = ['agg_description', 'agg_name']
vars_2_level = ['question_info', 'default']
vars_3_level = ['qtype', 'question_spec']
# Messages of errors
m0 = "The given database of functions is not a dictionary."
m1 = "The function '%s' does not have "+str(first_level)+" as keys."
m2 = "The variables of function '%s' is not a dict."
m3 = "Incorrect keys "+str(vars_2_level)+" in function %s and variable %s."
m4 = "Incorrect question_info format for function %s and variable %s."
m5 = "Not a string the 'qtype' of function %s and variable %s."
m6 = "Incorrect 'question_spec' format for function %s and variable %s."
m7 = "Descendants of the function %s is not a list."
m8 = "Elements of the list of descendants not a dict for function %s."
m9 = "Incorrect structure of a dict in descendants for function %s."
m10 = "Incorrect type of agg_description for function %s and variable %s."
m11 = "Incorrect type of agg_description for function %s."
## Check db is a dict
if type(db) != dict:
return False, [], m0
## Loop for check each function in db
for funct in db.keys():
## Check main keys:
first_bl = equality_elements_list(db[funct], first_level)
if not first_bl:
return False, [funct], m1 % funct
## Check variables
if not type(db[funct]['variables']) == dict:
check = False
path = [funct, 'variables']
message = m2 % funct
return check, path, message
for var in db[funct]['variables']:
varsbles = db[funct]['variables']
v2_bl = equality_elements_list(varsbles[var], vars_2_level)
v3_bl = equality_elements_list(varsbles[var]['question_info'],
vars_3_level)
qtype_bl = db[funct]['variables'][var]['question_info']['qtype']
qtype_bl = type(qtype_bl) != str
qspec_bl = db[funct]['variables'][var]['question_info']
qspec_bl = type(qspec_bl['question_spec']) != dict
if not v2_bl:
check = False
path = [funct, 'variables', var]
message = m3 % (funct, var)
return check, path, message
### Check question_info
if not v3_bl:
check = False
path = [funct, 'variables', 'question_info']
message = m4 % (funct, var)
return check, path, message
if qtype_bl:
check = False
path = [funct, 'variables', 'question_info', 'qtype']
message = m5 % (funct, var)
return check, path, message
if qspec_bl:
check = False
path = [funct, 'variables', 'question_info', 'question_spec']
message = m6 % (funct, var)
return check, path, message
## Check descendants
if not type(db[funct]['descendants']) == list:
check = False
path = [funct, 'descendants']
message = m7 % funct
return check, path, message
for var_desc in db[funct]['descendants']:
if not type(var_desc) == dict:
check = False
path = [funct, 'descendants']
message = m8 % funct
return check, path, message
d2_bl = equality_elements_list(var_desc.keys(), desc_2_level)
if not d2_bl:
check = False
path = [funct, 'descendants']
message = m9 % funct
return check, path, message
if type(var_desc['agg_description']) == str:
pass
elif type(var_desc['agg_description']) == dict:
for varname in var_desc['agg_description']:
if not type(var_desc['agg_description'][varname]) == dict:
check = False
path = [funct, 'descendants', 'agg_description']
message = m10 % (funct, varname)
return check, path, message
else:
check = False
path = [funct, 'descendants', 'agg_description']
message = m11 % funct
return check, path, message
return True, [], ''
def automatic_questioner(function_name, db, choosen={}):
"""Function which carry out the automatic questioning task.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
"""
## Initialize variables needed
m1 = "Not value for a variables in order to create aggregate variables."
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Put the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
question = data_f['variables'][var]['question_info']
choosen_values[var] = general_questioner(**question)
# Put aggregated variables (descendants)
for var_desc in data_f['descendants']:
# Possible variables and aggregated parameter name
agg_description = var_desc['agg_description']
agg_param = var_desc['agg_name']
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
## Without dependant variable
if type(agg_description) == str:
# Obtain function name
fn = choosen_values[agg_param]
# Recurrent call
aux = automatic_questioner(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## With dependant variable
elif type(agg_description) == dict:
for var in var_desc['agg_description']:
if not var in choosen_values:
raise Exception(m1)
## Give a list and return a dict in the aggparam variable
elif type(choosen_values[var]) == str:
# Obtain function name
fn = var_desc['agg_description'][var][choosen_values[var]]
# Recurrent call
aux = automatic_questioner(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## Give a list and return a list in the aggparam variable
elif type(choosen_values[var]) == list:
choosen_values[agg_param] = []
aggvarval = [] if type(aggvarval) != list else aggvarval
for i in range(len(choosen_values[var])):
val = choosen_values[var][i]
fn = var_desc['agg_description'][var][val]
aux = automatic_questioner(fn, db, aggvarval[i])
choosen_values.append(aux)
return choosen_values
def get_default(function_name, db, choosen={}):
"""Function which returns a dictionary of choosen values by default.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
-----
TODO: Possibility of being integrated with authomatic_questioner after
testing.
"""
## Initialize variables needed
m1 = "Not value for a variables in order to create aggregate variables."
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Get the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
default = data_f['variables'][var]['default']
choosen_values[var] = default
# Get aggregated variables (descendants)
for var_desc in data_f['descendants']:
# Possible variables and aggregated parameter name
agg_description = var_desc['agg_description']
agg_param = var_desc['agg_name']
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
## Without dependant variable
if type(agg_description) == str:
# Obtain function name
fn = choosen_values[agg_param]
# Recurrent call
aux = get_default(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## With dependant variable
elif type(agg_description) == dict:
for var in var_desc['agg_description']:
if not var in choosen_values:
raise Exception(m1)
## Give a list and return a dict in the aggparam variable
elif type(choosen_values[var]) == str:
# Obtain function name
fn = var_desc['agg_description'][var][choosen_values[var]]
# Recurrent call
aux = get_default(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## Give a list and return a list in the aggparam variable
elif type(choosen_values[var]) == list:
choosen_values[agg_param] = []
aggvarval = [] if type(aggvarval) != list else aggvarval
for i in range(len(choosen_values[var])):
val = choosen_values[var][i]
fn = var_desc['agg_description'][var][val]
aux = get_default(fn, db, aggvarval[i])
choosen_values.append(aux)
return choosen_values
###############################################################################
###############################################################################
###############################################################################
def get_default3(function_name, db, choosen={}):
"""Function which returns a dictionary of choosen values by default.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
-----
TODO: Possibility of being integrated with authomatic_questioner after
testing.
"""
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Get the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
default = data_f['variables'][var]['default']
choosen_values[var] = default
# Get the aggregated variables (descendants)
for i in range(len(data_f['descendants'])):
# Possible variables and aggregated parameter name
vars_values = data_f['descendants'][i]['variable_values']
agg_param = data_f['descendants'][i]['parameters']
variables = vars_values.keys()
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
for var in variables:
# boolean variables
value = choosen_values[var]
iflist = type(value) == list
ifvars = var in choosen_values.keys()
# if we have to return a list
if ifvars and iflist:
# Initialization values
n = len(value)
aggvarval = aggvarval if ifaggvar else [{} for i in range(n)]
results = []
i = 0
for val in value:
# Obtain function_name
f_name = vars_values[var][value]
# Recurrent call
aux = get_default(f_name, db, aggvarval[i])
# Insert in the correspondent list
results.append(aux)
i += 1
# if we have to return a dict
elif ifvars and not iflist:
# Obtain function_name
f_name = vars_values[var][value]
# Recurrent call
choosen_values[agg_param] = get_default(f_name, db, aggvarval)
return choosen_values
def automatic_questioner3(function_name, db, choosen={}):
"""Function which carry out the automatic questioning task.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
"""
## Initialize variables needed
m1 = "Not value for a variables in order to create aggregate variables."
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Put the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
question = data_f['variables'][var]['question_info']
choosen_values[var] = general_questioner(**question)
# Put aggregated variables (descendants)
for i in range(len(data_f['descendants'])):
# Possible variables and aggregated parameter name
vars_values = data_f['descendants'][i]['variable_values']
agg_param = data_f['descendants'][i]['parameters']
variables = vars_values.keys()
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
for var in variables:
# boolean variables
value = choosen_values[var]
iflist = type(value) == list
ifvars = var in choosen_values.keys()
# if we have to return a list
if ifvars and iflist:
# Initialization values
n = len(value)
aggvarval = aggvarval if ifaggvar else [{} for i in range(n)]
results = []
i = 0
for val in value:
# Obtain function_name
f_name = vars_values[var][value]
# Recurrent call
aux = authomatic_questioner(f_name, db, aggvarval[i])
# Insert in the correspondent list
results.append(aux)
i += 1
# if we have to return a dict
elif ifvars and not iflist:
# Obtain function_name
f_name = vars_values[var][value]
# Recurrent call
choosen_values[agg_param] = authomatic_questioner(f_name, db,
aggvarval)
return choosen_values
|
[
"tui_questioner.general_questioner"
] |
[((7770, 7800), 'tui_questioner.general_questioner', 'general_questioner', ([], {}), '(**question)\n', (7788, 7800), False, 'from tui_questioner import general_questioner\n'), ((17678, 17708), 'tui_questioner.general_questioner', 'general_questioner', ([], {}), '(**question)\n', (17696, 17708), False, 'from tui_questioner import general_questioner\n')]
|
from __future__ import unicode_literals
import logging
import os
from mopidy import config, ext
from .pinconfig import PinConfig
__version__ = "0.0.2"
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
dist_name = "Mopidy-Raspberry-GPIO"
ext_name = "raspberry-gpio"
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), "ext.conf")
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
for pin in range(28):
schema["bcm{:d}".format(pin)] = PinConfig()
return schema
def setup(self, registry):
from .frontend import RaspberryGPIOFrontend
registry.add("frontend", RaspberryGPIOFrontend)
|
[
"os.path.dirname",
"mopidy.config.read",
"logging.getLogger"
] |
[((166, 193), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (183, 193), False, 'import logging\n'), ((449, 471), 'mopidy.config.read', 'config.read', (['conf_file'], {}), '(conf_file)\n', (460, 471), False, 'from mopidy import config, ext\n'), ((395, 420), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (410, 420), False, 'import os\n')]
|
from django.db import models
from schedule.models import Event, EventRelation, Calendar
from vms.locations.models import Location
# Create your models here.
class CSPCEvent(Event):
event_location = models.ForeignKey(Location, default=1)
|
[
"django.db.models.ForeignKey"
] |
[((203, 241), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Location'], {'default': '(1)'}), '(Location, default=1)\n', (220, 241), False, 'from django.db import models\n')]
|
# -*- coding: utf-8 -*-
"""WSGI app setup."""
import os
import sys
# Add lib as primary libraries directory, with fallback to lib/dist
# and optionally to lib/dist.zip, loaded using zipimport.
lib_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib')
if lib_path not in sys.path:
sys.path[0:0] = [
lib_path,
os.path.join(lib_path, 'dist'),
os.path.join(lib_path, 'dist.zip'),
]
from tipfy.app import App
from config import config
from urls import rules
def enable_appstats(app):
"""Enables appstats middleware."""
from google.appengine.ext.appstats.recording import \
appstats_wsgi_middleware
app.dispatch = appstats_wsgi_middleware(app.dispatch)
def enable_jinja2_debugging():
"""Enables blacklisted modules that help Jinja2 debugging."""
if not debug:
return
from google.appengine.tools.dev_appserver import HardenedModulesHook
HardenedModulesHook._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
# Is this the development server?
debug = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
# Instantiate the application.
app = App(rules=rules, config=config, debug=debug)
enable_appstats(app)
enable_jinja2_debugging()
def main():
app.run()
if __name__ == '__main__':
main()
|
[
"tipfy.app.App",
"os.path.dirname",
"os.environ.get",
"google.appengine.ext.appstats.recording.appstats_wsgi_middleware",
"os.path.join"
] |
[((1133, 1177), 'tipfy.app.App', 'App', ([], {'rules': 'rules', 'config': 'config', 'debug': 'debug'}), '(rules=rules, config=config, debug=debug)\n', (1136, 1177), False, 'from tipfy.app import App\n'), ((681, 719), 'google.appengine.ext.appstats.recording.appstats_wsgi_middleware', 'appstats_wsgi_middleware', (['app.dispatch'], {}), '(app.dispatch)\n', (705, 719), False, 'from google.appengine.ext.appstats.recording import appstats_wsgi_middleware\n'), ((234, 259), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (249, 259), False, 'import os\n'), ((346, 376), 'os.path.join', 'os.path.join', (['lib_path', '"""dist"""'], {}), "(lib_path, 'dist')\n", (358, 376), False, 'import os\n'), ((386, 420), 'os.path.join', 'os.path.join', (['lib_path', '"""dist.zip"""'], {}), "(lib_path, 'dist.zip')\n", (398, 420), False, 'import os\n'), ((1039, 1076), 'os.environ.get', 'os.environ.get', (['"""SERVER_SOFTWARE"""', '""""""'], {}), "('SERVER_SOFTWARE', '')\n", (1053, 1076), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
from layers.dynamic_rnn import DynamicLSTM
from layers.shap import Distribution_SHAP, Map_SHAP
import torch
import torch.nn as nn
import numpy as np
class SHAP_LSTM(nn.Module):
def __init__(self, embedding_matrix, opt):
super(SHAP_LSTM, self).__init__()
self.opt = opt
self.embed_dim = embedding_matrix.shape[-1]
self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
self.lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True)
self.shap = Distribution_SHAP(self.opt.max_seq_len, self.opt.polarities_dim, opt)
self.map_shap = Map_SHAP(opt.embed_dim, opt.max_seq_len, opt)
self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
def forward(self, inputs, label, weights, update=True):
text_raw_indices, aspect_indices = inputs[0], inputs[1]
x = self.embed(text_raw_indices)
x_len = torch.sum(text_raw_indices != 0, dim=-1)
aspect_idx = torch.tensor(torch.eq(text_raw_indices, aspect_indices[:, 0].reshape((-1, 1))),
dtype=torch.float)
aspect_pos_idx = [np.where(aspect_idx[i, :] == 1)[0] for i in range(len(aspect_idx))]
if update:
H_N, (h_n, _) = self.lstm(x, x_len)
weights = self.shap(text_raw_indices, aspect_indices, label, H_N, weights, self.dense)
out = self.dense(h_n[0])
return out, weights
else:
if len(weights) != 0:
x = self.map_shap(x, aspect_pos_idx, weights)
else:
pass
_, (h_n, _) = self.lstm(x.to(self.opt.device), x_len)
out = self.dense(h_n[0])
return out
|
[
"layers.shap.Distribution_SHAP",
"torch.sum",
"layers.dynamic_rnn.DynamicLSTM",
"numpy.where",
"torch.nn.Linear",
"layers.shap.Map_SHAP",
"torch.tensor"
] |
[((489, 563), 'layers.dynamic_rnn.DynamicLSTM', 'DynamicLSTM', (['opt.embed_dim', 'opt.hidden_dim'], {'num_layers': '(1)', 'batch_first': '(True)'}), '(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True)\n', (500, 563), False, 'from layers.dynamic_rnn import DynamicLSTM\n'), ((584, 653), 'layers.shap.Distribution_SHAP', 'Distribution_SHAP', (['self.opt.max_seq_len', 'self.opt.polarities_dim', 'opt'], {}), '(self.opt.max_seq_len, self.opt.polarities_dim, opt)\n', (601, 653), False, 'from layers.shap import Distribution_SHAP, Map_SHAP\n'), ((678, 723), 'layers.shap.Map_SHAP', 'Map_SHAP', (['opt.embed_dim', 'opt.max_seq_len', 'opt'], {}), '(opt.embed_dim, opt.max_seq_len, opt)\n', (686, 723), False, 'from layers.shap import Distribution_SHAP, Map_SHAP\n'), ((745, 790), 'torch.nn.Linear', 'nn.Linear', (['opt.hidden_dim', 'opt.polarities_dim'], {}), '(opt.hidden_dim, opt.polarities_dim)\n', (754, 790), True, 'import torch.nn as nn\n'), ((975, 1015), 'torch.sum', 'torch.sum', (['(text_raw_indices != 0)'], {'dim': '(-1)'}), '(text_raw_indices != 0, dim=-1)\n', (984, 1015), False, 'import torch\n'), ((418, 467), 'torch.tensor', 'torch.tensor', (['embedding_matrix'], {'dtype': 'torch.float'}), '(embedding_matrix, dtype=torch.float)\n', (430, 467), False, 'import torch\n'), ((1196, 1227), 'numpy.where', 'np.where', (['(aspect_idx[i, :] == 1)'], {}), '(aspect_idx[i, :] == 1)\n', (1204, 1227), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
from scipy.ndimage.morphology import distance_transform_cdt
import torch
from skimage.io import imsave
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_edge_mask(poly, mask):
"""
Generate edge mask
"""
h = mask.shape[0]
w = mask.shape[1]
gt_poly = np.zeros((poly.shape[0],poly.shape[1]),np.int32)
gt_poly[:,0] = np.floor(poly[:,0]*w)
gt_poly[:,1] = np.floor(poly[:,1]*h)
# print(gt_poly[:,0], gt_poly[:,1])
cv2.polylines(mask, np.int32([gt_poly]),True,[1], thickness = 1)
# cv2.fillPoly(mask, np.int32([gt_poly]),[255])
# imsave("./test33/"+str(poly.shape[0])+"edge.jpg",mask[0])
# imsave("./test33/"+str(poly.shape[0])+"edgegt.jpg",mask[1])
return mask
def get_poly_mask(poly, mask):
"""
Generate edge mask
"""
h = mask.shape[0]
w = mask.shape[1]
gt_poly = np.zeros((poly.shape[0],poly.shape[1]),np.int32)
gt_poly[:,0] = np.floor(poly[:,0]*w)
gt_poly[:,1] = np.floor(poly[:,1]*h)
# print(gt_poly[:,0], gt_poly[:,1])
# cv2.polylines(mask, np.int32([gt_poly]),True,[1], thickness = 1)
cv2.fillPoly(mask, np.int32([gt_poly]),[1])
# imsave("./test33/"+str(poly.shape[0])+"edge.jpg",mask[0])
# imsave("./test33/"+str(poly.shape[0])+"edgegt.jpg",mask[1])
return mask
def get_original_mask(poly, mask):
"""
Generate edge mask
"""
h = mask.shape[0]
w = mask.shape[1]
gt_poly = np.zeros((poly.shape[0],poly.shape[1]),np.int32)
gt_poly[:,0] = np.floor(poly[:,0]*w)
gt_poly[:,1] = np.floor(poly[:,1]*h)
# print(gt_poly[:,0], gt_poly[:,1])
# cv2.polylines(mask, np.int32([gt_poly]),True,[1], thickness = 1)
cv2.fillPoly(mask, np.int32([gt_poly]),[1])
# imsave("./test33/"+str(poly.shape[0])+"edge.jpg",mask[0])
# imsave("./test33/"+str(poly.shape[0])+"edgegt.jpg",mask[1])
return mask
def get_fp_mask(poly,mask):
h = mask.shape[0]
w = mask.shape[1]
x = np.int32(np.floor(poly[0,0]*w))
y = np.int32(np.floor(poly[0,1]*h))
mask[y,x] = 1.0
# if(y<=14 and x<=190 and x>=1 and y>=1):
# mask[y,x+1] = 1.0
# mask[y,x-1] = 1.0
# mask[y+1,x] = 1.0
# mask[y-1,x] = 1.0
return mask
def get_vertices_mask(poly, mask):
"""
Generate a vertex mask
"""
h = mask.shape[0]
w = mask.shape[1]
gt_poly = np.zeros((poly.shape[0],poly.shape[1]),np.int32)
gt_poly[:,0] = np.floor(poly[:,0]*w)
gt_poly[:,1] = np.floor(poly[:,1]*h)
mask[gt_poly[:, 1], gt_poly[:, 0]] = 1.0
return mask
def get_previous_mask(poly,mask,t):
mask = torch.zeros(1, 1, 25, 60, device=device)
h = 25
w = 60
x = np.int32(np.floor(poly[0,t,0]*w))
y = np.int32(np.floor(poly[0,t,1]*h))
mask[0,0,y,x] = 1
# if(y<=14 and x<=190 and x>=1 and y>=1):
# mask[0,0,y,x+1] = 1.0
# mask[0,0,y,x-1] = 1.0
# mask[0,0,y+1,x] = 1.0
# mask[0,0,y-1,x] = 1.0
return mask
def get_instance_mask(poly,mask):
h = 25
w = 60
masks = []
for tr in range(poly.shape[0]):
# print(poly[tr,0],poly[tr,1])
x = np.int32(np.floor(poly[tr,0]*w))
y = np.int32(np.floor(poly[tr,1]*h))
# print(y,x)
mask[y,x] = 1.0
# if(y<=14 and x<=190 and x>=1 and y>=1):
# mask[y,x+1] = 1.0
# mask[y,x-1] = 1.0
# mask[y+1,x] = 1.0
# mask[y-1,x] = 1.0
mask1 = mask.flatten()
if(tr == poly.shape[0]-1):
mask1 = np.append(mask1,[1.0])
else:
mask1 = np.append(mask1,[0.0])
masks.append(mask1)
# print(y,x)
mask[y,x] = 0.0
# if(y<=14 and x<=190 and x>=1 and y>=1):
# mask[y+1,x] = 0.0
# mask[y-1,x] = 0.0
# mask[y,x+1] = 0.0
# mask[y,x-1] = 0.0
return np.asarray(masks, dtype=np.float32)
def class_to_grid(poly, out_tensor):
"""
NOTE: Torch function
accepts out_tensor to do it inplace
poly: [batch, ]
out_tensor: [batch, 1, grid_size, grid_size]
"""
out_tensor.zero_()
# Remove old state of out_tensor
b = 0
for i in poly:
if i < 16 * 192:
x = (i%192).long()
y = (i/16).long()
out_tensor[0,0,y,x] = 1
b += 1
return out_tensor
def dt_targets_from_class(poly):
"""
NOTE: numpy function!
poly: [bs, time_steps], each value in [0, grid*size**2+1)
grid_size: size of the grid the polygon is in
dt_threshold: threshold for smoothing in dt targets
returns:
full_targets: [bs, time_steps, grid_size**2+1] array containing
dt smoothed targets to be used for the polygon loss function
"""
full_targets = []
for b in range(poly.shape[0]):
targets = []
for p in poly[b]:
t = np.zeros(16*192+1, dtype=np.int32)
t[p] += 1
if p != 16*192:#EOS
spatial_part = t[:-1]
spatial_part = np.reshape(spatial_part, [16, 192, 1])
# Invert image
spatial_part = -1 * (spatial_part - 1)
# Compute distance transform
spatial_part = distance_transform_cdt(spatial_part, metric='taxicab').astype(np.float32)
# Threshold
spatial_part = np.clip(spatial_part, 0, dt_threshold)
# Normalize
spatial_part /= dt_threshold
# Invert back
spatial_part = -1. * (spatial_part - 1.)
spatial_part /= np.sum(spatial_part)
spatial_part = spatial_part.flatten()
t = np.concatenate([spatial_part, [0.]], axis=-1)
targets.append(t.astype(np.float32))
full_targets.append(targets)
return np.array(full_targets, dtype=np.float32)
# def class_to_grid(poly, out_tensor):
# """
# NOTE: Torch function
# accepts out_tensor to do it inplace
# poly: [batch, ]
# out_tensor: [batch, 1, grid_size, grid_size]
# """
# out_tensor.zero_()
# # Remove old state of out_tensor
# b = 0
# for i in poly:
# if i < 16 * 192:
# x = (i%192).long()
# y = (i/16).long()
# out_tensor[b,0,y,x] = 1
# b += 1
# return out_tensor
|
[
"scipy.ndimage.morphology.distance_transform_cdt",
"numpy.sum",
"numpy.asarray",
"numpy.floor",
"numpy.zeros",
"numpy.clip",
"numpy.append",
"numpy.array",
"torch.cuda.is_available",
"numpy.int32",
"numpy.reshape",
"torch.zeros",
"numpy.concatenate"
] |
[((333, 383), 'numpy.zeros', 'np.zeros', (['(poly.shape[0], poly.shape[1])', 'np.int32'], {}), '((poly.shape[0], poly.shape[1]), np.int32)\n', (341, 383), True, 'import numpy as np\n'), ((401, 425), 'numpy.floor', 'np.floor', (['(poly[:, 0] * w)'], {}), '(poly[:, 0] * w)\n', (409, 425), True, 'import numpy as np\n'), ((442, 466), 'numpy.floor', 'np.floor', (['(poly[:, 1] * h)'], {}), '(poly[:, 1] * h)\n', (450, 466), True, 'import numpy as np\n'), ((905, 955), 'numpy.zeros', 'np.zeros', (['(poly.shape[0], poly.shape[1])', 'np.int32'], {}), '((poly.shape[0], poly.shape[1]), np.int32)\n', (913, 955), True, 'import numpy as np\n'), ((973, 997), 'numpy.floor', 'np.floor', (['(poly[:, 0] * w)'], {}), '(poly[:, 0] * w)\n', (981, 997), True, 'import numpy as np\n'), ((1014, 1038), 'numpy.floor', 'np.floor', (['(poly[:, 1] * h)'], {}), '(poly[:, 1] * h)\n', (1022, 1038), True, 'import numpy as np\n'), ((1479, 1529), 'numpy.zeros', 'np.zeros', (['(poly.shape[0], poly.shape[1])', 'np.int32'], {}), '((poly.shape[0], poly.shape[1]), np.int32)\n', (1487, 1529), True, 'import numpy as np\n'), ((1547, 1571), 'numpy.floor', 'np.floor', (['(poly[:, 0] * w)'], {}), '(poly[:, 0] * w)\n', (1555, 1571), True, 'import numpy as np\n'), ((1588, 1612), 'numpy.floor', 'np.floor', (['(poly[:, 1] * h)'], {}), '(poly[:, 1] * h)\n', (1596, 1612), True, 'import numpy as np\n'), ((2404, 2454), 'numpy.zeros', 'np.zeros', (['(poly.shape[0], poly.shape[1])', 'np.int32'], {}), '((poly.shape[0], poly.shape[1]), np.int32)\n', (2412, 2454), True, 'import numpy as np\n'), ((2472, 2496), 'numpy.floor', 'np.floor', (['(poly[:, 0] * w)'], {}), '(poly[:, 0] * w)\n', (2480, 2496), True, 'import numpy as np\n'), ((2513, 2537), 'numpy.floor', 'np.floor', (['(poly[:, 1] * h)'], {}), '(poly[:, 1] * h)\n', (2521, 2537), True, 'import numpy as np\n'), ((2646, 2686), 'torch.zeros', 'torch.zeros', (['(1)', '(1)', '(25)', '(60)'], {'device': 'device'}), '(1, 1, 25, 60, device=device)\n', (2657, 2686), False, 'import torch\n'), ((3894, 3929), 'numpy.asarray', 'np.asarray', (['masks'], {'dtype': 'np.float32'}), '(masks, dtype=np.float32)\n', (3904, 3929), True, 'import numpy as np\n'), ((5853, 5893), 'numpy.array', 'np.array', (['full_targets'], {'dtype': 'np.float32'}), '(full_targets, dtype=np.float32)\n', (5861, 5893), True, 'import numpy as np\n'), ((166, 191), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (189, 191), False, 'import torch\n'), ((532, 551), 'numpy.int32', 'np.int32', (['[gt_poly]'], {}), '([gt_poly])\n', (540, 551), True, 'import numpy as np\n'), ((1174, 1193), 'numpy.int32', 'np.int32', (['[gt_poly]'], {}), '([gt_poly])\n', (1182, 1193), True, 'import numpy as np\n'), ((1748, 1767), 'numpy.int32', 'np.int32', (['[gt_poly]'], {}), '([gt_poly])\n', (1756, 1767), True, 'import numpy as np\n'), ((2010, 2034), 'numpy.floor', 'np.floor', (['(poly[0, 0] * w)'], {}), '(poly[0, 0] * w)\n', (2018, 2034), True, 'import numpy as np\n'), ((2050, 2074), 'numpy.floor', 'np.floor', (['(poly[0, 1] * h)'], {}), '(poly[0, 1] * h)\n', (2058, 2074), True, 'import numpy as np\n'), ((2726, 2753), 'numpy.floor', 'np.floor', (['(poly[0, t, 0] * w)'], {}), '(poly[0, t, 0] * w)\n', (2734, 2753), True, 'import numpy as np\n'), ((2768, 2795), 'numpy.floor', 'np.floor', (['(poly[0, t, 1] * h)'], {}), '(poly[0, t, 1] * h)\n', (2776, 2795), True, 'import numpy as np\n'), ((3174, 3199), 'numpy.floor', 'np.floor', (['(poly[tr, 0] * w)'], {}), '(poly[tr, 0] * w)\n', (3182, 3199), True, 'import numpy as np\n'), ((3219, 3244), 'numpy.floor', 'np.floor', (['(poly[tr, 1] * h)'], {}), '(poly[tr, 1] * h)\n', (3227, 3244), True, 'import numpy as np\n'), ((3552, 3575), 'numpy.append', 'np.append', (['mask1', '[1.0]'], {}), '(mask1, [1.0])\n', (3561, 3575), True, 'import numpy as np\n'), ((3609, 3632), 'numpy.append', 'np.append', (['mask1', '[0.0]'], {}), '(mask1, [0.0])\n', (3618, 3632), True, 'import numpy as np\n'), ((4886, 4924), 'numpy.zeros', 'np.zeros', (['(16 * 192 + 1)'], {'dtype': 'np.int32'}), '(16 * 192 + 1, dtype=np.int32)\n', (4894, 4924), True, 'import numpy as np\n'), ((5045, 5083), 'numpy.reshape', 'np.reshape', (['spatial_part', '[16, 192, 1]'], {}), '(spatial_part, [16, 192, 1])\n', (5055, 5083), True, 'import numpy as np\n'), ((5380, 5418), 'numpy.clip', 'np.clip', (['spatial_part', '(0)', 'dt_threshold'], {}), '(spatial_part, 0, dt_threshold)\n', (5387, 5418), True, 'import numpy as np\n'), ((5612, 5632), 'numpy.sum', 'np.sum', (['spatial_part'], {}), '(spatial_part)\n', (5618, 5632), True, 'import numpy as np\n'), ((5708, 5754), 'numpy.concatenate', 'np.concatenate', (['[spatial_part, [0.0]]'], {'axis': '(-1)'}), '([spatial_part, [0.0]], axis=-1)\n', (5722, 5754), True, 'import numpy as np\n'), ((5247, 5301), 'scipy.ndimage.morphology.distance_transform_cdt', 'distance_transform_cdt', (['spatial_part'], {'metric': '"""taxicab"""'}), "(spatial_part, metric='taxicab')\n", (5269, 5301), False, 'from scipy.ndimage.morphology import distance_transform_cdt\n')]
|
#!/usr/bin/env python3
# date: 2016.11.24 (update: 2020.06.13)
# https://stackoverflow.com/questions/40777864/retrieving-all-information-from-page-beautifulsoup/
from selenium import webdriver
from bs4 import BeautifulSoup
import time
# --- get page ---
link = 'http://oldnavy.gap.com/browse/category.do?cid=1035712&sop=true'
#driver = webdriver.PhantomJS() # deprecated
driver = webdriver.Firefox()
driver.get(link)
time.sleep(3)
# --- scrolling ---
#size = driver.get_window_size()
#print(size)
#window_height = size['height']
#print('window_height:', window_height) # webpage + toolbars + border
# https://stackoverflow.com/questions/1248081/how-to-get-the-browser-viewport-dimensions
# this may give too big value because it includes scrollbar's height (ie. 962 = 950+22)
#viewport_height = driver.execute_script('return window.innerHeight;')
#print('viewport_height:', viewport_height)
# this gives correct value without scrollbar (ie. 950)
viewport_height = driver.execute_script('return document.documentElement.clientHeight;')
print('viewport_height:', viewport_height)
y = 0 # position to scroll
# at start it has to bigger then `y` to run `while y < page_height:`
page_height = 1
#page_height = driver.execute_script('return document.body.scrollHeight;')
while y < page_height:
y += viewport_height # move only visible height
print('y:', y, 'page_height:', page_height)
# scroll
driver.execute_script(f'window.scrollTo(0, {y});')
# browser may need time to update page
time.sleep(0.5)
# get page height (it can change when JavaScript adds elements)
page_height = driver.execute_script('return document.body.scrollHeight;')
# --- get data with BeautifulSoup ---
base_url = 'http://www.oldnavy.com'
html = driver.page_source
soup = BeautifulSoup(html, 'html5lib')
all_divs = soup.find_all('div', class_='product-card') # new layout
print('len(all_divs):', len(all_divs))
#for div in all_divs:
# link = div.find('a')
# print(link.text)
# print(base_url + link['href'])
# --- get data with Selenium ---
all_products = driver.find_elements_by_class_name('product-card')
print('len(all_products):', len(all_products))
for product in all_products:
link = product.find_element_by_tag_name('a')
print(link.text)
# print(base_url + link['href'])
|
[
"bs4.BeautifulSoup",
"time.sleep",
"selenium.webdriver.Firefox"
] |
[((385, 404), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (402, 404), False, 'from selenium import webdriver\n'), ((422, 435), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (432, 435), False, 'import time\n'), ((1794, 1825), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html5lib"""'], {}), "(html, 'html5lib')\n", (1807, 1825), False, 'from bs4 import BeautifulSoup\n'), ((1521, 1536), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1531, 1536), False, 'import time\n')]
|
from schema_reg_viz.config.settings import get_settings
def test_health():
result = get_settings()
assert result.schema_registry.port == 8081
assert result.schema_registry.protocol == 'http'
assert result.schema_registry.url == 'localhost'
|
[
"schema_reg_viz.config.settings.get_settings"
] |
[((90, 104), 'schema_reg_viz.config.settings.get_settings', 'get_settings', ([], {}), '()\n', (102, 104), False, 'from schema_reg_viz.config.settings import get_settings\n')]
|
"""
Module defining API.
"""
from api import app
from flask import jsonify
import recipes
@app.route('/list')
def list():
"""
List all available recipes
:return:
list
a list containing the names of the recipes. ex: ['recipe1',recipe2']
"""
recipes.refresh()
return jsonify(recipes.list)
@app.route('/status')
def status():
"""
Get the status of the app.
:return:
object
message
The message to be displayed to the user.
options
null or a list of strings to display to the user as selectable options.
recipe
Name of the currently running recipe or null if none is running.
step
The step number or -1 if no recipe is running
status
The state of the application. One of:
idle
App is waiting for the user to start a recipe
running
App is running a recipe and doesn't need any input from the user
user_input
App is waiting for the user to make a decision. See options.
complete
Recipe is complete.
error
A system error has occurred.
"""
return jsonify(recipes.status())
@app.route('/start/<name>')
def start(name):
"""
Start running a recipe.
:param name:
The recipe name. Must be one of the items returned by /list
:return:
object
response
One of:
ok
error
message
Only present if response is "error" and there is a message to present to the user.
"""
(state,msg) = recipes.start(name)
if state:
return jsonify({'response':'ok'})
else:
return jsonify({'response':'error','message':msg})
@app.route('/stop')
def stop():
"""
Stop the currently running recipe.
:return:
object
response
One of:
ok
error
message
Only present if response is "error" and there is a message to present to the user.
"""
recipes.stop()
return jsonify({'response':'ok'})
@app.route('/select/option/<name>')
def selectOption(name):
"""
Provide user selected input.
:param name:
The name of the user selected option. This must be one of the strings presented in the
"options" list in the /status call.
:return:
object
response
One of:
ok
error
message
Only present if response is "error" and there is a message to present to the user.
"""
(state,msg) = recipes.selectOption(name)
if state:
return jsonify({'response':'ok'})
else:
return jsonify({'response':'error','message':msg})
|
[
"recipes.refresh",
"flask.jsonify",
"recipes.status",
"recipes.stop",
"api.app.route",
"recipes.start",
"recipes.selectOption"
] |
[((93, 111), 'api.app.route', 'app.route', (['"""/list"""'], {}), "('/list')\n", (102, 111), False, 'from api import app\n'), ((329, 349), 'api.app.route', 'app.route', (['"""/status"""'], {}), "('/status')\n", (338, 349), False, 'from api import app\n'), ((1313, 1339), 'api.app.route', 'app.route', (['"""/start/<name>"""'], {}), "('/start/<name>')\n", (1322, 1339), False, 'from api import app\n'), ((1866, 1884), 'api.app.route', 'app.route', (['"""/stop"""'], {}), "('/stop')\n", (1875, 1884), False, 'from api import app\n'), ((2226, 2260), 'api.app.route', 'app.route', (['"""/select/option/<name>"""'], {}), "('/select/option/<name>')\n", (2235, 2260), False, 'from api import app\n'), ((275, 292), 'recipes.refresh', 'recipes.refresh', ([], {}), '()\n', (290, 292), False, 'import recipes\n'), ((304, 325), 'flask.jsonify', 'jsonify', (['recipes.list'], {}), '(recipes.list)\n', (311, 325), False, 'from flask import jsonify\n'), ((1718, 1737), 'recipes.start', 'recipes.start', (['name'], {}), '(name)\n', (1731, 1737), False, 'import recipes\n'), ((2170, 2184), 'recipes.stop', 'recipes.stop', ([], {}), '()\n', (2182, 2184), False, 'import recipes\n'), ((2196, 2223), 'flask.jsonify', 'jsonify', (["{'response': 'ok'}"], {}), "({'response': 'ok'})\n", (2203, 2223), False, 'from flask import jsonify\n'), ((2715, 2741), 'recipes.selectOption', 'recipes.selectOption', (['name'], {}), '(name)\n', (2735, 2741), False, 'import recipes\n'), ((1292, 1308), 'recipes.status', 'recipes.status', ([], {}), '()\n', (1306, 1308), False, 'import recipes\n'), ((1767, 1794), 'flask.jsonify', 'jsonify', (["{'response': 'ok'}"], {}), "({'response': 'ok'})\n", (1774, 1794), False, 'from flask import jsonify\n'), ((1819, 1865), 'flask.jsonify', 'jsonify', (["{'response': 'error', 'message': msg}"], {}), "({'response': 'error', 'message': msg})\n", (1826, 1865), False, 'from flask import jsonify\n'), ((2771, 2798), 'flask.jsonify', 'jsonify', (["{'response': 'ok'}"], {}), "({'response': 'ok'})\n", (2778, 2798), False, 'from flask import jsonify\n'), ((2823, 2869), 'flask.jsonify', 'jsonify', (["{'response': 'error', 'message': msg}"], {}), "({'response': 'error', 'message': msg})\n", (2830, 2869), False, 'from flask import jsonify\n')]
|
import numpy as np
import torch
from utils import plotsAnalysis
import os
from utils.helper_functions import load_flags
def auto_swipe(mother_dir=None):
"""
This function swipes the parameter space of a folder and extract the varying hyper-parameters and make 2d heatmap w.r.t. all combinations of them
"""
if mother_dir is None:
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/sweep8'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/Yang_new_sweep/'
mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/new_norm_color/'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/Color_new_sweep/'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/encoder_pos_analysis/Color'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Color_temp/models'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Color_temp/prev_sweep/test_size'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sweep_encode_lr'
flags_list = []
# First step, get the list of object flags
for folder in os.listdir(mother_dir):
# Get the current sub_folder
cur_folder = os.path.join(mother_dir, folder)
if not os.path.isdir(cur_folder) or not os.path.isfile(os.path.join(cur_folder, 'flags.obj')):
print('Either this is not a folder or there is no flags object under this folder for ', cur_folder)
continue
# Read the pickle object
cur_flags = load_flags(cur_folder)
flags_list.append(cur_flags)
# From the list of flags, get the things that are different except for loss terms
att_list = [a for a in dir(cur_flags) if not a.startswith('_') and not 'loss' in a and not 'trainable_param' in a and not 'model_name' in a and not 'dir' in a]
print('In total {} attributes, they are {}'.format(len(att_list), att_list))
# Create a dictionary that have keys as attributes and unique values as that
attDict = {key: [] for key in att_list}
# Loop over all the flags and get the unique values inside
for flags in flags_list:
for keys in attDict.keys():
try:
att = getattr(flags,keys)
except:
print('There is not attribute {} in flags, continue'.format(keys))
continue
# Skip if this is already inside the list
if att in attDict[keys]:
continue
attDict[keys].append(att)
# Get the atts in the dictionary that has more than 1 att inside
varying_att_list = []
for keys in attDict.keys():
if len(attDict[keys]) > 1:
# For linear layers, apply special handlings
if 'linear' not in keys:
varying_att_list.append(keys)
continue
length_list = []
num_node_in_layer_list = []
# Loop over the lists of linear
for linear_list in attDict[keys]:
assert type(linear_list) == list, 'Your linear layer is not list, check again'
length_list.append(len(linear_list)) # Record the length instead
if 'head_linear' in keys:
if len(linear_list) > 2:
num_node_in_layer_list.append(linear_list[-2]) # Record the -2 of the list, which denotes the number of nodes
elif 'tail_linear' in keys:
if len(linear_list) > 1:
num_node_in_layer_list.append(linear_list[-2]) # Record the -2 of the list, which denotes the number of nodes
# Add these two attributes to the
if len(np.unique(length_list)) > 1:
varying_att_list.append(keys)
if len(np.unique(num_node_in_layer_list)) > 1:
varying_att_list.append('linear_unit')
print('varying attributes are', varying_att_list)
# Showing how they are changing
for keys in varying_att_list:
if keys == 'linear_unit':
continue
print('att is {}, they have values of {}'.format(keys, attDict[keys]))
if len(varying_att_list) == 1:
# There is only 1 attribute that is changing
att = varying_att_list[0]
key_a = att
key_b = 'lr'
for heatmap_value in ['best_validation_loss', 'best_training_loss','trainable_param']:
#try:
print('doing heatmap {}'.format(heatmap_value))
plotsAnalysis.HeatMapBVL(key_a, key_b, key_a + '_' + key_b + '_HeatMap',save_name=mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value + '_heatmap.png',
HeatMap_dir=mother_dir,feature_1_name=key_a,feature_2_name=key_b, heat_value_name=heatmap_value)
#except Exception as e:
# print('the plotswipe does not work in {} and {} cross for {}'.format(key_a, key_b, heatmap_value))
# print('error message: {}'.format(e))
# Start calling the plotsAnalysis function for all the pairs
for a, key_a in enumerate(varying_att_list):
for b, key_b in enumerate(varying_att_list):
# Skip the same attribute
if a <= b:
continue
# Call the plotsAnalysis function
#for heatmap_value in ['best_validation_loss']:
for heatmap_value in ['best_validation_loss', 'best_training_loss','trainable_param']:
print('doing heatmap {}'.format(heatmap_value))
try:
plotsAnalysis.HeatMapBVL(key_a, key_b, key_a + '_' + key_b + '_HeatMap',save_name=mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value + '_heatmap.png',
HeatMap_dir=mother_dir,feature_1_name=key_a,feature_2_name=key_b, heat_value_name=heatmap_value)
except:
print('the plotswipe does not work in {} and {} cross for {}'.format(key_a, key_b, heatmap_value))
if __name__ == '__main__':
#pathnamelist = ['/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/sweep4',
# '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sweep4']#,
#'/scratch/sr365/ML_MM_Benchmark/Color_temp/models/sweep2']
#'/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/lr_sweep']
#big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/encoder'
#big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sequence_len'
#big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/MLP_complexity/'
#for dirs in os.listdir(big_mother_dir):
# mother_dir = os.path.join(big_mother_dir, dirs)
# if os.path.isdir(mother_dir):
# auto_swipe(mother_dir)
auto_swipe()
|
[
"utils.helper_functions.load_flags",
"os.path.isdir",
"utils.plotsAnalysis.HeatMapBVL",
"os.path.join",
"os.listdir",
"numpy.unique"
] |
[((1134, 1156), 'os.listdir', 'os.listdir', (['mother_dir'], {}), '(mother_dir)\n', (1144, 1156), False, 'import os\n'), ((1216, 1248), 'os.path.join', 'os.path.join', (['mother_dir', 'folder'], {}), '(mother_dir, folder)\n', (1228, 1248), False, 'import os\n'), ((1538, 1560), 'utils.helper_functions.load_flags', 'load_flags', (['cur_folder'], {}), '(cur_folder)\n', (1548, 1560), False, 'from utils.helper_functions import load_flags\n'), ((4527, 4799), 'utils.plotsAnalysis.HeatMapBVL', 'plotsAnalysis.HeatMapBVL', (['key_a', 'key_b', "(key_a + '_' + key_b + '_HeatMap')"], {'save_name': "(mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value + '_heatmap.png')", 'HeatMap_dir': 'mother_dir', 'feature_1_name': 'key_a', 'feature_2_name': 'key_b', 'heat_value_name': 'heatmap_value'}), "(key_a, key_b, key_a + '_' + key_b + '_HeatMap',\n save_name=mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value +\n '_heatmap.png', HeatMap_dir=mother_dir, feature_1_name=key_a,\n feature_2_name=key_b, heat_value_name=heatmap_value)\n", (4551, 4799), False, 'from utils import plotsAnalysis\n'), ((1264, 1289), 'os.path.isdir', 'os.path.isdir', (['cur_folder'], {}), '(cur_folder)\n', (1277, 1289), False, 'import os\n'), ((1312, 1349), 'os.path.join', 'os.path.join', (['cur_folder', '"""flags.obj"""'], {}), "(cur_folder, 'flags.obj')\n", (1324, 1349), False, 'import os\n'), ((3729, 3751), 'numpy.unique', 'np.unique', (['length_list'], {}), '(length_list)\n', (3738, 3751), True, 'import numpy as np\n'), ((3823, 3856), 'numpy.unique', 'np.unique', (['num_node_in_layer_list'], {}), '(num_node_in_layer_list)\n', (3832, 3856), True, 'import numpy as np\n'), ((5602, 5874), 'utils.plotsAnalysis.HeatMapBVL', 'plotsAnalysis.HeatMapBVL', (['key_a', 'key_b', "(key_a + '_' + key_b + '_HeatMap')"], {'save_name': "(mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value + '_heatmap.png')", 'HeatMap_dir': 'mother_dir', 'feature_1_name': 'key_a', 'feature_2_name': 'key_b', 'heat_value_name': 'heatmap_value'}), "(key_a, key_b, key_a + '_' + key_b + '_HeatMap',\n save_name=mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value +\n '_heatmap.png', HeatMap_dir=mother_dir, feature_1_name=key_a,\n feature_2_name=key_b, heat_value_name=heatmap_value)\n", (5626, 5874), False, 'from utils import plotsAnalysis\n')]
|
from django.contrib import messages as notifications
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.db.models import F, Q
from django.db.models.functions import Coalesce
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse, reverse_lazy
from django.utils import timezone
from django.utils.translation import gettext, gettext_lazy as _
from django.views.generic import CreateView, FormView, UpdateView
from dictionary.forms.edit import EntryForm, PreferencesForm
from dictionary.models import Author, Comment, Entry, Topic
from dictionary.utils import time_threshold
class UserPreferences(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Author
form_class = PreferencesForm
template_name = "dictionary/user/preferences/index.html"
success_message = _("settings are saved, dear")
success_url = reverse_lazy("user_preferences")
def get_object(self, queryset=None):
return self.request.user
def form_invalid(self, form):
notifications.error(self.request, gettext("we couldn't handle your request. try again later."))
return super().form_invalid(form)
class EntryCreateMixin:
model = Entry
form_class = EntryForm
def form_valid(self, form):
"""
User sent new entry, whose topic may or may not be existent. If topic
exists, adds the entry and redirects to the entry permalink, otherwise
the topic is created if the title is valid. Entry.save() sets created_by
field of the topic.
"""
draft_pk = self.request.POST.get("pub_draft_pk", "")
publishing_draft = draft_pk.isdigit()
if (not publishing_draft) and (self.topic.exists and self.topic.is_banned):
# Cannot check is_banned before checking its existence.
notifications.error(self.request, _("we couldn't handle your request. try again later."))
return self.form_invalid(form)
status = self.request.user.entry_publishable_status
if status is not None:
notifications.error(self.request, status, extra_tags="persistent")
if publishing_draft:
return redirect(reverse("entry_update", kwargs={"pk": int(draft_pk)}))
return self.form_invalid(form)
if publishing_draft:
try:
entry = Entry.objects_all.get(
pk=int(draft_pk), is_draft=True, author=self.request.user, topic__is_banned=False
)
entry.content = form.cleaned_data["content"]
entry.is_draft = False
entry.date_created = timezone.now()
entry.date_edited = None
except Entry.DoesNotExist:
notifications.error(self.request, _("we couldn't handle your request. try again later."))
return self.form_invalid(form)
else:
# Creating a brand new entry.
entry = form.save(commit=False)
entry.author = self.request.user
if self.topic.exists:
entry.topic = self.topic
else:
if not self.topic.valid:
notifications.error(self.request, _("curses to such a topic anyway."), extra_tags="persistent")
return self.form_invalid(form)
entry.topic = Topic.objects.create_topic(title=self.topic.title)
entry.save()
notifications.info(self.request, _("the entry was successfully launched into stratosphere"))
return redirect(reverse("entry-permalink", kwargs={"entry_id": entry.id}))
def form_invalid(self, form):
if form.errors:
for err in form.errors["content"]:
notifications.error(self.request, err, extra_tags="persistent")
return super().form_invalid(form)
class EntryCreate(LoginRequiredMixin, EntryCreateMixin, FormView):
template_name = "dictionary/edit/entry_create.html"
def dispatch(self, request, *args, **kwargs):
self.extra_context = {"title": self.request.POST.get("title", "")}
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["recent_drafts"] = (
Entry.objects_all.filter(
Q(date_created__gte=time_threshold(hours=24)) | Q(date_edited__gte=time_threshold(hours=24)),
is_draft=True,
author=self.request.user,
)
.select_related("topic")
.only("topic__title", "date_created", "date_edited")
.alias(last_edited=Coalesce(F("date_edited"), F("date_created")))
.order_by("-last_edited")[:5]
)
return context
def form_valid(self, form):
if not self.request.POST.get("pub_draft_pk", "").isdigit():
# Topic object is only required if not publishing a draft.
self.topic = Topic.objects.get_or_pseudo(unicode_string=self.extra_context.get("title")) # noqa
return super().form_valid(form)
class EntryUpdate(LoginRequiredMixin, UpdateView):
model = Entry
form_class = EntryForm
template_name = "dictionary/edit/entry_update.html"
context_object_name = "entry"
def form_valid(self, form):
entry = form.save(commit=False)
if self.request.user.is_suspended or entry.topic.is_banned:
notifications.error(self.request, gettext("you lack the required permissions."))
return super().form_invalid(form)
if entry.is_draft:
status = self.request.user.entry_publishable_status
if status is not None:
notifications.error(self.request, status, extra_tags="persistent")
return super().form_invalid(form)
entry.is_draft = False
entry.date_created = timezone.now()
entry.date_edited = None
notifications.info(self.request, gettext("the entry was successfully launched into stratosphere"))
else:
entry.date_edited = timezone.now()
return super().form_valid(form)
def form_invalid(self, form):
for error in form.errors["content"]:
notifications.error(self.request, error)
return super().form_invalid(form)
def get_queryset(self):
return Entry.objects_all.filter(author=self.request.user)
class CommentMixin(LoginRequiredMixin, SuccessMessageMixin):
model = Comment
fields = ("content",)
template_name = "dictionary/edit/comment_form.html"
def form_invalid(self, form):
for error in form.errors["content"]:
notifications.error(self.request, error)
return super().form_invalid(form)
class CommentCreate(CommentMixin, CreateView):
success_message = _("the comment was successfully launched into stratosphere")
entry = None
def dispatch(self, request, *args, **kwargs):
self.entry = get_object_or_404(Entry.objects_published, pk=self.kwargs.get("pk"))
if not (
request.user.has_perm("dictionary.can_comment") and self.entry.topic.is_ama and request.user.is_accessible
):
raise Http404
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["entry"] = self.entry
return context
def form_valid(self, form):
comment = form.save(commit=False)
comment.author = self.request.user
comment.entry = self.entry
comment.save()
return super().form_valid(form)
class CommentUpdate(CommentMixin, UpdateView):
success_message = _("the comment has been updated")
def get_object(self, queryset=None):
return get_object_or_404(Comment, pk=self.kwargs.get(self.pk_url_kwarg), author=self.request.user)
def form_valid(self, form):
if self.request.POST.get("delete"):
self.object.delete()
notifications.success(self.request, gettext("the comment has been deleted"))
return redirect(self.object.entry.get_absolute_url())
if not self.request.user.is_accessible:
notifications.error(
self.request, gettext("you lack the permissions to edit this comment. you might as well delete it?")
)
return self.form_invalid(form)
comment = form.save(commit=False)
comment.date_edited = timezone.now()
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["entry"] = self.object.entry
context["updating"] = True
return context
|
[
"dictionary.utils.time_threshold",
"django.utils.translation.gettext",
"django.utils.translation.gettext_lazy",
"django.utils.timezone.now",
"django.urls.reverse_lazy",
"django.contrib.messages.error",
"dictionary.models.Entry.objects_all.filter",
"django.urls.reverse",
"django.db.models.F",
"dictionary.models.Topic.objects.create_topic"
] |
[((933, 962), 'django.utils.translation.gettext_lazy', '_', (['"""settings are saved, dear"""'], {}), "('settings are saved, dear')\n", (934, 962), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((981, 1013), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""user_preferences"""'], {}), "('user_preferences')\n", (993, 1013), False, 'from django.urls import reverse, reverse_lazy\n'), ((6967, 7027), 'django.utils.translation.gettext_lazy', '_', (['"""the comment was successfully launched into stratosphere"""'], {}), "('the comment was successfully launched into stratosphere')\n", (6968, 7027), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((7863, 7896), 'django.utils.translation.gettext_lazy', '_', (['"""the comment has been updated"""'], {}), "('the comment has been updated')\n", (7864, 7896), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((6505, 6555), 'dictionary.models.Entry.objects_all.filter', 'Entry.objects_all.filter', ([], {'author': 'self.request.user'}), '(author=self.request.user)\n', (6529, 6555), False, 'from dictionary.models import Author, Comment, Entry, Topic\n'), ((8640, 8654), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (8652, 8654), False, 'from django.utils import timezone\n'), ((1166, 1226), 'django.utils.translation.gettext', 'gettext', (['"""we couldn\'t handle your request. try again later."""'], {}), '("we couldn\'t handle your request. try again later.")\n', (1173, 1226), False, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((2175, 2241), 'django.contrib.messages.error', 'notifications.error', (['self.request', 'status'], {'extra_tags': '"""persistent"""'}), "(self.request, status, extra_tags='persistent')\n", (2194, 2241), True, 'from django.contrib import messages as notifications\n'), ((3596, 3654), 'django.utils.translation.gettext_lazy', '_', (['"""the entry was successfully launched into stratosphere"""'], {}), "('the entry was successfully launched into stratosphere')\n", (3597, 3654), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((3680, 3737), 'django.urls.reverse', 'reverse', (['"""entry-permalink"""'], {'kwargs': "{'entry_id': entry.id}"}), "('entry-permalink', kwargs={'entry_id': entry.id})\n", (3687, 3737), False, 'from django.urls import reverse, reverse_lazy\n'), ((6020, 6034), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6032, 6034), False, 'from django.utils import timezone\n'), ((6229, 6243), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (6241, 6243), False, 'from django.utils import timezone\n'), ((6377, 6417), 'django.contrib.messages.error', 'notifications.error', (['self.request', 'error'], {}), '(self.request, error)\n', (6396, 6417), True, 'from django.contrib import messages as notifications\n'), ((6813, 6853), 'django.contrib.messages.error', 'notifications.error', (['self.request', 'error'], {}), '(self.request, error)\n', (6832, 6853), True, 'from django.contrib import messages as notifications\n'), ((1971, 2025), 'django.utils.translation.gettext_lazy', '_', (['"""we couldn\'t handle your request. try again later."""'], {}), '("we couldn\'t handle your request. try again later.")\n', (1972, 2025), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((2756, 2770), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2768, 2770), False, 'from django.utils import timezone\n'), ((3482, 3532), 'dictionary.models.Topic.objects.create_topic', 'Topic.objects.create_topic', ([], {'title': 'self.topic.title'}), '(title=self.topic.title)\n', (3508, 3532), False, 'from dictionary.models import Author, Comment, Entry, Topic\n'), ((3861, 3924), 'django.contrib.messages.error', 'notifications.error', (['self.request', 'err'], {'extra_tags': '"""persistent"""'}), "(self.request, err, extra_tags='persistent')\n", (3880, 3924), True, 'from django.contrib import messages as notifications\n'), ((5597, 5642), 'django.utils.translation.gettext', 'gettext', (['"""you lack the required permissions."""'], {}), "('you lack the required permissions.')\n", (5604, 5642), False, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((5834, 5900), 'django.contrib.messages.error', 'notifications.error', (['self.request', 'status'], {'extra_tags': '"""persistent"""'}), "(self.request, status, extra_tags='persistent')\n", (5853, 5900), True, 'from django.contrib import messages as notifications\n'), ((6117, 6181), 'django.utils.translation.gettext', 'gettext', (['"""the entry was successfully launched into stratosphere"""'], {}), "('the entry was successfully launched into stratosphere')\n", (6124, 6181), False, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((8204, 8243), 'django.utils.translation.gettext', 'gettext', (['"""the comment has been deleted"""'], {}), "('the comment has been deleted')\n", (8211, 8243), False, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((8423, 8519), 'django.utils.translation.gettext', 'gettext', (['"""you lack the permissions to edit this comment. you might as well delete it?"""'], {}), "(\n 'you lack the permissions to edit this comment. you might as well delete it?'\n )\n", (8430, 8519), False, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((2901, 2955), 'django.utils.translation.gettext_lazy', '_', (['"""we couldn\'t handle your request. try again later."""'], {}), '("we couldn\'t handle your request. try again later.")\n', (2902, 2955), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((3338, 3373), 'django.utils.translation.gettext_lazy', '_', (['"""curses to such a topic anyway."""'], {}), "('curses to such a topic anyway.')\n", (3339, 3373), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((4787, 4803), 'django.db.models.F', 'F', (['"""date_edited"""'], {}), "('date_edited')\n", (4788, 4803), False, 'from django.db.models import F, Q\n'), ((4805, 4822), 'django.db.models.F', 'F', (['"""date_created"""'], {}), "('date_created')\n", (4806, 4822), False, 'from django.db.models import F, Q\n'), ((4484, 4508), 'dictionary.utils.time_threshold', 'time_threshold', ([], {'hours': '(24)'}), '(hours=24)\n', (4498, 4508), False, 'from dictionary.utils import time_threshold\n'), ((4531, 4555), 'dictionary.utils.time_threshold', 'time_threshold', ([], {'hours': '(24)'}), '(hours=24)\n', (4545, 4555), False, 'from dictionary.utils import time_threshold\n')]
|
#!/usr/bin/env python
# Copyright 2019 <NAME>
#
# This file is part of RfPy.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Import modules and functions
import numpy as np
import pickle
import stdb
from obspy.clients.fdsn import Client
from obspy.core import Stream, UTCDateTime
from rfpy import arguments, binning, plotting
from rfpy import CCPimage
from pathlib import Path
def main():
print()
print("############################################")
print("# __ #")
print("# _ __ / _|_ __ _ _ ___ ___ _ __ #")
print("# | '__| |_| '_ \| | | | / __/ __| '_ \ #")
print("# | | | _| |_) | |_| | | (_| (__| |_) | #")
print("# |_| |_| | .__/ \__, |___\___\___| .__/ #")
print("# |_| |___/_____| |_| #")
print("# #")
print("############################################")
print()
# Run Input Parser
args = arguments.get_ccp_arguments()
# Load Database
db = stdb.io.load_db(fname=args.indb)
# Construct station key loop
allkeys = db.keys()
# Extract key subset
if len(args.stkeys) > 0:
stkeys = []
for skey in args.stkeys:
stkeys.extend([s for s in allkeys if skey in s])
else:
stkeys = db.keys()
if args.load:
# Check if CCPimage object exists and whether overwrite has been set
load_file = Path('CCP_load.pkl')
if load_file.is_file() and not args.ovr:
ccpfile = open(load_file, "rb")
ccpimage = pickle.load(ccpfile)
ccpfile.close()
else:
print()
print("|-----------------------------------------------|")
print("| Loading data |")
print("|-----------------------------------------------|")
print("| Gridding: ")
print("| start = {0:5.1f},{1:6.1f}".format(
args.coord_start[0],args.coord_start[1]))
print("| end = {0:5.1f},{1:6.1f}".format(
args.coord_end[0],args.coord_end[1]))
print("| dz = {0} (km)".format(str(args.dz)))
print("| dx = {0} (km)".format(str(args.dx)))
print()
# Initialize CCPimage object
ccpimage = CCPimage(coord_start=args.coord_start,
coord_end=args.coord_end,
dz=args.dz, dx=args.dx)
# Loop over station keys
for stkey in list(stkeys):
# Extract station information from dictionary
sta = db[stkey]
# Define path to see if it exists
if args.phase in ['P', 'PP', 'allP']:
datapath = Path('P_DATA') / stkey
elif args.phase in ['S', 'SKS', 'allS']:
datapath = Path('S_DATA') / stkey
if not datapath.is_dir():
print('Path to ' + str(datapath) + ' doesn`t exist - continuing')
continue
# Temporary print locations
tlocs = sta.location
if len(tlocs) == 0:
tlocs = ['']
for il in range(0, len(tlocs)):
if len(tlocs[il]) == 0:
tlocs[il] = "--"
sta.location = tlocs
rfRstream = Stream()
datafiles = [x for x in datapath.iterdir() if x.is_dir()]
for folder in datafiles:
# Skip hidden folders
if folder.name.startswith('.'):
continue
# Load meta data
filename = folder / "Meta_Data.pkl"
if not filename.is_file():
continue
metafile = open(filename, 'rb')
meta = pickle.load(metafile)
metafile.close()
# Skip data not in list of phases
if meta.phase not in args.listphase:
continue
# QC Thresholding
if meta.snrh < args.snrh:
continue
if meta.snr < args.snr:
continue
if meta.cc < args.cc:
continue
# If everything passed, load the RF data
filename = folder / "RF_Data.pkl"
if filename.is_file():
file = open(filename, "rb")
rfdata = pickle.load(file)
rfRstream.append(rfdata[1])
file.close()
if len(rfRstream) == 0:
continue
if args.no_outl:
t1 = 0.
t2 = 30.
varR = []
for i in range(len(rfRstream)):
taxis = rfRstream[i].stats.taxis
tselect = (taxis > t1) & (taxis < t2)
varR.append(np.var(rfRstream[i].data[tselect]))
varR = np.array(varR)
# Remove outliers wrt variance within time range
medvarR = np.median(varR)
madvarR = 1.4826*np.median(np.abs(varR-medvarR))
robustR = np.abs((varR-medvarR)/madvarR)
outliersR = np.arange(len(rfRstream))[robustR > 2.5]
for i in outliersR[::-1]:
rfRstream.remove(rfRstream[i])
print("Station: {0:>2s}.{1:5s} - {2} traces loaded".format(
sta.network, sta.station, len(rfRstream)))
if len(rfRstream)==0:
continue
ccpimage.add_rfstream(rfRstream)
if len(ccpimage.radialRF) > 0:
ccpimage.save("CCP_load.pkl")
ccpimage.is_ready_for_prep = True
print()
print("CCPimage saved to 'CCP_load.pkl'")
else:
ccpimage.is_ready_for_prep = False
else:
pass
if args.prep:
prep_file = Path("CCP_prep.pkl")
if prep_file.is_file() and not args.ovr:
ccpfile = open(prep_file, 'rb')
ccpimage = pickle.load(ccpfile)
ccpfile.close()
else:
load_file = Path('CCP_load.pkl')
if not load_file.is_file():
raise(Exception("No CCP_load.pkl file available - aborting"))
else:
print()
print("|-----------------------------------------------|")
print("| Preparing data before stacking |")
print("|-----------------------------------------------|")
print("| Frequencies: ")
print("| f1 = {0:4.2f} (Hz)".format(args.f1))
print("| f2ps = {0:4.2f} (Hz)".format(args.f2ps))
print("| f2pps = {0:4.2f} (Hz)".format(args.f2pps))
print("| f2pss = {0:4.2f} (Hz)".format(args.f2pss))
print("| Binning: ")
print("| nbaz = {0}".format(str(args.nbaz)))
print("| nslow = {0}".format(str(args.nslow)))
print()
ccpfile = open(load_file,"rb")
ccpimage = pickle.load(ccpfile)
ccpfile.close()
ccpimage.prep_data(f1=args.f1, f2ps=args.f2ps,
f2pps=args.f2pps, f2pss=args.f2pss,
nbaz=args.nbaz, nslow=args.nslow)
ccpimage.is_ready_for_prestack = True
ccpimage.save(prep_file)
print()
print("CCPimage saved to {0}".format(str(prep_file)))
else:
pass
if args.prestack:
prestack_file = Path("CCP_prestack.pkl")
if prestack_file.is_file() and not args.ovr:
ccpfile = open(prestack_file, 'rb')
ccpimage = pickle.load(ccpfile)
ccpfile.close()
else:
prep_file = Path("CCP_prep.pkl")
if not prep_file.is_file():
raise(Exception("No CCP_prep.pkl file available - aborting"))
else:
print()
print("|-----------------------------------------------|")
print("| CCP pre-stacking each phase |")
print("|-----------------------------------------------|")
print()
ccpfile = open(prep_file, 'rb')
ccpimage = pickle.load(ccpfile)
ccpfile.close()
ccpimage.prestack()
ccpimage.save(prestack_file)
print()
print("CCPimage saved to {0}".format(str(prestack_file)))
else:
pass
if args.ccp:
ccp_file = Path("CCP_stack.pkl")
if ccp_file.is_file() and not args.ovr:
ccpfile = open(ccp_file, 'rb')
ccpimage = pickle.load(ccpfile)
ccpfile.close()
else:
prestack_file = Path("CCP_prestack.pkl")
if not prestack_file.is_file():
raise(Exception("No CCP_prestack.pkl file available - aborting"))
else:
if args.linear:
print()
print("|-----------------------------------------------|")
print("| Linear CCP stack - all phases |")
print("|-----------------------------------------------|")
print()
elif args.pws:
print()
print("|-----------------------------------------------|")
print("| Phase-weighted CCP stack - all phases |")
print("|-----------------------------------------------|")
print()
ccpfile = open(prestack_file, 'rb')
ccpimage = pickle.load(ccpfile)
ccpfile.close()
ccpimage.ccp()
if args.linear:
if args.weights:
ccpimage.weights = args.weights
ccpimage.linear_stack(typ='ccp')
elif args.pws:
if args.weights:
ccpimage.weights = args.weights
ccpimage.phase_weighted_stack(typ='ccp')
ccpimage.save(ccp_file)
print()
print("CCPimage saved to {0}".format(str(ccp_file)))
if args.ccp_figure:
ccpimage.plot_ccp(save=args.save_figure, fmt=args.fmt,
vmin=-1.*args.cbound, vmax=args.cbound, title=args.title)
else:
pass
if args.gccp:
gccp_file = Path("GCCP_stack.pkl")
if gccp_file.is_file() and not args.ovr:
ccpfile = open(gccp_file, 'rb')
ccpimage = pickle.load(ccpfile)
ccpfile.close()
else:
prestack_file = Path("CCP_prestack.pkl")
if not prestack_file.is_file():
raise(Exception("No CCP_prestack.pkl file available - aborting"))
else:
if args.linear:
print()
print("|-----------------------------------------------|")
print("| Linear GCCP stack - all phases |")
print("|-----------------------------------------------|")
print()
elif args.pws:
print()
print("|-----------------------------------------------|")
print("| Phase-weighted GCCP stack - all phases |")
print("|-----------------------------------------------|")
print()
ccpfile = open(prestack_file, 'rb')
ccpimage = pickle.load(ccpfile)
ccpfile.close()
ccpimage.gccp(wlen=args.wlen)
if args.linear:
if args.weights:
ccpimage.weights = args.weights
ccpimage.linear_stack(typ='gccp')
elif args.pws:
if args.weights:
ccpimage.weights = args.weights
ccpimage.phase_weighted_stack(typ='gccp')
ccpimage.save(gccp_file)
print()
print("CCPimage saved to {0}".format(str(gccp_file)))
if args.ccp_figure:
ccpimage.plot_gccp(save=args.save_figure, fmt=args.fmt,
vmin=-1.*args.cbound, vmax=args.cbound, title=args.title)
else:
pass
if __name__ == "__main__":
# Run main program
main()
|
[
"numpy.abs",
"rfpy.arguments.get_ccp_arguments",
"numpy.median",
"rfpy.CCPimage",
"pathlib.Path",
"pickle.load",
"obspy.core.Stream",
"numpy.array",
"numpy.var",
"stdb.io.load_db"
] |
[((1987, 2016), 'rfpy.arguments.get_ccp_arguments', 'arguments.get_ccp_arguments', ([], {}), '()\n', (2014, 2016), False, 'from rfpy import arguments, binning, plotting\n'), ((2047, 2079), 'stdb.io.load_db', 'stdb.io.load_db', ([], {'fname': 'args.indb'}), '(fname=args.indb)\n', (2062, 2079), False, 'import stdb\n'), ((2461, 2481), 'pathlib.Path', 'Path', (['"""CCP_load.pkl"""'], {}), "('CCP_load.pkl')\n", (2465, 2481), False, 'from pathlib import Path\n'), ((7325, 7345), 'pathlib.Path', 'Path', (['"""CCP_prep.pkl"""'], {}), "('CCP_prep.pkl')\n", (7329, 7345), False, 'from pathlib import Path\n'), ((9067, 9091), 'pathlib.Path', 'Path', (['"""CCP_prestack.pkl"""'], {}), "('CCP_prestack.pkl')\n", (9071, 9091), False, 'from pathlib import Path\n'), ((10103, 10124), 'pathlib.Path', 'Path', (['"""CCP_stack.pkl"""'], {}), "('CCP_stack.pkl')\n", (10107, 10124), False, 'from pathlib import Path\n'), ((12042, 12064), 'pathlib.Path', 'Path', (['"""GCCP_stack.pkl"""'], {}), "('GCCP_stack.pkl')\n", (12046, 12064), False, 'from pathlib import Path\n'), ((2598, 2618), 'pickle.load', 'pickle.load', (['ccpfile'], {}), '(ccpfile)\n', (2609, 2618), False, 'import pickle\n'), ((3383, 3475), 'rfpy.CCPimage', 'CCPimage', ([], {'coord_start': 'args.coord_start', 'coord_end': 'args.coord_end', 'dz': 'args.dz', 'dx': 'args.dx'}), '(coord_start=args.coord_start, coord_end=args.coord_end, dz=args.dz,\n dx=args.dx)\n', (3391, 3475), False, 'from rfpy import CCPimage\n'), ((7462, 7482), 'pickle.load', 'pickle.load', (['ccpfile'], {}), '(ccpfile)\n', (7473, 7482), False, 'import pickle\n'), ((7549, 7569), 'pathlib.Path', 'Path', (['"""CCP_load.pkl"""'], {}), "('CCP_load.pkl')\n", (7553, 7569), False, 'from pathlib import Path\n'), ((9216, 9236), 'pickle.load', 'pickle.load', (['ccpfile'], {}), '(ccpfile)\n', (9227, 9236), False, 'import pickle\n'), ((9303, 9323), 'pathlib.Path', 'Path', (['"""CCP_prep.pkl"""'], {}), "('CCP_prep.pkl')\n", (9307, 9323), False, 'from pathlib import Path\n'), ((10239, 10259), 'pickle.load', 'pickle.load', (['ccpfile'], {}), '(ccpfile)\n', (10250, 10259), False, 'import pickle\n'), ((10330, 10354), 'pathlib.Path', 'Path', (['"""CCP_prestack.pkl"""'], {}), "('CCP_prestack.pkl')\n", (10334, 10354), False, 'from pathlib import Path\n'), ((12181, 12201), 'pickle.load', 'pickle.load', (['ccpfile'], {}), '(ccpfile)\n', (12192, 12201), False, 'import pickle\n'), ((12272, 12296), 'pathlib.Path', 'Path', (['"""CCP_prestack.pkl"""'], {}), "('CCP_prestack.pkl')\n", (12276, 12296), False, 'from pathlib import Path\n'), ((4485, 4493), 'obspy.core.Stream', 'Stream', ([], {}), '()\n', (4491, 4493), False, 'from obspy.core import Stream, UTCDateTime\n'), ((8549, 8569), 'pickle.load', 'pickle.load', (['ccpfile'], {}), '(ccpfile)\n', (8560, 8569), False, 'import pickle\n'), ((9809, 9829), 'pickle.load', 'pickle.load', (['ccpfile'], {}), '(ccpfile)\n', (9820, 9829), False, 'import pickle\n'), ((11228, 11248), 'pickle.load', 'pickle.load', (['ccpfile'], {}), '(ccpfile)\n', (11239, 11248), False, 'import pickle\n'), ((13170, 13190), 'pickle.load', 'pickle.load', (['ccpfile'], {}), '(ccpfile)\n', (13181, 13190), False, 'import pickle\n'), ((4991, 5012), 'pickle.load', 'pickle.load', (['metafile'], {}), '(metafile)\n', (5002, 5012), False, 'import pickle\n'), ((6278, 6292), 'numpy.array', 'np.array', (['varR'], {}), '(varR)\n', (6286, 6292), True, 'import numpy as np\n'), ((6393, 6408), 'numpy.median', 'np.median', (['varR'], {}), '(varR)\n', (6402, 6408), True, 'import numpy as np\n'), ((6508, 6542), 'numpy.abs', 'np.abs', (['((varR - medvarR) / madvarR)'], {}), '((varR - medvarR) / madvarR)\n', (6514, 6542), True, 'import numpy as np\n'), ((3844, 3858), 'pathlib.Path', 'Path', (['"""P_DATA"""'], {}), "('P_DATA')\n", (3848, 3858), False, 'from pathlib import Path\n'), ((5709, 5726), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5720, 5726), False, 'import pickle\n'), ((3955, 3969), 'pathlib.Path', 'Path', (['"""S_DATA"""'], {}), "('S_DATA')\n", (3959, 3969), False, 'from pathlib import Path\n'), ((6215, 6249), 'numpy.var', 'np.var', (['rfRstream[i].data[tselect]'], {}), '(rfRstream[i].data[tselect])\n', (6221, 6249), True, 'import numpy as np\n'), ((6456, 6478), 'numpy.abs', 'np.abs', (['(varR - medvarR)'], {}), '(varR - medvarR)\n', (6462, 6478), True, 'import numpy as np\n')]
|
from . import models
from . import schema
import re
import magic
import mimetypes
import boto3
from botocore.client import Config
from mongoengine import connect
from pydub import AudioSegment
import io
import hashlib
from base64 import urlsafe_b64encode
#MONGO_URI = f'mongodb://{MONGO_USERNAME}:{MONGO_PASSWORD}@{MONGO_IP}/{MONGO_DB}?authSource={MONGO_AUTH_DB}'
config = None
'''
Defaults
Modified when init_app() called
'''
REGION = 'sfo2'
STATIC_FILE_BASE_URL = f'https://{REGION}.digitaloceanspaces.com'
session = boto3.session.Session()
client = session.client('s3',
region_name=REGION,
endpoint_url=STATIC_FILE_BASE_URL,
aws_access_key_id='<KEY>',
aws_secret_access_key='<KEY>')
BUCKET = 'ultracast-files'
FILE_ACCESS = 'public-read'
def init_app(app):
'''
Init based off apps config
'''
config = app.config
REGION = app.config["S3"]["REGION"]
STATIC_FILE_BASE_URL = f'https://{REGION}.digitaloceanspaces.com'
client = session.client('s3',
region_name=REGION,
endpoint_url=STATIC_FILE_BASE_URL,
aws_access_key_id=app.config["S3"]["AWS_ACCESS_KEY"],
aws_secret_access_key=app.config["S3"]["AWS_SECRET_ACCESS_KEY"])
BUCKET = app.config["S3"]["BUCKET"]
FILE_ACCESS = app.config["S3"]["FILE_ACCESS"]
def connect_mongo(app_config):
mongo_uri = "mongodb://{u}:{p}@{ip}/{db}?authSource={auth_db}".format(
u=app_config["MONGO_USERNAME"], p=app_config["MONGO_PASSWORD"],
ip=app_config["MONGO_IP"], db=app_config["MONGO_DB"], auth_db=app_config["MONGO_AUTH_DB"])
connect(host=mongo_uri)
# Digital Ocean Space (Static-Files)
class IllegalMimeException(Exception):
pass
def get_bucket_url():
return re.sub(r"^https://", f"https://{BUCKET}.", STATIC_FILE_BASE_URL)
def get_file_url(filename):
return get_bucket_url() + f"/{filename}"
def get_key_from_url(url):
return re.sub(get_bucket_url() + "/", "", url)
def get_key_from_binary_data(data, ext=""):
return urlsafe_b64encode(hashlib.sha256(data).digest()).decode('UTF-8') + ext
def check_status(resp, ok_statuses, op):
if resp['ResponseMetadata']['HTTPStatusCode'] not in ok_statuses:
raise Exception(f"Error for operation [{op}] - Response: {resp}")
def file_exists(key):
try:
client.head_object(Bucket=BUCKET, Key=key)
return True
except:
return False
def url_exists(url):
return file_exists(get_key_from_url(url))
def get_key(data, key=None, ext=""):
if key is None:
return get_key_from_binary_data(data, ext)
else:
return key
def check_mime(data, valid_mimes):
try:
mime_type = magic.from_buffer(data, mime=True)
except:
raise IllegalMimeException(f"Could not interpret MIME type of payload")
if mime_type not in valid_mimes:
raise IllegalMimeException(f"MIME type {mime_type} not allowed")
return mime_type
def add_file(data, key=None, valid_mimes=[], override=False):
mime_type = check_mime(data, valid_mimes)
extension = mimetypes.guess_extension(mime_type)
key = get_key(data, key, extension)
if not override and file_exists(key):
return get_file_url(key)
resp = client.put_object(
Body=data,
Bucket=BUCKET,
Key=key,
ACL=FILE_ACCESS,
ContentType=mime_type)
check_status(resp, [200], 'Add File')
return get_file_url(key)
def remove_file(url, key=None):
if key is None:
resp = client.delete_object(Bucket=BUCKET, Key=get_key_from_url(url))
else:
resp = client.delete_object(Bucket=BUCKET, Key=key)
check_status(resp, [200, 204], 'Remove File')
def update_file(old_url, data, new_key=None, valid_mimes=[]):
if url_exists(old_url):
remove_file(old_url)
return add_file(data, new_key, valid_mimes)
def audio_file_duration_secs(data):
try:
audio = AudioSegment.from_file(io.BytesIO(data), format="mp3")
return int(round(audio.duration_seconds))
except:
return -1
|
[
"io.BytesIO",
"mongoengine.connect",
"hashlib.sha256",
"magic.from_buffer",
"boto3.session.Session",
"mimetypes.guess_extension",
"re.sub"
] |
[((523, 546), 'boto3.session.Session', 'boto3.session.Session', ([], {}), '()\n', (544, 546), False, 'import boto3\n'), ((1745, 1768), 'mongoengine.connect', 'connect', ([], {'host': 'mongo_uri'}), '(host=mongo_uri)\n', (1752, 1768), False, 'from mongoengine import connect\n'), ((1893, 1956), 're.sub', 're.sub', (['"""^https://"""', 'f"""https://{BUCKET}."""', 'STATIC_FILE_BASE_URL'], {}), "('^https://', f'https://{BUCKET}.', STATIC_FILE_BASE_URL)\n", (1899, 1956), False, 'import re\n'), ((3224, 3260), 'mimetypes.guess_extension', 'mimetypes.guess_extension', (['mime_type'], {}), '(mime_type)\n', (3249, 3260), False, 'import mimetypes\n'), ((2839, 2873), 'magic.from_buffer', 'magic.from_buffer', (['data'], {'mime': '(True)'}), '(data, mime=True)\n', (2856, 2873), False, 'import magic\n'), ((4100, 4116), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (4110, 4116), False, 'import io\n'), ((2188, 2208), 'hashlib.sha256', 'hashlib.sha256', (['data'], {}), '(data)\n', (2202, 2208), False, 'import hashlib\n')]
|
import copy
from django.conf import settings
from django.db.models import Sum, Count, F
from rest_framework.response import Response
from rest_framework.views import APIView
from usaspending_api.awards.models_matviews import UniversalAwardView
from usaspending_api.awards.v2.filters.matview_filters import matview_search_filter
from usaspending_api.awards.v2.filters.sub_award import subaward_filter
from usaspending_api.awards.v2.filters.view_selector import spending_by_award_count
from usaspending_api.awards.v2.lookups.lookups import (contract_type_mapping, loan_type_mapping,
non_loan_assistance_type_mapping, grant_type_mapping,
contract_subaward_mapping, grant_subaward_mapping,
idv_type_mapping)
from usaspending_api.awards.v2.lookups.matview_lookups import (award_contracts_mapping, loan_award_mapping,
non_loan_assistance_award_mapping, award_idv_mapping)
from usaspending_api.common.api_versioning import api_transformations, API_TRANSFORM_FUNCTIONS
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.exceptions import InvalidParameterException, UnprocessableEntityException
from usaspending_api.core.validator.award_filter import AWARD_FILTER
from usaspending_api.core.validator.pagination import PAGINATION
from usaspending_api.core.validator.tinyshield import TinyShield
@api_transformations(api_version=settings.API_VERSION, function_list=API_TRANSFORM_FUNCTIONS)
class SpendingByAwardVisualizationViewSet(APIView):
"""
This route takes award filters and fields, and returns the fields of the filtered awards.
endpoint_doc: /advanced_award_search/spending_by_award.md
"""
@cache_response()
def post(self, request):
"""Return all awards matching the provided filters and limits"""
models = [
{'name': 'fields', 'key': 'fields', 'type': 'array', 'array_type': 'text', 'text_type': 'search', 'min': 1},
{'name': 'subawards', 'key': 'subawards', 'type': 'boolean', 'default': False}
]
models.extend(copy.deepcopy(AWARD_FILTER))
models.extend(copy.deepcopy(PAGINATION))
for m in models:
if m['name'] in ('award_type_codes', 'fields'):
m['optional'] = False
json_request = TinyShield(models).block(request.data)
fields = json_request["fields"]
filters = json_request.get("filters", {})
subawards = json_request["subawards"]
order = json_request["order"]
limit = json_request["limit"]
page = json_request["page"]
if "no intersection" in filters["award_type_codes"]:
# "Special case": there will never be results when the website provides this value
return Response({
"limit": limit,
"results": [],
"page_metadata": {"page": page, "hasNext": False},
})
sort = json_request.get("sort", fields[0])
if sort not in fields:
raise InvalidParameterException("Sort value '{}' not found in requested fields: {}".format(sort, fields))
subawards_values = list(contract_subaward_mapping.keys()) + list(grant_subaward_mapping.keys())
awards_values = list(award_contracts_mapping.keys()) + list(loan_award_mapping.keys()) + \
list(non_loan_assistance_award_mapping.keys()) + list(award_idv_mapping.keys())
msg = "Sort value '{}' not found in {{}} mappings: {{}}".format(sort)
if not subawards and sort not in awards_values:
raise InvalidParameterException(msg.format("award", awards_values))
elif subawards and sort not in subawards_values:
raise InvalidParameterException(msg.format("subaward", subawards_values))
# build sql query filters
if subawards:
queryset = subaward_filter(filters)
values = {'subaward_number', 'piid', 'fain', 'award_type'}
for field in fields:
if contract_subaward_mapping.get(field):
values.add(contract_subaward_mapping.get(field))
if grant_subaward_mapping.get(field):
values.add(grant_subaward_mapping.get(field))
else:
queryset = matview_search_filter(filters, UniversalAwardView).values()
values = {'award_id', 'piid', 'fain', 'uri', 'type'}
for field in fields:
if award_contracts_mapping.get(field):
values.add(award_contracts_mapping.get(field))
if loan_award_mapping.get(field):
values.add(loan_award_mapping.get(field))
if non_loan_assistance_award_mapping.get(field):
values.add(non_loan_assistance_award_mapping.get(field))
if award_idv_mapping.get(field):
values.add(award_idv_mapping.get(field))
# Modify queryset to be ordered by requested "sort" in the request or default value(s)
if sort:
if subawards:
if set(filters["award_type_codes"]) <= set(contract_type_mapping): # Subaward contracts
sort_filters = [contract_subaward_mapping[sort]]
elif set(filters["award_type_codes"]) <= set(grant_type_mapping): # Subaward grants
sort_filters = [grant_subaward_mapping[sort]]
else:
msg = 'Award Type codes limited for Subawards. Only contracts {} or grants {} are available'
msg = msg.format(list(contract_type_mapping.keys()), list(grant_type_mapping.keys()))
raise UnprocessableEntityException(msg)
else:
if set(filters["award_type_codes"]) <= set(contract_type_mapping): # contracts
sort_filters = [award_contracts_mapping[sort]]
elif set(filters["award_type_codes"]) <= set(loan_type_mapping): # loans
sort_filters = [loan_award_mapping[sort]]
elif set(filters["award_type_codes"]) <= set(idv_type_mapping): # idvs
sort_filters = [award_idv_mapping[sort]]
else: # assistance data
sort_filters = [non_loan_assistance_award_mapping[sort]]
# Explictly set NULLS LAST in the ordering to encourage the usage of the indexes
if sort == "Award ID" and subawards:
if order == "desc":
queryset = queryset.order_by(
F('award__piid').desc(nulls_last=True),
F('award__fain').desc(nulls_last=True)).values(*list(values))
else:
queryset = queryset.order_by(
F('award__piid').asc(nulls_last=True),
F('award__fain').asc(nulls_last=True)).values(*list(values))
elif sort == "Award ID":
if order == "desc":
queryset = queryset.order_by(
F('piid').desc(nulls_last=True),
F('fain').desc(nulls_last=True),
F('uri').desc(nulls_last=True)).values(*list(values))
else:
queryset = queryset.order_by(
F('piid').asc(nulls_last=True),
F('fain').asc(nulls_last=True),
F('uri').asc(nulls_last=True)).values(*list(values))
elif order == "desc":
queryset = queryset.order_by(F(sort_filters[0]).desc(nulls_last=True)).values(*list(values))
else:
queryset = queryset.order_by(F(sort_filters[0]).asc(nulls_last=True)).values(*list(values))
limited_queryset = queryset[(page - 1) * limit:page * limit + 1] # lower limit : upper limit
has_next = len(limited_queryset) > limit
results = []
for award in limited_queryset[:limit]:
if subawards:
row = {"internal_id": award["subaward_number"]}
if award['award_type'] == 'procurement':
for field in fields:
row[field] = award.get(contract_subaward_mapping[field])
elif award['award_type'] == 'grant':
for field in fields:
row[field] = award.get(grant_subaward_mapping[field])
else:
row = {"internal_id": award["award_id"]}
if award['type'] in loan_type_mapping: # loans
for field in fields:
row[field] = award.get(loan_award_mapping.get(field))
elif award['type'] in non_loan_assistance_type_mapping: # assistance data
for field in fields:
row[field] = award.get(non_loan_assistance_award_mapping.get(field))
elif award['type'] in idv_type_mapping:
for field in fields:
row[field] = award.get(award_idv_mapping.get(field))
elif (award['type'] is None and award['piid']) or award['type'] in contract_type_mapping:
# IDV + contract
for field in fields:
row[field] = award.get(award_contracts_mapping.get(field))
if "Award ID" in fields:
for id_type in ["piid", "fain", "uri"]:
if award[id_type]:
row["Award ID"] = award[id_type]
break
results.append(row)
return Response({"limit": limit, "results": results, "page_metadata": {"page": page, "hasNext": has_next}})
@api_transformations(api_version=settings.API_VERSION, function_list=API_TRANSFORM_FUNCTIONS)
class SpendingByAwardCountVisualizationViewSet(APIView):
"""
This route takes award filters, and returns the number of awards in each award type (Contracts, Loans, Grants, etc.)
endpoint_doc: /advanced_award_search/spending_by_award_count.md
"""
@cache_response()
def post(self, request):
models = [{'name': 'subawards', 'key': 'subawards', 'type': 'boolean', 'default': False}]
models.extend(copy.deepcopy(AWARD_FILTER))
models.extend(copy.deepcopy(PAGINATION))
json_request = TinyShield(models).block(request.data)
filters = json_request.get("filters", None)
subawards = json_request["subawards"]
if filters is None:
raise InvalidParameterException("Missing required request parameters: 'filters'")
results = {
"contracts": 0, "idvs": 0, "grants": 0, "direct_payments": 0, "loans": 0, "other": 0
} if not subawards else {
"subcontracts": 0, "subgrants": 0
}
if "award_type_codes" in filters and "no intersection" in filters["award_type_codes"]:
# "Special case": there will never be results when the website provides this value
return Response({"results": results})
if subawards:
queryset = subaward_filter(filters)
else:
queryset, model = spending_by_award_count(filters)
if subawards:
queryset = queryset.values('award_type').annotate(category_count=Count('subaward_id'))
elif model == 'SummaryAwardView':
queryset = queryset.values('category').annotate(category_count=Sum('counts'))
else:
queryset = queryset.values('category').annotate(category_count=Count('category'))
categories = {
'contract': 'contracts',
'idv': 'idvs',
'grant': 'grants',
'direct payment': 'direct_payments',
'loans': 'loans',
'other': 'other'
} if not subawards else {'procurement': 'subcontracts', 'grant': 'subgrants'}
category_name = 'category' if not subawards else 'award_type'
# DB hit here
for award in queryset:
if award[category_name] is None:
result_key = 'other' if not subawards else 'subcontracts'
elif award[category_name] not in categories.keys():
result_key = 'other'
else:
result_key = categories[award[category_name]]
results[result_key] += award['category_count']
return Response({"results": results})
|
[
"usaspending_api.awards.v2.lookups.lookups.grant_subaward_mapping.keys",
"rest_framework.response.Response",
"usaspending_api.common.api_versioning.api_transformations",
"usaspending_api.awards.v2.lookups.matview_lookups.award_idv_mapping.keys",
"usaspending_api.awards.v2.lookups.lookups.contract_type_mapping.keys",
"usaspending_api.awards.v2.lookups.lookups.grant_subaward_mapping.get",
"usaspending_api.awards.v2.filters.sub_award.subaward_filter",
"usaspending_api.awards.v2.lookups.matview_lookups.loan_award_mapping.get",
"usaspending_api.awards.v2.lookups.matview_lookups.award_contracts_mapping.keys",
"django.db.models.F",
"usaspending_api.awards.v2.lookups.lookups.contract_subaward_mapping.get",
"usaspending_api.awards.v2.filters.view_selector.spending_by_award_count",
"usaspending_api.awards.v2.lookups.matview_lookups.award_contracts_mapping.get",
"copy.deepcopy",
"usaspending_api.awards.v2.filters.matview_filters.matview_search_filter",
"usaspending_api.awards.v2.lookups.lookups.grant_type_mapping.keys",
"usaspending_api.common.exceptions.InvalidParameterException",
"django.db.models.Sum",
"usaspending_api.awards.v2.lookups.matview_lookups.non_loan_assistance_award_mapping.get",
"usaspending_api.common.cache_decorator.cache_response",
"usaspending_api.awards.v2.lookups.matview_lookups.non_loan_assistance_award_mapping.keys",
"usaspending_api.common.exceptions.UnprocessableEntityException",
"usaspending_api.awards.v2.lookups.matview_lookups.award_idv_mapping.get",
"usaspending_api.awards.v2.lookups.matview_lookups.loan_award_mapping.keys",
"usaspending_api.awards.v2.lookups.lookups.contract_subaward_mapping.keys",
"usaspending_api.core.validator.tinyshield.TinyShield",
"django.db.models.Count"
] |
[((1561, 1658), 'usaspending_api.common.api_versioning.api_transformations', 'api_transformations', ([], {'api_version': 'settings.API_VERSION', 'function_list': 'API_TRANSFORM_FUNCTIONS'}), '(api_version=settings.API_VERSION, function_list=\n API_TRANSFORM_FUNCTIONS)\n', (1580, 1658), False, 'from usaspending_api.common.api_versioning import api_transformations, API_TRANSFORM_FUNCTIONS\n'), ((9908, 10005), 'usaspending_api.common.api_versioning.api_transformations', 'api_transformations', ([], {'api_version': 'settings.API_VERSION', 'function_list': 'API_TRANSFORM_FUNCTIONS'}), '(api_version=settings.API_VERSION, function_list=\n API_TRANSFORM_FUNCTIONS)\n', (9927, 10005), False, 'from usaspending_api.common.api_versioning import api_transformations, API_TRANSFORM_FUNCTIONS\n'), ((1884, 1900), 'usaspending_api.common.cache_decorator.cache_response', 'cache_response', ([], {}), '()\n', (1898, 1900), False, 'from usaspending_api.common.cache_decorator import cache_response\n'), ((10272, 10288), 'usaspending_api.common.cache_decorator.cache_response', 'cache_response', ([], {}), '()\n', (10286, 10288), False, 'from usaspending_api.common.cache_decorator import cache_response\n'), ((9804, 9908), 'rest_framework.response.Response', 'Response', (["{'limit': limit, 'results': results, 'page_metadata': {'page': page,\n 'hasNext': has_next}}"], {}), "({'limit': limit, 'results': results, 'page_metadata': {'page':\n page, 'hasNext': has_next}})\n", (9812, 9908), False, 'from rest_framework.response import Response\n'), ((12571, 12601), 'rest_framework.response.Response', 'Response', (["{'results': results}"], {}), "({'results': results})\n", (12579, 12601), False, 'from rest_framework.response import Response\n'), ((2266, 2293), 'copy.deepcopy', 'copy.deepcopy', (['AWARD_FILTER'], {}), '(AWARD_FILTER)\n', (2279, 2293), False, 'import copy\n'), ((2317, 2342), 'copy.deepcopy', 'copy.deepcopy', (['PAGINATION'], {}), '(PAGINATION)\n', (2330, 2342), False, 'import copy\n'), ((2954, 3050), 'rest_framework.response.Response', 'Response', (["{'limit': limit, 'results': [], 'page_metadata': {'page': page, 'hasNext': \n False}}"], {}), "({'limit': limit, 'results': [], 'page_metadata': {'page': page,\n 'hasNext': False}})\n", (2962, 3050), False, 'from rest_framework.response import Response\n'), ((4045, 4069), 'usaspending_api.awards.v2.filters.sub_award.subaward_filter', 'subaward_filter', (['filters'], {}), '(filters)\n', (4060, 4069), False, 'from usaspending_api.awards.v2.filters.sub_award import subaward_filter\n'), ((10438, 10465), 'copy.deepcopy', 'copy.deepcopy', (['AWARD_FILTER'], {}), '(AWARD_FILTER)\n', (10451, 10465), False, 'import copy\n'), ((10489, 10514), 'copy.deepcopy', 'copy.deepcopy', (['PAGINATION'], {}), '(PAGINATION)\n', (10502, 10514), False, 'import copy\n'), ((10722, 10797), 'usaspending_api.common.exceptions.InvalidParameterException', 'InvalidParameterException', (['"""Missing required request parameters: \'filters\'"""'], {}), '("Missing required request parameters: \'filters\'")\n', (10747, 10797), False, 'from usaspending_api.common.exceptions import InvalidParameterException, UnprocessableEntityException\n'), ((11216, 11246), 'rest_framework.response.Response', 'Response', (["{'results': results}"], {}), "({'results': results})\n", (11224, 11246), False, 'from rest_framework.response import Response\n'), ((11293, 11317), 'usaspending_api.awards.v2.filters.sub_award.subaward_filter', 'subaward_filter', (['filters'], {}), '(filters)\n', (11308, 11317), False, 'from usaspending_api.awards.v2.filters.sub_award import subaward_filter\n'), ((11362, 11394), 'usaspending_api.awards.v2.filters.view_selector.spending_by_award_count', 'spending_by_award_count', (['filters'], {}), '(filters)\n', (11385, 11394), False, 'from usaspending_api.awards.v2.filters.view_selector import spending_by_award_count\n'), ((2491, 2509), 'usaspending_api.core.validator.tinyshield.TinyShield', 'TinyShield', (['models'], {}), '(models)\n', (2501, 2509), False, 'from usaspending_api.core.validator.tinyshield import TinyShield\n'), ((3344, 3376), 'usaspending_api.awards.v2.lookups.lookups.contract_subaward_mapping.keys', 'contract_subaward_mapping.keys', ([], {}), '()\n', (3374, 3376), False, 'from usaspending_api.awards.v2.lookups.lookups import contract_type_mapping, loan_type_mapping, non_loan_assistance_type_mapping, grant_type_mapping, contract_subaward_mapping, grant_subaward_mapping, idv_type_mapping\n'), ((3385, 3414), 'usaspending_api.awards.v2.lookups.lookups.grant_subaward_mapping.keys', 'grant_subaward_mapping.keys', ([], {}), '()\n', (3412, 3414), False, 'from usaspending_api.awards.v2.lookups.lookups import contract_type_mapping, loan_type_mapping, non_loan_assistance_type_mapping, grant_type_mapping, contract_subaward_mapping, grant_subaward_mapping, idv_type_mapping\n'), ((3581, 3605), 'usaspending_api.awards.v2.lookups.matview_lookups.award_idv_mapping.keys', 'award_idv_mapping.keys', ([], {}), '()\n', (3603, 3605), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((4194, 4230), 'usaspending_api.awards.v2.lookups.lookups.contract_subaward_mapping.get', 'contract_subaward_mapping.get', (['field'], {}), '(field)\n', (4223, 4230), False, 'from usaspending_api.awards.v2.lookups.lookups import contract_type_mapping, loan_type_mapping, non_loan_assistance_type_mapping, grant_type_mapping, contract_subaward_mapping, grant_subaward_mapping, idv_type_mapping\n'), ((4320, 4353), 'usaspending_api.awards.v2.lookups.lookups.grant_subaward_mapping.get', 'grant_subaward_mapping.get', (['field'], {}), '(field)\n', (4346, 4353), False, 'from usaspending_api.awards.v2.lookups.lookups import contract_type_mapping, loan_type_mapping, non_loan_assistance_type_mapping, grant_type_mapping, contract_subaward_mapping, grant_subaward_mapping, idv_type_mapping\n'), ((4636, 4670), 'usaspending_api.awards.v2.lookups.matview_lookups.award_contracts_mapping.get', 'award_contracts_mapping.get', (['field'], {}), '(field)\n', (4663, 4670), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((4758, 4787), 'usaspending_api.awards.v2.lookups.matview_lookups.loan_award_mapping.get', 'loan_award_mapping.get', (['field'], {}), '(field)\n', (4780, 4787), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((4870, 4914), 'usaspending_api.awards.v2.lookups.matview_lookups.non_loan_assistance_award_mapping.get', 'non_loan_assistance_award_mapping.get', (['field'], {}), '(field)\n', (4907, 4914), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((5012, 5040), 'usaspending_api.awards.v2.lookups.matview_lookups.award_idv_mapping.get', 'award_idv_mapping.get', (['field'], {}), '(field)\n', (5033, 5040), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((10539, 10557), 'usaspending_api.core.validator.tinyshield.TinyShield', 'TinyShield', (['models'], {}), '(models)\n', (10549, 10557), False, 'from usaspending_api.core.validator.tinyshield import TinyShield\n'), ((3532, 3572), 'usaspending_api.awards.v2.lookups.matview_lookups.non_loan_assistance_award_mapping.keys', 'non_loan_assistance_award_mapping.keys', ([], {}), '()\n', (3570, 3572), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((4458, 4508), 'usaspending_api.awards.v2.filters.matview_filters.matview_search_filter', 'matview_search_filter', (['filters', 'UniversalAwardView'], {}), '(filters, UniversalAwardView)\n', (4479, 4508), False, 'from usaspending_api.awards.v2.filters.matview_filters import matview_search_filter\n'), ((11495, 11515), 'django.db.models.Count', 'Count', (['"""subaward_id"""'], {}), "('subaward_id')\n", (11500, 11515), False, 'from django.db.models import Sum, Count, F\n'), ((3445, 3475), 'usaspending_api.awards.v2.lookups.matview_lookups.award_contracts_mapping.keys', 'award_contracts_mapping.keys', ([], {}), '()\n', (3473, 3475), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((3484, 3509), 'usaspending_api.awards.v2.lookups.matview_lookups.loan_award_mapping.keys', 'loan_award_mapping.keys', ([], {}), '()\n', (3507, 3509), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((4263, 4299), 'usaspending_api.awards.v2.lookups.lookups.contract_subaward_mapping.get', 'contract_subaward_mapping.get', (['field'], {}), '(field)\n', (4292, 4299), False, 'from usaspending_api.awards.v2.lookups.lookups import contract_type_mapping, loan_type_mapping, non_loan_assistance_type_mapping, grant_type_mapping, contract_subaward_mapping, grant_subaward_mapping, idv_type_mapping\n'), ((4386, 4419), 'usaspending_api.awards.v2.lookups.lookups.grant_subaward_mapping.get', 'grant_subaward_mapping.get', (['field'], {}), '(field)\n', (4412, 4419), False, 'from usaspending_api.awards.v2.lookups.lookups import contract_type_mapping, loan_type_mapping, non_loan_assistance_type_mapping, grant_type_mapping, contract_subaward_mapping, grant_subaward_mapping, idv_type_mapping\n'), ((4703, 4737), 'usaspending_api.awards.v2.lookups.matview_lookups.award_contracts_mapping.get', 'award_contracts_mapping.get', (['field'], {}), '(field)\n', (4730, 4737), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((4820, 4849), 'usaspending_api.awards.v2.lookups.matview_lookups.loan_award_mapping.get', 'loan_award_mapping.get', (['field'], {}), '(field)\n', (4842, 4849), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((4947, 4991), 'usaspending_api.awards.v2.lookups.matview_lookups.non_loan_assistance_award_mapping.get', 'non_loan_assistance_award_mapping.get', (['field'], {}), '(field)\n', (4984, 4991), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((5073, 5101), 'usaspending_api.awards.v2.lookups.matview_lookups.award_idv_mapping.get', 'award_idv_mapping.get', (['field'], {}), '(field)\n', (5094, 5101), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((5850, 5883), 'usaspending_api.common.exceptions.UnprocessableEntityException', 'UnprocessableEntityException', (['msg'], {}), '(msg)\n', (5878, 5883), False, 'from usaspending_api.common.exceptions import InvalidParameterException, UnprocessableEntityException\n'), ((11634, 11647), 'django.db.models.Sum', 'Sum', (['"""counts"""'], {}), "('counts')\n", (11637, 11647), False, 'from django.db.models import Sum, Count, F\n'), ((11738, 11755), 'django.db.models.Count', 'Count', (['"""category"""'], {}), "('category')\n", (11743, 11755), False, 'from django.db.models import Sum, Count, F\n'), ((8819, 8848), 'usaspending_api.awards.v2.lookups.matview_lookups.loan_award_mapping.get', 'loan_award_mapping.get', (['field'], {}), '(field)\n', (8841, 8848), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((5760, 5788), 'usaspending_api.awards.v2.lookups.lookups.contract_type_mapping.keys', 'contract_type_mapping.keys', ([], {}), '()\n', (5786, 5788), False, 'from usaspending_api.awards.v2.lookups.lookups import contract_type_mapping, loan_type_mapping, non_loan_assistance_type_mapping, grant_type_mapping, contract_subaward_mapping, grant_subaward_mapping, idv_type_mapping\n'), ((5796, 5821), 'usaspending_api.awards.v2.lookups.lookups.grant_type_mapping.keys', 'grant_type_mapping.keys', ([], {}), '()\n', (5819, 5821), False, 'from usaspending_api.awards.v2.lookups.lookups import contract_type_mapping, loan_type_mapping, non_loan_assistance_type_mapping, grant_type_mapping, contract_subaward_mapping, grant_subaward_mapping, idv_type_mapping\n'), ((9029, 9073), 'usaspending_api.awards.v2.lookups.matview_lookups.non_loan_assistance_award_mapping.get', 'non_loan_assistance_award_mapping.get', (['field'], {}), '(field)\n', (9066, 9073), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((9219, 9247), 'usaspending_api.awards.v2.lookups.matview_lookups.award_idv_mapping.get', 'award_idv_mapping.get', (['field'], {}), '(field)\n', (9240, 9247), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((6737, 6753), 'django.db.models.F', 'F', (['"""award__piid"""'], {}), "('award__piid')\n", (6738, 6753), False, 'from django.db.models import Sum, Count, F\n'), ((6801, 6817), 'django.db.models.F', 'F', (['"""award__fain"""'], {}), "('award__fain')\n", (6802, 6817), False, 'from django.db.models import Sum, Count, F\n'), ((6959, 6975), 'django.db.models.F', 'F', (['"""award__piid"""'], {}), "('award__piid')\n", (6960, 6975), False, 'from django.db.models import Sum, Count, F\n'), ((7022, 7038), 'django.db.models.F', 'F', (['"""award__fain"""'], {}), "('award__fain')\n", (7023, 7038), False, 'from django.db.models import Sum, Count, F\n'), ((9480, 9514), 'usaspending_api.awards.v2.lookups.matview_lookups.award_contracts_mapping.get', 'award_contracts_mapping.get', (['field'], {}), '(field)\n', (9507, 9514), False, 'from usaspending_api.awards.v2.lookups.matview_lookups import award_contracts_mapping, loan_award_mapping, non_loan_assistance_award_mapping, award_idv_mapping\n'), ((7230, 7239), 'django.db.models.F', 'F', (['"""piid"""'], {}), "('piid')\n", (7231, 7239), False, 'from django.db.models import Sum, Count, F\n'), ((7287, 7296), 'django.db.models.F', 'F', (['"""fain"""'], {}), "('fain')\n", (7288, 7296), False, 'from django.db.models import Sum, Count, F\n'), ((7344, 7352), 'django.db.models.F', 'F', (['"""uri"""'], {}), "('uri')\n", (7345, 7352), False, 'from django.db.models import Sum, Count, F\n'), ((7494, 7503), 'django.db.models.F', 'F', (['"""piid"""'], {}), "('piid')\n", (7495, 7503), False, 'from django.db.models import Sum, Count, F\n'), ((7550, 7559), 'django.db.models.F', 'F', (['"""fain"""'], {}), "('fain')\n", (7551, 7559), False, 'from django.db.models import Sum, Count, F\n'), ((7606, 7614), 'django.db.models.F', 'F', (['"""uri"""'], {}), "('uri')\n", (7607, 7614), False, 'from django.db.models import Sum, Count, F\n'), ((7738, 7756), 'django.db.models.F', 'F', (['sort_filters[0]'], {}), '(sort_filters[0])\n', (7739, 7756), False, 'from django.db.models import Sum, Count, F\n'), ((7865, 7883), 'django.db.models.F', 'F', (['sort_filters[0]'], {}), '(sort_filters[0])\n', (7866, 7883), False, 'from django.db.models import Sum, Count, F\n')]
|
import requests
from kata.domain.exceptions import ApiLimitReached, InvalidAuthToken
class GithubApi:
"""
Basic wrapper around the Github Api
"""
def __init__(self, auth_token: str):
self._requests = requests
self._auth_token = auth_token
def contents(self, user, repo, path=''):
url = f'https://api.github.com/repos/{user}/{repo}/contents'
if path:
url += f'/{path}'
response = self._get_url(url)
return response.json()
def download_raw_text_file(self, raw_text_file_url: str):
response = self._get_url(raw_text_file_url)
return response.text
def _get_url(self, url: str):
response = self._requests.get(url, headers=self._headers())
self._validate_response(response)
return response
def _headers(self):
if not self._auth_token:
return {}
return {'Authorization': f'token {self._auth_token}'}
def _validate_response(self, response: requests.Response):
def rate_limit_reached():
def unauthorised():
return response.status_code == 403
def limit_reached():
return int(response.headers.get('X-RateLimit-Remaining', -1)) == 0
return unauthorised() and limit_reached()
def invalid_auth():
return response.status_code == 401
if rate_limit_reached():
raise ApiLimitReached()
if invalid_auth():
raise InvalidAuthToken(self._auth_token)
response.raise_for_status()
|
[
"kata.domain.exceptions.ApiLimitReached",
"kata.domain.exceptions.InvalidAuthToken"
] |
[((1443, 1460), 'kata.domain.exceptions.ApiLimitReached', 'ApiLimitReached', ([], {}), '()\n', (1458, 1460), False, 'from kata.domain.exceptions import ApiLimitReached, InvalidAuthToken\n'), ((1506, 1540), 'kata.domain.exceptions.InvalidAuthToken', 'InvalidAuthToken', (['self._auth_token'], {}), '(self._auth_token)\n', (1522, 1540), False, 'from kata.domain.exceptions import ApiLimitReached, InvalidAuthToken\n')]
|
import boto3
import json
MTURK_SANDBOX = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
def get_mturk_client():
with open('config.json', 'r') as f:
config = json.load(f)
mturk = boto3.client('mturk',
aws_access_key_id = config['SANDBOX']['aws_access_key_id'],
aws_secret_access_key = config['SANDBOX']['aws_secret_access_key'],
region_name='us-east-1',
endpoint_url = MTURK_SANDBOX
)
return mturk
|
[
"json.load",
"boto3.client"
] |
[((217, 430), 'boto3.client', 'boto3.client', (['"""mturk"""'], {'aws_access_key_id': "config['SANDBOX']['aws_access_key_id']", 'aws_secret_access_key': "config['SANDBOX']['aws_secret_access_key']", 'region_name': '"""us-east-1"""', 'endpoint_url': 'MTURK_SANDBOX'}), "('mturk', aws_access_key_id=config['SANDBOX'][\n 'aws_access_key_id'], aws_secret_access_key=config['SANDBOX'][\n 'aws_secret_access_key'], region_name='us-east-1', endpoint_url=\n MTURK_SANDBOX)\n", (229, 430), False, 'import boto3\n'), ((189, 201), 'json.load', 'json.load', (['f'], {}), '(f)\n', (198, 201), False, 'import json\n')]
|
from stevedore import driver, ExtensionManager
def get_operator(name):
"""Get an operator class from a plugin.
Attrs:
name: The name of the plugin containing the operator class.
Returns: The operator *class object* (i.e. not an instance) provided by the
plugin named `name`.
"""
return ExtensionManager('cosmic_ray.operators')[name].plugin
def operator_names():
"""Get an iterable of all operator plugin names."""
return ExtensionManager('cosmic_ray.operators').names()
def get_test_runner(name, test_args):
"""Get a test-runner instance by name."""
test_runner_manager = driver.DriverManager(
namespace='cosmic_ray.test_runners',
name=name,
invoke_on_load=True,
invoke_args=(test_args,),
)
return test_runner_manager.driver
def test_runner_names():
"""Get iterable of test-runner plugin names."""
return ExtensionManager('cosmic_ray.test_runners').names()
|
[
"stevedore.driver.DriverManager",
"stevedore.ExtensionManager"
] |
[((632, 751), 'stevedore.driver.DriverManager', 'driver.DriverManager', ([], {'namespace': '"""cosmic_ray.test_runners"""', 'name': 'name', 'invoke_on_load': '(True)', 'invoke_args': '(test_args,)'}), "(namespace='cosmic_ray.test_runners', name=name,\n invoke_on_load=True, invoke_args=(test_args,))\n", (652, 751), False, 'from stevedore import driver, ExtensionManager\n'), ((326, 366), 'stevedore.ExtensionManager', 'ExtensionManager', (['"""cosmic_ray.operators"""'], {}), "('cosmic_ray.operators')\n", (342, 366), False, 'from stevedore import driver, ExtensionManager\n'), ((471, 511), 'stevedore.ExtensionManager', 'ExtensionManager', (['"""cosmic_ray.operators"""'], {}), "('cosmic_ray.operators')\n", (487, 511), False, 'from stevedore import driver, ExtensionManager\n'), ((916, 959), 'stevedore.ExtensionManager', 'ExtensionManager', (['"""cosmic_ray.test_runners"""'], {}), "('cosmic_ray.test_runners')\n", (932, 959), False, 'from stevedore import driver, ExtensionManager\n')]
|
from django.contrib import admin
from .models import List
class ListAdmin(admin.ModelAdmin):
list_filter = ('board', 'name')
admin.site.register(List, ListAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((134, 170), 'django.contrib.admin.site.register', 'admin.site.register', (['List', 'ListAdmin'], {}), '(List, ListAdmin)\n', (153, 170), False, 'from django.contrib import admin\n')]
|
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity, manhattan_distances
def top_5(book, items, similarity_measure):
"""
This function extracts the top-five similar books for a given book and
similarity measure. This function takes the following arguments:
book: the book for which five recommendations need to be provided
items: data-frame that contains all the books with their corresponding
engineered features.
similarity_measure: possible values Euclidean, Cosine or Manhattan
"""
## Filter out books with same title but different publisher
temp = items[items['itemID'] == book]
temp_title = items.loc[items['itemID'] == book, 'title']
items = items[~np.isin(items['title'], temp_title)]
items = pd.concat([temp, items]).reset_index(drop = True)
## Selecting books based on the same language and topic
items = items[np.isin(items['language'], temp['language'])].reset_index(drop = True)
if (items[np.isin(items['general_topic'], temp['general_topic'])].shape[0] > 5):
if (sum(items['general_topic'] == 'Y') > 15000):
if (all(temp['general_topic_2'] == 'YF') == True):
items = items[np.isin(items['general_topic_3'], temp['general_topic_3'])].reset_index(drop = True)
else:
if (items[np.isin(items['general_topic_2'], temp['general_topic_2'])].shape[0] >= 6):
items = items[np.isin(items['general_topic_2'], temp['general_topic_2'])].reset_index(drop = True)
else:
items = items[np.isin(items['general_topic'], temp['general_topic'])].reset_index(drop = True)
## Selecting variables of interest
to_remove = ['itemID', 'title', 'author', 'publisher', 'subtopics', 'general_topic', 'general_topic_2', 'general_topic_3', 'language', 'main topic']
variables_of_interest = items.columns[~np.isin(items.columns, to_remove)]
items_temp = items[variables_of_interest]
## Selecting top 5 similar books
if (similarity_measure == 'Euclidean'):
D = euclidean_distances(items_temp)
to_select = np.argsort(D[:, 0])[1:6]
elif (similarity_measure == 'Cosine'):
D = cosine_similarity(items_temp)
to_select = np.argsort(-D[:, 0])[1:6]
elif (similarity_measure == 'Manhattan'):
D = manhattan_distances(items_temp)
to_select = np.argsort(D[:, 0])[1:6]
return [items.loc[to_select[0], 'itemID'], items.loc[to_select[1], 'itemID'], items.loc[to_select[2], 'itemID'], items.loc[to_select[3], 'itemID'], items.loc[to_select[4], 'itemID']]
def top_5_after_transaction(book, book_to_recommend, items, similarity_measure):
"""
This function extracts the top-five similar books for a given book, books from
transaction history, items and a similarity measure. This function takes the
following arguments:
book: the book for which five recommendations need to be provided.
book_to_recommend: list of book from historical transactions.
items: data-frame that contains all the books with their corresponding
engineered features.
similarity_measure: possible values Euclidean, Cosine or Manhattan
"""
## Selecting books based on transactions
items_temp = items.loc[np.isin(items['itemID'], book_to_recommend)]
## Selecting books based on the same language and topic
temp = items[items['itemID'] == book]
temp_title = items.loc[items['itemID'] == book, 'title']
items_temp = items_temp[~np.isin(items_temp['title'], temp_title)]
items_temp = pd.concat([temp, items_temp]).reset_index(drop = True)
## Selecting books based on language
items_temp = items_temp[np.isin(items_temp['language'], temp['language'])].reset_index(drop = True)
## Selecting variables of interest
to_remove = ['itemID', 'title', 'author', 'publisher', 'subtopics', 'general_topic', 'general_topic_2', 'general_topic_3', 'language', 'main topic']
variables_of_interest = items.columns[~np.isin(items.columns, to_remove)]
items_temp_1 = items_temp[variables_of_interest]
## Sanity check
if (items_temp.shape[0] >= 6):
## Selecting top 5 similar books
if (similarity_measure == 'Euclidean'):
D = euclidean_distances(items_temp_1)
to_select = np.argsort(D[:, 0])[1:6]
elif (similarity_measure == 'Cosine'):
D = cosine_similarity(items_temp_1)
to_select = np.argsort(-D[:, 0])[1:6]
elif (similarity_measure == 'Manhattan'):
D = manhattan_distances(items_temp_1)
to_select = np.argsort(D[:, 0])[1:6]
return [items_temp.loc[to_select[0], 'itemID'], items_temp.loc[to_select[1], 'itemID'], items_temp.loc[to_select[2], 'itemID'], items_temp.loc[to_select[3], 'itemID'], items_temp.loc[to_select[4], 'itemID']]
else:
knn_top_5 = top_5(book, items, similarity_measure)
return knn_top_5
|
[
"numpy.isin",
"sklearn.metrics.pairwise.cosine_similarity",
"sklearn.metrics.pairwise.manhattan_distances",
"sklearn.metrics.pairwise.euclidean_distances",
"numpy.argsort",
"pandas.concat"
] |
[((2284, 2315), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['items_temp'], {}), '(items_temp)\n', (2303, 2315), False, 'from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity, manhattan_distances\n'), ((3588, 3631), 'numpy.isin', 'np.isin', (["items['itemID']", 'book_to_recommend'], {}), "(items['itemID'], book_to_recommend)\n", (3595, 3631), True, 'import numpy as np\n'), ((805, 840), 'numpy.isin', 'np.isin', (["items['title']", 'temp_title'], {}), "(items['title'], temp_title)\n", (812, 840), True, 'import numpy as np\n'), ((854, 878), 'pandas.concat', 'pd.concat', (['[temp, items]'], {}), '([temp, items])\n', (863, 878), True, 'import pandas as pd\n'), ((2100, 2133), 'numpy.isin', 'np.isin', (['items.columns', 'to_remove'], {}), '(items.columns, to_remove)\n', (2107, 2133), True, 'import numpy as np\n'), ((2336, 2355), 'numpy.argsort', 'np.argsort', (['D[:, 0]'], {}), '(D[:, 0])\n', (2346, 2355), True, 'import numpy as np\n'), ((2438, 2467), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['items_temp'], {}), '(items_temp)\n', (2455, 2467), False, 'from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity, manhattan_distances\n'), ((3835, 3875), 'numpy.isin', 'np.isin', (["items_temp['title']", 'temp_title'], {}), "(items_temp['title'], temp_title)\n", (3842, 3875), True, 'import numpy as np\n'), ((3894, 3923), 'pandas.concat', 'pd.concat', (['[temp, items_temp]'], {}), '([temp, items_temp])\n', (3903, 3923), True, 'import pandas as pd\n'), ((4339, 4372), 'numpy.isin', 'np.isin', (['items.columns', 'to_remove'], {}), '(items.columns, to_remove)\n', (4346, 4372), True, 'import numpy as np\n'), ((4615, 4648), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['items_temp_1'], {}), '(items_temp_1)\n', (4634, 4648), False, 'from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity, manhattan_distances\n'), ((991, 1035), 'numpy.isin', 'np.isin', (["items['language']", "temp['language']"], {}), "(items['language'], temp['language'])\n", (998, 1035), True, 'import numpy as np\n'), ((2488, 2508), 'numpy.argsort', 'np.argsort', (['(-D[:, 0])'], {}), '(-D[:, 0])\n', (2498, 2508), True, 'import numpy as np\n'), ((2590, 2621), 'sklearn.metrics.pairwise.manhattan_distances', 'manhattan_distances', (['items_temp'], {}), '(items_temp)\n', (2609, 2621), False, 'from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity, manhattan_distances\n'), ((4023, 4072), 'numpy.isin', 'np.isin', (["items_temp['language']", "temp['language']"], {}), "(items_temp['language'], temp['language'])\n", (4030, 4072), True, 'import numpy as np\n'), ((4673, 4692), 'numpy.argsort', 'np.argsort', (['D[:, 0]'], {}), '(D[:, 0])\n', (4683, 4692), True, 'import numpy as np\n'), ((4783, 4814), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['items_temp_1'], {}), '(items_temp_1)\n', (4800, 4814), False, 'from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity, manhattan_distances\n'), ((1081, 1135), 'numpy.isin', 'np.isin', (["items['general_topic']", "temp['general_topic']"], {}), "(items['general_topic'], temp['general_topic'])\n", (1088, 1135), True, 'import numpy as np\n'), ((2642, 2661), 'numpy.argsort', 'np.argsort', (['D[:, 0]'], {}), '(D[:, 0])\n', (2652, 2661), True, 'import numpy as np\n'), ((4839, 4859), 'numpy.argsort', 'np.argsort', (['(-D[:, 0])'], {}), '(-D[:, 0])\n', (4849, 4859), True, 'import numpy as np\n'), ((4957, 4990), 'sklearn.metrics.pairwise.manhattan_distances', 'manhattan_distances', (['items_temp_1'], {}), '(items_temp_1)\n', (4976, 4990), False, 'from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity, manhattan_distances\n'), ((5015, 5034), 'numpy.argsort', 'np.argsort', (['D[:, 0]'], {}), '(D[:, 0])\n', (5025, 5034), True, 'import numpy as np\n'), ((1312, 1370), 'numpy.isin', 'np.isin', (["items['general_topic_3']", "temp['general_topic_3']"], {}), "(items['general_topic_3'], temp['general_topic_3'])\n", (1319, 1370), True, 'import numpy as np\n'), ((1459, 1517), 'numpy.isin', 'np.isin', (["items['general_topic_2']", "temp['general_topic_2']"], {}), "(items['general_topic_2'], temp['general_topic_2'])\n", (1466, 1517), True, 'import numpy as np\n'), ((1590, 1648), 'numpy.isin', 'np.isin', (["items['general_topic_2']", "temp['general_topic_2']"], {}), "(items['general_topic_2'], temp['general_topic_2'])\n", (1597, 1648), True, 'import numpy as np\n'), ((1770, 1824), 'numpy.isin', 'np.isin', (["items['general_topic']", "temp['general_topic']"], {}), "(items['general_topic'], temp['general_topic'])\n", (1777, 1824), True, 'import numpy as np\n')]
|
# util.py/Open GoPro, Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Tue May 18 22:08:50 UTC 2021
"""Miscellaneous utilities for the GoPro package."""
import sys
import queue
import logging
import subprocess
from pathlib import Path
from typing import Dict, Type, Any, List, Optional, Union
logger = logging.getLogger(__name__)
def launch_vlc(location: Optional[Path]) -> None:
"""Launch VLC
Args:
location (Optional[Path]): path to VLC. If None, it will be automatically discovered
"""
# This is a fairly lazy way to find VLC. We'll call it best effort.
potential_vlc_locations: List[Union[Path, str]] = []
command = "echo Invalid Platform"
if "linux" in sys.platform.lower():
potential_vlc_locations = [r'"/snap/bin/vlc"']
command = 'su $(id -un 1000) -c "{} udp://@:8554 > /dev/null 2>&1 &"'
elif "darwin" in sys.platform.lower():
potential_vlc_locations = [r'"/Applications/VLC.app/Contents/MacOS/VLC"']
command = "{} udp://@:8554 > /dev/null 2>&1 &"
elif "win" in sys.platform.lower():
potential_vlc_locations = [
r'"/c/Program Files/VideoLAN/VLC/vlc.exe"',
r'"/c/Program Files (x86)/VideoLAN/VLC/vlc.exe"',
r'"C:\Program Files (x86)\VideoLAN\VLC\vlc.exe"',
r'"C:\Program Files\VideoLAN\VLC\vlc.exe"',
]
command = "{} udp://@:8554 &"
potential_vlc_locations = potential_vlc_locations if location is None else [location]
for vlc in potential_vlc_locations:
response = cmd(command.format(vlc)).lower()
if (
" not " not in response
and " no " not in response
and " cannot " not in response
and " unexpected " not in response
):
logger.info("VLC launched")
return
logger.error("Failed to find VLC")
def scrub(obj: Any, bad_key: str) -> None:
"""Recursively scrub a dict or list to remove a given key in place.
Args:
obj (Any): dict or list to operate on. If neither, it will return immediately.
bad_key (str): key to remove
"""
if isinstance(obj, dict):
for key in list(obj.keys()):
if key == bad_key:
del obj[key]
else:
scrub(obj[key], bad_key)
elif isinstance(obj, list):
for i in reversed(range(len(obj))):
if obj[i] == bad_key:
del obj[i]
else:
scrub(obj[i], bad_key)
else:
# neither a dict nor a list, do nothing
pass
def cmd(command: str) -> str:
"""Send a command to the shell and return the result.
Args:
command (str): command to send
Returns:
str: response returned from shell
"""
logger.debug(f"Send cmd --> {command}")
# Note: Ignoring unicode characters in SSIDs to prevent intermittent UnicodeDecodeErrors from occurring
# while trying to connect to SSID when *any* AP is nearby that has unicode characters in the name
response = (
subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # type: ignore
.stdout.read()
.decode(errors="ignore")
)
logger.debug(f"Receive response --> {response}")
return response
class Singleton:
"""To be subclassed to create a singleton class."""
_instances: Dict[Type["Singleton"], Type["Singleton"]] = {}
# pylint: disable=missing-return-doc, missing-return-type-doc
def __new__(cls, *args, **kwargs): # type: ignore
"""Check for existing instance."""
if cls not in cls._instances:
cls._instances[cls] = object.__new__(cls, *args, **kwargs)
return cls._instances[cls]
class SnapshotQueue(queue.Queue):
"""A subclass of the default queue module to safely take a snapshot of the queue
This is so we can access the elements (in a thread safe manner) without dequeuing them.
"""
def snapshot(self) -> List[Any]:
"""Acquire the mutex, then return the queue's elements as a list.
Returns:
List[Any]: List of queue elements
"""
with self.mutex:
return list(self.queue)
|
[
"subprocess.Popen",
"logging.getLogger",
"sys.platform.lower"
] |
[((368, 395), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (385, 395), False, 'import logging\n'), ((763, 783), 'sys.platform.lower', 'sys.platform.lower', ([], {}), '()\n', (781, 783), False, 'import sys\n'), ((939, 959), 'sys.platform.lower', 'sys.platform.lower', ([], {}), '()\n', (957, 959), False, 'import sys\n'), ((1116, 1136), 'sys.platform.lower', 'sys.platform.lower', ([], {}), '()\n', (1134, 1136), False, 'import sys\n'), ((3124, 3216), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n', (3140, 3216), False, 'import subprocess\n')]
|
_IS_SIMPLE_CORE = False
if _IS_SIMPLE_CORE:
from dezero.core_simple import Variable
from dezero.core_simple import Function
from dezero.core_simple import using_config
from dezero.core_simple import no_grad
from dezero.core_simple import as_array
from dezero.core_simple import as_variable
from dezero.core_simple import setup_variable
else:
from dezero.core import Variable
from dezero.core import Parameter
from dezero.core import Function
from dezero.core import using_config
from dezero.core import no_grad
from dezero.core import as_array
from dezero.core import as_variable
from dezero.core import setup_variable
from dezero.layers import Layer
from dezero.models import Model
setup_variable()
|
[
"dezero.core.setup_variable"
] |
[((754, 770), 'dezero.core.setup_variable', 'setup_variable', ([], {}), '()\n', (768, 770), False, 'from dezero.core import setup_variable\n')]
|
import time
import urllib
from typing import List, Tuple
from SPARQLWrapper import JSON, SPARQLWrapper
from named_entity_recognition.utils import (join_with_newlines, load_list,
save_text)
LIMIT = 0
def main():
names = load_list('output/nltk.txt')
sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
names_with_classes = []
for i, name in enumerate(names):
if LIMIT > 0 and i > LIMIT:
break
retries = 0
while True:
try:
print(f"({i + 1} / {len(names)}) {name}{' '*100}", end='\r')
names_with_classes.append(
(name, fetch_classes_from_wikidata(sparql, name)))
break
except urllib.error.HTTPError as exc:
retries += 1
if retries > 2:
print()
raise exc
finally:
time.sleep(2)
print(f"Saving HTML...{' '*100}")
html = generate_html(names_with_classes)
save_text('output/tagged_with_wikidata.html', html)
def fetch_classes_from_wikidata(sparql: SPARQLWrapper, name: str) -> str:
separator = ', '
sparql.setQuery(f'''
SELECT (GROUP_CONCAT(DISTINCT ?instanceOfName; SEPARATOR="{separator}") AS ?instanceOfNames) {{
?item rdfs:label "{name}"@en .
?item wdt:P31 ?instanceOf .
?instanceOf rdfs:label ?instanceOfName FILTER(LANG(?instanceOfName) = "en") .
}}
''')
sparql.setReturnFormat(JSON)
return sparql.query().convert()['results']['bindings'][0]['instanceOfNames']['value']
def generate_html(names_with_classes: List[Tuple[str, str]]) -> str:
css = '''
table { border-collapse: collapse; max-width: 1000px; margin: 0 auto; }
tr { border-bottom: 1px solid #333 }
td { min-width: 200px; }
'''
return f'''
<!DOCTYPE html>
<html lang="en">
<head>
<style>{css}</style>
</head>
<body>
<table>
<tr><th>Name</th><th>Classes</th></tr>
{ join_with_newlines(f'<tr><td>{name}</td><td>{classes}</td></tr>' for (name, classes) in names_with_classes) }
</table>
</body>
</html>
'''.lstrip()
if __name__ == '__main__':
main()
|
[
"named_entity_recognition.utils.save_text",
"named_entity_recognition.utils.load_list",
"time.sleep",
"SPARQLWrapper.SPARQLWrapper",
"named_entity_recognition.utils.join_with_newlines"
] |
[((272, 300), 'named_entity_recognition.utils.load_list', 'load_list', (['"""output/nltk.txt"""'], {}), "('output/nltk.txt')\n", (281, 300), False, 'from named_entity_recognition.utils import join_with_newlines, load_list, save_text\n'), ((314, 364), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['"""https://query.wikidata.org/sparql"""'], {}), "('https://query.wikidata.org/sparql')\n", (327, 364), False, 'from SPARQLWrapper import JSON, SPARQLWrapper\n'), ((1061, 1112), 'named_entity_recognition.utils.save_text', 'save_text', (['"""output/tagged_with_wikidata.html"""', 'html'], {}), "('output/tagged_with_wikidata.html', html)\n", (1070, 1112), False, 'from named_entity_recognition.utils import join_with_newlines, load_list, save_text\n'), ((960, 973), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (970, 973), False, 'import time\n'), ((2000, 2109), 'named_entity_recognition.utils.join_with_newlines', 'join_with_newlines', (["(f'<tr><td>{name}</td><td>{classes}</td></tr>' for name, classes in\n names_with_classes)"], {}), "(f'<tr><td>{name}</td><td>{classes}</td></tr>' for name,\n classes in names_with_classes)\n", (2018, 2109), False, 'from named_entity_recognition.utils import join_with_newlines, load_list, save_text\n')]
|
from datetime import datetime
from pathlib import Path
import pytest
from maggma.stores import MemoryStore
from .simple_bib_drone import SimpleBibDrone
@pytest.fixture
def init_drone(test_dir):
"""
Initialize the drone, do not initialize the connection with the database
:return:
initialized drone
"""
mongo_store = MemoryStore(collection_name="drone_test", key="record_key")
simple_path = test_dir / "simple_bib_example_data"
assert simple_path.exists(), f"{simple_path} not found"
simple_bib_drone = SimpleBibDrone(store=mongo_store, path=simple_path)
return simple_bib_drone
def test_read(init_drone: SimpleBibDrone):
"""
Test whether read function is correct
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
list_record_id = init_drone.read(init_drone.path)
assert len(list_record_id) == 7
state_hashes = [r.state_hash for r in list_record_id]
assert len(state_hashes) == len(list_record_id) # all record_id has hash
assert len((set(state_hashes))) == len(state_hashes) # all unique hashes
num_docs = sum([len(r.documents) for r in list_record_id])
assert num_docs == 12
def test_record_id(init_drone: SimpleBibDrone):
"""
Test validity of RecordIdentifier
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
list_record_id = init_drone.read(init_drone.path)
record0 = list_record_id[0]
assert record0.parent_directory == init_drone.path
assert record0.last_updated < datetime.now()
assert len(record0.documents) > 0
# state hash does not change when the file is not changed
assert record0.compute_state_hash() == record0.state_hash
def test_process_item(init_drone: SimpleBibDrone):
"""
Test whether data is expaneded correctly and whether meta data is added
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
list_record_id = list(init_drone.read(init_drone.path))
text_record = next(
d for d in list_record_id if any("text" in f.name for f in d.documents)
)
data = init_drone.process_item(text_record)
assert "citations" in data
assert "text" in data
assert "record_key" in data
assert "last_updated" in data
assert "documents" in data
assert "state_hash" in data
def test_compute_record_identifier_key(init_drone: SimpleBibDrone):
list_record_id = init_drone.read(init_drone.path)
record0 = list_record_id[0]
doc0 = record0.documents[0]
assert record0.record_key == init_drone.compute_record_identifier_key(doc0)
def test_get_items(init_drone: SimpleBibDrone):
"""
This test might take a while
test whether get_items work correctly.
It should fetch from database all the files that needs to be updated
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
init_drone.connect()
init_drone.run() # make sure the database is up-to-date
init_drone.connect()
assert sum([1 for _ in init_drone.get_items()]) == 0
init_drone.finalize()
init_drone.connect()
init_drone.store.remove_docs(criteria={}) # clears the database
assert sum([1 for _ in init_drone.get_items()]) == 7
init_drone.finalize()
def test_assimilate(init_drone: SimpleBibDrone):
"""
Test whether assimilate file is correct
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
record_ids = init_drone.assimilate(init_drone.path)
assert len(record_ids) == 7
def test_compute_data(init_drone: SimpleBibDrone):
"""
test whether data is extracted as expected
:param init_drone: un-connected simpleBibDrone instance
:return:
None
"""
list_record_id = list(init_drone.read(init_drone.path))
text_record = next(
d for d in list_record_id if any("text" in f.name for f in d.documents)
)
data = init_drone.process_item(text_record)
assert "citations" in data
assert "text" in data
|
[
"datetime.datetime.now",
"maggma.stores.MemoryStore"
] |
[((350, 409), 'maggma.stores.MemoryStore', 'MemoryStore', ([], {'collection_name': '"""drone_test"""', 'key': '"""record_key"""'}), "(collection_name='drone_test', key='record_key')\n", (361, 409), False, 'from maggma.stores import MemoryStore\n'), ((1577, 1591), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1589, 1591), False, 'from datetime import datetime\n')]
|
# Generated by Django 2.0.13 on 2020-02-16 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('websubsub', '0010_subscription_time_last_event_received'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='static',
field=models.BooleanField(default=False, editable=False),
),
]
|
[
"django.db.models.BooleanField"
] |
[((362, 412), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'editable': '(False)'}), '(default=False, editable=False)\n', (381, 412), False, 'from django.db import migrations, models\n')]
|
# coding: utf8
# Copyright (c) <NAME>, University of Antwerp
# Distributed under the terms of the MIT License
import os
import numpy as np
from fireworks import Firework, LaunchPad, PyTask, Workflow
from pymongo.errors import ServerSelectionTimeoutError
from ruamel.yaml import YAML
from pybat.cli.commands.define import define_dimer, define_migration
from pybat.cli.commands.setup import transition
from pybat.core import Cathode, LiRichCathode, Dimer
from pybat.workflow.firetasks import VaspTask, CustodianTask, ConfigurationTask, \
EnergyConfTask
from pybat.workflow.fireworks import ScfFirework, RelaxFirework, NebFirework
"""
Package that contains all the Workflows of the pybat package.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>, University of Antwerp"
__version__ = "alpha"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Mar 2019"
# Load the workflow configuration
CONFIG_FILE = os.path.join(os.path.expanduser("~"), ".pybat_wf_config.yaml")
if os.path.exists(CONFIG_FILE):
with open(CONFIG_FILE, 'r') as configfile:
yaml = YAML()
yaml.default_flow_style = False
CONFIG = yaml.load(configfile.read())
try:
LAUNCHPAD = LaunchPad(
host=CONFIG["SERVER"].get("host", ""),
port=int(CONFIG["SERVER"].get("port", 0)),
name=CONFIG["SERVER"].get("name", ""),
username=CONFIG["SERVER"].get("username", ""),
password=CONFIG["SERVER"].get("password", ""),
ssl=CONFIG["SERVER"].get("ssl", False),
authsource=CONFIG["SERVER"].get("authsource", None)
)
except ServerSelectionTimeoutError:
raise TimeoutError("Could not connect to server. Please make "
"sure the details of the server are correctly "
"set up.")
else:
raise FileNotFoundError("No configuration file found in user's home "
"directory. Please use pybat config "
"in order to set up the configuration for "
"the workflows.")
# TODO Extend configuration and make the whole configuration setup more user friendly
# Currently the user is not guided to the workflow setup when attempting to use
# pybat workflows, this should change and be tested. Moreover, careful additions should
# be made to make sure all user-specific configuration elements are easily configured
# and implemented in the code.
# TODO Fix the CustodianTask
# TODO Add UnitTests!
# It's really getting time to do this. Think about what unit tests you need and make a
# test suite.
def scf_workflow(structure_file, functional=("pbe", {}), directory="",
write_chgcar=False, in_custodian=False, number_nodes=None):
"""
Set up a self consistent field calculation (SCF) workflow and add it to the
launchpad of the mongoDB server defined in the config file.
Args:
structure_file (str): Path to the geometry file of the structure.
functional (tuple): Tuple with the functional choices. The first element
contains a string that indicates the functional used ("pbe", "hse", ...),
whereas the second element contains a dictionary that allows the user
to specify the various functional tags.
directory (str): Directory in which the SCF calculation should be performed.
write_chgcar (bool): Flag that indicates whether the CHGCAR file should
be written.
in_custodian (bool): Flag that indicates whether the calculation should be
run inside a Custodian.
number_nodes (int): Number of nodes that should be used for the calculations.
Is required to add the proper `_category` to the Firework generated, so
it is picked up by the right Fireworker.
Returns:
None
"""
# Set up the calculation directory
if directory == "":
directory = os.path.join(os.getcwd(), functional[0])
if functional[0] == "pbeu":
directory += "_" + "".join(k + str(functional[1]["LDAUU"][k]) for k
in functional[1]["LDAUU"].keys())
directory += "_scf"
# Set up the SCF Firework
scf_firework = ScfFirework(
structure_file=structure_file, functional=functional,
directory=directory, write_chgcar=write_chgcar,
in_custodian=in_custodian, number_nodes=number_nodes
)
# Set up a clear name for the workflow
cathode = LiRichCathode.from_file(structure_file)
workflow_name = str(cathode.composition.reduced_formula).replace(" ", "")
workflow_name += str(functional)
# Create the workflow
workflow = Workflow(fireworks=[scf_firework, ],
name=workflow_name)
LAUNCHPAD.add_wf(workflow)
def relax_workflow(structure_file, functional=("pbe", {}), directory="",
is_metal=False, in_custodian=False, number_nodes=None):
"""
Set up a geometry optimization workflow and add it to the launchpad of the
mongoDB server defined in the config file.
Args:
structure_file (str): Path to the geometry file of the structure.
functional (tuple): Tuple with the functional choices. The first element
contains a string that indicates the functional used ("pbe", "hse", ...),
whereas the second element contains a dictionary that allows the user
to specify the various functional tags.
directory (str): Directory in which the SCF calculation should be performed.
is_metal (bool): Flag that indicates whether the material for which the
geometry optimization should be performed is metallic. Determines the
smearing method used.
in_custodian (bool): Flag that indicates wheter the calculation should be
run inside a Custodian.
number_nodes (int): Number of nodes that should be used for the calculations.
Is required to add the proper `_category` to the Firework generated, so
it is picked up by the right Fireworker.
Returns:
None
"""
# Set up the calculation directory
if directory == "":
directory = os.path.join(os.getcwd(), functional[0])
if functional[0] == "pbeu":
directory += "_" + "".join(k + str(functional[1]["LDAUU"][k]) for k
in functional[1]["LDAUU"].keys())
directory += "_relax"
# Set up the geometry optimization Firework
relax_firework = RelaxFirework(structure_file=structure_file,
functional=functional,
directory=directory,
is_metal=is_metal,
in_custodian=in_custodian,
number_nodes=number_nodes)
# Set up a clear name for the workflow
cathode = LiRichCathode.from_file(structure_file)
workflow_name = str(cathode.composition.reduced_formula).replace(" ", "")
workflow_name += str(functional)
# Create the workflow
workflow = Workflow(fireworks=[relax_firework, ],
name=workflow_name)
LAUNCHPAD.add_wf(workflow)
def dimer_workflow(structure_file, dimer_indices=(0, 0), distance=0,
functional=("pbe", {}), is_metal=False, in_custodian=False,
number_nodes=None):
"""
Set up a workflow that calculates the thermodynamics for a dimer
formation in the current directory.
Can later be expanded to also include kinetic barrier calculation.
Args:
structure_file (str): Structure file of the cathode material. Note
that the structure file should be a json format file that is
derived from the Cathode class, i.e. it should contain the cation
configuration of the structure.
dimer_indices (tuple): Indices of the oxygen sites which are to form a
dimer. If no indices are provided, the user will be prompted.
distance (float): Final distance between the oxygen atoms. If no
distance is provided, the user will be prompted.
functional (tuple): Tuple with the functional choices. The first element
contains a string that indicates the functional used ("pbe", "hse", ...),
whereas the second element contains a dictionary that allows the user
to specify the various functional tags.
is_metal (bool): Flag that indicates the material being studied is a
metal, which changes the smearing from Gaussian to second order
Methfessel-Paxton of 0.2 eV. Defaults to False.
in_custodian (bool): Flag that indicates that the calculations
should be run within a Custodian. Defaults to False.
number_nodes (int): Number of nodes that should be used for the calculations.
Is required to add the proper `_category` to the Firework generated, so
it is picked up by the right Fireworker.
"""
# TODO Change naming scheme
# Let the user define a dimer, unless one is provided
dimer_dir = define_dimer(structure_file=structure_file,
dimer_indices=dimer_indices,
distance=distance,
write_cif=True)
# Set up the FireTask that sets up the transition calculation
setup_transition = PyTask(
func="pybat.cli.commands.setup.transition",
kwargs={"directory": dimer_dir,
"functional": functional,
"is_metal": is_metal,
"is_migration": False}
)
# Create the PyTask that runs the calculation
if in_custodian:
vasprun = CustodianTask(directory=os.path.join(dimer_dir, "final"))
else:
vasprun = VaspTask(directory=os.path.join(dimer_dir, "final"))
# Extract the final cathode from the geometry optimization
get_cathode = PyTask(
func="pybat.cli.commands.get.get_cathode",
kwargs={"directory": os.path.join(dimer_dir, "final"),
"write_cif": True}
)
# Add number of nodes to spec, or "none"
firework_spec = {"_launch_dir": os.getcwd()}
if number_nodes is None:
firework_spec.update({"_category": "none"})
else:
firework_spec.update({"_category": str(number_nodes) + "nodes"})
transition_firework = Firework(tasks=[setup_transition, vasprun, get_cathode],
name="Dimer Geometry optimization",
spec=firework_spec)
# Set up the SCF calculation directory
scf_dir = os.path.join(dimer_dir, "scf_final")
final_cathode = os.path.join(dimer_dir, "final", "final_cathode.json")
# Set up the SCF calculation
scf_firework = ScfFirework(
structure_file=final_cathode, functional=functional,
directory=scf_dir, write_chgcar=False, in_custodian=in_custodian,
number_nodes=number_nodes
)
workflow = Workflow(fireworks=[transition_firework, scf_firework],
name=structure_file + dimer_dir.split("/")[-1],
links_dict={transition_firework: [scf_firework]})
LAUNCHPAD.add_wf(workflow)
def migration_workflow(structure_file, migration_indices=(0, 0),
functional=("pbe", {}), is_metal=False,
in_custodian=False, number_nodes=None):
"""
Set up a workflow that calculates the thermodynamics for a migration in
the current directory.
Can later be expanded to also include kinetic barrier calculation.
Args:
structure_file (str): Structure file of the cathode material. Note
that the structure file should be a json format file that is
derived from the Cathode class, i.e. it should contain the cation
configuration of the structure.
migration_indices (tuple): Tuple of the indices which designate the
migrating site and the vacant site to which the cation will
migrate. If no indices are provided, the user will be prompted.
functional (tuple): Tuple with the functional choices. The first element
contains a string that indicates the functional used ("pbe", "hse", ...),
whereas the second element contains a dictionary that allows the user
to specify the various functional tags.
is_metal (bool): Flag that indicates the material being studied is a
metal, which changes the smearing from Gaussian to second order
Methfessel-Paxton of 0.2 eV. Defaults to False.
in_custodian (bool): Flag that indicates that the calculations
should be run within a Custodian. Defaults to False.
number_nodes (int): Number of nodes that should be used for the calculations.
Is required to add the proper `_category` to the Firework generated, so
it is picked up by the right Fireworker.
"""
# TODO Add setup steps to the workflow
# In case adjustments need to made to the setup of certain calculations,
# after which the calculation needs to be rerun, not adding the setup
# steps to the workflow means that these will have to be rerun manually,
# instead of simply relying on the fireworks commands.
# Let the user define a migration
migration_dir = define_migration(structure_file=structure_file,
migration_indices=migration_indices,
write_cif=True)
# Set up the transition calculation
transition(directory=migration_dir,
functional=functional,
is_metal=is_metal,
is_migration=False)
# Create the PyTask that runs the calculation
if in_custodian:
vasprun = CustodianTask(directory=os.path.join(migration_dir, "final"))
else:
vasprun = VaspTask(directory=os.path.join(migration_dir, "final"))
# Add number of nodes to spec, or "none"
firework_spec = {"_launch_dir": os.getcwd()}
if number_nodes is None:
firework_spec.update({"_category": "none"})
else:
firework_spec.update({"_category": str(number_nodes) + "nodes"})
transition_firework = Firework(tasks=[vasprun],
name="Migration Geometry optimization",
spec=firework_spec)
workflow = Workflow(fireworks=[transition_firework],
name=structure_file + migration_dir.split("/")[-1])
LAUNCHPAD.add_wf(workflow)
def neb_workflow(directory, nimages=7, functional=("pbe", {}), is_metal=False,
is_migration=False, in_custodian=False,
number_nodes=None):
"""
Set up a workflow that calculates the kinetic barrier between two geometries.
# TODO
TEMPORARY? Should NEB be integrated in other workflows? If so, should we still
have a separate NEB workflow?
Args:
directory (str): Directory in which the NEB calculation should be performed.
nimages (int): Number of images to use for the NEB calculation.
functional (tuple): Tuple with the functional choices. The first element
contains a string that indicates the functional used ("pbe", "hse", ...),
whereas the second element contains a dictionary that allows the user
to specify the various functional tags.
is_metal (bool): Flag that indicates the material being studied is a
metal, which changes the smearing from Gaussian to second order
Methfessel-Paxton of 0.2 eV. Defaults to False.
is_migration (bool): Flag that indicates that the transition is a migration
of an atom in the structure.
in_custodian (bool): Flag that indicates that the calculations
should be run within a Custodian. Defaults to False.
number_nodes (int): Number of nodes that should be used for the calculations.
Is required to add the proper `_category` to the Firework generated, so
it is picked up by the right Fireworker. Defaults to the number of images.
"""
# If no number of nodes is specified, take the number of images
if number_nodes is None:
number_nodes = nimages
# Create the Firework that sets up and runs the NEB
neb_firework = NebFirework(
directory=directory,
nimages=nimages,
functional=functional,
is_metal=is_metal,
is_migration=is_migration,
in_custodian=in_custodian,
number_nodes=number_nodes
)
# Add number of nodes to spec, or "none"
firework_spec = {"_launch_dir": os.getcwd()}
if number_nodes is None:
firework_spec.update({"_category": "none"})
else:
firework_spec.update({"_category": str(number_nodes) + "nodes"})
cathode = Cathode.from_file(
os.path.join(directory, "final", "initial_cathode.json")
)
dir_name = os.path.abspath(directory).split("/")[-1]
workflow_name = str(cathode.composition).replace(" ", "") + " " + dir_name
workflow = Workflow(fireworks=[neb_firework, ],
name=workflow_name)
LAUNCHPAD.add_wf(workflow)
def configuration_workflow(structure_file, substitution_sites=None, element_list=None,
sizes=None, concentration_restrictions=None,
max_configurations=None, functional=("pbe", {}),
directory=None, in_custodian=False, number_nodes=None):
"""
Set up a workflow for a set of atomic configurations, which includes a geometric
optimization as well as a SCF calculation based on the final geometry.
Args:
structure_file (str): Structure file of the cathode material. Note
that the structure file should be a json format file that is
derived from the Cathode class, i.e. it should contain the cation
configuration of the structure.
substitution_sites (list): List of site indices or pymatgen.Sites to be
substituted.
element_list (list): List of string representations of the cation elements
which have to be substituted on the substitution sites. Can also
include "Vac" to introduce vacancy sites.
E.g. ["Li", "Vac"]; ["Mn", "Co", "Ni"]; ...
sizes (list): List of unit supercell sizes to be considered for the
enumeration of the configurations.
E.g. [1, 2]; range(1, 4); ...
concentration_restrictions (dict): Dictionary of allowed concentration
ranges for each element. Note that the concentration is defined
versus the total amount of atoms in the unit cell.
E.g. {"Li": (0.2, 0.3)}; {"Ni": (0.1, 0.2, "Mn": (0.05, 0.1)}; ...
max_configurations (int): Maximum number of new configurations to generate.
Note that the function detects all the cathode.json files present
in the directory tree and ignores the corresponding configurations.
max_configurations is the maximum number of new configurations that need
to be generated, i.e. on top of the configurations already present in the
directory tree in the form of cathode.json files.
functional (tuple): Tuple with the functional choices. The first element
contains a string that indicates the functional used ("pbe", "hse", ...),
whereas the second element contains a dictionary that allows the user
to specify the various functional tags.
directory (str): Path to the directory in which the configurations and
calculations should be set up.
in_custodian (bool): Flag that indicates that the calculations
should be run within a Custodian. Defaults to False.
number_nodes (int): Number of nodes that should be used for the calculations.
Is required to add the proper `_category` to the Firework generated, so
it is picked up by the right Fireworker.
Returns:
None
"""
# Load the cathode from the structure file
cathode = Cathode.from_file(structure_file)
# Check for the required input, and request if necessary
if not substitution_sites or not element_list or not sizes:
print(cathode)
print()
if not substitution_sites:
substitution_sites = [int(i) for i in input(
"Please provide the substitution site indices, separated by a space: "
).split(" ")]
if not element_list:
element_list = [i for i in input(
"Please provide the substitution elements, separated by a space: "
).split(" ")]
if not sizes:
sizes = [int(i) for i in input(
"Please provide the possible unit cell sizes, separated by a space: "
).split(" ")]
# Set up the directory
if directory == "":
directory = os.getcwd()
directory = os.path.abspath(directory)
configuration_task = ConfigurationTask(
structure=cathode,
directory=directory,
substitution_sites=list(substitution_sites),
element_list=element_list,
sizes=list(sizes),
concentration_restrictions=concentration_restrictions,
max_configurations=max_configurations
)
energy_task = EnergyConfTask(
functional=functional,
in_custodian=in_custodian,
number_nodes=number_nodes
)
# Set up a (sort of) clear name for the workflow
workflow_name = str(cathode.composition.reduced_formula).replace(" ", "")
workflow_name += " " + str(element_list)
workflow_name += " " + str(functional)
configuration_fw = Firework(tasks=[configuration_task, energy_task],
name="Configuration Setup",
spec={"_category": "none"})
# Create the workflow
workflow = Workflow(
fireworks=[configuration_fw],
name=workflow_name
)
LAUNCHPAD.add_wf(workflow)
def noneq_dimers_workflow(structure_file, distance, functional=("pbe", {}),
is_metal=False, in_custodian=False, number_nodes=None):
"""
Run dimer calculations for all the nonequivalent dimers in a structure.
Args:
structure_file (str): Structure file of the cathode material. Note
that the structure file should be a json format file that is
derived from the Cathode class, i.e. it should contain the cation
configuration of the structure.
distance (float): Final distance between the oxygen atoms. If no
distance is provided, the user will be prompted.
functional (tuple): Tuple with the functional choices. The first element
contains a string that indicates the functional used ("pbe", "hse", ...),
whereas the second element contains a dictionary that allows the user
to specify the various functional tags.
is_metal (bool): Flag that indicates the material being studied is a
metal, which changes the smearing from Gaussian to second order
Methfessel-Paxton of 0.2 eV. Defaults to False.
in_custodian (bool): Flag that indicates that the calculations
should be run within a Custodian. Defaults to False.
number_nodes (int): Number of nodes that should be used for the calculations.
Is required to add the proper `_category` to the Firework generated, so
it is picked up by the right Fireworker.
Returns:
None
"""
lirich = LiRichCathode.from_file(structure_file)
dimer_lists = lirich.list_noneq_dimers()
for dimer_list in dimer_lists:
# Find the dimer closest to the center of the lattice. Just for
# visualization purposes.
central_dimer = [(), 1e10]
for dimer in dimer_list:
dimer_center = Dimer(lirich, dimer).center
lattice_center = np.sum(lirich.lattice.matrix, 0) / 3
dist_to_center = np.linalg.norm(dimer_center - lattice_center)
if dist_to_center < central_dimer[1]:
central_dimer = [dimer, dist_to_center]
dimer_workflow(structure_file=structure_file,
dimer_indices=central_dimer[0],
distance=distance,
functional=functional,
is_metal=is_metal,
in_custodian=in_custodian,
number_nodes=number_nodes)
def site_dimers_workflow(structure_file, site_index, distance,
functional=("pbe", {}), is_metal=False,
in_custodian=False, number_nodes=None):
"""
Run dimer calculations for all the dimers around a site.
Args:
structure_file (str): Structure file of the cathode material. Note
that the structure file should be a json format file that is
derived from the Cathode class, i.e. it should contain the cation
configuration of the structure.
site_index (int): Index of the site around which the dimers should
be investigated. Corresponds to the internal Python index.
distance (float): Final distance between the oxygen atoms. If no
distance is provided, the user will be prompted.
functional (tuple): Tuple with the functional choices. The first element
contains a string that indicates the functional used ("pbe", "hse", ...),
whereas the second element contains a dictionary that allows the user
to specify the various functional tags.
is_metal (bool): Flag that indicates the material being studied is a
metal, which changes the smearing from Gaussian to second order
Methfessel-Paxton of 0.2 eV. Defaults to False.
in_custodian (bool): Flag that indicates that the calculations
should be run within a Custodian. Defaults to False.
number_nodes (int): Number of nodes that should be used for the calculations.
Is required to add the proper `_category` to the Firework generated, so
it is picked up by the right Fireworker.
Returns:
None
"""
lirich = LiRichCathode.from_file(structure_file)
dimer_list = lirich.find_noneq_dimers(int(site_index))
for dimer in dimer_list:
dimer_workflow(structure_file=structure_file,
dimer_indices=dimer,
distance=distance,
functional=functional,
is_metal=is_metal,
in_custodian=in_custodian,
number_nodes=number_nodes)
# region * Utility scripts
def find_all(name, path):
result = []
for root, dirs, files in os.walk(path):
if name in files:
result.append(os.path.join(root, name))
return result
def find_all_cathode_hashes(path):
return [Cathode.from_file(file).__hash__() for file in find_all("cathode.json", path)]
def find_hash_dict(path):
path = os.path.abspath(path)
return {Cathode.from_file(file).__hash__(): file.replace(path, "").replace(
"cathode.json", "")
for file in find_all("cathode.json", path)}
def generate_conf_dir(directory, element_list, configuration, number):
if "Vac" in element_list:
# Set up Li configuration directory
conf_dir = os.path.join(
directory, "tm_conf_1",
str(round(configuration.concentration, 3)),
"workion_conf" + str(number), "prim"
)
else:
# Set up TM configuration directory
try:
conf_dir = os.path.join(
directory, "tm_conf_" + str(number),
str(round(configuration.concentration, 3)), "workion_conf1",
"prim"
)
except ZeroDivisionError:
conf_dir = os.path.join(
directory, "tm_conf_" + str(number), "prim"
)
return conf_dir
# endregion
# region * Token workflows for testing
# endregion
|
[
"numpy.sum",
"fireworks.Workflow",
"os.walk",
"numpy.linalg.norm",
"os.path.join",
"os.path.abspath",
"pybat.core.LiRichCathode.from_file",
"os.path.exists",
"ruamel.yaml.YAML",
"pybat.cli.commands.setup.transition",
"pybat.workflow.fireworks.RelaxFirework",
"pybat.workflow.fireworks.NebFirework",
"pybat.workflow.firetasks.EnergyConfTask",
"pybat.cli.commands.define.define_migration",
"pybat.workflow.fireworks.ScfFirework",
"fireworks.PyTask",
"pybat.core.Dimer",
"pybat.core.Cathode.from_file",
"os.getcwd",
"fireworks.Firework",
"pybat.cli.commands.define.define_dimer",
"os.path.expanduser"
] |
[((1002, 1029), 'os.path.exists', 'os.path.exists', (['CONFIG_FILE'], {}), '(CONFIG_FILE)\n', (1016, 1029), False, 'import os\n'), ((948, 971), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (966, 971), False, 'import os\n'), ((4347, 4523), 'pybat.workflow.fireworks.ScfFirework', 'ScfFirework', ([], {'structure_file': 'structure_file', 'functional': 'functional', 'directory': 'directory', 'write_chgcar': 'write_chgcar', 'in_custodian': 'in_custodian', 'number_nodes': 'number_nodes'}), '(structure_file=structure_file, functional=functional, directory\n =directory, write_chgcar=write_chgcar, in_custodian=in_custodian,\n number_nodes=number_nodes)\n', (4358, 4523), False, 'from pybat.workflow.fireworks import ScfFirework, RelaxFirework, NebFirework\n'), ((4603, 4642), 'pybat.core.LiRichCathode.from_file', 'LiRichCathode.from_file', (['structure_file'], {}), '(structure_file)\n', (4626, 4642), False, 'from pybat.core import Cathode, LiRichCathode, Dimer\n'), ((4800, 4854), 'fireworks.Workflow', 'Workflow', ([], {'fireworks': '[scf_firework]', 'name': 'workflow_name'}), '(fireworks=[scf_firework], name=workflow_name)\n', (4808, 4854), False, 'from fireworks import Firework, LaunchPad, PyTask, Workflow\n'), ((6655, 6824), 'pybat.workflow.fireworks.RelaxFirework', 'RelaxFirework', ([], {'structure_file': 'structure_file', 'functional': 'functional', 'directory': 'directory', 'is_metal': 'is_metal', 'in_custodian': 'in_custodian', 'number_nodes': 'number_nodes'}), '(structure_file=structure_file, functional=functional,\n directory=directory, is_metal=is_metal, in_custodian=in_custodian,\n number_nodes=number_nodes)\n', (6668, 6824), False, 'from pybat.workflow.fireworks import ScfFirework, RelaxFirework, NebFirework\n'), ((7050, 7089), 'pybat.core.LiRichCathode.from_file', 'LiRichCathode.from_file', (['structure_file'], {}), '(structure_file)\n', (7073, 7089), False, 'from pybat.core import Cathode, LiRichCathode, Dimer\n'), ((7247, 7303), 'fireworks.Workflow', 'Workflow', ([], {'fireworks': '[relax_firework]', 'name': 'workflow_name'}), '(fireworks=[relax_firework], name=workflow_name)\n', (7255, 7303), False, 'from fireworks import Firework, LaunchPad, PyTask, Workflow\n'), ((9297, 9408), 'pybat.cli.commands.define.define_dimer', 'define_dimer', ([], {'structure_file': 'structure_file', 'dimer_indices': 'dimer_indices', 'distance': 'distance', 'write_cif': '(True)'}), '(structure_file=structure_file, dimer_indices=dimer_indices,\n distance=distance, write_cif=True)\n', (9309, 9408), False, 'from pybat.cli.commands.define import define_dimer, define_migration\n'), ((9582, 9744), 'fireworks.PyTask', 'PyTask', ([], {'func': '"""pybat.cli.commands.setup.transition"""', 'kwargs': "{'directory': dimer_dir, 'functional': functional, 'is_metal': is_metal,\n 'is_migration': False}"}), "(func='pybat.cli.commands.setup.transition', kwargs={'directory':\n dimer_dir, 'functional': functional, 'is_metal': is_metal,\n 'is_migration': False})\n", (9588, 9744), False, 'from fireworks import Firework, LaunchPad, PyTask, Workflow\n'), ((10567, 10684), 'fireworks.Firework', 'Firework', ([], {'tasks': '[setup_transition, vasprun, get_cathode]', 'name': '"""Dimer Geometry optimization"""', 'spec': 'firework_spec'}), "(tasks=[setup_transition, vasprun, get_cathode], name=\n 'Dimer Geometry optimization', spec=firework_spec)\n", (10575, 10684), False, 'from fireworks import Firework, LaunchPad, PyTask, Workflow\n'), ((10808, 10844), 'os.path.join', 'os.path.join', (['dimer_dir', '"""scf_final"""'], {}), "(dimer_dir, 'scf_final')\n", (10820, 10844), False, 'import os\n'), ((10866, 10920), 'os.path.join', 'os.path.join', (['dimer_dir', '"""final"""', '"""final_cathode.json"""'], {}), "(dimer_dir, 'final', 'final_cathode.json')\n", (10878, 10920), False, 'import os\n'), ((10974, 11141), 'pybat.workflow.fireworks.ScfFirework', 'ScfFirework', ([], {'structure_file': 'final_cathode', 'functional': 'functional', 'directory': 'scf_dir', 'write_chgcar': '(False)', 'in_custodian': 'in_custodian', 'number_nodes': 'number_nodes'}), '(structure_file=final_cathode, functional=functional, directory=\n scf_dir, write_chgcar=False, in_custodian=in_custodian, number_nodes=\n number_nodes)\n', (10985, 11141), False, 'from pybat.workflow.fireworks import ScfFirework, RelaxFirework, NebFirework\n'), ((13567, 13672), 'pybat.cli.commands.define.define_migration', 'define_migration', ([], {'structure_file': 'structure_file', 'migration_indices': 'migration_indices', 'write_cif': '(True)'}), '(structure_file=structure_file, migration_indices=\n migration_indices, write_cif=True)\n', (13583, 13672), False, 'from pybat.cli.commands.define import define_dimer, define_migration\n'), ((13787, 13889), 'pybat.cli.commands.setup.transition', 'transition', ([], {'directory': 'migration_dir', 'functional': 'functional', 'is_metal': 'is_metal', 'is_migration': '(False)'}), '(directory=migration_dir, functional=functional, is_metal=\n is_metal, is_migration=False)\n', (13797, 13889), False, 'from pybat.cli.commands.setup import transition\n'), ((14453, 14543), 'fireworks.Firework', 'Firework', ([], {'tasks': '[vasprun]', 'name': '"""Migration Geometry optimization"""', 'spec': 'firework_spec'}), "(tasks=[vasprun], name='Migration Geometry optimization', spec=\n firework_spec)\n", (14461, 14543), False, 'from fireworks import Firework, LaunchPad, PyTask, Workflow\n'), ((16582, 16762), 'pybat.workflow.fireworks.NebFirework', 'NebFirework', ([], {'directory': 'directory', 'nimages': 'nimages', 'functional': 'functional', 'is_metal': 'is_metal', 'is_migration': 'is_migration', 'in_custodian': 'in_custodian', 'number_nodes': 'number_nodes'}), '(directory=directory, nimages=nimages, functional=functional,\n is_metal=is_metal, is_migration=is_migration, in_custodian=in_custodian,\n number_nodes=number_nodes)\n', (16593, 16762), False, 'from pybat.workflow.fireworks import ScfFirework, RelaxFirework, NebFirework\n'), ((17333, 17387), 'fireworks.Workflow', 'Workflow', ([], {'fireworks': '[neb_firework]', 'name': 'workflow_name'}), '(fireworks=[neb_firework], name=workflow_name)\n', (17341, 17387), False, 'from fireworks import Firework, LaunchPad, PyTask, Workflow\n'), ((20407, 20440), 'pybat.core.Cathode.from_file', 'Cathode.from_file', (['structure_file'], {}), '(structure_file)\n', (20424, 20440), False, 'from pybat.core import Cathode, LiRichCathode, Dimer\n'), ((21225, 21251), 'os.path.abspath', 'os.path.abspath', (['directory'], {}), '(directory)\n', (21240, 21251), False, 'import os\n'), ((21602, 21697), 'pybat.workflow.firetasks.EnergyConfTask', 'EnergyConfTask', ([], {'functional': 'functional', 'in_custodian': 'in_custodian', 'number_nodes': 'number_nodes'}), '(functional=functional, in_custodian=in_custodian,\n number_nodes=number_nodes)\n', (21616, 21697), False, 'from pybat.workflow.firetasks import VaspTask, CustodianTask, ConfigurationTask, EnergyConfTask\n'), ((21968, 22078), 'fireworks.Firework', 'Firework', ([], {'tasks': '[configuration_task, energy_task]', 'name': '"""Configuration Setup"""', 'spec': "{'_category': 'none'}"}), "(tasks=[configuration_task, energy_task], name=\n 'Configuration Setup', spec={'_category': 'none'})\n", (21976, 22078), False, 'from fireworks import Firework, LaunchPad, PyTask, Workflow\n'), ((22180, 22238), 'fireworks.Workflow', 'Workflow', ([], {'fireworks': '[configuration_fw]', 'name': 'workflow_name'}), '(fireworks=[configuration_fw], name=workflow_name)\n', (22188, 22238), False, 'from fireworks import Firework, LaunchPad, PyTask, Workflow\n'), ((23875, 23914), 'pybat.core.LiRichCathode.from_file', 'LiRichCathode.from_file', (['structure_file'], {}), '(structure_file)\n', (23898, 23914), False, 'from pybat.core import Cathode, LiRichCathode, Dimer\n'), ((26565, 26604), 'pybat.core.LiRichCathode.from_file', 'LiRichCathode.from_file', (['structure_file'], {}), '(structure_file)\n', (26588, 26604), False, 'from pybat.core import Cathode, LiRichCathode, Dimer\n'), ((27123, 27136), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (27130, 27136), False, 'import os\n'), ((27401, 27422), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (27416, 27422), False, 'import os\n'), ((1093, 1099), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (1097, 1099), False, 'from ruamel.yaml import YAML\n'), ((10363, 10374), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10372, 10374), False, 'import os\n'), ((14249, 14260), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14258, 14260), False, 'import os\n'), ((16899, 16910), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16908, 16910), False, 'import os\n'), ((17118, 17174), 'os.path.join', 'os.path.join', (['directory', '"""final"""', '"""initial_cathode.json"""'], {}), "(directory, 'final', 'initial_cathode.json')\n", (17130, 17174), False, 'import os\n'), ((21197, 21208), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (21206, 21208), False, 'import os\n'), ((4052, 4063), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4061, 4063), False, 'import os\n'), ((6338, 6349), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6347, 6349), False, 'import os\n'), ((24324, 24369), 'numpy.linalg.norm', 'np.linalg.norm', (['(dimer_center - lattice_center)'], {}), '(dimer_center - lattice_center)\n', (24338, 24369), True, 'import numpy as np\n'), ((9921, 9953), 'os.path.join', 'os.path.join', (['dimer_dir', '"""final"""'], {}), "(dimer_dir, 'final')\n", (9933, 9953), False, 'import os\n'), ((10002, 10034), 'os.path.join', 'os.path.join', (['dimer_dir', '"""final"""'], {}), "(dimer_dir, 'final')\n", (10014, 10034), False, 'import os\n'), ((10206, 10238), 'os.path.join', 'os.path.join', (['dimer_dir', '"""final"""'], {}), "(dimer_dir, 'final')\n", (10218, 10238), False, 'import os\n'), ((14044, 14080), 'os.path.join', 'os.path.join', (['migration_dir', '"""final"""'], {}), "(migration_dir, 'final')\n", (14056, 14080), False, 'import os\n'), ((14129, 14165), 'os.path.join', 'os.path.join', (['migration_dir', '"""final"""'], {}), "(migration_dir, 'final')\n", (14141, 14165), False, 'import os\n'), ((17196, 17222), 'os.path.abspath', 'os.path.abspath', (['directory'], {}), '(directory)\n', (17211, 17222), False, 'import os\n'), ((24200, 24220), 'pybat.core.Dimer', 'Dimer', (['lirich', 'dimer'], {}), '(lirich, dimer)\n', (24205, 24220), False, 'from pybat.core import Cathode, LiRichCathode, Dimer\n'), ((24257, 24289), 'numpy.sum', 'np.sum', (['lirich.lattice.matrix', '(0)'], {}), '(lirich.lattice.matrix, 0)\n', (24263, 24289), True, 'import numpy as np\n'), ((27190, 27214), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (27202, 27214), False, 'import os\n'), ((27283, 27306), 'pybat.core.Cathode.from_file', 'Cathode.from_file', (['file'], {}), '(file)\n', (27300, 27306), False, 'from pybat.core import Cathode, LiRichCathode, Dimer\n'), ((27435, 27458), 'pybat.core.Cathode.from_file', 'Cathode.from_file', (['file'], {}), '(file)\n', (27452, 27458), False, 'from pybat.core import Cathode, LiRichCathode, Dimer\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['StoredIscsiVolume']
class StoredIscsiVolume(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
disk_id: Optional[pulumi.Input[str]] = None,
gateway_arn: Optional[pulumi.Input[str]] = None,
kms_encrypted: Optional[pulumi.Input[bool]] = None,
kms_key: Optional[pulumi.Input[str]] = None,
network_interface_id: Optional[pulumi.Input[str]] = None,
preserve_existing_data: Optional[pulumi.Input[bool]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages an AWS Storage Gateway stored iSCSI volume.
> **NOTE:** The gateway must have a working storage added (e.g. via the `storagegateway.WorkingStorage` resource) before the volume is operational to clients, however the Storage Gateway API will allow volume creation without error in that case and return volume status as `WORKING STORAGE NOT CONFIGURED`.
## Example Usage
### Create Empty Stored iSCSI Volume
```python
import pulumi
import pulumi_aws as aws
example = aws.storagegateway.StoredIscsiVolume("example",
gateway_arn=aws_storagegateway_cache["example"]["gateway_arn"],
network_interface_id=aws_instance["example"]["private_ip"],
target_name="example",
preserve_existing_data=False,
disk_id=data["aws_storagegateway_local_disk"]["test"]["id"])
```
### Create Stored iSCSI Volume From Snapshot
```python
import pulumi
import pulumi_aws as aws
example = aws.storagegateway.StoredIscsiVolume("example",
gateway_arn=aws_storagegateway_cache["example"]["gateway_arn"],
network_interface_id=aws_instance["example"]["private_ip"],
snapshot_id=aws_ebs_snapshot["example"]["id"],
target_name="example",
preserve_existing_data=False,
disk_id=data["aws_storagegateway_local_disk"]["test"]["id"])
```
## Import
`aws_storagegateway_stored_iscsi_volume` can be imported by using the volume Amazon Resource Name (ARN), e.g.
```sh
$ pulumi import aws:storagegateway/storedIscsiVolume:StoredIscsiVolume example arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/volume/vol-12345678
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] disk_id: The unique identifier for the gateway local disk that is configured as a stored volume.
:param pulumi.Input[str] gateway_arn: The Amazon Resource Name (ARN) of the gateway.
:param pulumi.Input[bool] kms_encrypted: `true` to use Amazon S3 server side encryption with your own AWS KMS key, or `false` to use a key managed by Amazon S3. Optional.
:param pulumi.Input[str] kms_key: The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when `kms_encrypted` is `true`.
:param pulumi.Input[str] network_interface_id: The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted.
:param pulumi.Input[bool] preserve_existing_data: Specify this field as `true` if you want to preserve the data on the local disk. Otherwise, specifying this field as false creates an empty volume.
:param pulumi.Input[str] snapshot_id: The snapshot ID of the snapshot to restore as the new stored volume. e.g. `snap-1122aabb`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags
:param pulumi.Input[str] target_name: The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. The target name must be unique across all volumes of a gateway.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if disk_id is None and not opts.urn:
raise TypeError("Missing required property 'disk_id'")
__props__['disk_id'] = disk_id
if gateway_arn is None and not opts.urn:
raise TypeError("Missing required property 'gateway_arn'")
__props__['gateway_arn'] = gateway_arn
__props__['kms_encrypted'] = kms_encrypted
__props__['kms_key'] = kms_key
if network_interface_id is None and not opts.urn:
raise TypeError("Missing required property 'network_interface_id'")
__props__['network_interface_id'] = network_interface_id
if preserve_existing_data is None and not opts.urn:
raise TypeError("Missing required property 'preserve_existing_data'")
__props__['preserve_existing_data'] = preserve_existing_data
__props__['snapshot_id'] = snapshot_id
__props__['tags'] = tags
if target_name is None and not opts.urn:
raise TypeError("Missing required property 'target_name'")
__props__['target_name'] = target_name
__props__['arn'] = None
__props__['chap_enabled'] = None
__props__['lun_number'] = None
__props__['network_interface_port'] = None
__props__['target_arn'] = None
__props__['volume_attachment_status'] = None
__props__['volume_id'] = None
__props__['volume_size_in_bytes'] = None
__props__['volume_status'] = None
__props__['volume_type'] = None
super(StoredIscsiVolume, __self__).__init__(
'aws:storagegateway/storedIscsiVolume:StoredIscsiVolume',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
chap_enabled: Optional[pulumi.Input[bool]] = None,
disk_id: Optional[pulumi.Input[str]] = None,
gateway_arn: Optional[pulumi.Input[str]] = None,
kms_encrypted: Optional[pulumi.Input[bool]] = None,
kms_key: Optional[pulumi.Input[str]] = None,
lun_number: Optional[pulumi.Input[int]] = None,
network_interface_id: Optional[pulumi.Input[str]] = None,
network_interface_port: Optional[pulumi.Input[int]] = None,
preserve_existing_data: Optional[pulumi.Input[bool]] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_arn: Optional[pulumi.Input[str]] = None,
target_name: Optional[pulumi.Input[str]] = None,
volume_attachment_status: Optional[pulumi.Input[str]] = None,
volume_id: Optional[pulumi.Input[str]] = None,
volume_size_in_bytes: Optional[pulumi.Input[int]] = None,
volume_status: Optional[pulumi.Input[str]] = None,
volume_type: Optional[pulumi.Input[str]] = None) -> 'StoredIscsiVolume':
"""
Get an existing StoredIscsiVolume resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: Volume Amazon Resource Name (ARN), e.g. `arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/volume/vol-12345678`.
:param pulumi.Input[bool] chap_enabled: Whether mutual CHAP is enabled for the iSCSI target.
:param pulumi.Input[str] disk_id: The unique identifier for the gateway local disk that is configured as a stored volume.
:param pulumi.Input[str] gateway_arn: The Amazon Resource Name (ARN) of the gateway.
:param pulumi.Input[bool] kms_encrypted: `true` to use Amazon S3 server side encryption with your own AWS KMS key, or `false` to use a key managed by Amazon S3. Optional.
:param pulumi.Input[str] kms_key: The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when `kms_encrypted` is `true`.
:param pulumi.Input[int] lun_number: Logical disk number.
:param pulumi.Input[str] network_interface_id: The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted.
:param pulumi.Input[int] network_interface_port: The port used to communicate with iSCSI targets.
:param pulumi.Input[bool] preserve_existing_data: Specify this field as `true` if you want to preserve the data on the local disk. Otherwise, specifying this field as false creates an empty volume.
:param pulumi.Input[str] snapshot_id: The snapshot ID of the snapshot to restore as the new stored volume. e.g. `snap-1122aabb`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value mapping of resource tags
:param pulumi.Input[str] target_arn: Target Amazon Resource Name (ARN), e.g. `arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/target/iqn.1997-05.com.amazon:TargetName`.
:param pulumi.Input[str] target_name: The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. The target name must be unique across all volumes of a gateway.
:param pulumi.Input[str] volume_attachment_status: A value that indicates whether a storage volume is attached to, detached from, or is in the process of detaching from a gateway.
:param pulumi.Input[str] volume_id: Volume ID, e.g. `vol-12345678`.
:param pulumi.Input[int] volume_size_in_bytes: The size of the data stored on the volume in bytes.
:param pulumi.Input[str] volume_status: indicates the state of the storage volume.
:param pulumi.Input[str] volume_type: indicates the type of the volume.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["chap_enabled"] = chap_enabled
__props__["disk_id"] = disk_id
__props__["gateway_arn"] = gateway_arn
__props__["kms_encrypted"] = kms_encrypted
__props__["kms_key"] = kms_key
__props__["lun_number"] = lun_number
__props__["network_interface_id"] = network_interface_id
__props__["network_interface_port"] = network_interface_port
__props__["preserve_existing_data"] = preserve_existing_data
__props__["snapshot_id"] = snapshot_id
__props__["tags"] = tags
__props__["target_arn"] = target_arn
__props__["target_name"] = target_name
__props__["volume_attachment_status"] = volume_attachment_status
__props__["volume_id"] = volume_id
__props__["volume_size_in_bytes"] = volume_size_in_bytes
__props__["volume_status"] = volume_status
__props__["volume_type"] = volume_type
return StoredIscsiVolume(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Volume Amazon Resource Name (ARN), e.g. `arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/volume/vol-12345678`.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="chapEnabled")
def chap_enabled(self) -> pulumi.Output[bool]:
"""
Whether mutual CHAP is enabled for the iSCSI target.
"""
return pulumi.get(self, "chap_enabled")
@property
@pulumi.getter(name="diskId")
def disk_id(self) -> pulumi.Output[str]:
"""
The unique identifier for the gateway local disk that is configured as a stored volume.
"""
return pulumi.get(self, "disk_id")
@property
@pulumi.getter(name="gatewayArn")
def gateway_arn(self) -> pulumi.Output[str]:
"""
The Amazon Resource Name (ARN) of the gateway.
"""
return pulumi.get(self, "gateway_arn")
@property
@pulumi.getter(name="kmsEncrypted")
def kms_encrypted(self) -> pulumi.Output[Optional[bool]]:
"""
`true` to use Amazon S3 server side encryption with your own AWS KMS key, or `false` to use a key managed by Amazon S3. Optional.
"""
return pulumi.get(self, "kms_encrypted")
@property
@pulumi.getter(name="kmsKey")
def kms_key(self) -> pulumi.Output[Optional[str]]:
"""
The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server side encryption. This value can only be set when `kms_encrypted` is `true`.
"""
return pulumi.get(self, "kms_key")
@property
@pulumi.getter(name="lunNumber")
def lun_number(self) -> pulumi.Output[int]:
"""
Logical disk number.
"""
return pulumi.get(self, "lun_number")
@property
@pulumi.getter(name="networkInterfaceId")
def network_interface_id(self) -> pulumi.Output[str]:
"""
The network interface of the gateway on which to expose the iSCSI target. Only IPv4 addresses are accepted.
"""
return pulumi.get(self, "network_interface_id")
@property
@pulumi.getter(name="networkInterfacePort")
def network_interface_port(self) -> pulumi.Output[int]:
"""
The port used to communicate with iSCSI targets.
"""
return pulumi.get(self, "network_interface_port")
@property
@pulumi.getter(name="preserveExistingData")
def preserve_existing_data(self) -> pulumi.Output[bool]:
"""
Specify this field as `true` if you want to preserve the data on the local disk. Otherwise, specifying this field as false creates an empty volume.
"""
return pulumi.get(self, "preserve_existing_data")
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> pulumi.Output[Optional[str]]:
"""
The snapshot ID of the snapshot to restore as the new stored volume. e.g. `snap-1122aabb`.
"""
return pulumi.get(self, "snapshot_id")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value mapping of resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetArn")
def target_arn(self) -> pulumi.Output[str]:
"""
Target Amazon Resource Name (ARN), e.g. `arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12345678/target/iqn.1997-05.com.amazon:TargetName`.
"""
return pulumi.get(self, "target_arn")
@property
@pulumi.getter(name="targetName")
def target_name(self) -> pulumi.Output[str]:
"""
The name of the iSCSI target used by initiators to connect to the target and as a suffix for the target ARN. The target name must be unique across all volumes of a gateway.
"""
return pulumi.get(self, "target_name")
@property
@pulumi.getter(name="volumeAttachmentStatus")
def volume_attachment_status(self) -> pulumi.Output[str]:
"""
A value that indicates whether a storage volume is attached to, detached from, or is in the process of detaching from a gateway.
"""
return pulumi.get(self, "volume_attachment_status")
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> pulumi.Output[str]:
"""
Volume ID, e.g. `vol-12345678`.
"""
return pulumi.get(self, "volume_id")
@property
@pulumi.getter(name="volumeSizeInBytes")
def volume_size_in_bytes(self) -> pulumi.Output[int]:
"""
The size of the data stored on the volume in bytes.
"""
return pulumi.get(self, "volume_size_in_bytes")
@property
@pulumi.getter(name="volumeStatus")
def volume_status(self) -> pulumi.Output[str]:
"""
indicates the state of the storage volume.
"""
return pulumi.get(self, "volume_status")
@property
@pulumi.getter(name="volumeType")
def volume_type(self) -> pulumi.Output[str]:
"""
indicates the type of the volume.
"""
return pulumi.get(self, "volume_type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.ResourceOptions",
"warnings.warn"
] |
[((13153, 13186), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""chapEnabled"""'}), "(name='chapEnabled')\n", (13166, 13186), False, 'import pulumi\n'), ((13391, 13419), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""diskId"""'}), "(name='diskId')\n", (13404, 13419), False, 'import pulumi\n'), ((13648, 13680), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""gatewayArn"""'}), "(name='gatewayArn')\n", (13661, 13680), False, 'import pulumi\n'), ((13876, 13910), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""kmsEncrypted"""'}), "(name='kmsEncrypted')\n", (13889, 13910), False, 'import pulumi\n'), ((14204, 14232), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""kmsKey"""'}), "(name='kmsKey')\n", (14217, 14232), False, 'import pulumi\n'), ((14535, 14566), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""lunNumber"""'}), "(name='lunNumber')\n", (14548, 14566), False, 'import pulumi\n'), ((14734, 14774), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkInterfaceId"""'}), "(name='networkInterfaceId')\n", (14747, 14774), False, 'import pulumi\n'), ((15049, 15091), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""networkInterfacePort"""'}), "(name='networkInterfacePort')\n", (15062, 15091), False, 'import pulumi\n'), ((15311, 15353), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""preserveExistingData"""'}), "(name='preserveExistingData')\n", (15324, 15353), False, 'import pulumi\n'), ((15673, 15705), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""snapshotId"""'}), "(name='snapshotId')\n", (15686, 15705), False, 'import pulumi\n'), ((16162, 16193), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""targetArn"""'}), "(name='targetArn')\n", (16175, 16193), False, 'import pulumi\n'), ((16491, 16523), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""targetName"""'}), "(name='targetName')\n", (16504, 16523), False, 'import pulumi\n'), ((16845, 16889), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""volumeAttachmentStatus"""'}), "(name='volumeAttachmentStatus')\n", (16858, 16889), False, 'import pulumi\n'), ((17193, 17223), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""volumeId"""'}), "(name='volumeId')\n", (17206, 17223), False, 'import pulumi\n'), ((17400, 17439), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""volumeSizeInBytes"""'}), "(name='volumeSizeInBytes')\n", (17413, 17439), False, 'import pulumi\n'), ((17658, 17692), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""volumeStatus"""'}), "(name='volumeStatus')\n", (17671, 17692), False, 'import pulumi\n'), ((17888, 17920), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""volumeType"""'}), "(name='volumeType')\n", (17901, 17920), False, 'import pulumi\n'), ((13109, 13132), 'pulumi.get', 'pulumi.get', (['self', '"""arn"""'], {}), "(self, 'arn')\n", (13119, 13132), False, 'import pulumi\n'), ((13338, 13370), 'pulumi.get', 'pulumi.get', (['self', '"""chap_enabled"""'], {}), "(self, 'chap_enabled')\n", (13348, 13370), False, 'import pulumi\n'), ((13600, 13627), 'pulumi.get', 'pulumi.get', (['self', '"""disk_id"""'], {}), "(self, 'disk_id')\n", (13610, 13627), False, 'import pulumi\n'), ((13824, 13855), 'pulumi.get', 'pulumi.get', (['self', '"""gateway_arn"""'], {}), "(self, 'gateway_arn')\n", (13834, 13855), False, 'import pulumi\n'), ((14150, 14183), 'pulumi.get', 'pulumi.get', (['self', '"""kms_encrypted"""'], {}), "(self, 'kms_encrypted')\n", (14160, 14183), False, 'import pulumi\n'), ((14487, 14514), 'pulumi.get', 'pulumi.get', (['self', '"""kms_key"""'], {}), "(self, 'kms_key')\n", (14497, 14514), False, 'import pulumi\n'), ((14683, 14713), 'pulumi.get', 'pulumi.get', (['self', '"""lun_number"""'], {}), "(self, 'lun_number')\n", (14693, 14713), False, 'import pulumi\n'), ((14988, 15028), 'pulumi.get', 'pulumi.get', (['self', '"""network_interface_id"""'], {}), "(self, 'network_interface_id')\n", (14998, 15028), False, 'import pulumi\n'), ((15248, 15290), 'pulumi.get', 'pulumi.get', (['self', '"""network_interface_port"""'], {}), "(self, 'network_interface_port')\n", (15258, 15290), False, 'import pulumi\n'), ((15610, 15652), 'pulumi.get', 'pulumi.get', (['self', '"""preserve_existing_data"""'], {}), "(self, 'preserve_existing_data')\n", (15620, 15652), False, 'import pulumi\n'), ((15903, 15934), 'pulumi.get', 'pulumi.get', (['self', '"""snapshot_id"""'], {}), "(self, 'snapshot_id')\n", (15913, 15934), False, 'import pulumi\n'), ((16117, 16141), 'pulumi.get', 'pulumi.get', (['self', '"""tags"""'], {}), "(self, 'tags')\n", (16127, 16141), False, 'import pulumi\n'), ((16440, 16470), 'pulumi.get', 'pulumi.get', (['self', '"""target_arn"""'], {}), "(self, 'target_arn')\n", (16450, 16470), False, 'import pulumi\n'), ((16793, 16824), 'pulumi.get', 'pulumi.get', (['self', '"""target_name"""'], {}), "(self, 'target_name')\n", (16803, 16824), False, 'import pulumi\n'), ((17128, 17172), 'pulumi.get', 'pulumi.get', (['self', '"""volume_attachment_status"""'], {}), "(self, 'volume_attachment_status')\n", (17138, 17172), False, 'import pulumi\n'), ((17350, 17379), 'pulumi.get', 'pulumi.get', (['self', '"""volume_id"""'], {}), "(self, 'volume_id')\n", (17360, 17379), False, 'import pulumi\n'), ((17597, 17637), 'pulumi.get', 'pulumi.get', (['self', '"""volume_size_in_bytes"""'], {}), "(self, 'volume_size_in_bytes')\n", (17607, 17637), False, 'import pulumi\n'), ((17834, 17867), 'pulumi.get', 'pulumi.get', (['self', '"""volume_status"""'], {}), "(self, 'volume_status')\n", (17844, 17867), False, 'import pulumi\n'), ((18051, 18082), 'pulumi.get', 'pulumi.get', (['self', '"""volume_type"""'], {}), "(self, 'volume_type')\n", (18061, 18082), False, 'import pulumi\n'), ((4695, 4770), 'warnings.warn', 'warnings.warn', (['"""explicit use of __name__ is deprecated"""', 'DeprecationWarning'], {}), "('explicit use of __name__ is deprecated', DeprecationWarning)\n", (4708, 4770), False, 'import warnings\n'), ((4853, 4952), 'warnings.warn', 'warnings.warn', (['"""explicit use of __opts__ is deprecated, use \'opts\' instead"""', 'DeprecationWarning'], {}), '("explicit use of __opts__ is deprecated, use \'opts\' instead",\n DeprecationWarning)\n', (4866, 4952), False, 'import warnings\n'), ((5021, 5045), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {}), '()\n', (5043, 5045), False, 'import pulumi\n'), ((11762, 11791), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'id': 'id'}), '(id=id)\n', (11784, 11791), False, 'import pulumi\n')]
|
import sys
sys.stdout = open('output.txt', 'w')
sys.stdin = open('input.txt')
# Part Two
from collections import deque
ans = 0
last = None
t = 2000
q = deque()
sm = 0
for _ in range(t):
if len(q) == 3:
sm -= q.popleft()
num = int(input())
sm += num
q.append(num)
if last is None and len(q) == 3:
last = sm
elif last is None:
continue
if sm > last:
ans += 1
last = sm
print(ans)
|
[
"collections.deque"
] |
[((153, 160), 'collections.deque', 'deque', ([], {}), '()\n', (158, 160), False, 'from collections import deque\n')]
|
__author__ = '<NAME> <<EMAIL>>'
from unittest import TestSuite
from .testcase_api_key_authorized import ApiKeyAuthorizedTestCase
from .testcase_api_key_unauthorized import ApiKeyUnauthorizedTestCase
from .testcase_create_headers import CreateHttpHeadersTestCase
from .testcase_convert import ConvertTestCase
from .testcase_convert_dict import ConvertDictTestCase
from .testcase_get_api_base import GetApiBaseTestCase
from .testcase_bitcodinobject import BitcodinObjectBooleanTestCase
from .testcase_bitcodinobject import BitcodinObjectLengthTestCase
def get_test_suite():
test_suite = TestSuite()
test_suite.addTest(ConvertTestCase())
test_suite.addTest(ConvertDictTestCase())
test_suite.addTest(GetApiBaseTestCase())
test_suite.addTest(CreateHttpHeadersTestCase())
test_suite.addTest(ApiKeyAuthorizedTestCase())
test_suite.addTest(ApiKeyUnauthorizedTestCase())
test_suite.addTest(BitcodinObjectBooleanTestCase())
test_suite.addTest(BitcodinObjectLengthTestCase())
return test_suite
|
[
"unittest.TestSuite"
] |
[((593, 604), 'unittest.TestSuite', 'TestSuite', ([], {}), '()\n', (602, 604), False, 'from unittest import TestSuite\n')]
|
# -*- coding: utf-8 -*-
from sqlalchemy import Column, Integer
from sqlalchemy.types import Numeric, Unicode
from sqlalchemy.dialects import postgresql
from chsdi.models import register, bases
from chsdi.models.vector import Vector, Geometry2D
Base = bases['zeitreihen']
class Zeitreihen15(Base, Vector):
__tablename__ = 'tooltip_15'
__table_args__ = ({'schema': 'public', 'autoload': False})
__template__ = 'templates/htmlpopup/zeitreihen.mako'
__bodId__ = 'ch.swisstopo.zeitreihen'
__minresolution__ = 10.05
__maxresolution__ = 500005
__minscale__ = 37984.176
__timeInstant__ = 'years'
__label__ = 'release_year'
id = Column('bgdi_id', Unicode, primary_key=True)
kbbez = Column('kbbez', Unicode)
produkt = Column('produkt', Unicode)
kbnum = Column('kbnum', Unicode)
release_year = Column('release_year', Integer)
years = Column('years', Integer)
bv_nummer = Column('bv_nummer', Unicode)
bgdi_order = Column('bgdi_order', Integer)
array_release_years = Column('array_release_years', postgresql.ARRAY(Integer))
box2d = Column('box2d', Unicode)
the_geom = Column(Geometry2D)
class Zeitreihen20(Base, Vector):
__tablename__ = 'tooltip_20'
__table_args__ = ({'schema': 'public', 'autoload': False})
__template__ = 'templates/htmlpopup/zeitreihen.mako'
__bodId__ = 'ch.swisstopo.zeitreihen'
__minresolution__ = 5.05
__maxresolution__ = 10.05
__minscale__ = 19086.576
__maxscale__ = 37984.176
__timeInstant__ = 'years'
__label__ = 'release_year'
id = Column('bgdi_id', Unicode, primary_key=True)
kbbez = Column('kbbez', Unicode)
produkt = Column('produkt', Unicode)
kbnum = Column('kbnum', Unicode)
release_year = Column('release_year', Integer)
years = Column('years', Integer)
bv_nummer = Column('bv_nummer', Unicode)
bgdi_order = Column('bgdi_order', Integer)
array_release_years = Column('array_release_years', postgresql.ARRAY(Integer))
box2d = Column('box2d', Unicode)
the_geom = Column(Geometry2D)
class Zeitreihen21(Base, Vector):
__tablename__ = 'tooltip_21'
__table_args__ = ({'schema': 'public', 'autoload': False})
__template__ = 'templates/htmlpopup/zeitreihen.mako'
__bodId__ = 'ch.swisstopo.zeitreihen'
__minresolution__ = 2.55
__maxresolution__ = 5.05
__minscale__ = 9637.776
__maxscale__ = 19086.576
__timeInstant__ = 'years'
__label__ = 'release_year'
id = Column('bgdi_id', Unicode, primary_key=True)
kbbez = Column('kbbez', Unicode)
produkt = Column('produkt', Unicode)
kbnum = Column('kbnum', Unicode)
release_year = Column('release_year', Integer)
years = Column('years', Integer)
bv_nummer = Column('bv_nummer', Unicode)
bgdi_order = Column('bgdi_order', Integer)
array_release_years = Column('array_release_years', postgresql.ARRAY(Integer))
box2d = Column('box2d', Unicode)
the_geom = Column(Geometry2D)
class Zeitreihen22(Base, Vector):
__tablename__ = 'tooltip_22'
__table_args__ = ({'schema': 'public', 'autoload': False})
__template__ = 'templates/htmlpopup/zeitreihen.mako'
__bodId__ = 'ch.swisstopo.zeitreihen'
__minresolution__ = 0
__maxresolution__ = 2.55
__minscale__ = 0
__maxscale__ = 9637.776
__timeInstant__ = 'years'
__label__ = 'release_year'
id = Column('bgdi_id', Unicode, primary_key=True)
kbbez = Column('kbbez', Unicode)
produkt = Column('produkt', Unicode)
kbnum = Column('kbnum', Unicode)
release_year = Column('release_year', Integer)
years = Column('years', Integer)
bv_nummer = Column('bv_nummer', Unicode)
bgdi_order = Column('bgdi_order', Integer)
array_release_years = Column('array_release_years', postgresql.ARRAY(Integer))
box2d = Column('box2d', Unicode)
the_geom = Column(Geometry2D)
class DufourErst(Base, Vector):
__tablename__ = 'view_dufour_erstausgabe'
__table_args__ = ({'schema': 'public', 'autoload': False})
__template__ = 'templates/htmlpopup/dufour_erst.mako'
__bodId__ = 'ch.swisstopo.hiks-dufour'
__label__ = 'datenstand'
id = Column('tilenumber', Unicode, primary_key=True)
kbbez = Column('kbbez', Unicode)
datenstand = Column('datenstand', Integer)
bv_nummer = Column('bv_nummer', Unicode)
the_geom = Column(Geometry2D)
class SiegfriedErst(Base, Vector):
__tablename__ = 'view_siegfried_erstausgabe'
__table_args__ = ({'schema': 'public', 'autoload': False})
__template__ = 'templates/htmlpopup/siegfried_erst.mako'
__bodId__ = 'ch.swisstopo.hiks-siegfried'
__label__ = 'datenstand'
id = Column('tilenumber', Unicode, primary_key=True)
kbbez = Column('kbbez', Unicode)
datenstand = Column('datenstand', Numeric)
bv_nummer = Column('bv_nummer', Unicode)
the_geom = Column(Geometry2D)
register('ch.swisstopo.hiks-siegfried', SiegfriedErst)
register('ch.swisstopo.hiks-dufour', DufourErst)
register('ch.swisstopo.zeitreihen', Zeitreihen15)
register('ch.swisstopo.zeitreihen', Zeitreihen20)
register('ch.swisstopo.zeitreihen', Zeitreihen21)
register('ch.swisstopo.zeitreihen', Zeitreihen22)
|
[
"sqlalchemy.dialects.postgresql.ARRAY",
"sqlalchemy.Column",
"chsdi.models.register"
] |
[((4881, 4935), 'chsdi.models.register', 'register', (['"""ch.swisstopo.hiks-siegfried"""', 'SiegfriedErst'], {}), "('ch.swisstopo.hiks-siegfried', SiegfriedErst)\n", (4889, 4935), False, 'from chsdi.models import register, bases\n'), ((4936, 4984), 'chsdi.models.register', 'register', (['"""ch.swisstopo.hiks-dufour"""', 'DufourErst'], {}), "('ch.swisstopo.hiks-dufour', DufourErst)\n", (4944, 4984), False, 'from chsdi.models import register, bases\n'), ((4985, 5034), 'chsdi.models.register', 'register', (['"""ch.swisstopo.zeitreihen"""', 'Zeitreihen15'], {}), "('ch.swisstopo.zeitreihen', Zeitreihen15)\n", (4993, 5034), False, 'from chsdi.models import register, bases\n'), ((5035, 5084), 'chsdi.models.register', 'register', (['"""ch.swisstopo.zeitreihen"""', 'Zeitreihen20'], {}), "('ch.swisstopo.zeitreihen', Zeitreihen20)\n", (5043, 5084), False, 'from chsdi.models import register, bases\n'), ((5085, 5134), 'chsdi.models.register', 'register', (['"""ch.swisstopo.zeitreihen"""', 'Zeitreihen21'], {}), "('ch.swisstopo.zeitreihen', Zeitreihen21)\n", (5093, 5134), False, 'from chsdi.models import register, bases\n'), ((5135, 5184), 'chsdi.models.register', 'register', (['"""ch.swisstopo.zeitreihen"""', 'Zeitreihen22'], {}), "('ch.swisstopo.zeitreihen', Zeitreihen22)\n", (5143, 5184), False, 'from chsdi.models import register, bases\n'), ((666, 710), 'sqlalchemy.Column', 'Column', (['"""bgdi_id"""', 'Unicode'], {'primary_key': '(True)'}), "('bgdi_id', Unicode, primary_key=True)\n", (672, 710), False, 'from sqlalchemy import Column, Integer\n'), ((723, 747), 'sqlalchemy.Column', 'Column', (['"""kbbez"""', 'Unicode'], {}), "('kbbez', Unicode)\n", (729, 747), False, 'from sqlalchemy import Column, Integer\n'), ((762, 788), 'sqlalchemy.Column', 'Column', (['"""produkt"""', 'Unicode'], {}), "('produkt', Unicode)\n", (768, 788), False, 'from sqlalchemy import Column, Integer\n'), ((801, 825), 'sqlalchemy.Column', 'Column', (['"""kbnum"""', 'Unicode'], {}), "('kbnum', Unicode)\n", (807, 825), False, 'from sqlalchemy import Column, Integer\n'), ((845, 876), 'sqlalchemy.Column', 'Column', (['"""release_year"""', 'Integer'], {}), "('release_year', Integer)\n", (851, 876), False, 'from sqlalchemy import Column, Integer\n'), ((889, 913), 'sqlalchemy.Column', 'Column', (['"""years"""', 'Integer'], {}), "('years', Integer)\n", (895, 913), False, 'from sqlalchemy import Column, Integer\n'), ((930, 958), 'sqlalchemy.Column', 'Column', (['"""bv_nummer"""', 'Unicode'], {}), "('bv_nummer', Unicode)\n", (936, 958), False, 'from sqlalchemy import Column, Integer\n'), ((976, 1005), 'sqlalchemy.Column', 'Column', (['"""bgdi_order"""', 'Integer'], {}), "('bgdi_order', Integer)\n", (982, 1005), False, 'from sqlalchemy import Column, Integer\n'), ((1101, 1125), 'sqlalchemy.Column', 'Column', (['"""box2d"""', 'Unicode'], {}), "('box2d', Unicode)\n", (1107, 1125), False, 'from sqlalchemy import Column, Integer\n'), ((1141, 1159), 'sqlalchemy.Column', 'Column', (['Geometry2D'], {}), '(Geometry2D)\n', (1147, 1159), False, 'from sqlalchemy import Column, Integer\n'), ((1578, 1622), 'sqlalchemy.Column', 'Column', (['"""bgdi_id"""', 'Unicode'], {'primary_key': '(True)'}), "('bgdi_id', Unicode, primary_key=True)\n", (1584, 1622), False, 'from sqlalchemy import Column, Integer\n'), ((1635, 1659), 'sqlalchemy.Column', 'Column', (['"""kbbez"""', 'Unicode'], {}), "('kbbez', Unicode)\n", (1641, 1659), False, 'from sqlalchemy import Column, Integer\n'), ((1674, 1700), 'sqlalchemy.Column', 'Column', (['"""produkt"""', 'Unicode'], {}), "('produkt', Unicode)\n", (1680, 1700), False, 'from sqlalchemy import Column, Integer\n'), ((1713, 1737), 'sqlalchemy.Column', 'Column', (['"""kbnum"""', 'Unicode'], {}), "('kbnum', Unicode)\n", (1719, 1737), False, 'from sqlalchemy import Column, Integer\n'), ((1757, 1788), 'sqlalchemy.Column', 'Column', (['"""release_year"""', 'Integer'], {}), "('release_year', Integer)\n", (1763, 1788), False, 'from sqlalchemy import Column, Integer\n'), ((1801, 1825), 'sqlalchemy.Column', 'Column', (['"""years"""', 'Integer'], {}), "('years', Integer)\n", (1807, 1825), False, 'from sqlalchemy import Column, Integer\n'), ((1842, 1870), 'sqlalchemy.Column', 'Column', (['"""bv_nummer"""', 'Unicode'], {}), "('bv_nummer', Unicode)\n", (1848, 1870), False, 'from sqlalchemy import Column, Integer\n'), ((1888, 1917), 'sqlalchemy.Column', 'Column', (['"""bgdi_order"""', 'Integer'], {}), "('bgdi_order', Integer)\n", (1894, 1917), False, 'from sqlalchemy import Column, Integer\n'), ((2013, 2037), 'sqlalchemy.Column', 'Column', (['"""box2d"""', 'Unicode'], {}), "('box2d', Unicode)\n", (2019, 2037), False, 'from sqlalchemy import Column, Integer\n'), ((2053, 2071), 'sqlalchemy.Column', 'Column', (['Geometry2D'], {}), '(Geometry2D)\n', (2059, 2071), False, 'from sqlalchemy import Column, Integer\n'), ((2488, 2532), 'sqlalchemy.Column', 'Column', (['"""bgdi_id"""', 'Unicode'], {'primary_key': '(True)'}), "('bgdi_id', Unicode, primary_key=True)\n", (2494, 2532), False, 'from sqlalchemy import Column, Integer\n'), ((2545, 2569), 'sqlalchemy.Column', 'Column', (['"""kbbez"""', 'Unicode'], {}), "('kbbez', Unicode)\n", (2551, 2569), False, 'from sqlalchemy import Column, Integer\n'), ((2584, 2610), 'sqlalchemy.Column', 'Column', (['"""produkt"""', 'Unicode'], {}), "('produkt', Unicode)\n", (2590, 2610), False, 'from sqlalchemy import Column, Integer\n'), ((2623, 2647), 'sqlalchemy.Column', 'Column', (['"""kbnum"""', 'Unicode'], {}), "('kbnum', Unicode)\n", (2629, 2647), False, 'from sqlalchemy import Column, Integer\n'), ((2667, 2698), 'sqlalchemy.Column', 'Column', (['"""release_year"""', 'Integer'], {}), "('release_year', Integer)\n", (2673, 2698), False, 'from sqlalchemy import Column, Integer\n'), ((2711, 2735), 'sqlalchemy.Column', 'Column', (['"""years"""', 'Integer'], {}), "('years', Integer)\n", (2717, 2735), False, 'from sqlalchemy import Column, Integer\n'), ((2752, 2780), 'sqlalchemy.Column', 'Column', (['"""bv_nummer"""', 'Unicode'], {}), "('bv_nummer', Unicode)\n", (2758, 2780), False, 'from sqlalchemy import Column, Integer\n'), ((2798, 2827), 'sqlalchemy.Column', 'Column', (['"""bgdi_order"""', 'Integer'], {}), "('bgdi_order', Integer)\n", (2804, 2827), False, 'from sqlalchemy import Column, Integer\n'), ((2923, 2947), 'sqlalchemy.Column', 'Column', (['"""box2d"""', 'Unicode'], {}), "('box2d', Unicode)\n", (2929, 2947), False, 'from sqlalchemy import Column, Integer\n'), ((2963, 2981), 'sqlalchemy.Column', 'Column', (['Geometry2D'], {}), '(Geometry2D)\n', (2969, 2981), False, 'from sqlalchemy import Column, Integer\n'), ((3387, 3431), 'sqlalchemy.Column', 'Column', (['"""bgdi_id"""', 'Unicode'], {'primary_key': '(True)'}), "('bgdi_id', Unicode, primary_key=True)\n", (3393, 3431), False, 'from sqlalchemy import Column, Integer\n'), ((3444, 3468), 'sqlalchemy.Column', 'Column', (['"""kbbez"""', 'Unicode'], {}), "('kbbez', Unicode)\n", (3450, 3468), False, 'from sqlalchemy import Column, Integer\n'), ((3483, 3509), 'sqlalchemy.Column', 'Column', (['"""produkt"""', 'Unicode'], {}), "('produkt', Unicode)\n", (3489, 3509), False, 'from sqlalchemy import Column, Integer\n'), ((3522, 3546), 'sqlalchemy.Column', 'Column', (['"""kbnum"""', 'Unicode'], {}), "('kbnum', Unicode)\n", (3528, 3546), False, 'from sqlalchemy import Column, Integer\n'), ((3566, 3597), 'sqlalchemy.Column', 'Column', (['"""release_year"""', 'Integer'], {}), "('release_year', Integer)\n", (3572, 3597), False, 'from sqlalchemy import Column, Integer\n'), ((3610, 3634), 'sqlalchemy.Column', 'Column', (['"""years"""', 'Integer'], {}), "('years', Integer)\n", (3616, 3634), False, 'from sqlalchemy import Column, Integer\n'), ((3651, 3679), 'sqlalchemy.Column', 'Column', (['"""bv_nummer"""', 'Unicode'], {}), "('bv_nummer', Unicode)\n", (3657, 3679), False, 'from sqlalchemy import Column, Integer\n'), ((3697, 3726), 'sqlalchemy.Column', 'Column', (['"""bgdi_order"""', 'Integer'], {}), "('bgdi_order', Integer)\n", (3703, 3726), False, 'from sqlalchemy import Column, Integer\n'), ((3822, 3846), 'sqlalchemy.Column', 'Column', (['"""box2d"""', 'Unicode'], {}), "('box2d', Unicode)\n", (3828, 3846), False, 'from sqlalchemy import Column, Integer\n'), ((3862, 3880), 'sqlalchemy.Column', 'Column', (['Geometry2D'], {}), '(Geometry2D)\n', (3868, 3880), False, 'from sqlalchemy import Column, Integer\n'), ((4163, 4210), 'sqlalchemy.Column', 'Column', (['"""tilenumber"""', 'Unicode'], {'primary_key': '(True)'}), "('tilenumber', Unicode, primary_key=True)\n", (4169, 4210), False, 'from sqlalchemy import Column, Integer\n'), ((4223, 4247), 'sqlalchemy.Column', 'Column', (['"""kbbez"""', 'Unicode'], {}), "('kbbez', Unicode)\n", (4229, 4247), False, 'from sqlalchemy import Column, Integer\n'), ((4265, 4294), 'sqlalchemy.Column', 'Column', (['"""datenstand"""', 'Integer'], {}), "('datenstand', Integer)\n", (4271, 4294), False, 'from sqlalchemy import Column, Integer\n'), ((4311, 4339), 'sqlalchemy.Column', 'Column', (['"""bv_nummer"""', 'Unicode'], {}), "('bv_nummer', Unicode)\n", (4317, 4339), False, 'from sqlalchemy import Column, Integer\n'), ((4355, 4373), 'sqlalchemy.Column', 'Column', (['Geometry2D'], {}), '(Geometry2D)\n', (4361, 4373), False, 'from sqlalchemy import Column, Integer\n'), ((4668, 4715), 'sqlalchemy.Column', 'Column', (['"""tilenumber"""', 'Unicode'], {'primary_key': '(True)'}), "('tilenumber', Unicode, primary_key=True)\n", (4674, 4715), False, 'from sqlalchemy import Column, Integer\n'), ((4728, 4752), 'sqlalchemy.Column', 'Column', (['"""kbbez"""', 'Unicode'], {}), "('kbbez', Unicode)\n", (4734, 4752), False, 'from sqlalchemy import Column, Integer\n'), ((4770, 4799), 'sqlalchemy.Column', 'Column', (['"""datenstand"""', 'Numeric'], {}), "('datenstand', Numeric)\n", (4776, 4799), False, 'from sqlalchemy import Column, Integer\n'), ((4816, 4844), 'sqlalchemy.Column', 'Column', (['"""bv_nummer"""', 'Unicode'], {}), "('bv_nummer', Unicode)\n", (4822, 4844), False, 'from sqlalchemy import Column, Integer\n'), ((4860, 4878), 'sqlalchemy.Column', 'Column', (['Geometry2D'], {}), '(Geometry2D)\n', (4866, 4878), False, 'from sqlalchemy import Column, Integer\n'), ((1062, 1087), 'sqlalchemy.dialects.postgresql.ARRAY', 'postgresql.ARRAY', (['Integer'], {}), '(Integer)\n', (1078, 1087), False, 'from sqlalchemy.dialects import postgresql\n'), ((1974, 1999), 'sqlalchemy.dialects.postgresql.ARRAY', 'postgresql.ARRAY', (['Integer'], {}), '(Integer)\n', (1990, 1999), False, 'from sqlalchemy.dialects import postgresql\n'), ((2884, 2909), 'sqlalchemy.dialects.postgresql.ARRAY', 'postgresql.ARRAY', (['Integer'], {}), '(Integer)\n', (2900, 2909), False, 'from sqlalchemy.dialects import postgresql\n'), ((3783, 3808), 'sqlalchemy.dialects.postgresql.ARRAY', 'postgresql.ARRAY', (['Integer'], {}), '(Integer)\n', (3799, 3808), False, 'from sqlalchemy.dialects import postgresql\n')]
|
import json
import numpy as np
def get_timestamps(evts):
return [c['timestamp'] for c in evts['content']]
def get_bucket(dt):
return dt.weekday() * 24 + dt.hour
from collections import namedtuple
AllData = namedtuple('AllData', ['spots', 'trends', 'total'])
def load_data():
pass
|
[
"collections.namedtuple"
] |
[((213, 264), 'collections.namedtuple', 'namedtuple', (['"""AllData"""', "['spots', 'trends', 'total']"], {}), "('AllData', ['spots', 'trends', 'total'])\n", (223, 264), False, 'from collections import namedtuple\n')]
|
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Union
import yaml
from pathlib import Path
from towhee.hparam.hyperparameter import param_scope
from towhee.pipelines.alias_resolvers import get_resolver
from towhee.pipelines.base import PipelineBase
from towhee import Inject, pipeline
class ImageEmbeddingPipeline(PipelineBase):
"""
Pipeline for image embedding tasks.
Args:
model: (`str` or `List[str]`)
Specifies the model used for image embedding. The user can pass a list
of model names to create a pipeline ensembling multiple models.
Supported models:
`vgg`,
`resnet50`, `resnet101`,
`swin-transformer`,
`vit`,
...
ensemble: (`str`)
Specifies the type of model ensemble. This argument works iff
multiple model names are given via `model`.
Supported ensemble types:
`linear`,
"""
def __init__(self, model: Union[Any, List[Any]] = None, ensemble: str = None):
with param_scope() as hp:
resolver = get_resolver(hp().towhee.alias_resolver('local'))
models: List[Any] = []
if isinstance(model, str):
models = [model]
else:
models = model
num_branch = len(models)
models = [resolver.resolve(model) if isinstance(model, str) else model for model in models]
operators = dict(zip([
'embedding_model_1',
'embedding_model_2',
'embedding_model_3',
], models))
if ensemble is not None:
operators['ensemble_model'] = resolver.resolve(
ensemble) if isinstance(ensemble, str) else ensemble
injections = {name: {'function': model.function, 'init_args': model.init_args} for name, model in operators.items()}
self._pipeline = Inject(**injections).pipeline('builtin/image_embedding_template_{}'.format(num_branch))
def __call__(self, *arg, **kws):
return self._pipeline(*arg, **kws)
def save(self, name: str, path: Union[str, Path] = Path.cwd()):
path = Path(path)
operator_path = path / name
if operator_path.exists():
raise FileExistsError(operator_path)
operator_path.mkdir(parents=True)
with open('{}/{}.yaml'.format(operator_path, name), 'w', encoding='utf-8') as f:
info = yaml.safe_load(self._pipeline.pipeline.graph_repr.ir)
info['name'] = name
f.write(yaml.safe_dump(info))
def push_to_hub(self, version: str = 'main'):
# TODO: push to hub with new hub tool
pass
def image_embedding_pipeline(model: Union[str, List[str]] = None,
ensemble: str = None,
name: str = None,
version: str = None):
"""Create a pipeline for image embedding tasks.
An image embedding pipeline converts input images into feature vectors (embedding),
which can be adapted to various vision tasks,
such as image retrieval, image classifications, etc.
There are two ways to instantiate an image embedding pipeline:
1 - If `model` is passed to `image_embedding_pipeline`,
a new pipeline will be generated for evaluation and benchmarking.
```python
>>> pipe = image_embedding_pipeline(model='resnet101')
>>> embedding = pipe('uri_to_image')
```
The pipeline can be saved to file if the evaluation results seems good,
and if you want to reuse this pipeline:
```python
>>> pipe.save(name='my_image_embedding_pipeline', path='my_pipelines')
```
You can also publish this pipeline to towhee hub to share it with the community.
```shell
$ cd ${WORK_DIR}/my_pipelines/my_image_embedding_pipeline
$ towhee publish # see towhee publish user guide from the terminal
$ git commit && git push
```
2 - Load a saved/shared pipeline from towhee hub:
```python
>>> pipe = image_embedding_pipeline(name='your_name/my_image_embedding_pipeline')
```
Args:
model (Union[str, List[str]], optional): Backbone models for extracting image embedding.
If there are more than one models, the model outputs will be fused with the `ensemble` model.
Defaults to None.
ensemble (str, optional): Ensemble model used to fuse backbone model outputs. Defaults to None.
name (str, optional): Pipeline name. Defaults to None.
version (str, optional): Version of the pipeline. Defaults to None.
Returns:
Pipeline: An image embedding pipeline.
"""
pipe = None
if name is not None:
pipe = pipeline(name, tag=version)
return pipe
if model is not None:
return ImageEmbeddingPipeline(model=model, ensemble=ensemble)
|
[
"yaml.safe_dump",
"towhee.pipeline",
"towhee.hparam.hyperparameter.param_scope",
"pathlib.Path",
"yaml.safe_load",
"towhee.Inject",
"pathlib.Path.cwd"
] |
[((2716, 2726), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (2724, 2726), False, 'from pathlib import Path\n'), ((2744, 2754), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2748, 2754), False, 'from pathlib import Path\n'), ((5312, 5339), 'towhee.pipeline', 'pipeline', (['name'], {'tag': 'version'}), '(name, tag=version)\n', (5320, 5339), False, 'from towhee import Inject, pipeline\n'), ((1663, 1676), 'towhee.hparam.hyperparameter.param_scope', 'param_scope', ([], {}), '()\n', (1674, 1676), False, 'from towhee.hparam.hyperparameter import param_scope\n'), ((3025, 3078), 'yaml.safe_load', 'yaml.safe_load', (['self._pipeline.pipeline.graph_repr.ir'], {}), '(self._pipeline.pipeline.graph_repr.ir)\n', (3039, 3078), False, 'import yaml\n'), ((2491, 2511), 'towhee.Inject', 'Inject', ([], {}), '(**injections)\n', (2497, 2511), False, 'from towhee import Inject, pipeline\n'), ((3131, 3151), 'yaml.safe_dump', 'yaml.safe_dump', (['info'], {}), '(info)\n', (3145, 3151), False, 'import yaml\n')]
|
from rich import print
#print("Hello, [bold magenta]World[/bold magenta]!", ":vampire:", locals())
from rich.console import Console
console = Console()
console.print("Hello", "World!", style="bold red")
console.print("Hello", style="5")
console.print("Hello", style="#af00ff")
console.print("Hello", style="rgb(175,0,255)")
console.print("DANGER!", style="red on white")
console.print("Where there is a [bold cyan]Will[/bold cyan] there [u]is[/u] a [i]way[/i].")
console.print([1, 2, 3])
console.print("[blue underline]Looks like a link")
console.print(locals())
console.print("FOO", style="white on blue")
console.print("Google", style="link https://google.com")
console.log("Hello, World!")
console.input("What is [i]your[/i] [bold red]name[/]? :smiley: ")
from rich.panel import Panel
print(Panel("Hello, [red]World!"))
from rich.theme import Theme
custom_theme = Theme({
"info" : "dim cyan",
"warning": "magenta",
"danger": "bold red"
})
console = Console(theme=custom_theme)
console.print("This is information", style="info")
console.print("Something terrible happened!", style="danger")
from rich.text import Text
text = Text("Hello, World!")
text.stylize(0, 8, "bold magenta")
console.print(text)
from rich.highlighter import RegexHighlighter
class EmailHighlighter(RegexHighlighter):
"""Apply style to anything that looks like an email."""
base_style = "example."
highlights = [r"(?P<email>[\w-]+@([\w-]+\.)+[\w-]+)"]
theme = Theme({"example.email": "bold magenta"})
console = Console(highlighter=EmailHighlighter(), theme=theme)
console.print("Send funds to <EMAIL>")
from rich.table import Table
table = Table(title="Star Wars Movies")
table.add_column("Released", justify="right", style="cyan", no_wrap=True)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")
table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
table.add_row("Dec 16, 2016", "Rouge One: A Star Wars Story", "$1,332,439,889")
console = Console()
console.print(table)
MARKDOWN = """
# This is a h1
Rich can do a pretty *decent* job of rendering markdown.
1. This is a list item
2. This is another list item
"""
from rich.markdown import Markdown
console = Console()
md = Markdown(MARKDOWN)
console.print(md)
|
[
"rich.panel.Panel",
"rich.text.Text",
"rich.markdown.Markdown",
"rich.console.Console",
"rich.theme.Theme",
"rich.table.Table"
] |
[((144, 153), 'rich.console.Console', 'Console', ([], {}), '()\n', (151, 153), False, 'from rich.console import Console\n'), ((870, 941), 'rich.theme.Theme', 'Theme', (["{'info': 'dim cyan', 'warning': 'magenta', 'danger': 'bold red'}"], {}), "({'info': 'dim cyan', 'warning': 'magenta', 'danger': 'bold red'})\n", (875, 941), False, 'from rich.theme import Theme\n'), ((967, 994), 'rich.console.Console', 'Console', ([], {'theme': 'custom_theme'}), '(theme=custom_theme)\n', (974, 994), False, 'from rich.console import Console\n'), ((1143, 1164), 'rich.text.Text', 'Text', (['"""Hello, World!"""'], {}), "('Hello, World!')\n", (1147, 1164), False, 'from rich.text import Text\n'), ((1468, 1508), 'rich.theme.Theme', 'Theme', (["{'example.email': 'bold magenta'}"], {}), "({'example.email': 'bold magenta'})\n", (1473, 1508), False, 'from rich.theme import Theme\n'), ((1651, 1682), 'rich.table.Table', 'Table', ([], {'title': '"""Star Wars Movies"""'}), "(title='Star Wars Movies')\n", (1656, 1682), False, 'from rich.table import Table\n'), ((2196, 2205), 'rich.console.Console', 'Console', ([], {}), '()\n', (2203, 2205), False, 'from rich.console import Console\n'), ((2420, 2429), 'rich.console.Console', 'Console', ([], {}), '()\n', (2427, 2429), False, 'from rich.console import Console\n'), ((2435, 2453), 'rich.markdown.Markdown', 'Markdown', (['MARKDOWN'], {}), '(MARKDOWN)\n', (2443, 2453), False, 'from rich.markdown import Markdown\n'), ((796, 823), 'rich.panel.Panel', 'Panel', (['"""Hello, [red]World!"""'], {}), "('Hello, [red]World!')\n", (801, 823), False, 'from rich.panel import Panel\n')]
|
#! /usr/bin/python
'''
Data Normalization
'''
from sklearn import preprocessing
def normalize(file_dataframe, cols):
'''
Data Normalization.
'''
for col in cols:
preprocessing.normalize(file_dataframe[col], \
axis=1, norm='l2', copy=False)
return file_dataframe
|
[
"sklearn.preprocessing.normalize"
] |
[((197, 272), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['file_dataframe[col]'], {'axis': '(1)', 'norm': '"""l2"""', 'copy': '(False)'}), "(file_dataframe[col], axis=1, norm='l2', copy=False)\n", (220, 272), False, 'from sklearn import preprocessing\n')]
|
from datetime import timedelta
AUTOFOCUS_IP_RESPONSE_MOCK = {
"indicator": {
"indicatorValue": "172.16.31.10",
"indicatorType": "IPV4_ADDRESS",
"summaryGenerationTs": 1607951568568,
"firstSeenTsGlobal": None,
"lastSeenTsGlobal": None,
"latestPanVerdicts": {
"PAN_DB": "MALWARE"
},
"seenByDataSourceIds": [],
"wildfireRelatedSampleVerdictCounts": {}
},
"tags": [],
"bucketInfo": {
"minutePoints": 200,
"dailyPoints": 25000,
"minuteBucketStart": "2020-11-20 05:02:52",
"dailyBucketStart": "2020-11-20 04:52:40",
"minutePointsRemaining": 196,
"dailyPointsRemaining": 24980,
"waitInSeconds": 0
}
}
INTEGRATION_IP_RESPONSE_MOCK = {
'/observe/observables': {
"data": {
"judgements": {
"count": 1,
"docs": [
{
"confidence": "High",
"disposition": 2,
"disposition_name": "Malicious",
"observable": {
"type": "ip",
"value": "172.16.31.10"
},
"priority": 85,
"reason": "MALWARE in AutoFocus",
"schema_version": "1.0.22",
"severity": "High",
"source": "Palo Alto AutoFocus",
"source_uri": "https://autofocus.paloaltonetworks.com/"
"#/search/indicator/ipv4_address/"
"172.16.31.10",
"type": "judgement"
}
]
},
"verdicts": {
"count": 1,
"docs": [
{
"disposition": 2,
"disposition_name": "Malicious",
"observable": {
"type": "ip",
"value": "172.16.31.10"
},
"type": "verdict"
}
]
}
}
},
'/refer/observables': {
'data': [
{
"categories": [
"Search",
"Palo Alto AutoFocus"
],
"description": "Look up this IP on Palo Alto AutoFocus",
"id": "ref-palo-alto-autofocus-search-ip-172.16.31.10",
"title": "Search for this IP",
"url": "https://autofocus.paloaltonetworks.com/#/search/"
"indicator/ipv4_address/103.110.84.196"
}]
}
}
AUTOFOCUS_IPV6_RESPONSE_MOCK = {
"indicator": {
"indicatorValue": "2001:db8:85a3:8d3:1319:8a2e:370:7348",
"indicatorType": "IPV6_ADDRESS",
"summaryGenerationTs": 1607953105326,
"firstSeenTsGlobal": None,
"lastSeenTsGlobal": None,
"latestPanVerdicts": {
"PAN_DB": "BENIGN"
},
"seenByDataSourceIds": [],
"wildfireRelatedSampleVerdictCounts": {}
},
"tags": [],
"bucketInfo": {
"minutePoints": 200,
"dailyPoints": 25000,
"minuteBucketStart": "2020-11-20 05:02:52",
"dailyBucketStart": "2020-11-20 04:52:40",
"minutePointsRemaining": 196,
"dailyPointsRemaining": 24980,
"waitInSeconds": 0
}
}
INTEGRATION_IPV6_RESPONSE_MOCK = {
'/observe/observables': {
"data": {
"judgements": {
"count": 1,
"docs": [
{
"confidence": "High",
"disposition": 1,
"disposition_name": "Clean",
"observable": {
"type": "ipv6",
"value": "2001:db8:85a3:8d3:1319:8a2e:370:7348"
},
"priority": 85,
"reason": "BENIGN in AutoFocus",
"schema_version": "1.0.22",
"severity": "High",
"source": "Palo Alto AutoFocus",
"source_uri": "https://autofocus.paloaltonetworks.com"
"/#/search/indicator/ipv6_address/"
"2001:db8:85a3:8d3:1319:8a2e:370:7348",
"type": "judgement"
}
]
},
"verdicts": {
"count": 1,
"docs": [
{
"disposition": 1,
"disposition_name": "Clean",
"observable": {
"type": "ipv6",
"value": "2001:db8:85a3:8d3:1319:8a2e:370:7348"
},
"type": "verdict"
}
]
}
}
},
'/refer/observables': {
'data': [
{
"categories": [
"Search",
"Palo Alto AutoFocus"
],
"description": "Look up this IPv6 on Palo Alto AutoFocus",
"id": "ref-palo-alto-autofocus-search-"
"ipv6-2001:db8:85a3:8d3:1319:8a2e:370:7348",
"title": "Search for this IPv6",
"url": "https://autofocus.paloaltonetworks.com/#/search"
"/indicator/ipv6_address/"
"2001:db8:85a3:8d3:1319:8a2e:370:7348"
}
]
}
}
AUTOFOCUS_DOMAIN_RESPONSE_MOCK = {
"indicator": {
"indicatorValue": "cisco.com",
"indicatorType": "DOMAIN",
"summaryGenerationTs": 1607953513675,
"firstSeenTsGlobal": None,
"lastSeenTsGlobal": None,
"latestPanVerdicts": {
"PAN_DB": "BENIGN"
},
"seenByDataSourceIds": [],
"whoisAdminCountry": None,
"whoisAdminEmail": None,
"whoisAdminName": None,
"whoisDomainCreationDate": "1987-05-14",
"whoisDomainExpireDate": "2022-05-15",
"whoisDomainUpdateDate": "2019-06-21",
"whoisRegistrar": "MarkMonitor Inc.",
"whoisRegistrarUrl": "http://www.markmonitor.com",
"whoisRegistrant": None,
"wildfireRelatedSampleVerdictCounts": {}
},
"tags": [],
"bucketInfo": {
"minutePoints": 200,
"dailyPoints": 25000,
"minuteBucketStart": "2020-11-20 05:02:52",
"dailyBucketStart": "2020-11-20 04:52:40",
"minutePointsRemaining": 196,
"dailyPointsRemaining": 24980,
"waitInSeconds": 0
}
}
INTEGRATION_DOMAIN_RESPONSE_MOCK = {
'/observe/observables': {
"data": {
"judgements": {
"count": 1,
"docs": [
{
"confidence": "High",
"disposition": 1,
"disposition_name": "Clean",
"observable": {
"type": "domain",
"value": "cisco.com"
},
"priority": 85,
"reason": "BENIGN in AutoFocus",
"schema_version": "1.0.22",
"severity": "High",
"source": "Palo Alto AutoFocus",
"source_uri": "https://autofocus.paloaltonetworks.com"
"/#/search/indicator/domain/cisco.com",
"type": "judgement"
}
]
},
"verdicts": {
"count": 1,
"docs": [
{
"disposition": 1,
"disposition_name": "Clean",
"observable": {
"type": "domain",
"value": "cisco.com"
},
"type": "verdict"
}
]
}
}
},
'/refer/observables': {
'data': [
{
"categories": [
"Search",
"Palo Alto AutoFocus"
],
"description": "Look up this domain on Palo Alto AutoFocus",
"id": "ref-palo-alto-autofocus-search-domain-cisco.com",
"title": "Search for this domain",
"url": "https://autofocus.paloaltonetworks.com/#/search/"
"indicator/domain/cisco.com"
}
]
}
}
AUTOFOCUS_URL_RESPONSE_MOCK = {
"indicator": {
"indicatorValue": "http://0win365.com/wp-admin/sites/",
"indicatorType": "URL",
"summaryGenerationTs": 1607953838339,
"firstSeenTsGlobal": None,
"lastSeenTsGlobal": None,
"latestPanVerdicts": {
"PAN_DB": "MALWARE"
},
"seenByDataSourceIds": [],
"wildfireRelatedSampleVerdictCounts": {}
},
"tags": [],
"bucketInfo": {
"minutePoints": 200,
"dailyPoints": 25000,
"minuteBucketStart": "2020-11-20 05:02:52",
"dailyBucketStart": "2020-11-20 04:52:40",
"minutePointsRemaining": 196,
"dailyPointsRemaining": 24980,
"waitInSeconds": 0
}
}
INTEGRATION_URL_RESPONSE_MOCK = {
'/observe/observables': {
"data": {
"judgements": {
"count": 1,
"docs": [
{
"confidence": "High",
"disposition": 2,
"disposition_name": "Malicious",
"observable": {
"type": "url",
"value": "http://0win365.com/wp-admin/sites/"
},
"priority": 85,
"reason": "MALWARE in AutoFocus",
"schema_version": "1.0.22",
"severity": "High",
"source": "Palo Alto AutoFocus",
"source_uri": "https://autofocus.paloaltonetworks.com"
"/#/search/indicator/url/"
"http%3A%2F%2F0win365.com%2Fwp-admin%2"
"Fsites%2F/summary",
"type": "judgement"
}
]
},
"verdicts": {
"count": 1,
"docs": [
{
"disposition": 2,
"disposition_name": "Malicious",
"observable": {
"type": "url",
"value": "http://0win365.com/wp-admin/sites/"
},
"type": "verdict"
}
]
}
}
},
'/refer/observables': {
'data': [
{
"categories": [
"Search",
"Palo Alto AutoFocus"
],
"description": "Look up this URL on Palo Alto AutoFocus",
"id": "ref-palo-alto-autofocus-search-url-http://"
"0win365.com/wp-admin/sites/",
"title": "Search for this URL",
"url": "https://autofocus.paloaltonetworks.com/#/search/"
"indicator/url/http%3A%2F%2F0win365.com%2Fwp-"
"admin%2Fsites%2F/summary"
}
]
}
}
AUTOFOCUS_SHA256_RESPONSE_MOCK = {
"indicator": {
"indicatorValue": "7fa2c54d7dabb0503d75bdd13cc4d6a6520516a990fb7879ae0"
"52bad9520763b",
"indicatorType": "FILEHASH",
"summaryGenerationTs": 1607954098735,
"firstSeenTsGlobal": 1605847163000,
"lastSeenTsGlobal": 1605847163000,
"latestPanVerdicts": {
"WF_SAMPLE": "GRAYWARE"
},
"seenByDataSourceIds": [
"WF_SAMPLE"
]
},
"tags": [
{
"support_id": 1,
"tag_name": "RenameOnReboot",
"public_tag_name": "Unit42.RenameOnReboot",
"tag_definition_scope_id": 4,
"tag_definition_status_id": 1,
"count": 16068736,
"lasthit": "2020-12-14 03:08:59",
"description": "The PendingFileRenameOperations key stores the nam"
"es of files to be renamed when the system restarts"
". It consists of pairs of file names. The file spe"
"cified in the first item of the pair is renamed to"
" match the second item of the pair. The system add"
"s this entry to the registry when a user or progra"
"m tries to rename a file that is in use. The file "
"names are stored in the value of this entry until "
"the system is restarted and they are renamed. Whil"
"e this is often a legitimate operation, it is some"
"times used by malware to overwrite or replace legi"
"timate system binaries with malicious ones.",
"customer_name": "Palo Alto Networks Unit42",
"customer_industry": "High Tech",
"upVotes": None,
"downVotes": None,
"myVote": None,
"source": "Unit 42",
"tag_class_id": 5,
"tag_definition_id": 36580
},
{
"support_id": 1,
"tag_name": "HttpNoUserAgent",
"public_tag_name": "Unit42.HttpNoUserAgent",
"tag_definition_scope_id": 4,
"tag_definition_status_id": 1,
"count": 23313610,
"lasthit": "2020-12-14 03:39:11",
"description": "A sample creates HTTP traffic but omits or uses a "
"blank user-agent field. Typically, legitimate appl"
"ications will include a user-agent value in HTTP r"
"equests. HTTP requests without the user-agent head"
"er or with a blank user agent value are extremely "
"suspect. This tag identified such suspect applicat"
"ions.",
"customer_name": "<NAME> Networks Unit42",
"customer_industry": "High Tech",
"upVotes": 4,
"downVotes": None,
"myVote": None,
"source": "Unit 42",
"tag_class_id": 5,
"tag_definition_id": 41533
},
{
"support_id": 1,
"tag_name": "SelfExtractingExecutable",
"public_tag_name": "Unit42.SelfExtractingExecutable",
"tag_definition_scope_id": 4,
"tag_definition_status_id": 1,
"count": 3750321,
"lasthit": "2020-12-13 21:31:08",
"description": "This sample is a self-extracting executable, which"
" is often an attribute of legitimate executables b"
"ut is also commonly used by malware authors.\n\nTh"
"ese files allow attackers to compress their malici"
"ous file(s) into a single binary and launch a seri"
"es of commands in sequence. This often allows them"
" to execute a malicious binary and display a decoy"
" document in a simple fashion.",
"customer_name": "<NAME> Unit42",
"customer_industry": "High Tech",
"upVotes": 1,
"downVotes": None,
"myVote": None,
"source": "Unit 42",
"tag_class_id": 5,
"tag_definition_id": 42834
}
],
"bucketInfo": {
"minutePoints": 200,
"dailyPoints": 25000,
"minuteBucketStart": "2020-11-20 05:02:52",
"dailyBucketStart": "2020-11-20 04:52:40",
"minutePointsRemaining": 196,
"dailyPointsRemaining": 24980,
"waitInSeconds": 0
}
}
INTEGRATION_SHA256_RESPONSE_MOCK = {
'/observe/observables': {
"data": {
"judgements": {
"count": 1,
"docs": [
{
"confidence": "High",
"disposition": 3,
"disposition_name": "Suspicious",
"observable": {
"type": "sha256",
"value": "7fa2c54d7dabb0503d75bdd13cc4d6a6520516a9"
"90fb7879ae052bad9520763b"
},
"priority": 85,
"reason": "GRAYWARE in AutoFocus",
"schema_version": "1.0.22",
"severity": "High",
"source": "Palo Alto AutoFocus",
"source_uri": "https://autofocus.paloaltonetworks.com"
"/#/search/indicator/sha256/7fa2c54d7dab"
"b0503d75bdd13cc4d6a6520516a990fb7879ae"
"052bad9520763b",
"type": "judgement"
}
]
},
"verdicts": {
"count": 1,
"docs": [
{
"disposition": 3,
"disposition_name": "Suspicious",
"observable": {
"type": "sha256",
"value": "7fa2c54d7dabb0503d75bdd13cc4d6a6520516a9"
"90fb7879ae052bad9520763b"
},
"type": "verdict"
}
]
}
}
},
'/refer/observables': {
'data': [
{
"categories": [
"Search",
"Palo Alto AutoFocus"
],
"description": "Look up this SHA256 on Palo Alto AutoFocus",
"id": "ref-palo-alto-autofocus-search-sha256-"
"7fa2c54d7dabb0503d75bdd13cc4d6a6520516a990fb7879ae052ba"
"d9520763b",
"title": "Search for this SHA256",
"url": "https://autofocus.paloaltonetworks.com/#/search/"
"indicator/sha256/7fa2c54d7dabb0503d75bdd13cc4d6a"
"6520516a990fb7879ae052bad9520763b"
}
]
}
}
ENTITY_LIFETIME_MOCK = timedelta(days=7)
EXPECTED_RESPONSE_OF_JWKS_ENDPOINT = {
'keys': [
{
'kty': 'RSA',
'n': 'tSKfSeI0fukRIX38AHlKB1YPpX8PUYN2JdvfM-XjNmLfU1M74N0V'
'mdzIX95sneQGO9kC2xMIE-AIlt52Yf_KgBZggAlS9Y0Vx8DsSL2H'
'vOjguAdXir3vYLvAyyHin_mUisJOqccFKChHKjnk0uXy_38-1r17'
'_cYTp76brKpU1I4kM20M__dbvLBWjfzyw9ehufr74aVwr-0xJfsB'
'Vr2oaQFww_XHGz69Q7yHK6DbxYO4w4q2sIfcC4pT8XTPHo4JZ2M7'
'33Ea8a7HxtZS563_mhhRZLU5aynQpwaVv2U--CL6EvGt8TlNZOke'
'Rv8wz-Rt8B70jzoRpVK36rR-pHKlXhMGT619v82LneTdsqA25Wi2'
'Ld_c0niuul24A6-aaj2u9SWbxA9LmVtFntvNbRaHXE1SLpLPoIp8'
'uppGF02Nz2v3ld8gCnTTWfq_BQ80Qy8e0coRRABECZrjIMzHEg6M'
'loRDy4na0pRQv61VogqRKDU2r3_VezFPQDb3ciYsZjWBr3HpNOkU'
'jTrvLmFyOE9Q5R_qQGmc6BYtfk5rn7iIfXlkJAZHXhBy-ElBuiBM'
'-YSkFM7dH92sSIoZ05V4MP09Xcppx7kdwsJy72Sust9Hnd9B7V35'
'YnVF6W791lVHnenhCJOziRmkH4xLLbPkaST2Ks3IHH7tVltM6NsR'
'k3jNdVM',
'e': 'AQAB',
'alg': 'RS256',
'kid': '02B1174234C29F8EFB69911438F597FF3FFEE6B7',
'use': 'sig'
}
]
}
RESPONSE_OF_JWKS_ENDPOINT_WITH_WRONG_KEY = {
'keys': [
{
'kty': 'RSA',
'n': '<KEY>'
'<KEY>'
'<KEY>vYLvAyyHin_mUisJOqccFKChHKjnk0uXy_38-1r17'
'_cYTp76brKpU1I4kM20M__dbvLBWjfzyw9ehufr74aVwr-0xJfsB'
'<KEY>XHGz69Q7yHK6DbxYO4w4q2sIfcC4pT8XTPHo4JZ2M7'
'<KEY>'
'Rv8wz-Rt8B70jzoRpVK36rR-pHKlXhMGT619v82LneTdsqA25Wi2'
'Ld_c0niuul24A6-aaj2u9SWbxA9LmVtFntvNbRaHXE1SLpLPoIp8'
'uppGF02Nz2v3ld8gCnTTWfq_BQ80Qy8e0coRRABECZrjIMzHEg6M'
'loRDy4na0pRQv61VogqRKDU2r3_VezFPQDb3ciYsZjWBr3HpNOkU'
'jTrvLmFyOE9Q5R_qQGmc6BYtfk5rn7iIfXlkJAZHXhBy-ElBuiBM'
'-YSkFM7dH92sSIoZ05V4MP09Xcppx7kdwsJy72Sust9Hnd9B7V35'
'YnVF6W791lVHnenhCJOziRmkH4xLLbPkaST2Ks3IHH7tVltM6NsR'
'k3jNdVM',
'e': 'AQAB',
'alg': 'RS256',
'kid': '02B1174234C29F8EFB69911438F597FF3FFEE6B7',
'use': 'sig'
}
]
}
PRIVATE_KEY = """-----<KEY>
-----END RSA PRIVATE KEY-----"""
|
[
"datetime.timedelta"
] |
[((19231, 19248), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (19240, 19248), False, 'from datetime import timedelta\n')]
|
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2018-05-10 17:57:07
# @Last Modified by: <NAME>
# @Last Modified time: 2018-05-28 21:50:38
from distutils.core import setup
setup(
name = 'IPX800',
packages = ['IPX800'],
version = '0.1.5',
description = 'Library for controlling GCE-Electronics IPX800',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/d4mi1/python-ipx800',
download_url = 'https://github.com/d4mi1/python-ipx800/archive/0.1.1.tar.gz',
keywords = ['GCE-Electronics', 'IPX800'],
classifiers = [],
)
|
[
"distutils.core.setup"
] |
[((180, 542), 'distutils.core.setup', 'setup', ([], {'name': '"""IPX800"""', 'packages': "['IPX800']", 'version': '"""0.1.5"""', 'description': '"""Library for controlling GCE-Electronics IPX800"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/d4mi1/python-ipx800"""', 'download_url': '"""https://github.com/d4mi1/python-ipx800/archive/0.1.1.tar.gz"""', 'keywords': "['GCE-Electronics', 'IPX800']", 'classifiers': '[]'}), "(name='IPX800', packages=['IPX800'], version='0.1.5', description=\n 'Library for controlling GCE-Electronics IPX800', author='<NAME>',\n author_email='<EMAIL>', url='https://github.com/d4mi1/python-ipx800',\n download_url=\n 'https://github.com/d4mi1/python-ipx800/archive/0.1.1.tar.gz', keywords\n =['GCE-Electronics', 'IPX800'], classifiers=[])\n", (185, 542), False, 'from distutils.core import setup\n')]
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNA - Service template - App Engine handlers.
App Engine handler definitions for the service, including the main "launcher".
"""
import base64
import json
from dna_general_settings import GCE_MACHINE_MAP
from dna_project_settings import PROJECT_ID
from gcp_connector import GCPConnector
from service_template_settings import GBQ_DATASET
from service_template_settings import GCE_RUN_SCRIPT
from service_template_settings import GCS_BUCKET
from service_template_settings import SERVICE_NAME
def service_template_launcher():
gcp = GCPConnector(PROJECT_ID)
queue_name = GCE_MACHINE_MAP['l0']['queue']
# Change the input for the initial config_data (e.g. from a Spreadsheet)
config_data = [['account1', 'data_for_account1'],
['account2', 'data_for_account2']]
for row in config_data:
# Add params to be passed via task payload
task_params = dict()
task_params['service'] = SERVICE_NAME # Mandatory field
task_params['run_script'] = GCE_RUN_SCRIPT # Mandatory field
task_params['account_id'] = row[0]
task_params['label'] = row[1]
task_params['bucket'] = GCS_BUCKET
task_params['dataset'] = GBQ_DATASET
# Add a new task to the task queue
string_payload = json.dumps(task_params)
base64_payload = base64.urlsafe_b64encode(string_payload.encode())
payload = base64_payload.decode()
gcp.gct_createtask(queue_name, payload)
return 'OK'
|
[
"gcp_connector.GCPConnector",
"json.dumps"
] |
[((1115, 1139), 'gcp_connector.GCPConnector', 'GCPConnector', (['PROJECT_ID'], {}), '(PROJECT_ID)\n', (1127, 1139), False, 'from gcp_connector import GCPConnector\n'), ((1806, 1829), 'json.dumps', 'json.dumps', (['task_params'], {}), '(task_params)\n', (1816, 1829), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Jan 25, 2015
Loaders which get data from pickles
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import pickle
import numpy
import six
from zope.interface import implementer
from veles import error
from veles.compat import from_none
from veles.external.progressbar import ProgressBar
from veles.memory import interleave
from veles.loader.base import CLASS_NAME, Loader
from veles.loader.image import IImageLoader, COLOR_CHANNELS_MAP
from veles.loader.fullbatch import FullBatchLoader, IFullBatchLoader
from veles.loader.fullbatch_image import FullBatchImageLoader
@implementer(IFullBatchLoader)
class PicklesLoader(FullBatchLoader):
"""
Loads samples from pickles for data set.
"""
def __init__(self, workflow, **kwargs):
super(PicklesLoader, self).__init__(workflow, **kwargs)
self._test_pickles = list(kwargs.get("test_pickles", []))
self._validation_pickles = list(kwargs.get("validation_pickles", []))
self._train_pickles = list(kwargs.get("train_pickles", []))
self._pickles = (self.test_pickles, self.validation_pickles,
self.train_pickles)
@property
def test_pickles(self):
return self._test_pickles
@property
def validation_pickles(self):
return self._validation_pickles
@property
def train_pickles(self):
return self._train_pickles
def reshape(self, shape):
return shape
def transform_data(self, data):
return data
def load_data(self):
pbar = ProgressBar(maxval=sum(len(p) for p in self._pickles),
term_width=40)
self.info("Loading %d pickles...", pbar.maxval)
pbar.start()
loaded = [self.load_pickles(i, self._pickles[i], pbar)
for i in range(3)]
pbar.finish()
self.info("Initializing the arrays...")
shape = loaded[2][1][0].shape[1:]
for i in range(2):
if loaded[i][0] > 0:
shi = loaded[i][1][0].shape[1:]
if shape != shi:
raise error.BadFormatError(
"TRAIN and %s sets have the different sample shape "
"(%s vs %s)" % (CLASS_NAME[i], shape, shi))
self.create_originals(self.reshape(shape))
offsets = [0, 0]
for ds in range(3):
if loaded[ds][0] == 0:
continue
for arr in loaded[ds][1]:
self.original_data[offsets[0]:(offsets[0] + arr.shape[0])] = \
self.transform_data(arr)
offsets[0] += arr.shape[0]
for arr in loaded[ds][2]:
self.original_labels[offsets[1]:(offsets[1] + arr.shape[0])] =\
arr
offsets[1] += arr.shape[0]
def load_pickles(self, index, pickles, pbar):
unpickled = []
for pick in pickles:
try:
with open(pick, "rb") as fin:
self.debug("Loading %s...", pick)
if six.PY3:
loaded = pickle.load(fin, encoding='charmap')
else:
loaded = pickle.load(fin)
unpickled.append(loaded)
pbar.inc()
except Exception as e:
self.warning(
"Failed to load %s (part of %s set)" %
(pick, CLASS_NAME[index]))
raise from_none(e)
data = []
labels = []
for obj, pick in zip(unpickled, pickles):
if not isinstance(obj, dict):
raise TypeError(
"%s has the wrong format (part of %s set)" %
(pick, CLASS_NAME[index]))
try:
data.append(obj["data"])
labels.append(
numpy.array(obj["labels"], dtype=Loader.LABEL_DTYPE))
except KeyError as e:
self.error("%s has the wrong format (part of %s set)",
pick, CLASS_NAME[index])
raise from_none(e)
lengths = [0, sum(len(l) for l in labels)]
for arr in data:
lengths[0] += arr.shape[0]
if arr.shape[1:] != data[0].shape[1:]:
raise error.BadFormatError(
"Array has a different shape: expected %s, got %s"
"(%s set)" % (data[0].shape[1:],
arr.shape[1:], CLASS_NAME[index]))
if lengths[0] != lengths[1]:
raise error.BadFormatError(
"Data and labels has the different number of samples (data %d,"
" labels %d)" % lengths)
length = lengths[0]
self.class_lengths[index] = length
return length, data, labels
@implementer(IImageLoader)
class PicklesImageFullBatchLoader(PicklesLoader, FullBatchImageLoader):
MAPPING = "full_batch_pickles_image"
def __init__(self, workflow, **kwargs):
super(PicklesImageFullBatchLoader, self).__init__(workflow, **kwargs)
# Since we can not extract the color space information from pickles
# set it explicitly without any default value
self.color_space = kwargs["color_space"]
def get_image_label(self, key):
return int(self.image_labels[key])
def get_image_info(self, key):
return self.image_data[key].shape[:2], self.color_space
def get_image_data(self, key):
return self.image_data[key]
def get_keys(self, index):
offsets = [0, self.class_lengths[0],
self.class_lengths[0] + self.class_lengths[1],
self.total_samples]
self.original_shape = self.image_data.shape[1:-1]
return range(offsets[index], offsets[index + 1])
def reshape(self, shape):
if shape[0] == COLOR_CHANNELS_MAP[self.color_space]:
return shape[1:] + (shape[0],)
return shape
def transform_data(self, data):
if data.shape[1] == COLOR_CHANNELS_MAP[self.color_space]:
return interleave(data)
return data
def load_data(self):
PicklesLoader.load_data(self)
self.original_class_lengths = self.class_lengths
self.image_data = self.original_data.mem
self.original_data.mem = None
self.image_labels = self.original_labels[:]
del self.original_labels[:]
FullBatchImageLoader.load_data(self)
assert self.original_class_lengths == self.class_lengths
del self.image_data
def initialize(self, device, **kwargs):
super(PicklesImageFullBatchLoader, self).initialize(
device=device, **kwargs)
del self.image_labels
|
[
"zope.interface.implementer",
"veles.compat.from_none",
"veles.error.BadFormatError",
"numpy.array",
"veles.loader.fullbatch_image.FullBatchImageLoader.load_data",
"pickle.load",
"veles.memory.interleave"
] |
[((1713, 1742), 'zope.interface.implementer', 'implementer', (['IFullBatchLoader'], {}), '(IFullBatchLoader)\n', (1724, 1742), False, 'from zope.interface import implementer\n'), ((5966, 5991), 'zope.interface.implementer', 'implementer', (['IImageLoader'], {}), '(IImageLoader)\n', (5977, 5991), False, 'from zope.interface import implementer\n'), ((7575, 7611), 'veles.loader.fullbatch_image.FullBatchImageLoader.load_data', 'FullBatchImageLoader.load_data', (['self'], {}), '(self)\n', (7605, 7611), False, 'from veles.loader.fullbatch_image import FullBatchImageLoader\n'), ((5713, 5829), 'veles.error.BadFormatError', 'error.BadFormatError', (["('Data and labels has the different number of samples (data %d, labels %d)' %\n lengths)"], {}), "(\n 'Data and labels has the different number of samples (data %d, labels %d)'\n % lengths)\n", (5733, 5829), False, 'from veles import error\n'), ((7234, 7250), 'veles.memory.interleave', 'interleave', (['data'], {}), '(data)\n', (7244, 7250), False, 'from veles.memory import interleave\n'), ((5443, 5589), 'veles.error.BadFormatError', 'error.BadFormatError', (["('Array has a different shape: expected %s, got %s(%s set)' % (data[0].\n shape[1:], arr.shape[1:], CLASS_NAME[index]))"], {}), "(\n 'Array has a different shape: expected %s, got %s(%s set)' % (data[0].\n shape[1:], arr.shape[1:], CLASS_NAME[index]))\n", (5463, 5589), False, 'from veles import error\n'), ((3224, 3348), 'veles.error.BadFormatError', 'error.BadFormatError', (["('TRAIN and %s sets have the different sample shape (%s vs %s)' % (\n CLASS_NAME[i], shape, shi))"], {}), "(\n 'TRAIN and %s sets have the different sample shape (%s vs %s)' % (\n CLASS_NAME[i], shape, shi))\n", (3244, 3348), False, 'from veles import error\n'), ((4612, 4624), 'veles.compat.from_none', 'from_none', (['e'], {}), '(e)\n', (4621, 4624), False, 'from veles.compat import from_none\n'), ((5009, 5061), 'numpy.array', 'numpy.array', (["obj['labels']"], {'dtype': 'Loader.LABEL_DTYPE'}), "(obj['labels'], dtype=Loader.LABEL_DTYPE)\n", (5020, 5061), False, 'import numpy\n'), ((5242, 5254), 'veles.compat.from_none', 'from_none', (['e'], {}), '(e)\n', (5251, 5254), False, 'from veles.compat import from_none\n'), ((4230, 4266), 'pickle.load', 'pickle.load', (['fin'], {'encoding': '"""charmap"""'}), "(fin, encoding='charmap')\n", (4241, 4266), False, 'import pickle\n'), ((4326, 4342), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (4337, 4342), False, 'import pickle\n')]
|
import Piper
import html
import os
import DB
class Telegram2VK(Piper.Piper):
def __init__(self, source, dest):
""" Gets 2 handlers """
super(Telegram2VK, self).__init__(source, dest)
def converter(self, in_q, out_q):
while True:
telegram_msg = in_q.get(block=True)
if 'message' in telegram_msg: # New message
if 'text' not in telegram_msg['message']:
continue
vk_id = DB.convert_ids('Telegram', 'VK', telegram_msg['message']['chat']['id'])
if vk_id is None:
continue
vk_msg = {'api_method': 'messages.send', 'params': {}}
vk_msg['params']['peer_id'] = vk_id
vk_msg['params']['message'] = telegram_msg['message']['text']
out_q.put(vk_msg)
# else:
# discarding the message
|
[
"DB.convert_ids"
] |
[((482, 553), 'DB.convert_ids', 'DB.convert_ids', (['"""Telegram"""', '"""VK"""', "telegram_msg['message']['chat']['id']"], {}), "('Telegram', 'VK', telegram_msg['message']['chat']['id'])\n", (496, 553), False, 'import DB\n')]
|
import sqlite3
if __name__ == '__main__':
SQL_FILE_NAME = "main_solo_vals_flame_advantaged.sql"
DB_FILE_NAME = "solo_values_FA.db"
connection = sqlite3.connect(DB_FILE_NAME)
cursor = connection.cursor()
file = open(SQL_FILE_NAME)
read_file = file.read()
cursor.executescript(read_file)
connection.close()
|
[
"sqlite3.connect"
] |
[((162, 191), 'sqlite3.connect', 'sqlite3.connect', (['DB_FILE_NAME'], {}), '(DB_FILE_NAME)\n', (177, 191), False, 'import sqlite3\n')]
|
from django.urls import path, include
from . import views
urlpatterns = [
path(
'game-filter-choices',
views.GameFilterChoicesView.as_view(),
name='game-filter-choices'
),
path(
'games',
views.ListGames.as_view(),
name='list-games'
),
path(
'games/create',
views.CreateGameView.as_view(),
name='create-game'
),
path(
'game/<int:pk>/join',
views.ToggleJoinGame.as_view(),
name='toggle-join-game'
),
path(
'game/finalize/<int:pk>',
views.ToggleFinalizeOrdersView.as_view(),
name='toggle-finalize-orders'
),
path(
'game/<int:pk>',
views.GameStateView.as_view(),
name='game-state'
),
path(
'game/<int:game>/order',
views.CreateOrderView.as_view(),
name='order'
),
path(
'game/<int:game>/orders',
views.ListOrdersView.as_view(),
name='orders'
),
path(
'game/<int:game>/nation-state',
views.RetrievePrivateNationStateView.as_view(),
name='private-nation-state'
),
path(
'game/<int:game>/order/<int:pk>',
views.DestroyOrderView.as_view(),
name='order'
),
path(
'api-auth/',
include('rest_framework.urls', namespace='rest_framework')
)
]
|
[
"django.urls.include"
] |
[((1310, 1368), 'django.urls.include', 'include', (['"""rest_framework.urls"""'], {'namespace': '"""rest_framework"""'}), "('rest_framework.urls', namespace='rest_framework')\n", (1317, 1368), False, 'from django.urls import path, include\n')]
|
"""Add newsletter history
Revision ID: <KEY>
Revises: 2<PASSWORD>a6ada0d
Create Date: 2020-11-03 12:01:49.481652
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '28165a6ada0d'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('newsletter',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('inscription_id', sa.Integer(), nullable=True),
sa.Column('recommandation_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['inscription_id'], ['inscription.id'], ),
sa.ForeignKeyConstraint(['recommandation_id'], ['recommandation.id'], ),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('newsletter')
|
[
"alembic.op.drop_table",
"sqlalchemy.Integer",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.ForeignKeyConstraint"
] |
[((804, 831), 'alembic.op.drop_table', 'op.drop_table', (['"""newsletter"""'], {}), "('newsletter')\n", (817, 831), False, 'from alembic import op\n'), ((589, 652), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['inscription_id']", "['inscription.id']"], {}), "(['inscription_id'], ['inscription.id'])\n", (612, 652), True, 'import sqlalchemy as sa\n'), ((664, 733), 'sqlalchemy.ForeignKeyConstraint', 'sa.ForeignKeyConstraint', (["['recommandation_id']", "['recommandation.id']"], {}), "(['recommandation_id'], ['recommandation.id'])\n", (687, 733), True, 'import sqlalchemy as sa\n'), ((745, 774), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (768, 774), True, 'import sqlalchemy as sa\n'), ((415, 427), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (425, 427), True, 'import sqlalchemy as sa\n'), ((482, 494), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (492, 494), True, 'import sqlalchemy as sa\n'), ((551, 563), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (561, 563), True, 'import sqlalchemy as sa\n')]
|
from django.shortcuts import render
from django.http import HttpResponse, HttpRequest
# Create your views here.
def index(request: HttpRequest):
return HttpResponse("Hello, world.")
|
[
"django.http.HttpResponse"
] |
[((156, 185), 'django.http.HttpResponse', 'HttpResponse', (['"""Hello, world."""'], {}), "('Hello, world.')\n", (168, 185), False, 'from django.http import HttpResponse, HttpRequest\n')]
|
'''
Unittests for pysal.model.spreg.error_sp_hom module
'''
import unittest
import pysal.lib
from pysal.model.spreg import error_sp_hom as HOM
import numpy as np
from pysal.lib.common import RTOL
import pysal.model.spreg
class BaseGM_Error_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.X = np.hstack((np.ones(self.y.shape),self.X))
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = HOM.BaseGM_Error_Hom(self.y, self.X, self.w.sparse, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,RTOL)
betas = np.array([[ 47.9478524 ], [ 0.70633223], [ -0.55595633], [ 0.41288558]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
np.testing.assert_allclose(reg.u[0],np.array([27.466734]),RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 32.37298547]),RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
np.testing.assert_allclose(reg.predy[0],np.array([ 53.000269]),RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
sig2 = 189.94459439729718
np.testing.assert_allclose(reg.sig2,sig2)
vm = np.array([[ 1.51340717e+02, -5.29057506e+00, -1.85654540e+00, -2.39139054e-03], [ -5.29057506e+00, 2.46669610e-01, 5.14259101e-02, 3.19241302e-04], [ -1.85654540e+00, 5.14259101e-02, 3.20510550e-02, -5.95640240e-05], [ -2.39139054e-03, 3.19241302e-04, -5.95640240e-05, 3.36690159e-02]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
xtx = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03], [ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04], [ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04]])
np.testing.assert_allclose(reg.xtx,xtx,RTOL)
class GM_Error_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
X.append(db.by_col("CRIME"))
self.X = np.array(X).T
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = HOM.GM_Error_Hom(self.y, self.X, self.w, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
x = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.x[0],x,RTOL)
betas = np.array([[ 47.9478524 ], [ 0.70633223], [ -0.55595633], [ 0.41288558]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
np.testing.assert_allclose(reg.u[0],np.array([27.46673388]),RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 32.37298547]),RTOL)
np.testing.assert_allclose(reg.predy[0],np.array([ 53.00026912]),RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
vm = np.array([[ 1.51340717e+02, -5.29057506e+00, -1.85654540e+00, -2.39139054e-03], [ -5.29057506e+00, 2.46669610e-01, 5.14259101e-02, 3.19241302e-04], [ -1.85654540e+00, 5.14259101e-02, 3.20510550e-02, -5.95640240e-05], [ -2.39139054e-03, 3.19241302e-04, -5.95640240e-05, 3.36690159e-02]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
np.testing.assert_allclose(reg.iteration,1,RTOL)
my = 38.436224469387746
np.testing.assert_allclose(reg.mean_y,my)
std_y = 18.466069465206047
np.testing.assert_allclose(reg.std_y,std_y)
pr2 = 0.34950977055969729
np.testing.assert_allclose(reg.pr2,pr2)
sig2 = 189.94459439729718
np.testing.assert_allclose(reg.sig2,sig2)
std_err = np.array([ 12.30206149, 0.49665844, 0.17902808, 0.18349119])
np.testing.assert_allclose(reg.std_err,std_err,RTOL)
z_stat = np.array([[ 3.89754616e+00, 9.71723059e-05], [ 1.42216900e+00, 1.54977196e-01], [ -3.10541409e+00, 1.90012806e-03], [ 2.25016500e+00, 2.44384731e-02]])
np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
xtx = np.array([[ 4.90000000e+01, 7.04371999e+02, 1.72131237e+03], [ 7.04371999e+02, 1.16866734e+04, 2.15575320e+04], [ 1.72131237e+03, 2.15575320e+04, 7.39058986e+04]])
np.testing.assert_allclose(reg.xtx,xtx,RTOL)
class BaseGM_Endog_Error_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
self.X = np.hstack((np.ones(self.y.shape),self.X))
yd = []
yd.append(db.by_col("CRIME"))
self.yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
self.q = np.array(q).T
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = HOM.BaseGM_Endog_Error_Hom(self.y, self.X, self.yd, self.q, self.w.sparse, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([ 80.467003]),RTOL)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,RTOL)
z = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.z[0],z,RTOL)
h = np.array([ 1. , 19.531, 5.03 ])
np.testing.assert_allclose(reg.h[0],h,RTOL)
yend = np.array([ 15.72598])
np.testing.assert_allclose(reg.yend[0],yend,RTOL)
q = np.array([ 5.03])
np.testing.assert_allclose(reg.q[0],q,RTOL)
betas = np.array([[ 55.36575166], [ 0.46432416], [ -0.66904404], [ 0.43205526]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
u = np.array([ 26.55390939])
np.testing.assert_allclose(reg.u[0],u,RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 31.74114306]),RTOL)
predy = np.array([ 53.91309361])
np.testing.assert_allclose(reg.predy[0],predy,RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
sig2 = 190.59435238060928
np.testing.assert_allclose(reg.sig2,sig2)
vm = np.array([[ 5.52064057e+02, -1.61264555e+01, -8.86360735e+00, 1.04251912e+00], [ -1.61264555e+01, 5.44898242e-01, 2.39518645e-01, -1.88092950e-02], [ -8.86360735e+00, 2.39518645e-01, 1.55501840e-01, -2.18638648e-02], [ 1.04251912e+00, -1.88092950e-02, -2.18638648e-02, 3.71222222e-02]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
its = 1
np.testing.assert_allclose(reg.iteration,its,RTOL)
my = 38.436224469387746
np.testing.assert_allclose(reg.mean_y,my)
std_y = 18.466069465206047
np.testing.assert_allclose(reg.std_y,std_y)
sig2 = 0
#np.testing.assert_allclose(reg.sig2,sig2)
hth = np.array([[ 49. , 704.371999 , 139.75 ], [ 704.371999 , 11686.67338121, 2246.12800625], [ 139.75 , 2246.12800625, 498.5851]])
np.testing.assert_allclose(reg.hth,hth,RTOL)
class GM_Endog_Error_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
yd = []
yd.append(db.by_col("CRIME"))
self.yd = np.array(yd).T
q = []
q.append(db.by_col("DISCBD"))
self.q = np.array(q).T
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = HOM.GM_Endog_Error_Hom(self.y, self.X, self.yd, self.q, self.w, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([ 80.467003]),RTOL)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,RTOL)
z = np.array([ 1. , 19.531 , 15.72598])
np.testing.assert_allclose(reg.z[0],z,RTOL)
h = np.array([ 1. , 19.531, 5.03 ])
np.testing.assert_allclose(reg.h[0],h,RTOL)
yend = np.array([ 15.72598])
np.testing.assert_allclose(reg.yend[0],yend,RTOL)
q = np.array([ 5.03])
np.testing.assert_allclose(reg.q[0],q,RTOL)
betas = np.array([[ 55.36575166], [ 0.46432416], [ -0.66904404], [ 0.43205526]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
u = np.array([ 26.55390939])
np.testing.assert_allclose(reg.u[0],u,RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 31.74114306]),RTOL)
predy = np.array([ 53.91309361])
np.testing.assert_allclose(reg.predy[0],predy,RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
vm = np.array([[ 5.52064057e+02, -1.61264555e+01, -8.86360735e+00, 1.04251912e+00], [ -1.61264555e+01, 5.44898242e-01, 2.39518645e-01, -1.88092950e-02], [ -8.86360735e+00, 2.39518645e-01, 1.55501840e-01, -2.18638648e-02], [ 1.04251912e+00, -1.88092950e-02, -2.18638648e-02, 3.71222222e-02]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
its = 1
np.testing.assert_allclose(reg.iteration,its,RTOL)
my = 38.436224469387746
np.testing.assert_allclose(reg.mean_y,my)
std_y = 18.466069465206047
np.testing.assert_allclose(reg.std_y,std_y)
pr2 = 0.34647366525657419
np.testing.assert_allclose(reg.pr2,pr2)
sig2 = 190.59435238060928
np.testing.assert_allclose(reg.sig2,sig2)
#std_err
std_err = np.array([ 23.49604343, 0.73817223, 0.39433722, 0.19267128])
np.testing.assert_allclose(reg.std_err,std_err,RTOL)
z_stat = np.array([[ 2.35638617, 0.01845372], [ 0.62901874, 0.52933679], [-1.69662923, 0.08976678], [ 2.24244556, 0.02493259]])
np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
class BaseGM_Combo_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
yd2, q2 = pysal.model.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 1, True)
self.X = np.hstack((np.ones(self.y.shape),self.X))
reg = HOM.BaseGM_Combo_Hom(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,RTOL)
betas = np.array([[ 10.12541428], [ 1.56832263], [ 0.15132076], [ 0.21033397]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
np.testing.assert_allclose(reg.u[0],np.array([34.3450723]),RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 36.6149682]),RTOL)
np.testing.assert_allclose(reg.predy[0],np.array([ 46.1219307]),RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
vm = np.array([[ 2.33694742e+02, -6.66856869e-01, -5.58304254e+00, 4.85488380e+00], [ -6.66856869e-01, 1.94241504e-01, -5.42327138e-02, 5.37225570e-02], [ -5.58304254e+00, -5.42327138e-02, 1.63860721e-01, -1.44425498e-01], [ 4.85488380e+00, 5.37225570e-02, -1.44425498e-01, 1.78622255e-01]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
z = np.array([ 1. , 19.531 , 35.4585005])
np.testing.assert_allclose(reg.z[0],z,RTOL)
h = np.array([ 1. , 19.531, 18.594])
np.testing.assert_allclose(reg.h[0],h,RTOL)
yend = np.array([ 35.4585005])
np.testing.assert_allclose(reg.yend[0],yend,RTOL)
q = np.array([ 18.594])
np.testing.assert_allclose(reg.q[0],q,RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
its = 1
np.testing.assert_allclose(reg.iteration,its,RTOL)
my = 38.436224469387746
np.testing.assert_allclose(reg.mean_y,my)
std_y = 18.466069465206047
np.testing.assert_allclose(reg.std_y,std_y)
sig2 = 232.22680651270042
#np.testing.assert_allclose(reg.sig2,sig2)
np.testing.assert_allclose(reg.sig2,sig2)
hth = np.array([[ 49. , 704.371999 , 724.7435916 ], [ 704.371999 , 11686.67338121, 11092.519988 ], [ 724.7435916 , 11092.519988 , 11614.62257048]])
np.testing.assert_allclose(reg.hth,hth,RTOL)
class GM_Combo_Hom_Tester(unittest.TestCase):
def setUp(self):
db=pysal.lib.io.open(pysal.lib.examples.get_path("columbus.dbf"),"r")
y = np.array(db.by_col("HOVAL"))
self.y = np.reshape(y, (49,1))
X = []
X.append(db.by_col("INC"))
self.X = np.array(X).T
self.w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
self.w.transform = 'r'
def test_model(self):
reg = HOM.GM_Combo_Hom(self.y, self.X, w=self.w, A1='hom_sc')
np.testing.assert_allclose(reg.y[0],np.array([80.467003]),RTOL)
x = np.array([ 1. , 19.531])
np.testing.assert_allclose(reg.x[0],x,RTOL)
betas = np.array([[ 10.12541428], [ 1.56832263], [ 0.15132076], [ 0.21033397]])
np.testing.assert_allclose(reg.betas,betas,RTOL)
np.testing.assert_allclose(reg.u[0],np.array([34.3450723]),RTOL)
np.testing.assert_allclose(reg.e_filtered[0],np.array([ 36.6149682]),RTOL)
np.testing.assert_allclose(reg.e_pred[0],np.array([ 32.90372983]),RTOL)
np.testing.assert_allclose(reg.predy[0],np.array([ 46.1219307]),RTOL)
np.testing.assert_allclose(reg.predy_e[0],np.array([47.56327317]),RTOL)
np.testing.assert_allclose(reg.n,49,RTOL)
np.testing.assert_allclose(reg.k,3,RTOL)
z = np.array([ 1. , 19.531 , 35.4585005])
np.testing.assert_allclose(reg.z[0],z,RTOL)
h = np.array([ 1. , 19.531, 18.594])
np.testing.assert_allclose(reg.h[0],h,RTOL)
yend = np.array([ 35.4585005])
np.testing.assert_allclose(reg.yend[0],yend,RTOL)
q = np.array([ 18.594])
np.testing.assert_allclose(reg.q[0],q,RTOL)
i_s = 'Maximum number of iterations reached.'
np.testing.assert_string_equal(reg.iter_stop,i_s)
np.testing.assert_allclose(reg.iteration,1,RTOL)
my = 38.436224469387746
np.testing.assert_allclose(reg.mean_y,my)
std_y = 18.466069465206047
np.testing.assert_allclose(reg.std_y,std_y)
pr2 = 0.28379825632694394
np.testing.assert_allclose(reg.pr2,pr2)
pr2_e = 0.25082892555141506
np.testing.assert_allclose(reg.pr2_e,pr2_e)
sig2 = 232.22680651270042
#np.testing.assert_allclose(reg.sig2, sig2)
np.testing.assert_allclose(reg.sig2, sig2)
std_err = np.array([ 15.28707761, 0.44072838, 0.40479714, 0.42263726])
np.testing.assert_allclose(reg.std_err,std_err,RTOL)
z_stat = np.array([[ 6.62351206e-01, 5.07746167e-01], [ 3.55847888e+00, 3.73008780e-04], [ 3.73818749e-01, 7.08539170e-01], [ 4.97670189e-01, 6.18716523e-01]])
np.testing.assert_allclose(reg.z_stat,z_stat,RTOL)
vm = np.array([[ 2.33694742e+02, -6.66856869e-01, -5.58304254e+00, 4.85488380e+00], [ -6.66856869e-01, 1.94241504e-01, -5.42327138e-02, 5.37225570e-02], [ -5.58304254e+00, -5.42327138e-02, 1.63860721e-01, -1.44425498e-01], [ 4.85488380e+00, 5.37225570e-02, -1.44425498e-01, 1.78622255e-01]])
np.testing.assert_allclose(reg.vm,vm,RTOL)
suite = unittest.TestSuite()
test_classes = [BaseGM_Error_Hom_Tester, GM_Error_Hom_Tester,\
BaseGM_Endog_Error_Hom_Tester, GM_Endog_Error_Hom_Tester, \
BaseGM_Combo_Hom_Tester, GM_Combo_Hom_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
|
[
"unittest.TextTestRunner",
"unittest.TestSuite",
"pysal.model.spreg.error_sp_hom.GM_Combo_Hom",
"pysal.model.spreg.error_sp_hom.BaseGM_Error_Hom",
"pysal.model.spreg.error_sp_hom.GM_Endog_Error_Hom",
"pysal.model.spreg.error_sp_hom.BaseGM_Combo_Hom",
"numpy.ones",
"numpy.array",
"numpy.reshape",
"numpy.testing.assert_string_equal",
"unittest.TestLoader",
"numpy.testing.assert_allclose",
"pysal.model.spreg.error_sp_hom.GM_Error_Hom",
"pysal.model.spreg.error_sp_hom.BaseGM_Endog_Error_Hom"
] |
[((17068, 17088), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (17086, 17088), False, 'import unittest\n'), ((17414, 17439), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (17437, 17439), False, 'import unittest\n'), ((430, 452), 'numpy.reshape', 'np.reshape', (['y', '(49, 1)'], {}), '(y, (49, 1))\n', (440, 452), True, 'import numpy as np\n'), ((800, 864), 'pysal.model.spreg.error_sp_hom.BaseGM_Error_Hom', 'HOM.BaseGM_Error_Hom', (['self.y', 'self.X', 'self.w.sparse'], {'A1': '"""hom_sc"""'}), "(self.y, self.X, self.w.sparse, A1='hom_sc')\n", (820, 864), True, 'from pysal.model.spreg import error_sp_hom as HOM\n'), ((949, 982), 'numpy.array', 'np.array', (['[1.0, 19.531, 15.72598]'], {}), '([1.0, 19.531, 15.72598])\n', (957, 982), True, 'import numpy as np\n'), ((1001, 1046), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.x[0]', 'x', 'RTOL'], {}), '(reg.x[0], x, RTOL)\n', (1027, 1046), True, 'import numpy as np\n'), ((1061, 1128), 'numpy.array', 'np.array', (['[[47.9478524], [0.70633223], [-0.55595633], [0.41288558]]'], {}), '([[47.9478524], [0.70633223], [-0.55595633], [0.41288558]])\n', (1069, 1128), True, 'import numpy as np\n'), ((1144, 1194), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.betas', 'betas', 'RTOL'], {}), '(reg.betas, betas, RTOL)\n', (1170, 1194), True, 'import numpy as np\n'), ((1411, 1461), 'numpy.testing.assert_string_equal', 'np.testing.assert_string_equal', (['reg.iter_stop', 'i_s'], {}), '(reg.iter_stop, i_s)\n', (1441, 1461), True, 'import numpy as np\n'), ((1546, 1589), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.n', '(49)', 'RTOL'], {}), '(reg.n, 49, RTOL)\n', (1572, 1589), True, 'import numpy as np\n'), ((1596, 1638), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.k', '(3)', 'RTOL'], {}), '(reg.k, 3, RTOL)\n', (1622, 1638), True, 'import numpy as np\n'), ((1679, 1721), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.sig2', 'sig2'], {}), '(reg.sig2, sig2)\n', (1705, 1721), True, 'import numpy as np\n'), ((1734, 1992), 'numpy.array', 'np.array', (['[[151.340717, -5.29057506, -1.8565454, -0.00239139054], [-5.29057506, \n 0.24666961, 0.0514259101, 0.000319241302], [-1.8565454, 0.0514259101, \n 0.032051055, -5.9564024e-05], [-0.00239139054, 0.000319241302, -\n 5.9564024e-05, 0.0336690159]]'], {}), '([[151.340717, -5.29057506, -1.8565454, -0.00239139054], [-\n 5.29057506, 0.24666961, 0.0514259101, 0.000319241302], [-1.8565454, \n 0.0514259101, 0.032051055, -5.9564024e-05], [-0.00239139054, \n 0.000319241302, -5.9564024e-05, 0.0336690159]])\n', (1742, 1992), True, 'import numpy as np\n'), ((2040, 2084), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.vm', 'vm', 'RTOL'], {}), '(reg.vm, vm, RTOL)\n', (2066, 2084), True, 'import numpy as np\n'), ((2097, 2218), 'numpy.array', 'np.array', (['[[49.0, 704.371999, 1721.31237], [704.371999, 11686.6734, 21557.532], [\n 1721.31237, 21557.532, 73905.8986]]'], {}), '([[49.0, 704.371999, 1721.31237], [704.371999, 11686.6734, \n 21557.532], [1721.31237, 21557.532, 73905.8986]])\n', (2105, 2218), True, 'import numpy as np\n'), ((2280, 2326), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.xtx', 'xtx', 'RTOL'], {}), '(reg.xtx, xtx, RTOL)\n', (2306, 2326), True, 'import numpy as np\n'), ((2529, 2551), 'numpy.reshape', 'np.reshape', (['y', '(49, 1)'], {}), '(y, (49, 1))\n', (2539, 2551), True, 'import numpy as np\n'), ((2840, 2893), 'pysal.model.spreg.error_sp_hom.GM_Error_Hom', 'HOM.GM_Error_Hom', (['self.y', 'self.X', 'self.w'], {'A1': '"""hom_sc"""'}), "(self.y, self.X, self.w, A1='hom_sc')\n", (2856, 2893), True, 'from pysal.model.spreg import error_sp_hom as HOM\n'), ((2978, 3011), 'numpy.array', 'np.array', (['[1.0, 19.531, 15.72598]'], {}), '([1.0, 19.531, 15.72598])\n', (2986, 3011), True, 'import numpy as np\n'), ((3030, 3075), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.x[0]', 'x', 'RTOL'], {}), '(reg.x[0], x, RTOL)\n', (3056, 3075), True, 'import numpy as np\n'), ((3090, 3157), 'numpy.array', 'np.array', (['[[47.9478524], [0.70633223], [-0.55595633], [0.41288558]]'], {}), '([[47.9478524], [0.70633223], [-0.55595633], [0.41288558]])\n', (3098, 3157), True, 'import numpy as np\n'), ((3173, 3223), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.betas', 'betas', 'RTOL'], {}), '(reg.betas, betas, RTOL)\n', (3199, 3223), True, 'import numpy as np\n'), ((3467, 3510), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.n', '(49)', 'RTOL'], {}), '(reg.n, 49, RTOL)\n', (3493, 3510), True, 'import numpy as np\n'), ((3517, 3559), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.k', '(3)', 'RTOL'], {}), '(reg.k, 3, RTOL)\n', (3543, 3559), True, 'import numpy as np\n'), ((3571, 3829), 'numpy.array', 'np.array', (['[[151.340717, -5.29057506, -1.8565454, -0.00239139054], [-5.29057506, \n 0.24666961, 0.0514259101, 0.000319241302], [-1.8565454, 0.0514259101, \n 0.032051055, -5.9564024e-05], [-0.00239139054, 0.000319241302, -\n 5.9564024e-05, 0.0336690159]]'], {}), '([[151.340717, -5.29057506, -1.8565454, -0.00239139054], [-\n 5.29057506, 0.24666961, 0.0514259101, 0.000319241302], [-1.8565454, \n 0.0514259101, 0.032051055, -5.9564024e-05], [-0.00239139054, \n 0.000319241302, -5.9564024e-05, 0.0336690159]])\n', (3579, 3829), True, 'import numpy as np\n'), ((3877, 3921), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.vm', 'vm', 'RTOL'], {}), '(reg.vm, vm, RTOL)\n', (3903, 3921), True, 'import numpy as np\n'), ((3982, 4032), 'numpy.testing.assert_string_equal', 'np.testing.assert_string_equal', (['reg.iter_stop', 'i_s'], {}), '(reg.iter_stop, i_s)\n', (4012, 4032), True, 'import numpy as np\n'), ((4040, 4090), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.iteration', '(1)', 'RTOL'], {}), '(reg.iteration, 1, RTOL)\n', (4066, 4090), True, 'import numpy as np\n'), ((4129, 4171), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.mean_y', 'my'], {}), '(reg.mean_y, my)\n', (4155, 4171), True, 'import numpy as np\n'), ((4214, 4258), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.std_y', 'std_y'], {}), '(reg.std_y, std_y)\n', (4240, 4258), True, 'import numpy as np\n'), ((4300, 4340), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.pr2', 'pr2'], {}), '(reg.pr2, pr2)\n', (4326, 4340), True, 'import numpy as np\n'), ((4382, 4424), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.sig2', 'sig2'], {}), '(reg.sig2, sig2)\n', (4408, 4424), True, 'import numpy as np\n'), ((4442, 4501), 'numpy.array', 'np.array', (['[12.30206149, 0.49665844, 0.17902808, 0.18349119]'], {}), '([12.30206149, 0.49665844, 0.17902808, 0.18349119])\n', (4450, 4501), True, 'import numpy as np\n'), ((4515, 4569), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.std_err', 'std_err', 'RTOL'], {}), '(reg.std_err, std_err, RTOL)\n', (4541, 4569), True, 'import numpy as np\n'), ((4585, 4711), 'numpy.array', 'np.array', (['[[3.89754616, 9.71723059e-05], [1.422169, 0.154977196], [-3.10541409, \n 0.00190012806], [2.250165, 0.0244384731]]'], {}), '([[3.89754616, 9.71723059e-05], [1.422169, 0.154977196], [-\n 3.10541409, 0.00190012806], [2.250165, 0.0244384731]])\n', (4593, 4711), True, 'import numpy as np\n'), ((4756, 4808), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.z_stat', 'z_stat', 'RTOL'], {}), '(reg.z_stat, z_stat, RTOL)\n', (4782, 4808), True, 'import numpy as np\n'), ((4821, 4942), 'numpy.array', 'np.array', (['[[49.0, 704.371999, 1721.31237], [704.371999, 11686.6734, 21557.532], [\n 1721.31237, 21557.532, 73905.8986]]'], {}), '([[49.0, 704.371999, 1721.31237], [704.371999, 11686.6734, \n 21557.532], [1721.31237, 21557.532, 73905.8986]])\n', (4829, 4942), True, 'import numpy as np\n'), ((5004, 5050), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.xtx', 'xtx', 'RTOL'], {}), '(reg.xtx, xtx, RTOL)\n', (5030, 5050), True, 'import numpy as np\n'), ((5264, 5286), 'numpy.reshape', 'np.reshape', (['y', '(49, 1)'], {}), '(y, (49, 1))\n', (5274, 5286), True, 'import numpy as np\n'), ((5768, 5859), 'pysal.model.spreg.error_sp_hom.BaseGM_Endog_Error_Hom', 'HOM.BaseGM_Endog_Error_Hom', (['self.y', 'self.X', 'self.yd', 'self.q', 'self.w.sparse'], {'A1': '"""hom_sc"""'}), "(self.y, self.X, self.yd, self.q, self.w.sparse,\n A1='hom_sc')\n", (5794, 5859), True, 'from pysal.model.spreg import error_sp_hom as HOM\n'), ((5941, 5964), 'numpy.array', 'np.array', (['[1.0, 19.531]'], {}), '([1.0, 19.531])\n', (5949, 5964), True, 'import numpy as np\n'), ((5980, 6025), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.x[0]', 'x', 'RTOL'], {}), '(reg.x[0], x, RTOL)\n', (6006, 6025), True, 'import numpy as np\n'), ((6036, 6069), 'numpy.array', 'np.array', (['[1.0, 19.531, 15.72598]'], {}), '([1.0, 19.531, 15.72598])\n', (6044, 6069), True, 'import numpy as np\n'), ((6088, 6133), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.z[0]', 'z', 'RTOL'], {}), '(reg.z[0], z, RTOL)\n', (6114, 6133), True, 'import numpy as np\n'), ((6144, 6173), 'numpy.array', 'np.array', (['[1.0, 19.531, 5.03]'], {}), '([1.0, 19.531, 5.03])\n', (6152, 6173), True, 'import numpy as np\n'), ((6190, 6235), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.h[0]', 'h', 'RTOL'], {}), '(reg.h[0], h, RTOL)\n', (6216, 6235), True, 'import numpy as np\n'), ((6249, 6269), 'numpy.array', 'np.array', (['[15.72598]'], {}), '([15.72598])\n', (6257, 6269), True, 'import numpy as np\n'), ((6279, 6330), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.yend[0]', 'yend', 'RTOL'], {}), '(reg.yend[0], yend, RTOL)\n', (6305, 6330), True, 'import numpy as np\n'), ((6341, 6357), 'numpy.array', 'np.array', (['[5.03]'], {}), '([5.03])\n', (6349, 6357), True, 'import numpy as np\n'), ((6367, 6412), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.q[0]', 'q', 'RTOL'], {}), '(reg.q[0], q, RTOL)\n', (6393, 6412), True, 'import numpy as np\n'), ((6427, 6495), 'numpy.array', 'np.array', (['[[55.36575166], [0.46432416], [-0.66904404], [0.43205526]]'], {}), '([[55.36575166], [0.46432416], [-0.66904404], [0.43205526]])\n', (6435, 6495), True, 'import numpy as np\n'), ((6510, 6560), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.betas', 'betas', 'RTOL'], {}), '(reg.betas, betas, RTOL)\n', (6536, 6560), True, 'import numpy as np\n'), ((6571, 6594), 'numpy.array', 'np.array', (['[26.55390939]'], {}), '([26.55390939])\n', (6579, 6594), True, 'import numpy as np\n'), ((6604, 6649), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.u[0]', 'u', 'RTOL'], {}), '(reg.u[0], u, RTOL)\n', (6630, 6649), True, 'import numpy as np\n'), ((6748, 6771), 'numpy.array', 'np.array', (['[53.91309361]'], {}), '([53.91309361])\n', (6756, 6771), True, 'import numpy as np\n'), ((6781, 6834), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.predy[0]', 'predy', 'RTOL'], {}), '(reg.predy[0], predy, RTOL)\n', (6807, 6834), True, 'import numpy as np\n'), ((6841, 6884), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.n', '(49)', 'RTOL'], {}), '(reg.n, 49, RTOL)\n', (6867, 6884), True, 'import numpy as np\n'), ((6891, 6933), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.k', '(3)', 'RTOL'], {}), '(reg.k, 3, RTOL)\n', (6917, 6933), True, 'import numpy as np\n'), ((6974, 7016), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.sig2', 'sig2'], {}), '(reg.sig2, sig2)\n', (7000, 7016), True, 'import numpy as np\n'), ((7029, 7272), 'numpy.array', 'np.array', (['[[552.064057, -16.1264555, -8.86360735, 1.04251912], [-16.1264555, \n 0.544898242, 0.239518645, -0.018809295], [-8.86360735, 0.239518645, \n 0.15550184, -0.0218638648], [1.04251912, -0.018809295, -0.0218638648, \n 0.0371222222]]'], {}), '([[552.064057, -16.1264555, -8.86360735, 1.04251912], [-16.1264555,\n 0.544898242, 0.239518645, -0.018809295], [-8.86360735, 0.239518645, \n 0.15550184, -0.0218638648], [1.04251912, -0.018809295, -0.0218638648, \n 0.0371222222]])\n', (7037, 7272), True, 'import numpy as np\n'), ((7332, 7376), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.vm', 'vm', 'RTOL'], {}), '(reg.vm, vm, RTOL)\n', (7358, 7376), True, 'import numpy as np\n'), ((7437, 7487), 'numpy.testing.assert_string_equal', 'np.testing.assert_string_equal', (['reg.iter_stop', 'i_s'], {}), '(reg.iter_stop, i_s)\n', (7467, 7487), True, 'import numpy as np\n'), ((7511, 7563), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.iteration', 'its', 'RTOL'], {}), '(reg.iteration, its, RTOL)\n', (7537, 7563), True, 'import numpy as np\n'), ((7602, 7644), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.mean_y', 'my'], {}), '(reg.mean_y, my)\n', (7628, 7644), True, 'import numpy as np\n'), ((7687, 7731), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.std_y', 'std_y'], {}), '(reg.std_y, std_y)\n', (7713, 7731), True, 'import numpy as np\n'), ((7813, 7936), 'numpy.array', 'np.array', (['[[49.0, 704.371999, 139.75], [704.371999, 11686.67338121, 2246.12800625], [\n 139.75, 2246.12800625, 498.5851]]'], {}), '([[49.0, 704.371999, 139.75], [704.371999, 11686.67338121, \n 2246.12800625], [139.75, 2246.12800625, 498.5851]])\n', (7821, 7936), True, 'import numpy as np\n'), ((7987, 8033), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.hth', 'hth', 'RTOL'], {}), '(reg.hth, hth, RTOL)\n', (8013, 8033), True, 'import numpy as np\n'), ((8242, 8264), 'numpy.reshape', 'np.reshape', (['y', '(49, 1)'], {}), '(y, (49, 1))\n', (8252, 8264), True, 'import numpy as np\n'), ((8687, 8763), 'pysal.model.spreg.error_sp_hom.GM_Endog_Error_Hom', 'HOM.GM_Endog_Error_Hom', (['self.y', 'self.X', 'self.yd', 'self.q', 'self.w'], {'A1': '"""hom_sc"""'}), "(self.y, self.X, self.yd, self.q, self.w, A1='hom_sc')\n", (8709, 8763), True, 'from pysal.model.spreg import error_sp_hom as HOM\n'), ((8849, 8872), 'numpy.array', 'np.array', (['[1.0, 19.531]'], {}), '([1.0, 19.531])\n', (8857, 8872), True, 'import numpy as np\n'), ((8888, 8933), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.x[0]', 'x', 'RTOL'], {}), '(reg.x[0], x, RTOL)\n', (8914, 8933), True, 'import numpy as np\n'), ((8944, 8977), 'numpy.array', 'np.array', (['[1.0, 19.531, 15.72598]'], {}), '([1.0, 19.531, 15.72598])\n', (8952, 8977), True, 'import numpy as np\n'), ((8996, 9041), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.z[0]', 'z', 'RTOL'], {}), '(reg.z[0], z, RTOL)\n', (9022, 9041), True, 'import numpy as np\n'), ((9052, 9081), 'numpy.array', 'np.array', (['[1.0, 19.531, 5.03]'], {}), '([1.0, 19.531, 5.03])\n', (9060, 9081), True, 'import numpy as np\n'), ((9098, 9143), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.h[0]', 'h', 'RTOL'], {}), '(reg.h[0], h, RTOL)\n', (9124, 9143), True, 'import numpy as np\n'), ((9157, 9177), 'numpy.array', 'np.array', (['[15.72598]'], {}), '([15.72598])\n', (9165, 9177), True, 'import numpy as np\n'), ((9187, 9238), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.yend[0]', 'yend', 'RTOL'], {}), '(reg.yend[0], yend, RTOL)\n', (9213, 9238), True, 'import numpy as np\n'), ((9249, 9265), 'numpy.array', 'np.array', (['[5.03]'], {}), '([5.03])\n', (9257, 9265), True, 'import numpy as np\n'), ((9275, 9320), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.q[0]', 'q', 'RTOL'], {}), '(reg.q[0], q, RTOL)\n', (9301, 9320), True, 'import numpy as np\n'), ((9335, 9403), 'numpy.array', 'np.array', (['[[55.36575166], [0.46432416], [-0.66904404], [0.43205526]]'], {}), '([[55.36575166], [0.46432416], [-0.66904404], [0.43205526]])\n', (9343, 9403), True, 'import numpy as np\n'), ((9418, 9468), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.betas', 'betas', 'RTOL'], {}), '(reg.betas, betas, RTOL)\n', (9444, 9468), True, 'import numpy as np\n'), ((9479, 9502), 'numpy.array', 'np.array', (['[26.55390939]'], {}), '([26.55390939])\n', (9487, 9502), True, 'import numpy as np\n'), ((9512, 9557), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.u[0]', 'u', 'RTOL'], {}), '(reg.u[0], u, RTOL)\n', (9538, 9557), True, 'import numpy as np\n'), ((9656, 9679), 'numpy.array', 'np.array', (['[53.91309361]'], {}), '([53.91309361])\n', (9664, 9679), True, 'import numpy as np\n'), ((9689, 9742), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.predy[0]', 'predy', 'RTOL'], {}), '(reg.predy[0], predy, RTOL)\n', (9715, 9742), True, 'import numpy as np\n'), ((9749, 9792), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.n', '(49)', 'RTOL'], {}), '(reg.n, 49, RTOL)\n', (9775, 9792), True, 'import numpy as np\n'), ((9799, 9841), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.k', '(3)', 'RTOL'], {}), '(reg.k, 3, RTOL)\n', (9825, 9841), True, 'import numpy as np\n'), ((9853, 10096), 'numpy.array', 'np.array', (['[[552.064057, -16.1264555, -8.86360735, 1.04251912], [-16.1264555, \n 0.544898242, 0.239518645, -0.018809295], [-8.86360735, 0.239518645, \n 0.15550184, -0.0218638648], [1.04251912, -0.018809295, -0.0218638648, \n 0.0371222222]]'], {}), '([[552.064057, -16.1264555, -8.86360735, 1.04251912], [-16.1264555,\n 0.544898242, 0.239518645, -0.018809295], [-8.86360735, 0.239518645, \n 0.15550184, -0.0218638648], [1.04251912, -0.018809295, -0.0218638648, \n 0.0371222222]])\n', (9861, 10096), True, 'import numpy as np\n'), ((10156, 10200), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.vm', 'vm', 'RTOL'], {}), '(reg.vm, vm, RTOL)\n', (10182, 10200), True, 'import numpy as np\n'), ((10261, 10311), 'numpy.testing.assert_string_equal', 'np.testing.assert_string_equal', (['reg.iter_stop', 'i_s'], {}), '(reg.iter_stop, i_s)\n', (10291, 10311), True, 'import numpy as np\n'), ((10335, 10387), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.iteration', 'its', 'RTOL'], {}), '(reg.iteration, its, RTOL)\n', (10361, 10387), True, 'import numpy as np\n'), ((10426, 10468), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.mean_y', 'my'], {}), '(reg.mean_y, my)\n', (10452, 10468), True, 'import numpy as np\n'), ((10511, 10555), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.std_y', 'std_y'], {}), '(reg.std_y, std_y)\n', (10537, 10555), True, 'import numpy as np\n'), ((10597, 10637), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.pr2', 'pr2'], {}), '(reg.pr2, pr2)\n', (10623, 10637), True, 'import numpy as np\n'), ((10679, 10721), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.sig2', 'sig2'], {}), '(reg.sig2, sig2)\n', (10705, 10721), True, 'import numpy as np\n'), ((10756, 10815), 'numpy.array', 'np.array', (['[23.49604343, 0.73817223, 0.39433722, 0.19267128]'], {}), '([23.49604343, 0.73817223, 0.39433722, 0.19267128])\n', (10764, 10815), True, 'import numpy as np\n'), ((10829, 10883), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.std_err', 'std_err', 'RTOL'], {}), '(reg.std_err, std_err, RTOL)\n', (10855, 10883), True, 'import numpy as np\n'), ((10899, 11018), 'numpy.array', 'np.array', (['[[2.35638617, 0.01845372], [0.62901874, 0.52933679], [-1.69662923, \n 0.08976678], [2.24244556, 0.02493259]]'], {}), '([[2.35638617, 0.01845372], [0.62901874, 0.52933679], [-1.69662923,\n 0.08976678], [2.24244556, 0.02493259]])\n', (10907, 11018), True, 'import numpy as np\n'), ((11030, 11082), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.z_stat', 'z_stat', 'RTOL'], {}), '(reg.z_stat, z_stat, RTOL)\n', (11056, 11082), True, 'import numpy as np\n'), ((11290, 11312), 'numpy.reshape', 'np.reshape', (['y', '(49, 1)'], {}), '(y, (49, 1))\n', (11300, 11312), True, 'import numpy as np\n'), ((11720, 11807), 'pysal.model.spreg.error_sp_hom.BaseGM_Combo_Hom', 'HOM.BaseGM_Combo_Hom', (['self.y', 'self.X'], {'yend': 'yd2', 'q': 'q2', 'w': 'self.w.sparse', 'A1': '"""hom_sc"""'}), "(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse, A1=\n 'hom_sc')\n", (11740, 11807), True, 'from pysal.model.spreg import error_sp_hom as HOM\n'), ((11887, 11910), 'numpy.array', 'np.array', (['[1.0, 19.531]'], {}), '([1.0, 19.531])\n', (11895, 11910), True, 'import numpy as np\n'), ((11926, 11971), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.x[0]', 'x', 'RTOL'], {}), '(reg.x[0], x, RTOL)\n', (11952, 11971), True, 'import numpy as np\n'), ((11986, 12053), 'numpy.array', 'np.array', (['[[10.12541428], [1.56832263], [0.15132076], [0.21033397]]'], {}), '([[10.12541428], [1.56832263], [0.15132076], [0.21033397]])\n', (11994, 12053), True, 'import numpy as np\n'), ((12069, 12119), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.betas', 'betas', 'RTOL'], {}), '(reg.betas, betas, RTOL)\n', (12095, 12119), True, 'import numpy as np\n'), ((12360, 12403), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.n', '(49)', 'RTOL'], {}), '(reg.n, 49, RTOL)\n', (12386, 12403), True, 'import numpy as np\n'), ((12410, 12452), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.k', '(3)', 'RTOL'], {}), '(reg.k, 3, RTOL)\n', (12436, 12452), True, 'import numpy as np\n'), ((12464, 12707), 'numpy.array', 'np.array', (['[[233.694742, -0.666856869, -5.58304254, 4.8548838], [-0.666856869, \n 0.194241504, -0.0542327138, 0.053722557], [-5.58304254, -0.0542327138, \n 0.163860721, -0.144425498], [4.8548838, 0.053722557, -0.144425498, \n 0.178622255]]'], {}), '([[233.694742, -0.666856869, -5.58304254, 4.8548838], [-0.666856869,\n 0.194241504, -0.0542327138, 0.053722557], [-5.58304254, -0.0542327138, \n 0.163860721, -0.144425498], [4.8548838, 0.053722557, -0.144425498, \n 0.178622255]])\n', (12472, 12707), True, 'import numpy as np\n'), ((12766, 12810), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.vm', 'vm', 'RTOL'], {}), '(reg.vm, vm, RTOL)\n', (12792, 12810), True, 'import numpy as np\n'), ((12821, 12856), 'numpy.array', 'np.array', (['[1.0, 19.531, 35.4585005]'], {}), '([1.0, 19.531, 35.4585005])\n', (12829, 12856), True, 'import numpy as np\n'), ((12879, 12924), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.z[0]', 'z', 'RTOL'], {}), '(reg.z[0], z, RTOL)\n', (12905, 12924), True, 'import numpy as np\n'), ((12935, 12966), 'numpy.array', 'np.array', (['[1.0, 19.531, 18.594]'], {}), '([1.0, 19.531, 18.594])\n', (12943, 12966), True, 'import numpy as np\n'), ((12981, 13026), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.h[0]', 'h', 'RTOL'], {}), '(reg.h[0], h, RTOL)\n', (13007, 13026), True, 'import numpy as np\n'), ((13040, 13062), 'numpy.array', 'np.array', (['[35.4585005]'], {}), '([35.4585005])\n', (13048, 13062), True, 'import numpy as np\n'), ((13072, 13123), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.yend[0]', 'yend', 'RTOL'], {}), '(reg.yend[0], yend, RTOL)\n', (13098, 13123), True, 'import numpy as np\n'), ((13134, 13152), 'numpy.array', 'np.array', (['[18.594]'], {}), '([18.594])\n', (13142, 13152), True, 'import numpy as np\n'), ((13162, 13207), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.q[0]', 'q', 'RTOL'], {}), '(reg.q[0], q, RTOL)\n', (13188, 13207), True, 'import numpy as np\n'), ((13268, 13318), 'numpy.testing.assert_string_equal', 'np.testing.assert_string_equal', (['reg.iter_stop', 'i_s'], {}), '(reg.iter_stop, i_s)\n', (13298, 13318), True, 'import numpy as np\n'), ((13342, 13394), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.iteration', 'its', 'RTOL'], {}), '(reg.iteration, its, RTOL)\n', (13368, 13394), True, 'import numpy as np\n'), ((13433, 13475), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.mean_y', 'my'], {}), '(reg.mean_y, my)\n', (13459, 13475), True, 'import numpy as np\n'), ((13518, 13562), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.std_y', 'std_y'], {}), '(reg.std_y, std_y)\n', (13544, 13562), True, 'import numpy as np\n'), ((13655, 13697), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.sig2', 'sig2'], {}), '(reg.sig2, sig2)\n', (13681, 13697), True, 'import numpy as np\n'), ((13711, 13848), 'numpy.array', 'np.array', (['[[49.0, 704.371999, 724.7435916], [704.371999, 11686.67338121, 11092.519988\n ], [724.7435916, 11092.519988, 11614.62257048]]'], {}), '([[49.0, 704.371999, 724.7435916], [704.371999, 11686.67338121, \n 11092.519988], [724.7435916, 11092.519988, 11614.62257048]])\n', (13719, 13848), True, 'import numpy as np\n'), ((13888, 13934), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.hth', 'hth', 'RTOL'], {}), '(reg.hth, hth, RTOL)\n', (13914, 13934), True, 'import numpy as np\n'), ((14138, 14160), 'numpy.reshape', 'np.reshape', (['y', '(49, 1)'], {}), '(y, (49, 1))\n', (14148, 14160), True, 'import numpy as np\n'), ((14412, 14467), 'pysal.model.spreg.error_sp_hom.GM_Combo_Hom', 'HOM.GM_Combo_Hom', (['self.y', 'self.X'], {'w': 'self.w', 'A1': '"""hom_sc"""'}), "(self.y, self.X, w=self.w, A1='hom_sc')\n", (14428, 14467), True, 'from pysal.model.spreg import error_sp_hom as HOM\n'), ((14552, 14575), 'numpy.array', 'np.array', (['[1.0, 19.531]'], {}), '([1.0, 19.531])\n', (14560, 14575), True, 'import numpy as np\n'), ((14591, 14636), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.x[0]', 'x', 'RTOL'], {}), '(reg.x[0], x, RTOL)\n', (14617, 14636), True, 'import numpy as np\n'), ((14651, 14718), 'numpy.array', 'np.array', (['[[10.12541428], [1.56832263], [0.15132076], [0.21033397]]'], {}), '([[10.12541428], [1.56832263], [0.15132076], [0.21033397]])\n', (14659, 14718), True, 'import numpy as np\n'), ((14734, 14784), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.betas', 'betas', 'RTOL'], {}), '(reg.betas, betas, RTOL)\n', (14760, 14784), True, 'import numpy as np\n'), ((15185, 15228), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.n', '(49)', 'RTOL'], {}), '(reg.n, 49, RTOL)\n', (15211, 15228), True, 'import numpy as np\n'), ((15235, 15277), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.k', '(3)', 'RTOL'], {}), '(reg.k, 3, RTOL)\n', (15261, 15277), True, 'import numpy as np\n'), ((15288, 15323), 'numpy.array', 'np.array', (['[1.0, 19.531, 35.4585005]'], {}), '([1.0, 19.531, 35.4585005])\n', (15296, 15323), True, 'import numpy as np\n'), ((15346, 15391), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.z[0]', 'z', 'RTOL'], {}), '(reg.z[0], z, RTOL)\n', (15372, 15391), True, 'import numpy as np\n'), ((15402, 15433), 'numpy.array', 'np.array', (['[1.0, 19.531, 18.594]'], {}), '([1.0, 19.531, 18.594])\n', (15410, 15433), True, 'import numpy as np\n'), ((15448, 15493), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.h[0]', 'h', 'RTOL'], {}), '(reg.h[0], h, RTOL)\n', (15474, 15493), True, 'import numpy as np\n'), ((15507, 15529), 'numpy.array', 'np.array', (['[35.4585005]'], {}), '([35.4585005])\n', (15515, 15529), True, 'import numpy as np\n'), ((15539, 15590), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.yend[0]', 'yend', 'RTOL'], {}), '(reg.yend[0], yend, RTOL)\n', (15565, 15590), True, 'import numpy as np\n'), ((15601, 15619), 'numpy.array', 'np.array', (['[18.594]'], {}), '([18.594])\n', (15609, 15619), True, 'import numpy as np\n'), ((15629, 15674), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.q[0]', 'q', 'RTOL'], {}), '(reg.q[0], q, RTOL)\n', (15655, 15674), True, 'import numpy as np\n'), ((15735, 15785), 'numpy.testing.assert_string_equal', 'np.testing.assert_string_equal', (['reg.iter_stop', 'i_s'], {}), '(reg.iter_stop, i_s)\n', (15765, 15785), True, 'import numpy as np\n'), ((15793, 15843), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.iteration', '(1)', 'RTOL'], {}), '(reg.iteration, 1, RTOL)\n', (15819, 15843), True, 'import numpy as np\n'), ((15882, 15924), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.mean_y', 'my'], {}), '(reg.mean_y, my)\n', (15908, 15924), True, 'import numpy as np\n'), ((15967, 16011), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.std_y', 'std_y'], {}), '(reg.std_y, std_y)\n', (15993, 16011), True, 'import numpy as np\n'), ((16053, 16093), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.pr2', 'pr2'], {}), '(reg.pr2, pr2)\n', (16079, 16093), True, 'import numpy as np\n'), ((16137, 16181), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.pr2_e', 'pr2_e'], {}), '(reg.pr2_e, pr2_e)\n', (16163, 16181), True, 'import numpy as np\n'), ((16275, 16317), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.sig2', 'sig2'], {}), '(reg.sig2, sig2)\n', (16301, 16317), True, 'import numpy as np\n'), ((16336, 16395), 'numpy.array', 'np.array', (['[15.28707761, 0.44072838, 0.40479714, 0.42263726]'], {}), '([15.28707761, 0.44072838, 0.40479714, 0.42263726])\n', (16344, 16395), True, 'import numpy as np\n'), ((16409, 16463), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.std_err', 'std_err', 'RTOL'], {}), '(reg.std_err, std_err, RTOL)\n', (16435, 16463), True, 'import numpy as np\n'), ((16479, 16606), 'numpy.array', 'np.array', (['[[0.662351206, 0.507746167], [3.55847888, 0.00037300878], [0.373818749, \n 0.70853917], [0.497670189, 0.618716523]]'], {}), '([[0.662351206, 0.507746167], [3.55847888, 0.00037300878], [\n 0.373818749, 0.70853917], [0.497670189, 0.618716523]])\n', (16487, 16606), True, 'import numpy as np\n'), ((16650, 16702), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.z_stat', 'z_stat', 'RTOL'], {}), '(reg.z_stat, z_stat, RTOL)\n', (16676, 16702), True, 'import numpy as np\n'), ((16714, 16957), 'numpy.array', 'np.array', (['[[233.694742, -0.666856869, -5.58304254, 4.8548838], [-0.666856869, \n 0.194241504, -0.0542327138, 0.053722557], [-5.58304254, -0.0542327138, \n 0.163860721, -0.144425498], [4.8548838, 0.053722557, -0.144425498, \n 0.178622255]]'], {}), '([[233.694742, -0.666856869, -5.58304254, 4.8548838], [-0.666856869,\n 0.194241504, -0.0542327138, 0.053722557], [-5.58304254, -0.0542327138, \n 0.163860721, -0.144425498], [4.8548838, 0.053722557, -0.144425498, \n 0.178622255]])\n', (16722, 16957), True, 'import numpy as np\n'), ((17016, 17060), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['reg.vm', 'vm', 'RTOL'], {}), '(reg.vm, vm, RTOL)\n', (17042, 17060), True, 'import numpy as np\n'), ((556, 567), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (564, 567), True, 'import numpy as np\n'), ((909, 930), 'numpy.array', 'np.array', (['[80.467003]'], {}), '([80.467003])\n', (917, 930), True, 'import numpy as np\n'), ((1237, 1258), 'numpy.array', 'np.array', (['[27.466734]'], {}), '([27.466734])\n', (1245, 1258), True, 'import numpy as np\n'), ((1318, 1341), 'numpy.array', 'np.array', (['[32.37298547]'], {}), '([32.37298547])\n', (1326, 1341), True, 'import numpy as np\n'), ((1509, 1530), 'numpy.array', 'np.array', (['[53.000269]'], {}), '([53.000269])\n', (1517, 1530), True, 'import numpy as np\n'), ((2655, 2666), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2663, 2666), True, 'import numpy as np\n'), ((2938, 2959), 'numpy.array', 'np.array', (['[80.467003]'], {}), '([80.467003])\n', (2946, 2959), True, 'import numpy as np\n'), ((3266, 3289), 'numpy.array', 'np.array', (['[27.46673388]'], {}), '([27.46673388])\n', (3274, 3289), True, 'import numpy as np\n'), ((3349, 3372), 'numpy.array', 'np.array', (['[32.37298547]'], {}), '([32.37298547])\n', (3357, 3372), True, 'import numpy as np\n'), ((3428, 3451), 'numpy.array', 'np.array', (['[53.00026912]'], {}), '([53.00026912])\n', (3436, 3451), True, 'import numpy as np\n'), ((5353, 5364), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5361, 5364), True, 'import numpy as np\n'), ((5498, 5510), 'numpy.array', 'np.array', (['yd'], {}), '(yd)\n', (5506, 5510), True, 'import numpy as np\n'), ((5583, 5594), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (5591, 5594), True, 'import numpy as np\n'), ((5900, 5921), 'numpy.array', 'np.array', (['[80.467003]'], {}), '([80.467003])\n', (5908, 5921), True, 'import numpy as np\n'), ((6701, 6724), 'numpy.array', 'np.array', (['[31.74114306]'], {}), '([31.74114306])\n', (6709, 6724), True, 'import numpy as np\n'), ((8331, 8342), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (8339, 8342), True, 'import numpy as np\n'), ((8417, 8429), 'numpy.array', 'np.array', (['yd'], {}), '(yd)\n', (8425, 8429), True, 'import numpy as np\n'), ((8502, 8513), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (8510, 8513), True, 'import numpy as np\n'), ((8808, 8829), 'numpy.array', 'np.array', (['[80.467003]'], {}), '([80.467003])\n', (8816, 8829), True, 'import numpy as np\n'), ((9609, 9632), 'numpy.array', 'np.array', (['[31.74114306]'], {}), '([31.74114306])\n', (9617, 9632), True, 'import numpy as np\n'), ((11379, 11390), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (11387, 11390), True, 'import numpy as np\n'), ((11847, 11868), 'numpy.array', 'np.array', (['[80.467003]'], {}), '([80.467003])\n', (11855, 11868), True, 'import numpy as np\n'), ((12162, 12184), 'numpy.array', 'np.array', (['[34.3450723]'], {}), '([34.3450723])\n', (12170, 12184), True, 'import numpy as np\n'), ((12244, 12266), 'numpy.array', 'np.array', (['[36.6149682]'], {}), '([36.6149682])\n', (12252, 12266), True, 'import numpy as np\n'), ((12322, 12344), 'numpy.array', 'np.array', (['[46.1219307]'], {}), '([46.1219307])\n', (12330, 12344), True, 'import numpy as np\n'), ((14227, 14238), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (14235, 14238), True, 'import numpy as np\n'), ((14512, 14533), 'numpy.array', 'np.array', (['[80.467003]'], {}), '([80.467003])\n', (14520, 14533), True, 'import numpy as np\n'), ((14827, 14849), 'numpy.array', 'np.array', (['[34.3450723]'], {}), '([34.3450723])\n', (14835, 14849), True, 'import numpy as np\n'), ((14909, 14931), 'numpy.array', 'np.array', (['[36.6149682]'], {}), '([36.6149682])\n', (14917, 14931), True, 'import numpy as np\n'), ((14988, 15011), 'numpy.array', 'np.array', (['[32.90372983]'], {}), '([32.90372983])\n', (14996, 15011), True, 'import numpy as np\n'), ((15067, 15089), 'numpy.array', 'np.array', (['[46.1219307]'], {}), '([46.1219307])\n', (15075, 15089), True, 'import numpy as np\n'), ((15147, 15170), 'numpy.array', 'np.array', (['[47.56327317]'], {}), '([47.56327317])\n', (15155, 15170), True, 'import numpy as np\n'), ((17305, 17326), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (17324, 17326), False, 'import unittest\n'), ((598, 619), 'numpy.ones', 'np.ones', (['self.y.shape'], {}), '(self.y.shape)\n', (605, 619), True, 'import numpy as np\n'), ((5395, 5416), 'numpy.ones', 'np.ones', (['self.y.shape'], {}), '(self.y.shape)\n', (5402, 5416), True, 'import numpy as np\n'), ((11675, 11696), 'numpy.ones', 'np.ones', (['self.y.shape'], {}), '(self.y.shape)\n', (11682, 11696), True, 'import numpy as np\n')]
|
'''
The code is partially borrowed from:
https://github.com/v-iashin/video_features/blob/861efaa4ed67/utils/utils.py
and
https://github.com/PeihaoChen/regnet/blob/199609/extract_audio_and_video.py
'''
import os
import shutil
import subprocess
from glob import glob
from pathlib import Path
from typing import Dict
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.models as models
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
from omegaconf.omegaconf import OmegaConf
from sample_visualization import (load_feature_extractor,
load_model_from_config, load_vocoder)
from specvqgan.data.vggsound import CropFeats
from specvqgan.util import download, md5_hash
from specvqgan.models.cond_transformer import disabled_train
from train import instantiate_from_config
from feature_extraction.extract_mel_spectrogram import get_spectrogram
plt.rcParams['savefig.bbox'] = 'tight'
def which_ffmpeg() -> str:
'''Determines the path to ffmpeg library
Returns:
str -- path to the library
'''
result = subprocess.run(['which', 'ffmpeg'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
ffmpeg_path = result.stdout.decode('utf-8').replace('\n', '')
return ffmpeg_path
def which_ffprobe() -> str:
'''Determines the path to ffprobe library
Returns:
str -- path to the library
'''
result = subprocess.run(['which', 'ffprobe'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
ffprobe_path = result.stdout.decode('utf-8').replace('\n', '')
return ffprobe_path
def check_video_for_audio(path):
assert which_ffprobe() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
cmd = f'{which_ffprobe()} -loglevel error -show_entries stream=codec_type -of default=nw=1 {path}'
result = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
result = result.stdout.decode('utf-8')
print(result)
return 'codec_type=audio' in result
def get_duration(path):
assert which_ffprobe() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
cmd = f'{which_ffprobe()} -hide_banner -loglevel panic' \
f' -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 {path}'
result = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
duration = float(result.stdout.decode('utf-8').replace('\n', ''))
return duration
def trim_video(video_path: str, start: int, trim_duration: int = 10, tmp_path: str = './tmp'):
assert which_ffmpeg() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
if Path(video_path).suffix != '.mp4':
print(f'File Extension is not `mp4` (it is {Path(video_path).suffix}). It will be re-encoded to mp4.')
video_duration = get_duration(video_path)
print('Video Duration:', video_duration)
assert video_duration > start, f'Video Duration < Trim Start: {video_duration} < {start}'
# create tmp dir if doesn't exist
os.makedirs(tmp_path, exist_ok=True)
trim_vid_path = os.path.join(tmp_path, f'{Path(video_path).stem}_trim_to_{trim_duration}s.mp4')
cmd = f'{which_ffmpeg()} -hide_banner -loglevel panic' \
f' -i {video_path} -ss {start} -t {trim_duration} -y {trim_vid_path}'
subprocess.call(cmd.split())
print('Trimmed the input video', video_path, 'and saved the output @', trim_vid_path)
return trim_vid_path
def reencode_video_with_diff_fps(video_path: str, tmp_path: str, extraction_fps: int) -> str:
'''Reencodes the video given the path and saves it to the tmp_path folder.
Args:
video_path (str): original video
tmp_path (str): the folder where tmp files are stored (will be appended with a proper filename).
extraction_fps (int): target fps value
Returns:
str: The path where the tmp file is stored. To be used to load the video from
'''
assert which_ffmpeg() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
# assert video_path.endswith('.mp4'), 'The file does not end with .mp4. Comment this if expected'
# create tmp dir if doesn't exist
os.makedirs(tmp_path, exist_ok=True)
# form the path to tmp directory
new_path = os.path.join(tmp_path, f'{Path(video_path).stem}_new_fps.mp4')
cmd = f'{which_ffmpeg()} -hide_banner -loglevel panic '
cmd += f'-y -i {video_path} -filter:v fps=fps={extraction_fps} {new_path}'
subprocess.call(cmd.split())
return new_path
def maybe_download_model(model_name: str, log_dir: str) -> str:
name2info = {
'2021-06-20T16-35-20_vggsound_transformer': {
'info': 'No Feats',
'hash': 'b1f9bb63d831611479249031a1203371',
'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
'/specvqgan_public/models/2021-06-20T16-35-20_vggsound_transformer.tar.gz',
},
'2021-07-30T21-03-22_vggsound_transformer': {
'info': '1 ResNet50 Feature',
'hash': '27a61d4b74a72578d13579333ed056f6',
'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
'/specvqgan_public/models/2021-07-30T21-03-22_vggsound_transformer.tar.gz',
},
'2021-07-30T21-34-25_vggsound_transformer': {
'info': '5 ResNet50 Features',
'hash': 'f4d7105811589d441b69f00d7d0b8dc8',
'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
'/specvqgan_public/models/2021-07-30T21-34-25_vggsound_transformer.tar.gz',
},
'2021-07-30T21-34-41_vggsound_transformer': {
'info': '212 ResNet50 Features',
'hash': 'b222cc0e7aeb419f533d5806a08669fe',
'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
'/specvqgan_public/models/2021-07-30T21-34-41_vggsound_transformer.tar.gz',
},
'2021-06-03T00-43-28_vggsound_transformer': {
'info': 'Class Label',
'hash': '98a3788ab973f1c3cc02e2e41ad253bc',
'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
'/specvqgan_public/models/2021-06-03T00-43-28_vggsound_transformer.tar.gz',
},
'2021-05-19T22-16-54_vggsound_codebook': {
'info': 'VGGSound Codebook',
'hash': '7ea229427297b5d220fb1c80db32dbc5',
'link': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a'
'/specvqgan_public/models/2021-05-19T22-16-54_vggsound_codebook.tar.gz',
}
}
print(f'Using: {model_name} ({name2info[model_name]["info"]})')
model_dir = os.path.join(log_dir, model_name)
if not os.path.exists(model_dir):
tar_local_path = os.path.join(log_dir, f'{model_name}.tar.gz')
# check if tar already exists and its md5sum
if not os.path.exists(tar_local_path) or md5_hash(tar_local_path) != name2info[model_name]['hash']:
down_link = name2info[model_name]['link']
download(down_link, tar_local_path)
print('Unpacking', tar_local_path, 'to', log_dir)
shutil.unpack_archive(tar_local_path, log_dir)
# clean-up space as we already have unpacked folder
os.remove(tar_local_path)
return model_dir
def load_config(model_dir: str):
# Load the config
config_main = sorted(glob(os.path.join(model_dir, 'configs/*-project.yaml')))[-1]
config_pylt = sorted(glob(os.path.join(model_dir, 'configs/*-lightning.yaml')))[-1]
config = OmegaConf.merge(
OmegaConf.load(config_main),
OmegaConf.load(config_pylt),
)
# patch config. E.g. if the model is trained on another machine with different paths
for a in ['spec_dir_path', 'rgb_feats_dir_path', 'flow_feats_dir_path']:
if config.data.params[a] is not None:
if 'vggsound.VGGSound' in config.data.params.train.target:
base_path = './data/vggsound/'
elif 'vas.VAS' in config.data.params.train.target:
base_path = './data/vas/features/*/'
else:
raise NotImplementedError
config.data.params[a] = os.path.join(base_path, Path(config.data.params[a]).name)
return config
def load_model(model_name, log_dir, device):
to_use_gpu = True if device.type == 'cuda' else False
model_dir = maybe_download_model(model_name, log_dir)
config = load_config(model_dir)
# Sampling model
ckpt = sorted(glob(os.path.join(model_dir, 'checkpoints/*.ckpt')))[-1]
pl_sd = torch.load(ckpt, map_location='cpu')
sampler = load_model_from_config(config.model, pl_sd['state_dict'], to_use_gpu)['model']
sampler.to(device)
# aux models (vocoder and melception)
ckpt_melgan = config.lightning.callbacks.image_logger.params.vocoder_cfg.params.ckpt_vocoder
melgan = load_vocoder(ckpt_melgan, eval_mode=True)['model'].to(device)
melception = load_feature_extractor(to_use_gpu, eval_mode=True)
return config, sampler, melgan, melception
def load_neural_audio_codec(model_name, log_dir, device):
model_dir = maybe_download_model(model_name, log_dir)
config = load_config(model_dir)
config.model.params.ckpt_path = f'./logs/{model_name}/checkpoints/last.ckpt'
print(config.model.params.ckpt_path)
model = instantiate_from_config(config.model)
model = model.to(device)
model = model.eval()
model.train = disabled_train
vocoder = load_vocoder(Path('./vocoder/logs/vggsound/'), eval_mode=True)['model'].to(device)
return config, model, vocoder
class LeftmostCropOrTile(object):
def __init__(self, crop_or_tile_to):
self.crop_or_tile_to = crop_or_tile_to
def __call__(self, item: Dict):
# tile or crop features to the `crop_or_tile_to`
T, D = item['feature'].shape
if T != self.crop_or_tile_to:
how_many_tiles_needed = 1 + (self.crop_or_tile_to // T)
item['feature'] = np.tile(item['feature'], (how_many_tiles_needed, 1))[:self.crop_or_tile_to, :]
return item
class ExtractResNet50(torch.nn.Module):
def __init__(self, extraction_fps, feat_cfg, device, batch_size=32, tmp_dir='./tmp'):
super(ExtractResNet50, self).__init__()
self.tmp_path = tmp_dir
self.extraction_fps = extraction_fps
self.batch_size = batch_size
self.feat_cfg = feat_cfg
self.means = [0.485, 0.456, 0.406]
self.stds = [0.229, 0.224, 0.225]
self.transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=self.means, std=self.stds)
])
random_crop = False
self.post_transforms = transforms.Compose([
LeftmostCropOrTile(feat_cfg.feat_len),
CropFeats([feat_cfg.feat_crop_len, feat_cfg.feat_depth], random_crop),
(lambda x: x) if feat_cfg.feat_sampler_cfg is None else instantiate_from_config(feat_cfg.feat_sampler_cfg),
])
self.device = device
self.model = models.resnet50(pretrained=True).to(device)
self.model.eval()
# save the pre-trained classifier for show_preds and replace it in the net with identity
self.model_class = self.model.fc
self.model.fc = torch.nn.Identity()
@torch.no_grad()
def forward(self, video_path: str) -> Dict[str, np.ndarray]:
if self.feat_cfg.replace_feats_with_random:
T, D = self.feat_cfg.feat_sampler_cfg.params.feat_sample_size, self.feat_cfg.feat_depth
print(f'Since we are in "No Feats" setting, returning a random feature: [{T}, {D}]')
random_features = {'feature': torch.rand(T, D)}
return random_features, []
# take the video, change fps and save to the tmp folder
if self.extraction_fps is not None:
video_path = reencode_video_with_diff_fps(video_path, self.tmp_path, self.extraction_fps)
# read a video
cap = cv2.VideoCapture(video_path)
batch_list = []
vid_feats = []
cached_frames = []
transforms_for_show = transforms.Compose(self.transforms.transforms[:4])
# sometimes when the target fps is 1 or 2, the first frame of the reencoded video is missing
# and cap.read returns None but the rest of the frames are ok. timestep is 0.0 for the 2nd frame in
# this case
first_frame = True
# iterating through the opened video frame-by-frame and occationally run the model once a batch is
# formed
while cap.isOpened():
frame_exists, rgb = cap.read()
if first_frame and not frame_exists:
continue
first_frame = False
if frame_exists:
# prepare data and cache if needed
rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)
cached_frames.append(transforms_for_show(rgb))
rgb = self.transforms(rgb).unsqueeze(0).to(self.device)
batch_list.append(rgb)
# when batch is formed to inference
if len(batch_list) == self.batch_size:
batch_feats = self.model(torch.cat(batch_list))
vid_feats.extend(batch_feats.tolist())
# clean up the batch list
batch_list = []
else:
# if the last batch was smaller than the batch size, we still need to process those frames
if len(batch_list) != 0:
batch_feats = self.model(torch.cat(batch_list))
vid_feats.extend(batch_feats.tolist())
cap.release()
break
vid_feats = np.array(vid_feats)
features = {'feature': vid_feats}
print('Raw Extracted Representation:', features['feature'].shape)
if self.post_transforms is not None:
features = self.post_transforms(features)
# using 'feature' as the key to reuse the feature resampling transform
cached_frames = self.post_transforms.transforms[-1]({'feature': torch.stack(cached_frames)})['feature']
print('Post-processed Representation:', features['feature'].shape)
return features, cached_frames
def extract_melspectrogram(in_path: str, sr: int, duration: int = 10, tmp_path: str = './tmp') -> np.ndarray:
'''Extract Melspectrogram similar to RegNet.'''
assert which_ffmpeg() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
# assert in_path.endswith('.mp4'), 'The file does not end with .mp4. Comment this if expected'
# create tmp dir if doesn't exist
os.makedirs(tmp_path, exist_ok=True)
# Extract audio from a video if needed
if in_path.endswith('.wav'):
audio_raw = in_path
else:
audio_raw = os.path.join(tmp_path, f'{Path(in_path).stem}.wav')
cmd = f'{which_ffmpeg()} -i {in_path} -hide_banner -loglevel panic -f wav -vn -y {audio_raw}'
subprocess.call(cmd.split())
# Extract audio from a video
audio_new = os.path.join(tmp_path, f'{Path(in_path).stem}_{sr}hz.wav')
cmd = f'{which_ffmpeg()} -i {audio_raw} -hide_banner -loglevel panic -ac 1 -ab 16k -ar {sr} -y {audio_new}'
subprocess.call(cmd.split())
length = int(duration * sr)
audio_zero_pad, spec = get_spectrogram(audio_new, save_dir=None, length=length, save_results=False)
# specvqgan expects inputs to be in [-1, 1] but spectrograms are in [0, 1]
spec = 2 * spec - 1
return spec
def show_grid(imgs):
print('Rendering the Plot with Frames Used in Conditioning')
figsize = ((imgs.shape[1] // 228 + 1) * 5, (imgs.shape[2] // 228 + 1) * 5)
if not isinstance(imgs, list):
imgs = [imgs]
fig, axs = plt.subplots(ncols=len(imgs), squeeze=False, figsize=figsize)
for i, img in enumerate(imgs):
img = img.detach()
img = F.to_pil_image(img)
axs[0, i].imshow(np.asarray(img))
axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
return fig
def calculate_codebook_bitrate(duration, quant_z, codebook_size):
# Calculating the Bitrate
bottle_neck_size = quant_z.shape[-2:]
bits_per_codebook_entry = (codebook_size-1).bit_length()
bitrate = bits_per_codebook_entry * bottle_neck_size.numel() / duration / 1024
print(f'The input audio is {duration:.2f} seconds long.')
print(f'Codebook size is {codebook_size} i.e. a codebook entry allocates {bits_per_codebook_entry} bits')
print(f'SpecVQGAN bottleneck size: {list(bottle_neck_size)}')
print(f'Thus, bitrate is {bitrate:.2f} kbps')
return bitrate
def get_audio_file_bitrate(file):
assert which_ffprobe() != '', 'Is ffmpeg installed? Check if the conda environment is activated.'
cmd = f'{which_ffprobe()} -v error -select_streams a:0'\
f' -show_entries stream=bit_rate -of default=noprint_wrappers=1:nokey=1 {file}'
result = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
bitrate = int(result.stdout.decode('utf-8').replace('\n', ''))
bitrate /= 1024
return bitrate
if __name__ == '__main__':
# if empty, it wasn't found
print(which_ffmpeg())
|
[
"os.remove",
"train.instantiate_from_config",
"omegaconf.omegaconf.OmegaConf.load",
"torch.cat",
"pathlib.Path",
"numpy.tile",
"feature_extraction.extract_mel_spectrogram.get_spectrogram",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"sample_visualization.load_vocoder",
"cv2.cvtColor",
"torch.load",
"specvqgan.data.vggsound.CropFeats",
"os.path.exists",
"torchvision.transforms.ToPILImage",
"torchvision.transforms.functional.to_pil_image",
"torchvision.transforms.Compose",
"specvqgan.util.md5_hash",
"torchvision.transforms.CenterCrop",
"sample_visualization.load_feature_extractor",
"numpy.asarray",
"torchvision.models.resnet50",
"sample_visualization.load_model_from_config",
"torch.rand",
"torch.nn.Identity",
"torchvision.transforms.Resize",
"subprocess.run",
"shutil.unpack_archive",
"os.makedirs",
"torch.stack",
"cv2.VideoCapture",
"numpy.array",
"specvqgan.util.download",
"torchvision.transforms.ToTensor"
] |
[((1139, 1229), 'subprocess.run', 'subprocess.run', (["['which', 'ffmpeg']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['which', 'ffmpeg'], stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n", (1153, 1229), False, 'import subprocess\n'), ((1459, 1550), 'subprocess.run', 'subprocess.run', (["['which', 'ffprobe']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(['which', 'ffprobe'], stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n", (1473, 1550), False, 'import subprocess\n'), ((3117, 3153), 'os.makedirs', 'os.makedirs', (['tmp_path'], {'exist_ok': '(True)'}), '(tmp_path, exist_ok=True)\n', (3128, 3153), False, 'import os\n'), ((4276, 4312), 'os.makedirs', 'os.makedirs', (['tmp_path'], {'exist_ok': '(True)'}), '(tmp_path, exist_ok=True)\n', (4287, 4312), False, 'import os\n'), ((6832, 6865), 'os.path.join', 'os.path.join', (['log_dir', 'model_name'], {}), '(log_dir, model_name)\n', (6844, 6865), False, 'import os\n'), ((8747, 8783), 'torch.load', 'torch.load', (['ckpt'], {'map_location': '"""cpu"""'}), "(ckpt, map_location='cpu')\n", (8757, 8783), False, 'import torch\n'), ((9132, 9182), 'sample_visualization.load_feature_extractor', 'load_feature_extractor', (['to_use_gpu'], {'eval_mode': '(True)'}), '(to_use_gpu, eval_mode=True)\n', (9154, 9182), False, 'from sample_visualization import load_feature_extractor, load_model_from_config, load_vocoder\n'), ((9518, 9555), 'train.instantiate_from_config', 'instantiate_from_config', (['config.model'], {}), '(config.model)\n', (9541, 9555), False, 'from train import instantiate_from_config\n'), ((11600, 11615), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11613, 11615), False, 'import torch\n'), ((14976, 15012), 'os.makedirs', 'os.makedirs', (['tmp_path'], {'exist_ok': '(True)'}), '(tmp_path, exist_ok=True)\n', (14987, 15012), False, 'import os\n'), ((15653, 15729), 'feature_extraction.extract_mel_spectrogram.get_spectrogram', 'get_spectrogram', (['audio_new'], {'save_dir': 'None', 'length': 'length', 'save_results': '(False)'}), '(audio_new, save_dir=None, length=length, save_results=False)\n', (15668, 15729), False, 'from feature_extraction.extract_mel_spectrogram import get_spectrogram\n'), ((6877, 6902), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (6891, 6902), False, 'import os\n'), ((6929, 6974), 'os.path.join', 'os.path.join', (['log_dir', 'f"""{model_name}.tar.gz"""'], {}), "(log_dir, f'{model_name}.tar.gz')\n", (6941, 6974), False, 'import os\n'), ((7750, 7777), 'omegaconf.omegaconf.OmegaConf.load', 'OmegaConf.load', (['config_main'], {}), '(config_main)\n', (7764, 7777), False, 'from omegaconf.omegaconf import OmegaConf\n'), ((7787, 7814), 'omegaconf.omegaconf.OmegaConf.load', 'OmegaConf.load', (['config_pylt'], {}), '(config_pylt)\n', (7801, 7814), False, 'from omegaconf.omegaconf import OmegaConf\n'), ((8798, 8867), 'sample_visualization.load_model_from_config', 'load_model_from_config', (['config.model', "pl_sd['state_dict']", 'to_use_gpu'], {}), "(config.model, pl_sd['state_dict'], to_use_gpu)\n", (8820, 8867), False, 'from sample_visualization import load_feature_extractor, load_model_from_config, load_vocoder\n'), ((11574, 11593), 'torch.nn.Identity', 'torch.nn.Identity', ([], {}), '()\n', (11591, 11593), False, 'import torch\n'), ((12279, 12307), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (12295, 12307), False, 'import cv2\n'), ((12412, 12462), 'torchvision.transforms.Compose', 'transforms.Compose', (['self.transforms.transforms[:4]'], {}), '(self.transforms.transforms[:4])\n', (12430, 12462), True, 'import torchvision.transforms as transforms\n'), ((14020, 14039), 'numpy.array', 'np.array', (['vid_feats'], {}), '(vid_feats)\n', (14028, 14039), True, 'import numpy as np\n'), ((16228, 16247), 'torchvision.transforms.functional.to_pil_image', 'F.to_pil_image', (['img'], {}), '(img)\n', (16242, 16247), True, 'import torchvision.transforms.functional as F\n'), ((2742, 2758), 'pathlib.Path', 'Path', (['video_path'], {}), '(video_path)\n', (2746, 2758), False, 'from pathlib import Path\n'), ((7202, 7237), 'specvqgan.util.download', 'download', (['down_link', 'tar_local_path'], {}), '(down_link, tar_local_path)\n', (7210, 7237), False, 'from specvqgan.util import download, md5_hash\n'), ((7312, 7358), 'shutil.unpack_archive', 'shutil.unpack_archive', (['tar_local_path', 'log_dir'], {}), '(tar_local_path, log_dir)\n', (7333, 7358), False, 'import shutil\n'), ((7435, 7460), 'os.remove', 'os.remove', (['tar_local_path'], {}), '(tar_local_path)\n', (7444, 7460), False, 'import os\n'), ((16273, 16288), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (16283, 16288), True, 'import numpy as np\n'), ((7043, 7073), 'os.path.exists', 'os.path.exists', (['tar_local_path'], {}), '(tar_local_path)\n', (7057, 7073), False, 'import os\n'), ((7077, 7101), 'specvqgan.util.md5_hash', 'md5_hash', (['tar_local_path'], {}), '(tar_local_path)\n', (7085, 7101), False, 'from specvqgan.util import download, md5_hash\n'), ((7568, 7617), 'os.path.join', 'os.path.join', (['model_dir', '"""configs/*-project.yaml"""'], {}), "(model_dir, 'configs/*-project.yaml')\n", (7580, 7617), False, 'import os\n'), ((7654, 7705), 'os.path.join', 'os.path.join', (['model_dir', '"""configs/*-lightning.yaml"""'], {}), "(model_dir, 'configs/*-lightning.yaml')\n", (7666, 7705), False, 'import os\n'), ((8683, 8728), 'os.path.join', 'os.path.join', (['model_dir', '"""checkpoints/*.ckpt"""'], {}), "(model_dir, 'checkpoints/*.ckpt')\n", (8695, 8728), False, 'import os\n'), ((9053, 9094), 'sample_visualization.load_vocoder', 'load_vocoder', (['ckpt_melgan'], {'eval_mode': '(True)'}), '(ckpt_melgan, eval_mode=True)\n', (9065, 9094), False, 'from sample_visualization import load_feature_extractor, load_model_from_config, load_vocoder\n'), ((10164, 10216), 'numpy.tile', 'np.tile', (["item['feature']", '(how_many_tiles_needed, 1)'], {}), "(item['feature'], (how_many_tiles_needed, 1))\n", (10171, 10216), True, 'import numpy as np\n'), ((10735, 10758), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (10756, 10758), True, 'import torchvision.transforms as transforms\n'), ((10772, 10794), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (10789, 10794), True, 'import torchvision.transforms as transforms\n'), ((10808, 10834), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (10829, 10834), True, 'import torchvision.transforms as transforms\n'), ((10848, 10869), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (10867, 10869), True, 'import torchvision.transforms as transforms\n'), ((10883, 10935), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'self.means', 'std': 'self.stds'}), '(mean=self.means, std=self.stds)\n', (10903, 10935), True, 'import torchvision.transforms as transforms\n'), ((11090, 11159), 'specvqgan.data.vggsound.CropFeats', 'CropFeats', (['[feat_cfg.feat_crop_len, feat_cfg.feat_depth]', 'random_crop'], {}), '([feat_cfg.feat_crop_len, feat_cfg.feat_depth], random_crop)\n', (11099, 11159), False, 'from specvqgan.data.vggsound import CropFeats\n'), ((11342, 11374), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (11357, 11374), True, 'import torchvision.models as models\n'), ((11973, 11989), 'torch.rand', 'torch.rand', (['T', 'D'], {}), '(T, D)\n', (11983, 11989), False, 'import torch\n'), ((13127, 13163), 'cv2.cvtColor', 'cv2.cvtColor', (['rgb', 'cv2.COLOR_BGR2RGB'], {}), '(rgb, cv2.COLOR_BGR2RGB)\n', (13139, 13163), False, 'import cv2\n'), ((3200, 3216), 'pathlib.Path', 'Path', (['video_path'], {}), '(video_path)\n', (3204, 3216), False, 'from pathlib import Path\n'), ((4392, 4408), 'pathlib.Path', 'Path', (['video_path'], {}), '(video_path)\n', (4396, 4408), False, 'from pathlib import Path\n'), ((8388, 8415), 'pathlib.Path', 'Path', (['config.data.params[a]'], {}), '(config.data.params[a])\n', (8392, 8415), False, 'from pathlib import Path\n'), ((9670, 9702), 'pathlib.Path', 'Path', (['"""./vocoder/logs/vggsound/"""'], {}), "('./vocoder/logs/vggsound/')\n", (9674, 9702), False, 'from pathlib import Path\n'), ((11229, 11279), 'train.instantiate_from_config', 'instantiate_from_config', (['feat_cfg.feat_sampler_cfg'], {}), '(feat_cfg.feat_sampler_cfg)\n', (11252, 11279), False, 'from train import instantiate_from_config\n'), ((15415, 15428), 'pathlib.Path', 'Path', (['in_path'], {}), '(in_path)\n', (15419, 15428), False, 'from pathlib import Path\n'), ((2829, 2845), 'pathlib.Path', 'Path', (['video_path'], {}), '(video_path)\n', (2833, 2845), False, 'from pathlib import Path\n'), ((13490, 13511), 'torch.cat', 'torch.cat', (['batch_list'], {}), '(batch_list)\n', (13499, 13511), False, 'import torch\n'), ((13865, 13886), 'torch.cat', 'torch.cat', (['batch_list'], {}), '(batch_list)\n', (13874, 13886), False, 'import torch\n'), ((14415, 14441), 'torch.stack', 'torch.stack', (['cached_frames'], {}), '(cached_frames)\n', (14426, 14441), False, 'import torch\n'), ((15174, 15187), 'pathlib.Path', 'Path', (['in_path'], {}), '(in_path)\n', (15178, 15187), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python3
import sys
import os.path
import gzip
from os import path
def process_file(file, output_file):
if path.exists(file) == False:
print("Cannot continue because {} does not exist".format(file), file=sys.stderr)
sys.exit(1)
if path.exists(output_file) == True:
os.remove(output_file)
with gzip.open(file, "rt", encoding="utf-8") as f:
with open(output_file, "w", encoding="utf-8") as output:
previous = []
# Expected format input is
# checksum,sequence,identifier
for line in f:
current = line.rstrip().split(",")
if previous:
if current[0] == previous[0]: # check checksums match
if current[1] != previous[1]: # check if the seqs do not match
# Expected format output is
# clashed_checksum,identifier_one,seq_one,identifier_two,seq_two
print(
"{},{},{},{},{}".format(
previous[0],
previous[2],
previous[1],
current[2],
current[1],
),
file=output,
)
previous = current
def main():
if len(sys.argv) != 3:
print(
"Please provide the commpressed sorted comma separated file to process and output file"
)
print("./compare.py input.csv.gz report.csv")
sys.exit(0)
process_file(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
|
[
"os.path.exists",
"sys.exit",
"gzip.open"
] |
[((128, 145), 'os.path.exists', 'path.exists', (['file'], {}), '(file)\n', (139, 145), False, 'from os import path\n'), ((253, 264), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (261, 264), False, 'import sys\n'), ((272, 296), 'os.path.exists', 'path.exists', (['output_file'], {}), '(output_file)\n', (283, 296), False, 'from os import path\n'), ((347, 386), 'gzip.open', 'gzip.open', (['file', '"""rt"""'], {'encoding': '"""utf-8"""'}), "(file, 'rt', encoding='utf-8')\n", (356, 386), False, 'import gzip\n'), ((1693, 1704), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1701, 1704), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
import json
import pytest
from requests import Response
import py42.settings
from py42.clients.users import UserClient
from py42.response import Py42Response
USER_URI = "/api/User"
DEFAULT_GET_ALL_PARAMS = {
"active": None,
"email": None,
"orgUid": None,
"roleId": None,
"pgNum": 1,
"pgSize": 500,
"q": None,
}
MOCK_GET_USER_RESPONSE = """{"totalCount": 3000, "users": ["foo"]}"""
MOCK_EMPTY_GET_USER_RESPONSE = """{"totalCount": 3000, "users": []}"""
MOCK_text = '{"item_list_key": [{"foo": "foo_val"}, {"bar": "bar_val"}]}'
class TestUserClient(object):
@pytest.fixture
def mock_get_all_response(self, mocker):
response = mocker.MagicMock(spec=Response)
response.status_code = 200
response.encoding = "utf-8"
response.text = MOCK_GET_USER_RESPONSE
return Py42Response(response)
@pytest.fixture
def mock_get_all_empty_response(self, mocker):
response = mocker.MagicMock(spec=Response)
response.status_code = 200
response.encoding = "utf-8"
response.text = MOCK_EMPTY_GET_USER_RESPONSE
return Py42Response(response)
@pytest.fixture
def post_api_mock_response(self, mocker):
response = mocker.MagicMock(spec=Response)
response.status_code = 200
response.encoding = "utf-8"
response.text = MOCK_text
return Py42Response(response)
def test_post_create_user_is_successful(self, mock_session, post_api_mock_response):
user_client = UserClient(mock_session)
mock_session.post.return_value = post_api_mock_response
org_uid = "TEST_ORG_ID"
username = "<EMAIL>"
password = "password"
name = "TESTNAME"
note = "Test Note"
user_client.create_user(org_uid, username, username, password, name, name, note)
expected_params = {
u"orgUid": org_uid,
u"username": username,
u"email": username,
u"password": password,
u"firstName": name,
u"lastName": name,
u"notes": note,
}
mock_session.post.assert_called_once_with(
USER_URI, data=json.dumps(expected_params)
)
def test_get_all_calls_get_with_uri_and_params(
self, mock_session, mock_get_all_response
):
mock_session.get.side_effect = [mock_get_all_response]
client = UserClient(mock_session)
for _ in client.get_all():
break
first_call = mock_session.get.call_args_list[0]
assert first_call[0][0] == USER_URI
assert first_call[1]["params"] == DEFAULT_GET_ALL_PARAMS
def test_unicode_username_get_user_by_username_calls_get_with_username(
self, mock_session, successful_response
):
username = u"您已经发现了秘密信息"
mock_session.get.return_value = successful_response
client = UserClient(mock_session)
client.get_by_username(username)
expected_params = {u"username": username}
mock_session.get.assert_called_once_with(USER_URI, params=expected_params)
def test_get_user_by_id_calls_get_with_uri_and_params(
self, mock_session, successful_response
):
mock_session.get.return_value = successful_response
client = UserClient(mock_session)
client.get_by_id(123456)
uri = "{}/{}".format(USER_URI, 123456)
mock_session.get.assert_called_once_with(uri, params={})
def test_get_all_calls_get_expected_number_of_times(
self, mock_session, mock_get_all_response, mock_get_all_empty_response
):
py42.settings.items_per_page = 1
client = UserClient(mock_session)
mock_session.get.side_effect = [
mock_get_all_response,
mock_get_all_response,
mock_get_all_empty_response,
]
for _ in client.get_all():
pass
py42.settings.items_per_page = 500
assert mock_session.get.call_count == 3
def test_get_scim_data_by_uid_calls_get_with_expected_uri_and_params(
self, mock_session
):
client = UserClient(mock_session)
client.get_scim_data_by_uid("USER_ID")
uri = "/api/v7/scim-user-data/collated-view"
mock_session.get.assert_called_once_with(uri, params={"userId": "USER_ID"})
def test_get_available_roles_calls_get_with_expected_uri(self, mock_session):
client = UserClient(mock_session)
client.get_available_roles()
uri = "/api/v4/role/view"
mock_session.get.assert_called_once_with(uri)
def test_get_roles_calls_get_with_expected_uri(self, mock_session):
client = UserClient(mock_session)
client.get_roles(12345)
uri = "/api/UserRole/12345"
mock_session.get.assert_called_once_with(uri)
def test_add_role_calls_post_with_expected_uri_and_data(self, mock_session):
client = UserClient(mock_session)
client.add_role(12345, "Test Role Name")
uri = "/api/UserRole"
assert mock_session.post.call_args[0][0] == uri
assert '"roleName": "Test Role Name"' in mock_session.post.call_args[1]["data"]
assert '"userId": 12345' in mock_session.post.call_args[1]["data"]
def test_delete_role_calls_delete_with_expected_uri_and_params(self, mock_session):
client = UserClient(mock_session)
client.remove_role(12345, "Test Role Name")
uri = "/api/UserRole?userId=12345&roleName=Test%20Role%20Name"
mock_session.delete.assert_called_once_with(uri)
def test_get_page_calls_get_with_expected_url_and_params(self, mock_session):
client = UserClient(mock_session)
client.get_page(10, True, "email", "org", "role", 100, "q")
mock_session.get.assert_called_once_with(
"/api/User",
params={
"active": True,
"email": "email",
"orgUid": "org",
"roleId": "role",
"pgNum": 10,
"pgSize": 100,
"q": "q",
},
)
|
[
"py42.response.Py42Response",
"json.dumps",
"py42.clients.users.UserClient"
] |
[((866, 888), 'py42.response.Py42Response', 'Py42Response', (['response'], {}), '(response)\n', (878, 888), False, 'from py42.response import Py42Response\n'), ((1151, 1173), 'py42.response.Py42Response', 'Py42Response', (['response'], {}), '(response)\n', (1163, 1173), False, 'from py42.response import Py42Response\n'), ((1412, 1434), 'py42.response.Py42Response', 'Py42Response', (['response'], {}), '(response)\n', (1424, 1434), False, 'from py42.response import Py42Response\n'), ((1547, 1571), 'py42.clients.users.UserClient', 'UserClient', (['mock_session'], {}), '(mock_session)\n', (1557, 1571), False, 'from py42.clients.users import UserClient\n'), ((2439, 2463), 'py42.clients.users.UserClient', 'UserClient', (['mock_session'], {}), '(mock_session)\n', (2449, 2463), False, 'from py42.clients.users import UserClient\n'), ((2924, 2948), 'py42.clients.users.UserClient', 'UserClient', (['mock_session'], {}), '(mock_session)\n', (2934, 2948), False, 'from py42.clients.users import UserClient\n'), ((3315, 3339), 'py42.clients.users.UserClient', 'UserClient', (['mock_session'], {}), '(mock_session)\n', (3325, 3339), False, 'from py42.clients.users import UserClient\n'), ((3687, 3711), 'py42.clients.users.UserClient', 'UserClient', (['mock_session'], {}), '(mock_session)\n', (3697, 3711), False, 'from py42.clients.users import UserClient\n'), ((4143, 4167), 'py42.clients.users.UserClient', 'UserClient', (['mock_session'], {}), '(mock_session)\n', (4153, 4167), False, 'from py42.clients.users import UserClient\n'), ((4452, 4476), 'py42.clients.users.UserClient', 'UserClient', (['mock_session'], {}), '(mock_session)\n', (4462, 4476), False, 'from py42.clients.users import UserClient\n'), ((4692, 4716), 'py42.clients.users.UserClient', 'UserClient', (['mock_session'], {}), '(mock_session)\n', (4702, 4716), False, 'from py42.clients.users import UserClient\n'), ((4938, 4962), 'py42.clients.users.UserClient', 'UserClient', (['mock_session'], {}), '(mock_session)\n', (4948, 4962), False, 'from py42.clients.users import UserClient\n'), ((5367, 5391), 'py42.clients.users.UserClient', 'UserClient', (['mock_session'], {}), '(mock_session)\n', (5377, 5391), False, 'from py42.clients.users import UserClient\n'), ((5672, 5696), 'py42.clients.users.UserClient', 'UserClient', (['mock_session'], {}), '(mock_session)\n', (5682, 5696), False, 'from py42.clients.users import UserClient\n'), ((2211, 2238), 'json.dumps', 'json.dumps', (['expected_params'], {}), '(expected_params)\n', (2221, 2238), False, 'import json\n')]
|
#!/usr/bin/python3
# #####################################
# info: This class can connect to VFD MDM166
#
# date: 2017-06-13
# version: 0.1.1
#
# Dependencies:
# $ sudo apt-get install python3-dev libusb-1.0-0-dev libudev-dev python3-pip
# $ sudo pip3 install --upgrade setuptools
# $ sudo pip3 install hidapi
# place a file 99-hidraw-vfd-permissions.rules with this line to /etc/udev/rules.d:
# SUBSYSTEM=="usb", ATTR{idVendor}=="19c2", ATTR{idProduct}=="6a11", MODE="0666"
#
# history:
#
# #####################################
# Import solution :-)
import hid
import dot_matrix_font
class usbVFD:
def __init__(self,vid=0x19c2,pid=0x6a11):
# just open an usb-hid-connection to the VFD:
self.dev = hid.device()
self.dev.open(vendor_id=vid, product_id=pid)
self.font = dot_matrix_font.dot_matrix_font()
def send_command(self,command):
#just send the command with the length ahead
l=bytes([len(command)])
command=l+command
self.dev.write(command)
########################################################################################
# general commands:
def dimming(self,luminance=100):
command = b'\x1b\x40'
if luminance>=75:
command+=b'\x02'
elif luminance>=25:
command+=b'\x01'
else:
command+=b'\x00'
self.send_command(command)
def clear_display(self):
self.send_command(command=b'\x1b\x50')
def all_on(self):
self.send_command(command=b'\x1b\x55')
def reset(self):
self.send_command(command=b'\1F')
def set_addr_counter(self,add):
self.send_command(command=b'\x1b\x60'+bytes([add]))
def write_grafic(self,data):
self.send_command(command=b'\x1b\x70'+bytes([len(data)])+bytes(data))
########################################################################################
# clock:
def calc_BCD(self,n):
if n>0xFF:
n=0xFF
higher_nibble, lower_nibble = divmod(n,10)
return higher_nibble<<4 | lower_nibble
def set_clock_data(self,hour,minute):
self.send_command(command=b'\x1B\x00'+bytes([self.calc_BCD(minute)])+bytes([self.calc_BCD(hour)]))
def set_clock_format(self,clock_format='24h',row='1row'):
command = b'\x1b'
if row=='upper':
command+=b'\x01'
else:
command+=b'\x02'
if clock_format=='24h':
command+=b'\x01'
else:
command+=b'\x00'
self.send_command(command)
########################################################################################
# symbols: symbol=address of symbol, grayscale from 0...100%
def set_symbol(self,symbol,grayscale=100):
command = b'\x1B\x30'+symbol
if grayscale >= 75:
command += b'\x02'
elif grayscale >= 25:
command += b'\x01'
else:
command += b'\x00'
self.send_command(command)
######
# named access to symbols for convenience
#
def set_play(self,grayscale=100):
self.set_symbol(symbol=b'\x00',grayscale=grayscale)
def set_pause(self,grayscale=100):
self.set_symbol(symbol=b'\x01',grayscale=grayscale)
def set_rec(self, grayscale=100):
self.set_symbol(symbol=b'\x02', grayscale=grayscale)
def set_envelope(self, grayscale=100):
self.set_symbol(symbol=b'\x03',grayscale=grayscale)
def set_envelope_at(self, grayscale=100):
self.set_symbol(symbol=b'\x04',grayscale=grayscale)
def set_mute(self, grayscale=100):
self.set_symbol(symbol=b'\x05',grayscale=grayscale)
def set_i(self, grayscale=100, segment=1):
if segment <=1:
segment=1
elif segment>=4:
segment=4
self.set_symbol(symbol=bytes([0x05+segment]))
def set_vol_logo(self,grayscale=100):
self.set_symbol(symbol=b'\x0A',grayscale=grayscale)
def set_vol_bar(self,grayscale=100,segment=1):
if segment <=1:
segment=1
elif segment>=14:
segment=14
self.set_symbol(symbol=bytes([0x0A+segment]))
########################################################################################
# write text: line is 0 for upper row, 1 for lower row
def write_str(self,text,row=0):
char_count = 0
for char in text:
addr_count = 0
for i in range(0, 6):
# send column after column:
self.set_addr_counter(addr_count + char_count * 12 + row)
col = [str(row[i]) for row in self.font.str_to_dot_matrix(char)]
col = [int(''.join(col), 2)]
self.write_grafic(col)
addr_count += 2 # each column has two addresses: upper and lower on
char_count+=1
|
[
"hid.device",
"dot_matrix_font.dot_matrix_font"
] |
[((722, 734), 'hid.device', 'hid.device', ([], {}), '()\n', (732, 734), False, 'import hid\n'), ((809, 842), 'dot_matrix_font.dot_matrix_font', 'dot_matrix_font.dot_matrix_font', ([], {}), '()\n', (840, 842), False, 'import dot_matrix_font\n')]
|
# ==============================================================================
# 2017_04_15 LSW@NCHC.
#
# Change 3 code to use new in, out dir name for fit the needs.
# cp new.image to /out/ do not need to chnage code of classify.py.
#
# USAGE: time py Check.py /home/TF_io/
# ==============================================================================
"""Daemon function with Popen call.
Glue code to check image dir then call next function.
NOTE: pyinstaller this Check.py to Check.exe before you use it.
"""
import os, time
import sys
from shutil import copyfile
import subprocess
this_n = sys.argv[0]
io_dir = sys.argv[1]
path_to_watch = io_dir + "/in/"
path_to_check = io_dir + "/out/"
before = dict ([(f, None) for f in os.listdir(path_to_watch) if f.endswith('.jpg')])
while 1:
time.sleep (1)
after = dict ([(f, None) for f in os.listdir(path_to_watch) if f.endswith('.jpg')])
for f in after:
if not f in before:
#print("Added: ", ", ".join (f))
# check if cfg exist, else exit this loop
if os.path.isfile(io_dir + "/" + f.split('_')[0] + ".cfg"):
print("roi_cfg,", io_dir + "/" + f.split('_')[0] + ".cfg", "exist:", os.path.isfile(io_dir + "/" + f.split('_')[0] + ".cfg"))
print("New Image Found:", f)
print("cp",path_to_watch + f, "to", path_to_check + f)
copyfile(path_to_watch + "/" + f, path_to_check + "/" + f)
print("roi_cfg:", f.split('_')[0] + ".cfg")
# Call classify.exe
path_out_img = path_to_check + f
path_cam_cfg = io_dir + "/" + f.split('_')[0] + ".cfg"
p = subprocess.Popen(['./classify.exe', "--image_file", path_out_img, path_cam_cfg, "--model_dir", "hw_model"], stdout = subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
print(stdout, stderr)
else:
print("roi_cfg,", io_dir + "/" + f.split('_')[0] + ".cfg", "exist:", os.path.isfile(io_dir + "/" + f.split('_')[0] + ".cfg"))
removed = [f for f in before if not f in after]
#if added:
# for a in added:
# print("Added: ", ", ".join (a))
# print(roi_cfg = a.split('_'))
if removed: print("Removed: ", ", ".join (removed))
before = after
|
[
"shutil.copyfile",
"subprocess.Popen",
"os.listdir",
"time.sleep"
] |
[((799, 812), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (809, 812), False, 'import os, time\n'), ((738, 763), 'os.listdir', 'os.listdir', (['path_to_watch'], {}), '(path_to_watch)\n', (748, 763), False, 'import os, time\n'), ((850, 875), 'os.listdir', 'os.listdir', (['path_to_watch'], {}), '(path_to_watch)\n', (860, 875), False, 'import os, time\n'), ((1337, 1395), 'shutil.copyfile', 'copyfile', (["(path_to_watch + '/' + f)", "(path_to_check + '/' + f)"], {}), "(path_to_watch + '/' + f, path_to_check + '/' + f)\n", (1345, 1395), False, 'from shutil import copyfile\n'), ((1601, 1764), 'subprocess.Popen', 'subprocess.Popen', (["['./classify.exe', '--image_file', path_out_img, path_cam_cfg,\n '--model_dir', 'hw_model']"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(['./classify.exe', '--image_file', path_out_img,\n path_cam_cfg, '--model_dir', 'hw_model'], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n", (1617, 1764), False, 'import subprocess\n')]
|
from timm.models.layers.weight_init import trunc_normal_
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from einops import rearrange
from mmcv.cnn import build_conv_layer, kaiming_init
class FeatEmbed(nn.Module):
"""Image to Patch Embedding.
Args:
img_size (int | tuple): Size of input image.
patch_size (int): Size of one patch.
in_channels (int): Channel num of input features. Defaults to 3.
embed_dims (int): Dimensions of embedding. Defaults to 768.
conv_cfg (dict | None): Config dict for convolution layer. Defaults to
`dict(type='Conv2d')`.
"""
def __init__(self,
img_size,
patch_size,
in_channels=256,
embed_dims=256,
conv_cfg=dict(type='Conv2d')):
super().__init__()
self.img_size = _pair(img_size)
self.patch_size = _pair(patch_size)
num_patches = (self.img_size[1] // self.patch_size[1]) * (
self.img_size[0] // self.patch_size[0])
assert num_patches * self.patch_size[0] * self.patch_size[1] == \
self.img_size[0] * self.img_size[1], \
'The image size H*W must be divisible by patch size'
self.num_patches = num_patches
# Use conv layer to embed
self.projection = build_conv_layer(
conv_cfg,
in_channels,
embed_dims,
kernel_size=patch_size,
stride=patch_size)
self.init_weights()
def init_weights(self):
# Lecun norm from ClassyVision
kaiming_init(self.projection, mode='fan_in', nonlinearity='linear')
def forward(self, x):
x = self.projection(x).flatten(2)
x = rearrange(x, 'b d n -> b n d')
return x
|
[
"mmcv.cnn.kaiming_init",
"einops.rearrange",
"torch.nn.modules.utils._pair",
"mmcv.cnn.build_conv_layer"
] |
[((928, 943), 'torch.nn.modules.utils._pair', '_pair', (['img_size'], {}), '(img_size)\n', (933, 943), False, 'from torch.nn.modules.utils import _pair\n'), ((970, 987), 'torch.nn.modules.utils._pair', '_pair', (['patch_size'], {}), '(patch_size)\n', (975, 987), False, 'from torch.nn.modules.utils import _pair\n'), ((1404, 1502), 'mmcv.cnn.build_conv_layer', 'build_conv_layer', (['conv_cfg', 'in_channels', 'embed_dims'], {'kernel_size': 'patch_size', 'stride': 'patch_size'}), '(conv_cfg, in_channels, embed_dims, kernel_size=patch_size,\n stride=patch_size)\n', (1420, 1502), False, 'from mmcv.cnn import build_conv_layer, kaiming_init\n'), ((1665, 1732), 'mmcv.cnn.kaiming_init', 'kaiming_init', (['self.projection'], {'mode': '"""fan_in"""', 'nonlinearity': '"""linear"""'}), "(self.projection, mode='fan_in', nonlinearity='linear')\n", (1677, 1732), False, 'from mmcv.cnn import build_conv_layer, kaiming_init\n'), ((1814, 1844), 'einops.rearrange', 'rearrange', (['x', '"""b d n -> b n d"""'], {}), "(x, 'b d n -> b n d')\n", (1823, 1844), False, 'from einops import rearrange\n')]
|
from datetime import datetime, timedelta
from os import environ
from peewee import (
BigIntegerField,
DateField,
DateTimeField,
CharField,
FloatField,
Model,
BooleanField,
InternalError,
)
from playhouse.db_url import connect
# Use default sqlite db in tests
db = connect(environ.get("DATABASE_URL") or "sqlite:///default.db")
class BaseModel(Model):
class Meta:
database = db
class Reminder(BaseModel):
user_name = CharField()
tweet_id = BigIntegerField()
created_on = DateField()
remind_on = DateTimeField()
stock_symbol = CharField()
stock_price = FloatField()
short = BooleanField(default=False)
is_finished = BooleanField(default=False)
class Meta:
table_name = "reminders"
def finish(self):
self.is_finished = True
self.save()
def refresh_from_db(self):
return Reminder.get_by_id(self.id)
@classmethod
def create_instance(cls, values):
with db.atomic() as transaction:
try:
Reminder.create(
user_name=values["user_name"],
tweet_id=values["tweet_id"],
created_on=values["created_on"],
remind_on=values["remind_on"],
stock_symbol=values["stock_symbol"],
stock_price=values["stock_price"],
short=values["short"],
)
except InternalError:
transaction.rollback()
@classmethod
def due_now(cls):
return cls.select().where(
cls.remind_on.between(
# TODO: I think this should rather fetch all reminders for today's date.
# If the job fails, upon retry, the reminder might not be fetched if
# it's outside of the 6 min window
datetime.now() - timedelta(minutes=3),
datetime.now() + timedelta(minutes=3),
),
cls.is_finished == False, # noqa
)
def migrate():
tables = db.get_tables()
if [Reminder] not in tables:
db.create_tables([Reminder])
if __name__ == "__main__":
migrate()
|
[
"peewee.FloatField",
"peewee.DateField",
"peewee.DateTimeField",
"os.environ.get",
"peewee.CharField",
"datetime.timedelta",
"peewee.BooleanField",
"peewee.BigIntegerField",
"datetime.datetime.now"
] |
[((470, 481), 'peewee.CharField', 'CharField', ([], {}), '()\n', (479, 481), False, 'from peewee import BigIntegerField, DateField, DateTimeField, CharField, FloatField, Model, BooleanField, InternalError\n'), ((497, 514), 'peewee.BigIntegerField', 'BigIntegerField', ([], {}), '()\n', (512, 514), False, 'from peewee import BigIntegerField, DateField, DateTimeField, CharField, FloatField, Model, BooleanField, InternalError\n'), ((532, 543), 'peewee.DateField', 'DateField', ([], {}), '()\n', (541, 543), False, 'from peewee import BigIntegerField, DateField, DateTimeField, CharField, FloatField, Model, BooleanField, InternalError\n'), ((560, 575), 'peewee.DateTimeField', 'DateTimeField', ([], {}), '()\n', (573, 575), False, 'from peewee import BigIntegerField, DateField, DateTimeField, CharField, FloatField, Model, BooleanField, InternalError\n'), ((595, 606), 'peewee.CharField', 'CharField', ([], {}), '()\n', (604, 606), False, 'from peewee import BigIntegerField, DateField, DateTimeField, CharField, FloatField, Model, BooleanField, InternalError\n'), ((625, 637), 'peewee.FloatField', 'FloatField', ([], {}), '()\n', (635, 637), False, 'from peewee import BigIntegerField, DateField, DateTimeField, CharField, FloatField, Model, BooleanField, InternalError\n'), ((650, 677), 'peewee.BooleanField', 'BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (662, 677), False, 'from peewee import BigIntegerField, DateField, DateTimeField, CharField, FloatField, Model, BooleanField, InternalError\n'), ((696, 723), 'peewee.BooleanField', 'BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (708, 723), False, 'from peewee import BigIntegerField, DateField, DateTimeField, CharField, FloatField, Model, BooleanField, InternalError\n'), ((306, 333), 'os.environ.get', 'environ.get', (['"""DATABASE_URL"""'], {}), "('DATABASE_URL')\n", (317, 333), False, 'from os import environ\n'), ((1874, 1888), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1886, 1888), False, 'from datetime import datetime, timedelta\n'), ((1891, 1911), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(3)'}), '(minutes=3)\n', (1900, 1911), False, 'from datetime import datetime, timedelta\n'), ((1929, 1943), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1941, 1943), False, 'from datetime import datetime, timedelta\n'), ((1946, 1966), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(3)'}), '(minutes=3)\n', (1955, 1966), False, 'from datetime import datetime, timedelta\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Description:
This PySPark scripts maps geolocated mobility data for valid users
to specific land use type where the activity occured and counts
number of unique users within each land use type aggregated to 250m x 250m
neighborhoods in New York City.
"""
# imports
from pyspark.sql.session import SparkSession
from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper
from pyspark.sql.types import * # import types
import numpy as np
from math import sin, cos, sqrt, atan2, radians
spark = SparkSession.builder.getOrCreate()
def distance_km(x1, y1, x2, y2):
# approximate radius of earth in km
R = 6373.0
lat1 = radians(y1)
lon1 = radians(x1)
lat2 = radians(y2)
lon2 = radians(x2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c
return distance
# Load grid data
land_use = spark.read.parquet('/raster/grid_classification/parquet_grid_data/')
x_raster_step = 0.000009
y_raster_step = 0.000012
# Load venpath data activity
df = spark.read.parquet('<directory-to-mobility-data-on-HDFS>')
df = df.withColumn('ad_id_upper', upper(col('ad_id')))
# define boundaries extent
llc_lon = -74.2555954656
llc_lat = 40.4961100684
urc_lon = -73.7000071112
urc_lat = 40.9155259862
# subset data based on bounding box
nyc = df.filter((col('ad_id')!='00000000-0000-0000-0000-000000000000') \
& (col('lon')>=llc_lon) \
& (col('lon')<=urc_lon) \
& (col('lat')>=llc_lat) \
& (col('lat')<=urc_lat) )
# create date column
nyc = nyc.withColumn("date", to_date(col("timestamp")))
# find valid users based on number of days active
ad_id_count = nyc.groupby("ad_id_upper").agg(countDistinct("date").alias('day_count')).withColumnRenamed("ad_id_upper", "id")
ad_id_count_filtered = ad_id_count.filter((col("day_count")>14))
nyc = nyc.join(ad_id_count_filtered, nyc.ad_id_upper == ad_id_count_filtered.id, how='inner')
# cast raster cell indices
nyc = nyc.withColumn("x_raster_cell", ((nyc["lon"]-llc_lon) / x_raster_step).cast('integer'))
nyc = nyc.withColumn("y_raster_cell", ((nyc["lat"]-llc_lat) / y_raster_step).cast('integer'))
# join with land use raster
nyc = nyc.join(land_use, (nyc.x_raster_cell == land_use.x_cell) & (nyc.y_raster_cell == land_use.y_cell), how='left')
# calculate the extent of the bounding box in kilometers
xx = distance_km(llc_lon, np.mean([llc_lat, urc_lat]), urc_lon, np.mean([llc_lat, urc_lat]))
yy = distance_km(np.mean([llc_lon, urc_lon]), llc_lat, np.mean([llc_lon, urc_lon]), urc_lat)
# find number of 500 m cels in x and y dimension
x_grid = xx / 0.25
y_grid = yy / 0.25
# define the x and y step size in geographic coordinates
x_grid_step = (urc_lon - llc_lon)/x_grid
y_grid_step = (urc_lat - llc_lat)/y_grid
# assign cell x, y, coordiantes and index for each ping
nyc = nyc.withColumn("x_250m_cell", ((nyc["lon"]-llc_lon) / x_grid_step).cast('integer'))
nyc = nyc.withColumn("cell_250m_lon", llc_lon+nyc["x_250m_cell"]*x_grid_step+0.5*x_grid_step)
nyc = nyc.withColumn("y_250m_cell", ((nyc["lat"]-llc_lat) / y_grid_step).cast('integer'))
nyc = nyc.withColumn("cell_250m_lat", llc_lat+nyc["y_250m_cell"]*y_grid_step+0.5*y_grid_step)
nyc = nyc.withColumn('cell_index', concat(col("x_250m_cell"), lit(";"), col("y_250m_cell")))
# create hour column
nyc = nyc.withColumn("hour", date_format(col("timestamp").cast("timestamp"), "yyyy-MM-dd HH:00"))
# count cell aggregations and save to file
hourly_counts = nyc.groupby("hour", "cell_index", "class").agg(countDistinct("ad_id_upper"))
hourly_counts.write \
.format("com.databricks.spark.csv") \
.mode("overwrite") \
.save("/user/bjb417/covid/output/nyc/nyc_land_use/nyc_250mGrid_landUse_uniqueDev_hourlyCounts_active14days.csv")
# save 250m x 250m grid information
grid = nyc.select("cell_index", "x_250m_cell", "y_250m_cell", "cell_250m_lon", "cell_250m_lat") \
.drop_duplicates(subset=['cell_index'])
grid.write \
.format("com.databricks.spark.csv") \
.mode("overwrite") \
.save("/user/bjb417/covid/output/nyc/nyc_land_use/nyc_250mGrid_landUse_active14days.csv")
|
[
"math.sqrt",
"math.radians",
"pyspark.sql.functions.lit",
"math.sin",
"numpy.mean",
"pyspark.sql.functions.col",
"math.cos",
"pyspark.sql.session.SparkSession.builder.getOrCreate",
"pyspark.sql.functions.countDistinct"
] |
[((610, 644), 'pyspark.sql.session.SparkSession.builder.getOrCreate', 'SparkSession.builder.getOrCreate', ([], {}), '()\n', (642, 644), False, 'from pyspark.sql.session import SparkSession\n'), ((736, 747), 'math.radians', 'radians', (['y1'], {}), '(y1)\n', (743, 747), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((756, 767), 'math.radians', 'radians', (['x1'], {}), '(x1)\n', (763, 767), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((776, 787), 'math.radians', 'radians', (['y2'], {}), '(y2)\n', (783, 787), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((796, 807), 'math.radians', 'radians', (['x2'], {}), '(x2)\n', (803, 807), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((2490, 2517), 'numpy.mean', 'np.mean', (['[llc_lat, urc_lat]'], {}), '([llc_lat, urc_lat])\n', (2497, 2517), True, 'import numpy as np\n'), ((2528, 2555), 'numpy.mean', 'np.mean', (['[llc_lat, urc_lat]'], {}), '([llc_lat, urc_lat])\n', (2535, 2555), True, 'import numpy as np\n'), ((2574, 2601), 'numpy.mean', 'np.mean', (['[llc_lon, urc_lon]'], {}), '([llc_lon, urc_lon])\n', (2581, 2601), True, 'import numpy as np\n'), ((2612, 2639), 'numpy.mean', 'np.mean', (['[llc_lon, urc_lon]'], {}), '([llc_lon, urc_lon])\n', (2619, 2639), True, 'import numpy as np\n'), ((3625, 3653), 'pyspark.sql.functions.countDistinct', 'countDistinct', (['"""ad_id_upper"""'], {}), "('ad_id_upper')\n", (3638, 3653), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((1268, 1280), 'pyspark.sql.functions.col', 'col', (['"""ad_id"""'], {}), "('ad_id')\n", (1271, 1280), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((1687, 1703), 'pyspark.sql.functions.col', 'col', (['"""timestamp"""'], {}), "('timestamp')\n", (1690, 1703), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((1926, 1942), 'pyspark.sql.functions.col', 'col', (['"""day_count"""'], {}), "('day_count')\n", (1929, 1942), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((3347, 3365), 'pyspark.sql.functions.col', 'col', (['"""x_250m_cell"""'], {}), "('x_250m_cell')\n", (3350, 3365), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((3367, 3375), 'pyspark.sql.functions.lit', 'lit', (['""";"""'], {}), "(';')\n", (3370, 3375), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((3377, 3395), 'pyspark.sql.functions.col', 'col', (['"""y_250m_cell"""'], {}), "('y_250m_cell')\n", (3380, 3395), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((853, 866), 'math.sin', 'sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (856, 866), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((928, 935), 'math.sqrt', 'sqrt', (['a'], {}), '(a)\n', (932, 935), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((937, 948), 'math.sqrt', 'sqrt', (['(1 - a)'], {}), '(1 - a)\n', (941, 948), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((1605, 1615), 'pyspark.sql.functions.col', 'col', (['"""lat"""'], {}), "('lat')\n", (1608, 1615), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((872, 881), 'math.cos', 'cos', (['lat1'], {}), '(lat1)\n', (875, 881), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((884, 893), 'math.cos', 'cos', (['lat2'], {}), '(lat2)\n', (887, 893), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((896, 909), 'math.sin', 'sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (899, 909), False, 'from math import sin, cos, sqrt, atan2, radians\n'), ((1578, 1588), 'pyspark.sql.functions.col', 'col', (['"""lat"""'], {}), "('lat')\n", (1581, 1588), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((3461, 3477), 'pyspark.sql.functions.col', 'col', (['"""timestamp"""'], {}), "('timestamp')\n", (3464, 3477), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((1551, 1561), 'pyspark.sql.functions.col', 'col', (['"""lon"""'], {}), "('lon')\n", (1554, 1561), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((1802, 1823), 'pyspark.sql.functions.countDistinct', 'countDistinct', (['"""date"""'], {}), "('date')\n", (1815, 1823), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((1464, 1476), 'pyspark.sql.functions.col', 'col', (['"""ad_id"""'], {}), "('ad_id')\n", (1467, 1476), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n'), ((1524, 1534), 'pyspark.sql.functions.col', 'col', (['"""lon"""'], {}), "('lon')\n", (1527, 1534), False, 'from pyspark.sql.functions import col, concat, lit, substring, countDistinct, date_format, to_date, upper\n')]
|
import os
import random
import argparse
import numpy as np
from PIL import Image, ImageDraw, ImageFont
def make_blank_placeholder(image_file, out_file):
#print(out_file)
image = np.asarray(Image.open(image_file))
blank = np.ones(image.shape)*255
blank = blank.astype(np.uint8)
#print(blank.shape)
im = Image.fromarray(blank)
draw = ImageDraw.Draw(im)
(x, y) = ((image.shape[0]//2)-50+random.randint(-10,10), (image.shape[1]//2)-50+random.randint(-10,10))
font = ImageFont.truetype('/Library/Fonts/Arial Bold.ttf', 45)
message = "No Data"
color = 'rgb(0, 0, 0)' # black color
draw.text((x, y), message, fill=color, font=font)
#im.convert('L')
im.save(out_file)
def reduce_quality(image_file):
im = Image.open(image_file)
im.save(image_file, quality=90)
def main():
parser = argparse.ArgumentParser(description='Process some images.')
parser.add_argument('--path', metavar='path', type=str,
help='path to images')
parser.add_argument('--volume_identifier', metavar='vol_id', type=str,
help='unique volume identifier e.g. 3R_ROI1')
args = parser.parse_args()
path = args.path
vol_id = args.volume_identifier
#old_dirpath=None
images = []
id_nums = []
'''
metadata = {'Raw Z resolution (nm)': 50,
'Raw XY resolution (nm)': 10,
'Volume ID': vol_id,
'default_frame': 3,
'#set': None}
'''
manifest = open(os.path.join(path+'manifest.csv'),'w')
manifest.write((',').join(['image1', 'image2', 'image3', 'image4', 'image5', 'Raw Z resolution (nm)', 'Raw XY resolution (nm)', 'Volume ID', 'default_frame', '#set\n']))
for (dirpath, dirnames, filenames) in os.walk(path):
#if dirpath != old_dirpath:
# images = []
# id_nums = []
for f in filenames:
'''
metadata = {'Raw Z resolution (nm)': 50,
'Raw XY resolution (nm)': 10,
'default_frame': 3}
'''
image_file = os.path.join(dirpath, f)
if '.DS_Store' in image_file:
continue
if '.csv' in image_file:
continue
#print(image_file)
#if 'ROI1' in image_file:
# reduce_quality(image_file)
file_stub = image_file.strip('.jpg')[:-3] + '%03d_blank.jpg'
id_num = image_file.strip('.jpg')[-3:]
if id_num == 'ank':
continue
if id_num == 'opy':
id_num = image_file.strip(' copy.jpg')[-3:]
file_stub = image_file.strip(' copy.jpg')[:-3] + '%03d_blank.jpg'
id_nums.append(int(id_num))
images.append(image_file)
if images == []:
continue
sorted_images = [x for _,x in sorted(zip(id_nums,images))]
id_nums.sort()
make_blank_placeholder(images[0], file_stub%(id_nums[0]-2))
make_blank_placeholder(images[0], file_stub%(id_nums[0]-1))
make_blank_placeholder(images[0], file_stub%(id_nums[-1]+1))
make_blank_placeholder(images[0], file_stub%(id_nums[-1]+2))
images = [file_stub%(id_nums[0]-2), file_stub%(id_nums[0]-1)] + \
sorted_images + \
[file_stub%(id_nums[-1]+1), file_stub%(id_nums[-1]+2)]
#print(len(images))
for i in range(2,len(images)-2, 1):
#print(len(images[i-2:i+3]))
print(images[i-2:i+3])
#print(','.join(images[i-2:i+2]))
manifest.write((',').join([im.split('/')[-1] for im in images[i-2:i+3]]+['50', '10', vol_id, '3', dirpath.split('/')[-1]])+'\n')
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"random.randint",
"os.walk",
"numpy.ones",
"PIL.Image.open",
"PIL.ImageFont.truetype",
"PIL.Image.fromarray",
"PIL.ImageDraw.Draw",
"os.path.join"
] |
[((317, 339), 'PIL.Image.fromarray', 'Image.fromarray', (['blank'], {}), '(blank)\n', (332, 339), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((349, 367), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (363, 367), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((483, 538), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""/Library/Fonts/Arial Bold.ttf"""', '(45)'], {}), "('/Library/Fonts/Arial Bold.ttf', 45)\n", (501, 538), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((731, 753), 'PIL.Image.open', 'Image.open', (['image_file'], {}), '(image_file)\n', (741, 753), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((812, 871), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some images."""'}), "(description='Process some images.')\n", (835, 871), False, 'import argparse\n'), ((1715, 1728), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (1722, 1728), False, 'import os\n'), ((196, 218), 'PIL.Image.open', 'Image.open', (['image_file'], {}), '(image_file)\n', (206, 218), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((230, 250), 'numpy.ones', 'np.ones', (['image.shape'], {}), '(image.shape)\n', (237, 250), True, 'import numpy as np\n'), ((1464, 1499), 'os.path.join', 'os.path.join', (["(path + 'manifest.csv')"], {}), "(path + 'manifest.csv')\n", (1476, 1499), False, 'import os\n'), ((403, 426), 'random.randint', 'random.randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (417, 426), False, 'import random\n'), ((450, 473), 'random.randint', 'random.randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (464, 473), False, 'import random\n'), ((1997, 2021), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (2009, 2021), False, 'import os\n')]
|
import pickle
import logging
import hashlib
import numpy as np
import os
from pathlib import Path
import spacy
import shutil
import sys
import tarfile
import tempfile
import torch
from typing import Dict, List
sys.path.append("nbsvm")
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from lime.lime_text import LimeTextExplainer
from allennlp.models.archival import load_archive
from allennlp.data import Vocabulary
from allennlp.data.dataset_readers import DatasetReader
from flask import Flask, request, Response, jsonify, render_template, send_from_directory
logging.basicConfig(level=logging.INFO)
stemmer = SnowballStemmer('english')
stopWords = set(stopwords.words('english'))
class LemmaTokenizer(object):
def __init__(self):
self.wnl = WordNetLemmatizer()
def __call__(self, articles):
return [stemmer.stem(self.wnl.lemmatize(t)) for t in word_tokenize(articles) if t not in stopWords]
# this was done to make sure the model unpickles correctly (may not actually be necessary)
setattr(sys.modules["__main__"], LemmaTokenizer.__name__, LemmaTokenizer)
class LimePredictor(object):
def __init__(self, idx2label: Dict[int, str]):
self.idx2label = idx2label
self.label2idx = {v: k for k, v in idx2label.items()}
self.class_names = [idx2label[i] for i in range(len(self.idx2label))]
def predict(self, text: str) -> Dict[str, np.ndarray]:
raise NotImplementedError
def predict_batch(self, texts: List[str]) -> np.ndarray:
raise NotImplementedError
class NBSVMLimePredictor(LimePredictor):
def __init__(self, model_path: str):
model_path = Path(model_path)
with open(str(model_path), "rb") as f:
self.model = pickle.load(f)
nbsvm = self.model.steps[1][1]
nbsvm.predict_proba = nbsvm._predict_proba_lr
self.idx2label = {i: l for i, l in enumerate(nbsvm.classes_.tolist())}
super(NBSVMLimePredictor, self).__init__(self.idx2label)
def predict(self, text: str) -> Dict[str, np.ndarray]:
out = {}
out['label'] = self.model.predict([text])[0]
logits = self.model.predict_proba([text])[0]
out['logits'] = logits
out['probs'] = logits
return out
def predict_batch(self, texts: List[str]) -> np.ndarray:
return self.model.predict_proba(texts)
class AllenNLPLimePredictor(LimePredictor):
def __init__(self, archive_path: str, device: int = -1, batch_size: int = 32):
archive_path = Path(archive_path)
archive = load_archive(archive_path)
self.params = archive.config
self.model = archive.model.eval()
self.batch_size = batch_size
self.reader = DatasetReader.from_params(self.params.get("dataset_reader"))
self.vocab = self._load_vocab(archive_path)
self.idx2label = self.vocab.get_index_to_token_vocabulary('labels')
if device != -1:
self.model.to(f"cuda:{device}")
super(AllenNLPLimePredictor, self).__init__(self.idx2label)
@staticmethod
def _load_vocab(archive_path: Path) -> Vocabulary:
# an annoying hack to load the vocab file
tempdir = tempfile.mkdtemp()
with tarfile.open(archive_path, 'r:gz') as _archive:
_archive.extractall(tempdir)
vocab_path = Path(tempdir) / "vocabulary"
vocab = Vocabulary.from_files(vocab_path)
shutil.rmtree(tempdir)
return vocab
def predict(self, text: str) -> Dict[str, np.ndarray]:
return self.model.forward_on_instance(self.reader.text_to_instance(text))
def predict_batch(self, texts: List[str]) -> np.ndarray:
with torch.no_grad():
instances = [self.reader.text_to_instance(t) for t in texts]
instance_chunks = [instances[x: x + self.batch_size] for x in
range(0, len(instances), self.batch_size)]
preds = []
for batch in instance_chunks:
pred = self.model.forward_on_instances(batch)
preds.extend(pred)
probs = [p['probs'] for p in preds]
return np.stack(probs, axis=0)
class ServerError(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
error_dict = dict(self.payload or ())
error_dict['message'] = self.message
return error_dict
app = Flask(__name__) # pylint: disable=invalid-name
# We hash the javascript file and use it as a cache breaker
hasher = hashlib.md5()
app_js = open("static/app.js")
hasher.update(app_js.read().encode('utf-8'))
js_hash = hasher.hexdigest()
nlp = spacy.load('en_core_web_sm', disable=['vectors', 'textcat', 'tagger', 'ner'])
# nlp.add_pipe(nlp.create_pipe('sentencizer'))
split_expr = lambda text: [sent.string.strip() for sent in nlp(text).sents]
home_path = Path(os.environ.get("HOME", "."))
nbsvm_predictor = NBSVMLimePredictor(home_path / ".models/nbsvm_imdb_sent_500.pkl")
device = 0 if torch.cuda.is_available() else -1
bert_predictor = AllenNLPLimePredictor(home_path / ".models/bert_base_1000.tar.gz", device=device)
nbsvm_explainer = LimeTextExplainer(class_names=nbsvm_predictor.class_names,
bow=True, split_expression=split_expr)
bert_explainer = LimeTextExplainer(class_names=bert_predictor.class_names,
bow=False, split_expression=split_expr)
models = {
'bert': {'explainer': bert_explainer, 'predictor': bert_predictor},
'nbsvm': {'explainer': nbsvm_explainer, 'predictor': nbsvm_predictor}
}
@app.errorhandler(ServerError)
def handle_invalid_usage(error: ServerError) -> Response: # pylint: disable=unused-variable
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route('/')
def index() -> Response: # pylint: disable=unused-variable
return render_template(
'app.html',
google_analytics_ua="UA-120916510-5", # TODO:don't hardcode this!
js_hash=js_hash
)
@app.route('/static/<path:path>')
def static_proxy(path: str) -> Response: # pylint: disable=unused-variable
return send_from_directory('static', path)
@app.route('/predict', methods=['POST', 'OPTIONS'])
def predict() -> Response: # pylint: disable=unused-variable
if request.method == "OPTIONS":
return Response(response="", status=200)
data = request.get_json()
previous_str = data["previous"]
# Log the query
app.logger.info(f"<{previous_str}>")
lime_tokens = split_expr(previous_str)
model_name = data.get("model_name", "BERT").lower()
predictor = models[model_name]['predictor']
explainer = models[model_name]['explainer']
app.logger.info(f"Using model {model_name}")
out = predictor.predict(previous_str)
class_probabilities = out['probs'].tolist()
label = out['label']
explanation = explainer.explain_instance(previous_str, predictor.predict_batch,
num_features=10, labels=[1], num_samples=100)
score_dict = dict(explanation.as_list(1))
lime_scores = [score_dict.get(tok, 0.) for tok in lime_tokens]
if predictor.label2idx['neg'] != 0:
# we need to reverse the lime scores
lime_scores = [-1 * score for score in lime_scores]
# make sure class probabilities are always consistently ordered
class_probabilities = [class_probabilities[predictor.label2idx[lbl]] for lbl in ['neg', 'pos']]
app.logger.info(label)
app.logger.info(lime_scores)
app.logger.info(lime_tokens)
app.logger.info(class_probabilities)
return jsonify({
"lime_scores": lime_scores,
"lime_tokens": lime_tokens,
"label": label,
"class_probabilities": class_probabilities,
"words": lime_tokens,
"output": previous_str,
"sentiment": label
})
if __name__ == "__main__":
app.run(host='0.0.0.0', threaded=False)
|
[
"flask.jsonify",
"pathlib.Path",
"pickle.load",
"shutil.rmtree",
"torch.no_grad",
"allennlp.data.Vocabulary.from_files",
"flask.request.get_json",
"nltk.word_tokenize",
"sys.path.append",
"nltk.stem.WordNetLemmatizer",
"spacy.load",
"tempfile.mkdtemp",
"flask.render_template",
"tarfile.open",
"flask.send_from_directory",
"flask.Response",
"lime.lime_text.LimeTextExplainer",
"numpy.stack",
"hashlib.md5",
"torch.cuda.is_available",
"nltk.corpus.stopwords.words",
"nltk.stem.snowball.SnowballStemmer",
"logging.basicConfig",
"flask.Flask",
"os.environ.get",
"allennlp.models.archival.load_archive"
] |
[((210, 234), 'sys.path.append', 'sys.path.append', (['"""nbsvm"""'], {}), "('nbsvm')\n", (225, 234), False, 'import sys\n'), ((670, 709), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (689, 709), False, 'import logging\n'), ((721, 747), 'nltk.stem.snowball.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (736, 747), False, 'from nltk.stem.snowball import SnowballStemmer\n'), ((4703, 4718), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (4708, 4718), False, 'from flask import Flask, request, Response, jsonify, render_template, send_from_directory\n'), ((4820, 4833), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (4831, 4833), False, 'import hashlib\n'), ((4946, 5023), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {'disable': "['vectors', 'textcat', 'tagger', 'ner']"}), "('en_core_web_sm', disable=['vectors', 'textcat', 'tagger', 'ner'])\n", (4956, 5023), False, 'import spacy\n'), ((5444, 5545), 'lime.lime_text.LimeTextExplainer', 'LimeTextExplainer', ([], {'class_names': 'nbsvm_predictor.class_names', 'bow': '(True)', 'split_expression': 'split_expr'}), '(class_names=nbsvm_predictor.class_names, bow=True,\n split_expression=split_expr)\n', (5461, 5545), False, 'from lime.lime_text import LimeTextExplainer\n'), ((5593, 5694), 'lime.lime_text.LimeTextExplainer', 'LimeTextExplainer', ([], {'class_names': 'bert_predictor.class_names', 'bow': '(False)', 'split_expression': 'split_expr'}), '(class_names=bert_predictor.class_names, bow=False,\n split_expression=split_expr)\n', (5610, 5694), False, 'from lime.lime_text import LimeTextExplainer\n'), ((764, 790), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (779, 790), False, 'from nltk.corpus import stopwords\n'), ((5165, 5192), 'os.environ.get', 'os.environ.get', (['"""HOME"""', '"""."""'], {}), "('HOME', '.')\n", (5179, 5192), False, 'import os\n'), ((5292, 5317), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5315, 5317), False, 'import torch\n'), ((6204, 6291), 'flask.render_template', 'render_template', (['"""app.html"""'], {'google_analytics_ua': '"""UA-120916510-5"""', 'js_hash': 'js_hash'}), "('app.html', google_analytics_ua='UA-120916510-5', js_hash=\n js_hash)\n", (6219, 6291), False, 'from flask import Flask, request, Response, jsonify, render_template, send_from_directory\n'), ((6468, 6503), 'flask.send_from_directory', 'send_from_directory', (['"""static"""', 'path'], {}), "('static', path)\n", (6487, 6503), False, 'from flask import Flask, request, Response, jsonify, render_template, send_from_directory\n'), ((6717, 6735), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (6733, 6735), False, 'from flask import Flask, request, Response, jsonify, render_template, send_from_directory\n'), ((7943, 8142), 'flask.jsonify', 'jsonify', (["{'lime_scores': lime_scores, 'lime_tokens': lime_tokens, 'label': label,\n 'class_probabilities': class_probabilities, 'words': lime_tokens,\n 'output': previous_str, 'sentiment': label}"], {}), "({'lime_scores': lime_scores, 'lime_tokens': lime_tokens, 'label':\n label, 'class_probabilities': class_probabilities, 'words': lime_tokens,\n 'output': previous_str, 'sentiment': label})\n", (7950, 8142), False, 'from flask import Flask, request, Response, jsonify, render_template, send_from_directory\n'), ((868, 887), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (885, 887), False, 'from nltk.stem import WordNetLemmatizer\n'), ((1751, 1767), 'pathlib.Path', 'Path', (['model_path'], {}), '(model_path)\n', (1755, 1767), False, 'from pathlib import Path\n'), ((2617, 2635), 'pathlib.Path', 'Path', (['archive_path'], {}), '(archive_path)\n', (2621, 2635), False, 'from pathlib import Path\n'), ((2654, 2680), 'allennlp.models.archival.load_archive', 'load_archive', (['archive_path'], {}), '(archive_path)\n', (2666, 2680), False, 'from allennlp.models.archival import load_archive\n'), ((3287, 3305), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3303, 3305), False, 'import tempfile\n'), ((3474, 3507), 'allennlp.data.Vocabulary.from_files', 'Vocabulary.from_files', (['vocab_path'], {}), '(vocab_path)\n', (3495, 3507), False, 'from allennlp.data import Vocabulary\n'), ((3516, 3538), 'shutil.rmtree', 'shutil.rmtree', (['tempdir'], {}), '(tempdir)\n', (3529, 3538), False, 'import shutil\n'), ((4236, 4259), 'numpy.stack', 'np.stack', (['probs'], {'axis': '(0)'}), '(probs, axis=0)\n', (4244, 4259), True, 'import numpy as np\n'), ((6671, 6704), 'flask.Response', 'Response', ([], {'response': '""""""', 'status': '(200)'}), "(response='', status=200)\n", (6679, 6704), False, 'from flask import Flask, request, Response, jsonify, render_template, send_from_directory\n'), ((1840, 1854), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1851, 1854), False, 'import pickle\n'), ((3319, 3353), 'tarfile.open', 'tarfile.open', (['archive_path', '"""r:gz"""'], {}), "(archive_path, 'r:gz')\n", (3331, 3353), False, 'import tarfile\n'), ((3429, 3442), 'pathlib.Path', 'Path', (['tempdir'], {}), '(tempdir)\n', (3433, 3442), False, 'from pathlib import Path\n'), ((3777, 3792), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3790, 3792), False, 'import torch\n'), ((984, 1007), 'nltk.word_tokenize', 'word_tokenize', (['articles'], {}), '(articles)\n', (997, 1007), False, 'from nltk import word_tokenize\n')]
|
from django.db import models
from operation.models import Operation
from processor.utils import push_record_to_sqs_queue
import logging
SAFETY_LEVELS = (
(0, 'SAFE'),
(1, 'NOT CONFIRMED'),
(2, 'UNREACHABLE'),
(3, 'NEED_HELP'),
(4, 'NOT IN ZONE')
)
class Victim(models.Model):
"""
Used to store refugee information
"""
name = models.CharField(max_length=64)
phone_number = models.CharField(max_length=20, unique=True)
notification_contact_number = models.CharField(max_length=20, blank=True)
safety_level = models.IntegerField(choices=SAFETY_LEVELS, default=1)
retry_count = models.IntegerField(default=0)
location = models.TextField(null=True)
additional_information = models.TextField(null=True)
status_updated_by = models.TextField(null=True)
operation = models.ForeignKey(Operation,blank=True,default=None)
def save(self, *args, **kwags):
super(Victim, self).save(*args, **kwags)
logging.info('Added a new refugee with ID = %d' % self.id)
push_record_to_sqs_queue(self.id)
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"processor.utils.push_record_to_sqs_queue",
"django.db.models.IntegerField",
"logging.info"
] |
[((365, 396), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (381, 396), False, 'from django.db import models\n'), ((416, 460), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'unique': '(True)'}), '(max_length=20, unique=True)\n', (432, 460), False, 'from django.db import models\n'), ((495, 538), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)'}), '(max_length=20, blank=True)\n', (511, 538), False, 'from django.db import models\n'), ((558, 611), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'SAFETY_LEVELS', 'default': '(1)'}), '(choices=SAFETY_LEVELS, default=1)\n', (577, 611), False, 'from django.db import models\n'), ((630, 660), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (649, 660), False, 'from django.db import models\n'), ((676, 703), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (692, 703), False, 'from django.db import models\n'), ((733, 760), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (749, 760), False, 'from django.db import models\n'), ((785, 812), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (801, 812), False, 'from django.db import models\n'), ((829, 883), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Operation'], {'blank': '(True)', 'default': 'None'}), '(Operation, blank=True, default=None)\n', (846, 883), False, 'from django.db import models\n'), ((976, 1034), 'logging.info', 'logging.info', (["('Added a new refugee with ID = %d' % self.id)"], {}), "('Added a new refugee with ID = %d' % self.id)\n", (988, 1034), False, 'import logging\n'), ((1043, 1076), 'processor.utils.push_record_to_sqs_queue', 'push_record_to_sqs_queue', (['self.id'], {}), '(self.id)\n', (1067, 1076), False, 'from processor.utils import push_record_to_sqs_queue\n')]
|
from fastapi import APIRouter, Depends
from app.dtos.responses.actor import ActorsDto, ActorDto
from app.services.actor_service import ActorService
from app.services.implementations.actor_service_implementation import (
ActorServiceImplementation,
)
router = APIRouter(tags=["Actor Resource"])
@router.get(path="/actors", response_model=ActorsDto)
async def get_actors(
actor_service: ActorService = Depends(ActorServiceImplementation),
) -> ActorsDto:
return await actor_service.get_all_actors()
@router.get(path="/actors/{actor_id}", response_model=ActorDto)
async def get_actor(
actor_id: int, actor_service: ActorService = Depends(ActorServiceImplementation)
) -> ActorDto:
return await actor_service.get_actor(actor_id=actor_id)
|
[
"fastapi.Depends",
"fastapi.APIRouter"
] |
[((265, 299), 'fastapi.APIRouter', 'APIRouter', ([], {'tags': "['Actor Resource']"}), "(tags=['Actor Resource'])\n", (274, 299), False, 'from fastapi import APIRouter, Depends\n'), ((412, 447), 'fastapi.Depends', 'Depends', (['ActorServiceImplementation'], {}), '(ActorServiceImplementation)\n', (419, 447), False, 'from fastapi import APIRouter, Depends\n'), ((649, 684), 'fastapi.Depends', 'Depends', (['ActorServiceImplementation'], {}), '(ActorServiceImplementation)\n', (656, 684), False, 'from fastapi import APIRouter, Depends\n')]
|
import numpy as np
import matplotlib.pyplot as plt
# 计算delta
def calculate_delta(t, chosen_count, item):
if chosen_count[item] == 0:
return 1
else:
return np.sqrt(2 * np.log(t) / chosen_count[item])
def choose_arm(upper_bound_probs):
max = np.max(upper_bound_probs)
idx = np.where(upper_bound_probs == max) # 返回tuple,包含符合条件值的下标
idx = np.array(idx[0]) # 转为array
if np.size(idx) == 1:
return idx[0]
else:
return np.random.choice(idx, 1)[0]
def train():
# 时间
T = []
# 可选的臂(根据数据)
num_arms = 10
# 总回报
total_reward = 0
total_best_reward = 0
total_reward_with_T = []
total_regret_with_T = []
np.random.seed(23)
true_rewards_prop = np.random.uniform(low=0, high=1, size=num_arms) # 每个老虎机真实的吐钱概率
true_max_prop_arm = np.argmax(true_rewards_prop)
print("true reward prop: \n", true_rewards_prop)
print("\ntrue_max_prop_arm: ", true_max_prop_arm)
estimated_rewards = np.zeros(num_arms) # 每个老虎机吐钱的观测概率,初始都为0
chosen_count = np.zeros(num_arms) # 每个老虎机当前已经探索的次数,初始都为0
# for i in range(10):
# choosen_arm = i % 10
# reward = np.random.binomial(n=1, p=true_rewards_prop[choosen_arm])
# best_reward = np.random.binomial(n=1, p=true_rewards_prop[true_max_prop_arm])
#
# total_reward += reward
# total_best_reward += best_reward
# T.append(i)
# total_reward_with_T.append(total_reward)
# total_regret_with_T.append(total_best_reward - total_reward)
#
# if i < 10:
# estimated_rewards[choosen_arm] = reward
# else:
# # estimated_rewards[choosen_arm] = ((i - 1) * estimated_rewards[choosen_arm] + reward) / i
# estimated_rewards[choosen_arm] = (chosen_count[choosen_arm] * estimated_rewards[choosen_arm] + reward) / (
# chosen_count[choosen_arm] + 1)
# chosen_count[choosen_arm] += 1
print("\ninit estimated reward: ")
print(estimated_rewards)
# 初始化
for t in range(0, 20000):
upper_bound_probs = [estimated_rewards[item] + calculate_delta(t, chosen_count, item) for item in
range(num_arms)]
# 选择最大置信区间上界的arm
# choosen_arm = np.argmax(upper_bound_probs)
choosen_arm = choose_arm(upper_bound_probs)
reward = np.random.binomial(n=1, p=true_rewards_prop[choosen_arm])
best_reward = np.random.binomial(n=1, p=true_rewards_prop[true_max_prop_arm])
total_reward += reward
total_best_reward += best_reward
T.append(t)
total_reward_with_T.append(total_reward)
total_regret_with_T.append(total_best_reward - total_reward)
# 更新每个老虎机的吐钱概率
# estimated_rewards[choosen_arm] = ((t - 1) * estimated_rewards[choosen_arm] + reward) / t
estimated_rewards[choosen_arm] = (chosen_count[choosen_arm] * estimated_rewards[choosen_arm] + reward) / (
chosen_count[choosen_arm] + 1)
chosen_count[choosen_arm] += 1
# if t % 200 == 0:
# print("estimated reward: ")
# print(estimated_rewards)
print("\ntotal reward: ", total_reward)
print("\nbest reward: ", total_best_reward)
print("\nestimated reward: ")
print(estimated_rewards)
print("\nchoosen arm: ", chosen_count)
# CTR趋势画图
plt.xlabel("T")
plt.ylabel("Total regret")
plt.plot(T, total_regret_with_T)
# 存入路径
plt.savefig('./regret1.png')
if __name__ == "__main__":
# 训练
train()
|
[
"numpy.random.uniform",
"numpy.size",
"numpy.random.seed",
"numpy.random.binomial",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.log",
"numpy.zeros",
"numpy.max",
"numpy.where",
"numpy.array",
"numpy.random.choice",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((272, 297), 'numpy.max', 'np.max', (['upper_bound_probs'], {}), '(upper_bound_probs)\n', (278, 297), True, 'import numpy as np\n'), ((308, 342), 'numpy.where', 'np.where', (['(upper_bound_probs == max)'], {}), '(upper_bound_probs == max)\n', (316, 342), True, 'import numpy as np\n'), ((375, 391), 'numpy.array', 'np.array', (['idx[0]'], {}), '(idx[0])\n', (383, 391), True, 'import numpy as np\n'), ((699, 717), 'numpy.random.seed', 'np.random.seed', (['(23)'], {}), '(23)\n', (713, 717), True, 'import numpy as np\n'), ((743, 790), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1)', 'size': 'num_arms'}), '(low=0, high=1, size=num_arms)\n', (760, 790), True, 'import numpy as np\n'), ((831, 859), 'numpy.argmax', 'np.argmax', (['true_rewards_prop'], {}), '(true_rewards_prop)\n', (840, 859), True, 'import numpy as np\n'), ((994, 1012), 'numpy.zeros', 'np.zeros', (['num_arms'], {}), '(num_arms)\n', (1002, 1012), True, 'import numpy as np\n'), ((1055, 1073), 'numpy.zeros', 'np.zeros', (['num_arms'], {}), '(num_arms)\n', (1063, 1073), True, 'import numpy as np\n'), ((3387, 3402), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""T"""'], {}), "('T')\n", (3397, 3402), True, 'import matplotlib.pyplot as plt\n'), ((3407, 3433), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total regret"""'], {}), "('Total regret')\n", (3417, 3433), True, 'import matplotlib.pyplot as plt\n'), ((3438, 3470), 'matplotlib.pyplot.plot', 'plt.plot', (['T', 'total_regret_with_T'], {}), '(T, total_regret_with_T)\n', (3446, 3470), True, 'import matplotlib.pyplot as plt\n'), ((3486, 3514), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./regret1.png"""'], {}), "('./regret1.png')\n", (3497, 3514), True, 'import matplotlib.pyplot as plt\n'), ((410, 422), 'numpy.size', 'np.size', (['idx'], {}), '(idx)\n', (417, 422), True, 'import numpy as np\n'), ((2376, 2433), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': 'true_rewards_prop[choosen_arm]'}), '(n=1, p=true_rewards_prop[choosen_arm])\n', (2394, 2433), True, 'import numpy as np\n'), ((2456, 2519), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': 'true_rewards_prop[true_max_prop_arm]'}), '(n=1, p=true_rewards_prop[true_max_prop_arm])\n', (2474, 2519), True, 'import numpy as np\n'), ((476, 500), 'numpy.random.choice', 'np.random.choice', (['idx', '(1)'], {}), '(idx, 1)\n', (492, 500), True, 'import numpy as np\n'), ((193, 202), 'numpy.log', 'np.log', (['t'], {}), '(t)\n', (199, 202), True, 'import numpy as np\n')]
|
# encoding: utf-8
from os import path, getenv
from datetime import timedelta
import ast
basedir = path.abspath(path.dirname(__file__))
class Config (object):
APP_NAME = getenv('APP_NAME', 'Python Flask Boilerplate')
DEV = ast.literal_eval(getenv('DEV', 'True'))
DEBUG = ast.literal_eval(getenv('DEBUG', 'True'))
HOST = '0.0.0.0'
PORT = 5678
USER_DEFAULT_PASSWORD = '<PASSWORD>'
SQLALCHEMY_DATABASE_URI = getenv('SQLALCHEMY_DATABASE_URI', 'postgresql://postgres:654321@localhost:5432/postgres')
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_MIGRATE_REPO = path.join(basedir, 'db', 'db_repository')
'''Flask-JWT'''
SECRET_KEY = 'super-secret'
JWT_AUTH_URL_RULE = '/signin'
JWT_AUTH_USERNAME_KEY = 'name'
JWT_AUTH_PASSWORD_KEY = '<PASSWORD>'
JWT_EXPIRATION_DELTA = timedelta(seconds = 1800)
'''Docker-Network media-service container'''
# MEDIA_SERVICE_RESTFUL_API_URL = getenv('MEDIA_SERVICE_RESTFUL_API_URL', 'http://localhost:8080/index/api')
# MEDIA_SERVICE_SECRET = '<KEY>'
### SQLALCHEMY_DATABASE_URI = 'mysql://user:pass@server_ip:server_port/db_name'
current = Config
|
[
"os.path.dirname",
"os.path.join",
"os.getenv",
"datetime.timedelta"
] |
[((119, 141), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (131, 141), False, 'from os import path, getenv\n'), ((187, 233), 'os.getenv', 'getenv', (['"""APP_NAME"""', '"""Python Flask Boilerplate"""'], {}), "('APP_NAME', 'Python Flask Boilerplate')\n", (193, 233), False, 'from os import path, getenv\n'), ((441, 534), 'os.getenv', 'getenv', (['"""SQLALCHEMY_DATABASE_URI"""', '"""postgresql://postgres:654321@localhost:5432/postgres"""'], {}), "('SQLALCHEMY_DATABASE_URI',\n 'postgresql://postgres:654321@localhost:5432/postgres')\n", (447, 534), False, 'from os import path, getenv\n'), ((602, 643), 'os.path.join', 'path.join', (['basedir', '"""db"""', '"""db_repository"""'], {}), "(basedir, 'db', 'db_repository')\n", (611, 643), False, 'from os import path, getenv\n'), ((829, 852), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1800)'}), '(seconds=1800)\n', (838, 852), False, 'from datetime import timedelta\n'), ((261, 282), 'os.getenv', 'getenv', (['"""DEV"""', '"""True"""'], {}), "('DEV', 'True')\n", (267, 282), False, 'from os import path, getenv\n'), ((312, 335), 'os.getenv', 'getenv', (['"""DEBUG"""', '"""True"""'], {}), "('DEBUG', 'True')\n", (318, 335), False, 'from os import path, getenv\n')]
|
from django import forms
from django.contrib.auth import get_user_model
from qa.models import Question
from qa.models import Answer
class QuestionForm(forms.ModelForm):
user = forms.ModelChoiceField(
widget = forms.HiddenInput,
queryset = get_user_model().objects.all(),
disabled = True,)
class Meta:
model = Question
fields = ['title', 'question', 'user']
class AnswerForm(forms.ModelForm):
user = forms.ModelChoiceField(
widget = forms.HiddenInput,
queryset = get_user_model().objects.all(),
disabled = True,)
question = forms.ModelChoiceField(
widget = forms.HiddenInput,
queryset = Question.objects.all(),
disabled = True,)
class Meta:
model = Answer
fields = ['answer', 'question', 'user']
class AnswerAcceptanceForm(forms.ModelForm):
accepted = forms.BooleanField(widget = forms.HiddenInput,
required = False,
)
class Meta:
model = Answer
fields = ['accepted',]
|
[
"django.forms.BooleanField",
"qa.models.Question.objects.all",
"django.contrib.auth.get_user_model"
] |
[((995, 1055), 'django.forms.BooleanField', 'forms.BooleanField', ([], {'widget': 'forms.HiddenInput', 'required': '(False)'}), '(widget=forms.HiddenInput, required=False)\n', (1013, 1055), False, 'from django import forms\n'), ((784, 806), 'qa.models.Question.objects.all', 'Question.objects.all', ([], {}), '()\n', (804, 806), False, 'from qa.models import Question\n'), ((285, 301), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (299, 301), False, 'from django.contrib.auth import get_user_model\n'), ((595, 611), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (609, 611), False, 'from django.contrib.auth import get_user_model\n')]
|
from flamingo.url.conf import path
routers = [
path(url="/test", view_func_or_module="tapp.urls", name="test")
]
|
[
"flamingo.url.conf.path"
] |
[((53, 116), 'flamingo.url.conf.path', 'path', ([], {'url': '"""/test"""', 'view_func_or_module': '"""tapp.urls"""', 'name': '"""test"""'}), "(url='/test', view_func_or_module='tapp.urls', name='test')\n", (57, 116), False, 'from flamingo.url.conf import path\n')]
|
"""
===============================================
Repair EEG artefacts caused by ocular movements
===============================================
Identify "bad" components in ICA solution (e.g., components which are highly
correlated the time course of the electrooculogram).
Authors: <NAME> <<EMAIL>>
License: BSD (3-clause)
"""
import numpy as np
import matplotlib.pyplot as plt
from mne import open_report, events_from_annotations, Epochs
from mne.io import read_raw_fif
from mne.preprocessing import read_ica, corrmap
# All parameters are defined in config.py
from config import fname, parser, LoggingFormat
# Handle command line arguments
args = parser.parse_args()
subject = args.subject
print(LoggingFormat.PURPLE +
LoggingFormat.BOLD +
'Finding and removing bad components for subject %s' % subject +
LoggingFormat.END)
###############################################################################
# 1) Import the output from previous processing step
input_file = fname.output(subject=subject,
processing_step='repair_bads',
file_type='raw.fif')
raw = read_raw_fif(input_file, preload=True)
# activate average reference
raw.apply_proj()
###############################################################################
# 2) Import ICA weights from precious processing step
ica_file = fname.output(subject=subject,
processing_step='fit_ica',
file_type='ica.fif')
ica = read_ica(ica_file)
###############################################################################
# 3) Find bad components via correlation with template ICA
temp_subjs = [2, 10]
# temp_raws = []
temp_icas = []
# import template subjects
for subj in temp_subjs:
# temp_raws.append(read_raw_fif(fname.output(subject=subj,
# processing_step='repair_bads',
# file_type='raw.fif')))
temp_icas.append(read_ica(fname.output(subject=subj,
processing_step='fit_ica',
file_type='ica.fif')))
# set thresholds for correlation
if subject in {5, 28, 32, 39, 45}:
threshold = 0.90
else:
threshold = 0.85
# compute correlations with template ocular movements up/down and left/right
corrmap(icas=[temp_icas[1], ica],
template=(0, 0), threshold=threshold, label='blink_up', plot=False)
corrmap(icas=[temp_icas[1], ica],
template=(0, 1), threshold=threshold, label='blink_side', plot=False)
# compute correlations with template ocular movements that look slightly
# different
corrmap(icas=[temp_icas[0], ica],
template=(0, 0), threshold=threshold, label='blink_misc', plot=False)
corrmap(icas=[temp_icas[0], ica],
template=(0, 1), threshold=threshold, label='blink_misc', plot=False)
###############################################################################
# 4) Create summary plots to show signal correction on main experimental
# condition
# create a-cue epochs
a_evs = events_from_annotations(raw, regexp='^(70)')[0]
a_epo = Epochs(raw, a_evs,
tmin=-2.0,
tmax=2.0,
reject_by_annotation=True,
proj=False,
preload=True)
a_epo.apply_baseline(baseline=(-0.3, -0.05))
a_evo = a_epo.average()
# loop over identified "bad" components
bad_components = []
for label in ica.labels_:
bad_components.extend(ica.labels_[label])
for bad_comp in np.unique(bad_components):
# show component frequency spectrum
fig_comp = ica.plot_properties(a_epo,
picks=bad_comp,
psd_args={'fmax': 35.},
show=False)[0]
# show how the signal is affected by component rejection
fig_evoked = ica.plot_overlay(a_evo, exclude=[bad_comp], show=False)
plt.close(fig_evoked)
# create HTML report
with open_report(fname.report(subject=subject)[0]) as report:
report.add_figs_to_section(fig_comp, 'Component %s identified '
'by correlation with template'
% bad_comp,
section='ICA',
replace=True)
report.add_figs_to_section(fig_evoked, 'Component %s rejected'
% bad_comp,
section='ICA',
replace=True)
report.save(fname.report(subject=subject)[1], overwrite=True,
open_browser=False)
# add bad components to exclusion list
ica.exclude = np.unique(bad_components)
# apply ica weights to data
ica.apply(raw)
###############################################################################
# 5) Save repaired data set
# output path
output_path = fname.output(processing_step='repaired_with_ica',
subject=subject,
file_type='raw.fif')
# save file
raw.save(output_path, overwrite=True)
|
[
"mne.io.read_raw_fif",
"mne.events_from_annotations",
"config.parser.parse_args",
"mne.preprocessing.read_ica",
"matplotlib.pyplot.close",
"mne.preprocessing.corrmap",
"mne.Epochs",
"config.fname.report",
"config.fname.output",
"numpy.unique"
] |
[((659, 678), 'config.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (676, 678), False, 'from config import fname, parser, LoggingFormat\n'), ((1002, 1088), 'config.fname.output', 'fname.output', ([], {'subject': 'subject', 'processing_step': '"""repair_bads"""', 'file_type': '"""raw.fif"""'}), "(subject=subject, processing_step='repair_bads', file_type=\n 'raw.fif')\n", (1014, 1088), False, 'from config import fname, parser, LoggingFormat\n'), ((1142, 1180), 'mne.io.read_raw_fif', 'read_raw_fif', (['input_file'], {'preload': '(True)'}), '(input_file, preload=True)\n', (1154, 1180), False, 'from mne.io import read_raw_fif\n'), ((1373, 1450), 'config.fname.output', 'fname.output', ([], {'subject': 'subject', 'processing_step': '"""fit_ica"""', 'file_type': '"""ica.fif"""'}), "(subject=subject, processing_step='fit_ica', file_type='ica.fif')\n", (1385, 1450), False, 'from config import fname, parser, LoggingFormat\n'), ((1505, 1523), 'mne.preprocessing.read_ica', 'read_ica', (['ica_file'], {}), '(ica_file)\n', (1513, 1523), False, 'from mne.preprocessing import read_ica, corrmap\n'), ((2372, 2477), 'mne.preprocessing.corrmap', 'corrmap', ([], {'icas': '[temp_icas[1], ica]', 'template': '(0, 0)', 'threshold': 'threshold', 'label': '"""blink_up"""', 'plot': '(False)'}), "(icas=[temp_icas[1], ica], template=(0, 0), threshold=threshold,\n label='blink_up', plot=False)\n", (2379, 2477), False, 'from mne.preprocessing import read_ica, corrmap\n'), ((2482, 2589), 'mne.preprocessing.corrmap', 'corrmap', ([], {'icas': '[temp_icas[1], ica]', 'template': '(0, 1)', 'threshold': 'threshold', 'label': '"""blink_side"""', 'plot': '(False)'}), "(icas=[temp_icas[1], ica], template=(0, 1), threshold=threshold,\n label='blink_side', plot=False)\n", (2489, 2589), False, 'from mne.preprocessing import read_ica, corrmap\n'), ((2680, 2787), 'mne.preprocessing.corrmap', 'corrmap', ([], {'icas': '[temp_icas[0], ica]', 'template': '(0, 0)', 'threshold': 'threshold', 'label': '"""blink_misc"""', 'plot': '(False)'}), "(icas=[temp_icas[0], ica], template=(0, 0), threshold=threshold,\n label='blink_misc', plot=False)\n", (2687, 2787), False, 'from mne.preprocessing import read_ica, corrmap\n'), ((2792, 2899), 'mne.preprocessing.corrmap', 'corrmap', ([], {'icas': '[temp_icas[0], ica]', 'template': '(0, 1)', 'threshold': 'threshold', 'label': '"""blink_misc"""', 'plot': '(False)'}), "(icas=[temp_icas[0], ica], template=(0, 1), threshold=threshold,\n label='blink_misc', plot=False)\n", (2799, 2899), False, 'from mne.preprocessing import read_ica, corrmap\n'), ((3157, 3254), 'mne.Epochs', 'Epochs', (['raw', 'a_evs'], {'tmin': '(-2.0)', 'tmax': '(2.0)', 'reject_by_annotation': '(True)', 'proj': '(False)', 'preload': '(True)'}), '(raw, a_evs, tmin=-2.0, tmax=2.0, reject_by_annotation=True, proj=\n False, preload=True)\n', (3163, 3254), False, 'from mne import open_report, events_from_annotations, Epochs\n'), ((3544, 3569), 'numpy.unique', 'np.unique', (['bad_components'], {}), '(bad_components)\n', (3553, 3569), True, 'import numpy as np\n'), ((4743, 4768), 'numpy.unique', 'np.unique', (['bad_components'], {}), '(bad_components)\n', (4752, 4768), True, 'import numpy as np\n'), ((4950, 5041), 'config.fname.output', 'fname.output', ([], {'processing_step': '"""repaired_with_ica"""', 'subject': 'subject', 'file_type': '"""raw.fif"""'}), "(processing_step='repaired_with_ica', subject=subject,\n file_type='raw.fif')\n", (4962, 5041), False, 'from config import fname, parser, LoggingFormat\n'), ((3101, 3145), 'mne.events_from_annotations', 'events_from_annotations', (['raw'], {'regexp': '"""^(70)"""'}), "(raw, regexp='^(70)')\n", (3124, 3145), False, 'from mne import open_report, events_from_annotations, Epochs\n'), ((3952, 3973), 'matplotlib.pyplot.close', 'plt.close', (['fig_evoked'], {}), '(fig_evoked)\n', (3961, 3973), True, 'import matplotlib.pyplot as plt\n'), ((2014, 2088), 'config.fname.output', 'fname.output', ([], {'subject': 'subj', 'processing_step': '"""fit_ica"""', 'file_type': '"""ica.fif"""'}), "(subject=subj, processing_step='fit_ica', file_type='ica.fif')\n", (2026, 2088), False, 'from config import fname, parser, LoggingFormat\n'), ((4021, 4050), 'config.fname.report', 'fname.report', ([], {'subject': 'subject'}), '(subject=subject)\n', (4033, 4050), False, 'from config import fname, parser, LoggingFormat\n'), ((4598, 4627), 'config.fname.report', 'fname.report', ([], {'subject': 'subject'}), '(subject=subject)\n', (4610, 4627), False, 'from config import fname, parser, LoggingFormat\n')]
|
from tkinter import *
from classes.AttackBarbarians import AttackBarbarians
from classes.ExploreFog import ExploreFog
from classes.Screenshot import Screenshot
from classes.tester import Tester
starter = Tk()
starter.winfo_toplevel().title('Rise of Kingdom - Automator')
starter.geometry('250x500')
class MainInterface:
v=StringVar()
v.set('BlueStacks')
i=IntVar()
i.set(26)
q=IntVar()
q.set(35)
x=IntVar()
x.set(4)
txt_process_name = Entry(starter,text=v)
txt_minbarb_level = Entry(starter,text=i)
txt_maxbarb_level = Entry(starter,text=q)
txt_troop_count = Entry(starter,text=x)
def __init__(self, barb_level, function):
self.barb_level = barb_level
self.function = function
def barb_allday(self):
process_name = self.txt_process_name.get()
minbarb_level = self.txt_minbarb_level.get()
maxbarb_level = self.txt_maxbarb_level.get()
troop_count = self.txt_troop_count.get()
while True:
attack = AttackBarbarians(minbarb_level,maxbarb_level,troop_count,process_name)
attack.start()
def test_start(self):
Tester.start()
def start_explore(self):
ExploreFog.start()
def take_screenshot(self):
Screenshot.shot('default.png')
def start_interface(self):
lbl_process_name = Label(starter, text="Enter process name")
lbl_minbarb_attack = Label(starter, text="Enter barbarian minlevel")
lbl_maxbarb_attack = Label(starter, text="Enter barbarian maxlevel")
lbl_barb_troop = Label(starter, text="Enter troop number and press button")
#btn_barb_attack = Button(starter, text="Attack Barbarian", command=(lambda: self.start_attack()))
#btn_explore = Button(starter, text="Explore Kingdom", command=(lambda: self.start_explore()))
#btn_test = Button(starter, text="test method", command=(lambda: self.test_start()))
btn_take_screenshot = Button(starter, text="Screenshot", command=(lambda: self.take_screenshot()))
#lbl_barb_allday = Label(starter, text="barb_allday")
btn_barb_allday = Button(starter, text="barb_allday", command=(lambda: self.barb_allday()))
lbl_process_name.pack()
self.txt_process_name.pack()
lbl_minbarb_attack.pack()
self.txt_minbarb_level.pack()
lbl_maxbarb_attack.pack()
self.txt_maxbarb_level.pack()
lbl_barb_troop.pack()
self.txt_troop_count.pack()
#btn_barb_attack.pack()
#btn_explore.pack()
#btn_test.pack()
btn_take_screenshot.pack()
# lbl_barb_allday.pack()
btn_barb_allday.pack()
starter.mainloop()
interface = MainInterface(barb_level=12, function='start_attack()')
interface.start_interface()
|
[
"classes.ExploreFog.ExploreFog.start",
"classes.tester.Tester.start",
"classes.Screenshot.Screenshot.shot",
"classes.AttackBarbarians.AttackBarbarians"
] |
[((1166, 1180), 'classes.tester.Tester.start', 'Tester.start', ([], {}), '()\n', (1178, 1180), False, 'from classes.tester import Tester\n'), ((1219, 1237), 'classes.ExploreFog.ExploreFog.start', 'ExploreFog.start', ([], {}), '()\n', (1235, 1237), False, 'from classes.ExploreFog import ExploreFog\n'), ((1278, 1308), 'classes.Screenshot.Screenshot.shot', 'Screenshot.shot', (['"""default.png"""'], {}), "('default.png')\n", (1293, 1308), False, 'from classes.Screenshot import Screenshot\n'), ((1034, 1107), 'classes.AttackBarbarians.AttackBarbarians', 'AttackBarbarians', (['minbarb_level', 'maxbarb_level', 'troop_count', 'process_name'], {}), '(minbarb_level, maxbarb_level, troop_count, process_name)\n', (1050, 1107), False, 'from classes.AttackBarbarians import AttackBarbarians\n')]
|
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ithaca: Restoring and attributing ancient texts with deep neural networks."""
import bz2
import distutils
import functools
import glob
import os
import pickle
from absl import app
from absl import flags
from absl import logging
import dataloader
from ithaca.models.model import Model
from ithaca.util.alphabet import GreekAlphabet
from ithaca.util.loss import categorical_kl_divergence
from ithaca.util.loss import cross_entropy_label_smoothing_loss
from ithaca.util.loss import cross_entropy_loss
from ithaca.util.loss import cross_entropy_mask_loss
from ithaca.util.loss import date_loss_l1
from ithaca.util.optim import adaptive_grad_clip
from ithaca.util.optim import linear_warmup_and_sqrt_decay
from ithaca.util.optim import linear_weight
from ithaca.util.region_names import load_region_maps
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import platform
from jaxline import utils as jl_utils
import numpy as np
import optax
import tensorflow_datasets.public_api as tfds
FLAGS = flags.FLAGS
class Experiment(experiment.AbstractExperiment):
"""Ithaca experiment."""
# Holds a map from object properties that will be checkpointed to their name
# within a checkpoint. Currently it is assume that these are all sharded
# device arrays.
CHECKPOINT_ATTRS = {
'_params': 'params',
'_opt_state': 'opt_state',
}
def __init__(self, mode, init_rng, config):
"""Initializes experiment."""
super(Experiment, self).__init__(mode=mode)
self.mode = mode
self.init_rng = init_rng
self.config = config
# Same random key on each device.
self._rng_key = jl_utils.bcast_local_devices(self.init_rng)
# Checkpointed experiment state.
self._params = None
self._opt_state = None
# Input pipelines.
self._train_input = None
self._eval_input = None
# Forward and update functions.
self.forward = Model(**self.config.model)
self._update_func = jax.pmap(
self._update_func, axis_name='i', donate_argnums=(0, 1))
self._learning_rate_fn = functools.partial(
linear_warmup_and_sqrt_decay,
max_lr=self.config.optimizer.kwargs.learning_rate,
warmup_steps=self.config.optimizer.warmup)
self._opt_init, self._opt_update = self.optimizer()
if 'use_jit' in self.config.evaluation and self.config.evaluation.use_jit:
self._eval_batch = jax.jit(self._eval_batch)
# Create alphabet
alphabet_kwargs = dict(self.config.alphabet)
wordlist_path = alphabet_kwargs.pop('wordlist_path')
with open(wordlist_path, 'r') as f:
self._alphabet = GreekAlphabet(wordlist_file=f, **alphabet_kwargs)
# Create region mapping
self._region_map = {'main': None, 'sub': None}
if self.config.dataset.region_main_path:
with open(self.config.dataset.region_main_path, 'r') as f:
self._region_map['main'] = load_region_maps(f)
if self.config.dataset.region_sub_path:
with open(self.config.dataset.region_sub_path, 'r') as f:
self._region_map['sub'] = load_region_maps(f)
def optimizer(self):
config_opt = self.config.optimizer
kwargs = config_opt.kwargs.to_dict()
kwargs['learning_rate'] = self._learning_rate_fn
opt = getattr(optax, config_opt.name)(**kwargs)
if hasattr(config_opt, 'clip_adaptive') and config_opt.clip_adaptive:
if config_opt.clip_level > 0.:
opt = optax.chain(adaptive_grad_clip(config_opt.clip_level), opt)
elif config_opt.clip_level > 0.:
opt = optax.chain(optax.clip_by_global_norm(config_opt.clip_level), opt)
return opt
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step, rng, **unused_args):
"""See base class."""
if self._train_input is None:
self._initialize_train(rng)
batch = next(self._train_input)
(self._params, self._opt_state, scalars) = (
self._update_func(self._params, self._opt_state, global_step, batch,
rng))
scalars = jl_utils.get_first(scalars)
return scalars
def _initialize_train(self, rng):
# Check we haven't already restored params
if self._params is None:
logging.info(
'Initializing parameters rather than restoring from checkpoint.')
batch = next(self._build_train_input())
rng = jl_utils.get_first(rng)
params_rng, dropout_rng = jax.random.split(rng)
params_rng = jl_utils.bcast_local_devices(params_rng)
dropout_rng = jl_utils.bcast_local_devices(dropout_rng)
init_net = jax.pmap(
functools.partial(self.forward.init, is_training=True))
self._params = init_net({
'params': params_rng,
'dropout': dropout_rng
},
text_char=batch['text_char'],
text_word=batch['text_word'])
init_opt = jax.pmap(self._opt_init)
self._opt_state = init_opt(self._params)
self._train_input = jl_utils.py_prefetch(self._build_train_input)
self._train_input = jl_utils.double_buffer_on_gpu(self._train_input)
def _build_train_input(self):
"""See base class."""
num_devices = jax.device_count()
global_batch_size = self.config.training.batch_size
per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
logging.info(
'num_devices: %d, per_device_batch_size: %d, global_batch_size: %d',
num_devices, per_device_batch_size, global_batch_size)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
config_dataset = self.config.dataset
with open(config_dataset.dataset_path) as dataset_file:
ds = dataloader.loader_tf(
per_device_batch_size,
config_dataset,
self._region_map,
alphabet=self._alphabet,
dataset_file=dataset_file,
mode='train')
ds = ds.batch(jax.local_device_count())
return iter(tfds.as_numpy(ds))
def _loss_fn(self, params, batch, global_step, rng):
text_char = batch['text_char']
text_word = batch['text_word']
text_unmasked = batch['text_unmasked']
text_mask = batch['text_mask']
next_sentence_mask = batch['next_sentence_mask']
next_sentence_label = batch['next_sentence_label']
subregion = batch['region_sub_id']
date_min = batch['date_min']
date_max = batch['date_max']
date_dist = batch['date_dist']
date_available = batch['date_available']
eps = 1e-6
(date_pred, subregion_logits, mask_logits, nsp_logits) = self.forward.apply(
params,
text_char=text_char,
text_word=text_word,
text_char_onehot=None,
text_word_onehot=None,
is_training=True,
rngs={'dropout': rng})
date_loss = 0.
subregion_loss = 0.
subregion_accuracy = 0.
mask_loss = 0.
mask_accuracy = 0.
nsp_loss = 0.
nsp_accuracy = 0.
# Date loss
if self.config.loss.date.enabled:
if self.config.loss.date.label_smoothing > 0:
date_dist_prob = jnp.exp(date_dist) # logprob to prob
date_dist_prob_smooth = date_dist_prob * jax.random.uniform(
rng,
shape=date_dist_prob.shape,
dtype=date_dist_prob.dtype,
minval=1 - self.config.loss.date.label_smoothing,
maxval=1 + self.config.loss.date.label_smoothing)
date_dist_prob_smooth /= date_dist_prob_smooth.sum(axis=-1)[:,
jnp.newaxis]
date_dist_prob_smooth = jnp.clip(date_dist_prob_smooth, 1e-6, 1)
date_dist = jnp.log(date_dist_prob_smooth)
date_loss = 0.
if 'l1' in self.config.loss.date.type.split('+'):
date_pred_x = jnp.arange(
self.config.dataset.date_min +
self.config.dataset.date_interval / 2,
self.config.dataset.date_max +
self.config.dataset.date_interval / 2,
self.config.dataset.date_interval).reshape(-1, 1)
date_pred_val = jnp.dot(jax.nn.softmax(date_pred, axis=-1), date_pred_x)
date_loss_l1_ = jax.vmap(date_loss_l1)(date_pred_val, date_min,
date_max, date_available)
jnp.nan_to_num(date_loss_l1_, copy=False)
date_loss += (
jnp.mean(date_loss_l1_, axis=0) * self.config.loss.date.weight_l1)
if 'dist' in self.config.loss.date.type.split('+'):
date_loss_dist_ = categorical_kl_divergence(date_dist, date_pred)
date_loss_dist_ *= date_available
jnp.nan_to_num(date_loss_dist_, copy=False)
date_loss += (
jnp.mean(date_loss_dist_, axis=0) *
self.config.loss.date.weight_dist)
date_loss *= linear_weight(global_step, self.config.loss.date.step_start,
self.config.loss.date.step_end)
# Region and subregion loss
if self.config.loss.region.enabled:
subregion_loss = jnp.mean(
cross_entropy_label_smoothing_loss(
subregion_logits,
subregion,
label_smoothing=self.config.loss.region.label_smoothing), 0)
jnp.nan_to_num(subregion_loss, copy=False)
subregion_loss *= self.config.loss.region.weight
subregion_accuracy = jnp.mean(
jnp.argmax(subregion_logits, -1) == subregion)
w = linear_weight(global_step, self.config.loss.region.step_start,
self.config.loss.region.step_end)
subregion_loss *= w
# Mask loss
if self.config.loss.mask.enabled:
mask_loss = jnp.sum(
cross_entropy_label_smoothing_loss(
mask_logits,
text_unmasked,
text_mask,
label_smoothing=self.config.loss.mask.label_smoothing), 1) # [B]
assert mask_loss.ndim == 1
jnp.nan_to_num(mask_loss, copy=False)
mask_loss = jnp.mean(mask_loss, 0) * self.config.loss.mask.weight # []
mask_all_accuracy = (jnp.argmax(mask_logits, -1) == text_unmasked).astype(
mask_logits.dtype)
mask_accuracy = jnp.divide(
jnp.sum(
jnp.multiply(mask_all_accuracy,
text_mask.astype(mask_logits.dtype))),
jnp.sum(text_mask) + eps)
mask_loss *= linear_weight(global_step, self.config.loss.mask.step_start,
self.config.loss.mask.step_end)
# NSP loss
if self.config.loss.nsp.enabled:
nsp_loss = jnp.sum(
jax.vmap(jax.vmap(cross_entropy_mask_loss))(nsp_logits,
next_sentence_label,
next_sentence_mask),
1) # [B]
assert nsp_loss.ndim == 1
jnp.nan_to_num(nsp_loss, copy=False)
nsp_loss = jnp.mean(nsp_loss, 0) * self.config.loss.nsp.weight
nsp_all_accuracy = (jnp.argmax(
nsp_logits, -1) == next_sentence_label).astype(nsp_logits.dtype)
nsp_accuracy = jnp.divide(
jnp.sum(
jnp.multiply(nsp_all_accuracy,
next_sentence_mask.astype(nsp_logits.dtype))),
jnp.sum(next_sentence_mask) + eps)
nsp_loss *= linear_weight(global_step, self.config.loss.nsp.step_start,
self.config.loss.nsp.step_end)
loss = date_loss + subregion_loss + mask_loss + nsp_loss
scaled_loss = loss / jax.device_count()
# NOTE: We use scaled_loss for grads and unscaled for logging.
return scaled_loss, (loss, date_loss, subregion_loss, subregion_accuracy,
mask_loss, mask_accuracy, nsp_loss, nsp_accuracy)
def _update_func(self, params, opt_state, global_step, batch, rng):
"""Applies an update to parameters and returns new state."""
# This function computes the gradient of the first output of loss_fn and
# passes through the other arguments unchanged.
grad_loss_fn = jax.grad(self._loss_fn, has_aux=True)
scaled_grads, (loss, date_loss, subregion_loss, subregion_accuracy,
mask_loss, mask_accuracy, nsp_loss,
nsp_accuracy) = grad_loss_fn(params, batch, global_step, rng)
scaled_grads = jax.tree_map(jnp.nan_to_num, scaled_grads)
grads = jl_utils.tree_psum(scaled_grads, axis_name='i')
# Compute and apply updates via our optimizer.
learning_rate = self._learning_rate_fn(global_step)
updates, opt_state = self._opt_update(grads, opt_state, params=params)
params = optax.apply_updates(params, updates)
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = {
'loss/train': loss,
'loss/date': date_loss,
'loss/subregion': subregion_loss,
'loss/mask': mask_loss,
'loss/nsp': nsp_loss,
'accuracy/subregion': subregion_accuracy,
'accuracy/mask': mask_accuracy,
'accuracy/nsp': nsp_accuracy,
'opt/learning_rate': learning_rate,
'opt/grad_norm': optax.global_norm(grads),
'opt/param_norm': optax.global_norm(params),
}
scalars = jax.lax.pmean(scalars, axis_name='i')
return params, opt_state, scalars
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, rng, **unused_kwargs):
"""See base class."""
if self._eval_input is None:
self._initialize_eval()
global_step = np.array(jl_utils.get_first(global_step))
summary, outputs = self._eval_epoch(jl_utils.get_first(rng))
for k, v in summary.items():
summary[k] = np.array(v)
score = summary['score/eval']
logging.info('[Step %d] eval_score=%.2f', global_step, score)
# Log outputs
checkpoint_dir = jl_utils.get_checkpoint_dir(FLAGS.config,
jax.process_index())
outputs_path = os.path.join(checkpoint_dir, 'best_outputs.pkl.bz2')
score_path = os.path.join(checkpoint_dir, 'best_score.txt')
model_log_path = os.path.join(checkpoint_dir, 'model_log')
best_model_log_path = os.path.join(checkpoint_dir, 'best_model_log')
# Check for preexisting outputs
best_score = None
best_step = None
if os.path.exists(score_path):
with open(score_path, 'r') as f:
tok = f.read().strip().split(' ')
best_step = int(tok[0])
best_score = float(tok[1])
# Store outputs if score is better
if best_score is None or (score > best_score and global_step > best_step):
best_score = score
with open(score_path, 'w') as f:
f.write(f'{global_step} {best_score}')
with open(outputs_path, 'wb') as f:
outputs_pkl = pickle.dumps(outputs, protocol=2)
outputs_pkl_bz2 = bz2.compress(outputs_pkl)
f.write(outputs_pkl_bz2)
if self.config.evaluation.store_model_log:
if os.path.isdir(best_model_log_path):
map(os.remove, glob.glob(best_model_log_path + '/*'))
else:
os.makedirs(best_model_log_path)
distutils.dir_util.copy_tree(model_log_path, best_model_log_path)
logging.info('[Step %d] Writing eval outputs: %s.', global_step,
outputs_path)
# Log best score
summary['score/eval_best'] = best_score
return summary
def _initialize_eval(self):
self._eval_input = jl_utils.py_prefetch(self._build_eval_input)
def _build_eval_input(self):
"""Builds the evaluation input pipeline."""
config_dataset = self.config.dataset
with open(config_dataset.dataset_path) as dataset_file:
ds = dataloader.loader_tf(
self.config.evaluation.batch_size,
config_dataset,
self._region_map,
alphabet=self._alphabet,
dataset_file=dataset_file,
mode=self.config.evaluation.mode)
return iter(tfds.as_numpy(ds))
def _eval_batch(self, params, batch, rng):
"""Evaluates a batch."""
phi_id = batch['id']
text_char = batch['text_char']
text_word = batch['text_word']
text_unmasked = batch['text_unmasked']
text_mask = batch['text_mask']
next_sentence_mask = batch['next_sentence_mask']
next_sentence_label = batch['next_sentence_label']
subregion = batch['region_sub_id']
date_min = batch['date_min']
date_max = batch['date_max']
date_dist = batch['date_dist']
date_available = batch['date_available']
# with hlogging.context() as log:
(date_pred, subregion_logits, mask_logits, nsp_logits) = self.forward.apply(
params,
text_char=text_char,
text_word=text_word,
text_char_onehot=None,
text_word_onehot=None,
is_training=False,
rngs={'dropout': rng})
# Log model weights
model_log = {}
subregion_loss = 0.
subregion_accuracy = 0.
date_loss = 0.
date_l1_loss = 0.
nsp_loss = 0.
nsp_accuracy = 0.
# eps = 1e-6
date_count = 0
mask_count = 0
nsp_count = 0
# Date loss
if self.config.loss.date.enabled:
date_pred_x = jnp.arange(
self.config.dataset.date_min + self.config.dataset.date_interval / 2,
self.config.dataset.date_max + self.config.dataset.date_interval / 2,
self.config.dataset.date_interval).reshape(-1, 1)
date_pred_val = jnp.dot(jax.nn.softmax(date_pred, axis=-1), date_pred_x)
date_l1_loss = jnp.sum(
jax.vmap(date_loss_l1)(date_pred_val, date_min, date_max,
date_available),
axis=0)
if 'l1' in self.config.loss.date.type.split('+'):
date_loss += date_l1_loss * self.config.loss.date.weight_l1
if 'dist' in self.config.loss.date.type.split('+'):
date_loss_dist_ = categorical_kl_divergence(date_dist, date_pred)
date_loss_dist_ *= date_available
date_loss += (
jnp.sum(date_loss_dist_, axis=0) *
self.config.loss.date.weight_dist)
date_count = jnp.sum(date_available)
# Region and subregion loss
if self.config.loss.region.enabled:
subregion_loss = jnp.sum(
cross_entropy_loss(subregion_logits, subregion), 0)
subregion_loss *= self.config.loss.region.weight
subregion_accuracy = jnp.mean(
jnp.argmax(subregion_logits, -1) == subregion)
# Mask loss
if self.config.loss.mask.enabled:
mask_loss = jnp.sum(
cross_entropy_label_smoothing_loss(
mask_logits, text_unmasked, text_mask, label_smoothing=0),
1) # [B]
# mask_loss /= jnp.sum(text_mask, axis=1) + eps # [B]
assert mask_loss.ndim == 1
mask_loss = jnp.mean(mask_loss, 0) * self.config.loss.mask.weight # []
mask_all_accuracy = (jnp.argmax(mask_logits, -1) == text_unmasked).astype(
mask_logits.dtype)
mask_accuracy = jnp.sum(
jnp.multiply(mask_all_accuracy, text_mask.astype(mask_logits.dtype)))
mask_count = jnp.sum(text_mask)
# NSP loss
if self.config.loss.nsp.enabled:
nsp_loss = jnp.sum(
jax.vmap(jax.vmap(cross_entropy_mask_loss))(nsp_logits,
next_sentence_label,
next_sentence_mask),
1) # [B]
assert nsp_loss.ndim == 1
nsp_loss = jnp.sum(nsp_loss, 0) * self.config.loss.nsp.weight
nsp_all_accuracy = (jnp.argmax(
nsp_logits, -1) == next_sentence_label).astype(nsp_logits.dtype)
nsp_accuracy = jnp.sum(
jnp.multiply(nsp_all_accuracy,
next_sentence_mask.astype(nsp_logits.dtype)))
nsp_count = jnp.sum(next_sentence_mask)
# Outputs
scalars = {
'score/eval':
(mask_accuracy + subregion_accuracy - date_l1_loss * 0.01),
'loss/eval': mask_loss + date_loss + subregion_loss,
'loss/date': date_loss,
'loss/date_l1': date_l1_loss,
'loss/subregion': subregion_loss,
'loss/mask': mask_loss,
'loss/nsp': nsp_loss,
'count/date': date_count,
'count/nsp': nsp_count,
'count/mask': mask_count,
'accuracy/subregion': subregion_accuracy,
'accuracy/mask': mask_accuracy,
'accuracy/nsp': nsp_accuracy,
}
outputs = {
'outputs/id': phi_id,
'outputs/date_pred': date_pred.astype('float16'),
'outputs/date_min': date_min,
'outputs/date_max': date_max,
'outputs/date_dist': date_dist.astype('float16'),
'outputs/date_available': date_available,
'outputs/subregion_logits': subregion_logits.astype('float16'),
'outputs/subregion': subregion,
}
return scalars, outputs, model_log
def _eval_epoch(self, rng):
"""Evaluates an epoch."""
summary = {}
outputs = {}
total_num_sequences = 0
# Prepare directories for storing model log
checkpoint_dir = jl_utils.get_checkpoint_dir(FLAGS.config,
jax.process_index())
model_log_path = os.path.join(checkpoint_dir, 'model_log')
if self.config.evaluation.store_model_log:
if os.path.isdir(model_log_path):
map(os.remove, glob.glob(model_log_path + '/*'))
else:
os.makedirs(model_log_path)
# Checkpoints broadcast for each local device
params = jl_utils.get_first(self._params)
# Model log buffer initialisation
model_log_buffer = []
def _flush_model_log_buffer(model_log_buffer):
"""Writes model log to bz2 pickle files."""
while model_log_buffer:
model_log_batch_path, model_log_pkl_bz2 = model_log_buffer.pop(0)
with open(model_log_batch_path, 'wb') as f:
f.write(model_log_pkl_bz2)
# Converting to numpy here allows us to reset the generator
for batch in self._eval_input():
# Make sure that the input has batch_dim=1
assert batch['text_char'].shape[0] == 1
summary_batch, outputs_batch, model_log_batch = self._eval_batch(
params, batch, rng)
# Append batch values to dictionary
for k, v in summary_batch.items():
summary[k] = summary.get(k, 0) + v
for k, v in outputs_batch.items():
outputs.setdefault(k, []).append(v)
total_num_sequences += self.config.evaluation.batch_size
# Store model log per batch
if self.config.evaluation.store_model_log:
# Append to buffer
model_log_batch_path = os.path.join(
model_log_path,
str(outputs_batch['outputs/id'][0]) + '.pkl.bz2')
model_log_pkl = pickle.dumps(model_log_batch, protocol=2)
model_log_pkl_bz2 = bz2.compress(model_log_pkl)
model_log_buffer += [(model_log_batch_path, model_log_pkl_bz2)]
# Flush model log buffer
if (len(model_log_buffer) %
self.config.evaluation.store_model_log_steps == 0):
_flush_model_log_buffer(model_log_buffer)
# Flush remaining model log buffer
if self.config.evaluation.store_model_log:
_flush_model_log_buffer(model_log_buffer)
# Normalise and concatenate
summary['loss/date'] /= summary['count/date']
summary['loss/date_l1'] /= summary['count/date']
summary['loss/mask'] /= summary['count/mask']
summary['accuracy/mask'] /= summary['count/mask']
summary['loss/nsp'] /= summary['count/nsp']
summary['accuracy/nsp'] /= summary['count/nsp']
summary['loss/subregion'] /= total_num_sequences
summary['accuracy/subregion'] /= total_num_sequences
summary['score/eval'] = (
summary['accuracy/mask'] + summary['accuracy/subregion'] -
summary['loss/date_l1'] * 0.01)
summary['loss/eval'] = (
summary['loss/mask'] + summary['loss/date'] + summary['loss/subregion'])
for k, v in outputs.items():
outputs[k] = np.concatenate(v, axis=0)
return summary, outputs
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(platform.main, Experiment))
|
[
"absl.logging.info",
"jaxline.utils.double_buffer_on_gpu",
"glob.glob",
"os.path.join",
"ithaca.util.loss.cross_entropy_loss",
"jax.process_index",
"jax.random.uniform",
"jax.jit",
"jax.numpy.mean",
"jax.local_device_count",
"ithaca.models.model.Model",
"optax.apply_updates",
"absl.flags.mark_flag_as_required",
"os.path.exists",
"dataloader.loader_tf",
"ithaca.util.region_names.load_region_maps",
"jax.numpy.argmax",
"ithaca.util.alphabet.GreekAlphabet",
"ithaca.util.loss.cross_entropy_label_smoothing_loss",
"pickle.dumps",
"functools.partial",
"distutils.dir_util.copy_tree",
"jax.numpy.sum",
"jax.vmap",
"jax.pmap",
"jax.numpy.nan_to_num",
"jax.lax.pmean",
"jaxline.utils.get_first",
"ithaca.util.optim.adaptive_grad_clip",
"jaxline.utils.py_prefetch",
"ithaca.util.loss.categorical_kl_divergence",
"optax.clip_by_global_norm",
"numpy.concatenate",
"jaxline.utils.tree_psum",
"jax.numpy.log",
"jax.numpy.exp",
"os.makedirs",
"os.path.isdir",
"jax.numpy.arange",
"tensorflow_datasets.public_api.as_numpy",
"ithaca.util.optim.linear_weight",
"jaxline.utils.bcast_local_devices",
"jax.device_count",
"bz2.compress",
"numpy.array",
"jax.numpy.clip",
"jax.grad",
"jax.nn.softmax",
"optax.global_norm",
"jax.random.split",
"jax.tree_map"
] |
[((24646, 24683), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""config"""'], {}), "('config')\n", (24673, 24683), False, 'from absl import flags\n'), ((2222, 2265), 'jaxline.utils.bcast_local_devices', 'jl_utils.bcast_local_devices', (['self.init_rng'], {}), '(self.init_rng)\n', (2250, 2265), True, 'from jaxline import utils as jl_utils\n'), ((2492, 2518), 'ithaca.models.model.Model', 'Model', ([], {}), '(**self.config.model)\n', (2497, 2518), False, 'from ithaca.models.model import Model\n'), ((2543, 2608), 'jax.pmap', 'jax.pmap', (['self._update_func'], {'axis_name': '"""i"""', 'donate_argnums': '(0, 1)'}), "(self._update_func, axis_name='i', donate_argnums=(0, 1))\n", (2551, 2608), False, 'import jax\n'), ((2648, 2794), 'functools.partial', 'functools.partial', (['linear_warmup_and_sqrt_decay'], {'max_lr': 'self.config.optimizer.kwargs.learning_rate', 'warmup_steps': 'self.config.optimizer.warmup'}), '(linear_warmup_and_sqrt_decay, max_lr=self.config.\n optimizer.kwargs.learning_rate, warmup_steps=self.config.optimizer.warmup)\n', (2665, 2794), False, 'import functools\n'), ((4671, 4698), 'jaxline.utils.get_first', 'jl_utils.get_first', (['scalars'], {}), '(scalars)\n', (4689, 4698), True, 'from jaxline import utils as jl_utils\n'), ((5820, 5838), 'jax.device_count', 'jax.device_count', ([], {}), '()\n', (5836, 5838), False, 'import jax\n'), ((5974, 6119), 'absl.logging.info', 'logging.info', (['"""num_devices: %d, per_device_batch_size: %d, global_batch_size: %d"""', 'num_devices', 'per_device_batch_size', 'global_batch_size'], {}), "(\n 'num_devices: %d, per_device_batch_size: %d, global_batch_size: %d',\n num_devices, per_device_batch_size, global_batch_size)\n", (5986, 6119), False, 'from absl import logging\n'), ((12668, 12705), 'jax.grad', 'jax.grad', (['self._loss_fn'], {'has_aux': '(True)'}), '(self._loss_fn, has_aux=True)\n', (12676, 12705), False, 'import jax\n'), ((12934, 12976), 'jax.tree_map', 'jax.tree_map', (['jnp.nan_to_num', 'scaled_grads'], {}), '(jnp.nan_to_num, scaled_grads)\n', (12946, 12976), False, 'import jax\n'), ((12989, 13036), 'jaxline.utils.tree_psum', 'jl_utils.tree_psum', (['scaled_grads'], {'axis_name': '"""i"""'}), "(scaled_grads, axis_name='i')\n", (13007, 13036), True, 'from jaxline import utils as jl_utils\n'), ((13233, 13269), 'optax.apply_updates', 'optax.apply_updates', (['params', 'updates'], {}), '(params, updates)\n', (13252, 13269), False, 'import optax\n'), ((13818, 13855), 'jax.lax.pmean', 'jax.lax.pmean', (['scalars'], {'axis_name': '"""i"""'}), "(scalars, axis_name='i')\n", (13831, 13855), False, 'import jax\n'), ((14397, 14458), 'absl.logging.info', 'logging.info', (['"""[Step %d] eval_score=%.2f"""', 'global_step', 'score'], {}), "('[Step %d] eval_score=%.2f', global_step, score)\n", (14409, 14458), False, 'from absl import logging\n'), ((14630, 14682), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""best_outputs.pkl.bz2"""'], {}), "(checkpoint_dir, 'best_outputs.pkl.bz2')\n", (14642, 14682), False, 'import os\n'), ((14700, 14746), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""best_score.txt"""'], {}), "(checkpoint_dir, 'best_score.txt')\n", (14712, 14746), False, 'import os\n'), ((14768, 14809), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model_log"""'], {}), "(checkpoint_dir, 'model_log')\n", (14780, 14809), False, 'import os\n'), ((14836, 14882), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""best_model_log"""'], {}), "(checkpoint_dir, 'best_model_log')\n", (14848, 14882), False, 'import os\n'), ((14970, 14996), 'os.path.exists', 'os.path.exists', (['score_path'], {}), '(score_path)\n', (14984, 14996), False, 'import os\n'), ((16098, 16142), 'jaxline.utils.py_prefetch', 'jl_utils.py_prefetch', (['self._build_eval_input'], {}), '(self._build_eval_input)\n', (16118, 16142), True, 'from jaxline import utils as jl_utils\n'), ((21778, 21819), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model_log"""'], {}), "(checkpoint_dir, 'model_log')\n", (21790, 21819), False, 'import os\n'), ((22076, 22108), 'jaxline.utils.get_first', 'jl_utils.get_first', (['self._params'], {}), '(self._params)\n', (22094, 22108), True, 'from jaxline import utils as jl_utils\n'), ((24694, 24738), 'functools.partial', 'functools.partial', (['platform.main', 'Experiment'], {}), '(platform.main, Experiment)\n', (24711, 24738), False, 'import functools\n'), ((2977, 3002), 'jax.jit', 'jax.jit', (['self._eval_batch'], {}), '(self._eval_batch)\n', (2984, 3002), False, 'import jax\n'), ((3195, 3244), 'ithaca.util.alphabet.GreekAlphabet', 'GreekAlphabet', ([], {'wordlist_file': 'f'}), '(wordlist_file=f, **alphabet_kwargs)\n', (3208, 3244), False, 'from ithaca.util.alphabet import GreekAlphabet\n'), ((4837, 4915), 'absl.logging.info', 'logging.info', (['"""Initializing parameters rather than restoring from checkpoint."""'], {}), "('Initializing parameters rather than restoring from checkpoint.')\n", (4849, 4915), False, 'from absl import logging\n'), ((4986, 5009), 'jaxline.utils.get_first', 'jl_utils.get_first', (['rng'], {}), '(rng)\n', (5004, 5009), True, 'from jaxline import utils as jl_utils\n'), ((5042, 5063), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (5058, 5063), False, 'import jax\n'), ((5083, 5123), 'jaxline.utils.bcast_local_devices', 'jl_utils.bcast_local_devices', (['params_rng'], {}), '(params_rng)\n', (5111, 5123), True, 'from jaxline import utils as jl_utils\n'), ((5144, 5185), 'jaxline.utils.bcast_local_devices', 'jl_utils.bcast_local_devices', (['dropout_rng'], {}), '(dropout_rng)\n', (5172, 5185), True, 'from jaxline import utils as jl_utils\n'), ((5523, 5547), 'jax.pmap', 'jax.pmap', (['self._opt_init'], {}), '(self._opt_init)\n', (5531, 5547), False, 'import jax\n'), ((5622, 5667), 'jaxline.utils.py_prefetch', 'jl_utils.py_prefetch', (['self._build_train_input'], {}), '(self._build_train_input)\n', (5642, 5667), True, 'from jaxline import utils as jl_utils\n'), ((5694, 5742), 'jaxline.utils.double_buffer_on_gpu', 'jl_utils.double_buffer_on_gpu', (['self._train_input'], {}), '(self._train_input)\n', (5723, 5742), True, 'from jaxline import utils as jl_utils\n'), ((6394, 6547), 'dataloader.loader_tf', 'dataloader.loader_tf', (['per_device_batch_size', 'config_dataset', 'self._region_map'], {'alphabet': 'self._alphabet', 'dataset_file': 'dataset_file', 'mode': '"""train"""'}), "(per_device_batch_size, config_dataset, self.\n _region_map, alphabet=self._alphabet, dataset_file=dataset_file, mode=\n 'train')\n", (6414, 6547), False, 'import dataloader\n'), ((6618, 6642), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (6640, 6642), False, 'import jax\n'), ((6660, 6677), 'tensorflow_datasets.public_api.as_numpy', 'tfds.as_numpy', (['ds'], {}), '(ds)\n', (6673, 6677), True, 'import tensorflow_datasets.public_api as tfds\n'), ((9462, 9559), 'ithaca.util.optim.linear_weight', 'linear_weight', (['global_step', 'self.config.loss.date.step_start', 'self.config.loss.date.step_end'], {}), '(global_step, self.config.loss.date.step_start, self.config.\n loss.date.step_end)\n', (9475, 9559), False, 'from ithaca.util.optim import linear_weight\n'), ((9878, 9920), 'jax.numpy.nan_to_num', 'jnp.nan_to_num', (['subregion_loss'], {'copy': '(False)'}), '(subregion_loss, copy=False)\n', (9892, 9920), True, 'import jax.numpy as jnp\n'), ((10081, 10182), 'ithaca.util.optim.linear_weight', 'linear_weight', (['global_step', 'self.config.loss.region.step_start', 'self.config.loss.region.step_end'], {}), '(global_step, self.config.loss.region.step_start, self.config.\n loss.region.step_end)\n', (10094, 10182), False, 'from ithaca.util.optim import linear_weight\n'), ((10556, 10593), 'jax.numpy.nan_to_num', 'jnp.nan_to_num', (['mask_loss'], {'copy': '(False)'}), '(mask_loss, copy=False)\n', (10570, 10593), True, 'import jax.numpy as jnp\n'), ((11003, 11100), 'ithaca.util.optim.linear_weight', 'linear_weight', (['global_step', 'self.config.loss.mask.step_start', 'self.config.loss.mask.step_end'], {}), '(global_step, self.config.loss.mask.step_start, self.config.\n loss.mask.step_end)\n', (11016, 11100), False, 'from ithaca.util.optim import linear_weight\n'), ((11482, 11518), 'jax.numpy.nan_to_num', 'jnp.nan_to_num', (['nsp_loss'], {'copy': '(False)'}), '(nsp_loss, copy=False)\n', (11496, 11518), True, 'import jax.numpy as jnp\n'), ((11935, 12030), 'ithaca.util.optim.linear_weight', 'linear_weight', (['global_step', 'self.config.loss.nsp.step_start', 'self.config.loss.nsp.step_end'], {}), '(global_step, self.config.loss.nsp.step_start, self.config.\n loss.nsp.step_end)\n', (11948, 12030), False, 'from ithaca.util.optim import linear_weight\n'), ((12145, 12163), 'jax.device_count', 'jax.device_count', ([], {}), '()\n', (12161, 12163), False, 'import jax\n'), ((13719, 13743), 'optax.global_norm', 'optax.global_norm', (['grads'], {}), '(grads)\n', (13736, 13743), False, 'import optax\n'), ((13771, 13796), 'optax.global_norm', 'optax.global_norm', (['params'], {}), '(params)\n', (13788, 13796), False, 'import optax\n'), ((14195, 14226), 'jaxline.utils.get_first', 'jl_utils.get_first', (['global_step'], {}), '(global_step)\n', (14213, 14226), True, 'from jaxline import utils as jl_utils\n'), ((14268, 14291), 'jaxline.utils.get_first', 'jl_utils.get_first', (['rng'], {}), '(rng)\n', (14286, 14291), True, 'from jaxline import utils as jl_utils\n'), ((14346, 14357), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (14354, 14357), True, 'import numpy as np\n'), ((14590, 14609), 'jax.process_index', 'jax.process_index', ([], {}), '()\n', (14607, 14609), False, 'import jax\n'), ((15860, 15938), 'absl.logging.info', 'logging.info', (['"""[Step %d] Writing eval outputs: %s."""', 'global_step', 'outputs_path'], {}), "('[Step %d] Writing eval outputs: %s.', global_step, outputs_path)\n", (15872, 15938), False, 'from absl import logging\n'), ((16335, 16518), 'dataloader.loader_tf', 'dataloader.loader_tf', (['self.config.evaluation.batch_size', 'config_dataset', 'self._region_map'], {'alphabet': 'self._alphabet', 'dataset_file': 'dataset_file', 'mode': 'self.config.evaluation.mode'}), '(self.config.evaluation.batch_size, config_dataset,\n self._region_map, alphabet=self._alphabet, dataset_file=dataset_file,\n mode=self.config.evaluation.mode)\n', (16355, 16518), False, 'import dataloader\n'), ((16589, 16606), 'tensorflow_datasets.public_api.as_numpy', 'tfds.as_numpy', (['ds'], {}), '(ds)\n', (16602, 16606), True, 'import tensorflow_datasets.public_api as tfds\n'), ((18704, 18727), 'jax.numpy.sum', 'jnp.sum', (['date_available'], {}), '(date_available)\n', (18711, 18727), True, 'import jax.numpy as jnp\n'), ((19678, 19696), 'jax.numpy.sum', 'jnp.sum', (['text_mask'], {}), '(text_mask)\n', (19685, 19696), True, 'import jax.numpy as jnp\n'), ((20383, 20410), 'jax.numpy.sum', 'jnp.sum', (['next_sentence_mask'], {}), '(next_sentence_mask)\n', (20390, 20410), True, 'import jax.numpy as jnp\n'), ((21736, 21755), 'jax.process_index', 'jax.process_index', ([], {}), '()\n', (21753, 21755), False, 'import jax\n'), ((21876, 21905), 'os.path.isdir', 'os.path.isdir', (['model_log_path'], {}), '(model_log_path)\n', (21889, 21905), False, 'import os\n'), ((24560, 24585), 'numpy.concatenate', 'np.concatenate', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (24574, 24585), True, 'import numpy as np\n'), ((3470, 3489), 'ithaca.util.region_names.load_region_maps', 'load_region_maps', (['f'], {}), '(f)\n', (3486, 3489), False, 'from ithaca.util.region_names import load_region_maps\n'), ((3632, 3651), 'ithaca.util.region_names.load_region_maps', 'load_region_maps', (['f'], {}), '(f)\n', (3648, 3651), False, 'from ithaca.util.region_names import load_region_maps\n'), ((5223, 5277), 'functools.partial', 'functools.partial', (['self.forward.init'], {'is_training': '(True)'}), '(self.forward.init, is_training=True)\n', (5240, 5277), False, 'import functools\n'), ((7752, 7770), 'jax.numpy.exp', 'jnp.exp', (['date_dist'], {}), '(date_dist)\n', (7759, 7770), True, 'import jax.numpy as jnp\n'), ((8265, 8306), 'jax.numpy.clip', 'jnp.clip', (['date_dist_prob_smooth', '(1e-06)', '(1)'], {}), '(date_dist_prob_smooth, 1e-06, 1)\n', (8273, 8306), True, 'import jax.numpy as jnp\n'), ((8326, 8356), 'jax.numpy.log', 'jnp.log', (['date_dist_prob_smooth'], {}), '(date_dist_prob_smooth)\n', (8333, 8356), True, 'import jax.numpy as jnp\n'), ((8953, 8994), 'jax.numpy.nan_to_num', 'jnp.nan_to_num', (['date_loss_l1_'], {'copy': '(False)'}), '(date_loss_l1_, copy=False)\n', (8967, 8994), True, 'import jax.numpy as jnp\n'), ((9182, 9229), 'ithaca.util.loss.categorical_kl_divergence', 'categorical_kl_divergence', (['date_dist', 'date_pred'], {}), '(date_dist, date_pred)\n', (9207, 9229), False, 'from ithaca.util.loss import categorical_kl_divergence\n'), ((9280, 9323), 'jax.numpy.nan_to_num', 'jnp.nan_to_num', (['date_loss_dist_'], {'copy': '(False)'}), '(date_loss_dist_, copy=False)\n', (9294, 9323), True, 'import jax.numpy as jnp\n'), ((9704, 9828), 'ithaca.util.loss.cross_entropy_label_smoothing_loss', 'cross_entropy_label_smoothing_loss', (['subregion_logits', 'subregion'], {'label_smoothing': 'self.config.loss.region.label_smoothing'}), '(subregion_logits, subregion,\n label_smoothing=self.config.loss.region.label_smoothing)\n', (9738, 9828), False, 'from ithaca.util.loss import cross_entropy_label_smoothing_loss\n'), ((10320, 10452), 'ithaca.util.loss.cross_entropy_label_smoothing_loss', 'cross_entropy_label_smoothing_loss', (['mask_logits', 'text_unmasked', 'text_mask'], {'label_smoothing': 'self.config.loss.mask.label_smoothing'}), '(mask_logits, text_unmasked, text_mask,\n label_smoothing=self.config.loss.mask.label_smoothing)\n', (10354, 10452), False, 'from ithaca.util.loss import cross_entropy_label_smoothing_loss\n'), ((10612, 10634), 'jax.numpy.mean', 'jnp.mean', (['mask_loss', '(0)'], {}), '(mask_loss, 0)\n', (10620, 10634), True, 'import jax.numpy as jnp\n'), ((11536, 11557), 'jax.numpy.mean', 'jnp.mean', (['nsp_loss', '(0)'], {}), '(nsp_loss, 0)\n', (11544, 11557), True, 'import jax.numpy as jnp\n'), ((15442, 15475), 'pickle.dumps', 'pickle.dumps', (['outputs'], {'protocol': '(2)'}), '(outputs, protocol=2)\n', (15454, 15475), False, 'import pickle\n'), ((15502, 15527), 'bz2.compress', 'bz2.compress', (['outputs_pkl'], {}), '(outputs_pkl)\n', (15514, 15527), False, 'import bz2\n'), ((15622, 15656), 'os.path.isdir', 'os.path.isdir', (['best_model_log_path'], {}), '(best_model_log_path)\n', (15635, 15656), False, 'import os\n'), ((15787, 15852), 'distutils.dir_util.copy_tree', 'distutils.dir_util.copy_tree', (['model_log_path', 'best_model_log_path'], {}), '(model_log_path, best_model_log_path)\n', (15815, 15852), False, 'import distutils\n'), ((18052, 18086), 'jax.nn.softmax', 'jax.nn.softmax', (['date_pred'], {'axis': '(-1)'}), '(date_pred, axis=-1)\n', (18066, 18086), False, 'import jax\n'), ((18477, 18524), 'ithaca.util.loss.categorical_kl_divergence', 'categorical_kl_divergence', (['date_dist', 'date_pred'], {}), '(date_dist, date_pred)\n', (18502, 18524), False, 'from ithaca.util.loss import categorical_kl_divergence\n'), ((18843, 18890), 'ithaca.util.loss.cross_entropy_loss', 'cross_entropy_loss', (['subregion_logits', 'subregion'], {}), '(subregion_logits, subregion)\n', (18861, 18890), False, 'from ithaca.util.loss import cross_entropy_loss\n'), ((19136, 19232), 'ithaca.util.loss.cross_entropy_label_smoothing_loss', 'cross_entropy_label_smoothing_loss', (['mask_logits', 'text_unmasked', 'text_mask'], {'label_smoothing': '(0)'}), '(mask_logits, text_unmasked, text_mask,\n label_smoothing=0)\n', (19170, 19232), False, 'from ithaca.util.loss import cross_entropy_label_smoothing_loss\n'), ((19377, 19399), 'jax.numpy.mean', 'jnp.mean', (['mask_loss', '(0)'], {}), '(mask_loss, 0)\n', (19385, 19399), True, 'import jax.numpy as jnp\n'), ((20061, 20081), 'jax.numpy.sum', 'jnp.sum', (['nsp_loss', '(0)'], {}), '(nsp_loss, 0)\n', (20068, 20081), True, 'import jax.numpy as jnp\n'), ((21984, 22011), 'os.makedirs', 'os.makedirs', (['model_log_path'], {}), '(model_log_path)\n', (21995, 22011), False, 'import os\n'), ((23315, 23356), 'pickle.dumps', 'pickle.dumps', (['model_log_batch'], {'protocol': '(2)'}), '(model_log_batch, protocol=2)\n', (23327, 23356), False, 'import pickle\n'), ((23385, 23412), 'bz2.compress', 'bz2.compress', (['model_log_pkl'], {}), '(model_log_pkl)\n', (23397, 23412), False, 'import bz2\n'), ((4000, 4041), 'ithaca.util.optim.adaptive_grad_clip', 'adaptive_grad_clip', (['config_opt.clip_level'], {}), '(config_opt.clip_level)\n', (4018, 4041), False, 'from ithaca.util.optim import adaptive_grad_clip\n'), ((4109, 4157), 'optax.clip_by_global_norm', 'optax.clip_by_global_norm', (['config_opt.clip_level'], {}), '(config_opt.clip_level)\n', (4134, 4157), False, 'import optax\n'), ((7839, 8027), 'jax.random.uniform', 'jax.random.uniform', (['rng'], {'shape': 'date_dist_prob.shape', 'dtype': 'date_dist_prob.dtype', 'minval': '(1 - self.config.loss.date.label_smoothing)', 'maxval': '(1 + self.config.loss.date.label_smoothing)'}), '(rng, shape=date_dist_prob.shape, dtype=date_dist_prob.\n dtype, minval=1 - self.config.loss.date.label_smoothing, maxval=1 +\n self.config.loss.date.label_smoothing)\n', (7857, 8027), False, 'import jax\n'), ((8751, 8785), 'jax.nn.softmax', 'jax.nn.softmax', (['date_pred'], {'axis': '(-1)'}), '(date_pred, axis=-1)\n', (8765, 8785), False, 'import jax\n'), ((8824, 8846), 'jax.vmap', 'jax.vmap', (['date_loss_l1'], {}), '(date_loss_l1)\n', (8832, 8846), False, 'import jax\n'), ((9030, 9061), 'jax.numpy.mean', 'jnp.mean', (['date_loss_l1_'], {'axis': '(0)'}), '(date_loss_l1_, axis=0)\n', (9038, 9061), True, 'import jax.numpy as jnp\n'), ((9359, 9392), 'jax.numpy.mean', 'jnp.mean', (['date_loss_dist_'], {'axis': '(0)'}), '(date_loss_dist_, axis=0)\n', (9367, 9392), True, 'import jax.numpy as jnp\n'), ((10023, 10055), 'jax.numpy.argmax', 'jnp.argmax', (['subregion_logits', '(-1)'], {}), '(subregion_logits, -1)\n', (10033, 10055), True, 'import jax.numpy as jnp\n'), ((10957, 10975), 'jax.numpy.sum', 'jnp.sum', (['text_mask'], {}), '(text_mask)\n', (10964, 10975), True, 'import jax.numpy as jnp\n'), ((11882, 11909), 'jax.numpy.sum', 'jnp.sum', (['next_sentence_mask'], {}), '(next_sentence_mask)\n', (11889, 11909), True, 'import jax.numpy as jnp\n'), ((15746, 15778), 'os.makedirs', 'os.makedirs', (['best_model_log_path'], {}), '(best_model_log_path)\n', (15757, 15778), False, 'import os\n'), ((17790, 17983), 'jax.numpy.arange', 'jnp.arange', (['(self.config.dataset.date_min + self.config.dataset.date_interval / 2)', '(self.config.dataset.date_max + self.config.dataset.date_interval / 2)', 'self.config.dataset.date_interval'], {}), '(self.config.dataset.date_min + self.config.dataset.date_interval /\n 2, self.config.dataset.date_max + self.config.dataset.date_interval / 2,\n self.config.dataset.date_interval)\n', (17800, 17983), True, 'import jax.numpy as jnp\n'), ((18141, 18163), 'jax.vmap', 'jax.vmap', (['date_loss_l1'], {}), '(date_loss_l1)\n', (18149, 18163), False, 'import jax\n'), ((18602, 18634), 'jax.numpy.sum', 'jnp.sum', (['date_loss_dist_'], {'axis': '(0)'}), '(date_loss_dist_, axis=0)\n', (18609, 18634), True, 'import jax.numpy as jnp\n'), ((18997, 19029), 'jax.numpy.argmax', 'jnp.argmax', (['subregion_logits', '(-1)'], {}), '(subregion_logits, -1)\n', (19007, 19029), True, 'import jax.numpy as jnp\n'), ((21930, 21962), 'glob.glob', 'glob.glob', (["(model_log_path + '/*')"], {}), "(model_log_path + '/*')\n", (21939, 21962), False, 'import glob\n'), ((8457, 8650), 'jax.numpy.arange', 'jnp.arange', (['(self.config.dataset.date_min + self.config.dataset.date_interval / 2)', '(self.config.dataset.date_max + self.config.dataset.date_interval / 2)', 'self.config.dataset.date_interval'], {}), '(self.config.dataset.date_min + self.config.dataset.date_interval /\n 2, self.config.dataset.date_max + self.config.dataset.date_interval / 2,\n self.config.dataset.date_interval)\n', (8467, 8650), True, 'import jax.numpy as jnp\n'), ((10699, 10726), 'jax.numpy.argmax', 'jnp.argmax', (['mask_logits', '(-1)'], {}), '(mask_logits, -1)\n', (10709, 10726), True, 'import jax.numpy as jnp\n'), ((11227, 11260), 'jax.vmap', 'jax.vmap', (['cross_entropy_mask_loss'], {}), '(cross_entropy_mask_loss)\n', (11235, 11260), False, 'import jax\n'), ((11614, 11640), 'jax.numpy.argmax', 'jnp.argmax', (['nsp_logits', '(-1)'], {}), '(nsp_logits, -1)\n', (11624, 11640), True, 'import jax.numpy as jnp\n'), ((15683, 15720), 'glob.glob', 'glob.glob', (["(best_model_log_path + '/*')"], {}), "(best_model_log_path + '/*')\n", (15692, 15720), False, 'import glob\n'), ((19465, 19492), 'jax.numpy.argmax', 'jnp.argmax', (['mask_logits', '(-1)'], {}), '(mask_logits, -1)\n', (19475, 19492), True, 'import jax.numpy as jnp\n'), ((19795, 19828), 'jax.vmap', 'jax.vmap', (['cross_entropy_mask_loss'], {}), '(cross_entropy_mask_loss)\n', (19803, 19828), False, 'import jax\n'), ((20138, 20164), 'jax.numpy.argmax', 'jnp.argmax', (['nsp_logits', '(-1)'], {}), '(nsp_logits, -1)\n', (20148, 20164), True, 'import jax.numpy as jnp\n')]
|
#!/usr/bin/env python3
import sys
import tkinter as tk
import time
import copy
import numpy as np
import matplotlib as mpl
import matplotlib.backends.tkagg as tkagg
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
NavigationToolbar2Tk)
import pandas as pd
import wp_ipc
import wp_gust
##############################################################################
def plot():
fig = mpl.figure.Figure(figsize=(3, 2))
ax = fig.add_subplot(111)
figure_canvas_agg = FigureCanvasAgg(fig)
series = pandas.Series(data)
dplt = series.plot(ax=ax)
figure_canvas_agg.draw()
figure_x, figure_y, figure_w, figure_h = fig.bbox.bounds
figure_w, figure_h = int(figure_w), int(figure_h)
global the_fig_photo
the_fig_photo = tk.PhotoImage(
master=the_canvas, width=figure_w, height=figure_h)
# Position: convert from top-left anchor to center anchor
loc = (0, 0)
the_canvas.delete("all")
the_canvas.create_image(
loc[0] + figure_w / 2, loc[1] + figure_h / 2, image=the_fig_photo)
tkagg.blit(
the_fig_photo, figure_canvas_agg.get_renderer()._renderer, colormode=2)
return the_fig_photo # XXX: has to be held
def setup_gui():
the_window = tk.Tk()
the_window.title("A figure in a canvas")
the_window.bind('<Escape>', sys.exit)
the_canvas = tk.Canvas(the_window, width=300, height=200)
the_canvas.pack()
the_fig_photo = None
the_wind_speed = tk.Scale(
the_window,
from_=0.0,
to=40.0,
resolution=0.1,
orient=tk.HORIZONTAL,
label="wind speed",
length=300,
command=None)
the_wind_speed.pack()
the_wind_speed.set(20.0)
the_potentiometer = tk.Scale(
the_window,
from_=0.0,
to=1.0,
resolution=0.01,
orient=tk.HORIZONTAL,
label="potentiometer",
length=300,
command=None)
the_potentiometer.pack()
the_potentiometer.set(0.5)
the_perturbation_period = tk.Scale(
the_window,
from_=10,
to=10000,
resolution=1,
orient=tk.HORIZONTAL,
label="perturbation period (ms)",
length=300,
command=None)
the_perturbation_period.pack()
the_perturbation_period.set(1000)
the_perturbation_amplitude = tk.Scale(
the_window,
from_=0.0,
to=1.0,
resolution=0.001,
orient=tk.HORIZONTAL,
label="perturbation amplitude",
length=300,
command=None)
the_perturbation_amplitude.pack()
the_perturbation_amplitude.set(0.1)
the_perturbation = tk.Scale(
the_window,
from_=-1.0,
to=1.0,
resolution=0.01,
orient=tk.HORIZONTAL,
label="perturbation",
length=300,
command=None)
the_perturbation.pack()
the_fan_duty = tk.Scale(
the_window,
from_=0.0,
to=1.0,
resolution=0.01,
orient=tk.HORIZONTAL,
label="fan duty",
length=300,
command=None)
the_fan_duty.pack()
tk.Label(the_window, text="\nsimulation\n").pack()
#the_fan_duty
inputs = {
'perturbation_period': the_perturbation_period,
'perturbation_amplitude': the_perturbation_amplitude,
'wind_speed': the_wind_speed,
'scale': the_potentiometer,
}
outputs = {
'fan_duty': the_fan_duty,
'perturbation': the_perturbation,
}
return the_window, inputs, outputs
##############################################################################
def update_inputs_gui(inputs, widgets):
for name, widget in widgets.items():
inputs.__dict__[name] = float(widget.get())
def set_outputs_gui(widgets, state):
for name, widget in widgets.items():
v = state.__dict__[name]
widgets[name].set(v)
def main():
loop_interval = 100
state = wp_gust.State()
inputs = wp_gust.Inputs()
ipc_session = wp_ipc.Session()
window, input_widgets, output_widgets = setup_gui()
def loop():
nonlocal state
inputs.__dict__['time'] = time.time()
wp_gust.update_inputs_ipc(inputs, ipc_session)
update_inputs_gui(inputs, input_widgets)
state = wp_gust.next_state(state, inputs)
set_outputs_gui(output_widgets, state)
ipc_session.send("wind_speed", inputs.wind_speed)
window.after(loop_interval, loop)
loop()
tk.mainloop()
if __name__ == '__main__':
main()
|
[
"tkinter.PhotoImage",
"wp_gust.State",
"tkinter.Label",
"tkinter.Canvas",
"tkinter.mainloop",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"wp_gust.next_state",
"wp_gust.Inputs",
"time.time",
"matplotlib.figure.Figure",
"wp_ipc.Session",
"tkinter.Scale",
"wp_gust.update_inputs_ipc",
"tkinter.Tk"
] |
[((518, 551), 'matplotlib.figure.Figure', 'mpl.figure.Figure', ([], {'figsize': '(3, 2)'}), '(figsize=(3, 2))\n', (535, 551), True, 'import matplotlib as mpl\n'), ((608, 628), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvasAgg', (['fig'], {}), '(fig)\n', (623, 628), False, 'from matplotlib.backends.backend_agg import FigureCanvasAgg\n'), ((883, 948), 'tkinter.PhotoImage', 'tk.PhotoImage', ([], {'master': 'the_canvas', 'width': 'figure_w', 'height': 'figure_h'}), '(master=the_canvas, width=figure_w, height=figure_h)\n', (896, 948), True, 'import tkinter as tk\n'), ((1352, 1359), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (1357, 1359), True, 'import tkinter as tk\n'), ((1466, 1510), 'tkinter.Canvas', 'tk.Canvas', (['the_window'], {'width': '(300)', 'height': '(200)'}), '(the_window, width=300, height=200)\n', (1475, 1510), True, 'import tkinter as tk\n'), ((1582, 1711), 'tkinter.Scale', 'tk.Scale', (['the_window'], {'from_': '(0.0)', 'to': '(40.0)', 'resolution': '(0.1)', 'orient': 'tk.HORIZONTAL', 'label': '"""wind speed"""', 'length': '(300)', 'command': 'None'}), "(the_window, from_=0.0, to=40.0, resolution=0.1, orient=tk.\n HORIZONTAL, label='wind speed', length=300, command=None)\n", (1590, 1711), True, 'import tkinter as tk\n'), ((1852, 1984), 'tkinter.Scale', 'tk.Scale', (['the_window'], {'from_': '(0.0)', 'to': '(1.0)', 'resolution': '(0.01)', 'orient': 'tk.HORIZONTAL', 'label': '"""potentiometer"""', 'length': '(300)', 'command': 'None'}), "(the_window, from_=0.0, to=1.0, resolution=0.01, orient=tk.\n HORIZONTAL, label='potentiometer', length=300, command=None)\n", (1860, 1984), True, 'import tkinter as tk\n'), ((2136, 2276), 'tkinter.Scale', 'tk.Scale', (['the_window'], {'from_': '(10)', 'to': '(10000)', 'resolution': '(1)', 'orient': 'tk.HORIZONTAL', 'label': '"""perturbation period (ms)"""', 'length': '(300)', 'command': 'None'}), "(the_window, from_=10, to=10000, resolution=1, orient=tk.HORIZONTAL,\n label='perturbation period (ms)', length=300, command=None)\n", (2144, 2276), True, 'import tkinter as tk\n'), ((2445, 2587), 'tkinter.Scale', 'tk.Scale', (['the_window'], {'from_': '(0.0)', 'to': '(1.0)', 'resolution': '(0.001)', 'orient': 'tk.HORIZONTAL', 'label': '"""perturbation amplitude"""', 'length': '(300)', 'command': 'None'}), "(the_window, from_=0.0, to=1.0, resolution=0.001, orient=tk.\n HORIZONTAL, label='perturbation amplitude', length=300, command=None)\n", (2453, 2587), True, 'import tkinter as tk\n'), ((2750, 2882), 'tkinter.Scale', 'tk.Scale', (['the_window'], {'from_': '(-1.0)', 'to': '(1.0)', 'resolution': '(0.01)', 'orient': 'tk.HORIZONTAL', 'label': '"""perturbation"""', 'length': '(300)', 'command': 'None'}), "(the_window, from_=-1.0, to=1.0, resolution=0.01, orient=tk.\n HORIZONTAL, label='perturbation', length=300, command=None)\n", (2758, 2882), True, 'import tkinter as tk\n'), ((2991, 3118), 'tkinter.Scale', 'tk.Scale', (['the_window'], {'from_': '(0.0)', 'to': '(1.0)', 'resolution': '(0.01)', 'orient': 'tk.HORIZONTAL', 'label': '"""fan duty"""', 'length': '(300)', 'command': 'None'}), "(the_window, from_=0.0, to=1.0, resolution=0.01, orient=tk.\n HORIZONTAL, label='fan duty', length=300, command=None)\n", (2999, 3118), True, 'import tkinter as tk\n'), ((4034, 4049), 'wp_gust.State', 'wp_gust.State', ([], {}), '()\n', (4047, 4049), False, 'import wp_gust\n'), ((4063, 4079), 'wp_gust.Inputs', 'wp_gust.Inputs', ([], {}), '()\n', (4077, 4079), False, 'import wp_gust\n'), ((4099, 4115), 'wp_ipc.Session', 'wp_ipc.Session', ([], {}), '()\n', (4113, 4115), False, 'import wp_ipc\n'), ((4579, 4592), 'tkinter.mainloop', 'tk.mainloop', ([], {}), '()\n', (4590, 4592), True, 'import tkinter as tk\n'), ((4246, 4257), 'time.time', 'time.time', ([], {}), '()\n', (4255, 4257), False, 'import time\n'), ((4266, 4312), 'wp_gust.update_inputs_ipc', 'wp_gust.update_inputs_ipc', (['inputs', 'ipc_session'], {}), '(inputs, ipc_session)\n', (4291, 4312), False, 'import wp_gust\n'), ((4379, 4412), 'wp_gust.next_state', 'wp_gust.next_state', (['state', 'inputs'], {}), '(state, inputs)\n', (4397, 4412), False, 'import wp_gust\n'), ((3208, 3251), 'tkinter.Label', 'tk.Label', (['the_window'], {'text': '"""\nsimulation\n"""'}), "(the_window, text='\\nsimulation\\n')\n", (3216, 3251), True, 'import tkinter as tk\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
from .builder import HEADS
import paddle.nn.functional as F
import paddle.fluid.layers as layers
LARGE_NUM = 1e9
@HEADS.register()
class SimCLRContrastiveHead(nn.Layer):
"""Head for contrastive learning.
Args:
temperature (float): The temperature hyper-parameter that
controls the concentration level of the distribution.
Default: 0.1.
"""
def __init__(self, temperature=0.5, return_accuracy=True, multi_rank=False):
super(SimCLRContrastiveHead, self).__init__()
self.criterion = nn.CrossEntropyLoss()
self.temperature = temperature
self.return_accuracy = return_accuracy
self.multi_rank = multi_rank
def forward(self, pos, neg):
"""Forward head.
Args:
pos (Tensor): Nx1 positive similarity.
neg (Tensor): Nxk negative similarity.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
hidden1, hidden2 = pos, neg
batch_size = pos.shape[0]
# Gather hidden1/hidden2 across replicas and create local labels.
if self.multi_rank is True:
hidden1_large = self.add_allgather(hidden1, "hidden1"+str(self.co2))
hidden2_large = self.add_allgather(hidden2, "hidden2"+str(self.co2))
hidden1_large = paddle.reshape(hidden1_large,
[-1, hidden1_large.shape[-1]])
hidden2_large = paddle.reshape(hidden2_large,
[-1, hidden2_large.shape[-1]])
enlarged_batch_size = paddle.shape(hidden1_large)[0]
trainer_id = self.args.trainer_id
labels_idx = paddle.arange(0, batch_size, 1,
"int32") + trainer_id * batch_size
labels = F.one_hot(
paddle.reshape(labels_idx, [batch_size]),
enlarged_batch_size * 2)
masks = F.one_hot(
paddle.reshape(labels_idx, [batch_size]),
enlarged_batch_size)
else:
hidden1_large = hidden1
hidden2_large = hidden2
labels = F.one_hot(
paddle.reshape(
paddle.arange(0, batch_size, 1, "int32"), [batch_size]),
batch_size * 2)
masks = F.one_hot(
paddle.reshape(
paddle.arange(0, batch_size, 1, "int32"), [batch_size]),
batch_size)
logits_aa = paddle.matmul(
hidden1, hidden1_large, transpose_y=True) / self.temperature
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = paddle.matmul(
hidden2, hidden2_large, transpose_y=True) / self.temperature
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = paddle.matmul(
hidden1, hidden2_large, transpose_y=True) / self.temperature
logits_ba = paddle.matmul(
hidden2, hidden1_large, transpose_y=True) / self.temperature
loss_a = paddle.nn.functional.softmax_with_cross_entropy(
paddle.concat([logits_ab, logits_aa], 1), labels, soft_label=True)
loss_b = paddle.nn.functional.softmax_with_cross_entropy(
paddle.concat([logits_ba, logits_bb], 1), labels, soft_label=True)
contrast_loss = loss_a + loss_b
logits_ab_co2 = logits_ab - masks * LARGE_NUM
logits_ba_co2 = logits_ba - masks * LARGE_NUM
logit_a = paddle.concat([logits_aa, logits_ab_co2], 1)
logit_b = paddle.concat([logits_ba_co2, logits_bb], 1)
log_a = paddle.nn.functional.log_softmax(logit_a)
log_b = paddle.nn.functional.log_softmax(logit_b)
a = paddle.nn.functional.softmax(logit_a)
b = paddle.nn.functional.softmax(logit_b)
kl_1 = paddle.nn.functional.kl_div(log_a, b, reduction='batchmean')
kl_2 = paddle.nn.functional.kl_div(log_b, a, reduction='batchmean')
co2_loss = 1 * (kl_1 + kl_2)
total_contrast_loss = contrast_loss + 3 * co2_loss
loss = layers.reduce_mean(total_contrast_loss)
contrastive_label = paddle.unsqueeze(
paddle.argmax(
labels, axis=1), 1)
acc1 = layers.accuracy(input=logits_ab, label=contrastive_label)
outputs = dict()
outputs['loss'] = loss
outputs['acc1'] = acc1
return outputs
def accuracy(output, target, topk=(1, )):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with paddle.no_grad():
maxk = max(topk)
batch_size = target.shape[0]
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = paddle.cast(pred == target.reshape([1, -1]).expand_as(pred),
'float32')
res = []
for k in topk:
correct_k = correct[:k].reshape([-1]).sum(0, keepdim=True)
res.append(correct_k * 100.0 / batch_size)
return res
def add_allgather(self, hidden, name=""):
block = self._train_program.global_block()
hidden_large = block.create_var(
name=name,
shape=[self.args.trainer_num] + list(hidden.shape),
persistable=False,
dtype=core.VarDesc.VarType.FP32)
op_len = len(list(enumerate(block.ops)))
op_maker = core.op_proto_and_checker_maker
self.op_role_key = op_maker.kOpRoleAttrName()
block._insert_op(
op_len,
type='c_allgather',
inputs={'X': hidden},
outputs={'Out': hidden_large},
attrs={
'nranks': self.args.trainer_num,
self.op_role_key: OpRole.Forward,
"use_calc_stream": True
})
return hidden_large
|
[
"paddle.fluid.layers.accuracy",
"paddle.fluid.layers.reduce_mean",
"paddle.concat",
"paddle.reshape",
"paddle.nn.functional.softmax",
"paddle.argmax",
"paddle.arange",
"paddle.no_grad",
"paddle.nn.functional.kl_div",
"paddle.matmul",
"paddle.shape",
"paddle.nn.functional.log_softmax",
"paddle.nn.CrossEntropyLoss"
] |
[((1199, 1220), 'paddle.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1218, 1220), True, 'import paddle.nn as nn\n'), ((4174, 4218), 'paddle.concat', 'paddle.concat', (['[logits_aa, logits_ab_co2]', '(1)'], {}), '([logits_aa, logits_ab_co2], 1)\n', (4187, 4218), False, 'import paddle\n'), ((4237, 4281), 'paddle.concat', 'paddle.concat', (['[logits_ba_co2, logits_bb]', '(1)'], {}), '([logits_ba_co2, logits_bb], 1)\n', (4250, 4281), False, 'import paddle\n'), ((4298, 4339), 'paddle.nn.functional.log_softmax', 'paddle.nn.functional.log_softmax', (['logit_a'], {}), '(logit_a)\n', (4330, 4339), False, 'import paddle\n'), ((4356, 4397), 'paddle.nn.functional.log_softmax', 'paddle.nn.functional.log_softmax', (['logit_b'], {}), '(logit_b)\n', (4388, 4397), False, 'import paddle\n'), ((4410, 4447), 'paddle.nn.functional.softmax', 'paddle.nn.functional.softmax', (['logit_a'], {}), '(logit_a)\n', (4438, 4447), False, 'import paddle\n'), ((4460, 4497), 'paddle.nn.functional.softmax', 'paddle.nn.functional.softmax', (['logit_b'], {}), '(logit_b)\n', (4488, 4497), False, 'import paddle\n'), ((4513, 4573), 'paddle.nn.functional.kl_div', 'paddle.nn.functional.kl_div', (['log_a', 'b'], {'reduction': '"""batchmean"""'}), "(log_a, b, reduction='batchmean')\n", (4540, 4573), False, 'import paddle\n'), ((4589, 4649), 'paddle.nn.functional.kl_div', 'paddle.nn.functional.kl_div', (['log_b', 'a'], {'reduction': '"""batchmean"""'}), "(log_b, a, reduction='batchmean')\n", (4616, 4649), False, 'import paddle\n'), ((4762, 4801), 'paddle.fluid.layers.reduce_mean', 'layers.reduce_mean', (['total_contrast_loss'], {}), '(total_contrast_loss)\n', (4780, 4801), True, 'import paddle.fluid.layers as layers\n'), ((4933, 4990), 'paddle.fluid.layers.accuracy', 'layers.accuracy', ([], {'input': 'logits_ab', 'label': 'contrastive_label'}), '(input=logits_ab, label=contrastive_label)\n', (4948, 4990), True, 'import paddle.fluid.layers as layers\n'), ((5248, 5264), 'paddle.no_grad', 'paddle.no_grad', ([], {}), '()\n', (5262, 5264), False, 'import paddle\n'), ((1987, 2047), 'paddle.reshape', 'paddle.reshape', (['hidden1_large', '[-1, hidden1_large.shape[-1]]'], {}), '(hidden1_large, [-1, hidden1_large.shape[-1]])\n', (2001, 2047), False, 'import paddle\n'), ((2119, 2179), 'paddle.reshape', 'paddle.reshape', (['hidden2_large', '[-1, hidden2_large.shape[-1]]'], {}), '(hidden2_large, [-1, hidden2_large.shape[-1]])\n', (2133, 2179), False, 'import paddle\n'), ((3194, 3249), 'paddle.matmul', 'paddle.matmul', (['hidden1', 'hidden1_large'], {'transpose_y': '(True)'}), '(hidden1, hidden1_large, transpose_y=True)\n', (3207, 3249), False, 'import paddle\n'), ((3352, 3407), 'paddle.matmul', 'paddle.matmul', (['hidden2', 'hidden2_large'], {'transpose_y': '(True)'}), '(hidden2, hidden2_large, transpose_y=True)\n', (3365, 3407), False, 'import paddle\n'), ((3510, 3565), 'paddle.matmul', 'paddle.matmul', (['hidden1', 'hidden2_large'], {'transpose_y': '(True)'}), '(hidden1, hidden2_large, transpose_y=True)\n', (3523, 3565), False, 'import paddle\n'), ((3618, 3673), 'paddle.matmul', 'paddle.matmul', (['hidden2', 'hidden1_large'], {'transpose_y': '(True)'}), '(hidden2, hidden1_large, transpose_y=True)\n', (3631, 3673), False, 'import paddle\n'), ((3795, 3835), 'paddle.concat', 'paddle.concat', (['[logits_ab, logits_aa]', '(1)'], {}), '([logits_ab, logits_aa], 1)\n', (3808, 3835), False, 'import paddle\n'), ((3940, 3980), 'paddle.concat', 'paddle.concat', (['[logits_ba, logits_bb]', '(1)'], {}), '([logits_ba, logits_bb], 1)\n', (3953, 3980), False, 'import paddle\n'), ((4860, 4889), 'paddle.argmax', 'paddle.argmax', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (4873, 4889), False, 'import paddle\n'), ((2257, 2284), 'paddle.shape', 'paddle.shape', (['hidden1_large'], {}), '(hidden1_large)\n', (2269, 2284), False, 'import paddle\n'), ((2372, 2412), 'paddle.arange', 'paddle.arange', (['(0)', 'batch_size', '(1)', '"""int32"""'], {}), "(0, batch_size, 1, 'int32')\n", (2385, 2412), False, 'import paddle\n'), ((2525, 2565), 'paddle.reshape', 'paddle.reshape', (['labels_idx', '[batch_size]'], {}), '(labels_idx, [batch_size])\n', (2539, 2565), False, 'import paddle\n'), ((2655, 2695), 'paddle.reshape', 'paddle.reshape', (['labels_idx', '[batch_size]'], {}), '(labels_idx, [batch_size])\n', (2669, 2695), False, 'import paddle\n'), ((2904, 2944), 'paddle.arange', 'paddle.arange', (['(0)', 'batch_size', '(1)', '"""int32"""'], {}), "(0, batch_size, 1, 'int32')\n", (2917, 2944), False, 'import paddle\n'), ((3078, 3118), 'paddle.arange', 'paddle.arange', (['(0)', 'batch_size', '(1)', '"""int32"""'], {}), "(0, batch_size, 1, 'int32')\n", (3091, 3118), False, 'import paddle\n')]
|
#!/usr/bin/env python
"""
example of putting git short revision in matplotlib plot, up in the corner
(rather than in title where git revision text is too large)
This is helpful for when a colleague wants a plot exactly recreated from a year ago,
to help find the exact code used to create that plot.
http://matplotlib.org/api/figure_api.html
"""
import subprocess
from matplotlib.pyplot import figure, show
try:
gitrev = subprocess.check_output(
["git", "rev-parse", "--short", "HEAD"], universal_newlines=True
).strip("\n")
except Exception: # maybe they don't have git installed
gitrev = ""
fg = figure()
ax = fg.gca()
ax.plot([1, 2])
ax.set_title("my cool plot")
fg.text(1.0, 1.0, "git: " + gitrev, ha="right", va="top", rotation="vertical")
show()
|
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"subprocess.check_output"
] |
[((624, 632), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (630, 632), False, 'from matplotlib.pyplot import figure, show\n'), ((772, 778), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (776, 778), False, 'from matplotlib.pyplot import figure, show\n'), ((429, 522), 'subprocess.check_output', 'subprocess.check_output', (["['git', 'rev-parse', '--short', 'HEAD']"], {'universal_newlines': '(True)'}), "(['git', 'rev-parse', '--short', 'HEAD'],\n universal_newlines=True)\n", (452, 522), False, 'import subprocess\n')]
|
from __future__ import print_function
from particletools.tables import (PYTHIAParticleData, c_speed_of_light,
print_stable, make_stable_list)
import math
pdata = PYTHIAParticleData()
print_stable(pdata.ctau('D0') / c_speed_of_light,
title=('Particles with known finite lifetimes longer '
'than that of D0 ({0}cm)').format(pdata.ctau('D0')))
print()
print('Known particles with tau > 1e-8s:', make_stable_list(1e-8))
|
[
"particletools.tables.PYTHIAParticleData",
"particletools.tables.make_stable_list"
] |
[((197, 217), 'particletools.tables.PYTHIAParticleData', 'PYTHIAParticleData', ([], {}), '()\n', (215, 217), False, 'from particletools.tables import PYTHIAParticleData, c_speed_of_light, print_stable, make_stable_list\n'), ((461, 484), 'particletools.tables.make_stable_list', 'make_stable_list', (['(1e-08)'], {}), '(1e-08)\n', (477, 484), False, 'from particletools.tables import PYTHIAParticleData, c_speed_of_light, print_stable, make_stable_list\n')]
|