| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
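Each record below is one row of this table: the source file itself sits in the `content` column, and everything after it is a per-file statistic. As a minimal sketch of how a table with this schema might be inspected programmatically (the Parquet file name is hypothetical; only the column names come from the schema above), one could use pandas:

```python
# Minimal sketch: load one row of a table with the schema above.
# "code_sample.parquet" is a hypothetical file name; only the column
# names are taken from the schema listed here.
import pandas as pd

df = pd.read_parquet("code_sample.parquet")

# Column names and dtypes should line up with the schema table above.
print(df.dtypes)

row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"])
print(row["lang"], row["size"], "bytes, licenses:", row["max_stars_repo_licenses"])

# The full source file lives in `content`; the remaining columns
# (avg_line_length, alphanum_fraction, qsc_* signals, ...) describe it.
print(row["content"][:300])
```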
- hexsha: 5320daf74e5189735d626582356010934114572d
- size: 10,522
- ext: py
- lang: Python
- max_stars_repo_path: testapp/app/app/tests/test_export_action.py
- max_stars_repo_name: instituciones-abiertas/django-admin-export-action
- max_stars_repo_head_hexsha: bb089180e418915e1bba31927554537249fbec78
- max_stars_repo_licenses: ["MIT"]
- max_stars_count: 5
- max_stars_repo_stars_event_min_datetime: 2020-12-15T11:38:42.000Z
- max_stars_repo_stars_event_max_datetime: 2022-01-06T02:33:59.000Z
- max_issues_repo_path: testapp/app/app/tests/test_export_action.py
- max_issues_repo_name: instituciones-abiertas/django-admin-export-action
- max_issues_repo_head_hexsha: bb089180e418915e1bba31927554537249fbec78
- max_issues_repo_licenses: ["MIT"]
- max_issues_count: 2
- max_issues_repo_issues_event_min_datetime: 2021-09-14T19:25:29.000Z
- max_issues_repo_issues_event_max_datetime: 2021-11-26T14:16:50.000Z
- max_forks_repo_path: testapp/app/app/tests/test_export_action.py
- max_forks_repo_name: instituciones-abiertas/django-admin-export-action
- max_forks_repo_head_hexsha: bb089180e418915e1bba31927554537249fbec78
- max_forks_repo_licenses: ["MIT"]
- max_forks_count: 2
- max_forks_repo_forks_event_min_datetime: 2021-09-14T19:19:05.000Z
- max_forks_repo_forks_event_max_datetime: 2021-09-14T19:19:18.000Z
- content:
# -- encoding: UTF-8 --
import json
import uuid
from admin_export_action import report
from admin_export_action.admin import export_selected_objects
from admin_export_action.config import default_config, get_config
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, RequestFactory
from django.urls import reverse
from django.utils.http import urlencode
from news.models import Attachment, Category, News, NewsTag, Video
from news.admin import NewsAdmin
class FakeDict(object):
def __getitem__(self, key):
return object()
class WS(object):
def __init__(self):
self.rows = []
self.cells = []
self.column_dimensions = FakeDict()
def cell(self, row, column):
pass
def append(self, row):
self.rows.append(row)
class FakeQueryset(object):
def __init__(self, num):
self.num = num
self.model = News
def values_list(self, field, flat=True):
return [i for i in range(1, self.num)]
class AdminExportActionTest(TestCase):
fixtures = ["tests.json"]
def test_config(self):
self.assertEqual(default_config.get('ENABLE_SITEWIDE'), True)
self.assertEqual(get_config('ENABLE_SITEWIDE'), False)
with self.settings(ADMIN_EXPORT_ACTION=None):
self.assertEqual(get_config('ENABLE_SITEWIDE'), True)
def test_export_selected_objects_session(self):
factory = RequestFactory()
request = factory.get('/news/admin/')
request.session = {}
modeladmin = NewsAdmin(model=News, admin_site=AdminSite())
qs = FakeQueryset(2000)
self.assertEqual(len(request.session), 0)
export_selected_objects(modeladmin, request, qs)
self.assertEqual(len(request.session), 1)
els = list(request.session.items())
self.assertEqual(els[0][1], qs.values_list('id'))
def test_get_field_verbose_name(self):
res = report.get_field_verbose_name(News.objects, 'tags__name')
assert res == 'all tags verbose name'
res = report.get_field_verbose_name(News.objects, 'share')
assert res == 'share'
def test_list_to_method_response_should_return_200_and_correct_values(
self):
admin = User.objects.get(pk=1)
data, messages = report.report_to_list(News.objects.all(),
['id', 'title', 'status'],
admin)
method = getattr(report, 'list_to_{}_response'.format('html'))
res = method(data)
assert res.status_code == 200
method = getattr(report, 'list_to_{}_response'.format('csv'))
res = method(data)
assert res.status_code == 200
assert res.content == b'1,Lucio Dalla,published\r\n2,La mano de Dios,draft\r\n'
method = getattr(report, 'list_to_{}_response'.format('xlsx'))
res = method(data)
assert res.status_code == 200
method = getattr(report, 'list_to_{}_response'.format('json'))
res = method(data, header=['id', 'title', 'status'])
d = json.loads(res.content)
assert d[0]['id'] == 1
assert d[0]['title'] == "Lucio Dalla"
assert d[0]['status'] == 'published'
assert d[1]['id'] == 2
assert d[1]['title'] == "La mano de Dios"
assert d[1]['status'] == 'draft'
assert res.status_code == 200
data, messages = report.report_to_list(News.objects.all(),
['id', 'title', 'status'],
admin,
raw_choices=True)
method = getattr(report, 'list_to_{}_response'.format('json'))
res = method(data, header=['id', 'title', 'status'])
d = json.loads(res.content)
assert d[0]['id'] == 1
assert d[0]['title'] == "Lucio Dalla"
assert d[0]['status'] == 2
assert d[1]['id'] == 2
assert d[1]['title'] == "La mano de Dios"
assert d[1]['status'] == 1
assert res.status_code == 200
def test_list_to_csv_response_should_have_expected_content(self):
admin = User.objects.get(pk=1)
data, messages = report.report_to_list(News.objects.all(),
['id', 'title'], admin)
method = getattr(report, 'list_to_{}_response'.format('csv'))
res = method(data)
assert res.status_code == 200
assert res.content == b'1,Lucio Dalla\r\n2,La mano de Dios\r\n'
def test_list_to_json_response_should_have_expected_content(self):
admin = User.objects.get(pk=1)
data, messages = report.report_to_list(News.objects.all(),
['id', 'title'], admin)
method = getattr(report, 'list_to_{}_response'.format('json'))
res = method(data, header=['id', 'title'])
d = json.loads(res.content)
assert d[0]['id'] == 1
assert d[0]['title'] == "Lucio Dalla"
assert d[1]['id'] == 2
assert d[1]['title'] == "La mano de Dios"
assert res.status_code == 200
def test_admin_export_post_should_return_200(self):
for output_format in ['html', 'csv', 'xslx', 'json']:
params = {
'ct':
ContentType.objects.get_for_model(News).pk,
'ids':
','.join(
repr(pk)
for pk in News.objects.values_list('pk', flat=True))
}
data = {
"title": "on",
"__format": output_format,
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='admin')
response = self.client.post(url, data=data)
assert response.status_code == 200
def test_admin_export_get_should_return_200(self):
params = {
'ct':
ContentType.objects.get_for_model(News).pk,
'ids':
','.join(
repr(pk) for pk in News.objects.values_list('pk', flat=True))
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='admin')
response = self.client.get(url)
assert response.status_code == 200
def test_admin_export_with_related_get_should_return_200(self):
params = {
'related': True,
'model_ct': ContentType.objects.get_for_model(News).pk,
'field': 'category',
'path': 'category.name',
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='admin')
response = self.client.get(url)
assert response.status_code == 200
def test_admin_export_with_related_of_indirect_field_get_should_return_200(
self):
params = {
'related': True,
'model_ct': ContentType.objects.get_for_model(News).pk,
'field': 'newstag',
'path': 'newstag.id',
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='admin')
response = self.client.get(url)
assert response.status_code == 200
def test_admin_export_with_unregistered_model_should_raise_ValueError(
self):
params = {
'ct':
ContentType.objects.get_for_model(NewsTag).pk,
'ids':
','.join(
repr(pk)
for pk in NewsTag.objects.values_list('pk', flat=True))
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='admin')
try:
self.client.get(url)
self.fail()
except ValueError:
pass
def test_admin_action_should_redirect_to_export_view(self):
objects = News.objects.all()
ids = [repr(obj.pk) for obj in objects]
data = {
"action": "export_selected_objects",
"_selected_action": ids,
}
url = reverse('admin:news_news_changelist')
self.client.login(username='admin', password='admin')
response = self.client.post(url, data=data)
expected_url = "{}?ct={ct}&ids={ids}".format(
reverse('admin_export_action:export'),
ct=ContentType.objects.get_for_model(News).pk,
ids=','.join(reversed(ids)))
assert response.status_code == 302
assert response.url.endswith(expected_url)
def test_export_with_related_should_return_200(self):
for output_format in ['html', 'csv', 'xslx', 'json']:
news = News.objects.all()
params = {
'ct':
ContentType.objects.get_for_model(News).pk,
'ids':
','.join(
repr(pk)
for pk in News.objects.values_list('pk', flat=True))
}
data = {
'id': 'on',
'title': 'on',
'status': 'on',
'category__name': 'on',
'tags__name': 'on',
'newstag__created_on': 'on',
"__format": output_format,
}
url = "{}?{}".format(reverse('admin_export_action:export'),
urlencode(params))
self.client.login(username='admin', password='admin')
response = self.client.post(url, data=data)
assert response.status_code == 200
assert response.content
def test_build_sheet_convert_function(self):
data = [
['1', 5, 'convert', 9, {"foo": "bar"}, [1, 2], uuid.UUID("12345678123456781234567812345678")],
]
ws = WS()
report.build_sheet(data, ws, sheet_name='report', header=None, widths=None)
self.assertEqual(ws.rows, [['1', 5, 'converted', 9, "{'foo': 'bar'}", '[1, 2]', '12345678-1234-5678-1234-567812345678']])
- avg_line_length: 36.79021; max_line_length: 129; alphanum_fraction: 0.560255
- qsc_code_num_words_quality_signal: 1,183; qsc_code_num_chars_quality_signal: 10,522; qsc_code_mean_word_length_quality_signal: 4.782756; qsc_code_frac_words_unique_quality_signal: 0.156382
- qsc_code_frac_chars_top_2grams_quality_signal: 0.031106; qsc_code_frac_chars_top_3grams_quality_signal: 0.027572; qsc_code_frac_chars_top_4grams_quality_signal: 0.028455
- qsc_code_frac_chars_dupe_5grams_quality_signal: 0.609933; qsc_code_frac_chars_dupe_6grams_quality_signal: 0.591552; qsc_code_frac_chars_dupe_7grams_quality_signal: 0.563273; qsc_code_frac_chars_dupe_8grams_quality_signal: 0.545246; qsc_code_frac_chars_dupe_9grams_quality_signal: 0.512725; qsc_code_frac_chars_dupe_10grams_quality_signal: 0.509544
- qsc_code_frac_chars_replacement_symbols_quality_signal: 0; qsc_code_frac_chars_digital_quality_signal: 0.0236; qsc_code_frac_chars_whitespace_quality_signal: 0.307356
- qsc_code_size_file_byte_quality_signal: 10,522; qsc_code_num_lines_quality_signal: 285; qsc_code_num_chars_line_max_quality_signal: 130; qsc_code_num_chars_line_mean_quality_signal: 36.919298
- qsc_code_frac_chars_alphabet_quality_signal: 0.752744; qsc_code_frac_chars_comments_quality_signal: 0.001996; qsc_code_cate_xml_start_quality_signal: 0; qsc_code_frac_lines_dupe_lines_quality_signal: 0.487395; qsc_code_cate_autogen_quality_signal: 0
- qsc_code_frac_lines_long_string_quality_signal: 0.004202; qsc_code_frac_chars_string_length_quality_signal: 0.126012; qsc_code_frac_chars_long_word_length_quality_signal: 0.03067; qsc_code_frac_lines_string_concat_quality_signal: 0; qsc_code_cate_encoded_data_quality_signal: 0
- qsc_code_frac_chars_hex_words_quality_signal: 0; qsc_code_frac_lines_prompt_comments_quality_signal: 0; qsc_code_frac_lines_assert_quality_signal: 0.176471
- qsc_codepython_cate_ast_quality_signal: 1; qsc_codepython_frac_lines_func_ratio_quality_signal: 0.084034; qsc_codepython_cate_var_zero_quality_signal: false; qsc_codepython_frac_lines_pass_quality_signal: 0.037815; qsc_codepython_frac_lines_import_quality_signal: 0.054622; qsc_codepython_frac_lines_simplefunc_quality_signal: 0.008403; qsc_codepython_score_lines_no_logic_quality_signal: 0.168067; qsc_codepython_frac_lines_print_quality_signal: 0
- qsc_code_num_words through qsc_codepython_frac_lines_print (the unsuffixed qsc_* columns): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
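The qsc_* column names read as simple per-file statistics computed over `content` (character counts, line lengths, duplicate n-gram fractions, and so on). The exact definitions behind the numbers in this table are not given here, but a minimal sketch of plausible implementations for a few of the simpler ones looks like this (the function name and formulas are assumptions, not the table's actual code):

```python
# Plausible (assumed) definitions for a few of the simpler quality signals.
# Illustrative only; the exact formulas behind the values in this table
# are not specified here.
def simple_quality_signals(content: str) -> dict:
    lines = content.splitlines()
    num_chars = len(content)
    line_lengths = [len(line) for line in lines]
    return {
        "num_lines": len(lines),
        "num_chars": num_chars,
        "num_chars_line_max": max(line_lengths, default=0),
        "num_chars_line_mean": num_chars / max(len(lines), 1),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(num_chars, 1),
        "frac_chars_whitespace": sum(c.isspace() for c in content) / max(num_chars, 1),
    }

# Example: signals for a tiny two-line snippet.
print(simple_quality_signals("import os\nprint(os.getcwd())\n"))
```

As a sanity check against the record above, num_chars / num_lines = 10,522 / 285 ≈ 36.9, which is consistent with the reported qsc_code_num_chars_line_mean_quality_signal of 36.919298.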
- hexsha: 5322235447e97ac9d03d73498451ad6c8b6d4fab
- size: 10,434
- ext: py
- lang: Python
- max_stars_repo_path: face_attribute_verification.py
- max_stars_repo_name: seymayucer/FacialPhenotypes
- max_stars_repo_head_hexsha: 043f3ecf956cad53095d93f19383c4c94e033692
- max_stars_repo_licenses: ["MIT"]
- max_stars_count: 2
- max_stars_repo_stars_event_min_datetime: 2021-03-02T22:25:32.000Z
- max_stars_repo_stars_event_max_datetime: 2021-03-06T23:53:13.000Z
- max_issues_repo_path: face_attribute_verification.py
- max_issues_repo_name: seymayucer/FacialPhenotypes
- max_issues_repo_head_hexsha: 043f3ecf956cad53095d93f19383c4c94e033692
- max_issues_repo_licenses: ["MIT"]
- max_issues_count: null
- max_issues_repo_issues_event_min_datetime: null
- max_issues_repo_issues_event_max_datetime: null
- max_forks_repo_path: face_attribute_verification.py
- max_forks_repo_name: seymayucer/FacialPhenotypes
- max_forks_repo_head_hexsha: 043f3ecf956cad53095d93f19383c4c94e033692
- max_forks_repo_licenses: ["MIT"]
- max_forks_count: 1
- max_forks_repo_forks_event_min_datetime: 2021-03-22T02:05:32.000Z
- max_forks_repo_forks_event_max_datetime: 2021-03-22T02:05:32.000Z
- content:
import argparse
import numpy as np
from sklearn.model_selection import StratifiedKFold
import sklearn
import cv2
import datetime
import mxnet as mx
from mxnet import ndarray as nd
import pandas as pd
from numpy import linalg as line
import logging
logging.basicConfig(
format="%(asctime)s %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p", level=logging.INFO
)
class FaceVerification:
def __init__(self, model=None, batch_size=32, data_dir=None):
super().__init__()
logging.info("Face Verification for RFW.")
self.data_dir = data_dir
self.image_size = 112
self.batch_size = batch_size
self.model = model
def load_model(self, model_dir=None):
logging.info("Model Loading")
ctx = mx.gpu(0)
sym, arg_params, aux_params = mx.model.load_checkpoint(model_dir, 1)
all_layers = sym.get_internals()
sym = all_layers["fc1_output"]
self.model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
self.model.bind(
data_shapes=[
("data", (self.batch_size, 3, self.image_size, self.image_size))
]
)
self.model.set_params(arg_params, aux_params)
return self.model
def load_images(self, inp_csv_file):
logging.info("Image Data Loading")
issame_list, data_list = [], []
pairs = pd.read_csv(inp_csv_file)
# data_list = list(
# np.empty((2, pairs.shape[0] * 2, 3, self.image_size, self.image_size))
# )
for flip in [0, 1]:
data = nd.empty((pairs.shape[0] * 2, 3, self.image_size, self.image_size))
data_list.append(data)
j = 0
for i, row in pairs.iterrows():
if i % 1000 == 0:
logging.info("processing {}".format(i))
issame_list.append(row.issame)
path1 = "{}/{}/{}_{:04d}.jpg".format(
self.data_dir,
row.Class_ID_s1,
row.Class_ID_s1.split("/")[1],
int(row.img_id_s1),
)
path2 = "{}/{}/{}_{:04d}.jpg".format(
self.data_dir,
row.Class_ID_s2,
row.Class_ID_s2.split("/")[1],
int(row.img_id_s2),
)
im1 = cv2.imread(path1)
im1 = cv2.cvtColor(im1, cv2.COLOR_BGR2RGB)
im1 = np.transpose(im1, (2, 0, 1)) # 3*112*112, RGB
im1 = mx.nd.array(im1)
im2 = cv2.imread(path2)
im2 = cv2.cvtColor(im2, cv2.COLOR_BGR2RGB)
im2 = np.transpose(im2, (2, 0, 1)) # 3*112*112, RGB
im2 = mx.nd.array(im2)
for flip in [0, 1]:
if flip == 1:
im1 = mx.ndarray.flip(im1, 2)
data_list[flip][j][:] = im1
for flip in [0, 1]:
if flip == 1:
im2 = mx.ndarray.flip(im2, 2)
data_list[flip][j + 1][:] = im2
# data_list[flip][i][:] = img
j = j + 2
# bins shape should be 2,12000,3,112,112
# data = np.asarray(data_list)
self.issame = np.asarray(issame_list)
self.data = data_list
logging.info("Pairs are loaded, shape: 2x{}.".format(self.data[0].shape))
return self.data, self.issame, pairs.shape
def clean_data(self):
self.data = None
self.issame = None
def verify(self, model=None):
data_list = self.data
embeddings_list = []
time_consumed = 0
_label = nd.ones((self.batch_size,))
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
while ba < data.shape[0]:
bb = min(ba + self.batch_size, data.shape[0])
count = bb - ba
_data = nd.slice_axis(data, axis=0, begin=bb - self.batch_size, end=bb)
time0 = datetime.datetime.now()
db = mx.io.DataBatch(data=(_data,), label=(_label,))
self.model.forward(db, is_train=False)
net_out = self.model.get_outputs()
_embeddings = net_out[0].asnumpy()
time_now = datetime.datetime.now()
diff = time_now - time0
time_consumed += diff.total_seconds()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[(self.batch_size - count) :, :]
ba = bb
embeddings_list.append(embeddings)
_xnorm = 0.0
_xnorm_cnt = 0
for embed in embeddings_list:
for i in range(embed.shape[0]):
_em = embed[i]
_norm = np.linalg.norm(_em)
_xnorm += _norm
_xnorm_cnt += 1
_xnorm /= _xnorm_cnt
acc1 = 0.0
std1 = 0.0
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
print(embeddings.shape)
print("infer time", time_consumed)
tpr, fpr, accuracy, best_thresholds = self.evaluate(
embeddings, self.issame, nrof_folds=10
)
acc2, std2 = np.mean(accuracy), np.std(accuracy)
logging.info("Accuracy {}".format(acc2))
return tpr, fpr, acc2, std2
def evaluate(self, embeddings, actual_issame, nrof_folds=10):
# Calculate evaluation metrics
thresholds = np.arange(-1, 1, 0.001)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy, best_thresholds = self.calculate_roc(
thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
nrof_folds=nrof_folds,
)
return tpr, fpr, accuracy, best_thresholds
def calculate_roc(
self, thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10
):
assert embeddings1.shape[1] == embeddings2.shape[1]
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
# k_fold = LFold(n_splits=nrof_folds, shuffle=False)
k_fold = StratifiedKFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
tnrs = np.zeros((nrof_folds, nrof_thresholds))
fnrs = np.zeros((nrof_folds, nrof_thresholds))
f1s = np.zeros((nrof_folds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
veclist = np.concatenate((embeddings1, embeddings2), axis=0)
meana = np.mean(veclist, axis=0)
embeddings1 -= meana
embeddings2 -= meana
dist = np.sum(embeddings1 * embeddings2, axis=1)
dist = dist / line.norm(embeddings1, axis=1) / line.norm(embeddings2, axis=1)
for fold_idx, (train_set, test_set) in enumerate(
k_fold.split(indices, actual_issame)
):
# print(train_set.shape, actual_issame[train_set].sum())
# print(test_set.shape, actual_issame[test_set].sum())
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, _, _, acc_train[threshold_idx], f1 = self.calculate_accuracy(
threshold, dist[train_set], actual_issame[train_set]
)
best_threshold_index = np.argmax(acc_train)
# print('threshold', thresholds[best_threshold_index])
for threshold_idx, threshold in enumerate(thresholds):
(
tprs[fold_idx, threshold_idx],
fprs[fold_idx, threshold_idx],
tnrs[fold_idx, threshold_idx],
fnrs[fold_idx, threshold_idx],
_,
_,
) = self.calculate_accuracy(
threshold, dist[test_set], actual_issame[test_set]
)
_, _, _, _, accuracy[fold_idx], f1s[fold_idx] = self.calculate_accuracy(
thresholds[best_threshold_index],
dist[test_set],
actual_issame[test_set],
)
tpr = np.mean(tprs, 0)[best_threshold_index]
fpr = np.mean(fprs, 0)[best_threshold_index]
# tnr = np.mean(tnrs, 0)[best_threshold_index]
# fnr = np.mean(fnrs, 0)[best_threshold_index]
return tpr, fpr, accuracy, thresholds[best_threshold_index]
def calculate_accuracy(self, threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
actual_issame = np.less(actual_issame, 0.5)
tn, fp, fn, tp = sklearn.metrics.confusion_matrix(
actual_issame, predict_issame
).ravel()
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
tnr = 0 if (fp + tn == 0) else float(tn) / float(fp + tn)
fnr = 0 if (fn + tp == 0) else float(fn) / float(fn + tp)
acc = float(tp + tn) / dist.size
f1 = sklearn.metrics.f1_score(predict_issame, actual_issame)
return tpr, fpr, tnr, fnr, acc, f1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Face Verification for RFW")
parser.add_argument(
"--data_dir", type=str, default="RFW/test/aligned_data", help="dataset root"
)
parser.add_argument(
"--pair_file",
type=str,
default="./AttributePairs/eye_narrow_pairs_6000_selected.csv",
help="pair file to test",
)
parser.add_argument(
"--model_dir", type=str, default="/model/", help="pre-trained model directory"
)
parser.add_argument("--batch_size", type=int, default="32", help="batch_size")
args = parser.parse_args()
validation = FaceVerification(
batch_size=args.batch_size, model=None, data_dir=args.data_dir
)
validation.load_model(model_dir=args.model_dir)
_, _, _shape = validation.load_images(args.pair_file)
tpr, fpr, acc, std = validation.verify()
logging.info(
"Testing Accuracy {} for {} in shape {}".format(acc, args.pair_file, _shape[0])
)
- avg_line_length: 35.610922; max_line_length: 88; alphanum_fraction: 0.568047
- qsc_code_num_words_quality_signal: 1,274; qsc_code_num_chars_quality_signal: 10,434; qsc_code_mean_word_length_quality_signal: 4.445055; qsc_code_frac_words_unique_quality_signal: 0.208006
- qsc_code_frac_chars_top_2grams_quality_signal: 0.031785; qsc_code_frac_chars_top_3grams_quality_signal: 0.025428; qsc_code_frac_chars_top_4grams_quality_signal: 0.016952
- qsc_code_frac_chars_dupe_5grams_quality_signal: 0.16599; qsc_code_frac_chars_dupe_6grams_quality_signal: 0.12202; qsc_code_frac_chars_dupe_7grams_quality_signal: 0.073636; qsc_code_frac_chars_dupe_8grams_quality_signal: 0.031785; qsc_code_frac_chars_dupe_9grams_quality_signal: 0.025428; qsc_code_frac_chars_dupe_10grams_quality_signal: 0.013774
- qsc_code_frac_chars_replacement_symbols_quality_signal: 0; qsc_code_frac_chars_digital_quality_signal: 0.029346; qsc_code_frac_chars_whitespace_quality_signal: 0.314165
- qsc_code_size_file_byte_quality_signal: 10,434; qsc_code_num_lines_quality_signal: 292; qsc_code_num_chars_line_max_quality_signal: 89; qsc_code_num_chars_line_mean_quality_signal: 35.732877
- qsc_code_frac_chars_alphabet_quality_signal: 0.762018; qsc_code_frac_chars_comments_quality_signal: 0.056354; qsc_code_cate_xml_start_quality_signal: 0; qsc_code_frac_lines_dupe_lines_quality_signal: 0.069869; qsc_code_cate_autogen_quality_signal: 0
- qsc_code_frac_lines_long_string_quality_signal: 0; qsc_code_frac_chars_string_length_quality_signal: 0.04884; qsc_code_frac_chars_long_word_length_quality_signal: 0.007326; qsc_code_frac_lines_string_concat_quality_signal: 0; qsc_code_cate_encoded_data_quality_signal: 0
- qsc_code_frac_chars_hex_words_quality_signal: 0; qsc_code_frac_lines_prompt_comments_quality_signal: 0; qsc_code_frac_lines_assert_quality_signal: 0.004367
- qsc_codepython_cate_ast_quality_signal: 1; qsc_codepython_frac_lines_func_ratio_quality_signal: 0.034935; qsc_codepython_cate_var_zero_quality_signal: false; qsc_codepython_frac_lines_pass_quality_signal: 0; qsc_codepython_frac_lines_import_quality_signal: 0.048035; qsc_codepython_frac_lines_simplefunc_quality_signal: 0; qsc_codepython_score_lines_no_logic_quality_signal: 0.113537; qsc_codepython_frac_lines_print_quality_signal: 0.008734
- qsc_code_num_words through qsc_codepython_frac_lines_print (the unsuffixed qsc_* columns): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
- hexsha: 5322e645a2731e419b7aab530efd6f637ecbe5b3
- size: 6,645
- ext: py
- lang: Python
- max_stars_repo_path: jsonform/fields.py
- max_stars_repo_name: Pix-00/jsonform
- max_stars_repo_head_hexsha: d62543474d96b258606ec38dd427693232daeda3
- max_stars_repo_licenses: ["Apache-2.0"]
- max_stars_count: null
- max_stars_repo_stars_event_min_datetime: null
- max_stars_repo_stars_event_max_datetime: null
- max_issues_repo_path: jsonform/fields.py
- max_issues_repo_name: Pix-00/jsonform
- max_issues_repo_head_hexsha: d62543474d96b258606ec38dd427693232daeda3
- max_issues_repo_licenses: ["Apache-2.0"]
- max_issues_count: null
- max_issues_repo_issues_event_min_datetime: null
- max_issues_repo_issues_event_max_datetime: null
- max_forks_repo_path: jsonform/fields.py
- max_forks_repo_name: Pix-00/jsonform
- max_forks_repo_head_hexsha: d62543474d96b258606ec38dd427693232daeda3
- max_forks_repo_licenses: ["Apache-2.0"]
- max_forks_count: null
- max_forks_repo_forks_event_min_datetime: null
- max_forks_repo_forks_event_max_datetime: null
- content:
import base64
import datetime
from abc import ABC, abstractmethod
from .conditions import AnyValue
from .errors import FieldError, FormError
__all__ = [
'Field', 'StringField', 'IntegerField', 'FloatField', 'BooleanField',
'DateTimeField', 'DateField', 'TimeField', 'ListField','SetField', 'EnumField', 'BytesField'
]
class Field(ABC):
_default = None
def __new__(cls, *args, **kwargs):
if 'init' in kwargs:
kwargs.pop('init')
return super().__new__(cls)
return UnboundField(cls, *args, **kwargs)
def __init__(self,
condition=AnyValue(),
optional: bool = False,
default=None,
init=False):
self.condition = condition
self.optional = optional
self.default = default or self._default
self._data = None
self.is_empty = False
@property
def data(self):
return self._data
def mark_empty(self):
if not self.optional:
raise FieldError('cannot be blank')
self.is_empty = True
if callable(self.default):
self._data = self.default()
else:
self._data = self.default
@abstractmethod
def process_data(self, value):
self.condition.check(self)
class UnboundField:
def __init__(self, field_cls, *args, **kwargs):
self.field_cls = field_cls
self.args = args
self.kwargs = kwargs
self.kwargs['init'] = True
def bind(self):
return self.field_cls(*self.args, **self.kwargs)
class StringField(Field):
_default = ''
def process_data(self, value):
if not isinstance(value, str):
raise FieldError('invalid string')
self._data = value
super().process_data(value)
class IntegerField(Field):
_default = 0
def process_data(self, value):
if not isinstance(value, int):
raise FieldError('invalid integer')
self._data = value
super().process_data(value)
class FloatField(Field):
_default = 0.0
def process_data(self, value):
if not isinstance(value, float):
raise FieldError('invalid float')
self._data = value
super().process_data(value)
class BooleanField(Field):
def process_data(self, value):
if not isinstance(value, bool):
raise FieldError('invalid boolean')
self._data = value
super().process_data(value)
class DateTimeField(Field):
def __init__(self, pattern='%Y-%m-%dT%H:%M:%S', **kwargs):
super().__init__(**kwargs)
self.pattern = pattern
def process_data(self, value):
try:
self._data = datetime.datetime.strptime(value, self.pattern)
except ValueError:
raise FieldError('invalid datetime')
super().process_data(value)
class DateField(DateTimeField):
def __init__(self, pattern='%Y-%m-%d', **kwargs):
super().__init__(pattern, **kwargs)
def process_data(self, value):
try:
self._data = datetime.datetime.strptime(value, self.pattern).date()
except ValueError:
raise FieldError('invalid date')
super().process_data(value)
class TimeField(DateTimeField):
def __init__(self, pattern='%H:%M:%S', **kwargs):
super().__init__(pattern, **kwargs)
def process_jsondata(self, value):
try:
self._data = datetime.datetime.strptime(value, self.pattern).time()
except ValueError:
raise FieldError('invalid time')
super().process_data(value)
class EnumField(Field):
def __init__(self, enum_class, **kwargs):
super().__init__(**kwargs)
self.enum_class = enum_class
def process_data(self, value):
try:
enum_obj = self.enum_class[value]
except KeyError:
raise FieldError('invalid enum')
self._data = enum_obj
super().process_data(value)
class BytesField(Field):
def __init__(self, length, **kwargs):
super().__init__(**kwargs)
self.length = length
def process_data(self, value):
try:
self.data = base64.decodebytes(value)
except (ValueError, TypeError):
raise FieldError('invalid base64 string')
if len(self.data) != self.length:
raise FieldError('invalid length')
super().process_data(value)
class ListField(Field):
def __init__(self, field, default=list, **kwargs):
self.field = field
self.data_ = None
super().__init__(default=default, **kwargs)
@property
def data(self):
if not self.data_:
self.data_ = [field.data for field in self._data]
return self.data_
def process_data(self, value):
if not isinstance(value, list):
raise FieldError('invalid list')
self._data = list()
e = FieldError()
for i, val in enumerate(value):
field = self.field.bind()
try:
field.process_data(val)
except FieldError as e_:
e[i] = e_.error
self._data.append(field)
if e:
raise e
super().process_data(value)
class SetField(Field):
def __init__(self, field, default=set, **kwargs):
self.field = field
self.data_ = None
super().__init__(default=default, **kwargs)
@property
def data(self):
if not self.data_:
self.data_ = {field.data for field in self._data}
return self.data_
def process_data(self, value):
if not isinstance(value, list):
raise FieldError('invalid list')
self._data = set()
e = FieldError()
for i, val in enumerate(set(value)):
field = self.field.bind()
try:
field.process_data(val)
except FieldError as e_:
e[i] = e_.error
self._data.add(field)
if e:
raise e
super().process_data(value)
class SubForm(Field):
def __init__(self, form, **kwargs):
self.form = form
kwargs.pop('condition', None)
super().__init__(**kwargs)
def process_data(self, value):
try:
self.form.process(jsondata=value)
except FormError as e_:
e = FieldError()
if e_.error:
e['error'] = e_.error
if e_.f_errors:
e['f_errors'] = e_.f_errors
raise e
self._data = {name: self.form[name] for name in self.form.fields}
- avg_line_length: 27.233607; max_line_length: 96; alphanum_fraction: 0.582844
- qsc_code_num_words_quality_signal: 743; qsc_code_num_chars_quality_signal: 6,645; qsc_code_mean_word_length_quality_signal: 4.987887; qsc_code_frac_words_unique_quality_signal: 0.145357
- qsc_code_frac_chars_top_2grams_quality_signal: 0.062601; qsc_code_frac_chars_top_3grams_quality_signal: 0.045332; qsc_code_frac_chars_top_4grams_quality_signal: 0.058284
- qsc_code_frac_chars_dupe_5grams_quality_signal: 0.527793; qsc_code_frac_chars_dupe_6grams_quality_signal: 0.427685; qsc_code_frac_chars_dupe_7grams_quality_signal: 0.382623; qsc_code_frac_chars_dupe_8grams_quality_signal: 0.350783; qsc_code_frac_chars_dupe_9grams_quality_signal: 0.288181; qsc_code_frac_chars_dupe_10grams_quality_signal: 0.264976
- qsc_code_frac_chars_replacement_symbols_quality_signal: 0; qsc_code_frac_chars_digital_quality_signal: 0.001945; qsc_code_frac_chars_whitespace_quality_signal: 0.303687
- qsc_code_size_file_byte_quality_signal: 6,645; qsc_code_num_lines_quality_signal: 243; qsc_code_num_chars_line_max_quality_signal: 97; qsc_code_num_chars_line_mean_quality_signal: 27.345679
- qsc_code_frac_chars_alphabet_quality_signal: 0.799006; qsc_code_frac_chars_comments_quality_signal: 0; qsc_code_cate_xml_start_quality_signal: 0; qsc_code_frac_lines_dupe_lines_quality_signal: 0.416667; qsc_code_cate_autogen_quality_signal: 0
- qsc_code_frac_lines_long_string_quality_signal: 0; qsc_code_frac_chars_string_length_quality_signal: 0.05523; qsc_code_frac_chars_long_word_length_quality_signal: 0; qsc_code_frac_lines_string_concat_quality_signal: 0; qsc_code_cate_encoded_data_quality_signal: 0
- qsc_code_frac_chars_hex_words_quality_signal: 0; qsc_code_frac_lines_prompt_comments_quality_signal: 0; qsc_code_frac_lines_assert_quality_signal: 0
- qsc_codepython_cate_ast_quality_signal: 1; qsc_codepython_frac_lines_func_ratio_quality_signal: 0.151042; qsc_codepython_cate_var_zero_quality_signal: false; qsc_codepython_frac_lines_pass_quality_signal: 0; qsc_codepython_frac_lines_import_quality_signal: 0.026042; qsc_codepython_frac_lines_simplefunc_quality_signal: 0.010417; qsc_codepython_score_lines_no_logic_quality_signal: 0.302083; qsc_codepython_frac_lines_print_quality_signal: 0
- qsc_code_num_words through qsc_codepython_frac_lines_print (the unsuffixed qsc_* columns): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
- hexsha: 5323be56fb9f52f802238cdfb9a7b782de3d3c6b
- size: 1,220
- ext: py
- lang: Python
- max_stars_repo_path: OpenCV-Computer-Vision-Examples-with-Python-A-Complete-Guide-for-Dummies-master/Source Code/opencv_operations/draw-circles.py
- max_stars_repo_name: Payal197bhadra/ComputerVision
- max_stars_repo_head_hexsha: d66b5037ece99b6189dd4306b2c9be67cffd14af
- max_stars_repo_licenses: ["MIT"]
- max_stars_count: 6
- max_stars_repo_stars_event_min_datetime: 2019-06-30T09:08:03.000Z
- max_stars_repo_stars_event_max_datetime: 2021-10-11T17:51:16.000Z
- max_issues_repo_path: OpenCV-Computer-Vision-Examples-with-Python-A-Complete-Guide-for-Dummies-master/Source Code/opencv_operations/draw-circles.py
- max_issues_repo_name: Payal197bhadra/ComputerVision
- max_issues_repo_head_hexsha: d66b5037ece99b6189dd4306b2c9be67cffd14af
- max_issues_repo_licenses: ["MIT"]
- max_issues_count: null
- max_issues_repo_issues_event_min_datetime: null
- max_issues_repo_issues_event_max_datetime: null
- max_forks_repo_path: OpenCV-Computer-Vision-Examples-with-Python-A-Complete-Guide-for-Dummies-master/Source Code/opencv_operations/draw-circles.py
- max_forks_repo_name: Payal197bhadra/ComputerVision
- max_forks_repo_head_hexsha: d66b5037ece99b6189dd4306b2c9be67cffd14af
- max_forks_repo_licenses: ["MIT"]
- max_forks_count: 3
- max_forks_repo_forks_event_min_datetime: 2020-01-01T17:41:10.000Z
- max_forks_repo_forks_event_max_datetime: 2021-04-22T22:21:56.000Z
- content:
import numpy as np
import cv2
#define a canvas of size 300x300 px, with 3 channels (R,G,B) and data type as 8 bit unsigned integer
canvas = np.zeros((300,300,3), dtype ="uint8")
#define color
#draw a circle
#arguments are canvas/image, midpoint, radius, color, thickness(optional)
#display in cv2 window
green = (0,255,0)
cv2.circle(canvas,(100,100), 10, green)
cv2.imshow("Single circle", canvas)
cv2.waitKey(0)
# draw concentric white circles
# calculate the center point of canvas
# generate circles using for loop
# clearning the canvas
canvas = np.zeros((300,300,3), dtype ="uint8")
white = (255,255,255)
(centerX, centerY) = (canvas.shape[1]//2, canvas.shape[0]//2)
for r in range(0,175,25):
cv2.circle(canvas, (centerX,centerY), r, white)
cv2.imshow("concentric circles", canvas)
cv2.waitKey(0)
# generate random radius, center point, color
# draw circles in for loop
canvas = np.zeros((300,300,3), dtype ="uint8")
for i in range(0, 25):
radius = np.random.randint(5, high = 200)
color = np.random.randint(0, high = 256, size = (3,)).tolist()
pt = np.random.randint(0, high = 300, size = (2,))
cv2.circle(canvas, tuple(pt), radius, color, -1)
cv2.imshow("Canvas", canvas)
cv2.waitKey(0)
- avg_line_length: 30.5; max_line_length: 100; alphanum_fraction: 0.696721
- qsc_code_num_words_quality_signal: 200; qsc_code_num_chars_quality_signal: 1,220; qsc_code_mean_word_length_quality_signal: 4.25; qsc_code_frac_words_unique_quality_signal: 0.4
- qsc_code_frac_chars_top_2grams_quality_signal: 0.056471; qsc_code_frac_chars_top_3grams_quality_signal: 0.045882; qsc_code_frac_chars_top_4grams_quality_signal: 0.056471
- qsc_code_frac_chars_dupe_5grams_quality_signal: 0.152941; qsc_code_frac_chars_dupe_6grams_quality_signal: 0.105882; qsc_code_frac_chars_dupe_7grams_quality_signal: 0.105882; qsc_code_frac_chars_dupe_8grams_quality_signal: 0.105882; qsc_code_frac_chars_dupe_9grams_quality_signal: 0; qsc_code_frac_chars_dupe_10grams_quality_signal: 0
- qsc_code_frac_chars_replacement_symbols_quality_signal: 0; qsc_code_frac_chars_digital_quality_signal: 0.092664; qsc_code_frac_chars_whitespace_quality_signal: 0.15082
- qsc_code_size_file_byte_quality_signal: 1,220; qsc_code_num_lines_quality_signal: 40; qsc_code_num_chars_line_max_quality_signal: 101; qsc_code_num_chars_line_mean_quality_signal: 30.5
- qsc_code_frac_chars_alphabet_quality_signal: 0.727799; qsc_code_frac_chars_comments_quality_signal: 0.332787; qsc_code_cate_xml_start_quality_signal: 0; qsc_code_frac_lines_dupe_lines_quality_signal: 0.272727; qsc_code_cate_autogen_quality_signal: 0
- qsc_code_frac_lines_long_string_quality_signal: 0; qsc_code_frac_chars_string_length_quality_signal: 0.064677; qsc_code_frac_chars_long_word_length_quality_signal: 0; qsc_code_frac_lines_string_concat_quality_signal: 0; qsc_code_cate_encoded_data_quality_signal: 0
- qsc_code_frac_chars_hex_words_quality_signal: 0; qsc_code_frac_lines_prompt_comments_quality_signal: 0; qsc_code_frac_lines_assert_quality_signal: 0
- qsc_codepython_cate_ast_quality_signal: 1; qsc_codepython_frac_lines_func_ratio_quality_signal: 0; qsc_codepython_cate_var_zero_quality_signal: false; qsc_codepython_frac_lines_pass_quality_signal: 0; qsc_codepython_frac_lines_import_quality_signal: 0.090909; qsc_codepython_frac_lines_simplefunc_quality_signal: 0; qsc_codepython_score_lines_no_logic_quality_signal: 0.090909; qsc_codepython_frac_lines_print_quality_signal: 0
- qsc_code_num_words through qsc_codepython_frac_lines_print (the unsuffixed qsc_* columns): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
- hexsha: 5323c4b7d5f3632bee1bd22e2b1ceebf3d070d46
- size: 1,648
- ext: py
- lang: Python
- max_stars_repo_path: tmux_cssh/main.py
- max_stars_repo_name: cscutcher/tmux_cssh
- max_stars_repo_head_hexsha: bfbb7eb26d5f5864c0888fa8e614122401ed4f5f
- max_stars_repo_licenses: ["Unlicense"]
- max_stars_count: null
- max_stars_repo_stars_event_min_datetime: null
- max_stars_repo_stars_event_max_datetime: null
- max_issues_repo_path: tmux_cssh/main.py
- max_issues_repo_name: cscutcher/tmux_cssh
- max_issues_repo_head_hexsha: bfbb7eb26d5f5864c0888fa8e614122401ed4f5f
- max_issues_repo_licenses: ["Unlicense"]
- max_issues_count: null
- max_issues_repo_issues_event_min_datetime: null
- max_issues_repo_issues_event_max_datetime: null
- max_forks_repo_path: tmux_cssh/main.py
- max_forks_repo_name: cscutcher/tmux_cssh
- max_forks_repo_head_hexsha: bfbb7eb26d5f5864c0888fa8e614122401ed4f5f
- max_forks_repo_licenses: ["Unlicense"]
- max_forks_count: null
- max_forks_repo_forks_event_min_datetime: null
- max_forks_repo_forks_event_max_datetime: null
- content:
# -*- coding: utf-8 -*-
"""
Main Script
"""
import logging
import argh
import sarge
import tmuxp
DEV_LOGGER = logging.getLogger(__name__)
def get_current_session(server=None):
'''
Seems to be no easy way to grab current attached session in tmuxp so
this provides a simple alternative.
'''
server = tmuxp.Server() if server is None else server
session_name = sarge.get_stdout('tmux display-message -p "#S"').strip()
session = server.findWhere({"session_name": session_name})
return session
@argh.arg('commands', nargs='+')
def clustered_window(commands):
'''
Creates new clustered window on session with commands.
A clustered session is one where you operate on all panes/commands at once
using the synchronized-panes option.
:param commands: Sequence of commands. Each one will run in its own pane.
'''
session = get_current_session()
window = session.new_window()
# Create additional panes
while len(window.panes) < len(commands):
window.panes[-1].split_window()
for pane, command in zip(window.panes, commands):
pane.send_keys(command)
window.select_layout('tiled')
window.set_window_option('synchronize-panes', 'on')
return window
@argh.arg('hosts', nargs='+')
def clustered_ssh(hosts):
'''
Creates new cluster window with an ssh connection to each host.
A clustered session is one where you operate on all panes/commands at once
using the synchronized-panes option.
:param hosts: Sequence of hosts to connect to.
'''
return clustered_window(
['ssh \'{}\''.format(host) for host in hosts])
- avg_line_length: 26.580645; max_line_length: 78; alphanum_fraction: 0.68932
- qsc_code_num_words_quality_signal: 223; qsc_code_num_chars_quality_signal: 1,648; qsc_code_mean_word_length_quality_signal: 4.995516; qsc_code_frac_words_unique_quality_signal: 0.452915
- qsc_code_frac_chars_top_2grams_quality_signal: 0.029623; qsc_code_frac_chars_top_3grams_quality_signal: 0.030521; qsc_code_frac_chars_top_4grams_quality_signal: 0.034111
- qsc_code_frac_chars_dupe_5grams_quality_signal: 0.174147; qsc_code_frac_chars_dupe_6grams_quality_signal: 0.174147; qsc_code_frac_chars_dupe_7grams_quality_signal: 0.174147; qsc_code_frac_chars_dupe_8grams_quality_signal: 0.174147; qsc_code_frac_chars_dupe_9grams_quality_signal: 0.174147; qsc_code_frac_chars_dupe_10grams_quality_signal: 0.174147
- qsc_code_frac_chars_replacement_symbols_quality_signal: 0; qsc_code_frac_chars_digital_quality_signal: 0.001529; qsc_code_frac_chars_whitespace_quality_signal: 0.206311
- qsc_code_size_file_byte_quality_signal: 1,648; qsc_code_num_lines_quality_signal: 61; qsc_code_num_chars_line_max_quality_signal: 79; qsc_code_num_chars_line_mean_quality_signal: 27.016393
- qsc_code_frac_chars_alphabet_quality_signal: 0.850153; qsc_code_frac_chars_comments_quality_signal: 0.382888; qsc_code_cate_xml_start_quality_signal: 0; qsc_code_frac_lines_dupe_lines_quality_signal: 0; qsc_code_cate_autogen_quality_signal: 0
- qsc_code_frac_lines_long_string_quality_signal: 0; qsc_code_frac_chars_string_length_quality_signal: 0.08984; qsc_code_frac_chars_long_word_length_quality_signal: 0; qsc_code_frac_lines_string_concat_quality_signal: 0; qsc_code_cate_encoded_data_quality_signal: 0
- qsc_code_frac_chars_hex_words_quality_signal: 0; qsc_code_frac_lines_prompt_comments_quality_signal: 0; qsc_code_frac_lines_assert_quality_signal: 0
- qsc_codepython_cate_ast_quality_signal: 1; qsc_codepython_frac_lines_func_ratio_quality_signal: 0.12; qsc_codepython_cate_var_zero_quality_signal: false; qsc_codepython_frac_lines_pass_quality_signal: 0; qsc_codepython_frac_lines_import_quality_signal: 0.16; qsc_codepython_frac_lines_simplefunc_quality_signal: 0; qsc_codepython_score_lines_no_logic_quality_signal: 0.4; qsc_codepython_frac_lines_print_quality_signal: 0
- qsc_code_num_words through qsc_codepython_frac_lines_print (the unsuffixed qsc_* columns): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
- hexsha: 5326a402c4dd86ad307f49de1d63c62b7a921bd6
- size: 10,288
- ext: py
- lang: Python
- max_stars_repo_path: arch2vec/search_methods/reinforce_darts.py
- max_stars_repo_name: gabrielasuchopar/arch2vec
- max_stars_repo_head_hexsha: 1fc47d2cc7d63832e0d6337b8482669366b4aef2
- max_stars_repo_licenses: ["Apache-2.0"]
- max_stars_count: null
- max_stars_repo_stars_event_min_datetime: null
- max_stars_repo_stars_event_max_datetime: null
- max_issues_repo_path: arch2vec/search_methods/reinforce_darts.py
- max_issues_repo_name: gabrielasuchopar/arch2vec
- max_issues_repo_head_hexsha: 1fc47d2cc7d63832e0d6337b8482669366b4aef2
- max_issues_repo_licenses: ["Apache-2.0"]
- max_issues_count: null
- max_issues_repo_issues_event_min_datetime: null
- max_issues_repo_issues_event_max_datetime: null
- max_forks_repo_path: arch2vec/search_methods/reinforce_darts.py
- max_forks_repo_name: gabrielasuchopar/arch2vec
- max_forks_repo_head_hexsha: 1fc47d2cc7d63832e0d6337b8482669366b4aef2
- max_forks_repo_licenses: ["Apache-2.0"]
- max_forks_count: null
- max_forks_repo_forks_event_min_datetime: null
- max_forks_repo_forks_event_max_datetime: null
- content:
import os
import sys
import argparse
import json
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from arch2vec.models.pretraining_nasbench101 import configs
from arch2vec.utils import load_json, preprocessing, one_hot_darts
from arch2vec.preprocessing.gen_isomorphism_graphs import process
from arch2vec.models.model import Model
from torch.distributions import MultivariateNormal
from arch2vec.darts.cnn.train_search import Train
class Env(object):
def __init__(self, name, seed, cfg, data_path=None, save=False):
self.name = name
self.seed = seed
self.model = Model(input_dim=args.input_dim, hidden_dim=args.hidden_dim, latent_dim=args.dim,
num_hops=args.hops, num_mlp_layers=args.mlps, dropout=args.dropout, **cfg['GAE']).cuda()
self.dir_name = 'pretrained/dim-{}'.format(args.dim)
if not os.path.exists(os.path.join(self.dir_name, 'model-darts.pt')):
exit()
self.model.load_state_dict(torch.load(os.path.join(self.dir_name, 'model-darts.pt').format(args.dim))['model_state'])
self.visited = {}
self.features = []
self.genotype = []
self.embedding = {}
self._reset(data_path, save)
def _reset(self, data_path, save):
if not save:
print("extract arch2vec on DARTS search space ...")
dataset = load_json(data_path)
print("length of the dataset: {}".format(len(dataset)))
self.f_path = os.path.join(self.dir_name, 'arch2vec-darts.pt')
if os.path.exists(self.f_path):
print('{} is already saved'.format(self.f_path))
exit()
print('save to {}'.format(self.f_path))
counter = 0
self.model.eval()
for k, v in dataset.items():
adj = torch.Tensor(v[0]).unsqueeze(0).cuda()
ops = torch.Tensor(one_hot_darts(v[1])).unsqueeze(0).cuda()
adj, ops, prep_reverse = preprocessing(adj, ops, **cfg['prep'])
with torch.no_grad():
x, _ = self.model._encoder(ops, adj)
self.embedding[counter] = {'feature': x.squeeze(0).mean(dim=0).cpu(), 'genotype': process(v[2])}
print("{}/{}".format(counter, len(dataset)))
counter += 1
torch.save(self.embedding, self.f_path)
print("finished arch2vec extraction")
exit()
else:
self.f_path = os.path.join(self.dir_name, 'arch2vec-darts.pt')
print("load arch2vec from: {}".format(self.f_path))
self.embedding = torch.load(self.f_path)
for ind in range(len(self.embedding)):
self.features.append(self.embedding[ind]['feature'])
self.genotype.append(self.embedding[ind]['genotype'])
self.features = torch.stack(self.features, dim=0)
print('loading finished. pretrained embeddings shape: {}'.format(self.features.shape))
def get_init_state(self):
"""
:return: 1 x dim
"""
rand_indices = random.randint(0, self.features.shape[0])
self.visited[rand_indices] = True
return self.features[rand_indices], self.genotype[rand_indices]
def step(self, action):
"""
action: 1 x dim
self.features. N x dim
"""
dist = torch.norm(self.features - action.cpu(), dim=1)
knn = (-1 * dist).topk(dist.shape[0])
min_dist, min_idx = knn.values, knn.indices
count = 0
while True:
if len(self.visited) == dist.shape[0]:
print("CANNOT FIND IN THE DATASET!")
exit()
if min_idx[count].item() not in self.visited:
self.visited[min_idx[count].item()] = True
break
count += 1
return self.features[min_idx[count].item()], self.genotype[min_idx[count].item()]
class Policy(nn.Module):
def __init__(self, hidden_dim1, hidden_dim2):
super(Policy, self).__init__()
self.fc1 = nn.Linear(hidden_dim1, hidden_dim2)
self.fc2 = nn.Linear(hidden_dim2, hidden_dim1)
self.saved_log_probs = []
self.rewards = []
def forward(self, input):
x = F.relu(self.fc1(input))
out = self.fc2(x)
return out
class Policy_LSTM(nn.Module):
def __init__(self, hidden_dim1, hidden_dim2):
super(Policy_LSTM, self).__init__()
self.lstm = torch.nn.LSTMCell(input_size=hidden_dim1, hidden_size=hidden_dim2)
self.fc = nn.Linear(hidden_dim2, hidden_dim1)
self.saved_log_probs = []
self.rewards = []
self.hx = None
self.cx = None
def forward(self, input):
if self.hx is None and self.cx is None:
self.hx, self.cx = self.lstm(input)
else:
self.hx, self.cx = self.lstm(input, (self.hx, self.cx))
mean = self.fc(self.hx)
return mean
def select_action(state, policy):
"""
MVN based action selection.
:param state: 1 x dim
:param policy: policy network
:return: selected action: 1 x dim
"""
mean = policy(state.view(1, state.shape[0]))
mvn = MultivariateNormal(mean, torch.eye(state.shape[0]).cuda())
action = mvn.sample()
policy.saved_log_probs.append(torch.mean(mvn.log_prob(action)))
return action
def finish_episode(policy, optimizer):
R = 0
policy_loss = []
returns = []
for r in policy.rewards:
R = r + args.gamma * R
returns.append(R)
returns = torch.Tensor(policy.rewards)
val, indices = torch.sort(returns)
print("sorted validation reward:", val)
returns = returns - args.objective
for log_prob, R in zip(policy.saved_log_probs, returns):
policy_loss.append(-log_prob * R)
optimizer.zero_grad()
policy_loss = torch.mean(torch.stack(policy_loss, dim=0))
print("average reward: {}, policy loss: {}".format(sum(policy.rewards)/len(policy.rewards), policy_loss.item()))
policy_loss.backward()
optimizer.step()
del policy.rewards[:]
del policy.saved_log_probs[:]
policy.hx = None
policy.cx = None
def query(counter, seed, genotype, epochs):
trainer = Train()
rewards, rewards_test = trainer.main(counter, seed, genotype, epochs=epochs, train_portion=args.train_portion, save=args.logging_path)
val_sum = 0
for epoch, val_acc in rewards:
val_sum += val_acc
val_avg = val_sum / len(rewards)
return val_avg / 100. , rewards_test[-1][-1] / 100.
def reinforce_search(env):
""" implementation of arch2vec-RL on DARTS Search Space """
policy = Policy_LSTM(args.dim, 128).cuda()
optimizer = optim.Adam(policy.parameters(), lr=1e-2)
counter = 0
MAX_BUDGET = args.max_budgets
state, genotype = env.get_init_state()
CURR_BEST_VALID = 0
CURR_BEST_TEST = 0
CURR_BEST_GENOTYPE = None
test_trace = []
valid_trace = []
genotype_trace = []
counter_trace = []
while counter < MAX_BUDGET:
for c in range(args.bs):
state = state.cuda()
action = select_action(state, policy)
state, genotype = env.step(action)
reward, reward_test = query(counter=counter, seed=args.seed, genotype=genotype, epochs=args.inner_epochs)
policy.rewards.append(reward)
counter += 1
print('counter: {}, validation reward: {}, test reward: {}, genotype: {}'.format(counter, reward, reward_test, genotype))
if reward > CURR_BEST_VALID:
CURR_BEST_VALID = reward
CURR_BEST_TEST = reward_test
CURR_BEST_GENOTYPE = genotype
valid_trace.append(float(CURR_BEST_VALID))
test_trace.append(float(CURR_BEST_TEST))
genotype_trace.append(CURR_BEST_GENOTYPE)
counter_trace.append(counter)
if counter >= MAX_BUDGET:
break
finish_episode(policy, optimizer)
res = dict()
res['validation_acc'] = valid_trace
res['test_acc'] = test_trace
res['genotype'] = genotype_trace
res['counter'] = counter_trace
save_path = os.path.join(args.output_path, 'dim{}'.format(args.dim))
if not os.path.exists(save_path):
os.mkdir(save_path)
print('save to {}'.format(save_path))
fh = open(os.path.join(save_path, 'run_{}_arch2vec_model_darts.json'.format(args.seed)), 'w')
json.dump(res, fh)
fh.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="arch2vec-REINFORCE")
parser.add_argument("--gamma", type=float, default=0.8, help="discount factor (default 0.99)")
parser.add_argument("--seed", type=int, default=3, help="random seed")
parser.add_argument('--cfg', type=int, default=4, help='configuration (default: 4)')
parser.add_argument('--bs', type=int, default=16, help='batch size')
parser.add_argument('--objective', type=float, default=0.95, help='rl baseline')
parser.add_argument('--max_budgets', type=int, default=100, help='number of queries')
parser.add_argument('--inner_epochs', type=int, default=50, help='inner loop epochs')
parser.add_argument('--train_portion', type=float, default=0.9, help='train/validation split portion')
parser.add_argument('--output_path', type=str, default='rl', help='rl/bo (default: rl)')
parser.add_argument('--logging_path', type=str, default='', help='search logging path')
parser.add_argument('--saved_arch2vec', action="store_true", default=False)
parser.add_argument('--input_dim', type=int, default=11)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--dim', type=int, default=16,
help='feature dimension (default: 16)')
parser.add_argument('--hops', type=int, default=5)
parser.add_argument('--mlps', type=int, default=2)
parser.add_argument('--dropout', type=float, default=0.3)
args = parser.parse_args()
cfg = configs[args.cfg]
env = Env('REINFORCE', args.seed, cfg, data_path='data/data_darts_counter600000.json', save=args.saved_arch2vec)
torch.manual_seed(args.seed)
reinforce_search(env)
- avg_line_length: 39.722008; max_line_length: 138; alphanum_fraction: 0.629374
- qsc_code_num_words_quality_signal: 1,350; qsc_code_num_chars_quality_signal: 10,288; qsc_code_mean_word_length_quality_signal: 4.62963; qsc_code_frac_words_unique_quality_signal: 0.201481
- qsc_code_frac_chars_top_2grams_quality_signal: 0.02448; qsc_code_frac_chars_top_3grams_quality_signal: 0.04624; qsc_code_frac_chars_top_4grams_quality_signal: 0.00896
- qsc_code_frac_chars_dupe_5grams_quality_signal: 0.09152; qsc_code_frac_chars_dupe_6grams_quality_signal: 0.07744; qsc_code_frac_chars_dupe_7grams_quality_signal: 0.07744; qsc_code_frac_chars_dupe_8grams_quality_signal: 0.06944; qsc_code_frac_chars_dupe_9grams_quality_signal: 0.06944; qsc_code_frac_chars_dupe_10grams_quality_signal: 0.04832
- qsc_code_frac_chars_replacement_symbols_quality_signal: 0; qsc_code_frac_chars_digital_quality_signal: 0.015028; qsc_code_frac_chars_whitespace_quality_signal: 0.236781
- qsc_code_size_file_byte_quality_signal: 10,288; qsc_code_num_lines_quality_signal: 258; qsc_code_num_chars_line_max_quality_signal: 139; qsc_code_num_chars_line_mean_quality_signal: 39.875969
- qsc_code_frac_chars_alphabet_quality_signal: 0.780948; qsc_code_frac_chars_comments_quality_signal: 0.021676; qsc_code_cate_xml_start_quality_signal: 0; qsc_code_frac_lines_dupe_lines_quality_signal: 0.102804; qsc_code_cate_autogen_quality_signal: 0
- qsc_code_frac_lines_long_string_quality_signal: 0; qsc_code_frac_chars_string_length_quality_signal: 0.103538; qsc_code_frac_chars_long_word_length_quality_signal: 0.006615; qsc_code_frac_lines_string_concat_quality_signal: 0; qsc_code_cate_encoded_data_quality_signal: 0
- qsc_code_frac_chars_hex_words_quality_signal: 0; qsc_code_frac_lines_prompt_comments_quality_signal: 0; qsc_code_frac_lines_assert_quality_signal: 0
- qsc_codepython_cate_ast_quality_signal: 1; qsc_codepython_frac_lines_func_ratio_quality_signal: 0.056075; qsc_codepython_cate_var_zero_quality_signal: false; qsc_codepython_frac_lines_pass_quality_signal: 0; qsc_codepython_frac_lines_import_quality_signal: 0.070093; qsc_codepython_frac_lines_simplefunc_quality_signal: 0; qsc_codepython_score_lines_no_logic_quality_signal: 0.168224; qsc_codepython_frac_lines_print_quality_signal: 0.060748
- qsc_code_num_words through qsc_codepython_frac_lines_print (the unsuffixed qsc_* columns): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
- hexsha: 53285c4fc141853fde6ba747fb42c02369b8ef62
- size: 2,326
- ext: py
- lang: Python
- max_stars_repo_path: setup.py
- max_stars_repo_name: mentaal/r_map
- max_stars_repo_head_hexsha: 42986e90b31018b1e7fc992a53b0f5f6e559253f
- max_stars_repo_licenses: ["MIT"]
- max_stars_count: null
- max_stars_repo_stars_event_min_datetime: null
- max_stars_repo_stars_event_max_datetime: null
- max_issues_repo_path: setup.py
- max_issues_repo_name: mentaal/r_map
- max_issues_repo_head_hexsha: 42986e90b31018b1e7fc992a53b0f5f6e559253f
- max_issues_repo_licenses: ["MIT"]
- max_issues_count: null
- max_issues_repo_issues_event_min_datetime: null
- max_issues_repo_issues_event_max_datetime: null
- max_forks_repo_path: setup.py
- max_forks_repo_name: mentaal/r_map
- max_forks_repo_head_hexsha: 42986e90b31018b1e7fc992a53b0f5f6e559253f
- max_forks_repo_licenses: ["MIT"]
- max_forks_count: null
- max_forks_repo_forks_event_min_datetime: null
- max_forks_repo_forks_event_max_datetime: null
- content:
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name='r_map', # Required
version='0.9.0', # Required
description='A data structure for working with register map information', # Required
long_description=long_description, # Optional
long_description_content_type='text/markdown', # Optional (see note above)
url='https://github.com/mentaal/r_map', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Gregory Kuhn', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='gregorykuhn@gmail.com', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
],
keywords='register bitfield registermap', # Optional
packages=['r_map'],
python_requires='>=3.6',
project_urls={ # Optional
'Bug Reports': 'https://github.com/mentaal/r_map/issues',
'Source': 'https://github.com/mentaal/r_map',
},
)
- avg_line_length: 34.205882; max_line_length: 89; alphanum_fraction: 0.676698
- qsc_code_num_words_quality_signal: 300; qsc_code_num_chars_quality_signal: 2,326; qsc_code_mean_word_length_quality_signal: 5.183333; qsc_code_frac_words_unique_quality_signal: 0.546667
- qsc_code_frac_chars_top_2grams_quality_signal: 0.048232; qsc_code_frac_chars_top_3grams_quality_signal: 0.036013; qsc_code_frac_chars_top_4grams_quality_signal: 0.040514
- qsc_code_frac_chars_dupe_5grams_quality_signal: 0.048232; qsc_code_frac_chars_dupe_6grams_quality_signal: 0.048232; qsc_code_frac_chars_dupe_7grams_quality_signal: 0; qsc_code_frac_chars_dupe_8grams_quality_signal: 0; qsc_code_frac_chars_dupe_9grams_quality_signal: 0; qsc_code_frac_chars_dupe_10grams_quality_signal: 0
- qsc_code_frac_chars_replacement_symbols_quality_signal: 0; qsc_code_frac_chars_digital_quality_signal: 0.007743; qsc_code_frac_chars_whitespace_quality_signal: 0.2227
- qsc_code_size_file_byte_quality_signal: 2,326; qsc_code_num_lines_quality_signal: 67; qsc_code_num_chars_line_max_quality_signal: 90; qsc_code_num_chars_line_mean_quality_signal: 34.716418
- qsc_code_frac_chars_alphabet_quality_signal: 0.852323; qsc_code_frac_chars_comments_quality_signal: 0.462167; qsc_code_cate_xml_start_quality_signal: 0; qsc_code_frac_lines_dupe_lines_quality_signal: 0; qsc_code_cate_autogen_quality_signal: 0
- qsc_code_frac_lines_long_string_quality_signal: 0; qsc_code_frac_chars_string_length_quality_signal: 0.384679; qsc_code_frac_chars_long_word_length_quality_signal: 0.017298; qsc_code_frac_lines_string_concat_quality_signal: 0; qsc_code_cate_encoded_data_quality_signal: 0
- qsc_code_frac_chars_hex_words_quality_signal: 0; qsc_code_frac_lines_prompt_comments_quality_signal: 0; qsc_code_frac_lines_assert_quality_signal: 0
- qsc_codepython_cate_ast_quality_signal: 1; qsc_codepython_frac_lines_func_ratio_quality_signal: 0; qsc_codepython_cate_var_zero_quality_signal: false; qsc_codepython_frac_lines_pass_quality_signal: 0; qsc_codepython_frac_lines_import_quality_signal: 0.1; qsc_codepython_frac_lines_simplefunc_quality_signal: 0; qsc_codepython_score_lines_no_logic_quality_signal: 0.1; qsc_codepython_frac_lines_print_quality_signal: 0
- qsc_code_num_words through qsc_codepython_frac_lines_print (the unsuffixed qsc_* columns): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null
- effective: 1; hits: 0
- hexsha: 532c5feeb5220f24428fb820adb0794dc7c4ef05
- size: 115,103
- ext: py
- lang: Python
- max_stars_repo_path: adanet/core/estimator_test.py
- max_stars_repo_name: eustomaqua/adanet
- max_stars_repo_head_hexsha: 9c1de82428a4e661768af8e764041afebfec2e6f
- max_stars_repo_licenses: ["Apache-2.0"]
- max_stars_count: null
- max_stars_repo_stars_event_min_datetime: null
- max_stars_repo_stars_event_max_datetime: null
- max_issues_repo_path: adanet/core/estimator_test.py
- max_issues_repo_name: eustomaqua/adanet
- max_issues_repo_head_hexsha: 9c1de82428a4e661768af8e764041afebfec2e6f
- max_issues_repo_licenses: ["Apache-2.0"]
- max_issues_count: null
- max_issues_repo_issues_event_min_datetime: null
- max_issues_repo_issues_event_max_datetime: null
- max_forks_repo_path: adanet/core/estimator_test.py
- max_forks_repo_name: eustomaqua/adanet
- max_forks_repo_head_hexsha: 9c1de82428a4e661768af8e764041afebfec2e6f
- max_forks_repo_licenses: ["Apache-2.0"]
- max_forks_count: null
- max_forks_repo_forks_event_min_datetime: null
- max_forks_repo_forks_event_max_datetime: null
- content:
"""Test AdaNet estimator single graph implementation.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import time
from absl import logging
from absl.testing import parameterized
from adanet import replay
from adanet import tf_compat
from adanet.core import testing_utils as tu
from adanet.core.estimator import Estimator
from adanet.core.evaluator import Evaluator
from adanet.core.report_materializer import ReportMaterializer
from adanet.distributed.placement import RoundRobinStrategy
from adanet.ensemble import AllStrategy
from adanet.ensemble import ComplexityRegularizedEnsembler
from adanet.ensemble import GrowStrategy
from adanet.ensemble import MixtureWeightType
from adanet.ensemble import SoloStrategy
from adanet.subnetwork import Builder
from adanet.subnetwork import Generator
from adanet.subnetwork import MaterializedReport
from adanet.subnetwork import Report
from adanet.subnetwork import SimpleGenerator
from adanet.subnetwork import Subnetwork
from adanet.subnetwork import TrainOpSpec
import numpy as np
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.tools import saved_model_utils
# pylint: enable=g-direct-tensorflow-import
from tensorflow_estimator.python.estimator.canned.head import _binary_logistic_head_with_sigmoid_cross_entropy_loss as binary_class_head_v1
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.head import binary_class_head
from tensorflow_estimator.python.estimator.head import multi_head as multi_head_lib
from tensorflow_estimator.python.estimator.head import regression_head
logging.set_verbosity(logging.INFO)
XOR_FEATURES = [[1., 0.], [0., 0], [0., 1.], [1., 1.]]
XOR_LABELS = [[1.], [0.], [1.], [0.]]
class _DNNBuilder(Builder):
"""A simple DNN subnetwork builder."""
def __init__(self,
name,
learning_rate=.001,
mixture_weight_learning_rate=.001,
return_penultimate_layer=True,
layer_size=1,
subnetwork_chief_hooks=None,
subnetwork_hooks=None,
mixture_weight_chief_hooks=None,
mixture_weight_hooks=None,
seed=13):
self._name = name
self._learning_rate = learning_rate
self._mixture_weight_learning_rate = mixture_weight_learning_rate
self._return_penultimate_layer = return_penultimate_layer
self._layer_size = layer_size
self._subnetwork_chief_hooks = subnetwork_chief_hooks
self._subnetwork_hooks = subnetwork_hooks
self._mixture_weight_chief_hooks = mixture_weight_chief_hooks
self._mixture_weight_hooks = mixture_weight_hooks
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
seed = self._seed
if previous_ensemble:
# Increment seed so different iterations don't learn the exact same thing.
seed += 1
with tf_compat.v1.variable_scope("dnn"):
persisted_tensors = {}
with tf_compat.v1.variable_scope("hidden_layer"):
w = tf_compat.v1.get_variable(
shape=[2, self._layer_size],
initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed),
name="weight")
disjoint_op = tf.constant([1], name="disjoint_op")
with tf_compat.v1.colocate_with(disjoint_op): # tests b/118865235
hidden_layer = tf.matmul(features["x"], w)
if previous_ensemble:
other_hidden_layer = previous_ensemble.weighted_subnetworks[
-1].subnetwork.persisted_tensors["hidden_layer"]
hidden_layer = tf.concat([hidden_layer, other_hidden_layer], axis=1)
# Use a leaky-relu activation so that gradients can flow even when
# outputs are negative. Leaky relu has a non-zero slope when x < 0.
# Otherwise success at learning is completely dependent on random seed.
hidden_layer = tf.nn.leaky_relu(hidden_layer, alpha=.2)
persisted_tensors["hidden_layer"] = hidden_layer
if training:
# This change will only be in the next iteration if
# `freeze_training_graph` is `True`.
persisted_tensors["hidden_layer"] = 2 * hidden_layer
last_layer = hidden_layer
with tf_compat.v1.variable_scope("logits"):
logits = tf_compat.v1.layers.dense(
hidden_layer,
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed))
summary.scalar("scalar", 3)
batch_size = features["x"].get_shape().as_list()[0]
summary.image("image", tf.ones([batch_size, 3, 3, 1]))
with tf_compat.v1.variable_scope("nested"):
summary.scalar("scalar", 5)
return Subnetwork(
last_layer=last_layer if self._return_penultimate_layer else logits,
logits=logits,
complexity=3,
persisted_tensors=persisted_tensors,
shared=persisted_tensors)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(
learning_rate=self._learning_rate)
train_op = optimizer.minimize(loss, var_list=var_list)
if not self._subnetwork_hooks:
return train_op
return TrainOpSpec(train_op, self._subnetwork_chief_hooks,
self._subnetwork_hooks)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(
learning_rate=self._mixture_weight_learning_rate)
train_op = optimizer.minimize(loss, var_list=var_list)
if not self._mixture_weight_hooks:
return train_op
return TrainOpSpec(train_op, self._mixture_weight_chief_hooks,
self._mixture_weight_hooks)
def build_subnetwork_report(self):
return Report(
hparams={"layer_size": self._layer_size},
attributes={"complexity": tf.constant(3, dtype=tf.int32)},
metrics={
"moo": (tf.constant(3,
dtype=tf.int32), tf.constant(3, dtype=tf.int32))
})
class _SimpleBuilder(Builder):
"""A simple subnetwork builder that takes feature_columns."""
def __init__(self, name, feature_columns, seed=42):
self._name = name
self._feature_columns = feature_columns
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
seed = self._seed
if previous_ensemble:
# Increment seed so different iterations don't learn the exact same thing.
seed += 1
with tf_compat.v1.variable_scope("simple"):
input_layer = tf_compat.v1.feature_column.input_layer(
features=features, feature_columns=self._feature_columns)
last_layer = input_layer
with tf_compat.v1.variable_scope("logits"):
logits = tf_compat.v1.layers.dense(
last_layer,
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(seed=seed))
return Subnetwork(
last_layer=last_layer,
logits=logits,
complexity=1,
persisted_tensors={},
)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
class _NanLossBuilder(Builder):
"""A subnetwork builder always produces a NaN loss."""
@property
def name(self):
return "nan"
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
logits = tf_compat.v1.layers.dense(
features["x"],
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
seed=42)) * np.nan
return Subnetwork(last_layer=logits, logits=logits, complexity=0)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
return tf.no_op()
class _LinearBuilder(Builder):
"""A simple linear subnetwork builder."""
def __init__(self, name, mixture_weight_learning_rate=.001, seed=42):
self._name = name
self._mixture_weight_learning_rate = mixture_weight_learning_rate
self._seed = seed
@property
def name(self):
return self._name
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
logits = tf_compat.v1.layers.dense(
features["x"],
logits_dimension,
kernel_initializer=tf_compat.v1.glorot_uniform_initializer(
seed=self._seed))
return Subnetwork(
last_layer=features["x"],
logits=logits,
complexity=1,
persisted_tensors={},
)
def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,
iteration_step, summary, previous_ensemble):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(learning_rate=.001)
return optimizer.minimize(loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(
learning_rate=self._mixture_weight_learning_rate)
return optimizer.minimize(loss, var_list=var_list)
class _FakeGenerator(Generator):
"""Generator that exposed generate_candidates' arguments."""
def __init__(self, spy_fn, subnetwork_builders):
"""Checks the arguments passed to generate_candidates.
Args:
spy_fn: (iteration_number, previous_ensemble_reports, all_reports) -> ().
Spies on the arguments passed to generate_candidates whenever it is
called.
subnetwork_builders: List of `Builder`s to return in every call to
generate_candidates.
"""
self._spy_fn = spy_fn
self._subnetwork_builders = subnetwork_builders
def generate_candidates(self, previous_ensemble, iteration_number,
previous_ensemble_reports, all_reports):
"""Spys on arguments passed in, then returns a fixed list of candidates."""
del previous_ensemble # unused
self._spy_fn(iteration_number, previous_ensemble_reports, all_reports)
return self._subnetwork_builders
class _WidthLimitingDNNBuilder(_DNNBuilder):
"""Limits the width of the previous_ensemble."""
def __init__(self,
name,
learning_rate=.001,
mixture_weight_learning_rate=.001,
return_penultimate_layer=True,
layer_size=1,
width_limit=None,
seed=13):
if width_limit == 0:
raise ValueError("width_limit must be at least 1 or None.")
super(_WidthLimitingDNNBuilder,
self).__init__(name, learning_rate, mixture_weight_learning_rate,
return_penultimate_layer, layer_size, seed)
self._width_limit = width_limit
def prune_previous_ensemble(self, previous_ensemble):
indices = range(len(previous_ensemble.weighted_subnetworks))
if self._width_limit is None:
return indices
if self._width_limit == 1:
return []
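# Keep only the most recent (width_limit - 1) previous subnetworks so the
# ensemble stays within the limit once this builder's new subnetwork is added.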
return indices[-self._width_limit + 1:] # pylint: disable=invalid-unary-operand-type
class _FakeEvaluator(object):
"""Fakes an `adanet.Evaluator`."""
def __init__(self, input_fn):
self._input_fn = input_fn
@property
def input_fn(self):
"""Return the input_fn."""
return self._input_fn
@property
def steps(self):
"""Return the number of evaluation steps."""
return 1
@property
def metric_name(self):
"""Returns the name of the metric being optimized."""
return "adanet_loss"
@property
def objective_fn(self):
"""Always returns the minimize objective."""
return np.nanargmin
def evaluate(self, sess, ensemble_metrics):
"""Abstract method to be overridden in subclasses."""
del sess, ensemble_metrics # Unused.
raise NotImplementedError
class _AlwaysLastEvaluator(_FakeEvaluator):
def evaluate(self, sess, ensemble_metrics):
"""Always makes the last loss the smallest."""
del sess # Unused.
losses = [np.inf] * len(ensemble_metrics)
losses[-1] = 0.
return losses
class _AlwaysSecondToLastEvaluator(_FakeEvaluator):
def evaluate(self, sess, ensemble_metrics):
"""Always makes the second to last loss the smallest."""
del sess # Unused.
losses = [np.inf] * len(ensemble_metrics)
losses[-2] = 0.
return losses
class _EarlyStoppingHook(tf_compat.SessionRunHook):
"""Hook that immediately requests training to stop."""
def after_run(self, run_context, run_values):
run_context.request_stop()
class EstimatorTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "one_step",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 1,
"max_steps": None,
"want_loss": 0.49899703,
"want_iteration": 0,
"want_global_step": 1,
},
{
"testcase_name": "none_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": None,
"steps": 300,
"max_steps": None,
"want_loss": 0.32487726,
"want_iteration": 0,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"max_steps": 300,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"steps": 300,
"max_steps": None,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_two_max_iteration_fewer_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"max_iterations": 2,
"max_steps": 300,
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_no_bias",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"use_bias": False,
"want_loss": 0.496736,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name":
"single_builder_subnetwork_hooks",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder(
"dnn",
subnetwork_chief_hooks=[
tu.ModifierSessionRunHook("chief_hook_var")
],
subnetwork_hooks=[tu.ModifierSessionRunHook("hook_var")])
]),
"max_iteration_steps":
200,
"use_bias":
False,
"want_loss":
0.496736,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_mixture_weight_hooks",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder(
"dnn",
mixture_weight_chief_hooks=[
tu.ModifierSessionRunHook("chief_hook_var")
],
mixture_weight_hooks=[
tu.ModifierSessionRunHook("hook_var")
])
]),
"max_iteration_steps":
200,
"use_bias":
False,
"want_loss":
0.496736,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_scalar_mixture_weight",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn", return_penultimate_layer=False)]),
"max_iteration_steps":
200,
"mixture_weight_type":
MixtureWeightType.SCALAR,
"want_loss":
0.32317898,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"single_builder_vector_mixture_weight",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn", return_penultimate_layer=False)]),
"max_iteration_steps":
200,
"mixture_weight_type":
MixtureWeightType.VECTOR,
"want_loss":
0.32317898,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name": "single_builder_replicate_ensemble_in_training",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"replicate_ensemble_in_training": True,
"max_iteration_steps": 200,
"max_steps": 300,
"want_loss": 0.32420215,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "single_builder_with_hook",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 200,
"hooks": [tu.ModifierSessionRunHook()],
"want_loss": 0.32420248,
"want_iteration": 1,
"want_global_step": 300,
},
{
"testcase_name": "high_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 500,
"want_loss": 0.32487726,
"want_iteration": 0,
"want_global_step": 300,
},
{
"testcase_name":
"two_builders",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", seed=99)]),
"max_iteration_steps":
200,
"want_loss":
0.27713922,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"two_builders_different_layer_sizes",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"two_builders_one_max_iteration_none_steps_and_none_max_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
1,
"steps":
None,
"max_steps":
None,
"want_loss":
0.35249719,
"want_iteration":
0,
"want_global_step":
200,
},
{
"testcase_name":
"two_builders_one_max_iteration_two_hundred_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
1,
"steps":
300,
"max_steps":
None,
"want_loss":
0.35249719,
"want_iteration":
0,
"want_global_step":
200,
},
{
"testcase_name":
"two_builders_two_max_iteration_none_steps_and_none_max_steps",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
200,
"max_iterations":
2,
"steps":
None,
"max_steps":
None,
"want_loss":
0.26503286,
"want_iteration":
1,
"want_global_step":
400,
},
{
"testcase_name":
"two_builders_different_layer_sizes_three_iterations",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
100,
"want_loss":
0.26433355,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"two_dnn_export_subnetworks",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"max_iteration_steps":
100,
"want_loss":
0.26433355,
"want_iteration":
2,
"want_global_step":
300,
"export_subnetworks":
True,
},
{
"testcase_name":
"width_limiting_builder_no_pruning",
"subnetwork_generator":
SimpleGenerator([_WidthLimitingDNNBuilder("no_pruning")]),
"max_iteration_steps":
75,
"want_loss":
0.32001898,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_some_pruning",
"subnetwork_generator":
SimpleGenerator(
[_WidthLimitingDNNBuilder("some_pruning", width_limit=2)]),
"max_iteration_steps":
75,
"want_loss":
0.38592532,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_prune_all",
"subnetwork_generator":
SimpleGenerator(
[_WidthLimitingDNNBuilder("prune_all", width_limit=1)]),
"max_iteration_steps":
75,
"want_loss":
0.43492866,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"width_limiting_builder_mixed",
"subnetwork_generator":
SimpleGenerator([
_WidthLimitingDNNBuilder("no_pruning"),
_WidthLimitingDNNBuilder("some_pruning", width_limit=2),
_WidthLimitingDNNBuilder("prune_all", width_limit=1)
]),
"max_iteration_steps":
75,
"want_loss":
0.32001898,
"want_iteration":
3,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_good_input",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=3),
"max_iteration_steps":
200,
"want_loss":
0.36189985,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_bad_input",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[1.]]), steps=3),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_always_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
]),
"evaluator":
_AlwaysLastEvaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]])),
"max_iteration_steps":
None,
"want_loss":
0.31389591,
"want_iteration":
0,
"want_global_step":
300,
},
{
"testcase_name":
"evaluator_always_second_to_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
]),
"evaluator":
_AlwaysSecondToLastEvaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]])),
"max_iteration_steps":
None,
"want_loss":
0.32487726,
"want_iteration":
0,
"want_global_step":
300,
},
{
"testcase_name":
"report_materializer",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"report_materializer":
ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1),
"max_iteration_steps":
200,
"want_loss":
0.29696745,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"all_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [AllStrategy()],
"max_iteration_steps":
200,
"want_loss":
0.29196805,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"all_strategy_multiple_ensemblers",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [AllStrategy()],
"ensemblers": [
ComplexityRegularizedEnsembler(),
ComplexityRegularizedEnsembler(use_bias=True, name="with_bias")
],
"max_iteration_steps":
200,
"want_loss":
0.23053232,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"solo_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [SoloStrategy()],
"max_iteration_steps":
200,
"want_loss":
0.35249719,
"want_iteration":
1,
"want_global_step":
300,
},
{
"testcase_name":
"solo_strategy_three_iterations",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies": [SoloStrategy()],
"max_iteration_steps":
100,
"want_loss":
0.36163166,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"multi_ensemble_strategy",
"subnetwork_generator":
SimpleGenerator(
[_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3)]),
"ensemble_strategies":
[AllStrategy(), GrowStrategy(),
SoloStrategy()],
"max_iteration_steps":
100,
"want_loss":
0.24838975,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"dataset_train_input_fn",
"subnetwork_generator":
SimpleGenerator([_DNNBuilder("dnn")]),
# pylint: disable=g-long-lambda
"train_input_fn":
lambda: tf.data.Dataset.from_tensors(({
"x": XOR_FEATURES
}, XOR_LABELS)).repeat(),
# pylint: enable=g-long-lambda
"max_iteration_steps":
100,
"want_loss":
0.32219219,
"want_iteration":
2,
"want_global_step":
300,
},
{
"testcase_name":
"early_stopping_subnetwork",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", subnetwork_hooks=[_EarlyStoppingHook()])
]),
"max_iteration_steps":
100,
"max_steps":
200,
"want_loss":
0.2958503,
# Since one subnetwork stops after 1 step and global step is the
# mean of iteration steps, global step will be incremented at half
# the rate.
"want_iteration":
3,
"want_global_step":
200,
})
def test_lifecycle(self,
subnetwork_generator,
want_loss,
want_iteration,
want_global_step,
max_iteration_steps,
mixture_weight_type=MixtureWeightType.MATRIX,
evaluator=None,
use_bias=True,
replicate_ensemble_in_training=False,
hooks=None,
ensemblers=None,
ensemble_strategies=None,
max_steps=300,
steps=None,
report_materializer=None,
train_input_fn=None,
max_iterations=None,
export_subnetworks=False):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
def _metric_fn(predictions):
mean = tf.keras.metrics.Mean()
mean.update_state(predictions["predictions"])
return {"keras_mean": mean}
default_ensembler_kwargs = {
"mixture_weight_type": mixture_weight_type,
"mixture_weight_initializer": tf_compat.v1.zeros_initializer(),
"warm_start_mixture_weights": True,
"use_bias": use_bias,
}
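# The mixture-weight kwargs only apply to the default ensembler, so drop
# them when custom ensemblers are provided.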
if ensemblers:
default_ensembler_kwargs = {}
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=max_iteration_steps,
evaluator=evaluator,
ensemblers=ensemblers,
ensemble_strategies=ensemble_strategies,
report_materializer=report_materializer,
replicate_ensemble_in_training=replicate_ensemble_in_training,
metric_fn=_metric_fn,
model_dir=self.test_subdirectory,
config=run_config,
max_iterations=max_iterations,
export_subnetwork_logits=export_subnetworks,
export_subnetwork_last_layer=export_subnetworks,
**default_ensembler_kwargs)
if not train_input_fn:
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train.
estimator.train(
input_fn=train_input_fn, steps=steps, max_steps=max_steps, hooks=hooks)
# Evaluate.
eval_results = estimator.evaluate(
input_fn=train_input_fn, steps=10, hooks=hooks)
logging.info("%s", eval_results)
self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
self.assertEqual(want_global_step, eval_results["global_step"])
self.assertEqual(want_iteration, eval_results["iteration"])
# Predict.
predictions = estimator.predict(
input_fn=tu.dataset_input_fn(features=[0., 0.], labels=None))
for prediction in predictions:
self.assertIsNotNone(prediction["predictions"])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return tf.estimator.export.ServingInputReceiver(
features={"x": tf.constant([[0., 0.]], name="serving_x")},
receiver_tensors=serialized_example)
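# Prefer export_saved_model when available; fall back to the deprecated
# export_savedmodel on older TF versions.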
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_dir_base = os.path.join(self.test_subdirectory, "export")
export_saved_model_fn(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn)
if export_subnetworks:
saved_model = saved_model_utils.read_saved_model(
os.path.join(export_dir_base,
tf.io.gfile.listdir(export_dir_base)[0]))
export_signature_def = saved_model.meta_graphs[0].signature_def
self.assertIn("subnetwork_logits", export_signature_def.keys())
self.assertIn("subnetwork_last_layer", export_signature_def.keys())
@parameterized.named_parameters(
{
"testcase_name":
"hash_bucket_with_one_hot",
"feature_column": (tf.feature_column.indicator_column(
categorical_column=(
tf.feature_column.categorical_column_with_hash_bucket(
key="human_names", hash_bucket_size=4, dtype=tf.string)))
),
}, {
"testcase_name":
"vocab_list_with_one_hot",
"feature_column": (tf.feature_column.indicator_column(
categorical_column=(
tf.feature_column.categorical_column_with_vocabulary_list(
key="human_names",
vocabulary_list=["alice", "bob"],
dtype=tf.string)))),
}, {
"testcase_name":
"hash_bucket_with_embedding",
"feature_column": (tf.feature_column.embedding_column(
categorical_column=(
tf.feature_column.categorical_column_with_hash_bucket(
key="human_names", hash_bucket_size=4, dtype=tf.string)),
dimension=2)),
}, {
"testcase_name":
"vocab_list_with_embedding",
"feature_column": (tf.feature_column.embedding_column(
categorical_column=(
tf.feature_column.categorical_column_with_vocabulary_list(
key="human_names",
vocabulary_list=["alice", "bob"],
dtype=tf.string)),
dimension=2)),
})
def test_categorical_columns(self, feature_column):
def train_input_fn():
input_features = {
"human_names": tf.constant([["alice"], ["bob"]], name="human_names")
}
input_labels = tf.constant([[1.], [0.]], name="starts_with_a")
return input_features, input_labels
report_materializer = ReportMaterializer(input_fn=train_input_fn, steps=1)
estimator = Estimator(
head=regression_head.RegressionHead(),
subnetwork_generator=SimpleGenerator(
[_SimpleBuilder(name="simple", feature_columns=[feature_column])]),
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory)
estimator.train(input_fn=train_input_fn, max_steps=3)
@parameterized.named_parameters(
{
"testcase_name": "no_subnetwork_generator",
"subnetwork_generator": None,
"max_iteration_steps": 100,
"want_error": ValueError,
},
{
"testcase_name": "negative_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": -1,
"want_error": ValueError,
},
{
"testcase_name": "zero_max_iteration_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 0,
"want_error": ValueError,
},
{
"testcase_name": "negative_max_iterations",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"max_iterations": -1,
"want_error": ValueError,
},
{
"testcase_name": "zero_max_iterations",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"max_iterations": 0,
"want_error": ValueError,
},
{
"testcase_name": "steps_and_max_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 1,
"max_steps": 1,
"want_error": ValueError,
},
{
"testcase_name": "zero_steps",
"subnetwork_generator": SimpleGenerator([_DNNBuilder("dnn")]),
"max_iteration_steps": 1,
"steps": 0,
"max_steps": None,
"want_error": ValueError,
},
{
"testcase_name": "nan_loss_builder",
"subnetwork_generator": SimpleGenerator([_NanLossBuilder()]),
"max_iteration_steps": 1,
"max_steps": None,
"want_error": tf_compat.v1.estimator.NanLossDuringTrainingError,
},
{
"testcase_name":
"nan_loss_builder_first",
"subnetwork_generator":
SimpleGenerator([
_NanLossBuilder(),
_DNNBuilder("dnn"),
]),
"max_iteration_steps":
1,
"max_steps":
None,
"want_error":
tf_compat.v1.estimator.NanLossDuringTrainingError,
},
{
"testcase_name":
"nan_loss_builder_last",
"subnetwork_generator":
SimpleGenerator([
_DNNBuilder("dnn"),
_NanLossBuilder(),
]),
"max_iteration_steps":
1,
"max_steps":
None,
"want_error":
tf_compat.v1.estimator.NanLossDuringTrainingError,
},
)
def test_train_error(self,
subnetwork_generator,
max_iteration_steps,
want_error,
steps=None,
max_steps=10,
max_iterations=None):
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
with self.assertRaises(want_error):
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
max_iterations=max_iterations,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, steps=steps, max_steps=max_steps)
def test_binary_head_asserts_are_disabled(self):
"""Tests b/140267630."""
subnetwork_generator = SimpleGenerator([
_DNNBuilder("dnn"),
_NanLossBuilder(),
])
estimator = Estimator(
head=binary_class_head_v1(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
model_dir=self.test_subdirectory)
eval_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.evaluate(input_fn=eval_input_fn, steps=1)
class KerasCNNBuilder(Builder):
"""Builds a CNN subnetwork for AdaNet."""
def __init__(self, learning_rate, seed=42):
"""Initializes a `SimpleCNNBuilder`.
Args:
learning_rate: The float learning rate to use.
seed: The random seed.
Returns:
An instance of `KerasCNNBuilder`.
"""
self._learning_rate = learning_rate
self._seed = seed
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
"""See `adanet.subnetwork.Builder`."""
seed = self._seed
if previous_ensemble:
seed += len(previous_ensemble.weighted_subnetworks)
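# Reshape the four input features into a 2x2 single-channel "image" so the
# Conv2D and MaxPool2D layers below can operate on them.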
images = list(features.values())[0]
images = tf.reshape(images, [-1, 2, 2, 1])
kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed)
x = tf.keras.layers.Conv2D(
filters=3,
kernel_size=1,
padding="same",
activation="relu",
kernel_initializer=kernel_initializer)(
images)
x = tf.keras.layers.MaxPool2D(pool_size=2, strides=1)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(
units=3, activation="relu", kernel_initializer=kernel_initializer)(
x)
logits = tf_compat.v1.layers.Dense(
units=1, activation=None, kernel_initializer=kernel_initializer)(
x)
complexity = tf.constant(1)
return Subnetwork(
last_layer=x,
logits=logits,
complexity=complexity,
persisted_tensors={})
def build_subnetwork_train_op(self,
subnetwork,
loss,
var_list,
labels,
iteration_step,
summary,
previous_ensemble=None):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
return tf.no_op()
@property
def name(self):
return "simple_cnn"
class EstimatorKerasLayersTest(tu.AdanetTestCase):
def test_lifecycle(self):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=SimpleGenerator(
[KerasCNNBuilder(learning_rate=.001)]),
max_iteration_steps=3,
evaluator=Evaluator(
input_fn=tu.dummy_input_fn([[1., 1., .1, .1]], [[0.]]), steps=3),
model_dir=self.test_subdirectory,
config=run_config)
xor_features = [[1., 0., 1., 0.], [0., 0., 0., 0.], [0., 1., 0., 1.],
[1., 1., 1., 1.]]
xor_labels = [[1.], [0.], [1.], [0.]]
train_input_fn = tu.dummy_input_fn(xor_features, xor_labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=9)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=3)
logging.info("%s", eval_results)
want_loss = 0.16915826
if tf_compat.version_greater_or_equal("1.10.0"):
# After TF v1.10.0, the loss computed from a neural network using Keras
# layers changed, though it is not clear why.
want_loss = 0.26195815
self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
# Predict.
predictions = estimator.predict(
input_fn=tu.dataset_input_fn(features=[0., 0., 0., 0.], labels=None))
for prediction in predictions:
self.assertIsNotNone(prediction["predictions"])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return tf.estimator.export.ServingInputReceiver(
features={"x": tf.constant([[0., 0., 0., 0.]], name="serving_x")},
receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_saved_model_fn(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn)
class MultiHeadBuilder(Builder):
"""Builds a subnetwork for AdaNet that uses dict labels."""
def __init__(self, learning_rate=.001, split_logits=False, seed=42):
"""Initializes a `LabelsDictBuilder`.
Args:
learning_rate: The float learning rate to use.
split_logits: Whether to return a dict of logits or a single concatenated
logits `Tensor`.
seed: The random seed.
Returns:
An instance of `MultiHeadBuilder`.
"""
self._learning_rate = learning_rate
self._split_logits = split_logits
self._seed = seed
def build_subnetwork(self,
features,
logits_dimension,
training,
iteration_step,
summary,
previous_ensemble=None):
"""See `adanet.subnetwork.Builder`."""
seed = self._seed
if previous_ensemble:
seed += len(previous_ensemble.weighted_subnetworks)
kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed)
x = features["x"]
logits = tf_compat.v1.layers.dense(
x,
units=logits_dimension,
activation=None,
kernel_initializer=kernel_initializer)
if self._split_logits:
# Return different logits, one for each head.
logits1, logits2 = tf.split(logits, [1, 1], 1)
logits = {
"head1": logits1,
"head2": logits2,
}
complexity = tf.constant(1)
return Subnetwork(
last_layer=logits,
logits=logits,
complexity=complexity,
persisted_tensors={})
def build_subnetwork_train_op(self,
subnetwork,
loss,
var_list,
labels,
iteration_step,
summary,
previous_ensemble=None):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
optimizer = tf_compat.v1.train.GradientDescentOptimizer(self._learning_rate)
return optimizer.minimize(loss=loss, var_list=var_list)
@property
def name(self):
return "multi_head"
class EstimatorMultiHeadTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "concatenated_logits",
"builders": [MultiHeadBuilder()],
"want_loss": 3.218,
}, {
"testcase_name": "split_logits_with_export_subnetworks",
"builders": [MultiHeadBuilder(split_logits=True)],
"want_loss": 3.224,
"export_subnetworks": True,
}, {
"testcase_name": "split_logits",
"builders": [MultiHeadBuilder(split_logits=True)],
"want_loss": 3.224,
})
def test_lifecycle(self, builders, want_loss, export_subnetworks=False):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
xor_features = [[1., 0., 1., 0.], [0., 0., 0., 0.], [0., 1., 0., 1.],
[1., 1., 1., 1.]]
xor_labels = [[1.], [0.], [1.], [0.]]
def train_input_fn():
return {
"x": tf.constant(xor_features)
}, {
"head1": tf.constant(xor_labels),
"head2": tf.constant(xor_labels)
}
estimator = Estimator(
head=multi_head_lib.MultiHead(heads=[
regression_head.RegressionHead(
name="head1", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
regression_head.RegressionHead(
name="head2", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
]),
subnetwork_generator=SimpleGenerator(builders),
max_iteration_steps=3,
evaluator=Evaluator(input_fn=train_input_fn, steps=1),
model_dir=self.test_subdirectory,
config=run_config,
export_subnetwork_logits=export_subnetworks,
export_subnetwork_last_layer=export_subnetworks)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=9)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=3)
self.assertAlmostEqual(want_loss, eval_results["loss"], places=3)
# Predict.
predictions = estimator.predict(
input_fn=tu.dataset_input_fn(features=[0., 0., 0., 0.], labels=None))
for prediction in predictions:
self.assertIsNotNone(prediction[("head1", "predictions")])
self.assertIsNotNone(prediction[("head2", "predictions")])
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return tf.estimator.export.ServingInputReceiver(
features={"x": tf.constant([[0., 0., 0., 0.]], name="serving_x")},
receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_dir_base = os.path.join(self.test_subdirectory, "export")
export_saved_model_fn(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn)
if export_subnetworks:
saved_model = saved_model_utils.read_saved_model(
os.path.join(export_dir_base,
tf.io.gfile.listdir(export_dir_base)[0]))
export_signature_def = saved_model.meta_graphs[0].signature_def
self.assertIn("subnetwork_logits_head1", export_signature_def.keys())
self.assertIn("subnetwork_logits_head2", export_signature_def.keys())
self.assertIn("subnetwork_last_layer_head1", export_signature_def.keys())
self.assertIn("subnetwork_last_layer_head2", export_signature_def.keys())
class EstimatorCallingModelFnDirectlyTest(tu.AdanetTestCase):
"""Tests b/112108745. Warn users not to call model_fn directly."""
def test_calling_model_fn_directly(self):
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
max_iteration_steps=3,
use_bias=True,
model_dir=self.test_subdirectory)
model_fn = estimator.model_fn
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
tf_compat.v1.train.create_global_step()
features, labels = train_input_fn()
with self.assertRaises(UserWarning):
model_fn(
features=features,
mode=tf.estimator.ModeKeys.TRAIN,
labels=labels,
config={})
def test_calling_model_fn_directly_for_predict(self):
with context.graph_mode():
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
max_iteration_steps=3,
use_bias=True,
model_dir=self.test_subdirectory)
model_fn = estimator.model_fn
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
tf_compat.v1.train.create_global_step()
features, labels = train_input_fn()
model_fn(
features=features,
mode=tf.estimator.ModeKeys.PREDICT,
labels=labels,
config=tf.estimator.RunConfig(
save_checkpoints_steps=1,
keep_checkpoint_max=3,
model_dir=self.test_subdirectory,
))
class EstimatorCheckpointTest(tu.AdanetTestCase):
"""Tests estimator checkpoints."""
@parameterized.named_parameters(
{
"testcase_name": "single_iteration",
"max_iteration_steps": 3,
"keep_checkpoint_max": 3,
"want_num_checkpoints": 3,
}, {
"testcase_name": "single_iteration_keep_one",
"max_iteration_steps": 3,
"keep_checkpoint_max": 1,
"want_num_checkpoints": 1,
}, {
"testcase_name": "three_iterations",
"max_iteration_steps": 1,
"keep_checkpoint_max": 3,
"want_num_checkpoints": 3,
}, {
"testcase_name": "three_iterations_keep_one",
"max_iteration_steps": 1,
"keep_checkpoint_max": 1,
"want_num_checkpoints": 1,
})
def test_checkpoints(self,
max_iteration_steps,
keep_checkpoint_max,
want_num_checkpoints,
max_steps=3):
config = tf.estimator.RunConfig(
save_checkpoints_steps=1,
keep_checkpoint_max=keep_checkpoint_max,
)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
config=config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=max_steps)
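# Each retained checkpoint writes one *.meta file, so count those to verify
# how many checkpoints were kept.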
checkpoints = tf.io.gfile.glob(
os.path.join(self.test_subdirectory, "*.meta"))
self.assertEqual(want_num_checkpoints, len(checkpoints))
def _check_eventfile_for_keyword(keyword, dir_):
"""Checks event files for the keyword."""
tf_compat.v1.summary.FileWriterCache.clear()
if not tf.io.gfile.exists(dir_):
raise ValueError("Directory '{}' not found.".format(dir_))
# Scan the most recently written events file for the keyword.
filenames = os.path.join(dir_, "events*")
event_paths = tf.io.gfile.glob(filenames)
if not event_paths:
raise ValueError("Path '{}' not found.".format(filenames))
for last_event in tf_compat.v1.train.summary_iterator(event_paths[-1]):
if last_event.summary is not None:
for value in last_event.summary.value:
if keyword == value.tag:
if value.HasField("simple_value"):
return value.simple_value
if value.HasField("image"):
return (value.image.height, value.image.width,
value.image.colorspace)
if value.HasField("tensor"):
return value.tensor.string_val
raise ValueError("Keyword '{}' not found in path '{}'.".format(
keyword, filenames))
class _FakeMetric(object):
"""A fake metric."""
def __init__(self, value, dtype):
self._value = value
self._dtype = dtype
def to_metric(self):
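# TF metric ops are (value, update_op) pairs; use the same tensor for both.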
tensor = tf.convert_to_tensor(value=self._value, dtype=self._dtype)
return (tensor, tensor)
class _EvalMetricsHead(object):
"""A fake head with the given evaluation metrics."""
def __init__(self, fake_metrics):
self._fake_metrics = fake_metrics
@property
def logits_dimension(self):
return 1
def create_estimator_spec(self,
features,
mode,
logits,
labels=None,
train_op_fn=None):
del features # Unused
metric_ops = None
if self._fake_metrics:
metric_ops = {}
for k, fake_metric in self._fake_metrics.items():
metric_ops[k] = fake_metric.to_metric()
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=logits,
loss=tf.reduce_mean(input_tensor=labels - logits),
eval_metric_ops=metric_ops,
train_op=train_op_fn(1))
def _mean_keras_metric(value):
"""Returns the mean of given value as a Keras metric."""
mean = tf.keras.metrics.Mean()
mean.update_state(value)
return mean
class EstimatorSummaryWriterTest(tu.AdanetTestCase):
"""Test that Tensorboard summaries get written correctly."""
@tf_compat.skip_for_tf2
def test_summaries(self):
"""Tests that summaries are written to candidate directory."""
run_config = tf.estimator.RunConfig(
tf_random_seed=42, log_step_count_steps=2, save_summary_steps=2)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", mixture_weight_learning_rate=.001)])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=run_config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=3)
ensemble_loss = 1.
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword("loss", self.test_subdirectory),
places=3)
self.assertIsNotNone(
_check_eventfile_for_keyword("global_step/sec", self.test_subdirectory))
self.assertEqual(
0.,
_check_eventfile_for_keyword("iteration/adanet/iteration",
self.test_subdirectory))
subnetwork_subdir = os.path.join(self.test_subdirectory,
"subnetwork/t0_dnn")
self.assertAlmostEqual(
3., _check_eventfile_for_keyword("scalar", subnetwork_subdir), places=3)
self.assertEqual((3, 3, 1),
_check_eventfile_for_keyword("image/image/0",
subnetwork_subdir))
self.assertAlmostEqual(
5.,
_check_eventfile_for_keyword("nested/scalar", subnetwork_subdir),
places=3)
ensemble_subdir = os.path.join(
self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword(
"adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir),
places=3)
self.assertAlmostEqual(
0.,
_check_eventfile_for_keyword(
"complexity_regularization/adanet/adanet_weighted_ensemble",
ensemble_subdir),
places=3)
self.assertAlmostEqual(
0.,
_check_eventfile_for_keyword(
"mixture_weight_norms/adanet/"
"adanet_weighted_ensemble/subnetwork_0", ensemble_subdir),
places=3)
@tf_compat.skip_for_tf2
def test_disable_summaries(self):
"""Tests that summaries can be disabled for ensembles and subnetworks."""
run_config = tf.estimator.RunConfig(
tf_random_seed=42, log_step_count_steps=2, save_summary_steps=2)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", mixture_weight_learning_rate=.001)])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=run_config,
model_dir=self.test_subdirectory,
enable_ensemble_summaries=False,
enable_subnetwork_summaries=False,
)
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator.train(input_fn=train_input_fn, max_steps=3)
ensemble_loss = 1.
self.assertAlmostEqual(
ensemble_loss,
_check_eventfile_for_keyword("loss", self.test_subdirectory),
places=3)
self.assertIsNotNone(
_check_eventfile_for_keyword("global_step/sec", self.test_subdirectory))
self.assertEqual(
0.,
_check_eventfile_for_keyword("iteration/adanet/iteration",
self.test_subdirectory))
subnetwork_subdir = os.path.join(self.test_subdirectory,
"subnetwork/t0_dnn")
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("scalar", subnetwork_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("image/image/0", subnetwork_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword("nested/scalar", subnetwork_subdir)
ensemble_subdir = os.path.join(
self.test_subdirectory, "ensemble/t0_dnn_grow_complexity_regularized")
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"adanet_loss/adanet/adanet_weighted_ensemble", ensemble_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"complexity_regularization/adanet/adanet_weighted_ensemble",
ensemble_subdir)
with self.assertRaises(ValueError):
_check_eventfile_for_keyword(
"mixture_weight_norms/adanet/"
"adanet_weighted_ensemble/subnetwork_0", ensemble_subdir)
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
"testcase_name": "none_metrics",
"head": _EvalMetricsHead(None),
"want_summaries": [],
"want_loss": -1.791,
}, {
"testcase_name":
"metrics_fn",
"head":
_EvalMetricsHead(None),
"metric_fn":
lambda predictions: {
"avg": tf_compat.v1.metrics.mean(predictions)
},
"want_summaries": ["avg"],
"want_loss":
-1.791,
}, {
"testcase_name":
"keras_metrics_fn",
"head":
_EvalMetricsHead(None),
"metric_fn":
lambda predictions: {
"avg": _mean_keras_metric(predictions)
},
"want_summaries": ["avg"],
"want_loss":
-1.791,
}, {
"testcase_name": "empty_metrics",
"head": _EvalMetricsHead({}),
"want_summaries": [],
"want_loss": -1.791,
}, {
"testcase_name":
"evaluation_name",
"head":
_EvalMetricsHead({}),
"evaluation_name":
"continuous",
"want_summaries": [],
"want_loss":
-1.791,
"global_subdir":
"eval_continuous",
"subnetwork_subdir":
"subnetwork/t0_dnn/eval_continuous",
"ensemble_subdir":
"ensemble/t0_dnn_grow_complexity_regularized/eval_continuous",
}, {
"testcase_name":
"regression_head",
"head":
regression_head.RegressionHead(
loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"want_summaries": ["average_loss"],
"want_loss":
.256,
}, {
"testcase_name":
"binary_classification_head",
"head":
binary_class_head.BinaryClassHead(
loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"learning_rate":
.6,
"want_summaries": ["average_loss", "accuracy", "recall"],
"want_loss":
0.122,
}, {
"testcase_name":
"all_metrics",
"head":
_EvalMetricsHead({
"float32":
_FakeMetric(1., tf.float32),
"float64":
_FakeMetric(1., tf.float64),
"serialized_summary":
_FakeMetric(
tf_compat.v1.Summary(value=[
tf_compat.v1.Summary.Value(
tag="summary_tag", simple_value=1.)
]).SerializeToString(), tf.string),
}),
"want_summaries": [
"float32",
"float64",
"serialized_summary/0",
],
"want_loss":
-1.791,
})
# pylint: enable=g-long-lambda
def test_eval_metrics(
self,
head,
want_loss,
want_summaries,
evaluation_name=None,
metric_fn=None,
learning_rate=.01,
global_subdir="eval",
subnetwork_subdir="subnetwork/t0_dnn/eval",
ensemble_subdir="ensemble/t0_dnn_grow_complexity_regularized/eval"):
"""Test that AdaNet evaluation metrics get persisted correctly."""
seed = 42
run_config = tf.estimator.RunConfig(tf_random_seed=seed)
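# A zero mixture-weight learning rate keeps the mixture weights fixed, so
# only the subnetwork weights train.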
subnetwork_generator = SimpleGenerator([
_DNNBuilder(
"dnn",
learning_rate=learning_rate,
mixture_weight_learning_rate=0.,
layer_size=8,
seed=seed)
])
estimator = Estimator(
head=head,
subnetwork_generator=subnetwork_generator,
max_iteration_steps=100,
metric_fn=metric_fn,
config=run_config,
model_dir=self.test_subdirectory)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
estimator.train(input_fn=train_input_fn, max_steps=100)
metrics = estimator.evaluate(
input_fn=train_input_fn, steps=1, name=evaluation_name)
self.assertAlmostEqual(want_loss, metrics["loss"], places=3)
global_subdir = os.path.join(self.test_subdirectory, global_subdir)
subnetwork_subdir = os.path.join(self.test_subdirectory, subnetwork_subdir)
ensemble_subdir = os.path.join(self.test_subdirectory, ensemble_subdir)
self.assertAlmostEqual(
want_loss,
_check_eventfile_for_keyword("loss", subnetwork_subdir),
places=3)
for metric in want_summaries:
self.assertIsNotNone(
_check_eventfile_for_keyword(metric, subnetwork_subdir),
msg="{} should be under 'eval'.".format(metric))
for dir_ in [global_subdir, ensemble_subdir]:
self.assertAlmostEqual(metrics["loss"],
_check_eventfile_for_keyword("loss", dir_))
self.assertEqual([b"| dnn |"],
_check_eventfile_for_keyword(
"architecture/adanet/ensembles/0", dir_))
for metric in want_summaries:
self.assertTrue(
_check_eventfile_for_keyword(metric, dir_) > 0.,
msg="{} should be under 'eval'.".format(metric))
class EstimatorMembersOverrideTest(tu.AdanetTestCase):
"""Tests b/77494544 fix."""
def test_assert_members_are_not_overridden(self):
"""Assert that AdaNet estimator does not break other estimators."""
config = tf.estimator.RunConfig()
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
adanet = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=10,
use_bias=True,
config=config)
self.assertIsNotNone(adanet)
if hasattr(tf.estimator, "LinearEstimator"):
estimator_fn = tf.estimator.LinearEstimator
else:
estimator_fn = tf.contrib.estimator.LinearEstimator
linear = estimator_fn(
head=tu.head(), feature_columns=[tf.feature_column.numeric_column("x")])
self.assertIsNotNone(linear)
def _dummy_feature_dict_input_fn(features, labels):
"""Returns an input_fn that returns feature and labels `Tensors`."""
def _input_fn():
input_features = {}
for key, feature in features.items():
input_features[key] = tf.constant(feature, name=key)
input_labels = tf.constant(labels, name="labels")
return input_features, input_labels
return _input_fn
class EstimatorDifferentFeaturesPerModeTest(tu.AdanetTestCase):
"""Tests b/109751254."""
@parameterized.named_parameters(
{
"testcase_name": "extra_train_features",
"train_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
},
}, {
"testcase_name": "extra_eval_features",
"train_features": {
"x": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
},
}, {
"testcase_name": "extra_predict_features",
"train_features": {
"x": [[1., 0.]],
},
"eval_features": {
"x": [[1., 0.]],
},
"predict_features": {
"x": [[1., 0.]],
"extra": [[1., 0.]],
},
})
def test_different_features_per_mode(self, train_features, eval_features,
predict_features):
"""Tests tests different numbers of features per mode."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory,
config=run_config)
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(train_features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Evaluate.
eval_input_fn = _dummy_feature_dict_input_fn(eval_features, labels)
estimator.evaluate(input_fn=eval_input_fn, steps=1)
# Predict.
predict_input_fn = _dummy_feature_dict_input_fn(predict_features, None)
estimator.predict(input_fn=predict_input_fn)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
features = {}
for key, value in predict_features.items():
features[key] = tf.constant(value)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=serialized_example)
export_saved_model_fn = getattr(estimator, "export_saved_model", None)
if not callable(export_saved_model_fn):
export_saved_model_fn = estimator.export_savedmodel
export_saved_model_fn(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn)
class EstimatorExportSavedModelTest(tu.AdanetTestCase):
def test_export_saved_model_for_predict(self):
"""Tests SavedModel exporting functionality for predict (b/110435640)."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
report_materializer = ReportMaterializer(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
report_materializer=report_materializer,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=1,
use_bias=True,
model_dir=self.test_subdirectory,
config=run_config)
features = {"x": [[1., 0.]]}
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
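# Convert the in-memory feature lists to constant Tensors for the receiver.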
for key, value in features.items():
features[key] = tf.constant(value)
return tf.estimator.export.ServingInputReceiver(
features=features, receiver_tensors=serialized_example)
estimator.export_saved_model(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.PREDICT)
@test_util.run_in_graph_and_eager_modes
def test_export_saved_model_for_eval(self):
"""Tests SavedModel exporting functionality for eval (b/110991908)."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn", layer_size=8, learning_rate=1.)])
estimator = Estimator(
head=binary_class_head.BinaryClassHead(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=100,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=300)
metrics = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertAlmostEqual(.067, metrics["average_loss"], places=3)
self.assertAlmostEqual(1., metrics["recall"], places=3)
self.assertAlmostEqual(1., metrics["accuracy"], places=3)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
return export.SupervisedInputReceiver(
features={"x": tf.constant(XOR_FEATURES)},
labels=tf.constant(XOR_LABELS),
receiver_tensors=serialized_example)
export_dir_base = os.path.join(self.test_subdirectory, "export")
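# Depending on the TF version, only one of the following export calls
# exists; the other raises AttributeError and is skipped.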
try:
estimator.export_saved_model(
export_dir_base=export_dir_base,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.EVAL)
except AttributeError:
pass
try:
tf.contrib.estimator.export_saved_model_for_mode(
estimator,
export_dir_base=export_dir_base,
input_receiver_fn=serving_input_fn,
mode=tf.estimator.ModeKeys.EVAL)
except AttributeError:
pass
subdir = tf.io.gfile.listdir(export_dir_base)[0]
with context.graph_mode(), self.test_session() as sess:
meta_graph_def = tf_compat.v1.saved_model.loader.load(
sess, ["eval"], os.path.join(export_dir_base, subdir))
signature_def = meta_graph_def.signature_def.get("eval")
# Read zero metric.
self.assertAlmostEqual(
0.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/value"])),
places=3)
# Run metric update op.
sess.run((tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/update_op"]),
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/accuracy/update_op"]),
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/recall/update_op"])))
# Read metric again; it should no longer be zero.
self.assertAlmostEqual(
0.067,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/average_loss/value"])),
places=3)
self.assertAlmostEqual(
1.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/recall/value"])),
places=3)
self.assertAlmostEqual(
1.,
sess.run(
tf_compat.v1.saved_model.utils.get_tensor_from_tensor_info(
signature_def.outputs["metrics/accuracy/value"])),
places=3)
def test_export_saved_model_always_uses_replication_placement(self):
"""Tests b/137675014."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(
[_DNNBuilder("dnn1"), _DNNBuilder("dnn2")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config,
experimental_placement_strategy=RoundRobinStrategy())
features = {"x": [[1., 0.]]}
labels = [[1.]]
train_input_fn = _dummy_feature_dict_input_fn(features, labels)
# Train.
estimator.train(input_fn=train_input_fn, max_steps=2)
# Export SavedModel.
def serving_input_fn():
"""Input fn for serving export, starting from serialized example."""
serialized_example = tf_compat.v1.placeholder(
dtype=tf.string, shape=(None), name="serialized_example")
tensor_features = {}
for key, value in features.items():
tensor_features[key] = tf.constant(value)
return tf.estimator.export.ServingInputReceiver(
features=tensor_features, receiver_tensors=serialized_example)
# Fake the number of PS replicas so RoundRobinStrategy will be used.
estimator._config._num_ps_replicas = 2
# If we're still using RoundRobinStrategy, this call will fail by trying
# to place ops on non-existent devices.
# Check all three export methods.
estimator.export_saved_model(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn,
experimental_mode=tf.estimator.ModeKeys.PREDICT)
try:
estimator.export_savedmodel(
export_dir_base=self.test_subdirectory,
serving_input_receiver_fn=serving_input_fn)
except AttributeError as error:
# Log deprecation errors.
logging.warning("Testing estimator#export_savedmodel: %s", error)
estimator.experimental_export_all_saved_models(
export_dir_base=self.test_subdirectory,
input_receiver_fn_map={
tf.estimator.ModeKeys.PREDICT: serving_input_fn,
})
class EstimatorReportTest(tu.AdanetTestCase):
"""Tests report generation and usage."""
def compare_report_lists(self, report_list1, report_list2):
# Essentially assertEqual(report_list1, report_list2), but matching reports
# by iteration number and name so that list ordering does not matter.
def make_qualified_name(iteration_number, name):
return "iteration_{}/{}".format(iteration_number, name)
report_dict_1 = {
make_qualified_name(report.iteration_number, report.name): report
for report in report_list1
}
report_dict_2 = {
make_qualified_name(report.iteration_number, report.name): report
for report in report_list2
}
self.assertEqual(len(report_list1), len(report_list2))
for qualified_name in report_dict_1.keys():
report_1 = report_dict_1[qualified_name]
report_2 = report_dict_2[qualified_name]
self.assertEqual(
report_1.hparams,
report_2.hparams,
msg="{} vs. {}".format(report_1, report_2))
self.assertEqual(
report_1.attributes,
report_2.attributes,
msg="{} vs. {}".format(report_1, report_2))
self.assertEqual(
report_1.included_in_final_ensemble,
report_2.included_in_final_ensemble,
msg="{} vs. {}".format(report_1, report_2))
for metric_key, metric_value in report_1.metrics.items():
self.assertEqual(
metric_value,
report_2.metrics[metric_key],
msg="{} vs. {}".format(report_1, report_2))
@parameterized.named_parameters(
{
"testcase_name": "one_iteration_one_subnetwork",
"subnetwork_builders": [_DNNBuilder("dnn", layer_size=1),],
"num_iterations": 1,
"want_materialized_iteration_reports": [[
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
]],
"want_previous_ensemble_reports": [],
"want_all_reports": [],
},
{
"testcase_name": "one_iteration_three_subnetworks",
"subnetwork_builders": [
# learning_rate is set to 0 for all but one Builder
# to make sure that only one of them can learn.
_DNNBuilder(
"dnn_1",
layer_size=1,
learning_rate=0.,
mixture_weight_learning_rate=0.),
_DNNBuilder(
"dnn_2",
layer_size=2,
learning_rate=0.,
mixture_weight_learning_rate=0.),
# fixing the match for dnn_3 to win.
_DNNBuilder("dnn_3", layer_size=3),
],
"num_iterations": 1,
"want_materialized_iteration_reports": [[
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
]],
"want_previous_ensemble_reports": [],
"want_all_reports": [],
},
{
"testcase_name":
"three_iterations_one_subnetwork",
"subnetwork_builders": [_DNNBuilder("dnn", layer_size=1),],
"num_iterations":
3,
"want_materialized_iteration_reports": [
[
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
)
],
[
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=2,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
],
"want_previous_ensemble_reports": [
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
"want_all_reports": [
MaterializedReport(
iteration_number=0,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
},
{
"testcase_name":
"three_iterations_three_subnetworks",
"subnetwork_builders": [
# learning_rate is set to 0 for all but one Builder
# to make sure that only one of them can learn.
_DNNBuilder(
"dnn_1",
layer_size=1,
learning_rate=0.,
mixture_weight_learning_rate=0.),
_DNNBuilder(
"dnn_2",
layer_size=2,
learning_rate=0.,
mixture_weight_learning_rate=0.),
# fixing the match for dnn_3 to win in every iteration.
_DNNBuilder("dnn_3", layer_size=3),
],
"num_iterations":
3,
"want_materialized_iteration_reports": [
[
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
[
MaterializedReport(
iteration_number=2,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=2,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
],
"want_previous_ensemble_reports": [
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
"want_all_reports": [
MaterializedReport(
iteration_number=0,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=0,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
MaterializedReport(
iteration_number=1,
name="previous_ensemble",
hparams={},
attributes={},
metrics={},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_1",
hparams={"layer_size": 1},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_2",
hparams={"layer_size": 2},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=False,
),
MaterializedReport(
iteration_number=1,
name="dnn_3",
hparams={"layer_size": 3},
attributes={
"complexity": 3,
},
metrics={
"moo": 3,
},
included_in_final_ensemble=True,
),
],
},
)
def test_report_generation_and_usage(self, subnetwork_builders,
num_iterations,
want_materialized_iteration_reports,
want_previous_ensemble_reports,
want_all_reports):
    # Stores the iteration_number, previous_ensemble_reports and all_reports
    # arguments in the spied_iteration_reports dictionary, overwriting what
    # was seen in previous iterations.
spied_iteration_reports = {}
def _spy_fn(iteration_number, previous_ensemble_reports, all_reports):
spied_iteration_reports[iteration_number] = {
"previous_ensemble_reports": previous_ensemble_reports,
"all_reports": all_reports,
}
subnetwork_generator = _FakeGenerator(
spy_fn=_spy_fn, subnetwork_builders=subnetwork_builders)
max_iteration_steps = 5
max_steps = max_iteration_steps * num_iterations + 1
train_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
mixture_weight_type=MixtureWeightType.MATRIX,
mixture_weight_initializer=tf_compat.v1.zeros_initializer(),
warm_start_mixture_weights=True,
max_iteration_steps=max_iteration_steps,
use_bias=True,
report_materializer=ReportMaterializer(
input_fn=train_input_fn, steps=1),
model_dir=self.test_subdirectory)
report_accessor = estimator._report_accessor
estimator.train(input_fn=train_input_fn, max_steps=max_steps)
materialized_iteration_reports = list(
report_accessor.read_iteration_reports())
self.assertEqual(num_iterations, len(materialized_iteration_reports))
for i in range(num_iterations):
want_materialized_reports = (want_materialized_iteration_reports[i])
materialized_reports = materialized_iteration_reports[i]
self.compare_report_lists(want_materialized_reports, materialized_reports)
# Compute argmin adanet loss.
argmin_adanet_loss = 0
smallest_known_adanet_loss = float("inf")
for j, materialized_subnetwork_report in enumerate(materialized_reports):
if (smallest_known_adanet_loss >
materialized_subnetwork_report.metrics["adanet_loss"]):
smallest_known_adanet_loss = (
materialized_subnetwork_report.metrics["adanet_loss"])
argmin_adanet_loss = j
# Check that the subnetwork with the lowest adanet loss is the one
# that is included in the final ensemble.
      for j, materialized_subnetwork_report in enumerate(materialized_reports):
        self.assertEqual(j == argmin_adanet_loss,
                         materialized_subnetwork_report.included_in_final_ensemble)
# Check the arguments passed into the generate_candidates method of the
# Generator.
iteration_report = spied_iteration_reports[num_iterations - 1]
self.compare_report_lists(want_previous_ensemble_reports,
iteration_report["previous_ensemble_reports"])
self.compare_report_lists(want_all_reports, iteration_report["all_reports"])
class EstimatorForceGrowTest(tu.AdanetTestCase):
"""Tests the force_grow override.
Uses linear subnetworks with the same seed. They will produce identical
outputs, so unless the `force_grow` override is set, none of the new
subnetworks will improve the AdaNet objective, and AdaNet will not add them to
the ensemble.
"""
@parameterized.named_parameters(
{
"testcase_name": "one_builder_no_force_grow",
"builders":
[_LinearBuilder("linear", mixture_weight_learning_rate=0.)],
"force_grow": False,
"want_subnetworks": 1,
}, {
"testcase_name": "one_builder",
"builders":
[_LinearBuilder("linear", mixture_weight_learning_rate=0.)],
"force_grow": True,
"want_subnetworks": 2,
}, {
"testcase_name": "two_builders",
"builders": [
_LinearBuilder("linear", mixture_weight_learning_rate=0.),
_LinearBuilder("linear2", mixture_weight_learning_rate=0.)
],
"force_grow": True,
"want_subnetworks": 2,
}, {
"testcase_name":
"two_builders_with_evaluator",
"builders": [
_LinearBuilder("linear", mixture_weight_learning_rate=0.),
_LinearBuilder("linear2", mixture_weight_learning_rate=0.)
],
"force_grow":
True,
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[1., 1.]], [[0.]]), steps=1),
"want_subnetworks":
3,
})
def test_force_grow(self,
builders,
force_grow,
want_subnetworks,
evaluator=None):
"""Train entire estimator lifecycle using XOR dataset."""
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator(builders)
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
evaluator=evaluator,
force_grow=force_grow,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
    # Train for three steps (one step per AdaNet iteration, max_iteration_steps=1).
estimator.train(input_fn=train_input_fn, max_steps=3)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertEqual(
want_subnetworks,
str(eval_results["architecture/adanet/ensembles"]).count(" linear "))
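# --- Illustrative sketch, not part of the original test suite ---
# The docstring of EstimatorForceGrowTest above explains how `force_grow`
# changes ensemble growth. A minimal, hedged wiring sketch using this module's
# helpers (assumed usage only, not asserted library behavior):
def _force_grow_usage_sketch(model_dir):
  subnetwork_generator = SimpleGenerator(
      [_LinearBuilder("linear", mixture_weight_learning_rate=0.)])
  estimator = Estimator(
      head=tu.head(),
      subnetwork_generator=subnetwork_generator,
      max_iteration_steps=1,
      # Add a new subnetwork each iteration even when it does not improve the
      # AdaNet objective.
      force_grow=True,
      model_dir=model_dir)
  train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
  estimator.train(input_fn=train_input_fn, max_steps=3)
  return estimator.evaluate(input_fn=train_input_fn, steps=1)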
class EstimatorDebugTest(tu.AdanetTestCase):
"""Tests b/125483534. Detect NaNs in input_fns."""
# pylint: disable=g-long-lambda
@parameterized.named_parameters(
{
"testcase_name":
"nan_features",
"head":
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"input_fn":
lambda: ({
"x": tf.math.log([[1., 0.]])
}, tf.zeros([1, 1]))
}, {
"testcase_name":
"nan_label",
"head":
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
"input_fn":
lambda: ({
"x": tf.ones([1, 2])
}, tf.math.log([[0.]]))
}, {
"testcase_name":
"nan_labels_dict",
"head":
multi_head_lib.MultiHead(heads=[
regression_head.RegressionHead(
name="y", loss_reduction=tf_compat.SUM_OVER_BATCH_SIZE),
]),
"input_fn":
lambda: ({
"x": tf.ones([1, 2])
}, {
"y": tf.math.log([[0.]])
})
})
# pylint: enable=g-long-lambda
def test_nans_from_input_fn(self, head, input_fn):
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=head,
subnetwork_generator=subnetwork_generator,
max_iteration_steps=3,
model_dir=self.test_subdirectory,
debug=True)
with self.assertRaises(tf.errors.InvalidArgumentError):
estimator.train(input_fn=input_fn, max_steps=3)
class EstimatorEvaluateDuringTrainHookTest(tu.AdanetTestCase):
"""Tests b/129000842 with a hook that calls estimator.evaluate()."""
def test_train(self):
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
class EvalTrainHook(tf.estimator.SessionRunHook):
def end(self, session):
estimator.evaluate(input_fn=train_input_fn, steps=1)
# This should not infinite loop.
estimator.train(
input_fn=train_input_fn, max_steps=3, hooks=[EvalTrainHook()])
class CheckpointSaverHookDuringTrainingTest(tu.AdanetTestCase):
"""Tests b/139057887."""
def test_checkpoint_saver_hooks_not_decorated_during_training(self):
run_config = tf.estimator.RunConfig(tf_random_seed=42)
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
saver_hook = tf_compat.v1.train.CheckpointSaverHook(
checkpoint_dir=self.test_subdirectory, save_steps=10)
listener = tf_compat.v1.train.CheckpointSaverListener()
estimator.train(
input_fn=train_input_fn,
max_steps=3,
hooks=[saver_hook],
saving_listeners=[listener])
# If CheckpointSaverHook was not recognized during training then all
# saving_listeners would be attached to a default CheckpointSaverHook that
# Estimator creates.
self.assertLen(saver_hook._listeners, 1)
self.assertIs(saver_hook._listeners[0], listener)
class EstimatorTFLearnRunConfigTest(tu.AdanetTestCase):
"""Tests b/129483642 for tf.contrib.learn.RunConfig.
Checks that TF_CONFIG is overwritten correctly when no cluster is specified
in the RunConfig and the only task is of type chief.
"""
def test_train(self):
try:
run_config = tf.contrib.learn.RunConfig(tf_random_seed=42)
# Removed in TF 1.15 (nightly). See
# https://travis-ci.org/tensorflow/adanet/jobs/583471908
_ = run_config._session_creation_timeout_secs
except AttributeError:
self.skipTest("There is no tf.contrib in TF 2.0.")
try:
tf_config = {
"task": {
"type": "chief",
"index": 0
},
}
os.environ["TF_CONFIG"] = json.dumps(tf_config)
run_config = tf.contrib.learn.RunConfig(tf_random_seed=42)
run_config._is_chief = True # pylint: disable=protected-access
subnetwork_generator = SimpleGenerator([_DNNBuilder("dnn")])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=1,
model_dir=self.test_subdirectory,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Will fail if TF_CONFIG is not overwritten correctly in
# Estimator#prepare_next_iteration.
estimator.train(input_fn=train_input_fn, max_steps=3)
finally:
# Revert TF_CONFIG environment variable in order to not break other tests.
del os.environ["TF_CONFIG"]
class EstimatorReplayTest(tu.AdanetTestCase):
@parameterized.named_parameters(
{
"testcase_name": "no_evaluator",
"evaluator": None,
"replay_evaluator": None,
"want_architecture": " dnn3 | dnn3 | dnn ",
}, {
"testcase_name":
"evaluator",
"evaluator":
Evaluator(
input_fn=tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS),
steps=1),
"replay_evaluator":
Evaluator(
input_fn=tu.dummy_input_fn([[0., 0.], [0., 0], [0., 0.],
[0., 0.]], [[0], [0], [0], [0]]),
steps=1),
"want_architecture":
" dnn3 | dnn3 | dnn ",
})
def test_replay(self, evaluator, replay_evaluator, want_architecture):
"""Train entire estimator lifecycle using Replay."""
original_model_dir = os.path.join(self.test_subdirectory, "original")
run_config = tf.estimator.RunConfig(
tf_random_seed=42, model_dir=original_model_dir)
subnetwork_generator = SimpleGenerator([
_DNNBuilder("dnn"),
_DNNBuilder("dnn2", layer_size=3),
_DNNBuilder("dnn3", layer_size=5),
])
estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
evaluator=evaluator,
config=run_config)
train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
# Train for three iterations.
estimator.train(input_fn=train_input_fn, max_steps=30)
# Evaluate.
eval_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertIn(want_architecture,
str(eval_results["architecture/adanet/ensembles"]))
replay_run_config = tf.estimator.RunConfig(
tf_random_seed=42,
model_dir=os.path.join(self.test_subdirectory, "replayed"))
# Use different features and labels to represent a shift in the data
# distribution.
different_features = [[0., 0.], [0., 0], [0., 0.], [0., 0.]]
different_labels = [[0], [0], [0], [0]]
replay_estimator = Estimator(
head=tu.head(),
subnetwork_generator=subnetwork_generator,
max_iteration_steps=10,
evaluator=replay_evaluator,
config=replay_run_config,
replay_config=replay.Config(best_ensemble_indices=[2, 3, 1]))
train_input_fn = tu.dummy_input_fn(different_features, different_labels)
# Train for three iterations.
replay_estimator.train(input_fn=train_input_fn, max_steps=30)
# Evaluate.
eval_results = replay_estimator.evaluate(input_fn=train_input_fn, steps=1)
self.assertIn(want_architecture,
str(eval_results["architecture/adanet/ensembles"]))
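# --- Illustrative sketch, not part of the original test suite ---
# EstimatorReplayTest above exercises deterministic replay: a previous AdaNet
# run can be reproduced on new data by telling the estimator which ensemble
# index won each iteration. A hedged sketch of that wiring, reusing this
# module's helpers (the index list is an example value, as in the test above):
def _replay_usage_sketch(model_dir, best_ensemble_indices):
  estimator = Estimator(
      head=tu.head(),
      subnetwork_generator=SimpleGenerator([_DNNBuilder("dnn")]),
      max_iteration_steps=10,
      model_dir=model_dir,
      replay_config=replay.Config(best_ensemble_indices=best_ensemble_indices))
  train_input_fn = tu.dummy_input_fn(XOR_FEATURES, XOR_LABELS)
  estimator.train(input_fn=train_input_fn, max_steps=30)
  return estimator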
if __name__ == "__main__":
tf.test.main()
| 34.83747
| 139
| 0.57326
| 11,204
| 115,103
| 5.553909
| 0.073456
| 0.023286
| 0.024041
| 0.039598
| 0.701202
| 0.668547
| 0.628242
| 0.599348
| 0.568042
| 0.54429
| 0
| 0.022144
| 0.328714
| 115,103
| 3,303
| 140
| 34.848017
| 0.783193
| 0.065376
| 0
| 0.648457
| 0
| 0
| 0.11622
| 0.027383
| 0
| 0
| 0
| 0
| 0.023413
| 1
| 0.031571
| false
| 0.000709
| 0.013125
| 0.004257
| 0.075559
| 0.000355
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
532c9a004feae83c4d1b5c9bdf050c58af603c9f
| 1,473
|
py
|
Python
|
ua_roomseeker/uploader.py
|
nyg1/classroom-finder
|
13b6332187c2afb9833a1acd82bdf31ab81af5c8
|
[
"MIT"
] | 1
|
2020-08-29T22:04:17.000Z
|
2020-08-29T22:04:17.000Z
|
ua_roomseeker/uploader.py
|
nyg1/classroom-finder
|
13b6332187c2afb9833a1acd82bdf31ab81af5c8
|
[
"MIT"
] | 1
|
2020-02-17T05:18:36.000Z
|
2020-02-17T05:18:36.000Z
|
ua_roomseeker/uploader.py
|
nyg1/UAroomseeker
|
13b6332187c2afb9833a1acd82bdf31ab81af5c8
|
[
"MIT"
] | 2
|
2020-08-29T22:04:22.000Z
|
2020-09-07T18:01:46.000Z
|
from seeker.models import Building, Classroom, Time
import json
import os

os.chdir('../data')
file_list = os.listdir()

# Loop through each JSON file in the data directory.
for jsonfile in file_list:
    building_name = os.path.splitext(jsonfile)[0]

    # Open the JSON file and load the data.
    with open(jsonfile, 'r') as f:
        jsondata = json.loads(f.read())

    # Create the building.
    building = Building(BuildingName=building_name)
    building.save()

    for day in jsondata:
        for room in jsondata[day].keys():
            # Create each classroom, adding one only if it doesn't exist yet.
            # get_or_create returns an (object, created) tuple.
            classroom, _created = Classroom.objects.get_or_create(
                building=building,
                ClassroomName=building_name + ' - ' + room)
            for time_value in jsondata[day][room]:
                # Create each time slot for this classroom and day.
                time_entry = Time(
                    building=building,
                    classroom=classroom,
                    DayofWeek=day,
                    TimeValue=time_value)
                time_entry.save()

# IMPORTANT: this script must currently be run inside a "python manage.py shell"
# session (a standalone fix may be added in the future). For now:
#   1. Open PowerShell and navigate to the folder that contains this file.
#   2. Run "python manage.py shell".
#   3. Copy and paste the code into the shell and press enter.
#   4. Expect a wait time of around 5 minutes.
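# --- Illustrative sketch, not part of the original script ---
# The note above says the code has to be pasted into "python manage.py shell".
# A common alternative is to bootstrap Django explicitly so the script can be
# run directly with "python uploader.py". The settings module name
# ("roomseeker.settings") is an assumed placeholder, not taken from this repo.
def _bootstrap_django_sketch():
    """Run (or inline) this before the model imports at the top of the file."""
    import django
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'roomseeker.settings')
    django.setup()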
| 43.323529
| 230
| 0.681602
| 209
| 1,473
| 4.794258
| 0.464115
| 0.02994
| 0.06986
| 0.10978
| 0.236527
| 0.236527
| 0.201597
| 0.121756
| 0.121756
| 0.121756
| 0
| 0.008606
| 0.211134
| 1,473
| 33
| 231
| 44.636364
| 0.853701
| 0.359131
| 0
| 0
| 0
| 0
| 0.015038
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.176471
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
532f2625149c93f751d40a68a85af35c606a9f80
| 1,192
|
py
|
Python
|
infra/apps/catalog/tests/views/distribution_upload_tests.py
|
datosgobar/infra.datos.gob.ar
|
9f6ae7f0fc741aad79d074e7b2eb2a7dddf8b2cf
|
[
"MIT"
] | 1
|
2019-07-01T19:38:52.000Z
|
2019-07-01T19:38:52.000Z
|
infra/apps/catalog/tests/views/distribution_upload_tests.py
|
datosgobar/infra.datos.gob.ar
|
9f6ae7f0fc741aad79d074e7b2eb2a7dddf8b2cf
|
[
"MIT"
] | 77
|
2019-05-27T18:16:30.000Z
|
2021-09-20T21:25:24.000Z
|
infra/apps/catalog/tests/views/distribution_upload_tests.py
|
datosgobar/infra.datos.gob.ar
|
9f6ae7f0fc741aad79d074e7b2eb2a7dddf8b2cf
|
[
"MIT"
] | 3
|
2019-12-09T16:38:18.000Z
|
2020-10-30T02:10:20.000Z
|
import pytest
from django.core.files import File
from django.urls import reverse
from freezegun import freeze_time
from infra.apps.catalog.tests.helpers.open_catalog import open_catalog
pytestmark = pytest.mark.django_db
@pytest.fixture(autouse=True)
def give_user_edit_rights(user, node):
node.admins.add(user)
def _call(client, distribution):
return client.get(reverse('catalog:distribution_uploads',
kwargs={'node_id': distribution.catalog.id,
'identifier': distribution.identifier}))
def test_older_versions_listed(logged_client, distribution_upload):
distribution = distribution_upload.distribution
with freeze_time('2019-01-01'):
with open_catalog('test_data.csv') as fd:
other = distribution.distributionupload_set \
.create(file=File(fd))
response = _call(logged_client, distribution)
assert str(other.uploaded_at) in response.content.decode('utf-8')
def test_catalog_identifier_in_page(logged_client, distribution):
response = _call(logged_client, distribution)
assert distribution.catalog.identifier in response.content.decode('utf-8')
| 34.057143
| 78
| 0.72651
| 144
| 1,192
| 5.805556
| 0.479167
| 0.107656
| 0.114833
| 0.057416
| 0.165072
| 0.165072
| 0
| 0
| 0
| 0
| 0
| 0.010256
| 0.182047
| 1,192
| 34
| 79
| 35.058824
| 0.847179
| 0
| 0
| 0.083333
| 0
| 0
| 0.065436
| 0.02349
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.166667
| false
| 0
| 0.208333
| 0.041667
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5330d3c71a4dca71ef0aca045f8b4a15a601bd18
| 3,494
|
py
|
Python
|
examples/model_zoo/build_binaries.py
|
Embracing/unrealcv
|
19305da8554c3a0e683a5e27a1e487cc2cf42776
|
[
"MIT"
] | 1,617
|
2016-09-10T04:41:33.000Z
|
2022-03-31T20:03:28.000Z
|
examples/model_zoo/build_binaries.py
|
Embracing/unrealcv
|
19305da8554c3a0e683a5e27a1e487cc2cf42776
|
[
"MIT"
] | 199
|
2016-09-13T09:40:59.000Z
|
2022-03-16T02:37:23.000Z
|
examples/model_zoo/build_binaries.py
|
Embracing/unrealcv
|
19305da8554c3a0e683a5e27a1e487cc2cf42776
|
[
"MIT"
] | 431
|
2016-09-10T03:20:35.000Z
|
2022-03-19T13:44:21.000Z
|
import subprocess, os
ue4_win = r"C:\Program Files\Epic Games\UE_4.16"
ue4_linux = "/home/qiuwch/workspace/UE416"
ue4_mac = '/Users/Shared/Epic Games/UE_4.16'
win_uprojects = [
r'C:\qiuwch\workspace\uprojects\UE4RealisticRendering\RealisticRendering.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene1\ArchinteriorsVol2Scene1.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene2\ArchinteriorsVol2Scene2.uproject',
r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene3\ArchinteriorsVol2Scene3.uproject',
r'C:\qiuwch\workspace\uprojects\UE4UrbanCity\UrbanCity.uproject',
r'D:\workspace\uprojects\Matinee\Matinee.uproject',
r'D:\workspace\uprojects\PhotorealisticCharacter\PhotorealisticCharacter2.uproject',
]
linux_uprojects = [
os.path.expanduser('~/workspace/uprojects/UE4RealisticRendering/RealisticRendering.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
os.path.expanduser("~/workspace/uprojects/UE4UrbanCity/UrbanCity.uproject"),
]
mac_uprojects = [
os.path.expanduser('~/workspace/UnrealEngine/Templates/FP_FirstPerson/FP_FirstPerson.uproject'),
os.path.expanduser('~/uprojects/RealisticRendering/RealisticRendering.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'),
os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'),
os.path.expanduser('~/uprojects/UE4UrbanCity/UrbanCity.uproject'),
]
uprojects = []
for uproject_path in win_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_win,
log_file = 'log/win_%s.log' % uproject_name
),
)
for uproject_path in linux_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_linux,
log_file = 'log/linux_%s.log' % uproject_name
),
)
for uproject_path in mac_uprojects:
uproject_name = os.path.basename(uproject_path).split('.')[0]
uprojects.append(
dict(
uproject_path = uproject_path,
ue4_path = ue4_mac,
log_file = 'log/mac_%s.log' % uproject_name
),
)
if __name__ == '__main__':
for uproject in uprojects:
uproject_path = uproject['uproject_path']
if not os.path.isfile(uproject_path):
print("Can not find uproject file %s, skip this project" % uproject_path)
continue
cmd = [
'python', 'build.py',
'--UE4', uproject['ue4_path'],
# '--output', uproject['output_folder'],
uproject['uproject_path']
]
print(cmd)
subprocess.call(cmd,
stdout = open(uproject['log_file'], 'w'))
with open(uproject['log_file']) as f:
lines = f.readlines()
print(''.join(lines[-10:])) # Print the last few lines
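# --- Illustrative sketch, not part of the original script ---
# The three loops above build the same dict per platform; an equivalent, more
# compact way to assemble `uprojects` (same fields, same log file naming) is
# sketched below as a possible refactor.
def _collect_uprojects():
    configs = [
        ('win', ue4_win, win_uprojects),
        ('linux', ue4_linux, linux_uprojects),
        ('mac', ue4_mac, mac_uprojects),
    ]
    collected = []
    for platform_name, ue4_path, project_paths in configs:
        for uproject_path in project_paths:
            uproject_name = os.path.basename(uproject_path).split('.')[0]
            collected.append(dict(
                uproject_path=uproject_path,
                ue4_path=ue4_path,
                log_file='log/%s_%s.log' % (platform_name, uproject_name),
            ))
    return collected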
| 40.627907
| 108
| 0.694333
| 355
| 3,494
| 6.664789
| 0.242254
| 0.086221
| 0.074387
| 0.091293
| 0.687659
| 0.456889
| 0.370245
| 0.162299
| 0.134404
| 0.134404
| 0
| 0.026583
| 0.18174
| 3,494
| 85
| 109
| 41.105882
| 0.800979
| 0.018031
| 0
| 0.205479
| 0
| 0
| 0.453909
| 0.383897
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013699
| 0
| 0.013699
| 0.041096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53341c91d6109f552f8886886b2f526f32484d2e
| 731
|
py
|
Python
|
plugins/hanlp_demo/hanlp_demo/zh/tf/train/train_ctb9_pos_electra.py
|
antfootAlex/HanLP
|
e8044b27ae1de54b9070db08549853d3ca8271e2
|
[
"Apache-2.0"
] | 3
|
2022-03-07T08:33:16.000Z
|
2022-03-07T08:38:08.000Z
|
plugins/hanlp_demo/hanlp_demo/zh/tf/train/train_ctb9_pos_electra.py
|
hushaoyun/HanLP
|
967b52404c9d0adbc0cff2699690c127ecfca36e
|
[
"Apache-2.0"
] | null | null | null |
plugins/hanlp_demo/hanlp_demo/zh/tf/train/train_ctb9_pos_electra.py
|
hushaoyun/HanLP
|
967b52404c9d0adbc0cff2699690c127ecfca36e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 23:15
from hanlp.components.taggers.transformers.transformer_tagger_tf import TransformerTaggerTF
from tests import cdroot
cdroot()
tagger = TransformerTaggerTF()
save_dir = 'data/model/pos/ctb9_electra_small_zh_epoch_20'
tagger.fit('data/pos/ctb9/train.tsv',
'data/pos/ctb9/test.tsv',
save_dir,
transformer='hfl/chinese-electra-small-discriminator',
max_seq_length=130,
warmup_steps_ratio=0.1,
epochs=20,
learning_rate=5e-5)
tagger.load(save_dir)
print(tagger(['我', '的', '希望', '是', '希望', '和平']))
tagger.evaluate('data/pos/ctb9/test.tsv', save_dir=save_dir)
print(f'Model saved in {save_dir}')
| 33.227273
| 91
| 0.679891
| 104
| 731
| 4.605769
| 0.634615
| 0.087683
| 0.068894
| 0.06263
| 0.104384
| 0.104384
| 0.104384
| 0
| 0
| 0
| 0
| 0.046205
| 0.170999
| 731
| 21
| 92
| 34.809524
| 0.744224
| 0.079343
| 0
| 0
| 0
| 0
| 0.276532
| 0.22571
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
533456ef85893ecb35c41dc38df64614c652cb8f
| 768
|
py
|
Python
|
src/app/main.py
|
Wedding-APIs-System/Backend-APi
|
5a03be5f36ce8ca7e3abba2d64b63c55752697f3
|
[
"MIT"
] | null | null | null |
src/app/main.py
|
Wedding-APIs-System/Backend-APi
|
5a03be5f36ce8ca7e3abba2d64b63c55752697f3
|
[
"MIT"
] | null | null | null |
src/app/main.py
|
Wedding-APIs-System/Backend-APi
|
5a03be5f36ce8ca7e3abba2d64b63c55752697f3
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from app.api import landing, login, attendance_confirmation
from sql_app.database import orm_connection
app = FastAPI(title="Sergio's wedding backend API",
description="REST API which serves login, attendance confirmation and other features",
version="1.0",)
origins = [
"*"
# "http://190.96.140.12:5500",
# "68.251.63.208"
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(landing.router)
app.include_router(login.router)
app.include_router(attendance_confirmation.router)
@app.get("/ping")
async def pong():
return {"ping": "pong!"}
| 20.210526
| 90
| 0.71875
| 97
| 768
| 5.56701
| 0.57732
| 0.122222
| 0.088889
| 0.081481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039939
| 0.152344
| 768
| 37
| 91
| 20.756757
| 0.789555
| 0.057292
| 0
| 0
| 0
| 0
| 0.165738
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.173913
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5334ff647ed3cefd61f9291666af5ce5a96e862e
| 1,800
|
py
|
Python
|
tests/test_pydora/test_utils.py
|
NextGenTechBar/twandora
|
f626717a5580f82250bbe66d4ebc357e0882382c
|
[
"MIT"
] | null | null | null |
tests/test_pydora/test_utils.py
|
NextGenTechBar/twandora
|
f626717a5580f82250bbe66d4ebc357e0882382c
|
[
"MIT"
] | null | null | null |
tests/test_pydora/test_utils.py
|
NextGenTechBar/twandora
|
f626717a5580f82250bbe66d4ebc357e0882382c
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from pandora.client import APIClient
from pandora.errors import InvalidAuthToken, ParameterMissing
from pandora.models.pandora import Station, AdItem, PlaylistItem
from pandora.py2compat import Mock, patch
from pydora.utils import iterate_forever
class TestIterateForever(TestCase):
def setUp(self):
self.transport = Mock(side_effect=[InvalidAuthToken(), None])
self.client = APIClient(self.transport, None, None, None, None)
self.client._authenticate = Mock()
def test_handle_missing_params_exception_due_to_missing_ad_tokens(self):
with patch.object(APIClient, 'get_playlist') as get_playlist_mock:
with patch.object(APIClient, 'register_ad', side_effect=ParameterMissing("ParameterMissing")):
station = Station.from_json(self.client, {'stationToken': 'token_mock'})
ad_mock = AdItem.from_json(self.client, {'station_id': 'id_mock'})
get_playlist_mock.return_value=iter([ad_mock])
station_iter = iterate_forever(station.get_playlist)
next_track = next(station_iter)
self.assertEqual(ad_mock, next_track)
def test_reraise_missing_params_exception(self):
with patch.object(APIClient, 'get_playlist', side_effect=ParameterMissing("ParameterMissing")) as get_playlist_mock:
with self.assertRaises(ParameterMissing):
station = Station.from_json(self.client, {'stationToken': 'token_mock'})
track_mock = PlaylistItem.from_json(self.client, {'token': 'token_mock'})
get_playlist_mock.return_value=iter([track_mock])
station_iter = iterate_forever(station.get_playlist)
next(station_iter)
| 45
| 124
| 0.693333
| 201
| 1,800
| 5.935323
| 0.288557
| 0.073764
| 0.050293
| 0.060352
| 0.358759
| 0.323554
| 0.323554
| 0.201174
| 0.201174
| 0.115675
| 0
| 0.000708
| 0.215556
| 1,800
| 39
| 125
| 46.153846
| 0.844193
| 0
| 0
| 0.142857
| 0
| 0
| 0.079444
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.107143
| false
| 0
| 0.214286
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
533987c4f01c2ae25c35913b042954d6d704d9b2
| 1,736
|
py
|
Python
|
setup.py
|
TanKingsley/pyxll-jupyter
|
4f7b3eb361079b74683d89340dfff9576fb2ff41
|
[
"MIT"
] | 1
|
2020-12-28T10:40:38.000Z
|
2020-12-28T10:40:38.000Z
|
setup.py
|
TanKingsley/pyxll-jupyter
|
4f7b3eb361079b74683d89340dfff9576fb2ff41
|
[
"MIT"
] | null | null | null |
setup.py
|
TanKingsley/pyxll-jupyter
|
4f7b3eb361079b74683d89340dfff9576fb2ff41
|
[
"MIT"
] | null | null | null |
"""
PyXLL-Jupyter
This package integrates Jupyter notebooks into Microsoft Excel.
To install it, first install PyXLL (see https://www.pyxll.com).
Briefly, to install PyXLL do the following::
pip install pyxll
pyxll install
Once PyXLL is installed, installing this package will add a
button to the PyXLL ribbon toolbar that will start a Jupyter
notebook browser as a custom task pane in Excel.
To install this package use::
pip install pyxll_jupyter
"""
from setuptools import setup, find_packages
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="pyxll_jupyter",
description="Adds Jupyter notebooks to Microsoft Excel using PyXLL.",
long_description=long_description,
long_description_content_type='text/markdown',
version="0.1.11",
packages=find_packages(),
include_package_data=True,
package_data={
"pyxll_jupyter": [
"pyxll_jupyter/resources/ribbon.xml",
"pyxll_jupyter/resources/jupyter.png",
]
},
project_urls={
"Source": "https://github.com/pyxll/pyxll-jupyter",
"Tracker": "https://github.com/pyxll/pyxll-jupyter/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows"
],
entry_points={
"pyxll": [
"modules = pyxll_jupyter.pyxll:modules",
"ribbon = pyxll_jupyter.pyxll:ribbon"
]
},
install_requires=[
"pyxll >= 5.0.0",
"jupyter >= 1.0.0",
"PySide2"
]
)
| 26.707692
| 73
| 0.657258
| 210
| 1,736
| 5.304762
| 0.514286
| 0.10772
| 0.045781
| 0.05386
| 0.055655
| 0.055655
| 0
| 0
| 0
| 0
| 0
| 0.009731
| 0.230415
| 1,736
| 64
| 74
| 27.125
| 0.824102
| 0.269009
| 0
| 0
| 0
| 0
| 0.400476
| 0.096749
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5339fd0b2f57b238565a16867e9a32da801ab240
| 5,630
|
py
|
Python
|
board/views.py
|
albi23/Pyra
|
1c1ceece15d55cd0e0ecf41d7224683b93b72555
|
[
"MIT"
] | null | null | null |
board/views.py
|
albi23/Pyra
|
1c1ceece15d55cd0e0ecf41d7224683b93b72555
|
[
"MIT"
] | 6
|
2021-03-19T01:58:04.000Z
|
2021-09-22T18:53:15.000Z
|
board/views.py
|
albi23/Pyra
|
1c1ceece15d55cd0e0ecf41d7224683b93b72555
|
[
"MIT"
] | 1
|
2020-06-29T18:16:29.000Z
|
2020-06-29T18:16:29.000Z
|
from typing import List
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views import generic, View
from board.forms import SignUpForm
from .const import BOARD_VIEW_COLUMN_COUNT
from .models import Board, Priority, Membership, Contribution
from .models import Task
@login_required
def index(request):
board_col, row_count = Board.objects.get_user_split_boards(request.user, BOARD_VIEW_COLUMN_COUNT)
context = {
'board_col': board_col,
'row_count': row_count
}
return render(request, 'index.html', context)
@login_required
def board(request, board_id):
_board = Board.objects.get(id=board_id)
todo_tasks: List[Task] = Task.objects.filter(board=_board, status='TODO')
doing_tasks = Task.objects.filter(board=_board, status='DOING')
done_tasks = Task.objects.filter(board=_board, status='DONE')
context = {
'board': _board,
'todo_tasks': todo_tasks,
'doing_tasks': doing_tasks,
'done_tasks': done_tasks,
'user': request.user,
}
return render(request, 'board.html', context)
@login_required
def update_task_state(request):
if request.method == "POST":
task_id = request.POST['task_id']
new_state = request.POST['new_state']
this_task = Task.objects.get(id=task_id)
this_task.status = new_state
this_task.save()
return JsonResponse({"success": True})
class SignUp(generic.CreateView):
form_class = SignUpForm
success_url = reverse_lazy('login')
template_name = 'signup.html'
class CreateBoard(View):
def post(self, request):
name = request.POST['name']
description = request.POST['description']
if name:
new_board = Board.objects.create(
name=name,
description=description,
)
Membership.objects.create(
board=new_board,
user=request.user,
role=Membership.Role.SUPER_USER
)
return JsonResponse({"success": True})
return JsonResponse({"success": False})
class CreateTask(View):
def post(self, request):
title = request.POST['title']
description = request.POST['description']
status = request.POST['status']
priority = int(request.POST['priority'])
board_id = int(request.POST['board_id'])
if title and request.user in Board.objects.get(id=board_id).members.all():
Task.objects.create(
title=title,
description=description,
status=status,
priority=Priority.choices[-int(priority) - 1][0],
created_by=request.user,
board_id=board_id
)
return JsonResponse({"success": True})
return JsonResponse({"success": False})
class CreateBoardMembership(View):
def post(self, request):
username = request.POST['username']
board_id = int(request.POST['board_id'])
if username and board_id:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return JsonResponse(
status=404,
data={'message': 'User doesn\'t exist'}
)
try:
membership = Membership.objects.get(board=board_id, user=user.id)
except Membership.DoesNotExist:
membership = None
if membership is not None:
return JsonResponse(
status=400,
data={'message': 'user already added'}
)
Membership.objects.create(
user=user,
board_id=board_id
)
return JsonResponse({'message': 'success'})
return JsonResponse(
status=400,
data={'message': 'username or board_id can\'t be empty'}
)
def parse_priority(value: str):
choices = Priority.choices
for i in range(0, len(choices)):
if value == choices[i][1].lower():
return choices[i][0]
@login_required
def update_task(request):
this_task = Task.objects.get(id=request.POST['id'])
this_task.title = request.POST['title']
this_task.description = request.POST['description']
this_task.status = request.POST['status']
this_task.priority = parse_priority(request.POST['priority'].lower())
this_task.save()
assigned_user_id = request.POST['user']
if assigned_user_id:
Contribution.objects.create(
task=this_task,
user_id=assigned_user_id,
)
return JsonResponse({"success": True})
@login_required
def get_available_users(request):
users = User.objects.filter(
membership__board_id=request.GET['board']
).exclude(
contribution__task_id=request.GET['task']
)
response_users = list(map(
lambda user: {
'id': user.id,
'username': user.username
},
users
))
return JsonResponse({'users': response_users})
@login_required
def delete_task(request):
    # request.method is only the HTTP verb string; the task id has to be read
    # from the request data itself.
    task_id = request.POST.get('task') or request.GET.get('task')
    if task_id:
        task = Task.objects.get(id=task_id)
if request.user in task.board.members.all():
task.delete()
return JsonResponse({"success": True})
return JsonResponse({"success": False})
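# --- Illustrative sketch, not part of the original views module ---
# `update_task_state` above expects a POST carrying "task_id" and "new_state".
# A hedged example of driving it from a test; the URL name
# ("update_task_state") is an assumption about urls.py, not taken from it, and
# because the view is @login_required the client must be logged in first.
def _update_task_state_example(task_id):
    from django.test import Client
    from django.urls import reverse
    client = Client()
    # client.login(username=..., password=...) would be needed in practice.
    response = client.post(
        reverse('update_task_state'),
        {'task_id': task_id, 'new_state': 'DONE'})
    return response.json()  # expected: {"success": True}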
| 28.291457
| 101
| 0.610302
| 626
| 5,630
| 5.332268
| 0.198083
| 0.056022
| 0.059916
| 0.043439
| 0.258538
| 0.202516
| 0.112942
| 0.073996
| 0.038346
| 0
| 0
| 0.003453
| 0.279929
| 5,630
| 198
| 102
| 28.434343
| 0.819931
| 0
| 0
| 0.248366
| 0
| 0
| 0.072647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065359
| false
| 0
| 0.071895
| 0
| 0.287582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
533acfb3888fb78753274cc7a4925350317c5e43
| 1,008
|
py
|
Python
|
setup.py
|
lazmond3/pylib-instagram-type
|
9683a7fb1dad9b1a770a3f98317f1cde1085f0a7
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
lazmond3/pylib-instagram-type
|
9683a7fb1dad9b1a770a3f98317f1cde1085f0a7
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
lazmond3/pylib-instagram-type
|
9683a7fb1dad9b1a770a3f98317f1cde1085f0a7
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Learn more: https://github.com/kennethreitz/setup.py
from setuptools import setup, find_packages
import os
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
def take_package_name(name):
if name.startswith("-e"):
return name[name.find("=")+1:name.rfind("-")]
else:
return name.strip()
def load_requires_from_file(filepath):
with open(filepath) as fp:
return [take_package_name(pkg_name) for pkg_name in fp.readlines()]
setup(
name='lazmond3-pylib-instagram-type',
version='1.0.8',
    description='update from 1.0.8: hasattr: 1.0.7: multiple medias, str get multiple + init.py',
long_description=readme,
author='lazmond3',
author_email='moikilo00@gmail.com',
url='https://github.com/lazmond3/pylib-instagram-type.git',
install_requires=["lazmond3-pylib-debug"],
license=license,
packages=find_packages(exclude=('tests', 'docs')),
test_suite='tests'
)
| 25.2
| 92
| 0.667659
| 141
| 1,008
| 4.666667
| 0.539007
| 0.036474
| 0.042553
| 0.079027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 0.173611
| 1,008
| 39
| 93
| 25.846154
| 0.769508
| 0.073413
| 0
| 0
| 0
| 0.037037
| 0.257787
| 0.031149
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.074074
| 0
| 0.259259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
533c689999c368cfd2824982d040a984df189702
| 3,909
|
py
|
Python
|
tests/test_webframe.py
|
zsolt-beringer/osm-gimmisn
|
b0cbf2e88c1846ef49e33fd32aeb6b4ecabea4c0
|
[
"MIT"
] | null | null | null |
tests/test_webframe.py
|
zsolt-beringer/osm-gimmisn
|
b0cbf2e88c1846ef49e33fd32aeb6b4ecabea4c0
|
[
"MIT"
] | null | null | null |
tests/test_webframe.py
|
zsolt-beringer/osm-gimmisn
|
b0cbf2e88c1846ef49e33fd32aeb6b4ecabea4c0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2019 Miklos Vajna and contributors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The test_webframe module covers the webframe module."""
from typing import List
from typing import TYPE_CHECKING
from typing import Tuple
from typing import cast
import configparser
import datetime
import os
import unittest
import unittest.mock
import time
# pylint: disable=unused-import
import yattag
import webframe
if TYPE_CHECKING:
# pylint: disable=no-name-in-module,import-error,unused-import
from wsgiref.types import StartResponse # noqa: F401
class TestHandleStatic(unittest.TestCase):
"""Tests handle_static()."""
def test_happy(self) -> None:
"""Tests the happy path: css case."""
content, content_type = webframe.handle_static("/osm/static/osm.css")
self.assertTrue(len(content))
self.assertEqual(content_type, "text/css")
def test_javascript(self) -> None:
"""Tests the javascript case."""
content, content_type = webframe.handle_static("/osm/static/sorttable.js")
self.assertTrue(len(content))
self.assertEqual(content_type, "application/x-javascript")
def test_else(self) -> None:
"""Tests the case when the content type is not recognized."""
content, content_type = webframe.handle_static("/osm/static/test.xyz")
self.assertFalse(len(content))
self.assertFalse(len(content_type))
class TestHandleException(unittest.TestCase):
"""Tests handle_exception()."""
def test_happy(self) -> None:
"""Tests the happy path."""
environ = {
"PATH_INFO": "/"
}
def start_response(status: str, response_headers: List[Tuple[str, str]]) -> None:
self.assertTrue(status.startswith("500"))
header_dict = dict(response_headers)
self.assertEqual(header_dict["Content-type"], "text/html; charset=utf-8")
try:
int("a")
# pylint: disable=broad-except
except Exception:
callback = cast('StartResponse', start_response)
output_iterable = webframe.handle_exception(environ, callback)
output_list = cast(List[bytes], output_iterable)
self.assertTrue(output_list)
output = output_list[0].decode('utf-8')
self.assertIn("ValueError", output)
return
self.fail()
class TestLocalToUiTz(unittest.TestCase):
"""Tests local_to_ui_tz()."""
def test_happy(self) -> None:
"""Tests the happy path."""
def get_abspath(path: str) -> str:
if os.path.isabs(path):
return path
return os.path.join(os.path.dirname(__file__), path)
def get_config() -> configparser.ConfigParser:
config = configparser.ConfigParser()
config.read_dict({"wsgi": {"timezone": "Europe/Budapest"}})
return config
with unittest.mock.patch('util.get_abspath', get_abspath):
with unittest.mock.patch('webframe.get_config', get_config):
local_dt = datetime.datetime.fromtimestamp(0)
ui_dt = webframe.local_to_ui_tz(local_dt)
if time.strftime('%Z%z') == "CET+0100":
self.assertEqual(ui_dt.timestamp(), 0)
class TestFillMissingHeaderItems(unittest.TestCase):
"""Tests fill_missing_header_items()."""
def test_happy(self) -> None:
"""Tests the happy path."""
streets = "no"
relation_name = "gazdagret"
items: List[yattag.doc.Doc] = []
webframe.fill_missing_header_items(streets, relation_name, items)
html = items[0].getvalue()
self.assertIn("Missing house numbers", html)
self.assertNotIn("Missing streets", html)
if __name__ == '__main__':
unittest.main()
| 33.991304
| 89
| 0.643643
| 457
| 3,909
| 5.352298
| 0.352298
| 0.035977
| 0.031889
| 0.039248
| 0.162306
| 0.162306
| 0.162306
| 0.162306
| 0.102208
| 0
| 0
| 0.00704
| 0.236889
| 3,909
| 114
| 90
| 34.289474
| 0.81294
| 0.164236
| 0
| 0.081081
| 0
| 0
| 0.094287
| 0.014986
| 0
| 0
| 0
| 0
| 0.175676
| 1
| 0.121622
| false
| 0
| 0.175676
| 0
| 0.405405
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
533cc101e9c7e5be34bb424dc7dd27a2b33a585a
| 6,533
|
py
|
Python
|
spotify.py
|
nimatest1234/telegram_spotify_downloader_bot
|
7e0a9ba32ee219752582b917867600653337f3d1
|
[
"MIT"
] | null | null | null |
spotify.py
|
nimatest1234/telegram_spotify_downloader_bot
|
7e0a9ba32ee219752582b917867600653337f3d1
|
[
"MIT"
] | null | null | null |
spotify.py
|
nimatest1234/telegram_spotify_downloader_bot
|
7e0a9ba32ee219752582b917867600653337f3d1
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import requests
from youtube_search import YoutubeSearch
import youtube_dl
import eyed3.id3
import eyed3
import lyricsgenius
import telepot
spotifyy = spotipy.Spotify(
client_credentials_manager=SpotifyClientCredentials(client_id='a145db3dcd564b9592dacf10649e4ed5',
client_secret='389614e1ec874f17b8c99511c7baa2f6'))
genius = lyricsgenius.Genius('biZZReO7F98mji5oz3cE0FiIG73Hh07qoXSIzYSGNN3GBsnY-eUrPAVSdJk_0_de')
token = 'token bot'
bot = telepot.Bot(token)
def DOWNLOADMP3(link,chat_id):
#Get MetaData
results = spotifyy.track(link)
song = results['name']
print('[Spotify]MetaData Found!')
artist = results['artists'][0]['name']
YTSEARCH = str(song + " " + artist)
artistfinder = results['artists']
tracknum = results['track_number']
album = results['album']['name']
realese_date = int(results['album']['release_date'][:4])
if len(artistfinder) > 1:
fetures = "( Ft."
for lomi in range(0, len(artistfinder)):
try:
if lomi < len(artistfinder) - 2:
artistft = artistfinder[lomi + 1]['name'] + ", "
fetures += artistft
else:
artistft = artistfinder[lomi + 1]['name'] + ")"
fetures += artistft
except:
pass
else:
fetures = ""
time_duration = ""
time_duration1 = ""
time_duration2 = ""
time_duration3 = ""
millis = results['duration_ms']
millis = int(millis)
seconds = (millis / 1000) % 60
minutes = (millis / (1000 * 60)) % 60
seconds = int(seconds)
minutes = int(minutes)
if seconds >= 10:
if seconds < 59:
time_duration = "{0}:{1}".format(minutes, seconds)
time_duration1 = "{0}:{1}".format(minutes, seconds + 1)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
if seconds == 10:
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
elif seconds < 58:
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
elif seconds == 58:
time_duration3 = "{0}:0{1}".format(minutes + 1, seconds - 58)
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
else:
time_duration2 = "{0}:{1}".format(minutes, seconds - 1)
else:
time_duration1 = "{0}:0{1}".format(minutes + 1, seconds - 59)
if seconds == 59:
time_duration3 = "{0}:0{1}".format(minutes + 1, seconds - 58)
else:
time_duration = "{0}:0{1}".format(minutes, seconds)
time_duration1 = "{0}:0{1}".format(minutes, seconds + 1)
if seconds < 8:
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
elif seconds == 9 or seconds == 8:
time_duration3 = "{0}:{1}".format(minutes, seconds + 2)
elif seconds == 0:
time_duration2 = "{0}:{1}".format(minutes - 1, seconds + 59)
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
else:
time_duration2 = "{0}:0{1}".format(minutes, seconds - 1)
time_duration3 = "{0}:0{1}".format(minutes, seconds + 2)
trackname = song + fetures
#Download Cover
response = requests.get(results['album']['images'][0]['url'])
DIRCOVER = "songpicts//" + trackname + ".png"
file = open(DIRCOVER, "wb")
file.write(response.content)
file.close()
#search for music on youtube
results = list(YoutubeSearch(str(YTSEARCH)).to_dict())
LINKASLI = ''
for URLSSS in results:
timeyt = URLSSS["duration"]
print(URLSSS['title'])
if timeyt == time_duration or timeyt == time_duration1:
LINKASLI = URLSSS['url_suffix']
break
elif timeyt == time_duration2 or timeyt == time_duration3:
LINKASLI = URLSSS['url_suffix']
break
YTLINK = str("https://www.youtube.com/" + LINKASLI)
print('[Youtube]song found!')
print(f'[Youtube]Link song on youtube : {YTLINK}')
#Donwload Music from youtube
options = {
# PERMANENT options
'format': 'bestaudio/best',
'keepvideo': False,
'outtmpl': f'song//{trackname}.*',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '320'
}]
}
with youtube_dl.YoutubeDL(options) as mp3:
mp3.download([YTLINK])
aud = eyed3.load(f"song//{trackname}.mp3")
print('[Youtube]Song Downloaded!')
aud.tag.artist = artist
aud.tag.album = album
aud.tag.album_artist = artist
aud.tag.title = trackname
aud.tag.track_num = tracknum
aud.tag.year = realese_date
try:
songok = genius.search_song(song, artist)
aud.tag.lyrics.set(songok.lyrics)
print('[Genius]Song lyric Found!')
except:
print('[Genius]Song lyric NOT Found!')
aud.tag.images.set(3, open("songpicts//" + trackname + ".png", 'rb').read(), 'image/png')
aud.tag.save()
bot.sendAudio(chat_id, open(f'song//{trackname}.mp3', 'rb'), title=trackname)
print('[Telegram]Song sent!')
def album(link):
results = spotifyy.album_tracks(link)
albums = results['items']
while results['next']:
results = spotifyy.next(results)
albums.extend(results['items'])
print('[Spotify]Album Found!')
return albums
def artist(link):
results = spotifyy.artist_top_tracks(link)
albums = results['tracks']
print('[Spotify]Artist Found!')
return albums
def searchalbum(track):
results = spotifyy.search(track)
return results['tracks']['items'][0]['album']['external_urls']['spotify']
def playlist(link):
results = spotifyy.playlist_tracks(link)
print('[Spotify]Playlist Found!')
return results['items']
def searchsingle(track):
results = spotifyy.search(track)
return results['tracks']['items'][0]['href']
def searchartist(searchstr):
results = spotifyy.search(searchstr)
return results['tracks']['items'][0]['artists'][0]["external_urls"]['spotify']
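# --- Illustrative sketch, not part of the original module ---
# Instead of pre-computing several "m:ss" strings to compare against
# YoutubeSearch results (as DOWNLOADMP3 does above), durations can be compared
# numerically with a tolerance. Sketch only; assumes the YouTube duration
# string looks like "m:ss" or "h:mm:ss".
def _duration_matches(yt_duration, spotify_ms, tolerance_seconds=2):
    parts = [int(p) for p in str(yt_duration).split(':')]
    yt_seconds = 0
    for part in parts:
        yt_seconds = yt_seconds * 60 + part
    return abs(yt_seconds - spotify_ms // 1000) <= tolerance_seconds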
| 34.026042
| 106
| 0.590693
| 717
| 6,533
| 5.295676
| 0.245467
| 0.011061
| 0.044245
| 0.08296
| 0.29497
| 0.271267
| 0.266526
| 0.224124
| 0.169871
| 0.120095
| 0
| 0.044274
| 0.263585
| 6,533
| 191
| 107
| 34.204188
| 0.744959
| 0.015001
| 0
| 0.21875
| 0
| 0
| 0.160392
| 0.026447
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04375
| false
| 0.00625
| 0.0625
| 0
| 0.14375
| 0.06875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
533d2e7d7e3bbba4894560b223b684f968b2d464
| 5,512
|
py
|
Python
|
tests/test_atomdict.py
|
Tillsten/atom
|
19b6291f7d3c9b3828dcd73e900b8dcbc2ddf92d
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/test_atomdict.py
|
Tillsten/atom
|
19b6291f7d3c9b3828dcd73e900b8dcbc2ddf92d
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/test_atomdict.py
|
Tillsten/atom
|
19b6291f7d3c9b3828dcd73e900b8dcbc2ddf92d
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
#------------------------------------------------------------------------------
# Copyright (c) 2018-2019, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
"""Test the typed dictionary.
"""
import sys
import pytest
from atom.api import Atom, Dict, Int, atomdict
@pytest.fixture
def atom_dict():
"""Atom with different Dict members.
"""
class DictAtom(Atom):
untyped = Dict()
keytyped = Dict(Int())
valuetyped = Dict(value=Int())
fullytyped = Dict(Int(), Int())
untyped_default = Dict(default={1: 1})
keytyped_default = Dict(Int(), default={1: 1})
valuetyped_default = Dict(value=Int(), default={1: 1})
fullytyped_default = Dict(Int(), Int(), default={1: 1})
return DictAtom()
MEMBERS = ['untyped', 'keytyped', 'valuetyped', 'fullytyped',
'untyped_default', 'keytyped_default', 'valuetyped_default',
'fullytyped_default']
@pytest.mark.parametrize('member', MEMBERS)
def test_instance(atom_dict, member):
"""Test the repr.
"""
assert isinstance(getattr(atom_dict, member), atomdict)
@pytest.mark.parametrize('member', MEMBERS)
def test_repr(atom_dict, member):
"""Test the repr.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert repr(getattr(atom_dict, member)) == repr(d)
@pytest.mark.parametrize('member', MEMBERS)
def test_len(atom_dict, member):
"""Test the len.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert len(getattr(atom_dict, member)) == len(d)
@pytest.mark.parametrize('member', MEMBERS)
def test_contains(atom_dict, member):
"""Test __contains__.
"""
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert 5 in getattr(atom_dict, member)
del getattr(atom_dict, member)[5]
assert 5 not in getattr(atom_dict, member)
@pytest.mark.parametrize('member', MEMBERS)
def test_keys(atom_dict, member):
"""Test the keys.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert getattr(atom_dict, member).keys() == d.keys()
@pytest.mark.parametrize('member', MEMBERS)
def test_copy(atom_dict, member):
"""Test copy.
"""
d = getattr(atom_dict.__class__, member).default_value_mode[1]
if not d:
d = {i: i**2 for i in range(10)}
setattr(atom_dict, member, d)
assert getattr(atom_dict, member).copy() == d
def test_setitem(atom_dict):
"""Test setting items.
"""
atom_dict.untyped[''] = 1
assert atom_dict.untyped[''] == 1
atom_dict.keytyped[1] = ''
assert atom_dict.keytyped[1] == ''
with pytest.raises(TypeError):
atom_dict.keytyped[''] = 1
atom_dict.valuetyped[1] = 1
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped[''] = ''
atom_dict.fullytyped[1] = 1
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped[''] = 1
with pytest.raises(TypeError):
atom_dict.fullytyped[1] = ''
def test_setdefault(atom_dict):
"""Test using setdefault.
"""
assert atom_dict.untyped.setdefault('', 1) == 1
assert atom_dict.untyped.setdefault('', 2) == 1
assert atom_dict.untyped[''] == 1
assert atom_dict.keytyped.setdefault(1, '') == ''
assert atom_dict.keytyped[1] == ''
with pytest.raises(TypeError):
atom_dict.keytyped.setdefault('', 1)
assert atom_dict.valuetyped.setdefault(1, 1) == 1
assert atom_dict.valuetyped.setdefault(1, '') == 1
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped.setdefault(2, '')
assert atom_dict.fullytyped.setdefault(1, 1) == 1
assert atom_dict.fullytyped.setdefault(1, '') == 1
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped.setdefault('', 1)
with pytest.raises(TypeError):
atom_dict.fullytyped.setdefault(2, '')
def test_update(atom_dict):
"""Test update a dict.
"""
atom_dict.untyped.update({'': 1})
assert atom_dict.untyped[''] == 1
atom_dict.untyped.update([('1', 1)])
assert atom_dict.untyped['1'] == 1
atom_dict.keytyped.update({1: 1})
assert atom_dict.keytyped[1] == 1
atom_dict.keytyped.update([(2, 1)])
assert atom_dict.keytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.keytyped.update({'': 1})
atom_dict.valuetyped.update({1: 1})
assert atom_dict.valuetyped[1] == 1
atom_dict.valuetyped.update([(2, 1)])
assert atom_dict.valuetyped[1] == 1
with pytest.raises(TypeError):
atom_dict.valuetyped.update({'': ''})
atom_dict.fullytyped.update({1: 1})
assert atom_dict.fullytyped[1] == 1
atom_dict.fullytyped.update([(2, 1)])
assert atom_dict.fullytyped[1] == 1
with pytest.raises(TypeError):
atom_dict.fullytyped.update({'': 1})
with pytest.raises(TypeError):
atom_dict.fullytyped.update({'': ''})
| 28.559585
| 79
| 0.622097
| 709
| 5,512
| 4.668547
| 0.114245
| 0.181269
| 0.097281
| 0.095166
| 0.693958
| 0.61994
| 0.561631
| 0.464955
| 0.358308
| 0.308459
| 0
| 0.025034
| 0.20283
| 5,512
| 192
| 80
| 28.708333
| 0.728266
| 0.110668
| 0
| 0.432203
| 0
| 0
| 0.028944
| 0
| 0
| 0
| 0
| 0
| 0.254237
| 1
| 0.084746
| false
| 0
| 0.025424
| 0
| 0.194915
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
533f6cd5ce74f507059e39e73891411c50d53556
| 15,158
|
py
|
Python
|
typy/nodes.py
|
Procrat/typy
|
668cedb7f929256a09f565af9ee43c02889bec3f
|
[
"MIT"
] | 3
|
2016-03-08T09:55:20.000Z
|
2016-09-09T12:54:12.000Z
|
typy/nodes.py
|
Procrat/typy
|
668cedb7f929256a09f565af9ee43c02889bec3f
|
[
"MIT"
] | null | null | null |
typy/nodes.py
|
Procrat/typy
|
668cedb7f929256a09f565af9ee43c02889bec3f
|
[
"MIT"
] | null | null | null |
"""
Our own implementation of an abstract syntax tree (AST).
The convert function recursively converts a Python AST (from the module `ast`)
to our own AST (of the class `Node`).
"""
import ast
from logging import debug
from typy.builtin import data_types
from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable
from typy import types
class Node:
def __init__(self, type_map, ast_node):
self.type_map = type_map
self._ast_fields = ast_node._fields
def check(self):
"""Must be overriden in subtype."""
raise NotYetSupported('check call to', self)
def iter_fields(self):
for field in self._ast_fields:
try:
yield field, getattr(self, field)
except AttributeError:
pass
def iter_child_nodes(self):
for _name, field in self.iter_fields():
if isinstance(field, Node):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, Node):
yield item
class FunctionDef(Node):
def __init__(self, type_map, ast_node):
if (ast_node.args.vararg is not None or
len(ast_node.args.kwonlyargs) > 0 or
len(ast_node.args.kw_defaults) > 0 or
ast_node.args.kwarg is not None or
len(ast_node.args.defaults) > 0):
raise NotYetSupported('default arguments and keyword arguments')
super().__init__(type_map, ast_node)
self.name = ast_node.name
self.params = [arg.arg for arg in ast_node.args.args]
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self._ast_fields = ('name', 'params', 'body')
def check(self):
debug('checking func def %s', self.name)
function = types.Function(self, self.type_map)
self.type_map.add_variable(self.name, function)
return data_types.None_()
def __repr__(self):
return 'def ' + self.name + '()'
class ClassDef(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.name = ast_node.name
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
def check(self):
debug('checking class def %s', self.name)
class_namespace = self.type_map.enter_namespace(self.name)
for stmt in self.body:
stmt.check()
self.type_map.exit_namespace()
class_ = types.Class(self, self.type_map, class_namespace)
self.type_map.add_variable(self.name, class_)
return data_types.None_()
def __repr__(self):
return 'def ' + self.name
class Attribute(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
self.attr = ast_node.attr
self.ctx = ast_node.ctx
def check(self):
debug('checking attr %s', self)
value_type = self.value.check()
debug('attr %r = %r', self, value_type)
if isinstance(self.ctx, ast.Load):
return value_type.get_attribute(self.attr)
elif isinstance(self.ctx, ast.Store):
return (value_type, self.attr)
else:
# TODO implement for Del, AugLoad, AugStore, Param
raise NotYetSupported('name context', self.ctx)
def __repr__(self):
return repr(self.value) + '.' + self.attr
class Name(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.id = ast_node.id
self.ctx = ast_node.ctx
def check(self):
debug('checking name %s', self.id)
if isinstance(self.ctx, ast.Load):
return self.type_map.find(self.id)
elif isinstance(self.ctx, ast.Store):
return self
else:
# TODO implement for Del, AugLoad, AugStore, Param
raise NotYetSupported('name context', self.ctx)
def __repr__(self):
return self.id
class Call(Node):
def __init__(self, type_map, ast_node):
if (len(ast_node.keywords) > 0 or
ast_node.starargs is not None or
ast_node.kwargs is not None):
raise NotYetSupported('keyword arguments and star arguments')
super().__init__(type_map, ast_node)
self.func = convert(type_map, ast_node.func)
self.args = [convert(type_map, expr) for expr in ast_node.args]
def check(self):
debug('checking call')
func = self.func.check()
args = [arg.check() for arg in self.args]
return func.check_call(args)
def __repr__(self):
return repr(self.func) + \
'(' + ', '.join(repr(x) for x in self.args) + ')'
class Expr(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
def check(self):
debug('checking expr')
self.value.check()
return data_types.None_()
def __repr__(self):
return repr(self.value)
class Return(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
def check(self):
debug('checking return')
return self.value.check()
def __repr__(self):
return 'return ' + repr(self.value)
class Module(Node, types.Type):
def __init__(self, type_map, ast_node):
Node.__init__(self, type_map, ast_node)
types.Type.__init__(self, type_map)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
def check(self):
debug('checking module')
self.module_namespace = self.type_map.enter_namespace('__main__')
debug('entering %r', self.type_map.current_namespace)
for stmt in self.body:
debug('still in %r', self.type_map.current_namespace)
stmt.check()
debug('leaving %r', self.type_map.current_namespace)
self.type_map.exit_namespace()
def get_attribute(self, name):
try:
return self.module_namespace[name]
except KeyError:
types.Type.get_attribute(self, name)
class Assign(Node):
def __init__(self, type_map, ast_node):
# TODO handle multiple targets
if len(ast_node.targets) > 1:
raise NotYetSupported('assignment with multiple targets')
super().__init__(type_map, ast_node)
self.target = convert(type_map, ast_node.targets[0])
self.value = convert(type_map, ast_node.value)
self._ast_fields = ('target', 'value')
def check(self):
debug('checking assign %r', self.target)
_assign(self.target, self.value, self.type_map)
return data_types.None_()
def __repr__(self):
return repr(self.target) + ' = ' + repr(self.value)
class Pass(Node):
def check(self):
debug('checking pass')
return data_types.None_()
def __repr__(self):
return 'pass'
class Not(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = convert(type_map, ast_node.value)
def check(self):
debug('checking not')
self.value.check()
return data_types.Bool()
def __repr__(self):
return 'not ' + repr(self.value)
class BoolOp(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.op = ast_node.op
self.values = [convert(type_map, value) for value in ast_node.values]
def check(self):
debug('checking boolop')
for value in self.values:
value.check()
# TODO return intersection of types?
return data_types.Bool()
def __repr__(self):
op_name = ' {} '.format(self.op)
return '(' + op_name.join(repr(val) for val in self.values) + ')'
class In(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.element = convert(type_map, ast_node.element)
self.container = convert(type_map, ast_node.container)
def check(self):
debug('checking in')
element = self.element.check()
container = self.container.check()
try:
container.call_magic_method('__contains__', element)
except NoSuchAttribute:
if not container.is_iterable():
raise NotIterable(container)
return data_types.Bool()
def __repr__(self):
return '{!r} in {!r}'.format(self.element, self.container)
class For(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.target = convert(type_map, ast_node.target)
self.iter = convert(type_map, ast_node.iter)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self.orelse = [convert(type_map, clause) for clause in ast_node.orelse]
def check(self):
debug('checking for')
iterator = self.iter.check()
enclosed_type = iterator.get_enclosed_type()
_assign(self.target, enclosed_type, self.type_map)
for stmt in self.body:
stmt.check()
for stmt in self.orelse:
stmt.check()
# TODO return intersection of values of both branches
return data_types.None_()
def __repr__(self):
s = 'for {!r} in {!r}:\n '.format(self.target, self.iter)
s += '\n '.join(repr(stmt) for stmt in self.body)
if self.orelse:
s += 'else:\n '
s += '\n '.join(repr(stmt) for stmt in self.orelse)
return s
class If(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.test = convert(type_map, ast_node.test)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self.orelse = [convert(type_map, stmt) for stmt in ast_node.orelse]
def check(self):
debug('checking if')
# TODO take isinstance into account (?)
# TODO real branching?
self.test.check()
for stmt in self.body:
stmt.check()
for stmt in self.orelse:
stmt.check()
# TODO return intersection of values of both branches
return data_types.None_()
def __repr__(self):
s = 'if {!r}:\n '.format(self.test)
s += '\n '.join(repr(stmt) for stmt in self.body)
if self.orelse:
s += 'else:\n '
s += '\n '.join(repr(stmt) for stmt in self.orelse)
return s
class IfExp(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.test = convert(type_map, ast_node.test)
self.body = convert(type_map, ast_node.body)
self.orelse = convert(type_map, ast_node.orelse)
def check(self):
debug('checking ifexp')
# TODO take isinstance into account (?)
self.test.check()
value1 = self.body.check()
value2 = self.orelse.check()
return types.Intersection(value1, value2)
def __repr__(self):
template = '{!r} if {!r} else {!r}'
return template.format(self.test, self.body, self.orelse)
class NameConstant(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.value = ast_node.value
def check(self):
debug('checking name constant %r', self.value)
if self.value is None:
return data_types.None_()
elif self.value is True or self.value is False:
return data_types.Bool()
else:
raise NotYetSupported('name constant', self.value)
def __repr__(self):
return repr(self.value)
class While(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.test = convert(type_map, ast_node.test)
self.body = [convert(type_map, stmt) for stmt in ast_node.body]
self.orelse = [convert(type_map, stmt) for stmt in ast_node.orelse]
def check(self):
debug('checking while')
# TODO take isinstance into account (?)
# TODO real branching?
self.test.check()
for stmt in self.body:
stmt.check()
for stmt in self.orelse:
stmt.check()
# TODO return intersection of values of both branches
return data_types.None_()
def __repr__(self):
s = 'while {!r}:\n '.format(self.test)
s += '\n '.join(repr(stmt) for stmt in self.body)
if self.orelse:
s += 'else:\n '
s += '\n '.join(repr(stmt) for stmt in self.orelse)
return s
class Break(Node):
def check(self):
debug('checking break')
return data_types.None_()
def __repr__(self):
return 'break'
class Continue(Node):
def check(self):
debug('checking continue')
return data_types.None_()
def __repr__(self):
return 'continue'
class Num(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.number_type = {
int: data_types.Int,
# float: data_types.Float,
# complex: data_types.Complex,
}[type(ast_node.n)]
def check(self):
debug('checking num')
return self.number_type()
class Tuple(Node):
def __init__(self, type_map, ast_node):
super().__init__(type_map, ast_node)
self.elts = [convert(type_map, el) for el in ast_node.elts]
self.ctx = ast_node.ctx
def check(self):
debug('checking tuple %r', self)
if isinstance(self.ctx, ast.Load):
el_types = (el.check() for el in self.elts)
return types.Tuple(self.type_map, *el_types)
elif isinstance(self.ctx, ast.Store):
return self
else:
# TODO implement for Del, AugLoad, AugStore, Param
raise NotYetSupported('name context', self.ctx)
def __repr__(self):
return '(' + ', '.join(repr(el) for el in self.elts) + ')'
def _assign(target, value, type_map):
value_type = value.check()
if isinstance(target, Name):
target_type = target.check()
type_map.add_variable(target_type.id, value_type)
elif isinstance(target, Attribute):
target_type, attr = target.check()
target_type.set_attribute(attr, value_type)
else:
raise NotYetSupported('assignment to', target)
def convert(type_map, node):
class_name = node.__class__.__name__
try:
# Try to convert to a node
class_ = globals()[class_name]
return class_(type_map, node)
except KeyError:
try:
# Try to convert to a builtin type
class_ = getattr(data_types, class_name)
return class_()
except AttributeError:
raise NotYetSupported('node', node)
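# Illustrative usage sketch (kept as a comment; it assumes some TypeMap-like
# object from `typy.types` providing add_variable/find/enter_namespace/
# exit_namespace, whose construction is not shown in this module):
#
#     import ast
#     source = "def answer():\n    return 42\n"
#     module_node = convert(type_map, ast.parse(source))  # ast.Module -> Module
#     module_node.check()  # type-checks the whole module under '__main__'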
| 29.547758
| 79
| 0.605159
| 1,970
| 15,158
| 4.385279
| 0.092893
| 0.072115
| 0.063665
| 0.089131
| 0.579697
| 0.536752
| 0.485473
| 0.456187
| 0.399699
| 0.382915
| 0
| 0.000921
| 0.283415
| 15,158
| 512
| 80
| 29.605469
| 0.794421
| 0.055482
| 0
| 0.502762
| 0
| 0
| 0.057531
| 0
| 0
| 0
| 0
| 0.001953
| 0
| 1
| 0.187845
| false
| 0.01105
| 0.013812
| 0.041436
| 0.39779
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
533f744d195d3508a544cf4533f2224861641646
| 2,310
|
py
|
Python
|
anonlink-entity-service/backend/entityservice/integrationtests/objectstoretests/test_objectstore.py
|
Sam-Gresh/linkage-agent-tools
|
f405c7efe3fa82d99bc047f130c0fac6f3f5bf82
|
[
"Apache-2.0"
] | 1
|
2020-05-19T07:29:31.000Z
|
2020-05-19T07:29:31.000Z
|
backend/entityservice/integrationtests/objectstoretests/test_objectstore.py
|
hardbyte/anonlink-entity-service
|
3c1815473bc8169ca571532c18e0913a45c704de
|
[
"Apache-2.0"
] | null | null | null |
backend/entityservice/integrationtests/objectstoretests/test_objectstore.py
|
hardbyte/anonlink-entity-service
|
3c1815473bc8169ca571532c18e0913a45c704de
|
[
"Apache-2.0"
] | null | null | null |
"""
Testing:
- uploading over existing files
- using deleted credentials
- using expired credentials
"""
import io
import minio
from minio import Minio
import pytest
from minio.credentials import AssumeRoleProvider, Credentials
from entityservice.object_store import connect_to_object_store, connect_to_upload_object_store
from entityservice.settings import Config
restricted_upload_policy = """{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:PutObject"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::uploads/2020/*"
],
"Sid": "Upload-access-to-specific-bucket-only"
}
]
}
"""
class TestAssumeRole:
def test_temp_credentials_minio(self):
upload_endpoint = Config.UPLOAD_OBJECT_STORE_SERVER
bucket_name = "uploads"
root_mc_client = connect_to_object_store()
upload_restricted_minio_client = connect_to_upload_object_store()
if not root_mc_client.bucket_exists(bucket_name):
root_mc_client.make_bucket(bucket_name)
with pytest.raises(minio.error.AccessDenied):
upload_restricted_minio_client.list_buckets()
# Should be able to put an object though
upload_restricted_minio_client.put_object(bucket_name, 'testobject', io.BytesIO(b'data'), length=4)
credentials_provider = AssumeRoleProvider(upload_restricted_minio_client,
Policy=restricted_upload_policy
)
temp_creds = Credentials(provider=credentials_provider)
newly_restricted_mc_client = Minio(upload_endpoint, credentials=temp_creds, region='us-east-1', secure=False)
with pytest.raises(minio.error.AccessDenied):
newly_restricted_mc_client.list_buckets()
# Note: this put_object call worked with the earlier credentials,
# but it should fail now that the more restrictive policy has been applied
with pytest.raises(minio.error.AccessDenied):
newly_restricted_mc_client.put_object(bucket_name, 'testobject2', io.BytesIO(b'data'), length=4)
# this path is allowed in the policy however
newly_restricted_mc_client.put_object(bucket_name, '2020/testobject', io.BytesIO(b'data'), length=4)
| 32.535211
| 117
| 0.685281
| 267
| 2,310
| 5.651685
| 0.397004
| 0.037111
| 0.055666
| 0.071571
| 0.252485
| 0.201458
| 0.162359
| 0.121272
| 0.080848
| 0.080848
| 0
| 0.012936
| 0.230303
| 2,310
| 70
| 118
| 33
| 0.835771
| 0.130736
| 0
| 0.113636
| 0
| 0
| 0.162325
| 0.034068
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.159091
| 0
| 0.204545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
533fd29f460114d34ef9d86ca150f4d2360ad787
| 26,237
|
py
|
Python
|
tests/test_models.py
|
kykrueger/redash
|
5fd78fdb2324a7c194e8a99c13deb5a57268866c
|
[
"BSD-2-Clause"
] | 1
|
2019-11-19T06:10:22.000Z
|
2019-11-19T06:10:22.000Z
|
tests/test_models.py
|
kykrueger/redash
|
5fd78fdb2324a7c194e8a99c13deb5a57268866c
|
[
"BSD-2-Clause"
] | 3
|
2022-02-14T01:15:27.000Z
|
2022-02-27T11:21:50.000Z
|
tests/test_models.py
|
kykrueger/redash
|
5fd78fdb2324a7c194e8a99c13deb5a57268866c
|
[
"BSD-2-Clause"
] | 1
|
2019-12-06T08:30:35.000Z
|
2019-12-06T08:30:35.000Z
|
import calendar
import datetime
from unittest import TestCase
import pytz
from dateutil.parser import parse as date_parse
from tests import BaseTestCase
from redash import models, redis_connection
from redash.models import db, types
from redash.utils import gen_query_hash, utcnow
class DashboardTest(BaseTestCase):
def test_appends_suffix_to_slug_when_duplicate(self):
d1 = self.factory.create_dashboard()
db.session.flush()
self.assertEqual(d1.slug, 'test')
d2 = self.factory.create_dashboard(user=d1.user)
db.session.flush()
self.assertNotEqual(d1.slug, d2.slug)
d3 = self.factory.create_dashboard(user=d1.user)
db.session.flush()
self.assertNotEqual(d1.slug, d3.slug)
self.assertNotEqual(d2.slug, d3.slug)
class ShouldScheduleNextTest(TestCase):
def test_interval_schedule_that_needs_reschedule(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600"))
def test_interval_schedule_that_doesnt_need_reschedule(self):
now = utcnow()
half_an_hour_ago = now - datetime.timedelta(minutes=30)
self.assertFalse(models.should_schedule_next(half_an_hour_ago, now, "3600"))
def test_exact_time_that_needs_reschedule(self):
now = utcnow()
yesterday = now - datetime.timedelta(days=1)
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertTrue(models.should_schedule_next(yesterday, now, "86400",
scheduled_time))
def test_exact_time_that_doesnt_need_reschedule(self):
now = date_parse("2015-10-16 20:10")
yesterday = date_parse("2015-10-15 23:07")
schedule = "23:00"
self.assertFalse(models.should_schedule_next(yesterday, now, "86400", schedule))
def test_exact_time_with_day_change(self):
now = utcnow().replace(hour=0, minute=1)
previous = (now - datetime.timedelta(days=2)).replace(hour=23,
minute=59)
schedule = "23:59".format(now.hour + 3)
self.assertTrue(models.should_schedule_next(previous, now, "86400", schedule))
def test_exact_time_every_x_days_that_needs_reschedule(self):
now = utcnow()
four_days_ago = now - datetime.timedelta(days=4)
three_day_interval = "259200"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertTrue(models.should_schedule_next(four_days_ago, now, three_day_interval,
scheduled_time))
def test_exact_time_every_x_days_that_doesnt_need_reschedule(self):
now = utcnow()
four_days_ago = now - datetime.timedelta(days=2)
three_day_interval = "259200"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertFalse(models.should_schedule_next(four_days_ago, now, three_day_interval,
scheduled_time))
def test_exact_time_every_x_days_with_day_change(self):
now = utcnow().replace(hour=23, minute=59)
previous = (now - datetime.timedelta(days=2)).replace(hour=0, minute=1)
schedule = "23:58"
three_day_interval = "259200"
self.assertTrue(models.should_schedule_next(previous, now, three_day_interval, schedule))
def test_exact_time_every_x_weeks_that_needs_reschedule(self):
# Setup:
#
# 1) The query should run every 3 weeks on Tuesday
# 2) The last time it ran was 3 weeks ago from this week's Thursday
# 3) It is now Wednesday of this week
#
# Expectation: Even though less than 3 weeks have passed since the
# last run 3 weeks ago on Thursday, it's overdue since
# it should be running on Tuesdays.
this_thursday = utcnow() + datetime.timedelta(days=list(calendar.day_name).index("Thursday") - utcnow().weekday())
three_weeks_ago = this_thursday - datetime.timedelta(weeks=3)
now = this_thursday - datetime.timedelta(days=1)
three_week_interval = "1814400"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertTrue(models.should_schedule_next(three_weeks_ago, now, three_week_interval,
scheduled_time, "Tuesday"))
def test_exact_time_every_x_weeks_that_doesnt_need_reschedule(self):
# Setup:
#
# 1) The query should run every 3 weeks on Thursday
# 2) The last time it ran was 3 weeks ago from this week's Tuesday
# 3) It is now Wednesday of this week
#
# Expectation: Even though more than 3 weeks have passed since the
# last run 3 weeks ago on Tuesday, it's not overdue since
# it should be running on Thursdays.
this_tuesday = utcnow() + datetime.timedelta(days=list(calendar.day_name).index("Tuesday") - utcnow().weekday())
three_weeks_ago = this_tuesday - datetime.timedelta(weeks=3)
now = this_tuesday + datetime.timedelta(days=1)
three_week_interval = "1814400"
scheduled_datetime = now - datetime.timedelta(hours=3)
scheduled_time = "{:02d}:00".format(scheduled_datetime.hour)
self.assertFalse(models.should_schedule_next(three_weeks_ago, now, three_week_interval,
scheduled_time, "Thursday"))
def test_backoff(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
self.assertTrue(models.should_schedule_next(two_hours_ago, now, "3600",
failures=5))
self.assertFalse(models.should_schedule_next(two_hours_ago, now,
"3600", failures=10))
def test_next_iteration_overflow(self):
now = utcnow()
two_hours_ago = now - datetime.timedelta(hours=2)
self.assertFalse(models.should_schedule_next(two_hours_ago, now, "3600", failures=32))
class QueryOutdatedQueriesTest(BaseTestCase):
# TODO: this test can be refactored to use mock version of should_schedule_next to simplify it.
def test_outdated_queries_skips_unscheduled_queries(self):
query = self.factory.create_query(schedule={'interval':None, 'time': None, 'until':None, 'day_of_week':None})
query_with_none = self.factory.create_query(schedule=None)
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
self.assertNotIn(query_with_none, queries)
def test_outdated_queries_works_with_ttl_based_schedule(self):
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertIn(query, queries)
def test_outdated_queries_works_scheduled_queries_tracker(self):
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
models.scheduled_queries_executions.update(query.id)
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
def test_skips_fresh_queries(self):
half_an_hour_ago = utcnow() - datetime.timedelta(minutes=30)
query = self.factory.create_query(schedule={'interval':'3600', 'time': None, 'until':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=half_an_hour_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
def test_outdated_queries_works_with_specific_time_schedule(self):
half_an_hour_ago = utcnow() - datetime.timedelta(minutes=30)
query = self.factory.create_query(schedule={'interval':'86400', 'time':half_an_hour_ago.strftime('%H:%M'), 'until':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=half_an_hour_ago - datetime.timedelta(days=1))
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertIn(query, queries)
def test_enqueues_query_only_once(self):
"""
Only one query per data source with the same text will be reported by
Query.outdated_queries().
"""
query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None})
query2 = self.factory.create_query(
schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [query2])
def test_enqueues_query_with_correct_data_source(self):
"""
Queries from different data sources will be reported by
Query.outdated_queries() even if they have the same query text.
"""
query = self.factory.create_query(
schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, data_source=self.factory.create_data_source())
query2 = self.factory.create_query(
schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
outdated_queries = models.Query.outdated_queries()
self.assertEqual(len(outdated_queries), 2)
self.assertIn(query, outdated_queries)
self.assertIn(query2, outdated_queries)
def test_enqueues_only_for_relevant_data_source(self):
"""
If multiple queries with the same text exist, only ones that are
scheduled to be refreshed are reported by Query.outdated_queries().
"""
query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None})
query2 = self.factory.create_query(
schedule={'interval':'3600', 'until':None, 'time': None, 'day_of_week':None}, query_text=query.query_text,
query_hash=query.query_hash)
retrieved_at = utcnow() - datetime.timedelta(minutes=10)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
query2.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [query])
def test_failure_extends_schedule(self):
"""
Execution failures recorded for a query result in exponential backoff
for scheduling future execution.
"""
query = self.factory.create_query(schedule={'interval':'60', 'until':None, 'time': None, 'day_of_week':None}, schedule_failures=4)
retrieved_at = utcnow() - datetime.timedelta(minutes=16)
query_result = self.factory.create_query_result(
retrieved_at=retrieved_at, query_text=query.query_text,
query_hash=query.query_hash)
query.latest_query_data = query_result
self.assertEqual(list(models.Query.outdated_queries()), [])
query_result.retrieved_at = utcnow() - datetime.timedelta(minutes=17)
self.assertEqual(list(models.Query.outdated_queries()), [query])
def test_schedule_until_after(self):
"""
Queries with non-null ``schedule['until']`` are not reported by
Query.outdated_queries() after the given time is past.
"""
one_day_ago = (utcnow() - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'until':one_day_ago, 'time':None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertNotIn(query, queries)
def test_schedule_until_before(self):
"""
Queries with non-null ``schedule['until']`` are reported by
Query.outdated_queries() before the given time is past.
"""
one_day_from_now = (utcnow() + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
two_hours_ago = utcnow() - datetime.timedelta(hours=2)
query = self.factory.create_query(schedule={'interval':'3600', 'until':one_day_from_now, 'time': None, 'day_of_week':None})
query_result = self.factory.create_query_result(query=query.query_text, retrieved_at=two_hours_ago)
query.latest_query_data = query_result
queries = models.Query.outdated_queries()
self.assertIn(query, queries)
class QueryArchiveTest(BaseTestCase):
def test_archive_query_sets_flag(self):
query = self.factory.create_query()
db.session.flush()
query.archive()
self.assertEqual(query.is_archived, True)
def test_archived_query_doesnt_return_in_all(self):
query = self.factory.create_query(schedule={'interval':'1', 'until':None, 'time': None, 'day_of_week':None})
yesterday = utcnow() - datetime.timedelta(days=1)
query_result = models.QueryResult.store_result(
query.org_id, query.data_source, query.query_hash, query.query_text,
"1", 123, yesterday)
query.latest_query_data = query_result
groups = list(models.Group.query.filter(models.Group.id.in_(query.groups)))
self.assertIn(query, list(models.Query.all_queries([g.id for g in groups])))
self.assertIn(query, models.Query.outdated_queries())
db.session.flush()
query.archive()
self.assertNotIn(query, list(models.Query.all_queries([g.id for g in groups])))
self.assertNotIn(query, models.Query.outdated_queries())
def test_removes_associated_widgets_from_dashboards(self):
widget = self.factory.create_widget()
query = widget.visualization.query_rel
db.session.commit()
query.archive()
db.session.flush()
self.assertEqual(models.Widget.query.get(widget.id), None)
def test_removes_scheduling(self):
query = self.factory.create_query(schedule={'interval':'1', 'until':None, 'time': None, 'day_of_week':None})
query.archive()
self.assertIsNone(query.schedule)
def test_deletes_alerts(self):
subscription = self.factory.create_alert_subscription()
query = subscription.alert.query_rel
db.session.commit()
query.archive()
db.session.flush()
self.assertEqual(models.Alert.query.get(subscription.alert.id), None)
self.assertEqual(models.AlertSubscription.query.get(subscription.id), None)
class TestUnusedQueryResults(BaseTestCase):
def test_returns_only_unused_query_results(self):
two_weeks_ago = utcnow() - datetime.timedelta(days=14)
qr = self.factory.create_query_result()
self.factory.create_query(latest_query_data=qr)
db.session.flush()
unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
self.assertIn(unused_qr, list(models.QueryResult.unused()))
self.assertNotIn(qr, list(models.QueryResult.unused()))
def test_returns_only_over_a_week_old_results(self):
two_weeks_ago = utcnow() - datetime.timedelta(days=14)
unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
db.session.flush()
new_unused_qr = self.factory.create_query_result()
self.assertIn(unused_qr, list(models.QueryResult.unused()))
self.assertNotIn(new_unused_qr, list(models.QueryResult.unused()))
class TestQueryAll(BaseTestCase):
def test_returns_only_queries_in_given_groups(self):
ds1 = self.factory.create_data_source()
ds2 = self.factory.create_data_source()
group1 = models.Group(name="g1", org=ds1.org, permissions=['create', 'view'])
group2 = models.Group(name="g2", org=ds1.org, permissions=['create', 'view'])
q1 = self.factory.create_query(data_source=ds1)
q2 = self.factory.create_query(data_source=ds2)
db.session.add_all([
ds1, ds2,
group1, group2,
q1, q2,
models.DataSourceGroup(
group=group1, data_source=ds1),
models.DataSourceGroup(group=group2, data_source=ds2)
])
db.session.flush()
self.assertIn(q1, list(models.Query.all_queries([group1.id])))
self.assertNotIn(q2, list(models.Query.all_queries([group1.id])))
self.assertIn(q1, list(models.Query.all_queries([group1.id, group2.id])))
self.assertIn(q2, list(models.Query.all_queries([group1.id, group2.id])))
def test_skips_drafts(self):
q = self.factory.create_query(is_draft=True)
self.assertNotIn(q, models.Query.all_queries([self.factory.default_group.id]))
def test_includes_drafts_of_given_user(self):
q = self.factory.create_query(is_draft=True)
self.assertIn(q, models.Query.all_queries([self.factory.default_group.id], user_id=q.user_id))
def test_order_by_relationship(self):
u1 = self.factory.create_user(name='alice')
u2 = self.factory.create_user(name='bob')
self.factory.create_query(user=u1)
self.factory.create_query(user=u2)
db.session.commit()
# have to reset the order here with None since all_queries orders by
# created_at by default
base = models.Query.all_queries([self.factory.default_group.id]).order_by(None)
qs1 = base.order_by(models.User.name)
self.assertEqual(['alice', 'bob'], [q.user.name for q in qs1])
qs2 = base.order_by(models.User.name.desc())
self.assertEqual(['bob', 'alice'], [q.user.name for q in qs2])
class TestGroup(BaseTestCase):
def test_returns_groups_with_specified_names(self):
org1 = self.factory.create_org()
org2 = self.factory.create_org()
matching_group1 = models.Group(id=999, name="g1", org=org1)
matching_group2 = models.Group(id=888, name="g2", org=org1)
non_matching_group = models.Group(id=777, name="g1", org=org2)
groups = models.Group.find_by_name(org1, ["g1", "g2"])
self.assertIn(matching_group1, groups)
self.assertIn(matching_group2, groups)
self.assertNotIn(non_matching_group, groups)
def test_returns_no_groups(self):
org1 = self.factory.create_org()
models.Group(id=999, name="g1", org=org1)
self.assertEqual([], models.Group.find_by_name(org1, ["non-existing"]))
class TestQueryResultStoreResult(BaseTestCase):
def setUp(self):
super(TestQueryResultStoreResult, self).setUp()
self.data_source = self.factory.data_source
self.query = "SELECT 1"
self.query_hash = gen_query_hash(self.query)
self.runtime = 123
self.utcnow = utcnow()
self.data = '{"a": 1}'
def test_stores_the_result(self):
query_result = models.QueryResult.store_result(
self.data_source.org_id, self.data_source, self.query_hash,
self.query, self.data, self.runtime, self.utcnow)
self.assertEqual(query_result._data, self.data)
self.assertEqual(query_result.runtime, self.runtime)
self.assertEqual(query_result.retrieved_at, self.utcnow)
self.assertEqual(query_result.query_text, self.query)
self.assertEqual(query_result.query_hash, self.query_hash)
self.assertEqual(query_result.data_source, self.data_source)
class TestEvents(BaseTestCase):
def raw_event(self):
timestamp = 1411778709.791
user = self.factory.user
created_at = datetime.datetime.utcfromtimestamp(timestamp)
db.session.flush()
raw_event = {"action": "view",
"timestamp": timestamp,
"object_type": "dashboard",
"user_id": user.id,
"object_id": 1,
"org_id": 1}
return raw_event, user, created_at
def test_records_event(self):
raw_event, user, created_at = self.raw_event()
event = models.Event.record(raw_event)
db.session.flush()
self.assertEqual(event.user, user)
self.assertEqual(event.action, "view")
self.assertEqual(event.object_type, "dashboard")
self.assertEqual(event.object_id, 1)
self.assertEqual(event.created_at, created_at)
def test_records_additional_properties(self):
raw_event, _, _ = self.raw_event()
additional_properties = {'test': 1, 'test2': 2, 'whatever': "abc"}
raw_event.update(additional_properties)
event = models.Event.record(raw_event)
self.assertDictEqual(event.additional_properties, additional_properties)
def _set_up_dashboard_test(d):
d.g1 = d.factory.create_group(name='First', permissions=['create', 'view'])
d.g2 = d.factory.create_group(name='Second', permissions=['create', 'view'])
d.ds1 = d.factory.create_data_source()
d.ds2 = d.factory.create_data_source()
db.session.flush()
d.u1 = d.factory.create_user(group_ids=[d.g1.id])
d.u2 = d.factory.create_user(group_ids=[d.g2.id])
db.session.add_all([
models.DataSourceGroup(group=d.g1, data_source=d.ds1),
models.DataSourceGroup(group=d.g2, data_source=d.ds2)
])
d.q1 = d.factory.create_query(data_source=d.ds1)
d.q2 = d.factory.create_query(data_source=d.ds2)
d.v1 = d.factory.create_visualization(query_rel=d.q1)
d.v2 = d.factory.create_visualization(query_rel=d.q2)
d.w1 = d.factory.create_widget(visualization=d.v1)
d.w2 = d.factory.create_widget(visualization=d.v2)
d.w3 = d.factory.create_widget(visualization=d.v2, dashboard=d.w2.dashboard)
d.w4 = d.factory.create_widget(visualization=d.v2)
d.w5 = d.factory.create_widget(visualization=d.v1, dashboard=d.w4.dashboard)
d.w1.dashboard.is_draft = False
d.w2.dashboard.is_draft = False
d.w4.dashboard.is_draft = False
class TestDashboardAll(BaseTestCase):
def setUp(self):
super(TestDashboardAll, self).setUp()
_set_up_dashboard_test(self)
def test_requires_group_or_user_id(self):
d1 = self.factory.create_dashboard()
self.assertNotIn(d1, list(models.Dashboard.all(
d1.user.org, d1.user.group_ids, None)))
l2 = list(models.Dashboard.all(
d1.user.org, [0], d1.user.id))
self.assertIn(d1, l2)
def test_returns_dashboards_based_on_groups(self):
self.assertIn(self.w1.dashboard, list(models.Dashboard.all(
self.u1.org, self.u1.group_ids, None)))
self.assertIn(self.w2.dashboard, list(models.Dashboard.all(
self.u2.org, self.u2.group_ids, None)))
self.assertNotIn(self.w1.dashboard, list(models.Dashboard.all(
self.u2.org, self.u2.group_ids, None)))
self.assertNotIn(self.w2.dashboard, list(models.Dashboard.all(
self.u1.org, self.u1.group_ids, None)))
def test_returns_each_dashboard_once(self):
dashboards = list(models.Dashboard.all(self.u2.org, self.u2.group_ids, None))
self.assertEqual(len(dashboards), 2)
def test_returns_dashboard_you_have_partial_access_to(self):
self.assertIn(self.w5.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))
def test_returns_dashboards_created_by_user(self):
d1 = self.factory.create_dashboard(user=self.u1)
db.session.flush()
self.assertIn(d1, list(models.Dashboard.all(self.u1.org, self.u1.group_ids, self.u1.id)))
self.assertIn(d1, list(models.Dashboard.all(self.u1.org, [0], self.u1.id)))
self.assertNotIn(d1, list(models.Dashboard.all(self.u2.org, self.u2.group_ids, self.u2.id)))
def test_returns_dashboards_with_text_widgets(self):
w1 = self.factory.create_widget(visualization=None)
self.assertIn(w1.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))
self.assertIn(w1.dashboard, models.Dashboard.all(self.u2.org, self.u2.group_ids, None))
def test_returns_dashboards_from_current_org_only(self):
w1 = self.factory.create_widget(visualization=None)
user = self.factory.create_user(org=self.factory.create_org())
self.assertIn(w1.dashboard, models.Dashboard.all(self.u1.org, self.u1.group_ids, None))
self.assertNotIn(w1.dashboard, models.Dashboard.all(user.org, user.group_ids, None))
| 46.355124
| 149
| 0.675192
| 3,377
| 26,237
| 5.007699
| 0.104827
| 0.056117
| 0.058305
| 0.050736
| 0.693868
| 0.62344
| 0.550529
| 0.49861
| 0.453669
| 0.43522
| 0
| 0.021867
| 0.210428
| 26,237
| 565
| 150
| 46.437168
| 0.794458
| 0.058886
| 0
| 0.354762
| 0
| 0
| 0.037533
| 0
| 0
| 0
| 0
| 0.00177
| 0.2
| 1
| 0.121429
| false
| 0
| 0.021429
| 0
| 0.169048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53432a332241c8c7299ada338e326f5385523550
| 2,537
|
py
|
Python
|
tests/test.py
|
N4S4/thingspeak_wrapper
|
f5c26e52c09124b85cc6056782d766d145e65a31
|
[
"MIT"
] | null | null | null |
tests/test.py
|
N4S4/thingspeak_wrapper
|
f5c26e52c09124b85cc6056782d766d145e65a31
|
[
"MIT"
] | null | null | null |
tests/test.py
|
N4S4/thingspeak_wrapper
|
f5c26e52c09124b85cc6056782d766d145e65a31
|
[
"MIT"
] | null | null | null |
import time
import thingspeak_wrapper as tsw
# Instantiate the class ThingWrapper with (CHANNEL_ID, WRITE_API_KEY, READ_API_KEY)
# if it is a public channel just pass the CHANNEL_ID argument; the api_key defaults are None
my_channel = tsw.wrapper.ThingWrapper(501309, '6TQDNWJQ44FA0GAQ', '10EVD2N6YIHI5O7Z')
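# For a public channel the API keys can be left out entirely, e.g. (hypothetical
# channel id, shown only as a sketch):
# public_channel = tsw.wrapper.ThingWrapper(9)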
# all set of functions are:
# my_channel.sender()
# my_channel.multiple_sender()
# my_channel.get_json_feeds()
# my_channel.get_json_feeds_from()
# my_channel.get_xml_feeds()
# my_channel.get_xml_feeds_from()
# my_channel.get_csv_feeds()
# my_channel.get_csv_feeds_from()
# ---------------------------
# Now you can use all the possible functions
# Send a value to a single field
my_channel.sender(1, 4)
# this delay is due to a limitation of the ThingSpeak free account, which allows posting data at most once every 15 seconds
time.sleep(15)
# ---------------------------
# Send data to multiple fields
# It takes 2 inputs as lists ([..], [..])
# Create lists of fields and values
fields = [1, 2, 3]
values = [22.0, 1029, 700]
# pass them to the function
my_channel.multiple_sender(fields, values)
# ---------------------------
# Get data functions return data as json, xml, csv
# optionally csv can be returned as a pandas DataFrame
# pass arguments to the function (field, results_quantity)
# default values are (fields='feeds', results_quantity=None)
# with the defaults you will get all fields and all values (max 8000)
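# For example, calling it with the defaults (a sketch, not executed here) would
# request every field, up to the 8000-result cap:
# all_feeds = my_channel.get_json_feeds()
# print(all_feeds)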
json_field1 = my_channel.get_json_feeds(1, 300)
print(json_field1)
# get xml data pass same values as previous function
xml_field1 = my_channel.get_xml_feeds(1, 300)
print(xml_field1)
# get csv data
# this function lets you specify (field, pandas_format=True, results_quantity=None)
# defaults are (fields='feeds', pandas_format=True, results_quantity=None)
csv_field1 = my_channel.get_csv_feeds(1, pandas_format=True,
results_quantity=300)
print(csv_field1)
# data without pandas_format
csv_no_pandas = my_channel.get_csv_feeds(1, pandas_format=False,
results_quantity=300)
print(csv_no_pandas)
# it is also possible to request data from and to specific dates
# set date and time as strings YYYY-MM-DD HH:MM:SS
start_date, start_time = '2018-05-21', '12:00:00'
stop_date, stop_time = '2018-05-21', '23:59:59'
# pass values to the function
# defaults are (start_date, start_time, stop_date=None, stop_time=None, fields='feeds')
values_from_date = my_channel.get_json_feeds_from(start_date, start_time, stop_date, stop_time, 1)
print(values_from_date)
| 35.236111
| 110
| 0.727237
| 396
| 2,537
| 4.431818
| 0.338384
| 0.082051
| 0.075214
| 0.036467
| 0.241595
| 0.104843
| 0.037607
| 0.037607
| 0
| 0
| 0
| 0.042424
| 0.154513
| 2,537
| 71
| 111
| 35.732394
| 0.775758
| 0.612534
| 0
| 0.090909
| 0
| 0
| 0.071806
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.227273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5343c13e8e474004e5dc969475dce6a87967180c
| 6,798
|
py
|
Python
|
neptunecontrib/monitoring/skopt.py
|
neptune-ai/neptune-contrib
|
fe5c6853128020aaaa59b440cc5203b940dcd39a
|
[
"MIT"
] | 22
|
2020-02-23T21:25:34.000Z
|
2021-06-11T16:34:27.000Z
|
neptunecontrib/monitoring/skopt.py
|
neptune-ai/neptune-contrib
|
fe5c6853128020aaaa59b440cc5203b940dcd39a
|
[
"MIT"
] | 29
|
2020-02-11T11:10:22.000Z
|
2021-10-03T09:01:28.000Z
|
neptunecontrib/monitoring/skopt.py
|
neptune-ai/neptune-contrib
|
fe5c6853128020aaaa59b440cc5203b940dcd39a
|
[
"MIT"
] | 7
|
2020-05-10T06:59:53.000Z
|
2021-06-11T16:34:32.000Z
|
#
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import matplotlib.pyplot as plt
import neptune
import numpy as np
import skopt.plots as sk_plots
from skopt.utils import dump
from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run
class NeptuneCallback:
"""Logs hyperparameter optimization process to Neptune.
Specifically using NeptuneCallback will log: run metrics and run parameters, best run metrics so far, and
the current results checkpoint.
Examples:
Initialize NeptuneCallback::
import neptune
import neptunecontrib.monitoring.skopt as sk_utils
neptune.init(api_token='ANONYMOUS',
project_qualified_name='shared/showroom')
neptune.create_experiment(name='optuna sweep')
neptune_callback = sk_utils.NeptuneCallback()
Run skopt training passing neptune_callback as a callback::
...
results = skopt.forest_minimize(objective, space, callback=[neptune_callback],
base_estimator='ET', n_calls=100, n_random_starts=10)
You can explore an example experiment in Neptune:
https://ui.neptune.ai/o/shared/org/showroom/e/SHOW-1065/logs
"""
def __init__(self, experiment=None, log_checkpoint=True):
self._exp = experiment if experiment else neptune
expect_not_a_run(self._exp)
self.log_checkpoint = log_checkpoint
self._iteration = 0
def __call__(self, res):
self._exp.log_metric('run_score', x=self._iteration, y=res.func_vals[-1])
self._exp.log_metric('best_so_far_run_score', x=self._iteration, y=np.min(res.func_vals))
self._exp.log_text('run_parameters', x=self._iteration, y=NeptuneCallback._get_last_params(res))
if self.log_checkpoint:
self._exp.log_artifact(_export_results_object(res), 'results.pkl')
self._iteration += 1
@staticmethod
def _get_last_params(res):
param_vals = res.x_iters[-1]
named_params = _format_to_named_params(param_vals, res)
return str(named_params)
def log_results(results, experiment=None, log_plots=True, log_pickle=True):
"""Logs runs results and parameters to neptune.
Logs all hyperparameter optimization results to Neptune. Those include best score ('best_score' metric),
best parameters ('best_parameters' property), convergence plot ('diagnostics' log),
evaluations plot ('diagnostics' log), and objective plot ('diagnostics' log).
Args:
results('scipy.optimize.OptimizeResult'): Results object that is typically an output
of a function like `skopt.forest_minimize(...)`
experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
log_plots ('bool'): If True, skopt plots will be logged to Neptune.
log_pickle ('bool'): If True, the pickled skopt results object will be logged to Neptune.
Examples:
Run skopt training::
...
results = skopt.forest_minimize(objective, space,
base_estimator='ET', n_calls=100, n_random_starts=10)
Initialize Neptune::
import neptune
neptune.init(api_token='ANONYMOUS',
project_qualified_name='shared/showroom')
neptune.create_experiment(name='optuna sweep')
Send best parameters to Neptune::
import neptunecontrib.monitoring.skopt as sk_utils
sk_utils.log_results(results)
You can explore an example experiment in Neptune:
https://ui.neptune.ai/o/shared/org/showroom/e/SHOW-1065/logs
"""
_exp = experiment if experiment else neptune
expect_not_a_run(_exp)
_log_best_score(results, _exp)
_log_best_parameters(results, _exp)
if log_plots:
_log_plot_convergence(results, _exp)
_log_plot_evaluations(results, _exp)
_log_plot_regret(results, _exp)
_log_plot_objective(results, _exp)
if log_pickle:
_log_results_object(results, _exp)
def NeptuneMonitor(*args, **kwargs):
message = """NeptuneMonitor was renamed to NeptuneCallback and will be removed in future releases.
"""
warnings.warn(message)
return NeptuneCallback(*args, **kwargs)
def _log_best_parameters(results, experiment):
expect_not_a_run(experiment)
named_params = ([(dimension.name, param) for dimension, param in zip(results.space, results.x)])
experiment.set_property('best_parameters', str(named_params))
def _log_best_score(results, experiment):
experiment.log_metric('best_score', results.fun)
def _log_plot_convergence(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig, ax = plt.subplots()
sk_plots.plot_convergence(results, ax=ax)
experiment.log_image(name, fig)
def _log_plot_regret(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig, ax = plt.subplots()
sk_plots.plot_regret(results, ax=ax)
experiment.log_image(name, fig)
def _log_plot_evaluations(results, experiment, name='diagnostics'):
expect_not_a_run(experiment)
fig = plt.figure(figsize=(16, 12))
fig = axes2fig(sk_plots.plot_evaluations(results, bins=10), fig=fig)
experiment.log_image(name, fig)
def _log_plot_objective(results, experiment, name='diagnostics'):
try:
expect_not_a_run(experiment)
fig = plt.figure(figsize=(16, 12))
fig = axes2fig(sk_plots.plot_objective(results), fig=fig)
experiment.log_image(name, fig)
except Exception as e:
print('Could not create the objective chart due to error: {}'.format(e))
def _log_results_object(results, experiment=None):
expect_not_a_run(experiment)
experiment.log_artifact(_export_results_object(results), 'results.pkl')
def _export_results_object(results):
from io import BytesIO
results.specs['args'].pop('callback', None)
buffer = BytesIO()
dump(results, buffer, store_objective=False)
buffer.seek(0)
return buffer
def _format_to_named_params(params, result):
return [(dimension.name, param) for dimension, param in zip(result.space, params)]
| 33.653465
| 109
| 0.699912
| 875
| 6,798
| 5.211429
| 0.275429
| 0.017763
| 0.019737
| 0.025658
| 0.338596
| 0.297368
| 0.269737
| 0.261623
| 0.214035
| 0.214035
| 0
| 0.008169
| 0.207708
| 6,798
| 201
| 110
| 33.820896
| 0.83847
| 0.419388
| 0
| 0.168675
| 0
| 0
| 0.077292
| 0.005597
| 0
| 0
| 0
| 0
| 0
| 1
| 0.168675
| false
| 0
| 0.096386
| 0.012048
| 0.325301
| 0.012048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5347385bc4a5e5ee6c5f4719e3b7b90b80842cc8
| 813
|
py
|
Python
|
invenio_iiif/config.py
|
dfdan/invenio-iiif
|
2ea2747fd29ab03b1d38e0ca6d2a9c1506aa8cbc
|
[
"MIT"
] | 3
|
2019-07-25T16:25:22.000Z
|
2021-02-04T16:51:55.000Z
|
invenio_iiif/config.py
|
dfdan/invenio-iiif
|
2ea2747fd29ab03b1d38e0ca6d2a9c1506aa8cbc
|
[
"MIT"
] | 26
|
2018-04-10T14:46:34.000Z
|
2021-06-16T08:51:09.000Z
|
invenio_iiif/config.py
|
dfdan/invenio-iiif
|
2ea2747fd29ab03b1d38e0ca6d2a9c1506aa8cbc
|
[
"MIT"
] | 22
|
2018-04-04T09:41:38.000Z
|
2021-11-25T09:33:40.000Z
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""IIIF API for Invenio."""
IIIF_API_PREFIX = '/iiif/'
"""URL prefix to IIIF API."""
IIIF_UI_URL = '/api{}'.format(IIIF_API_PREFIX)
"""URL to IIIF API endpoint (allow hostname)."""
IIIF_PREVIEWER_PARAMS = {
'size': '750,'
}
"""Parameters for IIIF image previewer extension."""
IIIF_PREVIEW_TEMPLATE = 'invenio_iiif/preview.html'
"""Template for IIIF image preview."""
IIIF_API_DECORATOR_HANDLER = 'invenio_iiif.handlers:protect_api'
"""Image opener handler decorator."""
IIIF_IMAGE_OPENER_HANDLER = 'invenio_iiif.handlers:image_opener'
"""Image opener handler function."""
| 27.1
| 72
| 0.724477
| 115
| 813
| 4.93913
| 0.495652
| 0.073944
| 0.09507
| 0.091549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011412
| 0.137761
| 813
| 29
| 73
| 28.034483
| 0.798859
| 0.291513
| 0
| 0
| 0
| 0
| 0.345679
| 0.283951
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5348118dbe9a56351f72fa4c704f5f49e6815a7c
| 63,527
|
py
|
Python
|
port/platform/common/automation/u_utils.py
|
u-blox/ubxlib
|
4dc1b16e6f12354b601cb1c9d799c10f4e2afb54
|
[
"Apache-2.0"
] | 91
|
2020-12-21T13:10:19.000Z
|
2022-03-24T23:27:13.000Z
|
port/platform/common/automation/u_utils.py
|
u-blox/ubxlib
|
4dc1b16e6f12354b601cb1c9d799c10f4e2afb54
|
[
"Apache-2.0"
] | 42
|
2021-01-04T13:35:18.000Z
|
2022-03-25T08:57:45.000Z
|
port/platform/common/automation/u_utils.py
|
u-blox/ubxlib
|
4dc1b16e6f12354b601cb1c9d799c10f4e2afb54
|
[
"Apache-2.0"
] | 25
|
2021-01-02T12:37:34.000Z
|
2022-03-31T01:53:37.000Z
|
#!/usr/bin/env python
'''Generally useful bits and bobs.'''
import queue # For PrintThread and exe_run
from time import sleep, time, gmtime, strftime # For lock timeout, exe_run timeout and logging
from multiprocessing import RLock
from copy import copy
import threading # For PrintThread
import sys
import os # For ChangeDir, has_admin
import stat # To help deltree out
from collections import deque # For storing a window of debug
from telnetlib import Telnet # For talking to JLink server
import socket
import shutil # To delete a directory tree
import signal # For CTRL_C_EVENT
import subprocess
import platform # Figure out current OS
import re # Regular Expression
import serial # Pyserial (make sure to do pip install pyserial)
import psutil # For killing things (make sure to do pip install psutil)
import requests # For HTTP comms with a KMTronic box (do pip install requests)
import u_settings
# Since this function is used by the global variables below it needs
# to be placed here.
def is_linux():
'''Returns True when system is Linux'''
return platform.system() == 'Linux'
# Since this function is used by the global variables below it needs
# to be placed here.
def pick_by_os(linux=None, other=None):
'''
This is a convenience function for selecting a value based on platform.
As an example the line below will print out "Linux" when running on a
Linux platform and "Not Linux" when running on some other platform:
print( u_utils.pick_by_os(linux="Linux", other="Not Linux") )
'''
if is_linux():
return linux
return other
# The port that this agent service runs on
# Deliberately NOT a setting, we need to be sure
# everyone uses the same value
AGENT_SERVICE_PORT = 17003
# The maximum number of characters that an agent will
# use from controller_name when constructing a directory
# name for a ubxlib branch to be checked out into
AGENT_WORKING_SUBDIR_CONTROLLER_NAME_MAX_LENGTH = 4
# How long to wait for an install lock in seconds
INSTALL_LOCK_WAIT_SECONDS = u_settings.INSTALL_LOCK_WAIT_SECONDS #(60 * 60)
# The URL for Unity, the unit test framework
UNITY_URL = u_settings.UNITY_URL #"https://github.com/ThrowTheSwitch/Unity"
# The sub-directory that Unity is usually put in
# (off the working directory)
UNITY_SUBDIR = u_settings.UNITY_SUBDIR #"Unity"
# The path to DevCon, a Windows tool that allows
# USB devices to be reset, amongst other things
DEVCON_PATH = u_settings.DEVCON_PATH #"devcon.exe"
# The path to jlink.exe (or just the name 'cos it's on the path)
JLINK_PATH = u_settings.JLINK_PATH #"jlink.exe"
# The port number for SWO trace capture out of JLink
JLINK_SWO_PORT = u_settings.JLINK_SWO_PORT #19021
# The port number for GDB control of ST-LINK GDB server
STLINK_GDB_PORT = u_settings.STLINK_GDB_PORT #61200
# The port number for SWO trace capture out of ST-LINK GDB server
STLINK_SWO_PORT = u_settings.STLINK_SWO_PORT #61300
# The format string passed to strftime()
# for logging prints
TIME_FORMAT = u_settings.TIME_FORMAT #"%Y-%m-%d_%H:%M:%S"
# The default guard time waiting for a platform lock in seconds
PLATFORM_LOCK_GUARD_TIME_SECONDS = u_settings.PLATFORM_LOCK_GUARD_TIME_SECONDS #60 * 60
# The default guard time for downloading to a target in seconds
DOWNLOAD_GUARD_TIME_SECONDS = u_settings.DOWNLOAD_GUARD_TIME_SECONDS #60
# The default guard time for running tests in seconds
RUN_GUARD_TIME_SECONDS = u_settings.RUN_GUARD_TIME_SECONDS #60 * 60
# The default inactivity timer for running tests in seconds
RUN_INACTIVITY_TIME_SECONDS = u_settings.RUN_INACTIVITY_TIME_SECONDS #60 * 5
# The name of the #define that forms the filter string
# for which tests to run
FILTER_MACRO_NAME = u_settings.FILTER_MACRO_NAME #"U_CFG_APP_FILTER"
# The name of the environment variable that indicates we're running under automation
ENV_UBXLIB_AUTO = "U_UBXLIB_AUTO"
# The time for which to wait for something from the
# queue in exe_run(). If this is too short, in a
# multiprocessing world or on a slow machine, it is
# possible to miss things as the task putting things
# on the queue may be blocked from doing so until
# we've decided the queue has been completely emptied
# and moved on
EXE_RUN_QUEUE_WAIT_SECONDS = u_settings.EXE_RUN_QUEUE_WAIT_SECONDS #1
# The number of seconds a USB cutter and the bit positions of
# a KMTronic box are switched off for
HW_RESET_DURATION_SECONDS = u_settings.HW_RESET_DURATION_SECONDS # e.g. 5
# Executable file extension. This will be "" for Linux
# and ".exe" for Windows
EXE_EXT = pick_by_os(linux="", other=".exe")
def keep_going(flag, printer=None, prompt=None):
'''Check a keep_going flag'''
do_not_stop = True
if flag is not None and not flag.is_set():
do_not_stop = False
if printer and prompt:
printer.string("{}aborting as requested.".format(prompt))
return do_not_stop
# subprocess arguments behave a little differently on Linux and Windows
# depending if a shell is used or not, which can be read here:
# https://stackoverflow.com/a/15109975
# This function will compensate for these deviations
def subprocess_osify(cmd, shell=True):
''' expects an array of strings being [command, param, ...] '''
if is_linux() and shell:
line = ''
for item in cmd:
# Put everything in a single string and quote args containing spaces
if ' ' in item:
line += '\"{}\" '.format(item)
else:
line += '{} '.format(item)
cmd = line
return cmd
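# For example, on Linux with shell=True the call
# subprocess_osify(["git", "commit", "-m", "two words"]) collapses the list
# into the single string 'git commit -m "two words" ', whereas on Windows
# the list is returned unchanged.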
def split_command_line_args(cmd_line):
''' Will split a command line string into a list of arguments.
Quoted arguments will be preserved as one argument '''
return [p for p in re.split("( |\\\".*?\\\"|'.*?')", cmd_line) if p.strip()]
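# For example, split_command_line_args('prog --name "hello world" -v') keeps
# the quoted argument together and returns
# ['prog', '--name', '"hello world"', '-v'] (quotes preserved).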
def get_actual_path(path):
'''Given a drive number return real path if it is a subst'''
actual_path = path
if is_linux():
return actual_path
if os.name == 'nt':
# Get a list of substs
text = subprocess.check_output("subst",
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
# Lines should look like this:
# Z:\: => C:\projects\ubxlib_priv
# So, in this example, if we were given z:\blah
# then the actual path should be C:\projects\ubxlib_priv\blah
text = line.decode()
bits = text.rsplit(": => ")
if (len(bits) > 1) and (len(path) > 1) and \
(bits[0].lower()[0:2] == path[0:2].lower()):
actual_path = bits[1] + path[2:]
break
return actual_path
def get_instance_text(instance):
'''Return the instance as a text string'''
instance_text = ""
for idx, item in enumerate(instance):
if idx == 0:
instance_text += str(item)
else:
instance_text += "." + str(item)
return instance_text
# Get a list of instances as a text string separated
# by spaces.
def get_instances_text(instances):
'''Return the instances as a text string'''
instances_text = ""
for instance in instances:
if instance:
instances_text += " {}".format(get_instance_text(instance))
return instances_text
def remove_readonly(func, path, exec_info):
'''Help deltree out'''
del exec_info
os.chmod(path, stat.S_IWRITE)
func(path)
def deltree(directory, printer, prompt):
'''Remove an entire directory tree'''
tries = 3
success = False
if os.path.isdir(directory):
# Retry this as sometimes Windows complains
# that the directory is not empty when it
        # really should be, some sort of internal
# Windows race condition
while not success and (tries > 0):
try:
# Need the onerror bit on Winders, see
# this Stack Overflow post:
# https://stackoverflow.com/questions/1889597/deleting-directory-in-python
shutil.rmtree(directory, onerror=remove_readonly)
success = True
except OSError as ex:
if printer and prompt:
printer.string("{}ERROR unable to delete \"{}\" {}: \"{}\"".
format(prompt, directory,
ex.errno, ex.strerror))
sleep(1)
tries -= 1
else:
success = True
return success
# Some list types aren't quite list types: for instance,
# the lists returned by RPyC look like lists but they
# aren't of type list and so "in", for instance, will fail.
# This converts an instance list (i.e. a list-like object
# containing items that are each another list-like object)
# into a plain-old two-level list.
def copy_two_level_list(instances_in):
'''Convert instances_in into a true list'''
instances_out = []
if instances_in:
for item1 in instances_in:
instances_out1 = []
for item2 in item1:
instances_out1.append(item2)
instances_out.append(copy(instances_out1))
return instances_out
# Check if admin privileges are available, from:
# https://stackoverflow.com/questions/2946746/python-checking-if-a-user-has-administrator-privileges
def has_admin():
'''Check for administrator privileges'''
admin = False
if os.name == 'nt':
try:
# only Windows users with admin privileges can read the C:\windows\temp
if os.listdir(os.sep.join([os.environ.get("SystemRoot", "C:\\windows"), "temp"])):
admin = True
except PermissionError:
pass
else:
# Pylint will complain about the following line but
# that's OK, it is only executed if we're NOT on Windows
# and there the geteuid() method will exist
if "SUDO_USER" in os.environ and os.geteuid() == 0:
admin = True
return admin
# Reset a USB port with the given Device Description
def usb_reset(device_description, printer, prompt):
''' Reset a device'''
instance_id = None
found = False
success = False
try:
# Run devcon and parse the output to find the given device
printer.string("{}running {} to look for \"{}\"...". \
format(prompt, DEVCON_PATH, device_description))
cmd = [DEVCON_PATH, "hwids", "=ports"]
text = subprocess.check_output(subprocess_osify(cmd),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
# The format of a devcon entry is this:
#
# USB\VID_1366&PID_1015&MI_00\6&38E81674&0&0000
# Name: JLink CDC UART Port (COM45)
# Hardware IDs:
# USB\VID_1366&PID_1015&REV_0100&MI_00
# USB\VID_1366&PID_1015&MI_00
# Compatible IDs:
# USB\Class_02&SubClass_02&Prot_00
# USB\Class_02&SubClass_02
# USB\Class_02
#
# Grab what we hope is the instance ID
line = line.decode()
if line.startswith("USB"):
instance_id = line
else:
# If the next line is the Name we want then we're done
if instance_id and ("Name: " + device_description in line):
found = True
printer.string("{}\"{}\" found with instance ID \"{}\"". \
format(prompt, device_description,
instance_id))
break
instance_id = None
if found:
# Now run devcon to reset the device
printer.string("{}running {} to reset device \"{}\"...". \
format(prompt, DEVCON_PATH, instance_id))
cmd = [DEVCON_PATH, "restart", "@" + instance_id]
text = subprocess.check_output(subprocess_osify(cmd),
stderr=subprocess.STDOUT,
shell=False) # Has to be False or devcon won't work
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
else:
printer.string("{}device with description \"{}\" not found.". \
format(prompt, device_description))
except subprocess.CalledProcessError:
printer.string("{} unable to find and reset device.".format(prompt))
return success
# Open the required serial port.
def open_serial(serial_name, speed, printer, prompt):
'''Open serial port'''
serial_handle = None
text = "{}: trying to open \"{}\" as a serial port...". \
format(prompt, serial_name)
try:
return_value = serial.Serial(serial_name, speed, timeout=0.05)
serial_handle = return_value
printer.string("{} opened.".format(text))
except (ValueError, serial.SerialException) as ex:
printer.string("{}{} while accessing port {}: {}.".
format(prompt, type(ex).__name__,
serial_handle.name, str(ex)))
return serial_handle
def open_telnet(port_number, printer, prompt):
'''Open telnet port on localhost'''
telnet_handle = None
text = "{}trying to open \"{}\" as a telnet port on localhost...". \
format(prompt, port_number)
try:
telnet_handle = Telnet("localhost", int(port_number), timeout=5)
if telnet_handle is not None:
printer.string("{} opened.".format(text))
else:
printer.string("{} failed.".format(text))
except (socket.error, socket.timeout, ValueError) as ex:
printer.string("{}{} failed to open telnet {}: {}.".
format(prompt, type(ex).__name__,
port_number, str(ex)))
return telnet_handle
def install_lock_acquire(install_lock, printer, prompt, keep_going_flag=None):
'''Attempt to acquire install lock'''
timeout_seconds = INSTALL_LOCK_WAIT_SECONDS
success = False
if install_lock:
printer.string("{}waiting for install lock...".format(prompt))
while not install_lock.acquire(False) and (timeout_seconds > 0) and \
keep_going(keep_going_flag, printer, prompt):
sleep(1)
timeout_seconds -= 1
if timeout_seconds > 0:
printer.string("{}got install lock.".format(prompt))
success = True
else:
printer.string("{}failed to aquire install lock.".format(prompt))
else:
printer.string("{}warning, there is no install lock.".format(prompt))
return success
def install_lock_release(install_lock, printer, prompt):
'''Release install lock'''
if install_lock:
install_lock.release()
printer.string("{}install lock released.".format(prompt))
def fetch_repo(url, directory, branch, printer, prompt, submodule_init=True, force=False):
'''Fetch a repo: directory can be relative or absolute, branch can be a hash'''
got_code = False
success = False
dir_text = directory
if dir_text == ".":
dir_text = "this directory"
if printer and prompt:
printer.string("{}in directory {}, fetching"
" {} to {}.".format(prompt, os.getcwd(),
url, dir_text))
if not branch:
branch = "master"
if os.path.isdir(directory):
# Update existing code
with ChangeDir(directory):
if printer and prompt:
printer.string("{}updating code in {}...".
format(prompt, dir_text))
target = branch
if branch.startswith("#"):
# Actually been given a branch, lose the
# preceding #
target = branch[1:len(branch)]
# Try this once and, if it fails and force is set,
# do a git reset --hard and try again
tries = 1
if force:
tries += 1
while tries > 0:
try:
call_list = []
call_list.append("git")
call_list.append("fetch")
call_list.append("origin")
call_list.append(target)
if printer and prompt:
text = ""
for item in call_list:
if text:
text += " "
text += item
printer.string("{}in {} calling {}...".
format(prompt, os.getcwd(), text))
# Try to pull the code
text = subprocess.check_output(subprocess_osify(call_list),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
got_code = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
if got_code:
tries = 0
else:
if force:
# git reset --hard
printer.string("{}in directory {} calling git reset --hard...". \
format(prompt, os.getcwd()))
try:
text = subprocess.check_output(subprocess_osify(["git", "reset",
"--hard"]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
force = False
tries -= 1
if not got_code:
# If we still haven't got the code, delete the
# directory for a true clean start
deltree(directory, printer, prompt)
if not os.path.isdir(directory):
# Clone the repo
if printer and prompt:
printer.string("{}cloning from {} into {}...".
format(prompt, url, dir_text))
try:
text = subprocess.check_output(subprocess_osify(["git", "clone", "-q",
url, directory]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
got_code = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
if got_code and os.path.isdir(directory):
# Check out the correct branch and recurse submodules
with ChangeDir(directory):
target = "origin/" + branch
if branch.startswith("#"):
# Actually been given a branch, so lose the
# "origin/" and the preceding #
target = branch[1:len(branch)]
if printer and prompt:
printer.string("{}checking out {}...".
format(prompt, target))
try:
call_list = ["git", "-c", "advice.detachedHead=false",
"checkout", "--no-progress"]
if submodule_init:
call_list.append("--recurse-submodules")
printer.string("{}also recursing sub-modules (can take some time" \
" and gives no feedback).".format(prompt))
call_list.append(target)
if printer and prompt:
text = ""
for item in call_list:
if text:
text += " "
text += item
printer.string("{}in {} calling {}...".
format(prompt, os.getcwd(), text))
text = subprocess.check_output(subprocess_osify(call_list),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
if printer and prompt:
printer.string("{}{}".format(prompt, line.decode()))
success = True
except subprocess.CalledProcessError as error:
if printer and prompt:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
return success
def exe_where(exe_name, help_text, printer, prompt):
'''Find an executable using where.exe or which on linux'''
success = False
try:
printer.string("{}looking for \"{}\"...". \
format(prompt, exe_name))
# See here:
# https://stackoverflow.com/questions/14928860/passing-double-quote-shell-commands-in-python-to-subprocess-popen
# ...for why the construction "".join() is necessary when
# passing things which might have spaces in them.
# It is the only thing that works.
if is_linux():
cmd = ["which {}".format(exe_name.replace(":", "/"))]
printer.string("{}detected linux, calling \"{}\"...".format(prompt, cmd))
else:
cmd = ["where", "".join(exe_name)]
printer.string("{}detected nonlinux, calling \"{}\"...".format(prompt, cmd))
text = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{} found in {}".format(prompt, exe_name,
line.decode()))
success = True
except subprocess.CalledProcessError:
if help_text:
printer.string("{}ERROR {} not found: {}". \
format(prompt, exe_name, help_text))
else:
printer.string("{}ERROR {} not found". \
format(prompt, exe_name))
return success
def exe_version(exe_name, version_switch, printer, prompt):
'''Print the version of a given executable'''
success = False
if not version_switch:
version_switch = "--version"
try:
text = subprocess.check_output(subprocess_osify(["".join(exe_name), version_switch]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
except subprocess.CalledProcessError:
printer.string("{}ERROR {} either not found or didn't like {}". \
format(prompt, exe_name, version_switch))
return success
def exe_terminate(process_pid):
    '''Terminate a process and all of its children'''
process = psutil.Process(process_pid)
for proc in process.children(recursive=True):
proc.terminate()
process.terminate()
def read_from_process_and_queue(process, read_queue):
'''Read from a process, non-blocking'''
while process.poll() is None:
string = process.stdout.readline().decode()
if string and string != "":
read_queue.put(string)
else:
sleep(0.1)
def queue_get_no_exception(the_queue, block=True, timeout=None):
'''A version of queue.get() that doesn't throw an Empty exception'''
thing = None
try:
thing = the_queue.get(block=block, timeout=timeout)
except queue.Empty:
pass
return thing
def capture_env_var(line, env, printer, prompt):
'''A bit of exe_run that needs to be called from two places'''
# Find a KEY=VALUE bit in the line,
# parse it out and put it in the dictionary
# we were given
pair = line.split('=', 1)
if len(pair) == 2:
env[pair[0]] = pair[1].rstrip()
else:
printer.string("{}WARNING: not an environment variable: \"{}\"".
format(prompt, line))
# Note: if returned_env is given then "set"
# will be executed after the exe and the environment
# variables will be returned in it. The down-side
# of this is that the return value of the exe is,
# of course, lost.
def exe_run(call_list, guard_time_seconds=None, printer=None, prompt=None,
shell_cmd=False, set_env=None, returned_env=None,
bash_cmd=False, keep_going_flag=None):
'''Call an executable, printing out what it does'''
success = False
start_time = time()
flibbling = False
kill_time = None
read_time = start_time
if returned_env is not None:
# The caller wants the environment after the
# command has run, so, from this post:
# https://stackoverflow.com/questions/1214496/how-to-get-environment-from-a-subprocess
# append a tag that we can detect
# to the command and then call set,
# from which we can parse the environment
call_list.append("&&")
call_list.append("echo")
call_list.append("flibble")
call_list.append("&&")
if is_linux():
call_list.append("env")
bash_cmd = True
else:
call_list.append("set")
# I've seen output from set get lost,
# possibly because the process ending
# is asynchronous with stdout,
# so add a delay here as well
call_list.append("&&")
call_list.append("sleep")
call_list.append("2")
try:
popen_keywords = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'shell': shell_cmd,
'env': set_env,
'executable': "bin/bash" if bash_cmd else None
}
# Call the thang
        # Note: used to have bufsize=1 here but it turns out
        # that is ignored because the output is considered
        # binary. Seems to work anyway; Windows, at least,
        # appears to be line-buffered.
process = subprocess.Popen(subprocess_osify(call_list, shell=shell_cmd),
**popen_keywords)
if printer:
printer.string("{}{}, pid {} started with guard time {} second(s)". \
format(prompt, call_list[0], process.pid,
guard_time_seconds))
# This is over complex but, unfortunately, necessary.
# At least one thing that we try to run, nrfjprog, can
# crash silently: just hangs and sends no output. However
# it also doesn't flush and close stdout and so read(1)
# will hang, meaning we can't read its output as a means
# to check that it has hung.
# So, here we poll for the return value, which is normally
# how things will end, and we start another thread which
# reads from the process's stdout. If the thread sees
# nothing for guard_time_seconds then we terminate the
# process.
read_queue = queue.Queue()
read_thread = threading.Thread(target=read_from_process_and_queue,
args=(process, read_queue))
read_thread.start()
while process.poll() is None:
if keep_going_flag is None or keep_going(keep_going_flag, printer, prompt):
if guard_time_seconds and (kill_time is None) and \
((time() - start_time > guard_time_seconds) or
(time() - read_time > guard_time_seconds)):
kill_time = time()
if printer:
printer.string("{}guard time of {} second(s)." \
" expired, stopping {}...".
format(prompt, guard_time_seconds,
call_list[0]))
exe_terminate(process.pid)
else:
exe_terminate(process.pid)
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
read_time = time()
while line is not None:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
read_time = time()
sleep(0.1)
# Can't join() read_thread here as it might have
# blocked on a read() (if nrfjprog has anything to
# do with it). It will be tidied up when this process
# exits.
# There may still be stuff on the queue, read it out here
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
while line is not None:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
# There may still be stuff in the buffer after
# the application has finished running so flush that
# out here
line = process.stdout.readline().decode()
while line:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = process.stdout.readline().decode()
if (process.poll() == 0) and kill_time is None:
success = True
if printer:
printer.string("{}{}, pid {} ended with return value {}.". \
format(prompt, call_list[0],
process.pid, process.poll()))
except ValueError as ex:
if printer:
printer.string("{}failed: {} while trying to execute {}.". \
format(prompt, type(ex).__name__, str(ex)))
except KeyboardInterrupt as ex:
process.kill()
raise KeyboardInterrupt from ex
return success
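# A minimal usage sketch of exe_run() (illustrative only, assuming "printer"
# is a PrintToQueue instance and "prompt" a log-prefix string):
#
#   success = exe_run(["git", "--version"], guard_time_seconds=60,
#                     printer=printer, prompt=prompt, shell_cmd=True)
#
# If git produced no output and did not exit within 60 seconds it would be
# terminated and exe_run() would return False.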
def set_process_prio_high():
'''Set the priority of the current process to high'''
if is_linux():
print("Setting process priority currently not supported for Linux")
# It should be possible to set prio with:
# psutil.Process().nice(-10)
# However we get "[Errno 13] Permission denied" even when run as root
else:
psutil.Process().nice(psutil.HIGH_PRIORITY_CLASS)
def set_process_prio_normal():
'''Set the priority of the current process to normal'''
if is_linux():
print("Setting process priority currently not supported for Linux")
# It should be possible to set prio with:
# psutil.Process().nice(0)
# However we get "[Errno 13] Permission denied" even when run as root
else:
psutil.Process().nice(psutil.NORMAL_PRIORITY_CLASS)
class ExeRun():
'''Run an executable as a "with:"'''
def __init__(self, call_list, printer=None, prompt=None, shell_cmd=False, with_stdin=False):
self._call_list = call_list
self._printer = printer
self._prompt = prompt
self._shell_cmd = shell_cmd
        self._with_stdin = with_stdin
self._process = None
def __enter__(self):
if self._printer:
text = ""
for idx, item in enumerate(self._call_list):
if idx == 0:
text = item
else:
text += " {}".format(item)
self._printer.string("{}starting {}...".format(self._prompt,
text))
try:
# Start exe
popen_keywords = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'shell': self._shell_cmd
}
if not is_linux():
popen_keywords['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
if self._with_stdin:
popen_keywords['stdin'] = subprocess.PIPE
self._process = subprocess.Popen(self._call_list, **popen_keywords)
if self._printer:
self._printer.string("{}{} pid {} started".format(self._prompt,
self._call_list[0],
self._process.pid))
except (OSError, subprocess.CalledProcessError, ValueError) as ex:
if self._printer:
self._printer.string("{}failed: {} to start {}.". \
format(self._prompt,
type(ex).__name__, str(ex)))
except KeyboardInterrupt as ex:
self._process.kill()
raise KeyboardInterrupt from ex
return self._process
def __exit__(self, _type, value, traceback):
del _type
del value
del traceback
# Stop exe
if self._printer:
self._printer.string("{}stopping {}...". \
format(self._prompt,
self._call_list[0]))
return_value = self._process.poll()
if not return_value:
retry = 5
while (self._process.poll() is None) and (retry > 0):
# Try to stop with CTRL-C
if is_linux():
sig = signal.SIGINT
else:
sig = signal.CTRL_BREAK_EVENT
self._process.send_signal(sig)
sleep(1)
retry -= 1
return_value = self._process.poll()
if not return_value:
# Terminate with a vengeance
self._process.terminate()
while self._process.poll() is None:
sleep(0.1)
if self._printer:
self._printer.string("{}{} pid {} terminated".format(self._prompt,
self._call_list[0],
self._process.pid))
else:
if self._printer:
self._printer.string("{}{} pid {} CTRL-C'd".format(self._prompt,
self._call_list[0],
self._process.pid))
else:
if self._printer:
self._printer.string("{}{} pid {} already ended".format(self._prompt,
self._call_list[0],
self._process.pid))
return return_value
# Simple SWO decoder: only handles single bytes of application
# data at a time, i.e. what ITM_SendChar() sends.
class SwoDecoder():
'''Take the contents of a byte_array and decode it as SWO'''
def __init__(self, address, replaceLfWithCrLf=False):
self._address = address
self._replace_lf_with_crlf = replaceLfWithCrLf
self._expecting_swit = True
def decode(self, swo_byte_array):
'''Do the decode'''
decoded_byte_array = bytearray()
if swo_byte_array:
for data_byte in swo_byte_array:
# We're looking only for "address" and we also know
# that CMSIS only offers ITM_SendChar(), so packet length
# is always 1, and we only send ASCII characters,
# so the top bit of the data byte must be 0.
#
# For the SWO protocol, see:
#
# https://developer.arm.com/documentation/ddi0314/h/
# instrumentation-trace-macrocell/
# about-the-instrumentation-trace-macrocell/trace-packet-format
#
# When we see SWIT (SoftWare Instrumentation Trace
# I think, anyway, the bit that carries our prints
# off the target) which is 0bBBBBB0SS, where BBBBB is
# address and SS is the size of payload to follow,
# in our case 0x01, we know that the next
# byte is probably data and if it is ASCII then
# it is data. Anything else is ignored.
# The reason for doing it this way is that the
# ARM ITM only sends out sync packets under
# special circumstances so it is not a recovery
# mechanism for simply losing a byte in the
# transfer, which does happen occasionally.
if self._expecting_swit:
if ((data_byte & 0x03) == 0x01) and ((data_byte & 0xf8) >> 3 == self._address):
# Trace packet type is SWIT, i.e. our
# application logging
self._expecting_swit = False
else:
if data_byte & 0x80 == 0:
if (data_byte == 10) and self._replace_lf_with_crlf:
decoded_byte_array.append(13)
decoded_byte_array.append(data_byte)
self._expecting_swit = True
return decoded_byte_array
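# A minimal usage sketch of SwoDecoder (illustrative only): decoding SWO data
# read from the JLink SWO telnet port, assuming the target prints via
# ITM_SendChar() on stimulus address 0:
#
#   decoder = SwoDecoder(0, replaceLfWithCrLf=True)
#   telnet_handle = open_telnet(JLINK_SWO_PORT, printer, prompt)
#   if telnet_handle:
#       decoded = decoder.decode(telnet_handle.read_eager())
#
# read_eager() returns whatever bytes have already arrived without blocking.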
class PrintThread(threading.Thread):
'''Print thread to organise prints nicely'''
def __init__(self, print_queue, file_handle=None,
window_file_handle=None, window_size=10000,
window_update_period_seconds=1):
self._queue = print_queue
self._lock = RLock()
self._queue_forwards = []
self._running = False
self._file_handle = file_handle
self._window = None
self._window_file_handle = window_file_handle
if self._window_file_handle:
self._window = deque(self._window_file_handle, maxlen=window_size)
self._window_update_pending = False
self._window_update_period_seconds = window_update_period_seconds
self._window_next_update_time = time()
threading.Thread.__init__(self)
def _send_forward(self, flush=False):
# Send from any forwarding buffers
# self._lock should be acquired before this is called
queue_idxes_to_remove = []
for idx, queue_forward in enumerate(self._queue_forwards):
if flush or time() > queue_forward["last_send"] + queue_forward["buffer_time"]:
string_forward = ""
len_queue_forward = len(queue_forward["buffer"])
count = 0
for item in queue_forward["buffer"]:
count += 1
if count < len_queue_forward:
item += "\n"
if queue_forward["prefix_string"]:
item = queue_forward["prefix_string"] + item
string_forward += item
queue_forward["buffer"] = []
if string_forward:
try:
queue_forward["queue"].put(string_forward)
except TimeoutError:
pass
except (OSError, EOFError, BrokenPipeError):
queue_idxes_to_remove.append(idx)
queue_forward["last_send"] = time()
for idx in queue_idxes_to_remove:
self._queue_forwards.pop(idx)
def add_forward_queue(self, queue_forward, prefix_string=None, buffer_time=0):
'''Forward things received on the print queue to another queue'''
self._lock.acquire()
already_done = False
for item in self._queue_forwards:
if item["queue"] == queue_forward:
already_done = True
break
if not already_done:
item = {}
item["queue"] = queue_forward
item["prefix_string"] = prefix_string
item["buffer"] = []
item["buffer_time"] = buffer_time
item["last_send"] = time()
self._queue_forwards.append(item)
self._lock.release()
def remove_forward_queue(self, queue_forward):
'''Stop forwarding things received on the print queue to another queue'''
self._lock.acquire()
queues = []
self._send_forward(flush=True)
for item in self._queue_forwards:
if item["queue"] != queue_forward:
queues.append(item)
self._queue_forwards = queues
self._lock.release()
def stop_thread(self):
'''Helper function to stop the thread'''
self._lock.acquire()
self._running = False
# Write anything remaining to the window file
if self._window_update_pending:
self._window_file_handle.seek(0)
for item in self._window:
self._window_file_handle.write(item)
self._window_file_handle.flush()
self._window_update_pending = False
self._window_next_update_time = time() + self._window_update_period_seconds
self._lock.release()
def run(self):
'''Worker thread'''
self._running = True
while self._running:
# Print locally and store in any forwarding buffers
try:
my_string = self._queue.get(block=False, timeout=0.5)
print(my_string)
if self._file_handle:
self._file_handle.write(my_string + "\n")
self._lock.acquire()
if self._window is not None:
# Note that my_string can contain multiple lines,
# hence the need to split it here to maintain the
# window
for line in my_string.splitlines():
self._window.append(line + "\n")
self._window_update_pending = True
for queue_forward in self._queue_forwards:
queue_forward["buffer"].append(my_string)
self._lock.release()
except queue.Empty:
sleep(0.1)
except (OSError, EOFError, BrokenPipeError):
# Try to restore stdout
sleep(0.1)
sys.stdout = sys.__stdout__
self._lock.acquire()
# Send from any forwarding buffers
self._send_forward()
# Write the window to file if required
if self._window_update_pending and time() > self._window_next_update_time:
# If you don't do this you can end up with garbage
# at the end of the file
self._window_file_handle.truncate()
self._window_file_handle.seek(0)
for item in self._window:
self._window_file_handle.write(item)
self._window_update_pending = False
self._window_next_update_time = time() + self._window_update_period_seconds
self._lock.release()
class PrintToQueue():
'''Print to a queue, if there is one'''
def __init__(self, print_queue, file_handle, include_timestamp=False):
self._queues = []
self._lock = RLock()
if print_queue:
self._queues.append(print_queue)
self._file_handle = file_handle
self._include_timestamp = include_timestamp
def add_queue(self, print_queue):
'''Add a queue to the list of places to print to'''
self._lock.acquire()
already_done = False
for item in self._queues:
if item == print_queue:
already_done = True
break
if not already_done:
self._queues.append(print_queue)
self._lock.release()
def remove_queue(self, print_queue):
'''Remove a queue from the list of places to print to'''
self._lock.acquire()
queues = []
for item in self._queues:
if item != print_queue:
queues.append(item)
self._queues = queues
self._lock.release()
def string(self, string, file_only=False):
'''Print a string to the queue(s)'''
if self._include_timestamp:
string = strftime(TIME_FORMAT, gmtime()) + " " + string
if not file_only:
self._lock.acquire()
queue_idxes_to_remove = []
if self._queues:
for idx, print_queue in enumerate(self._queues):
try:
print_queue.put(string)
except (EOFError, BrokenPipeError):
queue_idxes_to_remove.append(idx)
for idx in queue_idxes_to_remove:
self._queues.pop(idx)
else:
print(string)
self._lock.release()
if self._file_handle:
self._file_handle.write(string + "\n")
self._file_handle.flush()
# This stolen from here:
# https://stackoverflow.com/questions/431684/how-do-i-change-the-working-directory-in-python
class ChangeDir():
'''Context manager for changing the current working directory'''
def __init__(self, new_path):
self._new_path = os.path.expanduser(new_path)
self._saved_path = None
def __enter__(self):
'''CD to new_path'''
self._saved_path = os.getcwd()
os.chdir(self._new_path)
def __exit__(self, etype, value, traceback):
'''CD back to saved_path'''
os.chdir(self._saved_path)
class Lock():
'''Hold a lock as a "with:"'''
def __init__(self, lock, guard_time_seconds,
lock_type, printer, prompt, keep_going_flag=None):
self._lock = lock
self._guard_time_seconds = guard_time_seconds
self._lock_type = lock_type
self._printer = printer
self._prompt = prompt
self._keep_going_flag = keep_going_flag
self._locked = False
def __enter__(self):
if not self._lock:
return True
# Wait on the lock
if not self._locked:
timeout_seconds = self._guard_time_seconds
self._printer.string("{}waiting up to {} second(s)" \
" for a {} lock...". \
format(self._prompt,
self._guard_time_seconds,
self._lock_type))
count = 0
while not self._lock.acquire(False) and \
((self._guard_time_seconds == 0) or (timeout_seconds > 0)) and \
keep_going(self._keep_going_flag, self._printer, self._prompt):
sleep(1)
timeout_seconds -= 1
count += 1
if count == 30:
self._printer.string("{}still waiting {} second(s)" \
" for a {} lock (locker is" \
" currently {}).". \
format(self._prompt, timeout_seconds,
self._lock_type, self._lock))
count = 0
if (self._guard_time_seconds == 0) or (timeout_seconds > 0):
self._locked = True
self._printer.string("{}{} lock acquired ({}).". \
format(self._prompt, self._lock_type,
self._lock))
return self._locked
def __exit__(self, _type, value, traceback):
del _type
del value
del traceback
if self._lock and self._locked:
try:
self._lock.release()
self._locked = False
self._printer.string("{}released a {} lock.".format(self._prompt,
self._lock_type))
except RuntimeError:
self._locked = False
self._printer.string("{}{} lock was already released.". \
format(self._prompt, self._lock_type))
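# A minimal usage sketch of the Lock context manager (illustrative only,
# assuming "lock" is a shared RLock and "printer"/"prompt" as elsewhere):
#
#   with Lock(lock, PLATFORM_LOCK_GUARD_TIME_SECONDS, "platform",
#             printer, prompt) as locked:
#       if locked:
#           pass  # do the work that required the platform lock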
def wait_for_completion(_list, purpose, guard_time_seconds,
printer, prompt, keep_going_flag):
'''Wait for a completion list to empty'''
completed = False
if len(_list) > 0:
timeout_seconds = guard_time_seconds
printer.string("{}waiting up to {} second(s)" \
" for {} completion...". \
format(prompt, guard_time_seconds, purpose))
count = 0
while (len(_list) > 0) and \
((guard_time_seconds == 0) or (timeout_seconds > 0)) and \
keep_going(keep_going_flag, printer, prompt):
sleep(1)
timeout_seconds -= 1
count += 1
if count == 30:
list_text = ""
for item in _list:
if list_text:
list_text += ", "
list_text += str(item)
printer.string("{}still waiting {} second(s)" \
" for {} to complete (waiting" \
" for {}).". \
format(prompt, timeout_seconds,
purpose, list_text))
count = 0
if len(_list) == 0:
completed = True
printer.string("{}{} completed.".format(prompt, purpose))
return completed
def reset_nrf_target(connection, printer, prompt):
'''Reset a Nordic NRFxxx target'''
call_list = []
printer.string("{}resetting target...".format(prompt))
# Assemble the call list
call_list.append("nrfjprog")
call_list.append("--reset")
if connection and "debugger" in connection and connection["debugger"]:
call_list.append("-s")
call_list.append(connection["debugger"])
# Print what we're gonna do
tmp = ""
for item in call_list:
tmp += " " + item
printer.string("{}in directory {} calling{}". \
format(prompt, os.getcwd(), tmp))
# Call it
return exe_run(call_list, 60, printer, prompt)
def usb_cutter_reset(usb_cutter_id_strs, printer, prompt):
'''Cut and then un-cut USB cables using Cleware USB cutters'''
# First switch the USB cutters off
action = "1"
count = 0
call_list_root = ["usbswitchcmd"]
call_list_root.append("-s")
call_list_root.append("-n")
while count < 2:
for usb_cutter_id_str in usb_cutter_id_strs:
call_list = call_list_root.copy()
call_list.append(usb_cutter_id_str)
call_list.append(action)
# Print what we're gonna do
tmp = ""
for item in call_list:
tmp += " " + item
if printer:
printer.string("{}in directory {} calling{}". \
format(prompt, os.getcwd(), tmp))
# Set shell to keep Jenkins happy
exe_run(call_list, 0, printer, prompt, shell_cmd=True)
# Wait 5ish seconds
if printer:
printer.string("{}waiting {} second(s)...". \
format(prompt, HW_RESET_DURATION_SECONDS))
sleep(HW_RESET_DURATION_SECONDS)
# "0" to switch the USB cutters on again
action = "0"
count += 1
def kmtronic_reset(ip_address, hex_bitmap, printer, prompt):
'''Cut and then un-cut power using a KMTronic box'''
# KMTronic is a web relay box which will be controlling
# power to, for instance, EVKs The last byte of the URL
# is a hex bitmap of the outputs where 0 sets off and 1
# sets on
# Take only the last two digits of the hex bitmap
hex_bitmap_len = len(hex_bitmap)
hex_bitmap = hex_bitmap[hex_bitmap_len - 2:hex_bitmap_len]
kmtronic_off = "http://" + ip_address + "FFE0" + hex_bitmap
kmtronic_on = "http://" + ip_address + "FFE0" + "{0:x}".format(int(hex_bitmap, 16) ^ 0xFF)
try:
# First switch the given bit positions off
if printer:
printer.string("{}sending {}". \
format(prompt, kmtronic_off))
response = requests.get(kmtronic_off)
# Wait 5ish seconds
if printer:
printer.string("{}...received response {}, waiting {} second(s)...". \
format(prompt, response.status_code, HW_RESET_DURATION_SECONDS))
sleep(HW_RESET_DURATION_SECONDS)
# Switch the given bit positions on
if printer:
printer.string("{}sending {}".format(prompt, kmtronic_on))
response = requests.get(kmtronic_on)
if printer:
printer.string("{}...received response {}.". \
format(prompt, response.status_code))
except requests.ConnectionError:
if printer:
printer.string("{}unable to connect to KMTronic box at {}.". \
format(prompt, ip_address))
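# For example, if ip_address were "10.0.0.1/" and hex_bitmap "03" then the
# code above would request
#   http://10.0.0.1/FFE003   and, after the wait,
#   http://10.0.0.1/FFE0fc   (0x03 ^ 0xFF == 0xfc)
# i.e. any trailing "/" must already be part of ip_address.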
# Look for a single line anywhere in message
# beginning with "test: ". This must be followed by
# "x.y.z a.b.c m.n.o" (i.e. instance IDs space separated)
# and then an optional "blah" filter string, or just "*"
# and an optional "blah" filter string or "None".
# Valid examples are:
#
# test: 1
# test: 1 3 7
# test: 1.0.3 3 7.0
# test: 1 2 example
# test: 1.1 8 portInit
# test: *
# test: * port
# test: none
#
# Filter strings must NOT begin with a digit.
# There cannot be more than one * or a * with any other instance.
# There can only be one filter string.
# Only whitespace is expected after this on the line.
# Anything else is ignored.
# Populates instances with the "0 4.5 13.5.1" bit as instance
# entries [[0], [4, 5], [13, 5, 1]] and returns the filter
# string, if any.
def commit_message_parse(message, instances, printer=None, prompt=None):
'''Find stuff in a commit message'''
instances_all = False
instances_local = []
filter_string_local = None
found = False
if message:
# Search through message for a line beginning
# with "test:"
if printer:
printer.string("{}### parsing message to see if it contains a test directive...". \
format(prompt))
lines = message.split("\\n")
for idx1, line in enumerate(lines):
if printer:
printer.string("{}text line {}: \"{}\"".format(prompt, idx1 + 1, line))
if line.lower().startswith("test:"):
found = True
instances_all = False
# Pick through what follows
parts = line[5:].split()
for part in parts:
if instances_all and (part[0].isdigit() or part == "*" or part.lower() == "none"):
# If we've had a "*" and this is another one
# or it begins with a digit then this is
# obviously not a "test:" line,
# leave the loop and try again.
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if filter_string_local:
# If we've had a filter string then nothing
# must follow so this is not a "test:" line,
# leave the loop and try again.
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...extraneous characters after test directive," \
" ignoring.".format(prompt))
found = False
break
if part[0].isdigit():
# If this part begins with a digit it could
# be an instance containing numbers
instance = []
bad = False
for item in part.split("."):
try:
instance.append(int(item))
except ValueError:
# Some rubbish, not a test line so
# leave the loop and try the next
# line
bad = True
break
if bad:
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if instance:
instances_local.append(instance[:])
elif part == "*":
if instances_local:
# If we've already had any instances
# this is obviously not a test line,
# leave the loop and try again
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
# If we haven't had any instances and
# this is a * then it means "all"
instances_local.append(part)
instances_all = True
elif part.lower() == "none":
if instances_local:
# If we've already had any instances
# this is obviously not a test line,
# leave the loop and try again
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
instances_local = []
filter_string_local = None
break
elif instances_local and not part == "*":
# If we've had an instance and this
# is not a "*" then this must be a
# filter string
filter_string_local = part
else:
# Found some rubbish, not a "test:"
# line after all, leave the loop
# and try the next line
instances_local = []
filter_string_local = None
if printer:
printer.string("{}...badly formed test directive, ignoring.". \
format(prompt))
found = False
break
if found:
text = "found test directive with"
if instances_local:
text += " instance(s)" + get_instances_text(instances_local)
if filter_string_local:
text += " and filter \"" + filter_string_local + "\""
else:
text += " instances \"None\""
if printer:
printer.string("{}{}.".format(prompt, text))
break
if printer:
printer.string("{}no test directive found".format(prompt))
if found and instances_local:
instances.extend(instances_local[:])
return found, filter_string_local
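# A minimal usage sketch of commit_message_parse() (illustrative only), noting
# that the message is expected to carry literal "\n" separators between lines:
#
#   instances = []
#   found, filter_str = commit_message_parse("some fix\\ntest: 1.1 8 portInit",
#                                            instances)
#   # found is True, instances becomes [[1, 1], [8]], filter_str is "portInit"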
| 42.865722 | 120 | 0.533332 | 7,006 | 63,527 | 4.669141 | 0.13203 | 0.033382 | 0.011739 | 0.014123 | 0.356169 | 0.29558 | 0.256481 | 0.221356 | 0.198551 | 0.176877 | 0 | 0.008565 | 0.378784 | 63,527 | 1,481 | 121 | 42.894666 | 0.820343 | 0.214177 | 0 | 0.456897 | 0 | 0 | 0.067646 | 0.000507 | 0.000958 | 0 | 0.000405 | 0 | 0 | 1 | 0.050766 | false | 0.002874 | 0.019157 | 0 | 0.104406 | 0.167625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
53481a8ce6431996b6ceac97f012f4f1b1b0f592 | 8,765 | py | Python | faigler_mazeh.py | tcjansen/beer | c6421371b6506cef1adf88cefa9a55db2f04e2dc | ["MIT"] | null | null | null | faigler_mazeh.py | tcjansen/beer | c6421371b6506cef1adf88cefa9a55db2f04e2dc | ["MIT"] | null | null | null | faigler_mazeh.py | tcjansen/beer | c6421371b6506cef1adf88cefa9a55db2f04e2dc | ["MIT"] | null | null | null |
import numpy as np
import astropy.modeling.blackbody as bb
import astropy.constants as const
from astropy.io import fits
from scipy.interpolate import interp2d
class FaiglerMazehFit():
def __init__(self, P_orb, inc, R_star, M_star, T_star, A_ellip=False, A_beam=False,
R_p=False, a=False, u=False, g=0.65, logg=None, tele='TESS', M_p=False,
K=False):
self.P_orb = P_orb # orbital period in days
self.inc = inc * np.pi / 180 # inclination converted to radians
self.R_star = R_star # radius of the star in solar units
self.M_star = M_star # mass of the star in solar units
self.T_star = T_star # temperature of the star [K]
self.A_ellip = A_ellip # ellipsoidal amplitude in ppm
self.A_beam = A_beam # beaming amplitude in ppm
self.g = g # gravity-darkening coefficient, expected range is 0.3-1.0
self.logg = logg # log surface gravity of the star [cm s^-2]
self.tele = tele.lower() # observation instrument used, default is TESS. Only other
# other option (for now) is Kepler.
self.R_p = R_p # radius of the planet in jupiter radii
self.a = a
self.u = u # the limb-darkening coefficient, range is 0-1
self.g = g
self.M_p = M_p
self.K = K
# get the mass from the ellipsoidal amplitude, if given.
# u is the limb-darkening coefficient, range is 0-1
if not M_p and not not A_ellip and not not logg:
self.u = self.LDC()
self.M_p = self.m_from_ellip()
# star-planet separation [au] assuming a circular orbit
if not a and not not M_p:
self.a = get_a(self.P_orb * 86400, self.M_star * const.M_sun.value, \
self.M_p * const.M_jup.value) / const.au.value
def alpha_ellip(self):
if not self.u:
self.u = self.LDC()
if not self.g:
self.g = self.GDC()
a = 15 + self.u
b = 1 + self.g
c = 3 - self.u
return 0.15 * a * b / c
def RV_amp(self):
"""
Returns the radial velocity amplitude [m/s] of the star given a companion mass.
"""
return 27 / 40 * const.c.value \
* self.M_star ** (-2/3) \
* self.P_orb ** (-1/3) \
* self.M_p * np.sin(self.inc)
def doppler_shift(self, K):
"""
Returns the shift in wavelength for a given radial velocity amplitude.
"""
return K / const.c.value
def response_convolution(self, lambdas, response):
return response * bb.blackbody_lambda(lambdas, self.T_star).value
def alpha_beam(self, K):
"""
Returns the factor that accounts for the flux lost when a star gets Doppler shifted
in and out of the observer's bandpass.
"""
print(K)
rest_lambdas, response = response_func(self.tele)
flux_rest = np.trapz(self.response_convolution(rest_lambdas, response), \
x=rest_lambdas)
blueshifted_lambdas = rest_lambdas - self.doppler_shift(K=K)
flux_blueshift = np.trapz(self.response_convolution(blueshifted_lambdas, response), \
x=rest_lambdas)
redshifted_lambdas = rest_lambdas + self.doppler_shift(K=K)
flux_redshift = np.trapz(self.response_convolution(redshifted_lambdas, response), \
x=rest_lambdas)
alpha_blue = abs( (flux_rest - flux_blueshift) / flux_rest )
alpha_red = abs( (flux_rest - flux_redshift) / flux_rest )
return 1 - np.mean([alpha_red, alpha_blue])
def m_from_ellip(self):
return self.A_ellip \
* self.R_star ** (-3) \
* self.M_star ** 2 \
* self.P_orb ** 2 \
/ (12.8 * self.alpha_ellip() * np.sin(self.inc) ** 2)
def ellip_from_m(self):
return self.M_p * 12.8 * self.alpha_ellip() * np.sin(self.inc) ** 2 \
* self.R_star ** 3 \
* self.M_star ** (-2) \
* self.P_orb ** (-2)
def m_from_beam(self, K=False, alpha_beam=False):
if not alpha_beam and not K and not not self.M_p:
alpha_beam = self.alpha_beam(K=self.RV_amp())
elif not alpha_beam and not not K:
alpha_beam = self.alpha_beam(K=K)
elif not not K and not not alpha_beam:
raise ValueError("Please only specify either K or alpha_beam, not both.")
elif not K and not alpha_beam:
raise ValueError("Please specify a radial velocity (K) or alpha_beam parameter")
return self.A_beam \
* self.M_star ** (2/3) \
* self.P_orb ** (1/3) \
/ (alpha_beam * np.sin(self.inc) * 2.7)
def beam_from_m(self):
"""
Returns the expected Doppler beaming amplitude [ppm] for a given mass.
"""
if not self.M_p:
raise ValueError("Argument 'M_p' must be specified if you're trying to " +
"derive a beaming amplitude from a mass.")
if not self.K:
			self.K = self.RV_amp()
return 2.7 * self.alpha_beam(K=self.K) \
* self.M_star ** (-2/3) \
* self.P_orb ** (-1/3) \
* self.M_p * np.sin(self.inc)
def Ag_from_thermref(self, A_thermref):
"""
Return the geometric albedo derived from the thermal + ref amplitude.
"""
return A_thermref * (self.R_p / self.a) ** -2 * (const.au / const.R_jup) ** 2
def mass(self, derived_from=None, K=False, alpha_beam=False):
if derived_from == "ellip":
return self.m_from_ellip()
elif derived_from == "beam":
return self.m_from_beam(K=K, alpha_beam=alpha_beam)
else:
raise ValueError("derived_from must equal either 'ellip' or 'beam'")
def nearest_neighbors(self, value, array, max_difference):
"""
Returns a set of nearest neighbor indices of the given array.
"""
return set(list((np.where(abs(array - value) < max_difference))[0]))
def correct_maxdiff(self, value, array, guess):
while len(self.nearest_neighbors(value, array, guess)) > 0:
guess -= 0.01 * guess
return guess
def shared_neighbor(self, value1, array1, max_diff1, value2, array2, max_diff2):
set1 = self.nearest_neighbors(value1, array1, max_diff1)
set2 = self.nearest_neighbors(value2, array2, max_diff2)
nearest = list(set1.intersection(set2))
# if len(nearest) > 1:
# newmax_diff1 = self.correct_maxdiff(value1, array1, max_diff1)
# newmax_diff2 = self.correct_maxdiff(value2, array2, max_diff2)
# print(newmax_diff1, newmax_diff2)
# if newmax_diff2 > newmax_diff1:
# max_diff2 = newmax_diff2
# else:
# max_diff1 = newmax_diff1
# set1 = self.nearest_neighbors(value1, array1, max_diff1)
# set2 = self.nearest_neighbors(value2, array2, max_diff2)
# nearest = list(set1.intersection(set2))
# print(nearest)
# # if len(nearest) > 1:
# # raise ValueError("Multiple shared nearest neighbors, indices = ", nearest)
# # else:
# # return nearest[0]
return nearest[0]
def tess_warning(self):
if self.tele != 'tess':
raise ValueError("This function is only appropriate for observations done with " +
"the TESS satellite")
def claret_LDC(self):
"""
Returns the mu coefficient and the four-parameters used in the Claret four-parameter
limb-darkening law (Claret 2000). These are obtained by finding the nearest neighbor
in the model limb-darkening of TESS from Claret 2018.
"""
# print("claret_LDC is still garbage, sorry. Quitting now...")
# exit()
self.tess_warning()
logg, Teff, a1, a2, a3, a4, mu, mod = np.genfromtxt('../claret_ldc.dat',
usecols=(0,1,4,5,6,7,8,10),
unpack=True)
mod = np.genfromtxt('../claret_ldc.dat', usecols=(10,), dtype='str')
if self.T_star <= 3000:
# the PC model is meant for cool stars, and if we break it up this way we can do an
# easier 2D interpolation.
mask = mod == 'PD'
else:
mask = mod == 'PC'
logg = logg[mask]
Teff = Teff[mask]
a1 = a1[mask]
a2 = a2[mask]
a3 = a3[mask]
a4 = a4[mask]
mu = mu[mask]
nearest = self.shared_neighbor(self.T_star, Teff, 100, self.logg, logg, 0.25)
mu = mu[nearest]
a_coeffs = [a1[nearest], a2[nearest], a3[nearest], a4[nearest]]
return mu, a_coeffs
def GDC(self):
"""
Returns the gravity-darkening coefficient from the Claret 2017 model
"""
self.tess_warning()
logg, log_Teff, g = np.genfromtxt('../claret_gdc.dat', usecols=(2,3,4), unpack=True)
nearest = self.shared_neighbor(np.log10(self.T_star), log_Teff, .01, self.logg,
logg, 0.25)
return g[nearest]
def LDC(self):
"""
Returns the limb-darkening coefficient of the host star.
"""
mu, a_coeffs = self.claret_LDC()
return 1 - sum([a_coeffs[k] * (1 - mu ** ((k+1) / 2)) for k in range(4)])
def get_response_specs(tele):
if tele=="tess":
return "../tess-response-function-v1.0.csv", ',', 1e1
elif tele=="kepler":
return "../kepler_hires.dat", '\t', 1e4
def response_func(tele):
file, delimiter, to_AA = get_response_specs(tele)
lambdas, response = np.genfromtxt(file, delimiter=delimiter, usecols=(0,1), unpack=True)
return lambdas * to_AA, response
def get_a(P, M_star, M_p):
"""
Use Kepler's third law to derive the star-planet separation.
"""
return (P ** 2 * const.G.value * (M_star + M_p) / (4 * np.pi ** 2)) ** (1/3)
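# Quick sanity check (illustrative only): for an Earth-like orbit,
# get_a(365.25 * 86400, const.M_sun.value, const.M_earth.value) gives roughly
# 1.5e11 m, i.e. about 1 au, confirming the SI unit handling above.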
| 31.989051 | 89 | 0.664803 | 1,398 | 8,765 | 4.023605 | 0.205293 | 0.016 | 0.011378 | 0.008889 | 0.2096 | 0.157689 | 0.129956 | 0.108978 | 0.096178 | 0.081956 | 0 | 0.027938 | 0.207758 | 8,765 | 273 | 90 | 32.106227 | 0.782114 | 0.258985 | 0 | 0.104938 | 0 | 0 | 0.074617 | 0.005364 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135802 | false | 0 | 0.030864 | 0.018519 | 0.308642 | 0.006173 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
534844fa3b3f1c68231a812a9b687424b61ad180 | 13,681 | py | Python | Grid-neighbor-search/GNS/read_instance_2layer_2LMM_L.py | CitrusAqua/mol-infer | 6d5411a2cdc7feda418f9413153b1b66b45a2e96 | ["MIT"] | null | null | null | Grid-neighbor-search/GNS/read_instance_2layer_2LMM_L.py | CitrusAqua/mol-infer | 6d5411a2cdc7feda418f9413153b1b66b45a2e96 | ["MIT"] | null | null | null | Grid-neighbor-search/GNS/read_instance_2layer_2LMM_L.py | CitrusAqua/mol-infer | 6d5411a2cdc7feda418f9413153b1b66b45a2e96 | ["MIT"] | null | null | null |
"""
read_instance_BH-cyclic.py
"""
'''
[seed graph]
V_C : "V_C"
E_C : "E_C"
[core specification]
ell_LB : "\ell_{\rm LB}"
ell_UB : "\ell_{\rm UB}"
cs_LB : "\textsc{cs}_{\rm LB}"
cs_UB : "\textsc{cs}_{\rm UB}"
'''
import sys
def read_pmax_file(filename):
with open(filename,'r') as f:
F = [line.rstrip('\n') for line in f if line[0]!='#']
p_max = int(F.pop(0))
s = F.pop(0)
delta = list(map(float, s.split(' ')))
s = F.pop(0)
r = list(map(int, s.split(' ')))
return p_max, delta, r
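# A sketch of the expected pmax file layout (inferred from the parsing above):
# '#' lines are comments, then one line with p_max, one line of space-separated
# floats (delta) and one line of space-separated integers (r), e.g.
#
#   # example pmax file
#   2
#   0.1 0.2
#   1 3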
def read_seed_graph(filename):
with open(filename,'r') as f:
F = [line.rstrip('\n') for line in f if line[0]!='#']
### read V_C ###
num_V_C = int(F.pop(0))
V_C = tuple(range(1,num_V_C+1))
### read E_C ###
num_E_C = int(F.pop(0))
E_C = {}
for e in range(num_E_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
E_C[arr[0]] = (arr[0], arr[1], arr[2]) # Add arr[0] to distinguish two edges with same starting and ending vertices, by Zhu
### read ell_LB and ell_UB ###
ell_LB = {}
ell_UB = {}
for e in range(num_E_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ell_LB[arr[0]] = arr[1]
ell_UB[arr[0]] = arr[2]
### compute E_ge_two, E_ge_one, E_zero_one, E_equal_one ###
E_ge_two = []
E_ge_one = []
E_zero_one = []
E_equal_one = []
I_ge_two = []
I_ge_one = []
I_zero_one = []
I_equal_one = []
for e in E_C:
if ell_LB[e] >= 2:
E_ge_two.append(E_C[e])
I_ge_two.append(e)
elif ell_LB[e] == 1 and ell_UB[e] >= 2:
E_ge_one.append(E_C[e])
I_ge_one.append(e)
elif ell_LB[e] == 0 and ell_UB[e] == 1:
E_zero_one.append(E_C[e])
I_zero_one.append(e)
elif ell_LB[e] == 1 and ell_UB[e] == 1:
E_equal_one.append(E_C[e])
I_equal_one.append(e)
else:
sys.stderr.write('error: a strange edge is found.\n')
sys.exit(1)
### read n_LB_int and n_UB_int ###
n_LB_int = int(F.pop(0))
n_UB_int = int(F.pop(0))
# read n_LB and n_star
n_LB = int(F.pop(0))
n_star = int(F.pop(0))
# read rho
rho = int(F.pop(0))
### read ch_LB and ch_UB ###
ch_LB = {}
ch_UB = {}
for v in range(num_V_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ch_LB[arr[0]] = arr[1]
ch_UB[arr[0]] = arr[2]
for e in range(len(E_ge_two + E_ge_one)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ch_LB[E_C[arr[0]]] = arr[1]
ch_UB[E_C[arr[0]]] = arr[2]
### read bl_LB and bl_UB ###
bl_LB = {}
bl_UB = {}
for v in range(num_V_C):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bl_LB[arr[0]] = arr[1]
bl_UB[arr[0]] = arr[2]
for e in range(len(E_ge_two + E_ge_one)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bl_LB[E_C[arr[0]]] = arr[1]
bl_UB[E_C[arr[0]]] = arr[2]
# read Lambda
s = F.pop(0)
Lambda = list(s.split(' '))
# read Lambda_dg_int
s = F.pop(0)
num = int(s)
Lambda_dg_int = list()
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
Lambda_dg_int.append((arr[0], int(arr[1])))
# read Gamma_int_ac
s = F.pop(0)
num = int(s)
Gamma_int_ac = list()
nu_int = list()
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
tmp_1 = (arr[0], arr[1], int(arr[2]))
tmp_2 = (arr[1], arr[0], int(arr[2]))
nu_int.append(tmp_1)
if tmp_1 not in Gamma_int_ac:
Gamma_int_ac.append(tmp_1)
if tmp_2 not in Gamma_int_ac:
Gamma_int_ac.append(tmp_2)
# read Gamma_int
s = F.pop(0)
num = int(s)
Gamma_int = list()
gam_int = list()
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
tmp_1 = ((arr[0], int(arr[1])), (arr[2], int(arr[3])), int(arr[4]))
tmp_2 = ((arr[2], int(arr[3])), (arr[0], int(arr[1])), int(arr[4]))
gam_int.append(tmp_1)
if tmp_1 not in Gamma_int:
Gamma_int.append(tmp_1)
if tmp_2 not in Gamma_int:
Gamma_int.append(tmp_2)
# read Lambda_star
Lambda_star = {i: set() for i in range(1, num_V_C + 1)}
for i in range(1, num_V_C + 1):
s = F.pop(0)
arr = list(s.split(' '))
ind = int(arr[0])
arr.pop(0)
for a in arr:
Lambda_star[ind].add(a)
Lambda_int = list()
# read na_LB and na_UB
s = F.pop(0)
num = int(s)
na_LB = {}
na_UB = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
na_LB[arr[0]] = int(arr[1])
na_UB[arr[0]] = int(arr[2])
# read na_LB_int and na_UB_int
s = F.pop(0)
num = int(s)
na_LB_int = {}
na_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
na_LB_int[arr[0]] = int(arr[1])
na_UB_int[arr[0]] = int(arr[2])
Lambda_int.append(arr[0])
# read ns_LB_int and ns_UB_int
s = F.pop(0)
num = int(s)
ns_LB_int = {}
ns_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
ns_LB_int[(arr[0], int(arr[1]))] = int(arr[2])
ns_UB_int[(arr[0], int(arr[1]))] = int(arr[3])
# read ac_LB_int and ac_UB_int
s = F.pop(0)
num = int(s)
ac_LB_int = {}
ac_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
a1, a2, m = nu_int[int(arr[0]) - 1]
ac_LB_int[(a1, a2, m)] = int(arr[1])
ac_LB_int[(a2, a1, m)] = int(arr[1])
ac_UB_int[(a1, a2, m)] = int(arr[2])
ac_UB_int[(a2, a1, m)] = int(arr[2])
# read ec_LB_int and ec_UB_int
s = F.pop(0)
num = int(s)
ec_LB_int = {}
ec_UB_int = {}
for i in range(num):
s = F.pop(0)
arr = list(s.split(' '))
a1, a2, m = gam_int[int(arr[0]) - 1]
ec_LB_int[(a1, a2, m)] = int(arr[1])
ec_LB_int[(a2, a1, m)] = int(arr[1])
ec_UB_int[(a1, a2, m)] = int(arr[2])
ec_UB_int[(a2, a1, m)] = int(arr[2])
# read bd2_LB and bd2_UB
bd2_LB = {}
bd2_UB = {}
for e in range(len(E_C)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bd2_LB[E_C[arr[0]]] = arr[1]
bd2_UB[E_C[arr[0]]] = arr[2]
# read bd3_LB and bd3_UB
bd3_LB = {}
bd3_UB = {}
for e in range(len(E_C)):
s = F.pop(0)
arr = list(map(int, s.split(' ')))
bd3_LB[E_C[arr[0]]] = arr[1]
bd3_UB[E_C[arr[0]]] = arr[2]
# read ac_LB_lf and ac_UB_lf
s = F.pop(0)
num = int(s)
ac_LB_lf = dict()
ac_UB_lf = dict()
for e in range(num):
s = F.pop(0)
arr = list(s.split(' '))
ac_LB_lf[(arr[0], arr[1], int(arr[2]))] = int(arr[3])
ac_UB_lf[(arr[0], arr[1], int(arr[2]))] = int(arr[4])
s = F.pop(0)
arr = list(map(int, s.split(' ')))
ac_LB_lf_common = arr[0]
ac_UB_lf_common = arr[1]
####################################
# Constants that are not defined in the instance file but are used in the MILP
r_GC = num_E_C - (num_V_C - 1)
dg_LB = [0,0,0,0,0]
dg_UB = [n_star,n_star,n_star,n_star,n_star]
return V_C, E_C, \
E_ge_two, E_ge_one, E_zero_one, E_equal_one, \
I_ge_two, I_ge_one, I_zero_one, I_equal_one, \
ell_LB, ell_UB, n_LB_int, n_UB_int, \
n_LB, n_star, rho, \
ch_LB, ch_UB, bl_LB, bl_UB, \
Lambda, Lambda_dg_int, Gamma_int_ac, Gamma_int, \
Lambda_star, na_LB, na_UB, Lambda_int, \
na_LB_int, na_UB_int, ns_LB_int, ns_UB_int, \
ac_LB_int, ac_UB_int, ec_LB_int, ec_UB_int, \
bd2_LB, bd2_UB, bd3_LB, bd3_UB, \
dg_LB, dg_UB, ac_LB_lf, ac_UB_lf, ac_LB_lf_common, ac_UB_lf_common, r_GC
def get_value(filename):
y_min = 0
y_max = 0
ind = 0
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
if len(line.split(",")) < 2:
continue
if line.split(",")[0] == "CID":
continue
if ind == 0:
y_min = float(line.split(",")[1])
y_max = float(line.split(",")[1])
ind = 1
else:
y_tmp = float(line.split(",")[1])
if y_tmp > y_max:
y_max = y_tmp
if y_tmp < y_min:
y_min = y_tmp
return y_min, y_max
# prepare a set of chemical rooted tree
class chemicalRootedTree():
def __init__(self):
self.root = ("e", 0)
self.index = 0
self.vertex = []
self.adj = []
self.alpha = []
self.beta = []
self.height = 0
self.chg = []
def prepare_fringe_trees(fringe_filename, Lambda):
# modified for 2LMM, 0527
set_F = list()
strF = dict()
fc_LB = dict()
fc_UB = dict()
with open(fringe_filename,'r') as f:
lines = f.readlines()
for line in lines:
if len(line.split(",")) < 4:
continue
ind = int(line.split(",")[0])
str1 = line.split(",")[1]
str2 = line.split(",")[2]
str3 = line.split(",")[3].replace('\n', '')
if len(line.split(",")) > 4:
LB_tmp = line.split(",")[4].replace('\n', '')
LB_tmp = LB_tmp.replace(' ', '')
fc_LB[ind] = int(LB_tmp)
UB_tmp = line.split(",")[5].replace('\n', '')
UB_tmp = UB_tmp.replace(' ', '')
fc_UB[ind] = int(UB_tmp)
else:
fc_LB[ind] = 0
fc_UB[ind] = 10
psi = chemicalRootedTree()
seq1 = str1.split()
seq2 = [int(mul) for mul in line.split(",")[2].split()]
seq3 = [int(chg) for chg in line.split(",")[3].split()]
psi.index = ind
psi.vertex = [(seq1[j], int(seq1[j + 1])) for j in range(0, len(seq1), 2)]
psi.root = psi.vertex[0]
psi.height = max(psi.vertex[v][1] for v in range(len(psi.vertex)) if psi.vertex[v][0] != "H1")
psi.adj = [set() for _ in range(len(psi.vertex))]
psi.beta = [[0 for _ in range(len(psi.vertex))] for _ in range(len(psi.vertex))]
psi.chg = [chg for chg in seq3]
for j in range(len(seq2)):
cld = j + 1
prt = max(v for v in range(j + 1) if psi.vertex[v][1] == psi.vertex[cld][1] - 1)
psi.adj[prt].add(cld)
psi.adj[cld].add(prt)
psi.beta[prt][cld] = seq2[j]
psi.beta[cld][prt] = seq2[j]
# print(str(prt) + " " + str(cld) + " " + str(j) + " " + str(seq2[j]))
flag = True
for (a, d) in psi.vertex:
if a not in Lambda:
flag = False
break
if flag:
strF[ind] = (str1, str2, str3)
set_F.append(psi)
Lambda_ex = list()
for psi in set_F:
for (a, d) in psi.vertex[1:]:
if a not in Lambda_ex and a in Lambda:
Lambda_ex.append(a)
return set_F, Lambda_ex, strF, fc_LB, fc_UB
if __name__=="__main__":
V_C, E_C, \
E_ge_two, E_ge_one, E_zero_one, E_equal_one, \
I_ge_two, I_ge_one, I_zero_one, I_equal_one, \
ell_LB, ell_UB, n_LB_int, n_UB_int, \
n_LB, n_star, rho, \
ch_LB, ch_UB, bl_LB, bl_UB, \
Lambda, Lambda_dg_int, Gamma_int_ac, Gamma_int, \
Lambda_star, na_LB, na_UB, Lambda_int, \
na_LB_int, na_UB_int, ns_LB_int, ns_UB_int, \
ac_LB_int, ac_UB_int, ec_LB_int, ec_UB_int, \
bd2_LB, bd2_UB, bd3_LB, bd3_UB, \
dg_LB, dg_UB, ac_LB_lf, ac_UB_lf, ac_LB_lf_common, ac_UB_lf_common, r_GC = read_seed_graph(sys.argv[1])
set_F, Lambda_ex, strF, fc_LB, fc_UB = prepare_fringe_trees(sys.argv[2], Lambda)
# print(V_C)
# print(E_C)
# print(E_ge_two)
# print(E_ge_one)
# print(E_zero_one)
# print(E_equal_one)
# print(ell_LB)
# print(ell_UB)
# print(bl_UB)
for psi in set_F:
    # print a brief summary of each fringe tree: index, size, height and root label
    print(str(psi.index) + " " + str(len(psi.vertex)) + " " +
          str(psi.height) + " " + str(psi.root[0]))
# print(Lambda_ex)
# set_F_v = {v : set_F for v in V_C}
# set_F_E = set_F
# n_C = max(psi.numVertex - 1 for v in V_C for psi in set_F_v[v])
# n_T = max(psi.numVertex - 1 for psi in set_F_E)
# n_F = max(psi.numVertex - 1 for psi in set_F_E)
# print(str(n_C) + " " + str(n_T) + " " + str(n_F))
MAX_VAL = 4
val = {"C": 4, "O": 2, "N": 3}
n_H = dict()
na_alpha_ex = {ele : {i + 1 : 0} for i in range(len(set_F)) for ele in Lambda_ex}
for i, psi in enumerate(set_F):
n_H_tmp = {d : 0 for d in range(MAX_VAL)}
na_ex_tmp = {ele : 0 for ele in Lambda_ex}
for u, (ele, dep) in enumerate(psi.vertex[1:]):
beta_tmp = 0
na_ex_tmp[ele] += 1
for v in psi.adj[u + 1]:
beta_tmp += psi.beta[u + 1][v]
d_tmp = val[ele] - beta_tmp
n_H_tmp[d_tmp] += 1
for ele, d in na_alpha_ex.items():
d[i + 1] = na_ex_tmp[ele]
n_H[i + 1] = n_H_tmp
print(n_H)
print(na_alpha_ex)
| 29.421505
| 133
| 0.493166
| 2,339
| 13,681
| 2.635742
| 0.081659
| 0.025953
| 0.03163
| 0.03017
| 0.534955
| 0.466504
| 0.426764
| 0.397567
| 0.349878
| 0.311922
| 0
| 0.031969
| 0.33923
| 13,681
| 464
| 134
| 29.484914
| 0.65
| 0.087201
| 0
| 0.312139
| 0
| 0
| 0.009064
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014451
| false
| 0
| 0.00289
| 0
| 0.031792
| 0.008671
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
534d47fdc7a25cba8b55b44734cb77c92e4d9b0f
| 1,580
|
py
|
Python
|
Stage_3/Task11_Graph/depth_first_search.py
|
Pyabecedarian/Algorithms-and-Data-Structures-using-Python
|
08642357df60d48cb185b5487150204b42764260
|
[
"MIT"
] | null | null | null |
Stage_3/Task11_Graph/depth_first_search.py
|
Pyabecedarian/Algorithms-and-Data-Structures-using-Python
|
08642357df60d48cb185b5487150204b42764260
|
[
"MIT"
] | null | null | null |
Stage_3/Task11_Graph/depth_first_search.py
|
Pyabecedarian/Algorithms-and-Data-Structures-using-Python
|
08642357df60d48cb185b5487150204b42764260
|
[
"MIT"
] | null | null | null |
"""
The Depth First Search (DFS)
The goal of a dfs is to search as deeply as possible, connecting as many nodes in the graph as possible and
branching where necessary. Whereas BFS builds its search tree one level at a time, DFS
creates its search tree by exploring one branch of the tree as deeply as possible.
As with BFS, the DFS makes use of `predecessor` links to construct the tree. In
addition, the dfs will make use of two additional instance variables in the Vertex class, `discovery` and
`finish_time`.
predecessor : same as bfs
discovery : tracks the number of steps in the algorithm before a vertex is first encountered;
finish_time : is the number of steps before a vertex is colored black
"""
from datastruct.graph import Vertex, Graph
class DFSGraph(Graph):
def __init__(self):
super(DFSGraph, self).__init__()
self.time = 0
def reset(self):
self.time = 0
for v in self:
v.color = 'white'
v.predecessor = None
def dfs(self):
self.reset()
for v in self:
if v.color == 'white':
self._dfs_visit(v)
def _dfs_visit(self, vert: Vertex):
vert.color = 'gray'
self.time += 1
vert.discovery = self.time
for nextv in vert.get_connections():
if nextv.color == 'white':
nextv.predecessor = vert
self._dfs_visit(nextv)
vert.color = 'black'
self.time += 1
vert.finish_time = self.time
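# --- usage sketch (editor's addition, not part of the original module) ---
# A minimal driver for DFSGraph. It assumes the base Graph class in
# datastruct.graph exposes an add_edge(from_key, to_key) helper and that
# iterating the graph yields Vertex objects; both are assumptions about that
# local module, so adjust the calls if its real API differs.
if __name__ == '__main__':
    g = DFSGraph()
    for frm, to in [('A', 'B'), ('A', 'C'), ('B', 'D'), ('C', 'D')]:
        g.add_edge(frm, to)  # assumed helper on the base Graph class
    g.dfs()
    for v in g:
        # discovery/finish_time are the counters set by _dfs_visit above
        print(v, v.discovery, v.finish_time)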
| 30.384615
| 111
| 0.623418
| 224
| 1,580
| 4.316964
| 0.379464
| 0.049638
| 0.020683
| 0.037229
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003653
| 0.306962
| 1,580
| 51
| 112
| 30.980392
| 0.879452
| 0.485443
| 0
| 0.230769
| 0
| 0
| 0.029963
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.038462
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
534f4e03ba246b728b20809e5d71ee70468b20fb
| 2,886
|
py
|
Python
|
test_backtest/simplebacktest.py
|
qzm/QUANTAXIS
|
055fdc16d67670fb4770e7097865336199e55f3e
|
[
"MIT"
] | 1
|
2021-05-20T12:33:46.000Z
|
2021-05-20T12:33:46.000Z
|
test_backtest/simplebacktest.py
|
qzm/QUANTAXIS
|
055fdc16d67670fb4770e7097865336199e55f3e
|
[
"MIT"
] | null | null | null |
test_backtest/simplebacktest.py
|
qzm/QUANTAXIS
|
055fdc16d67670fb4770e7097865336199e55f3e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import QUANTAXIS as QA
import random
"""
This code is intended to show an extremely easy-to-implement small backtest: efficient, with no event-driven engine.
"""
B = QA.QA_BacktestBroker()
AC = QA.QA_Account()
"""
# Set the account's initial capital
AC.reset_assets(assets)
# Send an order
Order=AC.send_order(code='000001',amount=1000,time='2018-03-21',towards=QA.ORDER_DIRECTION.BUY,price=0,order_model=QA.ORDER_MODEL.MARKET,amount_model=QA.AMOUNT_MODEL.BY_AMOUNT)
# Match (execute) the order through the broker
dealmes=B.receive_order(QA.QA_Event(order=Order,market_data=data))
# Update the account with the deal message
AC.receive_deal(dealmes)
# Analyze the results
risk=QA.QA_Risk(AC)
"""
AC.reset_assets(20000000) # set the initial capital
def simple_backtest(AC, code, start, end):
DATA = QA.QA_fetch_stock_day_adv(code, start, end).to_qfq()
for items in DATA.panel_gen: # iterate one trading day at a time
for item in items.security_gen:
if random.random()>0.5: # add a random choice to simulate buying and selling
if AC.sell_available.get(item.code[0], 0) == 0:
order=AC.send_order(
code=item.data.code[0], time=item.data.date[0], amount=1000, towards=QA.ORDER_DIRECTION.BUY, price=0, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
AC.receive_deal(B.receive_order(QA.QA_Event(order=order,market_data=item)))
else:
AC.receive_deal(B.receive_order(QA.QA_Event(order=AC.send_order(
code=item.data.code[0], time=item.data.date[0], amount=1000, towards=QA.ORDER_DIRECTION.SELL, price=0, order_model=QA.ORDER_MODEL.MARKET, amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
),market_data=item)))
AC.settle()
simple_backtest(AC, QA.QA_fetch_stock_block_adv(
).code[0:10], '2017-01-01', '2018-01-31')
print(AC.message)
AC.save()
risk = QA.QA_Risk(AC)
print(risk.message)
risk.save()
| 37.480519
| 200
| 0.711365
| 443
| 2,886
| 4.514673
| 0.392777
| 0.018
| 0.0165
| 0.024
| 0.2865
| 0.2625
| 0.2625
| 0.2625
| 0.2625
| 0.2625
| 0
| 0.031383
| 0.182952
| 2,886
| 77
| 201
| 37.480519
| 0.816794
| 0.389466
| 0
| 0
| 0
| 0
| 0.014959
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.074074
| 0
| 0.111111
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53500afefcda695385af9237df24a3052bec880e
| 4,010
|
py
|
Python
|
artview/components/field.py
|
jjhelmus/artview
|
2af5ccad8d509d11ef6da7c97bee0f7b255b6879
|
[
"BSD-3-Clause"
] | null | null | null |
artview/components/field.py
|
jjhelmus/artview
|
2af5ccad8d509d11ef6da7c97bee0f7b255b6879
|
[
"BSD-3-Clause"
] | null | null | null |
artview/components/field.py
|
jjhelmus/artview
|
2af5ccad8d509d11ef6da7c97bee0f7b255b6879
|
[
"BSD-3-Clause"
] | null | null | null |
"""
field.py
Class instance used for modifying field via Display window.
"""
# Load the needed packages
from functools import partial
from ..core import Variable, Component, QtGui, QtCore
class FieldButtonWindow(Component):
'''Class to display a Window with Field name radio buttons.'''
Vradar = None #: see :ref:`shared_variable`
Vfield = None #: see :ref:`shared_variable`
def __init__(self, Vradar=None, Vfield=None, name="FieldButtons",
parent=None):
'''
Initialize the class to create the interface.
Parameters
----------
[Optional]
Vradar : :py:class:`~artview.core.core.Variable` instance
Radar signal variable. If None, start a new one initialized to None.
Vfield : :py:class:`~artview.core.core.Variable` instance
Field signal variable. If None, start a new one initialized to an empty string.
name : string
Field Radiobutton window name.
parent : PyQt instance
Parent instance to associate to FieldButtonWindow.
If None, then Qt owns, otherwise associated with parent PyQt
instance.
Notes
-----
This class records the selected button and passes the
change value back to variable.
'''
super(FieldButtonWindow, self).__init__(name=name, parent=parent)
# Set up signal, so that DISPLAY can react to external
# (or internal) changes in field (Core.Variable instances expected)
# The change is sent through Vfield
if Vradar is None:
self.Vradar = Variable(None)
else:
self.Vradar = Vradar
if Vfield is None:
self.Vfield = Variable('')
else:
self.Vfield = Vfield
self.sharedVariables = {"Vradar": self.NewRadar,
"Vfield": self.NewField}
self.connectAllVariables()
self.CreateFieldWidget()
self.SetFieldRadioButtons()
self.show()
########################
# Button methods #
########################
def FieldSelectCmd(self, field):
'''Captures a selection and updates field variable.'''
self.Vfield.change(field)
def CreateFieldWidget(self):
'''Create a widget to store radio buttons to control field adjust.'''
self.radioBox = QtGui.QGroupBox("Field Selection", parent=self)
self.rBox_layout = QtGui.QVBoxLayout(self.radioBox)
self.radioBox.setLayout(self.rBox_layout)
self.setCentralWidget(self.radioBox)
def SetFieldRadioButtons(self):
'''Set a field selection using radio buttons.'''
# Instantiate the buttons into a list for future use
self.fieldbutton = {}
if self.Vradar.value is None:
return
# Loop through and create each field button and
# connect a value when selected
for field in self.Vradar.value.fields.keys():
button = QtGui.QRadioButton(field, self.radioBox)
self.fieldbutton[field] = button
QtCore.QObject.connect(button, QtCore.SIGNAL("clicked()"),
partial(self.FieldSelectCmd, field))
self.rBox_layout.addWidget(button)
# set Checked the current field
self.NewField(self.Vfield, self.Vfield.value, True)
def NewField(self, variable, value, strong):
'''Slot for 'ValueChanged' signal of
:py:class:`Vfield <artview.core.core.Variable>`.
This will:
* Update radio check
'''
if (self.Vradar.value is not None and
value in self.Vradar.value.fields):
self.fieldbutton[value].setChecked(True)
def NewRadar(self, variable, value, strong):
'''Slot for 'ValueChanged' signal of
:py:class:`Vradar <artview.core.core.Variable>`.
This will:
* Recreate radio items
'''
self.CreateFieldWidget()
self.SetFieldRadioButtons()
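# --- usage sketch (editor's addition, not part of the original module) ---
# Wiring the window to shared variables, assuming a Py-ART Radar object is
# available and a Qt application is already running; the initial field name
# below is only an example.
def make_field_buttons(radar, initial_field='reflectivity'):
    '''Create a FieldButtonWindow bound to fresh shared variables.'''
    vradar = Variable(radar)
    vfield = Variable(initial_field)
    return FieldButtonWindow(Vradar=vradar, Vfield=vfield, name="FieldButtons")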
| 32.868852
| 77
| 0.605486
| 433
| 4,010
| 5.577367
| 0.337182
| 0.028986
| 0.024845
| 0.038095
| 0.184679
| 0.130021
| 0.104348
| 0.047205
| 0.047205
| 0.047205
| 0
| 0
| 0.293267
| 4,010
| 121
| 78
| 33.140496
| 0.852152
| 0.385786
| 0
| 0.12766
| 0
| 0
| 0.022419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12766
| false
| 0
| 0.042553
| 0
| 0.255319
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5351910933f7e53efc48e359df0170e503cf6959
| 8,375
|
py
|
Python
|
src/diepvries/field.py
|
michael-the1/diepvries
|
ddba9c91ee5fb2014dc576ffb74faa40c3d0d04f
|
[
"MIT"
] | 67
|
2021-08-20T14:30:49.000Z
|
2022-03-22T23:37:08.000Z
|
src/diepvries/field.py
|
michael-the1/diepvries
|
ddba9c91ee5fb2014dc576ffb74faa40c3d0d04f
|
[
"MIT"
] | 1
|
2022-01-22T08:19:38.000Z
|
2022-02-02T08:48:34.000Z
|
src/diepvries/field.py
|
michael-the1/diepvries
|
ddba9c91ee5fb2014dc576ffb74faa40c3d0d04f
|
[
"MIT"
] | 6
|
2021-09-03T17:21:16.000Z
|
2021-12-22T12:11:51.000Z
|
"""Module for a Data Vault field."""
from typing import Optional
from . import (
FIELD_PREFIX,
FIELD_SUFFIX,
METADATA_FIELDS,
TABLE_PREFIXES,
UNKNOWN,
FieldDataType,
FieldRole,
TableType,
)
class Field:
"""A field in a Data Vault model."""
def __init__(
self,
parent_table_name: str,
name: str,
data_type: FieldDataType,
position: int,
is_mandatory: bool,
precision: int = None,
scale: int = None,
length: int = None,
):
"""Instantiate a Field.
Convert both name and parent_table_name to lower case.
Args:
parent_table_name: Name of parent table in the database.
name: Column name in the database.
data_type: Column data type in the database.
position: Column position in the database.
is_mandatory: Column is mandatory in the database.
precision: Numeric precision (maximum number of digits before the decimal
separator). Only applicable when `self.data_type==FieldDataType.NUMBER`.
scale: Numeric scale (maximum number of digits after the decimal
separator). Only applicable when `self.data_type==FieldDataType.NUMBER`.
length: Character length (maximum number of characters allowed). Only
applicable when `self.data_type==FieldDataType.TEXT`.
"""
self.parent_table_name = parent_table_name.lower()
self.name = name.lower()
self.data_type = data_type
self.position = position
self.is_mandatory = is_mandatory
self.precision = precision
self.scale = scale
self.length = length
def __hash__(self):
"""Hash of a Data Vault field."""
return hash(self.name_in_staging)
def __eq__(self, other):
"""Equality of a Data Vault field."""
return self.name_in_staging == other.name_in_staging
def __str__(self) -> str:
"""Representation of a Field object as a string.
This helps the tracking of logging events per entity.
Returns:
String representation for the `Field` object.
"""
return f"{type(self).__name__}: {self.name}"
@property
def data_type_sql(self) -> str:
"""Build SQL expression to represent the field data type."""
if self.data_type == FieldDataType.NUMBER:
return f"{self.data_type.value} ({self.precision}, {self.scale})"
if self.data_type == FieldDataType.TEXT and self.length:
return f"{self.data_type.value} ({self.length})"
return f"{self.data_type.name}"
@property
def hash_concatenation_sql(self) -> str:
"""Build SQL expression to deterministically represent the field as a string.
This expression is needed to produce hashes (hashkey/hashdiff) that are
consistent, independently of the data type used to store the field in the
extraction table.
The SQL expression does the following steps:
1. Cast field to its data type in the DV model.
2. Produce a consistent string representation of the result of step 1, depending
on the field data type.
3. Ensure the result of step 2 never returns NULL.
Returns:
SQL expression to deterministically represent the field as a string.
"""
hash_concatenation_sql = ""
date_format = "yyyy-mm-dd"
time_format = "hh24:mi:ss.ff9"
timezone_format = "tzhtzm"
cast_expression = (
f"CAST({self.name} AS {self.data_type_sql})"
if self.data_type != FieldDataType.GEOGRAPHY
else f"TO_GEOGRAPHY({self.name})"
)
if self.data_type in (FieldDataType.TIMESTAMP_LTZ, FieldDataType.TIMESTAMP_TZ):
hash_concatenation_sql = (
f"TO_CHAR({cast_expression}, "
f"'{date_format} {time_format} {timezone_format}')"
)
elif self.data_type == FieldDataType.TIMESTAMP_NTZ:
hash_concatenation_sql = (
f"TO_CHAR({cast_expression}, '{date_format} {time_format}')"
)
elif self.data_type == FieldDataType.DATE:
hash_concatenation_sql = f"TO_CHAR({cast_expression}, '{date_format}')"
elif self.data_type == FieldDataType.TIME:
hash_concatenation_sql = f"TO_CHAR({cast_expression}, '{time_format}')"
elif self.data_type == FieldDataType.TEXT:
hash_concatenation_sql = cast_expression
elif self.data_type == FieldDataType.GEOGRAPHY:
hash_concatenation_sql = f"ST_ASTEXT({cast_expression})"
else:
hash_concatenation_sql = f"CAST({cast_expression} AS TEXT)"
default_value = UNKNOWN if self.role == FieldRole.BUSINESS_KEY else ""
return f"COALESCE({hash_concatenation_sql}, '{default_value}')"
@property
def suffix(self) -> str:
"""Get field suffix.
Returns:
Field suffix.
"""
return self.name.split("_").pop()
@property
def prefix(self) -> str:
"""Get field prefix.
Returns:
Field prefix.
"""
return next(split_part for split_part in self.name.split("_"))
@property
def parent_table_type(self) -> TableType:
"""Get parent table type, based on table prefix.
Returns:
Table type (HUB, LINK or SATELLITE).
"""
table_prefix = next(
split_part for split_part in self.parent_table_name.split("_")
)
if table_prefix in TABLE_PREFIXES[TableType.LINK]:
return TableType.LINK
if table_prefix in TABLE_PREFIXES[TableType.SATELLITE]:
return TableType.SATELLITE
return TableType.HUB
@property
def name_in_staging(self) -> str:
"""Get the name that this field should have, when created in a staging table.
In most cases this function will return `self.name`, but for hashdiffs the name
is <parent_table_name>_hashdiff (every Satellite has one hashdiff field, named
s_hashdiff).
Returns:
Name of the field in staging.
"""
if self.role == FieldRole.HASHDIFF:
return f"{self.parent_table_name}_{FIELD_SUFFIX[FieldRole.HASHDIFF]}"
return self.name
@property
def ddl_in_staging(self) -> str:
"""Get DDL expression to create this field in the staging table.
Returns:
The DDL expression for this field.
"""
return (
f"{self.name_in_staging} {self.data_type_sql}"
f"{' NOT NULL' if self.is_mandatory else ''}"
)
@property
def role(self) -> FieldRole:
"""Get the role of the field in a Data Vault model.
See `FieldRole` enum for more information.
Returns:
Field role in a Data Vault model.
Raises:
RuntimeError: When no field role can be attributed.
"""
found_role: Optional[FieldRole] = None
if self.name in METADATA_FIELDS.values():
found_role = FieldRole.METADATA
elif (
self.name == f"{self.parent_table_name}_{self.suffix}"
and self.suffix == FIELD_SUFFIX[FieldRole.HASHKEY]
):
found_role = FieldRole.HASHKEY
elif self.suffix == FIELD_SUFFIX[FieldRole.HASHKEY]:
found_role = FieldRole.HASHKEY_PARENT
elif self.prefix == FIELD_PREFIX[FieldRole.CHILD_KEY]:
found_role = FieldRole.CHILD_KEY
elif (
self.parent_table_type != TableType.SATELLITE
and self.prefix not in FIELD_PREFIX.values()
and self.position != 1
):
found_role = FieldRole.BUSINESS_KEY
elif self.suffix == FIELD_SUFFIX[FieldRole.HASHDIFF]:
found_role = FieldRole.HASHDIFF
elif self.parent_table_type == TableType.SATELLITE:
found_role = FieldRole.DESCRIPTIVE
if found_role is not None:
return found_role
raise RuntimeError(
(
f"{self.name}: It was not possible to assign a valid field role "
f" (validate FieldRole and FIELD_PREFIXES configuration)"
)
)
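# --- usage sketch (editor's addition, not part of the original module) ---
# Illustrates the SQL-building properties above with a made-up descriptive
# field on a hypothetical satellite table; only the constructor arguments and
# properties defined in this module are used, and the printed SQL is indicative.
if __name__ == "__main__":
    customer_name = Field(
        parent_table_name="hs_customer",
        name="customer_name",
        data_type=FieldDataType.TEXT,
        position=3,
        is_mandatory=False,
        length=100,
    )
    print(customer_name.ddl_in_staging)          # e.g. "customer_name TEXT (100)"
    print(customer_name.hash_concatenation_sql)  # a COALESCE(...) expression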
| 34.465021
| 88
| 0.612537
| 988
| 8,375
| 5.012146
| 0.200405
| 0.043619
| 0.043619
| 0.055533
| 0.296446
| 0.247779
| 0.209006
| 0.125606
| 0.096527
| 0.096527
| 0
| 0.001541
| 0.302567
| 8,375
| 242
| 89
| 34.607438
| 0.846259
| 0.297194
| 0
| 0.111111
| 0
| 0
| 0.162579
| 0.081754
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.014815
| 0
| 0.22963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5351c8767281abfc0e99352875444fb190e31a5e
| 5,702
|
py
|
Python
|
mmdet/datasets/deepscoresV2.py
|
tuggeluk/mmdetection
|
669a535c944628a3ab43330cae5c77b643e13a4b
|
[
"Apache-2.0"
] | 1
|
2020-01-22T15:25:20.000Z
|
2020-01-22T15:25:20.000Z
|
mmdet/datasets/deepscoresV2.py
|
tuggeluk/mmdetection
|
669a535c944628a3ab43330cae5c77b643e13a4b
|
[
"Apache-2.0"
] | 2
|
2019-12-16T10:51:41.000Z
|
2020-10-06T13:46:25.000Z
|
mmdet/datasets/deepscoresV2.py
|
tuggeluk/mmdetection
|
669a535c944628a3ab43330cae5c77b643e13a4b
|
[
"Apache-2.0"
] | 2
|
2020-04-20T08:58:40.000Z
|
2021-05-08T07:55:54.000Z
|
"""DEEPSCORESV2
Provides access to the DEEPSCORESV2 database with a COCO-like interface. The
only changes made compared to the coco.py file are the class labels.
Author:
Lukas Tuggener <tugg@zhaw.ch>
Yvan Satyawan <y_satyawan@hotmail.com>
Created on:
November 23, 2019
"""
from .coco import *
import os
import json
from obb_anns import OBBAnns
@DATASETS.register_module
class DeepScoresV2Dataset(CocoDataset):
def load_annotations(self, ann_file):
self.obb = OBBAnns(ann_file)
self.obb.load_annotations()
self.obb.set_annotation_set_filter(['deepscores'])
self.obb.set_class_blacklist(["staff"])
self.cat_ids = list(self.obb.get_cats().keys())
self.cat2label = {
cat_id: i
for i, cat_id in enumerate(self.cat_ids)
}
self.label2cat = {v: k for k, v in self.cat2label.items()}
self.CLASSES = tuple([v["name"] for (k, v) in self.obb.get_cats().items()])
self.img_ids = [id['id'] for id in self.obb.img_info]
return self.obb.img_info
def get_ann_info(self, idx):
return self._parse_ann_info(*self.obb.get_img_ann_pair(idxs=[idx]))
def _filter_imgs(self, min_size=32):
valid_inds = []
for i, img_info in enumerate(self.obb.img_info):
if self.filter_empty_gt and len(img_info['ann_ids']) == 0:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
img_info, ann_info = img_info[0], ann_info[0]
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
for i, ann in ann_info.iterrows():
# we have no ignore feature
if ann['area'] <= 0:
continue
bbox = ann['a_bbox']
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['cat_id'][0]])
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=None,
seg_map=None)
return ann
def prepare_json_dict(self, results):
json_results = {"annotation_set": "deepscores", "proposals": []}
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['img_id'] = img_id
data['bbox'] = [str(nr) for nr in bboxes[i][0:-1]]
data['score'] = str(bboxes[i][-1])
data['cat_id'] = self.label2cat[label]
json_results["proposals"].append(data)
return json_results
def write_results_json(self, results, filename=None):
if filename is None:
filename = "deepscores_results.json"
json_results = self.prepare_json_dict(results)
with open(filename, "w") as fo:
json.dump(json_results, fo)
return filename
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=True,
proposal_nums=(100, 300, 1000),
iou_thrs=np.arange(0.5, 0.96, 0.05),
average_thrs=False):
"""Evaluation in COCO protocol.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float]): IoU threshold used for evaluating
recalls. If set to a list, the average recall of all IoUs will
also be computed. Default: np.arange(0.5, 0.96, 0.05).
Returns:
dict[str: float]
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
filename = self.write_results_json(results)
self.obb.load_proposals(filename)
metric_results = self.obb.calculate_metrics(iou_thrs=iou_thrs, classwise=classwise, average_thrs=average_thrs)
metric_results = {self.CLASSES[self.cat2label[key]]: value for (key, value) in metric_results.items()}
# add occurrences
occurences_by_class = self.obb.get_class_occurences()
for (key, value) in metric_results.items():
value.update(no_occurences=occurences_by_class[key])
if True:
import pickle
pickle.dump(metric_results, open('evaluation_renamed_rcnn.pickle', 'wb'))
print(metric_results)
return metric_results
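# --- usage sketch (editor's addition, not part of the original module) ---
# Assuming `dataset` is an instantiated DeepScoresV2Dataset and `results` holds
# the usual mmdet per-image, per-class detection arrays, evaluation can be
# driven directly with the arguments defined in evaluate() above.
def run_deepscores_eval(dataset, results):
    return dataset.evaluate(results, metric='bbox', classwise=True, average_thrs=False)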
| 36.318471
| 118
| 0.590144
| 723
| 5,702
| 4.484094
| 0.312586
| 0.028069
| 0.012338
| 0.012955
| 0.047193
| 0.02992
| 0.019124
| 0
| 0
| 0
| 0
| 0.017997
| 0.308138
| 5,702
| 156
| 119
| 36.551282
| 0.803802
| 0.216941
| 0
| 0.020408
| 0
| 0
| 0.049698
| 0.012308
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.05102
| 0.010204
| 0.204082
| 0.010204
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53527ec6ef2428da3e1c97ac08275c75fd6e2545
| 1,628
|
py
|
Python
|
gui/wellplot/settings/style/wellplotstylehandler.py
|
adriangrepo/qreservoir
|
20fba1b1fd1a42add223d9e8af2d267665bec493
|
[
"MIT"
] | 2
|
2019-10-04T13:54:51.000Z
|
2021-05-21T19:36:15.000Z
|
gui/wellplot/settings/style/wellplotstylehandler.py
|
adriangrepo/qreservoir
|
20fba1b1fd1a42add223d9e8af2d267665bec493
|
[
"MIT"
] | 3
|
2019-11-19T17:06:09.000Z
|
2020-01-18T20:39:54.000Z
|
gui/wellplot/settings/style/wellplotstylehandler.py
|
adriangrepo/qreservoir
|
20fba1b1fd1a42add223d9e8af2d267665bec493
|
[
"MIT"
] | 2
|
2020-07-02T13:20:48.000Z
|
2020-11-11T00:18:51.000Z
|
import logging
from qrutilities.imageutils import ImageUtils
from PyQt4.QtGui import QColor
logger = logging.getLogger('console')
class WellPlotStyleHandler(object):
'''
classdocs
'''
def saveDataState(self, wellPlotData, wellPlotStyleWidget):
if wellPlotStyleWidget.plotTitleOnCheckBox.isChecked():
wellPlotData.title_on = True
else:
wellPlotData.title_on = False
wellPlotData.title = wellPlotStyleWidget.plotTitleLineEdit.text()
r,g,b,a = QColor(wellPlotStyleWidget.trackBackgroundColorPushButton.color()).getRgb()
rgbString = ImageUtils.rgbToString(r,g,b)
wellPlotData.plot_background_rgb = rgbString
wellPlotData.plot_background_alpha = wellPlotStyleWidget.trackBackgroundOpacitySpinBox.value()
r,g,b,a = QColor(wellPlotStyleWidget.labelBackgroundColorPushButton.color()).getRgb()
rgbString = ImageUtils.rgbToString(r,g,b)
wellPlotData.label_background_rgb = rgbString
wellPlotData.label_background_alpha = wellPlotStyleWidget.labelBackgroundOpacitySpinBox.value()
r,g,b,a = QColor(wellPlotStyleWidget.labelForegroundColorPushButton.color()).getRgb()
rgbString = ImageUtils.rgbToString(r,g,b)
wellPlotData.label_foreground_rgb = rgbString
wellPlotData.label_foreground_alpha = wellPlotStyleWidget.labelForegroundOpacitySpinBox.value()
if wellPlotStyleWidget.singleRowLabelsCheckBox.isChecked():
wellPlotData.single_row_header_labels = True
else:
wellPlotData.single_row_header_labels = False
| 45.222222
| 103
| 0.72973
| 141
| 1,628
| 8.283688
| 0.397163
| 0.010274
| 0.015411
| 0.010274
| 0.291952
| 0.235445
| 0.210616
| 0.152397
| 0.152397
| 0.104452
| 0
| 0.000759
| 0.191032
| 1,628
| 36
| 104
| 45.222222
| 0.886105
| 0.005528
| 0
| 0.185185
| 0
| 0
| 0.004364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.111111
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5353098565b09d0a6b37ec215ad6356db9a8d2af
| 599
|
py
|
Python
|
utm_messages/urls.py
|
geoffreynyaga/ANGA-UTM
|
8371a51ad27c85d2479bb34d8c4e02ea28465941
|
[
"Apache-2.0"
] | 7
|
2020-01-18T16:53:41.000Z
|
2021-12-21T07:02:43.000Z
|
utm_messages/urls.py
|
geoffreynyaga/ANGA-UTM
|
8371a51ad27c85d2479bb34d8c4e02ea28465941
|
[
"Apache-2.0"
] | 28
|
2020-01-06T18:36:54.000Z
|
2022-02-10T10:03:55.000Z
|
utm_messages/urls.py
|
geoffreynyaga/ANGA-UTM
|
8371a51ad27c85d2479bb34d8c4e02ea28465941
|
[
"Apache-2.0"
] | 3
|
2020-01-18T16:53:54.000Z
|
2020-10-26T11:21:41.000Z
|
from django.conf.urls import url
from . import views
app_name = "messages"
urlpatterns = [
url(r'^$', views.InboxListView.as_view(), name='inbox'),
url(r'^sent/$', views.SentMessagesListView.as_view(), name='sent'),
url(r'^compose/$', views.MessagesCreateView.as_view(), name='compose'),
# url(r'^compose-all/$', views.SendToAll.as_view(), name='compose_to_all'),
url(r'^(?P<pk>\d+)/$', views.MessageDetailView.as_view(), name='message_detail'),
url(r'^calendar/$', views.CalendarView.as_view(), name='calendar'),
]
| 29.95
| 90
| 0.60601
| 72
| 599
| 4.902778
| 0.430556
| 0.067989
| 0.169972
| 0.096317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.195326
| 599
| 19
| 91
| 31.526316
| 0.732365
| 0.12187
| 0
| 0
| 0
| 0
| 0.178218
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5354d3bcbb084eaac2e9dc5457335c7f402533a9
| 12,221
|
py
|
Python
|
nova/policies/servers.py
|
maya2250/nova
|
e483ca1cd9a5db5856f87fc69ca07c42d2be5def
|
[
"Apache-2.0"
] | null | null | null |
nova/policies/servers.py
|
maya2250/nova
|
e483ca1cd9a5db5856f87fc69ca07c42d2be5def
|
[
"Apache-2.0"
] | 1
|
2020-11-05T17:42:24.000Z
|
2020-11-05T17:42:24.000Z
|
nova/policies/servers.py
|
Mattlk13/nova
|
5b13eb59540aaf535a53920e783964d106de2620
|
[
"Apache-2.0"
] | 1
|
2020-07-22T22:14:40.000Z
|
2020-07-22T22:14:40.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
RULE_AOO = base.RULE_ADMIN_OR_OWNER
SERVERS = 'os_compute_api:servers:%s'
NETWORK_ATTACH_EXTERNAL = 'network:attach_external_network'
ZERO_DISK_FLAVOR = SERVERS % 'create:zero_disk_flavor'
REQUESTED_DESTINATION = 'compute:servers:create:requested_destination'
CROSS_CELL_RESIZE = 'compute:servers:resize:cross_cell'
rules = [
policy.DocumentedRuleDefault(
SERVERS % 'index',
RULE_AOO,
"List all servers",
[
{
'method': 'GET',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'detail',
RULE_AOO,
"List all servers with detailed information",
[
{
'method': 'GET',
'path': '/servers/detail'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'index:get_all_tenants',
base.RULE_ADMIN_API,
"List all servers for all projects",
[
{
'method': 'GET',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'detail:get_all_tenants',
base.RULE_ADMIN_API,
"List all servers with detailed information for all projects",
[
{
'method': 'GET',
'path': '/servers/detail'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'allow_all_filters',
base.RULE_ADMIN_API,
"Allow all filters when listing servers",
[
{
'method': 'GET',
'path': '/servers'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'show',
RULE_AOO,
"Show a server",
[
{
'method': 'GET',
'path': '/servers/{server_id}'
}
]),
# the details in host_status are pretty sensitive, only admins
# should do that by default.
policy.DocumentedRuleDefault(
SERVERS % 'show:host_status',
base.RULE_ADMIN_API,
"""
Show a server with additional host status information.
This means host_status will be shown irrespective of status value. If showing
only host_status UNKNOWN is desired, use the
``os_compute_api:servers:show:host_status:unknown-only`` policy rule.
Microversion 2.75 added the ``host_status`` attribute in the
``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action (rebuild)``
API responses which are also controlled by this policy rule, like the
``GET /servers*`` APIs.
""",
[
{
'method': 'GET',
'path': '/servers/{server_id}'
},
{
'method': 'GET',
'path': '/servers/detail'
},
{
'method': 'PUT',
'path': '/servers/{server_id}'
},
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'show:host_status:unknown-only',
base.RULE_ADMIN_API,
"""
Show a server with additional host status information, only if host status is
UNKNOWN.
This policy rule will only be enforced when the
``os_compute_api:servers:show:host_status`` policy rule does not pass for the
request. An example policy configuration could be where the
``os_compute_api:servers:show:host_status`` rule is set to allow admin-only and
the ``os_compute_api:servers:show:host_status:unknown-only`` rule is set to
allow everyone.
""",
[
{
'method': 'GET',
'path': '/servers/{server_id}'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create',
RULE_AOO,
"Create a server",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create:forced_host',
base.RULE_ADMIN_API,
"""
Create a server on the specified host and/or node.
In this case, the server is forced to launch on the specified
host and/or node by bypassing the scheduler filters unlike the
``compute:servers:create:requested_destination`` rule.
""",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
REQUESTED_DESTINATION,
base.RULE_ADMIN_API,
"""
Create a server on the requested compute service host and/or
hypervisor_hostname.
In this case, the requested host and/or hypervisor_hostname is
validated by the scheduler filters unlike the
``os_compute_api:servers:create:forced_host`` rule.
""",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create:attach_volume',
RULE_AOO,
"Create a server with the requested volume attached to it",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create:attach_network',
RULE_AOO,
"Create a server with the requested network attached to it",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create:trusted_certs',
RULE_AOO,
"Create a server with trusted image certificate IDs",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
ZERO_DISK_FLAVOR,
base.RULE_ADMIN_API,
"""
This rule controls the compute API validation behavior of creating a server
with a flavor that has 0 disk, indicating the server should be volume-backed.
For a flavor with disk=0, the root disk will be set to exactly the size of the
image used to deploy the instance. However, in this case the filter_scheduler
cannot select the compute host based on the virtual image size. Therefore, 0
should only be used for volume booted instances or for testing purposes.
WARNING: It is a potential security exposure to enable this policy rule
if users can upload their own images since repeated attempts to
create a disk=0 flavor instance with a large image can exhaust
the local disk of the compute (or shared storage cluster). See bug
https://bugs.launchpad.net/nova/+bug/1739646 for details.
""",
[
{
'method': 'POST',
'path': '/servers'
}
]),
policy.DocumentedRuleDefault(
NETWORK_ATTACH_EXTERNAL,
'is_admin:True',
"Attach an unshared external network to a server",
[
# Create a server with a requested network or port.
{
'method': 'POST',
'path': '/servers'
},
# Attach a network or port to an existing server.
{
'method': 'POST',
'path': '/servers/{server_id}/os-interface'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'delete',
RULE_AOO,
"Delete a server",
[
{
'method': 'DELETE',
'path': '/servers/{server_id}'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'update',
RULE_AOO,
"Update a server",
[
{
'method': 'PUT',
'path': '/servers/{server_id}'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'confirm_resize',
RULE_AOO,
"Confirm a server resize",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (confirmResize)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'revert_resize',
RULE_AOO,
"Revert a server resize",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (revertResize)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'reboot',
RULE_AOO,
"Reboot a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (reboot)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'resize',
RULE_AOO,
"Resize a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (resize)'
}
]),
policy.DocumentedRuleDefault(
CROSS_CELL_RESIZE,
base.RULE_NOBODY,
"Resize a server across cells. By default, this is disabled for all "
"users and recommended to be tested in a deployment for admin users "
"before opening it up to non-admin users. Resizing within a cell is "
"the default preferred behavior even if this is enabled. ",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (resize)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'rebuild',
RULE_AOO,
"Rebuild a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'rebuild:trusted_certs',
RULE_AOO,
"Rebuild a server with trusted image certificate IDs",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (rebuild)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create_image',
RULE_AOO,
"Create an image from a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (createImage)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'create_image:allow_volume_backed',
RULE_AOO,
"Create an image from a volume backed server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (createImage)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'start',
RULE_AOO,
"Start a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (os-start)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'stop',
RULE_AOO,
"Stop a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (os-stop)'
}
]),
policy.DocumentedRuleDefault(
SERVERS % 'trigger_crash_dump',
RULE_AOO,
"Trigger crash dump in a server",
[
{
'method': 'POST',
'path': '/servers/{server_id}/action (trigger_crash_dump)'
}
]),
]
def list_rules():
return rules
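# --- usage sketch (editor's addition, not part of the original module) ---
# Roughly how these defaults are consumed elsewhere in nova: an oslo.policy
# Enforcer is built and the documented defaults from list_rules() are
# registered with it. The configuration handling is simplified here.
def _example_register_defaults():
    from oslo_config import cfg
    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_defaults(list_rules())
    return enforcer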
| 29.734793
| 79
| 0.524262
| 1,177
| 12,221
| 5.329652
| 0.220901
| 0.063128
| 0.140921
| 0.073649
| 0.502949
| 0.441256
| 0.394548
| 0.343058
| 0.257134
| 0.218875
| 0
| 0.002341
| 0.370837
| 12,221
| 410
| 80
| 29.807317
| 0.8135
| 0.059897
| 0
| 0.448378
| 0
| 0
| 0.305778
| 0.074226
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00295
| false
| 0
| 0.0059
| 0.00295
| 0.011799
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53564fa8ddf1d013bfaf1e0a0630a501757ce124
| 1,504
|
py
|
Python
|
week02/day08.py
|
gtadeus/LeetCodeChallenge2009
|
81d3fae205fb9071d7a98260df9bbeb1c8c8ffe0
|
[
"MIT"
] | null | null | null |
week02/day08.py
|
gtadeus/LeetCodeChallenge2009
|
81d3fae205fb9071d7a98260df9bbeb1c8c8ffe0
|
[
"MIT"
] | null | null | null |
week02/day08.py
|
gtadeus/LeetCodeChallenge2009
|
81d3fae205fb9071d7a98260df9bbeb1c8c8ffe0
|
[
"MIT"
] | null | null | null |
import unittest
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def sumRootToLeaf(self, root: TreeNode) -> int:
m = self.c(root)
r=0
for n in m:
if n != 0:
if n== 1:
r+=1
else:
r+=int(n,2)
return r
def c(self, l):
if l.left is None and l.right is None:
return [l.val]
else:
p, p2 = [], []
if not l.left is None:
p=self.c(l.left)
if not l.right is None:
p2=self.c(l.right)
v=f'{l.val}'
#v = l.val << 1
for i, x in enumerate(p):
if not l.left is None:
p[i]=f'{v}{x}'
for i, x in enumerate(p2):
if not l.right is None:
p2[i]=f'{v}{x}'
return p+p2
class TestDay08(unittest.TestCase):
S = Solution()
input_ = [ TreeNode(1, TreeNode(0, TreeNode(0,None,None), TreeNode(1,None,None)), TreeNode(1, TreeNode(0,None,None), TreeNode(1,None,None))) ]
solutions = [22]
def testSumRoot(self):
for indx, val in enumerate(self.input_):
self.assertEqual(self.solutions[indx], self.S.sumRootToLeaf(val))
if __name__ == "__main__":
unittest.main()
| 28.923077
| 146
| 0.475399
| 203
| 1,504
| 3.453202
| 0.26601
| 0.051355
| 0.034237
| 0.047076
| 0.245364
| 0.199715
| 0.199715
| 0.097004
| 0
| 0
| 0
| 0.025471
| 0.399601
| 1,504
| 52
| 147
| 28.923077
| 0.750831
| 0.031915
| 0
| 0.139535
| 0
| 0
| 0.018569
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 1
| 0.093023
| false
| 0
| 0.023256
| 0
| 0.325581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5358824aa89abe42dc1e1bfd86a2b5480905c96d
| 411
|
py
|
Python
|
src/tests/test_stop_at_task.py
|
francesco-p/FACIL
|
e719deebb6d2acb5778b60759294c23ea5e2b454
|
[
"MIT"
] | 243
|
2020-09-22T11:26:34.000Z
|
2022-03-31T13:16:21.000Z
|
src/tests/test_stop_at_task.py
|
francesco-p/FACIL
|
e719deebb6d2acb5778b60759294c23ea5e2b454
|
[
"MIT"
] | 15
|
2021-05-09T08:48:15.000Z
|
2022-03-28T16:07:45.000Z
|
src/tests/test_stop_at_task.py
|
francesco-p/FACIL
|
e719deebb6d2acb5778b60759294c23ea5e2b454
|
[
"MIT"
] | 52
|
2021-03-01T15:08:29.000Z
|
2022-03-28T19:53:14.000Z
|
from tests import run_main_and_assert
FAST_LOCAL_TEST_ARGS = "--exp-name local_test --datasets mnist" \
" --network LeNet --num-tasks 5 --seed 1 --batch-size 32" \
" --nepochs 2 --num-workers 0 --stop-at-task 3"
def test_finetuning_stop_at_task():
args_line = FAST_LOCAL_TEST_ARGS
args_line += " --approach finetuning"
run_main_and_assert(args_line)
| 34.25
| 82
| 0.647202
| 58
| 411
| 4.241379
| 0.637931
| 0.109756
| 0.081301
| 0.130081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022508
| 0.243309
| 411
| 11
| 83
| 37.363636
| 0.768489
| 0
| 0
| 0
| 0
| 0
| 0.389294
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53591d67014d7a8167c868c3b270950bcf55cca8
| 375
|
py
|
Python
|
Python/contains-duplicate.py
|
shreyventure/LeetCode-Solutions
|
74423d65702b78974e390f17c9d6365d17e6eed5
|
[
"MIT"
] | 388
|
2020-06-29T08:41:27.000Z
|
2022-03-31T22:55:05.000Z
|
Python/contains-duplicate.py
|
shreyventure/LeetCode-Solutions
|
74423d65702b78974e390f17c9d6365d17e6eed5
|
[
"MIT"
] | 178
|
2020-07-16T17:15:28.000Z
|
2022-03-09T21:01:50.000Z
|
Python/contains-duplicate.py
|
shreyventure/LeetCode-Solutions
|
74423d65702b78974e390f17c9d6365d17e6eed5
|
[
"MIT"
] | 263
|
2020-07-13T18:33:20.000Z
|
2022-03-28T13:54:10.000Z
|
# Autor: Anuj Sharma (@optider)
# Github Profile: https://github.com/Optider/
# Problem Link: https://leetcode.com/problems/contains-duplicate/
from typing import List

class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
count = {}
for n in nums :
if count.get(n) is not None:
return True
count[n] = 1
return False
| 26.785714
| 65
| 0.581333
| 44
| 375
| 4.954545
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003788
| 0.296
| 375
| 13
| 66
| 28.846154
| 0.82197
| 0.365333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5359c8fd0dd897c7cd9afb3870d3437688b42ddc
| 8,824
|
py
|
Python
|
build/android/gyp/dex.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
build/android/gyp/dex.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
build/android/gyp/dex.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import optparse
import os
import sys
import tempfile
import zipfile
from util import build_utils
def _CheckFilePathEndsWithJar(parser, file_path):
if not file_path.endswith(".jar"):
# dx ignores non .jar files.
parser.error("%s does not end in .jar" % file_path)
def _CheckFilePathsEndWithJar(parser, file_paths):
for file_path in file_paths:
_CheckFilePathEndsWithJar(parser, file_path)
def _RemoveUnwantedFilesFromZip(dex_path):
iz = zipfile.ZipFile(dex_path, 'r')
tmp_dex_path = '%s.tmp.zip' % dex_path
oz = zipfile.ZipFile(tmp_dex_path, 'w', zipfile.ZIP_DEFLATED)
for i in iz.namelist():
if i.endswith('.dex'):
oz.writestr(i, iz.read(i))
os.remove(dex_path)
os.rename(tmp_dex_path, dex_path)
def _ParseArgs(args):
args = build_utils.ExpandFileArgs(args)
parser = optparse.OptionParser()
build_utils.AddDepfileOption(parser)
parser.add_option('--android-sdk-tools',
help='Android sdk build tools directory.')
parser.add_option('--output-directory',
default=os.getcwd(),
help='Path to the output build directory.')
parser.add_option('--dex-path', help='Dex output path.')
parser.add_option('--configuration-name',
help='The build CONFIGURATION_NAME.')
parser.add_option('--proguard-enabled',
help='"true" if proguard is enabled.')
parser.add_option('--debug-build-proguard-enabled',
help='"true" if proguard is enabled for debug build.')
parser.add_option('--proguard-enabled-input-path',
help=('Path to dex in Release mode when proguard '
'is enabled.'))
parser.add_option('--no-locals', default='0',
help='Exclude locals list from the dex file.')
parser.add_option('--incremental',
action='store_true',
help='Enable incremental builds when possible.')
parser.add_option('--inputs', help='A list of additional input paths.')
parser.add_option('--excluded-paths',
help='A list of paths to exclude from the dex file.')
parser.add_option('--main-dex-list-path',
help='A file containing a list of the classes to '
'include in the main dex.')
parser.add_option('--multidex-configuration-path',
help='A JSON file containing multidex build configuration.')
parser.add_option('--multi-dex', default=False, action='store_true',
help='Generate multiple dex files.')
options, paths = parser.parse_args(args)
required_options = ('android_sdk_tools',)
build_utils.CheckOptions(options, parser, required=required_options)
if options.multidex_configuration_path:
with open(options.multidex_configuration_path) as multidex_config_file:
multidex_config = json.loads(multidex_config_file.read())
options.multi_dex = multidex_config.get('enabled', False)
if options.multi_dex and not options.main_dex_list_path:
logging.warning('multidex cannot be enabled without --main-dex-list-path')
options.multi_dex = False
elif options.main_dex_list_path and not options.multi_dex:
logging.warning('--main-dex-list-path is unused if multidex is not enabled')
if options.inputs:
options.inputs = build_utils.ParseGnList(options.inputs)
_CheckFilePathsEndWithJar(parser, options.inputs)
if options.excluded_paths:
options.excluded_paths = build_utils.ParseGnList(options.excluded_paths)
if options.proguard_enabled_input_path:
_CheckFilePathEndsWithJar(parser, options.proguard_enabled_input_path)
_CheckFilePathsEndWithJar(parser, paths)
return options, paths
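# --- usage sketch (editor's addition, not part of the original script) ---
# Illustrates the minimum command line _ParseArgs accepts; the SDK and jar
# paths below are placeholders and this helper is not called by the build.
def _ExampleParseArgs():
  example_argv = [
      '--android-sdk-tools', '/opt/android-sdk/build-tools/25.0.2',
      '--dex-path', 'out/Release/gen/foo.dex.zip',
      'out/Release/obj/foo/foo.javac.jar',
  ]
  return _ParseArgs(example_argv)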
def _AllSubpathsAreClassFiles(paths, changes):
for path in paths:
if any(not p.endswith('.class') for p in changes.IterChangedSubpaths(path)):
return False
return True
def _DexWasEmpty(paths, changes):
for path in paths:
if any(p.endswith('.class')
for p in changes.old_metadata.IterSubpaths(path)):
return False
return True
def _IterAllClassFiles(changes):
for path in changes.IterAllPaths():
for subpath in changes.IterAllSubpaths(path):
if subpath.endswith('.class'):
yield path
def _MightHitDxBug(changes):
# We've seen dx --incremental fail for small libraries. It's unlikely to be a
# speed-up in this case anyway.
num_classes = sum(1 for x in _IterAllClassFiles(changes))
if num_classes < 10:
return True
# We've also been able to consistently produce a failure by adding an empty
# line to the top of the first .java file of a library.
# https://crbug.com/617935
first_file = next(_IterAllClassFiles(changes))
for path in changes.IterChangedPaths():
for subpath in changes.IterChangedSubpaths(path):
if first_file == subpath:
return True
return False
def _RunDx(changes, options, dex_cmd, paths):
with build_utils.TempDir() as classes_temp_dir:
# --multi-dex is incompatible with --incremental.
if options.multi_dex:
dex_cmd.append('--main-dex-list=%s' % options.main_dex_list_path)
else:
# --incremental tells dx to merge all newly dex'ed .class files with
# what that already exist in the output dex file (existing classes are
# replaced).
# Use --incremental when .class files are added or modified, but not when
# any are removed (since it won't know to remove them).
if (options.incremental
and not _MightHitDxBug(changes)
and changes.AddedOrModifiedOnly()):
changed_inputs = set(changes.IterChangedPaths())
changed_paths = [p for p in paths if p in changed_inputs]
if not changed_paths:
return
# When merging in other dex files, there's no easy way to know if
# classes were removed from them.
if (_AllSubpathsAreClassFiles(changed_paths, changes)
and not _DexWasEmpty(changed_paths, changes)):
dex_cmd.append('--incremental')
for path in changed_paths:
changed_subpaths = set(changes.IterChangedSubpaths(path))
# Note: |changed_subpaths| may be empty if nothing changed.
if changed_subpaths:
build_utils.ExtractAll(path, path=classes_temp_dir,
predicate=lambda p: p in changed_subpaths)
paths = [classes_temp_dir]
dex_cmd += paths
build_utils.CheckOutput(dex_cmd, print_stderr=False)
if options.dex_path.endswith('.zip'):
_RemoveUnwantedFilesFromZip(options.dex_path)
def _OnStaleMd5(changes, options, dex_cmd, paths):
_RunDx(changes, options, dex_cmd, paths)
build_utils.WriteJson(
[os.path.relpath(p, options.output_directory) for p in paths],
options.dex_path + '.inputs')
def main(args):
options, paths = _ParseArgs(args)
if ((options.proguard_enabled == 'true'
and options.configuration_name == 'Release')
or (options.debug_build_proguard_enabled == 'true'
and options.configuration_name == 'Debug')):
paths = [options.proguard_enabled_input_path]
if options.inputs:
paths += options.inputs
if options.excluded_paths:
# Excluded paths are relative to the output directory.
exclude_paths = options.excluded_paths
paths = [p for p in paths if not
os.path.relpath(p, options.output_directory) in exclude_paths]
input_paths = list(paths)
dx_binary = os.path.join(options.android_sdk_tools, 'dx')
# See http://crbug.com/272064 for context on --force-jumbo.
# See https://github.com/android/platform_dalvik/commit/dd140a22d for
# --num-threads.
# See http://crbug.com/658782 for why -JXmx2G was added.
dex_cmd = [dx_binary, '-JXmx2G', '--num-threads=8', '--dex', '--force-jumbo',
'--output', options.dex_path]
if options.no_locals != '0':
dex_cmd.append('--no-locals')
if options.multi_dex:
input_paths.append(options.main_dex_list_path)
dex_cmd += [
'--multi-dex',
'--minimal-main-dex',
]
output_paths = [
options.dex_path,
options.dex_path + '.inputs',
]
# An escape hatch to be able to check if incremental dexing is causing
# problems.
force = int(os.environ.get('DISABLE_INCREMENTAL_DX', 0))
build_utils.CallAndWriteDepfileIfStale(
lambda changes: _OnStaleMd5(changes, options, dex_cmd, paths),
options,
input_paths=input_paths,
input_strings=dex_cmd,
output_paths=output_paths,
force=force,
pass_changes=True)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 35.580645
| 80
| 0.68563
| 1,146
| 8,824
| 5.11082
| 0.246073
| 0.017927
| 0.035855
| 0.017927
| 0.187809
| 0.14393
| 0.078539
| 0.024927
| 0
| 0
| 0
| 0.005602
| 0.211015
| 8,824
| 247
| 81
| 35.724696
| 0.835679
| 0.142792
| 0
| 0.086207
| 0
| 0
| 0.160011
| 0.014595
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063218
| false
| 0.005747
| 0.045977
| 0
| 0.16092
| 0.005747
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
535aa0f5c23f246944ffe8092713608d551e77e5
| 11,199
|
py
|
Python
|
apps/views.py
|
Edwardhgj/meiduo
|
38796f5caf54676eb5620f50ade5474ee8700ad8
|
[
"MIT"
] | null | null | null |
apps/views.py
|
Edwardhgj/meiduo
|
38796f5caf54676eb5620f50ade5474ee8700ad8
|
[
"MIT"
] | 6
|
2020-06-05T23:02:49.000Z
|
2022-02-11T03:43:22.000Z
|
apps/views.py
|
Edwardhgj/meiduo
|
38796f5caf54676eb5620f50ade5474ee8700ad8
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.hashers import check_password, make_password
from django.views import View
from utils.response_code import RET, error_map
from rest_framework.views import APIView
from rest_framework.response import Response
from apps.serializers import *
from datetime import datetime
# Create your views here.
# Render the login page
def login(request):
return render(request, 'admin/login.html')
# Handle login submission
import json
class SubmitLogin(View):
def post(self, request):  # reflection
mes = {}
name = request.POST.get('name')
passwd = request.POST.get('passwd')
# print(name,passwd)
if not all([name, passwd]):
mes['code'] = RET.DATAERR
mes['message'] = error_map[RET.DATAERR]
else:
# Look up the admin by username
admin = Sadmin.objects.filter(username=name).first()
print(admin.username if admin else 'admin not found')
if admin:
# Verify the password
if check_password(passwd, admin.password):
# Login successful
request.session['admin_id'] = admin.id
mes['code'] = RET.OK
mes['message'] = error_map[RET.OK]
else:
mes['code'] = RET.PWDERR
mes['message'] = error_map[RET.PWDERR]
else:
mes['code'] = RET.USERERR
mes['message'] = error_map[RET.USERERR]
print('sdfsdfssdf')
return HttpResponse(json.dumps(mes))
# Register a default admin account
def reg(request):
password = make_password('123')
admin = Sadmin(username='admin', password=password, is_admin=True)
admin.save()
return HttpResponse('ok')
# Render the home page
def index(request):
admin_id = request.session.get('admin_id')
if admin_id:
admin = Sadmin.objects.get(id=admin_id)
return render(request, 'admin/index.html', locals())
# Render the category list page
def showCate(request):
return render(request, "admin/cate_list.html")
# Render the news list page
def showNews(request):
return render(request, "admin/news_list.html")
# Render the banner (carousel) list page
def bannersCate(request):
return render(request, "admin/point_list.html")
# Render the tag list page
def tagCate(request):
return render(request, "admin/tag_list.html")
# Render the goods list page
def goodsCate(request):
return render(request, "admin/goods_list.html")
# Render the news list page
def newsCate(request):
return render(request, "admin/news_list.html")
# Render the banner list page (duplicate of bannersCate defined earlier in this file)
def bannersCate(request):
return render(request, "admin/point_list.html")
# Category list API
class CateList(APIView):
def get(self, request):
cate = Cate.objects.all()
c = CateModelSerializer(cate, many=True)
mes = {}
mes['code'] = RET.OK
mes['cateList'] = c.data
return Response(mes)
# Tag list API
class TagList(APIView):
def get(self, request):
tags = Tags.objects.all()
c = TagModelSerializer(tags, many=True)
mes = {}
mes['code'] = RET.OK
mes['tagList'] = c.data
return Response(mes)
# Goods list API
class GoodsList(APIView):
def get(self, request):
goods = Goods.objects.all()
g = GoodsModelSerializer(goods, many=True)
mes = {}
mes['code'] = RET.OK
mes['goodsList'] = g.data
return Response(mes)
# News list API
class NewsList(APIView):
def get(self, request):
news = News.objects.all()
n = NewsModelSerializer(news, many=True)
mes = {}
mes['code'] = RET.OK
mes['newsList'] = n.data
return Response(mes)
# Banner list API
class BannersList(APIView):
def get(self, request):
banners = Banners.objects.all()
n = BannersModelSerializer(banners, many=True)
mes = {}
mes['code'] = RET.OK
mes['bannersList'] = n.data
return Response(mes)
# Add-category page
def addCate(request):
# Fetch top-level categories
cate = Cate.objects.filter(pid=0).all()
id=request.GET.get('id')
try:
# Editing an existing category
one_cate=Cate.objects.get(id=id)
print(one_cate)
except:
id=""
return render(request, "admin/add_cate.html", locals())
# Add-tag page
def addTag(request):
# print('sdf')
cate_list = Cate.objects.all()
id=request.GET.get('id')
try:
# Editing an existing tag
one_tag=Tags.objects.get(id=id)
except:
id=""
return render(request, "admin/add_tag.html", locals())
# Add-goods page
def addGoods(request):
# print('ceshi')
# Fetch all goods
goods = Goods.objects.all()
cates = Cate.objects.all()
tag_list=Tags.objects.all()
id=request.GET.get('id')
print(id)
try:
one_goods=Goods.objects.get(id=id)
# print(one_goods)
except:
id=""
return render(request, "admin/add_goods.html", locals())
# Add-news page
def addNews(request):
# print('ceshi')
# Fetch all news items
news = News.objects.all()
# An id must be passed when editing
id=request.GET.get('id')
print(id)
try:
one_news=News.objects.get(id=id)
# print(one_goods)
except:
id=""
return render(request, "admin/add_news.html", locals())
# Add-banner page
def addBanners(request):
# print('ceshi')
# Fetch all banners
banners = Banners.objects.all()
# An id must be passed when editing
id=request.GET.get('id')
print(id)
try:
one_banner=Banners.objects.get(id=id)
# print(one_goods)
except:
id=""
return render(request, "admin/add_banners.html", locals())
from day01.settings import UPLOADFILES
import os
# Image upload helper
def upload_img(img):
    if img:
        # Save under a timestamped name so the returned URL matches the file on disk
        filename = datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + img.name
        f = open(os.path.join(UPLOADFILES, filename), 'wb')
        for chunk in img.chunks():
            f.write(chunk)
        f.close()
        return 'http://127.0.0.1:8000/static/upload/' + filename
    return ' '
# Image upload endpoint for the rich-text editor
def addnews_upload(request):
files = request.FILES.get('file')
path = upload_img(files)
mes = {
'path': path,
'error': False
}
return HttpResponse(json.dumps(mes))
# Add/update category API
class SubmitAddCate(APIView):
def post(self, request):
content = request.data
print(content)
# Upload the image
img = request.FILES.get('img')
path=upload_img(img)
content['picture']=path
try:
pid=int(content['pid'])
except:
pid=0
# Derive top_id and type from pid
if pid == 0:
type = 1
top_id = 0
else:
cate = Cate.objects.get(id=pid)
type = cate.type + 1
if cate.top_id==0:
top_id = cate.id
else:
top_id = cate.top_id
print(top_id,pid,type)
content['type'] = type
content['top_id'] = top_id
try:
id=int(content['id'])
except:
id=0
if id>0:
cc=Cate.objects.get(id=id)
c=CateSerializer(cc,data=content)
# Update existing category
else:
c = CateSerializer(data=content)
mes={}
if c.is_valid():
try:
c.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
print(c.errors)
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a category
def deleteCate(request):
id=request.GET.get('id')
Cate.objects.get(id=id).delete()
return render(request, "admin/cate_list.html")
# Add/update tag API
class SubmitAddTag(APIView):
def post(self, request):
content = request.data
print(content)
try:
id = int(content['id'])  # extract the id
print(id)
print('reached here')
except:
id = 0
if id > 0:
dd = Tags.objects.get(id=id)
d = TagSerializer(dd, data=content)
# Update existing tag
else:
d = TagSerializer(data=content)
mes = {}
if d.is_valid():
try:
d.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a tag
def deleteTag(request):
id=request.GET.get('id')
Tags.objects.get(id=id).delete()
return render(request, "admin/tag_list.html")
# Add/update goods API
class SubmitAddGoods(APIView):
def post(self, request):
# print('eerw')
content = request.data
print(content)
print(content['id'])
print(content['cid_id'])
# Upload the image
img = request.FILES.get('img')
path=upload_img(img)
content['picture']=path
one_cate=Cate.objects.get(id=int(content['cid_id']))
print(one_cate)
content['top_id'] = one_cate.top_id
try:
print('test code')
id=int(content['id'])
print(id)
except:
id=0
if id>0:
# Update existing goods
instance = Goods.objects.get(id=id)
c = GoodsSerializer(instance, data=content)
else:
c = GoodsSerializer(data=content)
mes={}
if c.is_valid():
c.save()
mes['code'] = RET.OK
else:
print(c.errors)
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a goods item
def deleteGoods(request):
id=request.GET.get('id')
Goods.objects.get(id=id).delete()
return render(request, "admin/goods_list.html")
# Add/update news API
class SubmitAddNews(APIView):
def post(self,request):
content=request.data
print(content)
try:
id = int(content['id'])  # extract the id
except:
id = 0
if id > 0:
print(id)
nn = News.objects.get(id=id)
d = NewsSerializer(nn, data=content)
# Update existing news
else:
d = NewsSerializer(data=content)
mes = {}
if d.is_valid():
try:
d.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
mes['code'] = RET.DATAERR
return Response(mes)
# Delete a news item
def deleteNews(request):
id=request.GET.get('id')
News.objects.get(id=id).delete()
return render(request,"admin/news_list.html")
# Delete a banner
def deleteBanners(request):
id=request.GET.get('id')
Banners.objects.get(id=id).delete()
return render(request,"admin/point_list.html")
# Add/update banner API
class SubmitAddBanner(APIView):
def post(self,request):
content=request.data
print(content)
try:
id = int(content['id'])  # extract the id
except:
id = 0
if id > 0:
print(id)
nn = Banners.objects.get(id=id)
d = BannersSerializer(nn, data=content)
# Update existing banner
else:
d = BannersSerializer(data=content)
mes = {}
if d.is_valid():
try:
d.save()
mes['code'] = RET.OK
except:
mes['code'] = RET.DATAERR
else:
mes['code'] = RET.DATAERR
return Response(mes)
def user_count(request):
return render(request,'admin/user_count.html')
| 21.331429
| 70
| 0.544156
| 1,308
| 11,199
| 4.602446
| 0.169725
| 0.023256
| 0.038206
| 0.079734
| 0.52392
| 0.402658
| 0.369934
| 0.31412
| 0.292525
| 0.256312
| 0
| 0.004241
| 0.326279
| 11,199
| 524
| 71
| 21.372137
| 0.793638
| 0.043307
| 0
| 0.564179
| 0
| 0
| 0.071878
| 0.013906
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101493
| false
| 0.01791
| 0.035821
| 0.026866
| 0.274627
| 0.068657
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
535ab0b00b5e6dd49d2816d9ac5192041774bc04
| 4,283
|
py
|
Python
|
learnedevolution/targets/covariance/amalgam_covariance.py
|
realtwister/LearnedEvolution
|
2ec49b50a49acae9693cfb05ac114dfbcc4aa337
|
[
"MIT"
] | null | null | null |
learnedevolution/targets/covariance/amalgam_covariance.py
|
realtwister/LearnedEvolution
|
2ec49b50a49acae9693cfb05ac114dfbcc4aa337
|
[
"MIT"
] | null | null | null |
learnedevolution/targets/covariance/amalgam_covariance.py
|
realtwister/LearnedEvolution
|
2ec49b50a49acae9693cfb05ac114dfbcc4aa337
|
[
"MIT"
] | null | null | null |
import numpy as np;
from .covariance_target import CovarianceTarget;
class AMaLGaMCovariance(CovarianceTarget):
_API=2.
def __init__(self,
theta_SDR = 1.,
eta_DEC = 0.9,
alpha_Sigma = [-1.1,1.2,1.6],
NIS_MAX = 25,
tau = 0.35,
epsilon = 1e-30,
condition_number_epsilon = 1e6):
self.epsilon = epsilon;
self.theta_SDR = theta_SDR;
self.eta_DEC = eta_DEC;
self.eta_INC = 1./eta_DEC;
self.NIS_MAX = NIS_MAX;
self.alpha_Sigma = alpha_Sigma;
self.tau = tau;
self.condition_number_epsilon = condition_number_epsilon;
def _reset(self, initial_mean, initial_covariance):
self.mean = initial_mean;
self.old_mean = initial_mean;
self.covariance = initial_covariance;
self.d = len(initial_mean);
self.Sigma = initial_covariance;
self.c_multiplier = 1.;
self.NIS = 0;
self.t = 0;
self.best_f = -float('inf');
def _update_mean(self, mean):
self.old_mean = self.mean;
self.mean = mean;
def _calculate(self, population):
self.update_matrix(population);
self.update_multiplier(population);
self.t += 1;
self.best_f = max(self.best_f, np.max(population.fitness));
new_covariance = self.Sigma*self.c_multiplier;
u,s,_ = np.linalg.svd(new_covariance);
s_max = np.max(s)
s_max = np.clip(s_max, self.epsilon*self.condition_number_epsilon, 1e3);
s = np.clip(s, s_max/self.condition_number_epsilon, s_max);
new_covariance = u*s@u.T
self.covariance = new_covariance
return self.covariance;
def update_matrix(self, population):
F = population.fitness;
sel_idx = F.argsort()[-np.ceil(self.tau*len(population)).astype(int):][::-1]
alpha = self.alpha_Sigma;
eta_Sigma = 1.-np.exp(alpha[0]*len(sel_idx)**alpha[1]/self.d**alpha[2]);
current_update = np.zeros((self.d,self.d));
selection = population.population[sel_idx];
for individual in selection:
delta = individual-self.old_mean;
current_update += np.outer(delta,delta)
current_update /= (selection.shape[0]);
self.Sigma *= (1-eta_Sigma);
self.Sigma += eta_Sigma*current_update;
# We need to ensure the condition number is OK to avoid singular matrix.
u,s,_ = np.linalg.svd(self.Sigma);
s_max = np.max(s)
s_max = np.clip(s_max, self.epsilon*self.condition_number_epsilon, None);
s = np.clip(s, s_max/self.condition_number_epsilon, s_max);
self.Sigma = u*s@u.T
def update_multiplier(self, population):
if np.any(population.fitness>self.best_f):
self.NIS = 0;
self.c_multiplier = max(1., self.c_multiplier);
self.SDR(population);
else:
if self.c_multiplier <= 1:
self.NIS += 1;
if self.c_multiplier > 1 or self.NIS >= self.NIS_MAX:
self.c_multiplier *= self.eta_DEC;
if self.c_multiplier < 1 and self.NIS < self.NIS_MAX:
self.c_multiplier = 1;
def SDR(self, population):
x_avg = np.mean(population.population[population.fitness>self.best_f], axis=0);
delta = np.abs(self.mean-x_avg);
variances = np.abs(np.diag(self.covariance));
if np.any(delta/np.sqrt(variances)>self.theta_SDR):
self.c_multiplier *= self.eta_INC;
def _calculate_deterministic(self,population):
return self._calculate(population);
def _terminating(self, population):
pass;
@classmethod
def _get_kwargs(cls, config, key = ""):
cls._config_required(
'theta_SDR',
'eta_DEC',
'alpha_Sigma',
'NIS_MAX',
'tau',
'epsilon',
'condition_number_epsilon'
)
cls._config_defaults(
theta_SDR = 1.,
eta_DEC = 0.9,
alpha_Sigma = [-1.1,1.2,1.6],
NIS_MAX = 25,
tau = 0.35,
epsilon = 1e-30,
condition_number_epsilon = 1e6
)
return super()._get_kwargs(config, key = key);
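A minimal usage sketch of the AMaLGaMCovariance class above. It assumes the class can be instantiated directly and that the population object only needs the attributes the methods actually read (population, fitness, len()); the _Population stand-in and all numbers below are illustrative, not part of the original package.

import numpy as np

class _Population:  # illustrative stand-in exposing only what _calculate() reads
    def __init__(self, xs, fitness):
        self.population = xs
        self.fitness = fitness
    def __len__(self):
        return len(self.population)

rng = np.random.default_rng(0)
d = 3
target = AMaLGaMCovariance()
target._reset(np.zeros(d), np.eye(d))             # initial mean and covariance
target._update_mean(np.full(d, 0.1))              # pretend the mean has moved
pop = _Population(rng.normal(size=(20, d)), rng.normal(size=20))
cov = target._calculate(pop)                      # adapted covariance estimate
print(cov.shape)                                  # -> (3, 3)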
| 30.592857
| 87
| 0.585337
| 555
| 4,283
| 4.291892
| 0.203604
| 0.062972
| 0.062972
| 0.054576
| 0.263224
| 0.202351
| 0.183039
| 0.183039
| 0.156171
| 0.156171
| 0
| 0.020219
| 0.295587
| 4,283
| 139
| 88
| 30.81295
| 0.769307
| 0.016344
| 0
| 0.168224
| 0
| 0
| 0.016861
| 0.005699
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093458
| false
| 0.009346
| 0.018692
| 0.009346
| 0.158879
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
535eb0ebfd076b333a6d2c988712739a93360c70
| 8,513
|
py
|
Python
|
legacy_code/tf_cnn_siamese/model.py
|
PerryXDeng/project_punyslayer
|
79529b020ca56a5473dbb85ac7155bc03dc5023a
|
[
"MIT"
] | 2
|
2019-10-25T04:57:03.000Z
|
2020-06-16T00:34:18.000Z
|
legacy_code/tf_cnn_siamese/model.py
|
PerryXDeng/project_punyslayer
|
79529b020ca56a5473dbb85ac7155bc03dc5023a
|
[
"MIT"
] | null | null | null |
legacy_code/tf_cnn_siamese/model.py
|
PerryXDeng/project_punyslayer
|
79529b020ca56a5473dbb85ac7155bc03dc5023a
|
[
"MIT"
] | 1
|
2020-06-25T14:54:24.000Z
|
2020-06-25T14:54:24.000Z
|
import legacy_code.tf_cnn_siamese.configurations as conf
import tensorflow as tf
import numpy as np
def construct_cnn(x, conv_weights, conv_biases, fc_weights, fc_biases,
dropout = False):
"""
constructs the convolution graph for one image
:param x: input node
:param conv_weights: convolution weights
:param conv_biases: relu biases for each convolution
:param fc_weights: fully connected weights, only one set should be used here
:param fc_biases: fully connected biases, only one set should be used here
:param dropout: whether to add a dropout layer for the fully connected layer
:return: output node
"""
k = conf.NUM_POOL
for i in range(conf.NUM_CONVS):
x = tf.nn.conv2d(x, conv_weights[i], strides=[1, 1, 1, 1], padding='SAME',
data_format=conf.DATA_FORMAT)
x = tf.nn.relu(tf.nn.bias_add(x, conv_biases[i],
data_format=conf.DATA_FORMAT))
if k > 0:
x = tf.nn.max_pool(x, ksize=conf.POOL_KDIM,strides=conf.POOL_KDIM,
padding='VALID', data_format=conf.DATA_FORMAT)
k -= 1
# Reshape the feature map cuboids into vectors for fc layers
features_shape = x.get_shape().as_list()
n = features_shape[0]
m = features_shape[1] * features_shape[2] * features_shape[3]
features = tf.reshape(x, [n, m])
# last fc_weights determine output dimensions
fc = tf.nn.sigmoid(tf.matmul(features, fc_weights[0]) + fc_biases[0])
# for actual training
if dropout:
fc = tf.nn.dropout(fc, conf.DROP_RATE)
return fc
def construct_logits_model(x_1, x_2, conv_weights, conv_biases, fc_weights,
fc_biases, dropout=False):
"""
constructs the logit node before the final sigmoid activation
:param x_1: input image node 1
:param x_2: input image node 2
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:param dropout: whether to include dropout layers
:return: logit node
"""
with tf.name_scope("twin_1"):
twin_1 = construct_cnn(x_1, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
with tf.name_scope("twin_2"):
twin_2 = construct_cnn(x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
# logits on squared difference
sq_diff = tf.squared_difference(twin_1, twin_2)
logits = tf.matmul(sq_diff, fc_weights[1]) + fc_biases[1]
return logits
def construct_full_model(x_1, x_2, conv_weights, conv_biases,fc_weights,
fc_biases):
"""
constructs the graph for the neural network without loss node or optimizer
:param x_1: input image node 1
:param x_2: input image node 2
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:return: sigmoid output node
"""
logits = construct_logits_model(x_1, x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout=False)
return tf.nn.sigmoid(logits)
def construct_loss_optimizer(x_1, x_2, labels, conv_weights, conv_biases,
fc_weights, fc_biases, dropout=False,
lagrange=False):
"""
constructs the neural network graph with the loss and optimizer node
:param x_1: input image node 1
:param x_2: input image node 2
:param labels: expected output
:param conv_weights: nodes for convolution weights
:param conv_biases: nodes for convolution relu biases
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:param dropout: whether to use dropout
:param lagrange: whether to apply constraints
:return: the node for the optimizer as well as the loss
"""
logits = construct_logits_model(x_1, x_2, conv_weights, conv_biases,
fc_weights, fc_biases, dropout)
# cross entropy loss on sigmoids of joined output and labels
loss_vec = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
logits=logits)
loss = tf.reduce_mean(loss_vec)
if lagrange:
# constraints on sigmoid layers
regularizers = (tf.nn.l2_loss(fc_weights[0]) + tf.nn.l2_loss(fc_biases[0]) +
tf.nn.l2_loss(fc_weights[1]) + tf.nn.l2_loss(fc_biases[1]))
loss += conf.LAMBDA * regularizers
# setting up the optimization
batch = tf.Variable(0, dtype=conf.DTYPE)
# vanilla momentum optimizer
# accumulation = momentum * accumulation + gradient
# every epoch: variable -= learning_rate * accumulation
# batch_total = labels.shape[0]
# learning_rate = tf.train.exponential_decay(
# conf.BASE_LEARNING_RATE,
# batch * conf.BATCH_SIZE, # Current index into the dataset.
# batch_total,
# conf.DECAY_RATE, # Decay rate.
# staircase=True)
# trainer = tf.train.MomentumOptimizer(learning_rate, conf.MOMENTUM)\
# .minimize(loss, global_step=batch)
# adaptive momentum estimation optimizer
# default params: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08
trainer = tf.train.AdamOptimizer().minimize(loss, global_step=batch)
return trainer, loss
def construct_joined_model(twin_1, twin_2, fc_weights, fc_biases):
"""
constructs joined model for two sets of extracted features
:param twin_1: features node extracted from first image
:param twin_2: features node extracted from second image
:param fc_weights: nodes for fully connected weights
:param fc_biases: nodes for fully connected biases
:return: logit node
"""
# logits on squared difference
sq_diff = tf.squared_difference(twin_1, twin_2)
logits = tf.matmul(sq_diff, fc_weights[1]) + fc_biases[1]
return tf.nn.sigmoid(logits)
def initialize_weights():
"""
initializes the variable tensors to be trained in the neural network, decides
network dimensions
:return: nodes for the variables
"""
# twin network convolution and pooling variables
conv_weights = []
conv_biases = []
fc_weights = []
fc_biases = []
for i in range(conf.NUM_CONVS):
if i == 0:
inp = conf.NUM_CHANNELS
else:
inp = conf.NUM_FILTERS[i - 1]
out = conf.NUM_FILTERS[i]
conv_dim = [conf.FILTER_LEN, conf.FILTER_LEN, inp, out]
weight_name = "twin_conv" + str(i + 1) + "_weights"
bias_name = "twin_conv" + str(i + 1) + "_biases"
conv_weights.append(tf.Variable(tf.truncated_normal(conv_dim, stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name=weight_name))
conv_biases.append(tf.Variable(tf.zeros([out], dtype=conf.DTYPE),
name=bias_name))
# twin network fully connected variables
inp = conf.FEATURE_MAP_SIZE
out = conf.NUM_FC_NEURONS
fc_weights.append(tf.Variable(tf.truncated_normal([inp, out], stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name="twin_fc_weights"))
fc_biases.append(tf.Variable(tf.constant(0.1, shape=[out], dtype=conf.DTYPE),
name="twin_fc_biases"))
# joined network fully connected variables
inp = conf.NUM_FC_NEURONS
out = 1
fc_weights.append(tf.Variable(tf.truncated_normal([inp, out], stddev=0.1,
seed=conf.SEED, dtype=conf.DTYPE),
name="joined_fc_weights"))
fc_biases.append(tf.Variable(tf.constant(0.1, shape=[out], dtype=conf.DTYPE),
name="joined_fc_biases"))
return conv_weights, conv_biases, fc_weights, fc_biases
def num_params():
"""
calculates the number of parameters in the model
:return: m, number of parameters
"""
m = 0
for i in range(conf.NUM_CONVS):
if i == 0:
inp = conf.NUM_CHANNELS
else:
inp = conf.NUM_FILTERS[i - 1]
out = conf.NUM_FILTERS[i]
conv_dim = [conf.FILTER_LEN, conf.FILTER_LEN, inp, out]
m += np.prod(conv_dim) + np.prod(out)
inp = conf.FEATURE_MAP_SIZE
out = conf.NUM_FC_NEURONS
m += inp * out + out
inp = conf.NUM_FC_NEURONS
out = 1
m += inp * out + out
return m
if __name__ == "__main__":
print("Number of Parameters: " + str(num_params()))
| 39.412037
| 80
| 0.670504
| 1,210
| 8,513
| 4.521488
| 0.179339
| 0.042771
| 0.026138
| 0.040395
| 0.525864
| 0.474867
| 0.441967
| 0.422044
| 0.39682
| 0.388594
| 0
| 0.014939
| 0.237284
| 8,513
| 215
| 81
| 39.595349
| 0.827661
| 0.380477
| 0
| 0.387387
| 0
| 0
| 0.028684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063063
| false
| 0
| 0.027027
| 0
| 0.153153
| 0.009009
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
535f56691aa062ae2f47bfd3027e86cffdb80581
| 3,591
|
py
|
Python
|
tests/test_utils_log.py
|
FingerCrunch/scrapy
|
3225de725720bba246ba8c9845fe4b84bc0c82e7
|
[
"BSD-3-Clause"
] | 41,267
|
2015-01-01T07:39:25.000Z
|
2022-03-31T20:09:40.000Z
|
tests/test_utils_log.py
|
FingerCrunch/scrapy
|
3225de725720bba246ba8c9845fe4b84bc0c82e7
|
[
"BSD-3-Clause"
] | 4,420
|
2015-01-02T09:35:38.000Z
|
2022-03-31T22:53:32.000Z
|
tests/test_utils_log.py
|
FingerCrunch/scrapy
|
3225de725720bba246ba8c9845fe4b84bc0c82e7
|
[
"BSD-3-Clause"
] | 11,080
|
2015-01-01T18:11:30.000Z
|
2022-03-31T15:33:19.000Z
|
import sys
import logging
import unittest
from testfixtures import LogCapture
from twisted.python.failure import Failure
from scrapy.utils.log import (failure_to_exc_info, TopLevelFormatter,
LogCounterHandler, StreamLogger)
from scrapy.utils.test import get_crawler
from scrapy.extensions import telnet
class FailureToExcInfoTest(unittest.TestCase):
def test_failure(self):
try:
0 / 0
except ZeroDivisionError:
exc_info = sys.exc_info()
failure = Failure()
self.assertTupleEqual(exc_info, failure_to_exc_info(failure))
def test_non_failure(self):
self.assertIsNone(failure_to_exc_info('test'))
class TopLevelFormatterTest(unittest.TestCase):
def setUp(self):
self.handler = LogCapture()
self.handler.addFilter(TopLevelFormatter(['test']))
def test_top_level_logger(self):
logger = logging.getLogger('test')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_children_logger(self):
logger = logging.getLogger('test.test1')
with self.handler as log:
logger.warning('test log msg')
log.check(('test', 'WARNING', 'test log msg'))
def test_overlapping_name_logger(self):
logger = logging.getLogger('test2')
with self.handler as log:
logger.warning('test log msg')
log.check(('test2', 'WARNING', 'test log msg'))
def test_different_name_logger(self):
logger = logging.getLogger('different')
with self.handler as log:
logger.warning('test log msg')
log.check(('different', 'WARNING', 'test log msg'))
class LogCounterHandlerTest(unittest.TestCase):
def setUp(self):
settings = {'LOG_LEVEL': 'WARNING'}
if not telnet.TWISTED_CONCH_AVAILABLE:
# disable it to avoid the extra warning
settings['TELNETCONSOLE_ENABLED'] = False
self.logger = logging.getLogger('test')
self.logger.setLevel(logging.NOTSET)
self.logger.propagate = False
self.crawler = get_crawler(settings_dict=settings)
self.handler = LogCounterHandler(self.crawler)
self.logger.addHandler(self.handler)
def tearDown(self):
self.logger.propagate = True
self.logger.removeHandler(self.handler)
def test_init(self):
self.assertIsNone(self.crawler.stats.get_value('log_count/DEBUG'))
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
self.assertIsNone(self.crawler.stats.get_value('log_count/WARNING'))
self.assertIsNone(self.crawler.stats.get_value('log_count/ERROR'))
self.assertIsNone(self.crawler.stats.get_value('log_count/CRITICAL'))
def test_accepted_level(self):
self.logger.error('test log msg')
self.assertEqual(self.crawler.stats.get_value('log_count/ERROR'), 1)
def test_filtered_out_level(self):
self.logger.debug('test log msg')
self.assertIsNone(self.crawler.stats.get_value('log_count/INFO'))
class StreamLoggerTest(unittest.TestCase):
def setUp(self):
self.stdout = sys.stdout
logger = logging.getLogger('test')
logger.setLevel(logging.WARNING)
sys.stdout = StreamLogger(logger, logging.ERROR)
def tearDown(self):
sys.stdout = self.stdout
def test_redirect(self):
with LogCapture() as log:
print('test log msg')
log.check(('test', 'ERROR', 'test log msg'))
| 32.944954
| 77
| 0.662768
| 424
| 3,591
| 5.485849
| 0.21934
| 0.051591
| 0.051591
| 0.058469
| 0.39123
| 0.366294
| 0.257094
| 0.257094
| 0.239037
| 0.156492
| 0
| 0.002158
| 0.225564
| 3,591
| 108
| 78
| 33.25
| 0.834232
| 0.010304
| 0
| 0.234568
| 0
| 0
| 0.11036
| 0.005912
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.185185
| false
| 0
| 0.098765
| 0
| 0.333333
| 0.012346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53620a02b1382e7015ce77097767be27a037d2cd
| 2,329
|
py
|
Python
|
astar.py
|
jeff012345/clue-part-duo
|
bd9ccd2ccdbc2fe358a696b31644b93e70ff874b
|
[
"MIT"
] | null | null | null |
astar.py
|
jeff012345/clue-part-duo
|
bd9ccd2ccdbc2fe358a696b31644b93e70ff874b
|
[
"MIT"
] | null | null | null |
astar.py
|
jeff012345/clue-part-duo
|
bd9ccd2ccdbc2fe358a696b31644b93e70ff874b
|
[
"MIT"
] | null | null | null |
import heapq
from typing import List, Dict, Optional
from definitions import RoomPosition, Position
import random
import sys
class PriorityQueue:
def __init__(self):
self.elements: List = []
def empty(self) -> bool:
return len(self.elements) == 0
def put(self, item, priority: float):
heapq.heappush(self.elements, (priority, random.randint(1, 9999999999999999), item))
def get(self):
return heapq.heappop(self.elements)[2]
def heuristic(a: Position, b: Position) -> float:
if a == b:
return 0
if isinstance(a, RoomPosition):
if isinstance(b, RoomPosition):
raise Exception("Cannot calculate heuristic between two rooms")
return 1 # (1^2 + 0^2)
if isinstance(b, RoomPosition):
return 1 # (1^2 + 0^2)
# both are Space
return (a.col - b.col) ** 2 + (a.row - b.row) ** 2
def a_star_search(start: Position, goal: Position) -> List[Position]:
if start is None:
raise Exception("Start is None")
if goal is None:
raise Exception("goal is None")
if start == goal:
raise Exception('Start and goal are the same')
frontier = PriorityQueue()
frontier.put(start, 0)
came_from: Dict[Position, Optional[Position]] = {}
cost_so_far: Dict[Position, float] = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current: Position = frontier.get()
if current == goal:
break
for next in current.connections:
if isinstance(next, RoomPosition) and next != goal:
# once you enter a room, it's a dead end
continue
new_cost = cost_so_far[current] + 1
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost + heuristic(goal, next)
frontier.put(next, priority)
came_from[next] = current
if frontier.empty():
print(str(start) + " to " + str(goal))
raise Exception('no path found')
shortest_path = []
prev = goal
while prev is not None:
shortest_path.append(prev)
prev = came_from[prev]
shortest_path.reverse()
return shortest_path
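A small usage sketch for the PriorityQueue above (items and the module name astar are hypothetical). Lower priority values are popped first; the random tiebreaker keeps heap comparisons away from the stored item itself.

from astar import PriorityQueue

q = PriorityQueue()
q.put("goal_room", 3.0)
q.put("hallway", 1.0)
q.put("corridor", 2.0)
while not q.empty():
    print(q.get())  # -> hallway, corridor, goal_room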
| 26.465909
| 92
| 0.592529
| 294
| 2,329
| 4.591837
| 0.323129
| 0.026667
| 0.04
| 0.037037
| 0.04
| 0.016296
| 0
| 0
| 0
| 0
| 0
| 0.021645
| 0.305711
| 2,329
| 87
| 93
| 26.770115
| 0.813234
| 0.033061
| 0
| 0.066667
| 0
| 0
| 0.050357
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.083333
| 0.033333
| 0.316667
| 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53620e1797375b57cbec5b488715571deabfedc5
| 19,152
|
py
|
Python
|
src/py_scripts/fc_phasing.py
|
pb-jchin/FALCON_unzip
|
21b1df3491e3bb7b9d8ecd13fc0c9c1a45b6393f
|
[
"BSD-3-Clause-Clear"
] | 2
|
2016-06-23T03:20:22.000Z
|
2016-10-07T23:45:26.000Z
|
src/py_scripts/fc_phasing.py
|
pb-jchin/FALCON_unzip
|
21b1df3491e3bb7b9d8ecd13fc0c9c1a45b6393f
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
src/py_scripts/fc_phasing.py
|
pb-jchin/FALCON_unzip
|
21b1df3491e3bb7b9d8ecd13fc0c9c1a45b6393f
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
from pypeflow.common import *
from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn
from pypeflow.task import PypeTask, PypeThreadTaskBase, PypeTaskBase
from pypeflow.controller import PypeWorkflow, PypeThreadWorkflow
from falcon_kit.FastaReader import FastaReader
import subprocess, shlex
import os, re
cigar_re = r"(\d+)([MIDNSHP=X])"
def make_het_call(self):
bam_fn = fn(self.bam_file)
ctg_id = self.parameters["ctg_id"]
ref_seq = self.parameters["ref_seq"]
base_dir = self.parameters["base_dir"]
vmap_fn = fn(self.vmap_file)
vpos_fn = fn(self.vpos_file)
q_id_map_fn = fn(self.q_id_map_file)
p = subprocess.Popen(shlex.split("samtools view %s %s" % (bam_fn, ctg_id) ), stdout=subprocess.PIPE)
pileup = {}
q_id_map = {}
q_max_id = 0
q_id = 0
q_name_to_id = {}
try:
os.makedirs("%s/%s" % (base_dir, ctg_id))
except OSError:
pass
vmap = open(vmap_fn, "w")
vpos = open(vpos_fn, "w")
for l in p.stdout:
l = l.strip().split()
if l[0][0] == "@":
continue
QNAME = l[0]
if QNAME not in q_name_to_id:
q_id = q_max_id
q_name_to_id[QNAME] = q_id
q_max_id += 1
q_id = q_name_to_id[QNAME]
q_id_map[q_id] = QNAME
FLAG = int(l[1])
RNAME = l[2]
POS = int(l[3]) - 1 # convert to zero base
CIGAR = l[5]
SEQ = l[9]
rp = POS
qp = 0
skip_base = 0
total_aln_pos = 0
for m in re.finditer(cigar_re, CIGAR):
adv = int(m.group(1))
total_aln_pos += adv
if m.group(2) == "S":
skip_base += adv
if 1.0 - 1.0 * skip_base / total_aln_pos < 0.1:
continue
if total_aln_pos < 2000:
continue
for m in re.finditer(cigar_re, CIGAR):
adv = int(m.group(1))
if m.group(2) == "S":
qp += adv
if m.group(2) == "M":
matches = []
for i in range(adv):
matches.append( (rp, SEQ[qp]) )
rp += 1
qp += 1
matches = matches[1:-1]
for pos, b in matches:
pileup.setdefault(pos, {})
pileup[pos].setdefault(b, [])
pileup[pos][b].append(q_id)
elif m.group(2) == "I":
for i in range(adv):
qp += 1
elif m.group(2) == "D":
for i in range(adv):
rp += 1
pos_k = pileup.keys()
pos_k.sort()
th = 0.25
for pos in pos_k:
if pos < POS:
if len(pileup[pos]) < 2:
del pileup[pos]
continue
base_count = []
total_count = 0
for b in ["A", "C", "G", "T"]:
count = len(pileup[pos].get(b,[]))
base_count.append( (count, b) )
total_count += count
if total_count < 10:
del pileup[pos]
continue
base_count.sort()
base_count.reverse()
p0 = 1.0 * base_count[0][0] / total_count
p1 = 1.0 * base_count[1][0] / total_count
if p0 < 1.0 - th and p1 > th:
b0 = base_count[0][1]
b1 = base_count[1][1]
ref_base = ref_seq[pos]
print >> vpos, pos+1, ref_base, total_count, " ".join(["%s %d" % (x[1], x[0]) for x in base_count])
for q_id_ in pileup[pos][b0]:
print >> vmap, pos+1, ref_base, b0, q_id_
for q_id_ in pileup[pos][b1]:
print >> vmap, pos+1, ref_base, b1, q_id_
del pileup[pos]
q_id_map_f = open(q_id_map_fn, "w")
for q_id, q_name in q_id_map.items():
print >> q_id_map_f, q_id, q_name
def generate_association_table(self):
vmap_fn = fn(self.vmap_file)
atable_fn = fn(self.atable_file)
ctg_id = self.parameters["ctg_id"]
base_dir = self.parameters["base_dir"]
vmap = {}
v_positions = []
with open(vmap_fn) as f:
for l in f:
l = l.strip().split()
pos = int(l[0])
ref_b = l[1]
v_b = l[2]
q_id = int(l[3])
if (pos, ref_b) not in vmap:
v_positions.append( (pos, ref_b) )
vmap.setdefault( (pos, ref_b), {} )
vmap[ (pos, ref_b) ].setdefault(v_b, [])
vmap[ (pos, ref_b) ][v_b].append( q_id )
#xary = []
#yary = []
with open(atable_fn, "w") as out_f:
for i1 in xrange(len(v_positions)):
link_count = 0
for i2 in xrange(i1+1, len(v_positions)):
pos1, rb1 = v_positions[i1]
pos2, rb2 = v_positions[i2]
if pos2 - pos1 > (1 << 16):
continue
ct = {}
p1table = []
p2table = []
s1 = 0
list1 = vmap[ (pos1, rb1) ].items()
for b1, qids1 in list1:
p1table.append( (b1, len(qids1) ) )
s1 += len(qids1)
s2 = 0
list2 = vmap[ (pos2, rb2) ].items()
for b2, qids2 in list2:
p2table.append( (b2, len(qids2) ) )
s2 += len(qids2)
total_s = 0
for b1, qids1 in list1:
for b2, qids2 in list2:
s = len(set(qids1) & set(qids2))
ct[(b1,b2)] = s
total_s += s
if total_s < 6:
continue
b11 = p1table[0][0]
b12 = p1table[1][0]
b21 = p2table[0][0]
b22 = p2table[1][0]
print >> out_f, pos1, b11, b12, pos2, b21, b22, ct[(b11,b21)], ct[(b11,b22)], ct[(b12,b21)], ct[(b12,b22)]
#xary.append(pos1)
#yary.append(pos2)
link_count += 1
if link_count > 500:
break
def get_score( c_score, pos1, pos2, s1, s2 ):
if pos1 > pos2:
pos1, pos2 = pos2, pos1
s1, s2 = s2, s1
b11, b12 = s1
b21, b22 = s2
return c_score[ (pos1, pos2) ][ (b11+b21, b12+b22) ]
def get_phased_blocks(self):
vmap_fn = fn(self.vmap_file)
atable_fn = fn(self.atable_file)
p_variant_fn = fn(self.phased_variant_file)
left_connect = {}
right_connect = {}
c_score = {}
states = {}
positions = set()
ref_base = {}
with open(vmap_fn) as f:
for l in f:
l = l.strip().split()
pos = int(l[0])
ref_b = l[1]
v_b = l[2]
q_id = int(l[3])
ref_base[pos] = ref_b
with open(atable_fn) as f:
for l in f:
l = l.strip().split()
pos1, b11, b12, pos2, b21, b22, s11, s12, s21, s22 = l
s11, s12, s21, s22 = int(s11), int(s12), int(s21), int(s22)
if abs(s11+s22-s12-s21) < 6:
continue
pos1 = int(pos1)
pos2 = int(pos2)
positions.add(pos1)
positions.add(pos2)
right_connect.setdefault(pos1, [])
right_connect[pos1].append(pos2)
left_connect.setdefault(pos2, [])
left_connect[pos2].append(pos1)
c_score[ (pos1, pos2) ] = { (b11+b21, b12+b22): s11 + s22, (b12+b22, b11+b21): s11 + s22,
(b12+b21, b11+b22): s12 + s21, (b11+b22, b12+b21): s12 + s21 }
if pos1 not in states:
st1 = (b11, b12)
st2 = (b12, b11)
score1 = 0
score2 = 0
for pp in left_connect.get(pos1,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pp, pos1, st0, st1 )
score2 += get_score( c_score, pp, pos1, st0, st2 )
for pp in right_connect.get(pos1,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pos1, pp, st1, st0 )
score2 += get_score( c_score, pos1, pp, st2, st0 )
if score1 >= score2:
states[pos1] = st1
else:
states[pos1] = st2
if pos2 not in states:
st1 = (b21, b22)
st2 = (b22, b21)
score1 = 0
score2 = 0
for pp in left_connect.get(pos2,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pp, pos2, st0, st1 )
score2 += get_score( c_score, pp, pos2, st0, st2 )
for pp in right_connect.get(pos2,[]):
if pp in states:
st0 = states[pp]
else:
continue
score1 += get_score( c_score, pos2, pp, st1, st0 )
score2 += get_score( c_score, pos2, pp, st2, st0 )
if score1 >= score2:
states[pos2] = st1
else:
states[pos2] = st2
positions = list(positions)
positions.sort()
iter_count = 0
while 1:
iter_count += 1
if iter_count > 10:
break
update_count = 0
for p in positions:
b1, b2 = states[p]
st1 = (b1, b2)
st2 = (b2, b1)
score1 = 0
score2 = 0
for pp in left_connect.get(p,[]):
st0 = states[pp]
score1 += get_score( c_score, pp, p, st0 ,st1)
score2 += get_score( c_score, pp, p, st0, st2)
#for pp in right_connect.get(p,[]):
# st0 = states[pp]
# score1 += get_score( c_score, p, pp, st1 ,st0)
# score2 += get_score( c_score, p, pp, st2, st0)
if score1 >= score2:
states[p] = st1
else:
states[p] = st2
update_count += 1
if update_count == 0:
break
right_extent = {}
right_score = {}
left_extent = {}
left_score = {}
for p in positions:
left_extent[p] = p
left_score[p] = 0
if p in left_connect:
left = p
st0 = states[p]
st0_ = st0[1], st0[0]
for pp in left_connect[p]:
st1 = states[pp]
s = get_score( c_score, pp, p, st1, st0)
s_ = get_score( c_score, pp, p, st1, st0_)
left_score[p] += s - s_
if s - s_ > 0 and pp < left:
left = pp
left_extent[p] = left
right_extent[p] = p
right_score[p] = 0
if p in right_connect:
right = p
st0 = states[p]
st0_ = st0[1], st0[0]
for pp in right_connect[p]:
st1 = states[pp]
s = get_score( c_score, p, pp, st0, st1)
s_ = get_score( c_score, p, pp, st0_, st1)
right_score[p] += s - s_
if s - s_ > 0 and pp > right:
right = pp
right_extent[p] = right
phase_block_id = 1
phase_blocks = {}
pb = []
max_right_ext = 0
for p in positions:
if right_score[p] < 10 or left_score[p] < 10:
continue
b1, b2 = states[p]
if max_right_ext < left_extent[p]:
if len(pb) > 3:
phase_blocks[phase_block_id] = pb
phase_block_id += 1
pb = []
pb.append( (p, b1, b2) )
if right_extent[p] > max_right_ext:
max_right_ext = right_extent[p]
if len(pb) > 3:
phase_blocks[phase_block_id] = pb
else:
phase_block_id -= 1
with open(p_variant_fn, "w") as out_f:
for pid in xrange(1, phase_block_id+1):
if len(phase_blocks[pid]) == 0:
continue
min_ = min( [x[0] for x in phase_blocks[pid]] )
max_ = max( [x[0] for x in phase_blocks[pid]] )
print >>out_f, "P", pid, min_, max_, max_ - min_, len(phase_blocks[pid]), 1.0 * (max_-min_)/len(phase_blocks[pid])
for p, b1, b2 in phase_blocks[pid]:
rb = ref_base[p]
print >>out_f, "V", pid, p, "%d_%s_%s" % (p,rb,b1), "%d_%s_%s" % (p,rb,b2), left_extent[p], right_extent[p], left_score[p], right_score[p]
def get_phased_reads(self):
q_id_map_fn = fn(self.q_id_map_file)
vmap_fn = fn(self.vmap_file)
p_variant_fn = fn(self.phased_variant_file)
ctg_id = parameters["ctg_id"]
phased_read_fn = fn(self.phased_read_file)
rid_map = {}
with open(q_id_map_fn) as f:
for l in f:
l = l.strip().split()
rid_map[int(l[0])] = l[1]
read_to_variants = {}
variant_to_reads = {}
with open(vmap_fn) as f:
for l in f:
l = l.strip().split()
variant = "_".join(l[:3])
read_id = int(l[3])
read_to_variants.setdefault(read_id, set())
read_to_variants[read_id].add(variant)
variant_to_reads.setdefault(variant, set())
variant_to_reads[variant].add(read_id)
variant_to_phase = {}
with open(p_variant_fn) as f:
for l in f:
"""line format example: V 1 6854 6854_A_A 6854_A_G 6854 22781"""
l = l.strip().split()
if l[0] != "V":
continue
pb_id = int(l[1])
variant_to_phase[ l[3] ] = (pb_id, 0)
variant_to_phase[ l[4] ] = (pb_id, 1)
with open(phased_read_fn, "w") as out_f:
for r in read_to_variants:
vl = {}
pl = set()
for v in list( read_to_variants[r] ):
if v in variant_to_phase:
p = variant_to_phase[v]
vl[ p ] = vl.get(p, 0) + 1
pl.add(p[0])
pl = list(pl)
pl.sort()
for p in pl:
if vl.get( (p,0), 0) - vl.get( (p,1), 0) > 1:
print >> out_f, r, ctg_id, p, 0, vl.get( (p,0), 0), vl.get( (p,1), 0), rid_map[r]
elif vl.get( (p,1), 0) - vl.get( (p,0), 0) > 1:
print >> out_f, r, ctg_id, p, 1, vl.get( (p,0), 0), vl.get( (p,1), 0), rid_map[r]
if __name__ == "__main__":
import argparse
import re
parser = argparse.ArgumentParser(description='phasing variants and reads from a bam file')
# we can run this in parallel mode in the future
#parser.add_argument('--n_core', type=int, default=4,
# help='number of processes used for generating consensus')
parser.add_argument('--bam', type=str, help='path to sorted bam file', required=True)
parser.add_argument('--fasta', type=str, help='path to the fasta file of contain the contig', required=True)
parser.add_argument('--ctg_id', type=str, help='contig identifier in the bam file', required=True)
parser.add_argument('--base_dir', type=str, default="./", help='the output base_dir, default to current working directory')
args = parser.parse_args()
bam_fn = args.bam
fasta_fn = args.fasta
ctg_id = args.ctg_id
base_dir = args.base_dir
ref_seq = ""
for r in FastaReader(fasta_fn):
rid = r.name.split()[0]
if rid != ctg_id:
continue
ref_seq = r.sequence.upper()
PypeThreadWorkflow.setNumThreadAllowed(1, 1)
wf = PypeThreadWorkflow()
bam_file = makePypeLocalFile(bam_fn)
vmap_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "variant_map") )
vpos_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "variant_pos") )
q_id_map_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "q_id_map") )
parameters = {}
parameters["ctg_id"] = ctg_id
parameters["ref_seq"] = ref_seq
parameters["base_dir"] = base_dir
make_het_call_task = PypeTask( inputs = { "bam_file": bam_file },
outputs = { "vmap_file": vmap_file, "vpos_file": vpos_file, "q_id_map_file": q_id_map_file },
parameters = parameters,
TaskType = PypeThreadTaskBase,
URL = "task://localhost/het_call") (make_het_call)
wf.addTasks([make_het_call_task])
atable_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "atable") )
parameters = {}
parameters["ctg_id"] = ctg_id
parameters["base_dir"] = base_dir
generate_association_table_task = PypeTask( inputs = { "vmap_file": vmap_file },
outputs = { "atable_file": atable_file },
parameters = parameters,
TaskType = PypeThreadTaskBase,
URL = "task://localhost/g_atable") (generate_association_table)
wf.addTasks([generate_association_table_task])
phased_variant_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "phased_variants") )
get_phased_blocks_task = PypeTask( inputs = { "vmap_file": vmap_file, "atable_file": atable_file },
outputs = { "phased_variant_file": phased_variant_file },
TaskType = PypeThreadTaskBase,
URL = "task://localhost/get_phased_blocks") (get_phased_blocks)
wf.addTasks([get_phased_blocks_task])
phased_read_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "phased_reads") )
get_phased_reads_task = PypeTask( inputs = { "vmap_file": vmap_file,
"q_id_map_file": q_id_map_file,
"phased_variant_file": phased_variant_file },
outputs = { "phased_read_file": phased_read_file },
parameters = {"ctg_id": ctg_id},
TaskType = PypeThreadTaskBase,
URL = "task://localhost/get_phased_reads") (get_phased_reads)
wf.addTasks([get_phased_reads_task])
wf.refreshTargets()
#with open("fc_phasing_wf.dot", "w") as f:
# print >>f, wf.graphvizDot
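An illustrative invocation of the script above using the four arguments its argparse section defines; the input file names and output directory are hypothetical, and outputs land under <base_dir>/<ctg_id>/ as created by the tasks.

import subprocess

subprocess.check_call([
    "python", "fc_phasing.py",          # assumes the script is on the current path
    "--bam", "aligned.sorted.bam",
    "--fasta", "contigs.fa",
    "--ctg_id", "ctg_000001",
    "--base_dir", "./phasing_out",
])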
| 33.897345
| 155
| 0.477339
| 2,431
| 19,152
| 3.541752
| 0.109009
| 0.01115
| 0.011847
| 0.027642
| 0.427178
| 0.370383
| 0.32288
| 0.251336
| 0.195122
| 0.159698
| 0
| 0.04929
| 0.407843
| 19,152
| 564
| 156
| 33.957447
| 0.709902
| 0.024697
| 0
| 0.292035
| 0
| 0
| 0.039787
| 0.006291
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011062
| false
| 0.002212
| 0.019912
| 0
| 0.033186
| 0.019912
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5362c554ddeabe0765b25a2b55000d5493c91742
| 8,490
|
py
|
Python
|
augmentation/combineds/wgan_gp_straight.py
|
pabloduque0/cnn_deconv_viz
|
3fc3d8a9dbad8e8e28d4df4023bdb438e4c9cf85
|
[
"MIT"
] | null | null | null |
augmentation/combineds/wgan_gp_straight.py
|
pabloduque0/cnn_deconv_viz
|
3fc3d8a9dbad8e8e28d4df4023bdb438e4c9cf85
|
[
"MIT"
] | null | null | null |
augmentation/combineds/wgan_gp_straight.py
|
pabloduque0/cnn_deconv_viz
|
3fc3d8a9dbad8e8e28d4df4023bdb438e4c9cf85
|
[
"MIT"
] | null | null | null |
from keras.datasets import mnist
from keras.layers.merge import _Merge
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import RMSprop
from functools import partial
from augmentation.discriminators import wasserstein_discriminator
from augmentation.generators import wasserstein_generator
import keras.backend as K
import matplotlib.pyplot as plt
import sys
import numpy as np
class RandomWeightedAverage(_Merge):
"""Provides a (random) weighted average between real and generated image samples"""
def _merge_function(self, inputs):
alpha = K.random_uniform((32, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
class WGANGP():
def __init__(self, img_shape, noise_shape):
self.img_shape = img_shape
self.noise_shape = noise_shape
# The following parameter and optimizer are set as recommended in the paper
self.n_critic = 5
optimizer = RMSprop(lr=0.00005)
# Build the generator and critic
self.generator = wasserstein_generator.create_model(noise_shape)
self.critic = wasserstein_discriminator.create_model(img_shape)
#-------------------------------
# Construct Computational Graph
# for the Critic
#-------------------------------
# Freeze generator's layers while training critic
self.generator.trainable = False
# Image input (real sample)
real_img = Input(shape=self.img_shape)
# Noise input
z_disc = Input(shape=(self.noise_shape,))
# Generate image based on noise (fake sample)
fake_img = self.generator(z_disc)
# Discriminator determines validity of the real and fake images
fake = self.critic(fake_img)
valid = self.critic(real_img)
# Construct weighted average between real and fake images
interpolated_img = RandomWeightedAverage()([real_img, fake_img])
# Determine validity of weighted sample
validity_interpolated = self.critic(interpolated_img)
# Use Python partial to provide loss function with additional
# 'averaged_samples' argument
partial_gp_loss = partial(self.gradient_penalty_loss,
averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names
self.critic_model = Model(inputs=[real_img, z_disc],
outputs=[valid, fake, validity_interpolated])
self.critic_model.compile(loss=[self.wasserstein_loss,
self.wasserstein_loss,
partial_gp_loss],
optimizer=optimizer,
loss_weights=[1, 1, 10])
#-------------------------------
# Construct Computational Graph
# for Generator
#-------------------------------
# For the generator we freeze the critic's layers
self.critic.trainable = False
self.generator.trainable = True
# Sampled noise for input to generator
z_gen = Input(shape=(100,))
# Generate images based on noise
img = self.generator(z_gen)
# Discriminator determines validity
valid = self.critic(img)
# Defines generator model
self.generator_model = Model(z_gen, valid)
self.generator_model.compile(loss=self.wasserstein_loss, optimizer=optimizer)
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
"""
Computes gradient penalty based on prediction and weighted real / fake samples
"""
gradients = K.gradients(y_pred, averaged_samples)[0]
# compute the euclidean norm by squaring ...
gradients_sqr = K.square(gradients)
# ... summing over the rows ...
gradients_sqr_sum = K.sum(gradients_sqr,
axis=np.arange(1, len(gradients_sqr.shape)))
# ... and sqrt
gradient_l2_norm = K.sqrt(gradients_sqr_sum)
# compute lambda * (1 - ||grad||)^2 still for each single sample
gradient_penalty = K.square(1 - gradient_l2_norm)
# return the mean as loss over all the batch samples
return K.mean(gradient_penalty)
def wasserstein_loss(self, y_true, y_pred):
return K.mean(y_true * y_pred)
def build_generator(self):
model = Sequential()
model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.noise_shape))
model.add(Reshape((7, 7, 128)))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=4, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=4, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
model.add(Activation("tanh"))
model.summary()
noise = Input(shape=(self.noise_shape,))
img = model(noise)
return Model(noise, img)
def build_critic(self):
model = Sequential()
model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1))
model.summary()
img = Input(shape=self.img_shape)
validity = model(img)
return Model(img, validity)
def train(self, epochs, batch_size, sample_interval=50):
# Adversarial ground truths
valid = -np.ones((batch_size, 1))
fake = np.ones((batch_size, 1))
dummy = np.zeros((batch_size, 1)) # Dummy gt for gradient penalty
for epoch in range(epochs):
for _ in range(self.n_critic):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random batch of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
# Sample generator input
noise = np.random.normal(0, 1, (batch_size, self.noise_shape))
# Train the critic
d_loss = self.critic_model.train_on_batch([imgs, noise],
[valid, fake, dummy])
# ---------------------
# Train Generator
# ---------------------
g_loss = self.generator_model.train_on_batch(noise, valid)
# Plot the progress
print ("%d [D loss: %f] [G loss: %f]" % (epoch, d_loss[0], g_loss))
# If at save interval => save generated image samples
if epoch % sample_interval == 0:
self.sample_images(epoch)
def sample_images(self, epoch):
r, c = 5, 5
noise = np.random.normal(0, 1, (r * c, self.noise_shape))
gen_imgs = self.generator.predict(noise)
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
axs[i,j].axis('off')
cnt += 1
fig.savefig("images/mnist_%d.png" % epoch)
plt.close()
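A minimal construction sketch for the WGANGP class above, assuming the augmentation.* generator and discriminator factories imported at the top accept these shapes; the shapes are illustrative. Note that train() as written reads a global X_train, so a dataset scaled to [-1, 1] would still have to be bound to that name before training, and the batch size of 32 matches the hard-coded alpha shape in RandomWeightedAverage.

gan = WGANGP(img_shape=(28, 28, 1), noise_shape=100)   # illustrative shapes
# X_train = ...                                        # required by train() below
# gan.train(epochs=1000, batch_size=32, sample_interval=200)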
| 37.566372
| 99
| 0.593404
| 1,001
| 8,490
| 4.894106
| 0.22977
| 0.04899
| 0.020004
| 0.027148
| 0.229026
| 0.181466
| 0.124719
| 0.118596
| 0.118596
| 0.106961
| 0
| 0.022398
| 0.284806
| 8,490
| 225
| 100
| 37.733333
| 0.78442
| 0.188928
| 0
| 0.157895
| 0
| 0
| 0.01673
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06015
| false
| 0
| 0.112782
| 0.007519
| 0.225564
| 0.007519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
536436e3554ba3a4da46ab96d890765a1f73000c
| 554
|
py
|
Python
|
tests/test_load.py
|
ocefpaf/xroms
|
763d6e678e28fe074e0aaab26fecd2b74e51a8b0
|
[
"MIT"
] | 4
|
2020-01-21T21:24:17.000Z
|
2020-10-02T03:09:32.000Z
|
tests/test_load.py
|
ocefpaf/xroms
|
763d6e678e28fe074e0aaab26fecd2b74e51a8b0
|
[
"MIT"
] | 1
|
2020-04-08T00:11:39.000Z
|
2020-04-25T08:03:45.000Z
|
tests/test_load.py
|
ocefpaf/xroms
|
763d6e678e28fe074e0aaab26fecd2b74e51a8b0
|
[
"MIT"
] | 1
|
2020-04-06T06:42:36.000Z
|
2020-04-06T06:42:36.000Z
|
'''Test package.'''
import xroms
from glob import glob
import os
def test_open_netcdf():
'''Test xroms.open_netcdf().'''
base = os.path.join(xroms.__path__[0],'..','tests','input')
files = glob('%s/ocean_his_000?.nc' % base)
ds = xroms.open_netcdf(files)
assert ds
def test_open_zarr():
'''Test xroms.open_zarr().'''
base = os.path.join(xroms.__path__[0],'..','tests','input')
files = glob('%s/ocean_his_000?' % base)
ds = xroms.open_zarr(files, chunks={'ocean_time':2})
assert ds
| 21.307692
| 63
| 0.597473
| 77
| 554
| 4.025974
| 0.363636
| 0.116129
| 0.070968
| 0.090323
| 0.354839
| 0.354839
| 0.354839
| 0.354839
| 0.354839
| 0.354839
| 0
| 0.020642
| 0.212996
| 554
| 25
| 64
| 22.16
| 0.690367
| 0.113718
| 0
| 0.307692
| 0
| 0
| 0.149474
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.153846
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
53646f201e13a30e6efd94fa6ebf56d02fafc4af
| 1,381
|
py
|
Python
|
demoproject/demoproject/urls.py
|
alvnary18/django-nvd3
|
4b7dffb1107b8202698212b99c26d1d0097afd1d
|
[
"MIT"
] | 302
|
2015-01-06T14:38:22.000Z
|
2022-01-11T15:28:07.000Z
|
demoproject/demoproject/urls.py
|
alvnary18/django-nvd3
|
4b7dffb1107b8202698212b99c26d1d0097afd1d
|
[
"MIT"
] | 63
|
2015-01-03T14:39:29.000Z
|
2021-04-19T09:29:15.000Z
|
demoproject/demoproject/urls.py
|
alvnary18/django-nvd3
|
4b7dffb1107b8202698212b99c26d1d0097afd1d
|
[
"MIT"
] | 104
|
2015-01-07T21:40:53.000Z
|
2021-02-22T08:21:02.000Z
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^piechart/', views.demo_piechart, name='demo_piechart'),
url(r'^linechart/', views.demo_linechart, name='demo_linechart'),
url(r'^linechart_without_date/', views.demo_linechart_without_date, name='demo_linechart_without_date'),
url(r'^linewithfocuschart/', views.demo_linewithfocuschart, name='demo_linewithfocuschart'),
url(r'^multibarchart/', views.demo_multibarchart, name='demo_multibarchart'),
url(r'^stackedareachart/', views.demo_stackedareachart, name='demo_stackedareachart'),
url(r'^multibarhorizontalchart/', views.demo_multibarhorizontalchart, name='demo_multibarhorizontalchart'),
url(r'^lineplusbarchart/', views.demo_lineplusbarchart, name='demo_lineplusbarchart'),
url(r'^cumulativelinechart/', views.demo_cumulativelinechart, name='demo_cumulativelinechart'),
url(r'^discretebarchart/', views.demo_discretebarchart, name='demo_discretebarchart'),
url(r'^discretebarchart_with_date/', views.demo_discretebarchart_with_date, name='demo_discretebarchart_date'),
url(r'^scatterchart/', views.demo_scatterchart, name='demo_scatterchart'),
url(r'^linechart_with_ampm/', views.demo_linechart_with_ampm, name='demo_linechart_with_ampm'),
# url(r'^demoproject/', include('demoproject.foo.urls')),
]
| 62.772727
| 115
| 0.766836
| 157
| 1,381
| 6.471338
| 0.184713
| 0.059055
| 0.038386
| 0.047244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086169
| 1,381
| 21
| 116
| 65.761905
| 0.805071
| 0.039826
| 0
| 0
| 0
| 0
| 0.397281
| 0.252266
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5366c96f79a37fc8c50479d35ab11dc62e0b3949
| 15,109
|
py
|
Python
|
pipeline/visualization/single_tab.py
|
windblood/kafka_stock
|
8dbe4a1cf5c367b3c210683d4027bbfaf955ed41
|
[
"Apache-2.0"
] | 45
|
2019-08-06T09:06:58.000Z
|
2022-03-14T06:13:33.000Z
|
pipeline/visualization/single_tab.py
|
windblood/kafka_stock
|
8dbe4a1cf5c367b3c210683d4027bbfaf955ed41
|
[
"Apache-2.0"
] | 2
|
2021-05-10T09:23:12.000Z
|
2021-12-20T07:06:54.000Z
|
pipeline/visualization/single_tab.py
|
windblood/kafka_stock
|
8dbe4a1cf5c367b3c210683d4027bbfaf955ed41
|
[
"Apache-2.0"
] | 14
|
2020-03-19T04:38:25.000Z
|
2022-03-16T06:37:04.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 11:47:47 2019
@author: yanyanyu
"""
"""
Tab1-plot1: candlestick
"""
import json
import datetime
import pandas as pd
from math import pi
from random import choice
from pytz import timezone
from bokeh.plotting import figure,show
from bokeh.palettes import all_palettes,Set3
from bokeh.models import ColumnDataSource, Select,HoverTool,LinearAxis, LabelSet,Range1d,PreText,Div
from warehouse import CassandraStorage
from util.util import pandas_factory,symbol_list,splitTextToTriplet,prev_weekday
from util.config import path,timeZone
def read_company(symbol):
with open(path+'visualization/company/{}.json'.format(symbol),'r') as f:
company=json.load(f)
companyOfficers=company['assetProfile']['companyOfficers']
officerString=''
for officer in companyOfficers:
officerString+=str('<br>      '+officer['name']+' - '+officer['title'])
buzzsummary='\n'.join(splitTextToTriplet('.'.join(company['summaryProfile']['longBusinessSummary'].split('.')[:3]),8))
institutionOwnership=company['institutionOwnership']['ownershipList']
institution_list=[]
for institution in institutionOwnership:
institution_list.append([institution['organization'],institution['position']['raw'],institution['pctHeld']['fmt']])
institution_df=pd.DataFrame(institution_list,columns=['organization','position','pctHeld'])
institution_df['organization']=[i.split(',')[0] for i in institution_df['organization']]
return company,buzzsummary,officerString,institution_df
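# Shape of the per-symbol company JSON inferred from the accesses above (illustrative):
#   {"assetProfile": {"companyOfficers": [{"name": ..., "title": ...}, ...]},
#    "summaryProfile": {"longBusinessSummary": ..., "address1": ..., "city": ..., ...},
#    "institutionOwnership": {"ownershipList": [
#        {"organization": ..., "position": {"raw": ...}, "pctHeld": {"fmt": ...}}, ...]}}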
def candlestick():
if '^GSPC' in symbol_list:
symbol_list.remove('^GSPC')
stock_select=Select(value=symbol_list[0],options=symbol_list)
summaryText = Div(text="",width=400)
financialText=Div(text="",width=180)
def update_summary(symbol):
company,buzzsummary,officerString,institution_df=read_company(symbol)
summaryText.text ="""<b><p style="color:blue;">Overview: </p></b>
<b>Company:</b> {}<br>
<b>Address:</b> {} <br>
<b>City:</b> {} <br>
<b>State:</b> {} <br>
<b>Website:</b> <a href="{}">{}</a> <br>
<b>Industry:</b> {} <br>
<b>Sector:</b> {} <br>
<b>Company Officers:</b> {} <br>
<b>Summary:</b> {} <br>""".format(company['price']['longName'],
company['summaryProfile']['address1'],
company['summaryProfile']['city'],
company['summaryProfile']['state'],
company['summaryProfile']['website'],
company['summaryProfile']['website'],
company['summaryProfile']['industry'],
company['summaryProfile']['sector'],
officerString,
buzzsummary)
financialText.text="""<b><p style="color:blue;">Financial: </p></b>
<b>Recommendation: {}</b> <br>
<b>Enterprise Value:</b> {} <br>
<b>Profit Margins:</b> {} <br>
<b>Beta:</b> {} <br>
<b>EBITDA:</b> {} <br>
<b>Total Debt:</b> {} <br>
<b>Total Revenue:</b> {}<br>
<b>DebtToEquity:</b> {}<br>
<b>Revenue Growth:</b> {} <br>
<b>Current Ratio:</b> {} <br>
<b>ROE:</b> {} <br>
<b>ROA:</b> {} <br>
<b>Gross Profits:</b> {} <br>
<b>Quick Ratio:</b> {} <br>
<b>Free Cashflow:</b> {} <br>
""".format(company['financialData']['recommendationKey'].upper(),
company['defaultKeyStatistics']['enterpriseValue']['fmt'],
company['defaultKeyStatistics']['profitMargins']['fmt'],
company['defaultKeyStatistics']['beta']['fmt'],
company['financialData']['ebitda']['fmt'],
company['financialData']['totalDebt']['fmt'],
company['financialData']['totalRevenue']['fmt'],
company['financialData']['debtToEquity']['fmt'],
company['financialData']['revenueGrowth']['fmt'],
company['financialData']['currentRatio']['fmt'],
company['financialData']['returnOnEquity']['fmt'],
company['financialData']['returnOnAssets']['fmt'],
company['financialData']['grossProfits']['fmt'],
company['financialData']['quickRatio']['fmt'],
company['financialData']['freeCashflow']['fmt'])
update_summary(stock_select.value)
# connect to Cassandra database
database=CassandraStorage(symbol_list[0])
database.session.row_factory = pandas_factory
database.session.default_fetch_size = None
query="SELECT * FROM {} WHERE time>'2015-01-01' ALLOW FILTERING;".format('{}_historical'.format(symbol_list[0]))
rslt = database.session.execute(query, timeout=None)
df = rslt._current_rows
# create color list
color=df.close>df.open
color=color.replace(True,'green')
color=color.replace(False,'red')
# set data source
source = ColumnDataSource(data=dict(close=list(df.close.values),
adjusted_close=list(df.adjusted_close.values),
open=list(df.open.values),
high=list(df.high.values),
low=list(df.low.values),
volume=list(df.volume.values),
time=list(df.time.dt.date.values),
color=list(color.values)))
# hover setting
TOOLTIPS = [
("time", "@time{%F}"),
("adjusted close", "$@adjusted_close"),
("close", "$@close"),
("open", "$@open"),
("high", "$@high"),
("low", "$@low"),
("volume","@volume")]
formatters={
'time' : 'datetime'}
hover = HoverTool(tooltips=TOOLTIPS,formatters=formatters,mode='vline')
# create figure
p = figure(title='{} Candlestick'.format(stock_select.value),plot_height=400,
tools="crosshair,save,undo,xpan,xwheel_zoom,xbox_zoom,reset",
active_scroll='xwheel_zoom',
x_axis_type="datetime")
p.add_tools(hover)
p.line('time', 'close', alpha=0.2, line_width=1, color='navy', source=source)
p.segment('time', 'high', 'time', 'low', line_width=1,color="black", source=source)
p.segment('time', 'open', 'time', 'close', line_width=3, color='color', source=source)
p.y_range = Range1d(min(source.data['close'])*0.3, max(source.data['close'])*1.05)
p.extra_y_ranges = {"volumes": Range1d(start=min(source.data['volume'])/2,
end=max(source.data['volume'])*2)}
p.add_layout(LinearAxis(y_range_name="volumes"), 'right')
p.vbar('time', width=3,top='volume', color=choice(all_palettes['Set2'][8]),alpha=0.5, y_range_name="volumes",source=source)
p.xaxis.axis_label = 'Time'
# set data source
_,_,_,institution_df=read_company(symbol_list[0])
source_ins = ColumnDataSource(data=dict(organization=list(institution_df.organization.values),
pctHeld=list(institution_df.pctHeld.values),
position=list(institution_df.position.values),
color=Set3[12][:len(institution_df)]))
s1=figure(x_range=source_ins.data['organization'],plot_height=300,plot_width=700,title='Institution Ownership')
s1.vbar(x='organization', top='position', width=0.8, color='color', source=source_ins)
s1.xaxis.major_label_orientation = pi/7
labels = LabelSet(x='organization', y='position', text='pctHeld', level='glyph',
x_offset=-15, y_offset=-10, source=source_ins, render_mode='canvas',text_font_size="8pt")
s1.add_layout(labels)
# callback function for the Select widget 'stock_select'
def callback(attr,old,new):
symbol=stock_select.value
_,_,_,institution=read_company(symbol)
if symbol=='S&P500':
symbol='^GSPC'
database=CassandraStorage(symbol)
database.session.row_factory = pandas_factory
database.session.default_fetch_size = None
if symbol=='^GSPC':
symbol='GSPC'
query="SELECT * FROM {} WHERE time>'2015-01-01' ALLOW FILTERING;".format(symbol+'_historical')
rslt = database.session.execute(query, timeout=None)
df = rslt._current_rows
color=df.close>df.open
color=color.replace(True,'green')
color=color.replace(False,'red')
# update source data
source.data=dict(close=list(df.close.values),
adjusted_close=list(df.adjusted_close.values),
open=list(df.open.values),
high=list(df.high.values),
low=list(df.low.values),
volume=list(df.volume.values),
time=list(df.time.dt.date.values),
color=list(color.values))
source_ins.data=dict(organization=list(institution.organization.values),
pctHeld=list(institution.pctHeld.values),
position=list(institution.position.values),
color=Set3[12][:len(institution)])
p.title.text=symbol+' Candlestick'
p.y_range.start=min(source.data['close'])*0.3
p.y_range.end=max(source.data['close'])*1.05
p.extra_y_ranges['volumes'].start=min(source.data['volume'])/2.
p.extra_y_ranges['volumes'].end=max(source.data['volume'])*2.
s1.x_range.factors=source_ins.data['organization']
update_summary(symbol)
stock_select.on_change('value', callback)
return p,stock_select,summaryText,financialText,s1
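# Illustrative wiring (assumption; the actual tab assembly lives outside this function):
#   from bokeh.layouts import column, row
#   p, stock_select, summary_div, financial_div, s1 = candlestick()
#   layout = column(row(stock_select, financial_div), p, s1, summary_div)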
def stream_price():
# connect to s&p500's database
plot_symbol='^GSPC'
database=CassandraStorage(plot_symbol)
database.session.row_factory = pandas_factory
database.session.default_fetch_size = None
# if datetime.datetime.now(timezone('US/Eastern')).time()<datetime.time(9,30):
# query_time=str(datetime.datetime.now().date())
last_trading_day= datetime.datetime.now(timezone(timeZone)).date()
query="SELECT * FROM {} WHERE time>='{}' ALLOW FILTERING;".format(plot_symbol[1:]+'_tick',last_trading_day)
rslt = database.session.execute(query, timeout=None)
df = rslt._current_rows
# wrangle timezone (Cassandra will change datetime to UTC time)
trans_time=pd.DatetimeIndex(pd.to_datetime(df.time,unit='ms')).tz_localize('GMT').tz_convert('US/Pacific').to_pydatetime()
trans_time=[i.replace(tzinfo=None) for i in trans_time]
source= ColumnDataSource()
# hover setting
TOOLTIPS = [
("time", "@time{%F %T}"),
("close", "$@close"),
("volume","@volume")]
formatters={
'time' : 'datetime'}
hover = HoverTool(tooltips=TOOLTIPS,formatters=formatters,mode='vline')
# create plot
p = figure(title='S&P500 Realtime Price',
plot_height=200,
tools="crosshair,save,undo,xpan,xwheel_zoom,ybox_zoom,reset",
x_axis_type="datetime",
y_axis_location="left")
p.add_tools(hover)
p.x_range.follow = "end"
p.x_range.follow_interval = 1000000
p.x_range.range_padding = 0
# during trading
if len(df)>0 \
and datetime.datetime.now(timezone(timeZone)).time()<datetime.time(16,0,0) \
and datetime.datetime.now(timezone(timeZone)).time()>datetime.time(9,30,0):
# init source data to what is already stored in the Cassandra database ('{}_tick'), so that the streaming plot does not start over after a page refresh
source= ColumnDataSource(dict(time=list(trans_time),
close=list(df.close.values),
volume=list(df.volume.values)))
p.y_range = Range1d(min(source.data['close'])/1.005, max(source.data['close'])*1.005)
p.extra_y_ranges = {"volumes": Range1d(start=min(source.data['volume'])*0.5,
end=max(source.data['volume'])*2)}
# no trading history or not during trading hour
else:
source= ColumnDataSource(dict(time=[],
close=[],
volume=[]))
p.y_range = Range1d(0,1e4)
p.extra_y_ranges = {"volumes": Range1d(start=0,
end=1e10)}
p.line(x='time', y='close', alpha=0.2, line_width=3, color='blue', source=source)
p.add_layout(LinearAxis(y_range_name="volumes"), 'right')
p.vbar('time', width=3,top='volume', color=choice(all_palettes['Set2'][8]),alpha=0.5, y_range_name="volumes",source=source)
# get update data from a json file overwritten every ~18 seconds
def _create_prices():
with open(path+'cache/data.json','r') as f:
dict_data = json.load(f)
return float(dict_data['close']),dict_data['volume'],dict_data['time']
# update function for stream plot
def update():
close,volume,time=_create_prices()
new_data = dict(
time=[datetime.datetime.strptime(time[:19], "%Y-%m-%d %H:%M:%S")],
close=[close],
volume=[volume]
)
#print(new_data)
source.stream(new_data)
#print ('update source data',str(time))
return p,update
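# Illustrative registration of the streaming callback (assumption; normally done by the
# caller inside a Bokeh server application):
#   from bokeh.io import curdoc
#   price_plot, update = stream_price()
#   curdoc().add_root(price_plot)
#   curdoc().add_periodic_callback(update, 18000)  # ~18 s, matching the cache refresh noted above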
| 48.117834
| 144
| 0.53253
| 1,544
| 15,109
| 5.100389
| 0.234456
| 0.008762
| 0.010667
| 0.008254
| 0.376381
| 0.293206
| 0.256762
| 0.233651
| 0.225524
| 0.225524
| 0
| 0.016665
| 0.32484
| 15,109
| 313
| 145
| 48.271566
| 0.755318
| 0.057515
| 0
| 0.220339
| 0
| 0
| 0.23785
| 0.015306
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029661
| false
| 0
| 0.050847
| 0
| 0.097458
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
536933e136a1518afd79a7f6f89175f2c5e084a2
| 2,755
|
py
|
Python
|
VirtualMouse-mediapipe.py
|
SanLiWuXun/Virtual-Control
|
c3b38d4e2df201af851ca70a90de1fdc770158e4
|
[
"MIT"
] | null | null | null |
VirtualMouse-mediapipe.py
|
SanLiWuXun/Virtual-Control
|
c3b38d4e2df201af851ca70a90de1fdc770158e4
|
[
"MIT"
] | null | null | null |
VirtualMouse-mediapipe.py
|
SanLiWuXun/Virtual-Control
|
c3b38d4e2df201af851ca70a90de1fdc770158e4
|
[
"MIT"
] | null | null | null |
import cv2
import mediapipe as mp
from time import sleep
import numpy as np
import autopy
import pynput
wCam, hCam = 1280, 720
wScr, hScr = autopy.screen.size()
cap = cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
mouse = pynput.mouse.Controller()
def findNodeDistance(imgHeight, imgWidth, landmarks, index1, index2):
x1 = int(landmarks[index1].x*imgWidth)
y1 = int(landmarks[index1].y*imgHeight)
z1 = int(landmarks[index1].z*imgWidth)
x2 = int(landmarks[index2].x*imgWidth)
y2 = int(landmarks[index2].y*imgHeight)
z2 = int(landmarks[index2].z*imgWidth)
dis = ((x1-x2)**2.0+(y1-y2)**2.0)**0.5
z_dis = abs(z1-z2)
return dis, z_dis
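# Worked example (hypothetical values): for landmarks at (0.50, 0.50) and (0.53, 0.54)
# on a 1280x720 frame, x1=640, y1=360, x2=678, y2=388, so dx=38 px, dy=28 px and
# dis = sqrt(38**2 + 28**2) = about 47.2 px; a click below is registered only when
# dis < 40 and the z separation is also below 20.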
with mp_hands.Hands(
min_detection_confidence=0.8,
min_tracking_confidence=0.5) as hands:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
mp_drawing.draw_landmarks(
image,
hand_landmarks,
mp_hands.HAND_CONNECTIONS)
#cx, cy = int(hand_landmarks.landmark[8].x*wCam), int(hand_landmarks.landmark[8].y*hCam)
targetX, targetY = int(hand_landmarks.landmark[8].x*wScr), int(hand_landmarks.landmark[8].y*hScr)
mouse.position = (targetX, targetY)
xy_dis_8_12, z_dis_8_12 = findNodeDistance(hCam, wCam, hand_landmarks.landmark, 8, 12)
xy_dis_12_16, z_dis_12_16 = findNodeDistance(hCam, wCam, hand_landmarks.landmark, 12, 16)
if xy_dis_8_12 < 40 and z_dis_8_12 < 20:
mouse.click(pynput.mouse.Button.left)
sleep(0.3)
if xy_dis_12_16 < 40 and z_dis_12_16 < 20:
mouse.click(pynput.mouse.Button.left, 2)
sleep(0.3)
cv2.imshow('MediaPipe Hands', image)
if cv2.waitKey(5) & 0xFF == 27:
break
cap.release()
| 35.320513
| 113
| 0.622142
| 374
| 2,755
| 4.44385
| 0.36631
| 0.078219
| 0.075812
| 0.066185
| 0.156438
| 0.156438
| 0.039711
| 0
| 0
| 0
| 0
| 0.054108
| 0.275499
| 2,755
| 78
| 114
| 35.320513
| 0.778557
| 0.132486
| 0
| 0.035088
| 0
| 0
| 0.018044
| 0
| 0
| 0
| 0.001679
| 0
| 0
| 1
| 0.017544
| false
| 0
| 0.105263
| 0
| 0.140351
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
536b5c354fdb15e9bd9be57f477eacb913ce0e22
| 4,793
|
py
|
Python
|
Chapter10/neuroevolution/distributed_helpers.py
|
KonstantinKlepikov/Hands-on-Neuroevolution-with-Python
|
cdd35fa21f2a091d176c140427ab1644d9ecd1f2
|
[
"MIT"
] | 51
|
2019-06-03T12:45:13.000Z
|
2022-02-16T15:48:28.000Z
|
Chapter10/neuroevolution/distributed_helpers.py
|
123mitnik/Hands-on-Neuroevolution-with-Python
|
b65c7dee49303c296ae22f2d82422614bdf7a168
|
[
"MIT"
] | 3
|
2020-02-20T08:13:34.000Z
|
2020-09-16T10:11:52.000Z
|
Chapter10/neuroevolution/distributed_helpers.py
|
123mitnik/Hands-on-Neuroevolution-with-Python
|
b65c7dee49303c296ae22f2d82422614bdf7a168
|
[
"MIT"
] | 30
|
2019-05-24T02:02:47.000Z
|
2022-03-03T22:48:22.000Z
|
import threading
from queue import Queue
from multiprocessing.pool import ApplyResult
import tabular_logger as tlogger
class AsyncWorker(object):
@property
def concurrent_tasks(self):
raise NotImplementedError()
def run_async(self, task_id, task, callback):
raise NotImplementedError()
class WorkerHub(object):
def __init__(self, workers, input_queue, done_queue):
self.done_buffer = Queue()
self.workers = workers
self.available_workers = Queue()
self.done_queue = done_queue
self._cache = {}
self.input_queue = input_queue
for w in workers:
for t in w.concurrent_tasks:
self.available_workers.put((w, t))
self.__initialize_handlers()
def __initialize_handlers(self):
self._input_handler = threading.Thread(
target=WorkerHub._handle_input,
args=(self,)
)
self._input_handler._state = 0
tlogger.info('WorkerHub: _input_handler initialized')
self._output_handler = threading.Thread(
target=WorkerHub._handle_output,
args=(self,)
)
self._output_handler._state = 0
tlogger.info('WorkerHub: _output_handler initialized')
def worker_callback(self, worker, subworker, result):
worker_task = (worker, subworker)
if worker_task in self._cache:
task_id = self._cache[worker_task]
del self._cache[worker_task]
self.done_buffer.put((task_id, result))
else:
tlogger.warn('WorkerHub: Worker task not found in cache', worker_task)
tlogger.warn('WorkerHub: Subworker', subworker)
tlogger.warn('WorkerHub: Unable to process result', result)
# Return worker back
self.available_workers.put(worker_task)
@staticmethod
def _handle_input(self):
try:
while True:
worker_task = self.available_workers.get()
if worker_task is None:
tlogger.info('WorkerHub._handle_input NO MORE WORKERS AVAILABLE')
break
worker, subworker = worker_task
task = self.input_queue.get()
if task is None:
tlogger.info('WorkerHub._handle_input NO MORE INPUTS AVAILABLE')
break
task_id, task = task
self._cache[worker_task] = task_id
# tlogger.info('WorkerHub: put task id: %s in cache keyed by worker task: %s' % (task_id, worker_task))
worker.run_async(subworker, task, callback=self.worker_callback)
except:
tlogger.exception('WorkerHub._handle_input exception thrown')
raise
@staticmethod
def _handle_output(self):
try:
while True:
result = self.done_buffer.get()
if result is None:
tlogger.info('WorkerHub._handle_output done')
break
self.done_queue.put(result)
except:
tlogger.exception('WorkerHub._handle_output exception thrown')
raise
def initialize(self):
self._input_handler.start()
self._output_handler.start()
def close(self):
self.available_workers.put(None)
self.input_queue.put(None)
self.done_buffer.put(None)
class AsyncTaskHub(object):
def __init__(self, input_queue=None, results_queue=None):
if input_queue is None:
input_queue = Queue(64)
self.input_queue = input_queue
self._cache = {}
self.results_queue = None
if results_queue is not None:
self.results_queue = results_queue
self._output_handler = threading.Thread(
target=AsyncTaskHub._handle_output,
args=(self,)
)
self._output_handler.daemon = True
self._output_handler._state = 0
self._output_handler.start()
@staticmethod
def _handle_output(self):
try:
while True:
result = self.results_queue.get()
if result is None:
tlogger.info('AsyncTaskHub._handle_output done')
break
self.put(result)
except:
tlogger.exception('AsyncTaskHub._handle_output exception thrown')
raise
def run_async(self, task, callback=None, error_callback=None):
result = ApplyResult(self._cache, callback, error_callback)
self.input_queue.put((result._job, task))
return result
def put(self, result):
job, result=result
self._cache[job]._set(0, (True, result))
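# Illustrative end-to-end wiring (assumption; `workers` must implement AsyncWorker, and the
# exact ApplyResult constructor signature is Python-version dependent):
#   results_queue = Queue()
#   hub = AsyncTaskHub(results_queue=results_queue)
#   worker_hub = WorkerHub(workers, input_queue=hub.input_queue, done_queue=results_queue)
#   worker_hub.initialize()
#   async_result = hub.run_async(task)   # ApplyResult; .get() blocks until put() delivers the value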
| 32.828767
| 119
| 0.595869
| 515
| 4,793
| 5.285437
| 0.178641
| 0.047759
| 0.043718
| 0.024982
| 0.315944
| 0.221161
| 0.121234
| 0.073475
| 0.073475
| 0.073475
| 0
| 0.001848
| 0.322554
| 4,793
| 145
| 120
| 33.055172
| 0.836464
| 0.025037
| 0
| 0.319328
| 0
| 0
| 0.097258
| 0.036632
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109244
| false
| 0
| 0.033613
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
536e0e040e307bf9f906164571d5de4002db0a3c
| 492
|
py
|
Python
|
tests/settings.py
|
systemallica/django-belt
|
3035a8bad26a108d9c78daaccb81ab8a9a9ebd41
|
[
"MIT"
] | 2
|
2019-10-08T08:56:46.000Z
|
2020-10-10T08:29:43.000Z
|
tests/settings.py
|
systemallica/django-belt
|
3035a8bad26a108d9c78daaccb81ab8a9a9ebd41
|
[
"MIT"
] | null | null | null |
tests/settings.py
|
systemallica/django-belt
|
3035a8bad26a108d9c78daaccb81ab8a9a9ebd41
|
[
"MIT"
] | 2
|
2019-10-08T08:59:54.000Z
|
2021-03-18T18:15:38.000Z
|
DEBUG = True
USE_TZ = True
SECRET_KEY = "dummy"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"rest_framework",
"django_filters",
"belt",
"tests.app",
]
SITE_ID = 1
ROOT_URLCONF = "tests.app.urls"
MIDDLEWARE = ()
REST_FRAMEWORK = {
"DEFAULT_FILTER_BACKENDS": ("django_filters.rest_framework.DjangoFilterBackend",)
}
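# Illustrative usage (assumption): this module is selected as the settings module for the
# test run, e.g. DJANGO_SETTINGS_MODULE=tests.settings pytest, or through an equivalent
# pytest-django / tox configuration.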
| 18.222222
| 85
| 0.668699
| 54
| 492
| 5.87037
| 0.685185
| 0.123028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004866
| 0.164634
| 492
| 26
| 86
| 18.923077
| 0.766423
| 0
| 0
| 0
| 0
| 0
| 0.506098
| 0.254065
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72559fed2c0dfd5ae9506264bf674d020588c5b0
| 26,482
|
py
|
Python
|
Cell_Generation/fabric_CMC_NMOS.py
|
ALIGN-analoglayout/2018-01-ALIGN
|
931263cec2efc05d58657af9ecca88ae0040c3a5
|
[
"BSD-3-Clause"
] | 8
|
2019-01-10T06:34:26.000Z
|
2021-06-30T05:44:49.000Z
|
Cell_Generation/fabric_CMC_NMOS.py
|
ALIGN-analoglayout/2018-01-ALIGN
|
931263cec2efc05d58657af9ecca88ae0040c3a5
|
[
"BSD-3-Clause"
] | null | null | null |
Cell_Generation/fabric_CMC_NMOS.py
|
ALIGN-analoglayout/2018-01-ALIGN
|
931263cec2efc05d58657af9ecca88ae0040c3a5
|
[
"BSD-3-Clause"
] | 2
|
2019-01-09T19:58:28.000Z
|
2019-03-08T22:58:50.000Z
|
import sys
import json
import transformation
class StopPointGrid:
def __init__( self, nm, layer, direction, width, pitch, offset=0):
self.nm = nm
self.layer = layer
self.direction = direction
assert direction in ['v','h']
self.width = width
self.pitch = pitch
self.offset = offset
self.grid = []
self.legalStopVector = []
self.legalStopIndices = set()
def addGridPoint( self, value, isLegal):
self.grid.append( value)
self.legalStopVector.append( isLegal)
if isLegal:
self.legalStopIndices.add( len(self.grid)-1)
@property
def n( self):
return len(self.grid)-1
def value( self, idx):
whole = idx // self.n
fract = idx % self.n
while fract < 0:
whole -= 1
fract += self.n
assert fract in self.legalStopIndices
return whole * self.grid[-1] + self.grid[fract]
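# Worked example (hypothetical grid): with grid = [0, 10, 54, 98, 108] and legal stop
# indices {1, 3}, n = 4, so value(3) = 98, while value(5) wraps one full pitch:
# whole = 1, fract = 1, giving 1*108 + 10 = 118.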
def segment( self, netName, pinName, center, bIdx, eIdx):
c = center*self.pitch + self.offset
c0 = c - self.width/2
c1 = c + self.width/2
if self.direction == 'h':
rect = [ bIdx, c0, eIdx, c1]
else:
rect = [ c0, bIdx, c1, eIdx]
return { 'netName' : netName, 'pin' : pinName, 'layer' : self.layer, 'rect' : rect}
def segment1( self, netName, pinName, bIdy, eIdy, bIdx, eIdx):
rect = [bIdx, bIdy, eIdx, eIdy]
return { 'netName' : netName, 'pin' : pinName, 'layer' : self.layer, 'rect' : rect}
class UnitCell:
def computeBbox( self):
self.bbox = transformation.Rect(None,None,None,None)
for term in self.terminals:
r = transformation.Rect( *term['rect'])
if self.bbox.llx is None or self.bbox.llx > r.llx: self.bbox.llx = r.llx
if self.bbox.lly is None or self.bbox.lly > r.lly: self.bbox.lly = r.lly
if self.bbox.urx is None or self.bbox.urx < r.urx: self.bbox.urx = r.urx
if self.bbox.ury is None or self.bbox.ury < r.ury: self.bbox.ury = r.ury
def __init__( self ):
self.terminals = []
m0Pitch = 54
m1Pitch = 54
m2Pitch = 54
m3Pitch = 54
plPitch = 54
plOffset = 10
m1Offset = 37
m2Offset = 9
m3Offset = 37
v0Pitch = 36
v1Pitch = m2Pitch
v2Pitch = m2Pitch
dcPitch = 36
finPitch = 27
m0Width = 18
m1Width = 18
m2Width = 18
m3Width = 18
dcWidth = 18
plWidth = 20
lisdWidth = 24
sdtWidth = 24
v0Width = 18
v1Width = 18
v2Width = 18
finWidth = 7
gcutWidth = 18
pcWidth = 18
finDummy = 4
pc_gateExtension = 1 ###Fig. 1 of Ref. [1]
pcLength = (gate_u-1)*plPitch + plWidth + (2*pc_gateExtension)
plActive = 25 ###Fig. 1 of Ref. [1]
extension_y = 0
K_space = extension_y // finPitch
fin_enclosure = 10
activeWidth1 = finPitch*fin_u
activeWidth = finPitch*fin_u1
activePitch = activeWidth1 + (2*finDummy + K_space)*finPitch + extension_y
activeOffset = (activeWidth/2) + finDummy*finPitch - fin_enclosure
pcPitch = activePitch
gcutPitch = activePitch
pc_activeDistance = 30
pc_gcutDistance = 7
pcOffset = activeOffset + pc_activeDistance + pcWidth/2 + activeWidth/2
gcutOffset = activePitch - gcutWidth/2
stoppoint = (dcWidth//2 + plOffset-plWidth//2)//2
self.m0 = StopPointGrid( 'm0', 'M0', 'h', width=m0Width, pitch=m0Pitch)
self.m0.addGridPoint( 0, False)
self.m0.addGridPoint( stoppoint, True)
self.m0.addGridPoint( plOffset, False)
self.m0.addGridPoint( dcPitch-stoppoint, True)
self.m0.addGridPoint( dcPitch, False)
self.m1 = StopPointGrid( 'm1', 'M1', 'v', width=m1Width, pitch=m1Pitch, offset=m1Offset)
self.m1.addGridPoint( 0, False)
self.m1.addGridPoint( stoppoint, True)
self.m1.addGridPoint( 2*m0Pitch, False)
self.m1.addGridPoint( 4*m0Pitch-stoppoint, True)
self.m1.addGridPoint( 4*m0Pitch, False)
self.m2 = StopPointGrid( 'm2', 'M2', 'h', width=m2Width, pitch=m2Pitch, offset=m2Offset)
self.m2.addGridPoint( 0, False)
self.m2.addGridPoint( stoppoint, True)
self.m2.addGridPoint( 2*m0Pitch, False)
self.m2.addGridPoint( 4*m0Pitch-stoppoint, True)
self.m2.addGridPoint( 4*m0Pitch, False)
self.m3 = StopPointGrid( 'm3', 'M3', 'v', width=m3Width, pitch=m3Pitch, offset=m3Offset)
self.m3.addGridPoint( 0, False)
self.m3.addGridPoint( stoppoint, True)
self.m3.addGridPoint( 2*m0Pitch, False)
self.m3.addGridPoint( 4*m0Pitch-stoppoint, True)
self.m3.addGridPoint( 4*m0Pitch, False)
self.pl = StopPointGrid( 'pl', 'poly', 'v', width=plWidth, pitch=plPitch, offset=plOffset)
self.pl.addGridPoint( 0, False)
self.pl.addGridPoint( stoppoint, True)
self.pl.addGridPoint( 2*m0Pitch, False)
self.pl.addGridPoint( 4*m0Pitch-stoppoint, True)
self.pl.addGridPoint( 4*m0Pitch, False)
self.dc = StopPointGrid( 'dc', 'diffcon', 'v', width=dcWidth, pitch=dcPitch)
self.dc.addGridPoint( 0, False)
self.dc.addGridPoint( stoppoint, True)
self.dc.addGridPoint( 2*m0Pitch, False)
self.dc.addGridPoint( 4*m0Pitch-stoppoint, True)
self.dc.addGridPoint( 4*m0Pitch, False)
self.v0 = StopPointGrid( 'v0', 'via0', 'v', width=v0Width, pitch=v0Pitch)
self.v0.addGridPoint( 0, False)
self.v0.addGridPoint( stoppoint, True)
self.v0.addGridPoint( 2*m0Pitch, False)
self.v0.addGridPoint( 4*m0Pitch-stoppoint, True)
self.v0.addGridPoint( 4*m0Pitch, False)
self.v1 = StopPointGrid( 'v1', 'via1', 'h', width=v1Width, pitch=v1Pitch, offset=m2Offset)
self.v1.addGridPoint( 0, False)
self.v1.addGridPoint( stoppoint, True)
self.v1.addGridPoint( 2*m0Pitch, False)
self.v1.addGridPoint( 4*m0Pitch-stoppoint, True)
self.v1.addGridPoint( 4*m0Pitch, False)
self.v2 = StopPointGrid( 'v2', 'via2', 'h', width=v2Width, pitch=v2Pitch, offset=m2Offset)
self.v2.addGridPoint( 0, False)
self.v2.addGridPoint( stoppoint, True)
self.v2.addGridPoint( 2*m0Pitch, False)
self.v2.addGridPoint( 4*m0Pitch-stoppoint, True)
self.v2.addGridPoint( 4*m0Pitch, False)
self.fin = StopPointGrid( 'fin', 'fin', 'h', width=finWidth, pitch=finPitch, offset=finWidth/2)
self.fin.addGridPoint( 0, False)
self.fin.addGridPoint( stoppoint, True)
self.fin.addGridPoint( plOffset, False)
self.fin.addGridPoint( dcPitch-stoppoint, True)
self.fin.addGridPoint( dcPitch, False)
self.active = StopPointGrid( 'active', 'active', 'h', width=activeWidth, pitch=activePitch, offset=activeOffset)
self.active.addGridPoint( 0, False)
self.active.addGridPoint( stoppoint, True)
self.active.addGridPoint( plOffset, False)
self.active.addGridPoint( dcPitch-stoppoint, True)
self.active.addGridPoint( dcPitch, False)
self.nselect = StopPointGrid( 'nselect', 'nselect', 'v', width=activeWidth, pitch=activePitch, offset=activeOffset)
self.gcut = StopPointGrid( 'GCUT', 'GCUT', 'h', width=gcutWidth, pitch=gcutPitch, offset=gcutOffset)
self.gcut.addGridPoint( 0, False)
self.gcut.addGridPoint( stoppoint, True)
self.gcut.addGridPoint( plOffset, False)
self.gcut.addGridPoint( dcPitch-stoppoint, True)
self.gcut.addGridPoint( dcPitch, False)
self.gcut1 = StopPointGrid( 'GCUT', 'GCUT', 'h', width=gcutWidth, pitch=gcutPitch, offset=gcutWidth/2)
self.gcut1.addGridPoint( 0, False)
self.gcut1.addGridPoint( stoppoint, True)
self.gcut1.addGridPoint( plOffset, False)
self.gcut1.addGridPoint( dcPitch-stoppoint, True)
self.gcut1.addGridPoint( dcPitch, False)
self.pc = StopPointGrid( 'pc', 'polycon', 'h', width=pcWidth, pitch=pcPitch, offset=pcOffset)
self.pc.addGridPoint( 0, False)
self.pc.addGridPoint( stoppoint, True)
self.pc.addGridPoint( dcPitch//2, False)
self.pc.addGridPoint( dcPitch-stoppoint, True)
self.pc.addGridPoint( dcPitch, False)
self.lisd = StopPointGrid( 'LISD', 'LISD', 'v', width=lisdWidth, pitch=m1Pitch, offset=m1Offset)
self.lisd.addGridPoint( 0, False)
self.lisd.addGridPoint( stoppoint, True)
self.lisd.addGridPoint( 2*m0Pitch, False)
self.lisd.addGridPoint( 4*m0Pitch-stoppoint, True)
self.lisd.addGridPoint( 4*m0Pitch, False)
self.sdt = StopPointGrid( 'SDT', 'SDT', 'v', width=sdtWidth, pitch=m1Pitch, offset=m1Offset)
self.sdt.addGridPoint( 0, False)
self.sdt.addGridPoint( stoppoint, True)
self.sdt.addGridPoint( 2*m0Pitch, False)
self.sdt.addGridPoint( 4*m0Pitch-stoppoint, True)
self.sdt.addGridPoint( 4*m0Pitch, False)
def addSegment( self, grid, netName, pinName, c, bIdx, eIdx):
segment = grid.segment( netName, pinName, c, bIdx, eIdx)
self.terminals.append( segment)
return segment
def addSegment1( self, grid, netName, pinName, bIdy, eIdy, bIdx, eIdx):
segment1 = grid.segment1( netName, pinName, bIdy, eIdy, bIdx, eIdx)
self.terminals.append( segment1)
return segment1
def m0Segment( self, netName, pinName, x0, x1, y): return self.addSegment( self.m0, netName, pinName, y, x0, x1)
def m1Segment( self, netName, pinName, x, y0, y1): return self.addSegment( self.m1, netName, pinName, x, y0, y1)
def m2Segment( self, netName, pinName, x0, x1, y): return self.addSegment( self.m2, netName, pinName, y, x0, x1)
def m3Segment( self, netName, pinName, x, y0, y1): return self.addSegment( self.m3, netName, pinName, x, y0, y1)
def plSegment( self, netName, pinName, x, y0, y1): return self.addSegment( self.pl, netName, pinName, x, y0, y1)
def dcSegment( self, netName, pinName, x, y0, y1): return self.addSegment( self.dc, netName, pinName, x, y0, y1)
def finSegment( self, netName, pinName, x0, x1, y): return self.addSegment( self.fin, netName, pinName, y, x0, x1)
def activeSegment( self, netName, pinName, x0, x1, y): return self.addSegment( self.active, netName, pinName, y, x0, x1)
def nselectSegment( self, netName, pinName, y0, y1, x0, x1): return self.addSegment1( self.nselect, netName, pinName, y0, y1, x0, x1)
def gcutSegment( self, netName, pinName, x0, x1, y): return self.addSegment( self.gcut, netName, pinName, y, x0, x1)
def gcut1Segment( self, netName, pinName, x0, x1, y): return self.addSegment( self.gcut1, netName, pinName, y, x0, x1)
def pcSegment( self, netName, pinName, x0, x1, y): return self.addSegment( self.pc, netName, pinName, y, x0, x1)
def v0Segment( self, netName, pinName, y0, y1, x0, x1): return self.addSegment1( self.v0, netName, pinName, y0, y1, x0, x1)
def lisdSegment( self, netName, pinName, x, y0, y1): return self.addSegment( self.lisd, netName, pinName, x, y0, y1)
def sdtSegment( self, netName, pinName, x, y0, y1): return self.addSegment( self.sdt, netName, pinName, x, y0, y1)
def v1Segment( self, netName, pinName, x0, x1, y): return self.addSegment( self.v1, netName, pinName, y, x0, x1)
def v2Segment( self, netName, pinName, x0, x1, y): return self.addSegment( self.v2, netName, pinName, y, x0, x1)
def unit( self, x, y):
######## Basic data #############
m1Pitch = 54
m1Offset = 37
m1Width = 18
m2Pitch = 54
m2Width = 18
lisdWidth = 24
sdtWidth = 24
v0Width = 18
v1Width = 18
gcutWidth = 18
v0Pitch = 36
v_enclosure = 7
poly_enclosure = 7
plPitch = 54
finPitch = 27
finWidth = 7
plWidth = 20
plActive = 25
plActive_s = 29
pcWidth = 18
pc_gateExtension = 1
pc_activeDistance = 30
pcLength = (gate_u-1)*plPitch + plWidth + (2*pc_gateExtension)
extension_x = (plPitch - plWidth)/2
extension_y = 0
K_space = extension_y // finPitch
fin_enclosure = 10
######## Derived from Basic data ###########
finDummy = 4
fin = int(round(fin_u + 2*finDummy))
fin1 = int(round(fin_u + 1))
gate = int(round(gate_u + 2))
activeWidth_h = ((gate - 3)) * plPitch + (plActive * 2) + plWidth
activeWidth1 = finPitch*fin_u
activeWidth = finPitch*fin_u1
activePitch = activeWidth1 + (2*finDummy + K_space)*finPitch + extension_y
activeOffset = (activeWidth/2) + finDummy*finPitch - fin_enclosure
pcOffset = activeOffset + pc_activeDistance + pcWidth/2 + activeWidth/2
cont_no = (activeWidth//v0Pitch -1)
pcPitch = activePitch
x_length = ((gate-1)*plPitch) + plWidth + extension_x
y_length = fin * finPitch + extension_y
y_total = y_length*y_cells
m1Length = m2Width + (2*v_enclosure) + (m2Pitch*((fin_u+2)//2))
m1PCLength = m2Width + (2*v_enclosure) + (m2Pitch*((fin_u+4)//2))
m2_tracks = int(round(y_total/m2Pitch))
SA = []
SB = []
DA = []
DB = []
GA = []
GB = []
for k in range(x_cells//2):
if k%2 == 0:
p = 0
else:
p = 4
lS = 8*k + p
lG = lS + 1
lD = lS + gate_u
SA.append(lS)
GA.append(lG)
DA.append(lD)
for k in range(x_cells//2):
if k%2 == 0:
p = 4
else:
p = 0
lS = 8*k + p
lG = lS + 1
lD = lS + gate_u
SB.append(lS)
GB.append(lG)
DB.append(lD)
for i in range(gate):
uc.plSegment( 'g', 'NA', (i+(x*gate)), ((y*y_length)+((y-1)*extension_y)), (((1+y)*y_length)+(y*extension_y)))
if i < (gate-1):
if i == 0 or i == gate_u:
uc.lisdSegment( 'LISD', 'NA', (i+(x*gate)), (finDummy*finPitch - fin_enclosure + y*activePitch), (finDummy*finPitch - fin_enclosure + activeWidth + y*activePitch))
uc.sdtSegment( 'SDT', 'NA', (i+(x*gate)), (finDummy*finPitch - fin_enclosure + y*activePitch), (finDummy*finPitch - fin_enclosure + activeWidth + y*activePitch))
for j in range(cont_no):
uc.v0Segment( 'v0', 'NA', (finDummy*finPitch - fin_enclosure + j*v0Pitch + y*activePitch + v0Width), (finDummy*finPitch - fin_enclosure + j*v0Pitch + y*activePitch + 2*v0Width ), (m1Offset - (m1Width/2) + i*m1Pitch + x*gate*plPitch), (m1Offset - (m1Width/2) + i*m1Pitch +x*gate*plPitch + v0Width) )
else:
uc.v0Segment( 'v0', 'NA', ( pcOffset - pcWidth/2 + y*activePitch), (pcOffset - pcWidth/2 + y*activePitch + v0Width ), (m1Offset - (m1Width/2) + i*m1Pitch + x*gate*plPitch), (m1Offset - (m1Width/2) + i*m1Pitch + x*gate*plPitch + v0Width) )
for i in range(fin):
uc.finSegment( 'fin', 'NA', (((x-1)*extension_x)+ x*x_length), ((1+x)*(x_length)+x*extension_x), (i+(y*fin) + (2*K_space)*y))
uc.gcutSegment( 'GCUT', 'NA', (((x-1)*extension_x)+ x*x_length), ((1+x)*(x_length)+x*extension_x), y)
if y == 0:
uc.gcut1Segment( 'GCUT', 'NA', (((x-1)*extension_x)+ x*x_length), ((1+x)*(x_length)+x*extension_x), 0)
uc.activeSegment( 'active', 'NA', (plActive_s+ x*(plPitch*gate)), ( activeWidth_h + plActive_s + x*(plPitch * gate)), y)
uc.pcSegment( 'PC', 'NA', ( plPitch - pc_gateExtension + x*(gate*plPitch)), (plPitch - pc_gateExtension + x*(gate*plPitch) + pcLength), y)
if x == x_cells -1 and y == y_cells -1:
uc.nselectSegment( 'nselect', 'NA', 0, (((y+1)*y_length)+((y)*extension_y)), (((0-1)*extension_x)), ((1+x)*(x_length)+x*extension_x))
##### Routing for CMC Load
############### M3 routing ###########################
for i in range(3):
if x == 0 and y_cells > 1 and i == 0:
if y == 0:
uc.m3Segment( 'm3', 'SA', (i+(x*gate)), ((m2Pitch*(finDummy//2-1)) - v_enclosure), ((y_cells-1)*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2))))
uc.v2Segment( 'v2', 'NA', ((i+x*gate)*m1Pitch + m1Offset - v1Width/2), ((i+x*gate)*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 1))
if x == 0 and y_cells > 1 and i == 1:
if y == 0:
uc.m3Segment( 'm3', 'G', (i+(x*gate)), ((m2Pitch*(finDummy//2-1)) - v_enclosure), ((y_cells-1)*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2))))
uc.v2Segment( 'v2', 'NA', ((i+x*gate)*m1Pitch + m1Offset - v1Width/2), ((i+x*gate)*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 0))
if x == 0 and y_cells > 1 and i == 2:
if y == 0:
uc.m3Segment( 'm3', 'DA', (i+(x*gate)), ((m2Pitch*(finDummy//2-1)) - v_enclosure), ((y_cells-1)*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2))))
uc.v2Segment( 'v2', 'NA', ((i+x*gate)*m1Pitch + m1Offset - v1Width/2), ((i+x*gate)*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 2))
if x == 1 and y_cells > 1 and i == 0:
if y == 0:
uc.m3Segment( 'm3', 'SB', (i+(x*gate)), ((m2Pitch*(finDummy//2-1)) - v_enclosure), ((y_cells-1)*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2))))
uc.v2Segment( 'v2', 'NA', ((i+x*gate)*m1Pitch + m1Offset - v1Width/2), ((i+x*gate)*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 3))
if x == 1 and y_cells > 1 and i == 2:
if y == 0:
uc.m3Segment( 'm3', 'DB', (i+(x*gate)), ((m2Pitch*(finDummy//2-1)) - v_enclosure), ((y_cells-1)*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2))))
uc.v2Segment( 'v2', 'NA', ((i+x*gate)*m1Pitch + m1Offset - v1Width/2), ((i+x*gate)*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 4))
############### M2 routing ###########################
for i in range((m2_tracks+1)):
if i == (2*y*(m2_tracks //y_cells + K_space)):
uc.m2Segment( 'm2', 'GND', (((x-1)*extension_x)+ x*x_length), ((1+x)*(x_length)+x*extension_x), i)
if i == ((2*y+1)*(m2_tracks //y_cells + K_space)):
uc.m2Segment( 'm2', 'VDD', (((x-1)*extension_x)+ x*x_length), ((1+x)*(x_length)+x*extension_x), i)
if x_cells > 1 and x == 0 and i == (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 0):
uc.m2Segment( 'm2', 'G', (m1Offset - m1Width/2 - v_enclosure), (m1Offset + m1Width/2 + v_enclosure + (x_cells*gate-2)*m1Pitch), i)
if x_cells > 1 and x == 0 and i == (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 1):
uc.m2Segment( 'm2', 'SA', (m1Offset - m1Width/2 - v_enclosure), (m1Offset + m1Width/2 + v_enclosure + (x_cells*gate-2)*m1Pitch), i)
if x_cells > 1 and x == 0 and i == (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 3):
uc.m2Segment( 'm2', 'SB', (m1Offset - m1Width/2 - v_enclosure), (m1Offset + m1Width/2 + v_enclosure + (x_cells*gate-2)*m1Pitch), i)
if x_cells > 1 and x == 0 and i == (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 2):
uc.m2Segment( 'm2', 'DA', (m1Offset - m1Width/2 - v_enclosure), (m1Offset + m1Width/2 + v_enclosure + (x_cells*gate-2)*m1Pitch), i)
if x_cells > 1 and x == 0 and i == (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 4):
uc.m2Segment( 'm2', 'DB', (m1Offset - m1Width/2 - v_enclosure), (m1Offset + m1Width/2 + v_enclosure + (x_cells*gate-2)*m1Pitch), i)
################# M1 routing ######################
if (x_cells - 1 - x) == 0:
if (y % 2) == 0:
for i in DA:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 2))
uc.m1Segment( 'D', 'DA', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in DB:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 4))
uc.m1Segment( 'D', 'DB', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in SA:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 1))
uc.m1Segment( 'S', 'SA', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in SB:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 3))
uc.m1Segment( 'S', 'SB', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
else:
for i in DA:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 4))
uc.m1Segment( 'D', 'DA', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in DB:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 2))
uc.m1Segment( 'D', 'DB', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in SA:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 3))
uc.m1Segment( 'S', 'SA', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
for i in SB:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 1))
uc.m1Segment( 'S', 'SB', i, ((m2Pitch*(finDummy//2-1)) - v_enclosure + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2) - v_enclosure + m1Length + (m2Pitch*(finDummy//2-1))))
if (x_cells - 1 - x) == 0:
for i in GA:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 0))
uc.m1Segment( 'G', 'G', i, (0 - v_enclosure + (m2Pitch*(finDummy//2-1)) + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2)+ (m2Pitch*(finDummy//2-1)) - v_enclosure + m1PCLength))
for i in GB:
uc.v1Segment( 'v1', 'NA', (i*m1Pitch + m1Offset - v1Width/2), (i*m1Pitch + m1Offset + v1Width - v1Width/2), (y*(m2_tracks //y_cells + K_space) + finDummy//2 + 0))
uc.m1Segment( 'G', 'G', i, (0 - v_enclosure + (m2Pitch*(finDummy//2-1)) + y*m2Pitch*(K_space + fin//2) ), (y*m2Pitch*(K_space + fin//2)+ (m2Pitch*(finDummy//2-1)) - v_enclosure + m1PCLength))
if __name__ == "__main__":
fin_u1 = int(sys.argv[1])
x_cells = int(sys.argv[2])
y_cells = int(sys.argv[3])
assert (x_cells%2) == 0
gate_u = 2
if fin_u1%2 != 0:
fin_u = fin_u1 + 1
else:
fin_u = fin_u1
uc = UnitCell()
for (x,y) in ( (x,y) for x in range(x_cells) for y in range(y_cells)):
uc.unit( x, y)
uc.computeBbox()
with open( "./mydesign_dr_globalrouting.json", "wt") as fp:
data = { 'bbox' : uc.bbox.toList(), 'globalRoutes' : [], 'globalRouteGrid' : [], 'terminals' : uc.terminals}
fp.write( json.dumps( data, indent=2) + '\n')
| 55.987315
| 323
| 0.551809
| 3,332
| 26,482
| 4.30042
| 0.073529
| 0.021774
| 0.037965
| 0.02966
| 0.664317
| 0.540791
| 0.472887
| 0.456417
| 0.436318
| 0.415381
| 0
| 0.055736
| 0.29201
| 26,482
| 472
| 324
| 56.105932
| 0.708518
| 0.004871
| 0
| 0.293532
| 0
| 0
| 0.017653
| 0.001223
| 0
| 0
| 0
| 0
| 0.007463
| 1
| 0.069652
| false
| 0
| 0.007463
| 0.044776
| 0.097015
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
725600e7a0a1963a9922b5936348396ff3c1bd52
| 13,457
|
py
|
Python
|
docs/testcases/all_in_one.py
|
tiramtaramta/conduit
|
ae4ca8e64fe64c2b6702d803d799e380fda84a92
|
[
"MIT"
] | null | null | null |
docs/testcases/all_in_one.py
|
tiramtaramta/conduit
|
ae4ca8e64fe64c2b6702d803d799e380fda84a92
|
[
"MIT"
] | null | null | null |
docs/testcases/all_in_one.py
|
tiramtaramta/conduit
|
ae4ca8e64fe64c2b6702d803d799e380fda84a92
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import os
import time
import csv
from webdriver_manager.chrome import ChromeDriverManager
import math
from basic_function import basic_login, find_element
class TestConduit(object):
def setup(self):
browser_options = Options()
browser_options.headless = True
self.driver = webdriver.Chrome(ChromeDriverManager().install(), options=browser_options)
self.driver.get("http://localhost:1667/#/")
def teardown(self):
self.driver.quit()
# -------- A028, TC-0037 Cookie policy notice --------
def test_cookie_process(self):
assert self.driver.find_element_by_id("cookie-policy-panel").is_displayed()
# Accept-cookies flow
self.driver.find_element_by_xpath(
"//button[@class='cookie__bar__buttons__button cookie__bar__buttons__button--accept']").click()
time.sleep(2)
# # Decline-cookies flow
# self.driver.find_element_by_xpath(
# "//button[@class='cookie__bar__buttons__button cookie__bar__buttons__button--decline']").click()
#
# time.sleep(2)
try:
self.driver.find_element_by_id("cookie-policy-panel")
time.sleep(2)
except NoSuchElementException:
return True
return False
# -------- A002, TC-0002 Registration with valid data --------
def test_registration_process(self):
user_input_data = ["user200", "user200@hotmail.com", "Userpass1"]
self.driver.find_element_by_xpath("//a[@href='#/register']").click()
# Fill the input fields with the random user data
for i in range(len(user_input_data)):
self.driver.find_element_by_xpath(f"//fieldset[{i + 1}]/input").send_keys(user_input_data[i])
self.driver.find_element_by_tag_name("button").click()
time.sleep(2)
# Check the text of the successful-registration notification dialog
swal_text = find_element(self.driver, By.CLASS_NAME, "swal-text")
assert swal_text.text == "Your registration was successful!"
# time.sleep(2)
# Close the notification dialog
close_btn = find_element(self.driver, By.XPATH, "//button[normalize-space()='OK']")
close_btn.click()
# self.driver.find_element_by_xpath("//button[normalize-space()='OK']").click()
time.sleep(1)
# Verify that the user is logged in
username_check = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").text
assert username_check == user_input_data[
0], f"Test Failed: Username did not match expected ({user_input_data[0]})."
# time.sleep(2)
# -------- A004, TC-0010 Login with valid data --------
def test_login_process(self):
user_input_data = ["user200", "user200@hotmail.com", "Userpass1"]
self.driver.find_element_by_xpath("//a[@href='#/login']").click()
# Fill in the login form
for i in range(len(user_input_data) - 1):
self.driver.find_element_by_xpath(f"//fieldset[{i + 1}]/input").send_keys(user_input_data[i + 1])
time.sleep(1)
self.driver.find_element_by_tag_name("button").click()
time.sleep(3)
# Verify that the user is logged in
username_check = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").text
assert username_check == user_input_data[0], f"Test Failed: User is not logged in ({user_input_data[0]})."
time.sleep(2)
# -------- A010, TC-0034 Edit own profile, change profile picture --------
def test_edit_settings_process(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[@href='#/settings']").click()
time.sleep(2)
# Verify that the Your Settings page is displayed
settings_check = self.driver.find_element_by_tag_name("h1").text
assert settings_check == "Your Settings", f"Test Failed: Page names did not match expected ({settings_check})."
time.sleep(3)
# Read the prepared data
with open('edit_user.csv') as article_file:
csv_reader = csv.reader(article_file, delimiter=';')
for row in csv_reader:
user_update = row
time.sleep(2)
# Fill the individual rows of the input form with the data
user_picture = self.driver.find_element_by_class_name("form-control")
user_bio = self.driver.find_element_by_xpath("//textarea[@placeholder='Short bio about you']")
user_picture.clear()
user_picture.send_keys(user_update[0])
user_bio.clear()
user_bio.send_keys(user_update[1])
time.sleep(1)
self.driver.find_element_by_xpath("//button[normalize-space()='Update Settings']").click()
time.sleep(2)
# Check the text of the successful-update notification dialog
assert self.driver.find_element_by_class_name("swal-title").text == "Update successful!"
time.sleep(2)
# Close the notification dialog
self.driver.find_element_by_xpath("//button[normalize-space()='OK']").click()
time.sleep(1)
# Verify the changes made to the user's profile
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").click()
time.sleep(2)
img_check = self.driver.find_element_by_class_name("user-img").get_attribute("src")
assert img_check == user_update[
0], f"Test Failed: Image did not match expected ({user_update[0]})."
bio_check = self.driver.find_element_by_css_selector("div[class='user-info'] p").text
assert bio_check == user_update[
1], f"Test Failed: User's bio did not match expected ({user_update[1]})."
time.sleep(2)
# -------- A005, TC-0003 Logout --------
def test_logout_process(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//i[@class='ion-android-exit']").click()
time.sleep(2)
# Verify that the user is logged out
sign_out_check = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/login')]").text
assert sign_out_check == f"{sign_out_check}", f"Test Failed: User is logged in."
time.sleep(1)
# -------- A006, TC-0015 Create a new post with valid data --------
def test_create_post(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[@href='#/editor']").click()
with open('new_post_content.csv') as article_file:
csv_reader = csv.reader(article_file, delimiter=';')
for row in csv_reader:
new_article_data = row
time.sleep(2)
# Fill in the input form
self.driver.find_element_by_xpath("//input[starts-with(@placeholder,'Article')]").send_keys(new_article_data[0])
self.driver.find_element_by_xpath("//input[starts-with(@placeholder,'What')]").send_keys(new_article_data[1])
self.driver.find_element_by_xpath("//textarea[starts-with(@placeholder,'Write')]").send_keys(
new_article_data[2])
self.driver.find_element_by_xpath("//input[@placeholder='Enter tags']").send_keys(new_article_data[3])
time.sleep(1)
self.driver.find_element_by_css_selector("button[type='submit']").click()
time.sleep(2)
# Verify that the post was created
title_check = self.driver.find_element_by_tag_name("h1").text
assert title_check == new_article_data[
0], f"Test Failed: Content title did not match expected ({new_article_data[0]})."
time.sleep(2)
# -------- A006, TC-0015 New data entry with valid data (batch) --------
def test_create_posts_process(self):
basic_login(self.driver)
for i in range(1):
with open('contents.csv') as article_file:
csv_reader = csv.reader(article_file, delimiter=';')
for row in csv_reader:
new_article_data = row
# Fill in the input form
self.driver.find_element_by_xpath("//a[@href='#/editor']").click()
time.sleep(4)
self.driver.find_element_by_xpath("//input[@placeholder='Article Title']").send_keys(
new_article_data[0])
self.driver.find_element_by_xpath("//input[starts-with(@placeholder,'What')]").send_keys(
new_article_data[1])
self.driver.find_element_by_xpath("//textarea[starts-with(@placeholder,'Write')]").send_keys(
new_article_data[2])
self.driver.find_element_by_xpath("//input[@placeholder='Enter tags']").send_keys(
new_article_data[3])
time.sleep(1)
self.driver.find_element_by_css_selector("button[type='submit']").click()
time.sleep(2)
# Verify that the post was created
title_check = self.driver.find_element_by_tag_name("h1").text
assert title_check == new_article_data[
0], f"Test Failed: Content title did not match expected ({new_article_data[0]})."
time.sleep(4)
# -------- A015, TC-0024 Delete own post --------
def test_delete_post_process(self):
basic_login(self.driver)
my_articles = self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]")
my_articles.click()
time.sleep(2)
articles_list = self.driver.find_elements_by_tag_name("h1")
if len(articles_list) > 0:
articles_list[0].click()
time.sleep(3)
self.driver.find_element_by_xpath("//*[@id='app']/div/div[1]/div/div/span/button/span").click()
time.sleep(2)
# Verify that the post was really deleted
my_articles.click()
time.sleep(2)
new_articles_list = self.driver.find_elements_by_tag_name("h1")
assert not new_articles_list[0] == articles_list[
0], f"Test Failed: Content is not deleted ({articles_list[0]})."
# -------- A029 Export data from the UI --------
def test_export_my_last_post(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/@')]").click()
time.sleep(2)
articles_list = self.driver.find_elements_by_tag_name("h1")
if os.path.exists("my_last_article.txt"):
os.remove("my_last_article.txt")
else:
pass
articles_list[0].click()
time.sleep(2)
article_title = self.driver.find_element_by_tag_name("h1").text
article_text = self.driver.find_element_by_tag_name("p").text
with open("my_last_article.txt", "a") as my_txt:
my_txt.write(f"{article_title};{article_text};\n")
time.sleep(3)
# check the exported content
with open("my_last_article.txt", "r") as my_txt2:
my_txt = my_txt2.readline()
my_txt_list = my_txt.split(";")
assert my_txt_list[0] == article_title, f"Test Failed: Content title is not exported."
assert my_txt_list[1] == article_text, f"Test Failed: Content text is not exported."
# -------- A007, TC-0025 View the list of posts --------
def test_global_feed_list(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/')]").click()
time.sleep(2)
articles_list = self.driver.find_elements_by_xpath("//div[@class='article-preview']/a/h1")
if os.path.exists("titles_list.csv"):
os.remove("titles_list.csv")
else:
pass
for i in range(len(articles_list)):
article_title = articles_list[i].text
with open('titles_list.csv', 'a', encoding="utf-8") as csv_titles:
csv_titles.write(f"{article_title};")
# check the length of the list
with open('titles_list.csv', 'r', encoding="utf-8") as csv_titles2:
check_articles = csv.reader(csv_titles2, delimiter=';')
for row in check_articles:
check_articles_list = row
assert len(articles_list) == len(
check_articles_list) - 1, f"Test Failed: The length of the lists are not exactly the same."
# -------- A007, TC-0025 View the list of posts (with pagination) --------
def test_global_feed_pagination(self):
basic_login(self.driver)
self.driver.find_element_by_xpath("//a[starts-with(@href, '#/')]").click()
time.sleep(2)
articles_list = self.driver.find_elements_by_xpath("//div[@class='article-preview']/a/h1")
# use the pagination buttons
pages = self.driver.find_elements_by_class_name("page-link")
for page in pages:
page.click()
time.sleep(1)
# Verify that all result pages were traversed
assert len(pages) == int(math.ceil(
len(articles_list) / 10)), f"Test Failed: The length of the list and pagination not exactly the same."
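# Typical invocation (assumption: pytest discovers the TestConduit class and its
# setup/teardown methods):
#   python -m pytest docs/testcases/all_in_one.py -q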
| 37.48468
| 120
| 0.630081
| 1,690
| 13,457
| 4.768047
| 0.172189
| 0.080665
| 0.090345
| 0.119881
| 0.598784
| 0.555473
| 0.498263
| 0.451104
| 0.438819
| 0.409531
| 0
| 0.017895
| 0.235937
| 13,457
| 358
| 121
| 37.589385
| 0.765804
| 0.137846
| 0
| 0.425121
| 0
| 0
| 0.215522
| 0.09846
| 0
| 0
| 0
| 0
| 0.077295
| 1
| 0.062802
| false
| 0.019324
| 0.048309
| 0
| 0.125604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
725642bee9a909399840fd99543e731184a069ea
| 819
|
py
|
Python
|
IV_semester/os/configs.py
|
dainiusjocas/labs
|
25aa0ae2032681dbaf0afd83f3d80bedddea6407
|
[
"Beerware"
] | 1
|
2019-04-16T22:05:42.000Z
|
2019-04-16T22:05:42.000Z
|
IV_semester/os/configs.py
|
dainiusjocas/labs
|
25aa0ae2032681dbaf0afd83f3d80bedddea6407
|
[
"Beerware"
] | null | null | null |
IV_semester/os/configs.py
|
dainiusjocas/labs
|
25aa0ae2032681dbaf0afd83f3d80bedddea6407
|
[
"Beerware"
] | null | null | null |
#!/usr/bin/env python
''' This module provides configuration options for OS project. No more magic numbers! '''
BLOCK_SIZE = 16 # words
WORD_SIZE = 4 # bytes
# length of RS in blocks
RESTRICTED_LENGTH = 1
# length of DS in blocks
DS_LENGTH = 6
# timer value
TIMER_VALUE = 10
# buffer size
BUFFER_SIZE = 16
# number of blocks in HD
HD_BLOCKS_SIZE = 500
# default priorities
ROOT_PRIORITY = 40
VM_PRIORITY = 50
LOADER_PRIORITY = 60
INTERRUPT_PRIORITY = 70
PRINT_PRIORITY = 70
# Process states
RUNNING_STATE = 'running'
READY_STATE = 'ready'
BLOCKED_STATE = 'blocked'
# Page tables
PAGE_TABLE_STARTING_BLOCK = 0
PAGE_TABLE_ENDING_BLOCK = 14
# Shared memory
SH_MEMEORY_STARTING_BLOCK = 15
SH_MEMORY_ENDING_BLOCK = 31
# blocks dedicated to user tasks range from USER_STARTING_BLOCK to USER_ENDING_BLOCK
USER_STARTING_BLOCK = 32
USER_ENDING_BLOCK = 255
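For orientation, a few sizes implied by these constants can be computed directly; this is an illustrative sketch, not part of the original module:

# derived sizes (illustrative only)
BLOCK_SIZE_BYTES = BLOCK_SIZE * WORD_SIZE                        # 16 words * 4 bytes = 64 bytes per block
USER_BLOCK_COUNT = USER_ENDING_BLOCK - USER_STARTING_BLOCK + 1   # 224 blocks available for user tasks
HD_SIZE_BYTES = HD_BLOCKS_SIZE * BLOCK_SIZE_BYTES                # 500 blocks * 64 bytes = 32,000 bytes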
| 18.2
| 89
| 0.764347
| 126
| 819
| 4.714286
| 0.619048
| 0.065657
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050147
| 0.172161
| 819
| 44
| 90
| 18.613636
| 0.825959
| 0.377289
| 0
| 0
| 0
| 0
| 0.038697
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7256bed763fbd51245f430291a65885eb6f4534d
| 3,022
|
py
|
Python
|
roboticstoolbox/models/URDF/Puma560.py
|
Russ76/robotics-toolbox-python
|
4b3e82a6522757ffde1f83aef8d05b3ad475e9de
|
[
"MIT"
] | null | null | null |
roboticstoolbox/models/URDF/Puma560.py
|
Russ76/robotics-toolbox-python
|
4b3e82a6522757ffde1f83aef8d05b3ad475e9de
|
[
"MIT"
] | null | null | null |
roboticstoolbox/models/URDF/Puma560.py
|
Russ76/robotics-toolbox-python
|
4b3e82a6522757ffde1f83aef8d05b3ad475e9de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
from roboticstoolbox.robot.ERobot import ERobot
from math import pi
class Puma560(ERobot):
"""
Class that imports a Puma 560 URDF model
``Puma560()`` is a class which imports a Unimation Puma560 robot definition
from a URDF file. The model describes its kinematic and graphical
characteristics.
.. runblock:: pycon
>>> import roboticstoolbox as rtb
>>> robot = rtb.models.URDF.Puma560()
>>> print(robot)
Defined joint configurations are:
- qz, zero joint angle configuration, 'L' shaped configuration
- qr, vertical 'READY' configuration
- qs, arm is stretched out in the x-direction
- qn, arm is at a nominal non-singular configuration
.. warning:: This file has been modified so that the zero-angle pose is the
same as the DH model in the toolbox. ``j3`` rotation is changed from
-𝜋/2 to 𝜋/2. Dimensions are also slightly different. Both models
include the pedestal height.
.. note:: The original file is from https://github.com/nimasarli/puma560_description/blob/master/urdf/puma560_robot.urdf.xacro
.. codeauthor:: Jesse Haviland
.. sectionauthor:: Peter Corke
"""
def __init__(self):
links, name, urdf_string, urdf_filepath = self.URDF_read(
"puma560_description/urdf/puma560_robot.urdf.xacro"
)
super().__init__(
links,
name=name,
urdf_string=urdf_string,
urdf_filepath=urdf_filepath,
)
self.manufacturer = "Unimation"
# self.ee_link = self.ets[9]
# ready pose, arm up
self.qr = np.array([0, pi / 2, -pi / 2, 0, 0, 0])
self.qz = np.zeros(6)
self.addconfiguration("qr", self.qr)
self.addconfiguration("qz", self.qz)
# zero angles, upper arm horizontal, lower arm straight up
self.addconfiguration_attr("qz", np.array([0, 0, 0, 0, 0, 0]))
# reference pose, arm to the right, elbow up
self.addconfiguration_attr(
"ru", np.array([-0.0000, 0.7854, 3.1416, -0.0000, 0.7854, 0.0000])
)
# reference pose, arm to the right, elbow up
self.addconfiguration_attr(
"rd", np.array([-0.0000, -0.8335, 0.0940, -3.1416, 0.8312, 3.1416])
)
# reference pose, arm to the left, elbow up
self.addconfiguration_attr(
"lu", np.array([2.6486, -3.9270, 0.0940, 2.5326, 0.9743, 0.3734])
)
# reference pose, arm to the left, elbow down
self.addconfiguration_attr(
"ld", np.array([2.6486, -2.3081, 3.1416, 0.6743, 0.8604, 2.6611])
)
# straight and horizontal
self.addconfiguration_attr("qs", np.array([0, 0, -pi / 2, 0, 0, 0]))
# nominal table top picking pose
self.addconfiguration_attr("qn", np.array([0, pi / 4, pi, 0, pi / 4, 0]))
if __name__ == "__main__": # pragma nocover
robot = Puma560()
print(robot)
| 31.154639
| 130
| 0.610192
| 408
| 3,022
| 4.433824
| 0.401961
| 0.011056
| 0.092869
| 0.05749
| 0.162521
| 0.096186
| 0.096186
| 0.063018
| 0.063018
| 0.063018
| 0
| 0.08113
| 0.273991
| 3,022
| 96
| 131
| 31.479167
| 0.743391
| 0.453011
| 0
| 0.108108
| 0
| 0
| 0.054159
| 0.031593
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.081081
| 0
| 0.135135
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7259d1c0671ff1b759aee401e67ca154f987dcca
| 5,059
|
py
|
Python
|
ntpclients/ntptrace.py
|
OptimalRanging/NTPsec
|
7fa9b38c3e91f96b173ffa02bafa29cf81173cf7
|
[
"CC-BY-4.0",
"BSD-2-Clause",
"NTP",
"MIT",
"BSD-3-Clause"
] | null | null | null |
ntpclients/ntptrace.py
|
OptimalRanging/NTPsec
|
7fa9b38c3e91f96b173ffa02bafa29cf81173cf7
|
[
"CC-BY-4.0",
"BSD-2-Clause",
"NTP",
"MIT",
"BSD-3-Clause"
] | null | null | null |
ntpclients/ntptrace.py
|
OptimalRanging/NTPsec
|
7fa9b38c3e91f96b173ffa02bafa29cf81173cf7
|
[
"CC-BY-4.0",
"BSD-2-Clause",
"NTP",
"MIT",
"BSD-3-Clause"
] | 1
|
2021-09-24T18:19:49.000Z
|
2021-09-24T18:19:49.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ntptrace - trace peers of an NTP server
Usage: ntptrace [-n | --numeric] [-m number | --max-hosts=number]
[-r hostname | --host=hostname] [--help | --more-help]
hostname
See the manual page for details.
"""
# SPDX-License-Identifier: BSD-2-Clause
from __future__ import print_function
import getopt
import re
import subprocess
import sys
try:
import ntp.util
except ImportError as e:
sys.stderr.write(
"ntptrace: can't find Python NTP library.\n")
sys.stderr.write("%s\n" % e)
sys.exit(1)
def get_info(host):
info = ntp_read_vars(0, [], host)
if info is None or 'stratum' not in info:
return
info['offset'] = round(float(info['offset']) / 1000, 6)
info['syncdistance'] = \
(float(info['rootdisp']) + (float(info['rootdelay']) / 2)) / 1000
return info
def get_next_host(peer, host):
info = ntp_read_vars(peer, ["srcadr"], host)
if info is None:
return
return info['srcadr']
def ntp_read_vars(peer, vars, host):
obsolete = {'phase': 'offset',
'rootdispersion': 'rootdisp'}
if not len(vars):
do_all = True
else:
do_all = False
outvars = {}.fromkeys(vars)
if do_all:
outvars['status_line'] = {}
cmd = ["ntpq", "-n", "-c", "rv %s %s" % (peer, ",".join(vars))]
if host is not None:
cmd.append(host)
try:
# sadly subprocess.check_output() is not in Python 2.6
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out = proc.communicate()[0]
output = out.decode('utf-8').splitlines()
except subprocess.CalledProcessError as e:
print("Could not start ntpq: %s" % e.output, file=sys.stderr)
raise SystemExit(1)
except OSError as e:
print("Could not start ntpq: %s" % e.strerror, file=sys.stderr)
raise SystemExit(1)
for line in output:
if re.search(r'Connection refused', line):
return
match = re.search(r'^asso?c?id=0 status=(\S{4}) (\S+), (\S+),', line,
flags=re.IGNORECASE)
if match:
outvars['status_line']['status'] = match.group(1)
outvars['status_line']['leap'] = match.group(2)
outvars['status_line']['sync'] = match.group(3)
iterator = re.finditer(r'(\w+)=([^,]+),?\s?', line)
for match in iterator:
key = match.group(1)
val = match.group(2)
val = re.sub(r'^"([^"]+)"$', r'\1', val)
if key in obsolete:
key = obsolete[key]
if do_all or key in outvars:
outvars[key] = val
return outvars
usage = r"""ntptrace - trace peers of an NTP server
USAGE: ntptrace [-<flag> [<val>] | --<name>[{=| }<val>]]... [host]
-n, --numeric Print IP addresses instead of hostnames
-m, --max-hosts=num Maximum number of peers to trace
-r, --host=str Single remote host
-?, --help Display usage information and exit
--more-help Pass the extended usage text through a pager
Options are specified by doubled hyphens and their name or by a single
hyphen and the flag character.""" + "\n"
try:
(options, arguments) = getopt.getopt(
sys.argv[1:], "m:nr:?",
["help", "host=", "max-hosts=", "more-help", "numeric"])
except getopt.GetoptError as err:
sys.stderr.write(str(err) + "\n")
raise SystemExit(1)
numeric = False
maxhosts = 99
host = '127.0.0.1'
for (switch, val) in options:
if switch == "-m" or switch == "--max-hosts":
errmsg = "Error: -m parameter '%s' not a number\n"
maxhosts = ntp.util.safeargcast(val, int, errmsg, usage)
elif switch == "-n" or switch == "--numeric":
numeric = True
elif switch == "-r" or switch == "--host":
host = val
elif switch == "-?" or switch == "--help" or switch == "--more-help":
print(usage, file=sys.stderr)
raise SystemExit(0)
if len(arguments):
host = arguments[0]
hostcount = 0
while True:
hostcount += 1
info = get_info(host)
if info is None:
break
if not numeric:
host = ntp.util.canonicalize_dns(host)
print("%s: stratum %d, offset %f, synch distance %f" %
(host, int(info['stratum']), info['offset'], info['syncdistance']),
end='')
if int(info['stratum']) == 1:
print(", refid '%s'" % info['refid'], end='')
print()
if (int(info['stratum']) == 0 or int(info['stratum']) == 1 or
int(info['stratum']) == 16):
break
if re.search(r'^127\.127\.\d{1,3}\.\d{1,3}$', info['refid']):
break
if hostcount == maxhosts:
break
next_host = get_next_host(info['peer'], host)
if next_host is None:
break
if re.search(r'^127\.127\.\d{1,3}\.\d{1,3}$', next_host):
break
host = next_host
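The rv-output parsing in ntp_read_vars() hinges on two regular expressions; here is a self-contained sketch of the key=value one against a made-up sample line (the line is an assumption, not real ntpq output):

import re

sample = 'stratum=2, offset=1.234, rootdelay=10.5, refid="GPS"'
pairs = {}
for m in re.finditer(r'(\w+)=([^,]+),?\s?', sample):
    key, val = m.group(1), m.group(2)
    val = re.sub(r'^"([^"]+)"$', r'\1', val)  # strip surrounding quotes, as ntp_read_vars() does
    pairs[key] = val
assert pairs == {'stratum': '2', 'offset': '1.234', 'rootdelay': '10.5', 'refid': 'GPS'}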
| 27.796703
| 77
| 0.555841
| 664
| 5,059
| 4.188253
| 0.302711
| 0.019417
| 0.025171
| 0.012945
| 0.133046
| 0.092053
| 0.071197
| 0.071197
| 0.071197
| 0.020137
| 0
| 0.018508
| 0.284444
| 5,059
| 181
| 78
| 27.950276
| 0.749724
| 0.073137
| 0
| 0.131783
| 0
| 0
| 0.262292
| 0.011971
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0.007752
| 0.054264
| 0
| 0.116279
| 0.054264
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7259dd2585e473152b9b222537cbe864940bc023
| 7,529
|
py
|
Python
|
lbrynet/wallet/server/block_processor.py
|
abueide/lbry
|
7f5deaf6c80422a30b3714d4bf12e028756ed9fe
|
[
"MIT"
] | null | null | null |
lbrynet/wallet/server/block_processor.py
|
abueide/lbry
|
7f5deaf6c80422a30b3714d4bf12e028756ed9fe
|
[
"MIT"
] | null | null | null |
lbrynet/wallet/server/block_processor.py
|
abueide/lbry
|
7f5deaf6c80422a30b3714d4bf12e028756ed9fe
|
[
"MIT"
] | null | null | null |
import struct
import msgpack
from lbrynet.wallet.transaction import Transaction, Output
from torba.server.hash import hash_to_hex_str
from torba.server.block_processor import BlockProcessor
from lbrynet.schema.claim import Claim
from lbrynet.wallet.server.model import ClaimInfo
class LBRYBlockProcessor(BlockProcessor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.env.coin.NET == "regtest":
self.prefetcher.polling_delay = 0.5
self.should_validate_signatures = self.env.boolean('VALIDATE_CLAIM_SIGNATURES', False)
self.logger.info("LbryumX Block Processor - Validating signatures: {}".format(self.should_validate_signatures))
def advance_blocks(self, blocks):
# save height, advance blocks as usual, then hook our claim tx processing
height = self.height + 1
super().advance_blocks(blocks)
pending_undo = []
for index, block in enumerate(blocks):
undo = self.advance_claim_txs(block.transactions, height + index)
pending_undo.append((height+index, undo,))
self.db.write_undo(pending_undo)
def advance_claim_txs(self, txs, height):
# TODO: generate claim undo info!
undo_info = []
add_undo = undo_info.append
update_inputs = set()
for etx, txid in txs:
update_inputs.clear()
tx = Transaction(etx.serialize())
for index, output in enumerate(tx.outputs):
if not output.is_claim:
continue
if output.script.is_claim_name:
add_undo(self.advance_claim_name_transaction(output, height, txid, index))
elif output.script.is_update_claim:
update_input = self.db.get_update_input(output.claim_hash, tx.inputs)
if update_input:
update_inputs.add(update_input)
add_undo(self.advance_update_claim(output, height, txid, index))
else:
info = (hash_to_hex_str(txid), output.claim_id,)
self.logger.error("REJECTED: {} updating {}".format(*info))
for txin in tx.inputs:
if txin not in update_inputs:
abandoned_claim_id = self.db.abandon_spent(txin.txo_ref.tx_ref.hash, txin.txo_ref.position)
if abandoned_claim_id:
add_undo((abandoned_claim_id, self.db.get_claim_info(abandoned_claim_id)))
return undo_info
def advance_update_claim(self, output: Output, height, txid, nout):
claim_id = output.claim_hash
claim_info = self.claim_info_from_output(output, txid, nout, height)
old_claim_info = self.db.get_claim_info(claim_id)
self.db.put_claim_id_for_outpoint(old_claim_info.txid, old_claim_info.nout, None)
if old_claim_info.cert_id:
self.db.remove_claim_from_certificate_claims(old_claim_info.cert_id, claim_id)
if claim_info.cert_id:
self.db.put_claim_id_signed_by_cert_id(claim_info.cert_id, claim_id)
self.db.put_claim_info(claim_id, claim_info)
self.db.put_claim_id_for_outpoint(txid, nout, claim_id)
return claim_id, old_claim_info
def advance_claim_name_transaction(self, output: Output, height, txid, nout):
claim_id = output.claim_hash
claim_info = self.claim_info_from_output(output, txid, nout, height)
if claim_info.cert_id:
self.db.put_claim_id_signed_by_cert_id(claim_info.cert_id, claim_id)
self.db.put_claim_info(claim_id, claim_info)
self.db.put_claim_id_for_outpoint(txid, nout, claim_id)
return claim_id, None
def backup_from_undo_info(self, claim_id, undo_claim_info):
"""
Undo information holds a claim state **before** a transaction changes it
There are 4 possibilities when processing it, of which only 3 are valid ones:
1. the claim is known and the undo info has info, it was an update
2. the claim is known and the undo info doesn't hold any info, it was claimed
3. the claim is unknown and the undo info has info, it was abandoned
4. the claim is unknown and the undo info doesn't hold info, error!
"""
undo_claim_info = ClaimInfo(*undo_claim_info) if undo_claim_info else None
current_claim_info = self.db.get_claim_info(claim_id)
if current_claim_info and undo_claim_info:
# update, remove current claim
self.db.remove_claim_id_for_outpoint(current_claim_info.txid, current_claim_info.nout)
if current_claim_info.cert_id:
self.db.remove_claim_from_certificate_claims(current_claim_info.cert_id, claim_id)
elif current_claim_info and not undo_claim_info:
# claim, abandon it
self.db.abandon_spent(current_claim_info.txid, current_claim_info.nout)
elif not current_claim_info and undo_claim_info:
# abandon, reclaim it (happens below)
pass
else:
# should never happen, unless the database got into an inconsistent state
raise Exception("Unexpected situation occurred on backup, this means the database is inconsistent. "
"Please report. Resetting the data folder (reindex) solves it for now.")
if undo_claim_info:
self.db.put_claim_info(claim_id, undo_claim_info)
if undo_claim_info.cert_id:
cert_id = self._checksig(undo_claim_info.value, undo_claim_info.address)
self.db.put_claim_id_signed_by_cert_id(cert_id, claim_id)
self.db.put_claim_id_for_outpoint(undo_claim_info.txid, undo_claim_info.nout, claim_id)
def backup_txs(self, txs):
self.logger.info("Reorg at height {} with {} transactions.".format(self.height, len(txs)))
undo_info = msgpack.loads(self.db.claim_undo_db.get(struct.pack(">I", self.height)), use_list=False)
for claim_id, undo_claim_info in reversed(undo_info):
self.backup_from_undo_info(claim_id, undo_claim_info)
return super().backup_txs(txs)
def backup_blocks(self, raw_blocks):
self.db.batched_flush_claims()
super().backup_blocks(raw_blocks=raw_blocks)
self.db.batched_flush_claims()
async def flush(self, flush_utxos):
self.db.batched_flush_claims()
return await super().flush(flush_utxos)
def claim_info_from_output(self, output: Output, txid, nout, height):
address = self.coin.address_from_script(output.script.source)
name, value, cert_id = output.script.values['claim_name'], output.script.values['claim'], None
assert txid and address
cert_id = self._checksig(value, address)
return ClaimInfo(name, value, txid, nout, output.amount, address, height, cert_id)
def _checksig(self, value, address):
try:
claim_dict = Claim.from_bytes(value)
cert_id = claim_dict.signing_channel_hash
if not self.should_validate_signatures:
return cert_id
if cert_id:
cert_claim = self.db.get_claim_info(cert_id)
if cert_claim:
certificate = Claim.from_bytes(cert_claim.value)
claim_dict.validate_signature(address, certificate)
return cert_id
except Exception:
pass
| 48.574194
| 119
| 0.663036
| 1,018
| 7,529
| 4.604126
| 0.202358
| 0.09601
| 0.044378
| 0.02987
| 0.313847
| 0.268615
| 0.252187
| 0.207169
| 0.168338
| 0.134414
| 0
| 0.001611
| 0.257936
| 7,529
| 154
| 120
| 48.88961
| 0.837301
| 0.09231
| 0
| 0.173554
| 0
| 0
| 0.046563
| 0.003695
| 0
| 0
| 0
| 0.006494
| 0.008264
| 1
| 0.082645
| false
| 0.016529
| 0.057851
| 0
| 0.214876
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
725c4a78b42553c5dfa61cb7be78dad147ba621d
| 4,584
|
py
|
Python
|
api/app/endpoints/datasets.py
|
historeno/enermaps
|
ad3a97636baa153a56367e374d0fef7f009bf19d
|
[
"Apache-2.0"
] | null | null | null |
api/app/endpoints/datasets.py
|
historeno/enermaps
|
ad3a97636baa153a56367e374d0fef7f009bf19d
|
[
"Apache-2.0"
] | null | null | null |
api/app/endpoints/datasets.py
|
historeno/enermaps
|
ad3a97636baa153a56367e374d0fef7f009bf19d
|
[
"Apache-2.0"
] | null | null | null |
"""Endpoint for the manipulation of datasets
"""
import hashlib
from flask import Response
from flask_restx import Namespace, Resource, abort
from app.common import client
from app.common import datasets as datasets_fcts
from app.common import path
api = Namespace("datasets", description="Datasets related endpoints")
@api.route("/")
class Datasets(Resource):
def get(self):
"""Return a list of all datasets known by the platform"""
datasets = client.get_dataset_list()
if len(datasets) == 0:
abort(404)
add_openaire_links(datasets)
return datasets
@api.route("/full/")
class DatasetsFull(Resource):
def get(self):
"""Return a list of all datasets known by the platform, along with their
variables and time periods"""
datasets = client.get_dataset_list()
if len(datasets) == 0:
abort(404)
for dataset in datasets:
dataset["info"] = client.get_parameters(dataset["ds_id"])
if dataset["info"] is None:
abort(404)
datasets_fcts.process_parameters(
dataset["info"],
dataset_id=dataset["ds_id"],
is_raster=dataset["is_raster"],
)
add_openaire_links(datasets)
return datasets
@api.route("/<int:id>/parameters/")
class DatasetParameters(Resource):
def get(self, id):
"""Return the variables and time periods available in a dataset"""
parameters = client.get_parameters(id)
if parameters is None:
abort(404)
datasets_fcts.process_parameters(parameters)
return parameters
@api.route(
"/layer_name/vector/<int:id>/", defaults={"variable": None, "time_period": None}
)
@api.route("/layer_name/vector/<int:id>/<string:variable>/<string:time_period>/")
@api.route(
"/layer_name/vector/<int:id>/<string:variable>/", defaults={"time_period": None}
)
@api.route(
"/layer_name/vector/<int:id>/-/<string:time_period>/",
defaults={"variable": None},
)
class VectorLayerName(Resource):
def get(self, id, variable=None, time_period=None):
"""Return an unique layer name"""
if variable is not None:
variable = variable.replace("__SLASH__", "/")
layer_name = path.make_unique_layer_name(
path.VECTOR, id, variable=variable, time_period=time_period
)
return Response(layer_name, mimetype="text/plain")
@api.route(
"/layer_name/raster/<int:id>/", defaults={"variable": None, "time_period": None}
)
@api.route("/layer_name/raster/<int:id>/<string:variable>/<string:time_period>/")
@api.route(
"/layer_name/raster/<int:id>/<string:variable>/", defaults={"time_period": None}
)
@api.route(
"/layer_name/raster/<int:id>/-/<string:time_period>/",
defaults={"variable": None},
)
class RasterLayerName(Resource):
def get(self, id, variable=None, time_period=None):
"""Return an unique layer name"""
if variable is not None:
variable = variable.replace("__SLASH__", "/")
layer_name = path.make_unique_layer_name(
path.RASTER, id, variable=variable, time_period=time_period
)
return Response(layer_name, mimetype="text/plain")
@api.route("/legend/<path:layer_name>/")
class Legend(Resource):
def get(self, layer_name):
"""Return the legend of the layer"""
legend = client.get_legend(layer_name)
if legend is None:
abort(404)
return legend
@api.route("/geojson/<path:layer_name>/")
class GeoJSON(Resource):
def get(self, layer_name):
"""Return the GeoJSON file corresponding to the layer"""
geojson = client.get_geojson(layer_name, ignore_intersecting=True)
if geojson is None:
abort(404)
return geojson
@api.route("/areas/")
class Areas(Resource):
def get(self):
"""Return a list of all areas known by the platform"""
areas = client.get_areas()
if len(areas) == 0:
abort(404)
return areas
def add_openaire_links(datasets):
for dataset in datasets:
shared_id = dataset.get("shared_id")
if not shared_id:
dataset["openaireLink"] = "https://enermaps.openaire.eu/"
else:
shared_id_hash = hashlib.md5(shared_id.encode()) # nosec
dataset["openaireLink"] = (
"https://enermaps.openaire.eu/search/dataset?datasetId=enermaps____::{}"
.format(shared_id_hash.hexdigest())
)
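Since add_openaire_links() only depends on hashlib, its behaviour can be sketched in isolation; the records below are made up for illustration:

# illustrative use of add_openaire_links() with made-up records
sample = [{"shared_id": "dataset-001"}, {"title": "record without shared_id"}]
add_openaire_links(sample)
assert sample[1]["openaireLink"] == "https://enermaps.openaire.eu/"
assert sample[0]["openaireLink"].startswith(
    "https://enermaps.openaire.eu/search/dataset?datasetId=enermaps____::")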
| 28.830189
| 88
| 0.630017
| 551
| 4,584
| 5.087114
| 0.194192
| 0.070639
| 0.039957
| 0.051374
| 0.578666
| 0.55726
| 0.527292
| 0.513735
| 0.424545
| 0.383161
| 0
| 0.007165
| 0.238874
| 4,584
| 158
| 89
| 29.012658
| 0.796217
| 0.097077
| 0
| 0.363636
| 0
| 0
| 0.192214
| 0.112145
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081818
| false
| 0
| 0.054545
| 0
| 0.281818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
725c8eebe50fb704a955c92f2df5a2010dd496c0
| 360
|
py
|
Python
|
python/p45.py
|
forewing/lc
|
314468a1a3bb7d38eccf1f34b0d1b7da04a34784
|
[
"CC0-1.0"
] | null | null | null |
python/p45.py
|
forewing/lc
|
314468a1a3bb7d38eccf1f34b0d1b7da04a34784
|
[
"CC0-1.0"
] | null | null | null |
python/p45.py
|
forewing/lc
|
314468a1a3bb7d38eccf1f34b0d1b7da04a34784
|
[
"CC0-1.0"
] | null | null | null |
from typing import List
class Solution:
def jump(self, nums: List[int]) -> int:
n = len(nums)
dp = [float('inf')] * n
dp[0] = 0
tail = 1
for i in range(n):
limit = min(n, i + nums[i] + 1)
for j in range(tail, limit):
dp[j] = min(dp[j], dp[i] + 1)
tail = limit - 1
return dp[-1]
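A quick sanity check with the classic Jump Game II example (a usage sketch, not part of the original file):

if __name__ == "__main__":
    # [2, 3, 1, 1, 4] can be crossed in 2 jumps: index 0 -> 1 -> 4
    print(Solution().jump([2, 3, 1, 1, 4]))  # expected output: 2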
| 24
| 45
| 0.405556
| 53
| 360
| 2.754717
| 0.45283
| 0.054795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034653
| 0.438889
| 360
| 14
| 46
| 25.714286
| 0.688119
| 0
| 0
| 0
| 0
| 0
| 0.008333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
725e0b8e42aaad734ba9a21ce1eb2b48fbf8f5f0
| 2,575
|
py
|
Python
|
ademo.py
|
erikdelange/MicroPython-HTTP-Server
|
54bda9d55ac65b9a6bbf2189098a788add52b344
|
[
"MIT"
] | null | null | null |
ademo.py
|
erikdelange/MicroPython-HTTP-Server
|
54bda9d55ac65b9a6bbf2189098a788add52b344
|
[
"MIT"
] | null | null | null |
ademo.py
|
erikdelange/MicroPython-HTTP-Server
|
54bda9d55ac65b9a6bbf2189098a788add52b344
|
[
"MIT"
] | null | null | null |
import sys
import time
import uasyncio as asyncio
from ahttpserver import sendfile, Server
app = Server()
@app.route("GET", "/")
async def root(reader, writer, request):
writer.write(b"HTTP/1.1 200 OK\r\n")
writer.write(b"Connection: close\r\n")
writer.write(b"Content-Type: text/html\r\n")
writer.write(b"\r\n")
await writer.drain()
await sendfile(writer, "index.html")
try:
print(1/0)
except Exception as e:
print("exception in function root():", e) # exception handled locally
# @app.route("GET", "/") # if uncommented raises route already declared exception
# async def also_root(reader, writer, request):
# return
@app.route("GET", "/favicon.ico")
async def favicon(reader, writer, request):
writer.write(b"HTTP/1.1 200 OK\r\n")
writer.write(b"Connection: close\r\n")
writer.write(b"Content-Type: image/x-icon\r\n")
writer.write(b"\r\n")
await writer.drain()
await sendfile(writer, "favicon.ico")
@app.route("GET", "/api/time")
async def get_time(reader, writer, request):
writer.write(b"HTTP/1.1 200 OK\r\n")
writer.write(b"Connection: close\r\n")
writer.write(b"Content-Type: text/html\r\n")
writer.write(b"\r\n")
await writer.drain()
t = time.localtime()
writer.write(f"{t[2]:02d}-{t[1]:02d}-{t[0]:04d} {t[3]:02d}:{t[4]:02d}:{t[5]:02d}")
print(1/0) # will be caught by global exception handler
@app.route("GET", "/api/stop")
async def stop(reader, writer, request):
writer.write(b"HTTP/1.1 200 OK\r\n")
writer.write(b"Connection: close\r\n")
writer.write(b"\r\n")
await writer.drain()
raise(KeyboardInterrupt)
async def hello():
""" For demo purposes show system is still alive """
count = 0
while True:
print("hello", count)
count += 1
await asyncio.sleep(60)
def set_global_exception_handler():
def handle_exception(loop, context):
# uncaught exceptions raised in route handlers end up here
print("global exception handler:", context)
sys.print_exception(context["exception"])
loop = asyncio.get_event_loop()
loop.set_exception_handler(handle_exception)
if __name__ == "__main__":
try:
set_global_exception_handler()
asyncio.create_task(hello())
asyncio.run(app.start()) # must be last, does not return
except KeyboardInterrupt:
pass
finally:
asyncio.run(app.stop())
asyncio.new_event_loop()
| 28.611111
| 87
| 0.626796
| 365
| 2,575
| 4.350685
| 0.317808
| 0.110831
| 0.11335
| 0.09005
| 0.34068
| 0.34068
| 0.34068
| 0.34068
| 0.34068
| 0.34068
| 0
| 0.023058
| 0.225243
| 2,575
| 89
| 88
| 28.932584
| 0.772932
| 0.113398
| 0
| 0.34375
| 0
| 0.015625
| 0.218003
| 0.030005
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0.015625
| 0.0625
| 0
| 0.09375
| 0.09375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
725f0a434de0934431956914cb716614971e97cb
| 3,851
|
py
|
Python
|
models/audio_net.py
|
vipulSharma18/Deep-Self-Supervised-Audio-Video-Cosegmentation-with-Adaptive-Noise-Cancellation
|
d52695be31a1552d0785f3b6634bde6ef9276a90
|
[
"MIT"
] | null | null | null |
models/audio_net.py
|
vipulSharma18/Deep-Self-Supervised-Audio-Video-Cosegmentation-with-Adaptive-Noise-Cancellation
|
d52695be31a1552d0785f3b6634bde6ef9276a90
|
[
"MIT"
] | null | null | null |
models/audio_net.py
|
vipulSharma18/Deep-Self-Supervised-Audio-Video-Cosegmentation-with-Adaptive-Noise-Cancellation
|
d52695be31a1552d0785f3b6634bde6ef9276a90
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Unet(nn.Module):
def __init__(self, fc_dim=64, num_downs=5, ngf=64, use_dropout=False):
super(Unet, self).__init__()
# construct unet structure
unet_block = UnetBlock(
ngf * 8, ngf * 8, input_nc=None,
submodule=None, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetBlock(
ngf * 8, ngf * 8, input_nc=None,
submodule=unet_block, use_dropout=use_dropout)
unet_block = UnetBlock(
ngf * 4, ngf * 8, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
ngf * 2, ngf * 4, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
ngf, ngf * 2, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
fc_dim, ngf, input_nc=1,
submodule=unet_block, outermost=True)
self.bn0 = nn.BatchNorm2d(1)
self.unet_block = unet_block
def forward(self, x):
x = self.bn0(x)
x = self.unet_block(x)
return x
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetBlock(nn.Module):
def __init__(self, outer_nc, inner_input_nc, input_nc=None,
submodule=None, outermost=False, innermost=False,
use_dropout=False, inner_output_nc=None, noskip=False):
super(UnetBlock, self).__init__()
self.outermost = outermost
self.noskip = noskip
use_bias = False
if input_nc is None:
input_nc = outer_nc
if innermost:
inner_output_nc = inner_input_nc
elif inner_output_nc is None:
inner_output_nc = 2 * inner_input_nc
downrelu = nn.LeakyReLU(0.2, True)
downnorm = nn.BatchNorm2d(inner_input_nc)
uprelu = nn.ReLU(True)
upnorm = nn.BatchNorm2d(outer_nc)
upsample = nn.Upsample(
scale_factor=2, mode='bilinear', align_corners=True)
if outermost:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3, padding=1)
down = [downconv]
up = [uprelu, upsample, upconv]
model = down + [submodule] + up
elif innermost:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upsample, upconv, upnorm]
model = down + up
else:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upsample, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost or self.noskip:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
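A minimal shape-check sketch for the Unet above, assuming a single-channel spectrogram whose height and width are divisible by 2 ** num_downs (the sizes are illustrative):

if __name__ == "__main__":
    net = Unet(fc_dim=64, num_downs=5)
    x = torch.randn(1, 1, 256, 256)  # batch of one 1-channel 256x256 input
    y = net(x)
    print(y.shape)  # expected: torch.Size([1, 64, 256, 256]) -- fc_dim channels, same spatial size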
| 35.657407
| 75
| 0.539081
| 448
| 3,851
| 4.412946
| 0.205357
| 0.067274
| 0.042489
| 0.060698
| 0.425392
| 0.348508
| 0.348508
| 0.348508
| 0.341932
| 0.318159
| 0
| 0.020251
| 0.358868
| 3,851
| 107
| 76
| 35.990654
| 0.780478
| 0.045183
| 0
| 0.404494
| 0
| 0
| 0.002244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044944
| false
| 0
| 0.033708
| 0
| 0.134831
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
726015e732db272b6ddb3ba0b812c3994b6a974f
| 6,117
|
py
|
Python
|
tests/test_core.py
|
cnschema/kgtool
|
599e23a9e8a856625143b171f9c36eb5b00623f6
|
[
"Apache-2.0"
] | 7
|
2018-08-22T01:09:40.000Z
|
2022-03-31T18:03:33.000Z
|
tests/test_core.py
|
cnschema/kgtool
|
599e23a9e8a856625143b171f9c36eb5b00623f6
|
[
"Apache-2.0"
] | 2
|
2020-05-09T12:01:15.000Z
|
2021-06-01T22:17:12.000Z
|
tests/test_core.py
|
cnschema/kgtool
|
599e23a9e8a856625143b171f9c36eb5b00623f6
|
[
"Apache-2.0"
] | 8
|
2018-03-07T01:28:32.000Z
|
2020-09-06T18:27:27.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Path hack
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
try:
import unittest2 as unittest
except ImportError:
import unittest
from kgtool.core import * # noqa
class CoreTestCase(unittest.TestCase):
def setUp(self):
pass
def test_file2abspath(self):
tin = "test.json"
tout = file2abspath(tin, __file__)
logging.info(" {} => {}".format(tin, tout))
assert tout.endswith(u"tests/" + tin), tout
tin = "../test.json"
tout = file2abspath(tin)
logging.info(" {} => {}".format(tin, tout))
assert tout.endswith(
u"kgtool/" + os.path.basename(tin)), tout
def test_file2json(self):
filename = "test_core_file.json"
filename = file2abspath(filename, __file__)
ret = file2json(filename)
assert len(ret) == 3
def test_file2iter(self):
filename = "test_core_file.json"
filename = file2abspath(filename, __file__)
str_iter = file2iter(filename)
assert len(list(str_iter)) == 5
def test_json_get(self):
json_data = {"a": {"b": 1}, "c": ["d"], "e": "f"}
assert type(json_get(json_data, ["a"])) == dict
assert json_get(json_data, ["k"]) is None
assert json_get(json_data, ["k"], 10) == 10
assert json_get(json_data, ["a", "b"], 10) == 1
assert json_get(json_data, ["a", "k"], 10) == 10
assert json_get(json_data, ["c", "d"], 10) is None
assert json_get(json_data, ["e", "k"], 10) is None
assert type(json_get(json_data, ["c"])) == list
json_data = {
"father": {"name": "john"},
"birthPlace": "Beijing"
}
assert json_get(json_data, ["father", "name"]) == "john"
assert json_get(json_data, ["father", "image"], default="n/a") == "n/a"
assert json_get(json_data, ["father", "father"]) is None
assert json_get(json_data, ["birthPlace"]) == "Beijing"
assert json_get(
json_data, ["birthPlace", "name"], default="n/a") is None
def test_json_get_list(self):
json_data = {
"name": "john",
"age": None,
"birthPlace": ["Beijing"]
}
assert json_get_list(json_data, "name") == ["john"]
assert json_get_list(json_data, "birthPlace") == ["Beijing"]
assert json_get_list(json_data, "age") == []
def test_json_get_first_item(self):
json_data = {
"name": "john",
"birthPlace": ["Beijing"],
"interests": []
}
assert json_get_first_item(json_data, "name") == "john"
assert json_get_first_item(json_data, "birthPlace") == "Beijing"
assert json_get_first_item(json_data, "birthDate") == ''
assert json_get_first_item(json_data, "interests", defaultValue=None) is None
def test_json_append(self):
json_data = {
"name": "john",
"birthPlace": ["Beijing"],
"interests": []
}
json_append(json_data, "name", "a")
assert json_data["name"] == "john"
json_append(json_data, "birthPlace", "a")
assert json_data["birthPlace"] == ["Beijing","a"]
json_append(json_data, "keywords", "a")
assert json_data["keywords"] == ["a"]
def test_any2utf8(self):
tin = "你好世界"
tout = any2utf8(tin)
logging.info(" {} => {}".format(tin, tout))
tin = u"你好世界"
tout = any2utf8(tin)
logging.info((tin, tout))
tin = "hello world"
tout = any2utf8(tin)
logging.info((tin, tout))
tin = ["hello", "世界"]
tout = any2utf8(tin)
logging.info((tin, tout))
tin = {"hello": u"世界"}
tout = any2utf8(tin)
logging.info((tin, tout))
tin = {"hello": u"世界", "number": 90}
tout = any2utf8(tin)
logging.info((tin, tout))
def test_any2unicode(self):
tin = "你好世界"
tout = any2unicode(tin)
logging.info((tin, tout))
tin = u"你好世界"
tout = any2unicode(tin)
logging.info((tin, tout))
tin = "hello world"
tout = any2unicode(tin)
logging.info((tin, tout))
tin = ["hello", "世界"]
tout = any2unicode(tin)
logging.info((tin, tout))
tin = {"hello": u"世界"}
tout = any2unicode(tin)
logging.info((tin, tout))
def test_any2sha256(self):
tin = "你好世界"
tout = any2sha256(tin)
assert "beca6335b20ff57ccc47403ef4d9e0b8fccb4442b3151c2e7d50050673d43172" == tout, tout
def test_any2sha1(self):
tin = "你好世界"
tout = any2sha1(tin)
assert "dabaa5fe7c47fb21be902480a13013f16a1ab6eb" == tout, tout
tin = u"你好世界"
tout = any2sha1(tin)
assert "dabaa5fe7c47fb21be902480a13013f16a1ab6eb" == tout, tout
tin = "hello world"
tout = any2sha1(tin)
assert "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed" == tout, tout
tin = ["hello", "world"]
tout = any2sha1(tin)
assert "2ed0a51bbdbc4f57378e8c64a1c7a0cd4386cc09" == tout, tout
tin = {"hello": "world"}
tout = any2sha1(tin)
assert "d3b09abe30cfe2edff4ee9e0a141c93bf5b3af87" == tout, tout
def test_json_dict_copy(self):
property_list = [
{ "name":"name", "alternateName": ["name","title"]},
{ "name":"birthDate", "alternateName": ["dob","dateOfBirth"] },
{ "name":"description" }
]
json_object = {"dob":"2010-01-01","title":"John","interests":"data","description":"a person"}
ret = json_dict_copy(json_object, property_list)
assert json_object["title"] == ret["name"]
assert json_object["dob"] == ret["birthDate"]
assert json_object["description"] == ret["description"]
assert ret.get("interests") is None
def test_parse_list_value(self):
ret = parse_list_value(u"原文,正文")
assert len(ret) == 2
if __name__ == '__main__':
unittest.main()
| 29.839024
| 101
| 0.560896
| 678
| 6,117
| 4.874631
| 0.176991
| 0.075038
| 0.070802
| 0.059002
| 0.559758
| 0.533132
| 0.448109
| 0.333737
| 0.255068
| 0.074433
| 0
| 0.050983
| 0.284944
| 6,117
| 204
| 102
| 29.985294
| 0.704618
| 0.009318
| 0
| 0.38961
| 0
| 0
| 0.172391
| 0.043593
| 0
| 0
| 0
| 0
| 0.246753
| 1
| 0.090909
| false
| 0.006494
| 0.038961
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
726209920117b9b9ebcbf40bbfd0a7a9d4bd3f25
| 10,312
|
py
|
Python
|
lpp/evaluator.py
|
VidoniJorge/c-interprete
|
4f026d093b26289d3f692cd64d52069fdd1d954c
|
[
"Apache-2.0"
] | null | null | null |
lpp/evaluator.py
|
VidoniJorge/c-interprete
|
4f026d093b26289d3f692cd64d52069fdd1d954c
|
[
"Apache-2.0"
] | null | null | null |
lpp/evaluator.py
|
VidoniJorge/c-interprete
|
4f026d093b26289d3f692cd64d52069fdd1d954c
|
[
"Apache-2.0"
] | null | null | null |
from typing import (
Any,
cast,
List,
Optional,
Type
)
import lpp.ast as ast
from lpp.builtins import BUILTINS
from lpp.object import(
Boolean,
Builtin,
Environment,
Error,
Function,
Integer,
Null,
Object,
ObjectType,
String,
Return
)
TRUE = Boolean(True)
FALSE = Boolean(False)
NULL = Null()
_NOT_A_FUNCTION = 'No es una funcion: {}'
_TYPE_MISMATCH = 'Discrepancia de tipos: {} {} {}'
_UNKNOWN_PREFIX_OPERATOR = 'Operador desconocido: {}{}'
_UNKNOWN_INFIX_OPERATOR = 'Operador desconocido: {} {} {}'
_UNKNOWN_IDENTIFIER = 'Identificador no encontrado: {}'
def evaluate(node:ast.ASTNode, env: Environment) -> Optional[Object]:
node_type: Type = type(node)
if node_type == ast.Program:
node = cast(ast.Program, node)
return _evaluate_program(node, env)
elif node_type == ast.ExpressionStatement:
node = cast(ast.ExpressionStatement, node)
assert node.expression is not None
return evaluate(node.expression, env)
elif node_type == ast.Integer:
node = cast(ast.Integer, node)
assert node.value is not None
return Integer(node.value)
elif node_type == ast.Boolean:
node = cast(ast.Boolean, node)
assert node.value is not None
return _to_boolean_object(node.value)
elif node_type == ast.Prefix:
node = cast(ast.Prefix, node)
assert node.right is not None
right = evaluate(node.right, env)
assert right is not None
return _evaluate_prifix_expression(node.operator, right, node.right.token.line)
elif node_type == ast.Infix:
node = cast(ast.Infix, node)
assert node.left is not None and node.right is not None
left = evaluate(node.left, env)
right = evaluate(node.right, env)
assert right is not None and left is not None
return _evaluate_infix_expression(node.operator, left, right, node.left.token.line)
elif node_type == ast.Block:
node = cast(ast.Block, node)
return _evaluate_block_statement(node, env)
elif node_type == ast.If:
node = cast(ast.If, node)
return _evaluate_if_expression(node, env)
elif node_type == ast.ReturnStatement:
node = cast(ast.ReturnStatement, node)
assert node.return_value is not None
value = evaluate(node.return_value, env)
assert value is not None
return Return(value)
elif node_type == ast.LetStatement:
node = cast(ast.LetStatement, node)
assert node.value is not None
value = evaluate(node.value, env)
assert node.name is not None
env[node.name.value] = value
elif node_type == ast.Identifier:
node = cast(ast.Identifier, node)
return _evaluate_identifier(node, env, node.token.line)
elif node_type == ast.Function:
node = cast(ast.Function, node)
assert node.body is not None
return Function(node.parameters,
node.body,
env)
elif node_type == ast.Call:
node = cast(ast.Call, node)
function = evaluate(node.function, env)
assert function is not None
assert node.arguments is not None
args = _evaluate_expression(node.arguments, env)
assert function is not None
return _apply_function(function, args, node.token.line)
elif node_type == ast.StringLiteral:
node = cast(ast.StringLiteral, node)
return String(node.value)
return None
def _apply_function(fn: Object, args: List[Object],line_evaluated: int) -> Object:
if type(fn) == Function:
fn = cast(Function, fn)
extended_enviroment = _extended_function_enviroment(fn, args)
evaluated = evaluate(fn.body, extended_enviroment)
assert evaluated is not None
return _unwrap_return_value(evaluated)
elif type(fn) == Builtin:
fn = cast(Builtin, fn)
return fn.fn(*args)
else:
return _new_error(_NOT_A_FUNCTION, args, line_evaluated)
def _evaluate_bang_operator_expression(right: Object) -> Object:
if right is TRUE:
return FALSE
elif right is FALSE:
return TRUE
elif right is NULL:
return TRUE
else:
return FALSE
def _evaluate_expression(expressions: List[ast.Expression], env: Environment) -> List[Object]:
result: List[Object] = []
for expression in expressions:
evaluated = evaluate(expression, env)
assert evaluated is not None
result.append(evaluated)
return result
def _extended_function_enviroment(fn: Function, args: List[Object]) -> Environment:
env = Environment(outer=fn.env)
for idx, param in enumerate(fn.parameters):
env[param.value] = args[idx - 1]
return env
def _evaluate_identifier(node: ast.Identifier, env: Environment, line_evaluated:int) -> Object:
try:
return env[node.value]
except KeyError:
return BUILTINS.get(node.value,
_new_error(_UNKNOWN_IDENTIFIER, [node.value], line_evaluated))
def _evaluate_if_expression(if_expression: ast.If, env: Environment) -> Optional[Object]:
assert if_expression.condition is not None
condition = evaluate(if_expression.condition, env)
assert condition is not None
if _is_truthy(condition):
assert if_expression.consequence is not None
return evaluate(if_expression.consequence, env)
elif if_expression.alternative is not None:
return evaluate(if_expression.alternative, env)
else:
return NULL
def _is_truthy(obj: Object) -> bool:
if obj is NULL:
return False
elif obj is TRUE:
return True
elif obj is FALSE:
return False
else:
return True
def _evaluate_block_statement(block: ast.Block, env: Environment) -> Optional[Object]:
result: Optional[Object] = None
for statement in block.statements:
result = evaluate(statement, env)
if result is not None and \
(result.type() == ObjectType.RETURN or result.type() == ObjectType.ERROR):
return result
return result
def _evaluate_infix_expression(operator:str, left:Object, right:Object, line_evaluated:int) -> Object:
if left.type() == ObjectType.INTEGER \
and right.type() == ObjectType.INTEGER:
return _evaluate_integer_infix_expression(operator, left, right, line_evaluated)
if left.type() == ObjectType.STRING \
and right.type() == ObjectType.STRING:
return _evaluate_string_infix_expression(operator, left, right, line_evaluated)
elif operator == '==':
return _to_boolean_object(left is right)
elif operator == '!=':
return _to_boolean_object(left is not right)
elif left.type() != right.type():
return _new_error(_TYPE_MISMATCH, [left.type().name,
operator,
right.type().name
], line_evaluated)
else:
return _new_error(_UNKNOWN_INFIX_OPERATOR,[left.type().name,
operator,
right.type().name
], line_evaluated)
def _evaluate_integer_infix_expression(operator: str, left: Object, right: Object, line_evaluated:int) -> Object:
left_value: int = cast(Integer, left).value
right_value: int = cast(Integer, right).value
if operator == '+':
return Integer(left_value + right_value)
elif operator == '-':
return Integer(left_value - right_value)
elif operator == '*':
return Integer(left_value * right_value)
elif operator == '/':
return Integer(left_value // right_value)  # integer division
elif operator == '<':
return _to_boolean_object(left_value < right_value)
elif operator == '>':
return _to_boolean_object(left_value > right_value)
elif operator == '==':
return _to_boolean_object(left_value == right_value)
elif operator == '!=':
return _to_boolean_object(left_value != right_value)
else:
return _new_error(_UNKNOWN_INFIX_OPERATOR,[left.type().name,
operator,
right.type().name
], line_evaluated)
def _evaluate_string_infix_expression(operator: str, left: Object, right: Object, line_evaluated:int) -> Object:
left_value: str = cast(String, left).value
right_value: str = cast(String, right).value
if operator == '+':
return String(left_value + right_value)
elif operator == '==':
return _to_boolean_object(left_value == right_value)
elif operator == '!=':
return _to_boolean_object(left_value != right_value)
else:
return _new_error(_UNKNOWN_INFIX_OPERATOR,[left.type().name,
operator,
right.type().name
], line_evaluated)
def _evaluate_minus_operator_expression(right: Object, line_evaluated:int) -> Object:
if type(right) != Integer:
return _new_error(_UNKNOWN_PREFIX_OPERATOR, ['-', right.type().name], line_evaluated)
right = cast(Integer, right)
return Integer(-right.value)
def _evaluate_prifix_expression(operator: str, right: Object,line_evaluated:int) -> Object:
if operator == '!':
return _evaluate_bang_operator_expression(right)
elif operator == '-':
return _evaluate_minus_operator_expression(right, line_evaluated)
else:
return _new_error(_UNKNOWN_PREFIX_OPERATOR,[operator, right.type().name],line_evaluated)
def _evaluate_program(program: ast.Program, env) -> Optional[Object]:
result: Optional[Object] = None
for statement in program.statements:
result = evaluate(statement, env)
if type(result) == Return:
result = cast(Return, result)
return result.value
elif type(result) == Error:
return result
return result
def _new_error(message: str, args:List[Any], error_line: int) -> Error:
return Error(message.format(*args), error_line)
def _unwrap_return_value(obj: Object) -> Object:
if type(obj) == Return:
obj = cast(Return, obj)
return obj.value
return obj
def _to_boolean_object(value: bool) -> Boolean:
return TRUE if value else FALSE
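The prefix and infix helpers can be exercised without a parser, using only objects constructed in this module; the line-number argument below is arbitrary and the calls are illustrative:

if __name__ == '__main__':
    print(_evaluate_prifix_expression('-', Integer(5), 1).value)                # -5
    print(_evaluate_infix_expression('+', Integer(2), Integer(3), 1).value)     # 5
    print(_evaluate_infix_expression('==', Integer(2), Integer(2), 1) is TRUE)  # True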
| 33.264516
| 113
| 0.650698
| 1,240
| 10,312
| 5.215323
| 0.091935
| 0.019329
| 0.0334
| 0.030153
| 0.435905
| 0.357662
| 0.291325
| 0.223287
| 0.193753
| 0.170558
| 0
| 0.00013
| 0.253006
| 10,312
| 310
| 114
| 33.264516
| 0.839413
| 0.001746
| 0
| 0.246094
| 0
| 0
| 0.01564
| 0
| 0
| 0
| 0
| 0
| 0.078125
| 1
| 0.070313
| false
| 0
| 0.015625
| 0.007813
| 0.339844
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
726246040afca77178e3293325a2bcbc9ed6e53e
| 121,132
|
py
|
Python
|
cli/tests/pcluster/config/test_validators.py
|
QPC-database/aws-parallelcluster
|
8c2e9595ca171340df21695c27d85dc00f19d3e4
|
[
"Apache-2.0"
] | 1
|
2021-07-10T13:59:46.000Z
|
2021-07-10T13:59:46.000Z
|
cli/tests/pcluster/config/test_validators.py
|
QPC-database/aws-parallelcluster
|
8c2e9595ca171340df21695c27d85dc00f19d3e4
|
[
"Apache-2.0"
] | null | null | null |
cli/tests/pcluster/config/test_validators.py
|
QPC-database/aws-parallelcluster
|
8c2e9595ca171340df21695c27d85dc00f19d3e4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import re
import configparser
import pytest
from assertpy import assert_that
import tests.pcluster.config.utils as utils
from pcluster.config.cfn_param_types import CfnParam, CfnSection
from pcluster.config.mappings import ALLOWED_VALUES, FSX
from pcluster.config.validators import (
DCV_MESSAGES,
EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS,
FSX_MESSAGES,
FSX_SUPPORTED_ARCHITECTURES_OSES,
LOGFILE_LOGGER,
architecture_os_validator,
check_usage_class,
cluster_type_validator,
compute_resource_validator,
disable_hyperthreading_architecture_validator,
efa_gdr_validator,
efa_os_arch_validator,
fsx_ignored_parameters_validator,
instances_architecture_compatibility_validator,
intel_hpc_architecture_validator,
queue_compute_type_validator,
queue_validator,
region_validator,
s3_bucket_region_validator,
settings_validator,
)
from pcluster.constants import FSX_HDD_THROUGHPUT, FSX_SSD_THROUGHPUT
from tests.common import MockedBoto3Request
from tests.pcluster.config.defaults import DefaultDict
@pytest.fixture()
def boto3_stubber_path():
return "pcluster.config.validators.boto3"
@pytest.mark.parametrize(
"section_dict, expected_message, expected_warning",
[
# traditional scheduler
({"scheduler": "sge", "initial_queue_size": 1, "max_queue_size": 2, "maintain_initial_size": True}, None, None),
(
{"scheduler": "sge", "initial_queue_size": 3, "max_queue_size": 2, "maintain_initial_size": True},
"initial_queue_size must be fewer than or equal to max_queue_size",
None,
),
(
{"scheduler": "sge", "initial_queue_size": 3, "max_queue_size": 2, "maintain_initial_size": False},
"initial_queue_size must be fewer than or equal to max_queue_size",
None,
),
# awsbatch
({"scheduler": "awsbatch", "min_vcpus": 1, "desired_vcpus": 2, "max_vcpus": 3}, None, None),
(
{"scheduler": "awsbatch", "min_vcpus": 3, "desired_vcpus": 2, "max_vcpus": 3},
"desired_vcpus must be greater than or equal to min_vcpus",
None,
),
(
{"scheduler": "awsbatch", "min_vcpus": 1, "desired_vcpus": 4, "max_vcpus": 3},
"desired_vcpus must be fewer than or equal to max_vcpus",
None,
),
(
{"scheduler": "awsbatch", "min_vcpus": 4, "desired_vcpus": 4, "max_vcpus": 3},
"max_vcpus must be greater than or equal to min_vcpus",
None,
),
# key pair not provided
({"scheduler": "awsbatch"}, None, "If you do not specify a key pair"),
],
)
def test_cluster_validator(mocker, capsys, section_dict, expected_message, expected_warning):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(
mocker, config_parser_dict, expected_message, capsys, expected_warning=expected_warning
)
@pytest.mark.parametrize(
"instance_type, expected_message", [("t2.micro", None), ("c4.xlarge", None), ("c5.xlarge", "is not supported")]
)
def test_ec2_instance_type_validator(mocker, instance_type, expected_message):
config_parser_dict = {"cluster default": {"compute_instance_type": instance_type}}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize("instance_type, expected_message", [("t2.micro", None), ("c4.xlarge", None)])
def test_head_node_instance_type_validator(mocker, instance_type, expected_message):
config_parser_dict = {"cluster default": {"master_instance_type": instance_type}}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"scheduler, instance_type, expected_message, expected_warnings",
[
("sge", "t2.micro", None, None),
("sge", "c4.xlarge", None, None),
("sge", "c5.xlarge", "is not supported", None),
# NOTE: compute_instance_type_validator calls ec2_instance_type_validator only if the scheduler is not awsbatch
("awsbatch", "t2.micro", None, None),
("awsbatch", "c4.xlarge", "is not supported", None),
("awsbatch", "t2", None, None), # t2 family
("awsbatch", "optimal", None, None),
("sge", "p4d.24xlarge", None, "has 4 Network Interfaces."),
("slurm", "p4d.24xlarge", None, None),
],
)
def test_compute_instance_type_validator(mocker, scheduler, instance_type, expected_message, expected_warnings):
config_parser_dict = {"cluster default": {"scheduler": scheduler, "compute_instance_type": instance_type}}
extra_patches = {
"pcluster.config.validators.InstanceTypeInfo.max_network_interface_count": 4
if instance_type == "p4d.24xlarge"
else 1,
}
utils.assert_param_validator(
mocker, config_parser_dict, expected_message, expected_warnings, extra_patches=extra_patches
)
def test_ec2_key_pair_validator(mocker, boto3_stubber):
describe_key_pairs_response = {
"KeyPairs": [
{"KeyFingerprint": "12:bf:7c:56:6c:dd:4f:8c:24:45:75:f1:1b:16:54:89:82:09:a4:26", "KeyName": "key1"}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_key_pairs", response=describe_key_pairs_response, expected_params={"KeyNames": ["key1"]}
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid key
config_parser_dict = {"cluster default": {"key_name": "key1"}}
utils.assert_param_validator(mocker, config_parser_dict)
@pytest.mark.parametrize(
"image_architecture, bad_ami_message, bad_architecture_message",
[
("x86_64", None, None),
(
"arm64",
None,
"incompatible with the architecture supported by the instance type chosen for the head node",
),
(
"arm64",
"Unable to get information for AMI",
"incompatible with the architecture supported by the instance type chosen for the head node",
),
],
)
def test_ec2_ami_validator(mocker, boto3_stubber, image_architecture, bad_ami_message, bad_architecture_message):
describe_images_response = {
"Images": [
{
"VirtualizationType": "paravirtual",
"Name": "My server",
"Hypervisor": "xen",
"ImageId": "ami-12345678",
"RootDeviceType": "ebs",
"State": "available",
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda1",
"Ebs": {
"DeleteOnTermination": True,
"SnapshotId": "snap-1234567890abcdef0",
"VolumeSize": 8,
"VolumeType": "standard",
},
}
],
"Architecture": image_architecture,
"ImageLocation": "123456789012/My server",
"KernelId": "aki-88aa75e1",
"OwnerId": "123456789012",
"RootDeviceName": "/dev/sda1",
"Public": False,
"ImageType": "machine",
"Description": "An AMI for my server",
}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_images",
response=describe_images_response,
expected_params={"ImageIds": ["ami-12345678"]},
generate_error=bad_ami_message,
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid key
config_parser_dict = {"cluster default": {"custom_ami": "ami-12345678"}}
expected_message = bad_ami_message or bad_architecture_message
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"tags": {"key": "value", "key2": "value2"}}, None),
(
{"tags": {"key": "value", "Version": "value2"}},
r"Version.*reserved",
),
],
)
def test_tags_validator(mocker, capsys, section_dict, expected_message):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_error=expected_message)
def test_ec2_volume_validator(mocker, boto3_stubber):
describe_volumes_response = {
"Volumes": [
{
"AvailabilityZone": "us-east-1a",
"Attachments": [
{
"AttachTime": "2013-12-18T22:35:00.000Z",
"InstanceId": "i-1234567890abcdef0",
"VolumeId": "vol-12345678",
"State": "attached",
"DeleteOnTermination": True,
"Device": "/dev/sda1",
}
],
"Encrypted": False,
"VolumeType": "gp2",
"VolumeId": "vol-049df61146c4d7901",
"State": "available", # TODO add test with "in-use"
"SnapshotId": "snap-1234567890abcdef0",
"CreateTime": "2013-12-18T22:35:00.084Z",
"Size": 8,
}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_volumes",
response=describe_volumes_response,
expected_params={"VolumeIds": ["vol-12345678"]},
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid key
config_parser_dict = {
"cluster default": {"ebs_settings": "default"},
"ebs default": {"shared_dir": "test", "ebs_volume_id": "vol-12345678"},
}
utils.assert_param_validator(mocker, config_parser_dict)
@pytest.mark.parametrize(
"region, base_os, scheduler, expected_message",
[
# verify awsbatch supported regions
(
"ap-northeast-3",
"alinux2",
"awsbatch",
"Region 'ap-northeast-3' is not yet officially supported by ParallelCluster",
),
("us-gov-east-1", "alinux2", "awsbatch", None),
("us-gov-west-1", "alinux2", "awsbatch", None),
("eu-west-1", "alinux2", "awsbatch", None),
("us-east-1", "alinux2", "awsbatch", None),
("eu-north-1", "alinux2", "awsbatch", None),
("cn-north-1", "alinux2", "awsbatch", None),
("cn-northwest-1", "alinux2", "awsbatch", None),
# verify traditional schedulers are supported in all the regions but ap-northeast-3
("cn-northwest-1", "alinux2", "sge", None),
("us-gov-east-1", "alinux2", "sge", None),
("cn-northwest-1", "alinux2", "slurm", None),
("us-gov-east-1", "alinux2", "slurm", None),
("cn-northwest-1", "alinux2", "torque", None),
("us-gov-east-1", "alinux2", "torque", None),
(
"ap-northeast-3",
"alinux2",
"sge",
"Region 'ap-northeast-3' is not yet officially supported by ParallelCluster",
),
# verify awsbatch supported OSes
("eu-west-1", "centos7", "awsbatch", "scheduler supports the following Operating Systems"),
("eu-west-1", "centos8", "awsbatch", "scheduler supports the following Operating Systems"),
("eu-west-1", "ubuntu1804", "awsbatch", "scheduler supports the following Operating Systems"),
("eu-west-1", "alinux2", "awsbatch", None),
# verify sge supports all the OSes
("eu-west-1", "centos7", "sge", None),
("eu-west-1", "centos8", "sge", None),
("eu-west-1", "ubuntu1804", "sge", None),
("eu-west-1", "alinux2", "sge", None),
# verify slurm supports all the OSes
("eu-west-1", "centos7", "slurm", None),
("eu-west-1", "centos8", "slurm", None),
("eu-west-1", "ubuntu1804", "slurm", None),
("eu-west-1", "alinux2", "slurm", None),
# verify torque supports all the OSes
("eu-west-1", "centos7", "torque", None),
("eu-west-1", "centos8", "torque", None),
("eu-west-1", "ubuntu1804", "torque", None),
("eu-west-1", "alinux2", "torque", None),
],
)
def test_scheduler_validator(mocker, capsys, region, base_os, scheduler, expected_message):
# We need to set the region in the environment because it takes precedence over the config file
os.environ["AWS_DEFAULT_REGION"] = region
config_parser_dict = {"cluster default": {"base_os": base_os, "scheduler": scheduler}}
# Deprecation warning should be printed for sge and torque
expected_warning = None
wiki_url = "https://github.com/aws/aws-parallelcluster/wiki/Deprecation-of-SGE-and-Torque-in-ParallelCluster"
if scheduler in ["sge", "torque"]:
expected_warning = ".{0}. is scheduled to be deprecated.*{1}".format(scheduler, wiki_url)
utils.assert_param_validator(mocker, config_parser_dict, expected_message, capsys, expected_warning)
def test_placement_group_validator(mocker, boto3_stubber):
describe_placement_groups_response = {
"PlacementGroups": [{"GroupName": "my-cluster", "State": "available", "Strategy": "cluster"}]
}
mocked_requests = [
MockedBoto3Request(
method="describe_placement_groups",
response=describe_placement_groups_response,
expected_params={"GroupNames": ["my-cluster"]},
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid group name
config_parser_dict = {"cluster default": {"placement_group": "my-cluster"}}
utils.assert_param_validator(mocker, config_parser_dict)
def test_url_validator(mocker, boto3_stubber, capsys):
head_object_response = {
"AcceptRanges": "bytes",
"ContentType": "text/html",
"LastModified": "Thu, 16 Apr 2015 18:19:14 GMT",
"ContentLength": 77,
"VersionId": "null",
"ETag": '"30a6ec7e1a9ad79c203d05a589c8b400"',
"Metadata": {},
}
mocked_requests = [
MockedBoto3Request(
method="head_object", response=head_object_response, expected_params={"Bucket": "test", "Key": "test.json"}
)
]
boto3_stubber("s3", mocked_requests)
mocker.patch("pcluster.config.validators.urllib.request.urlopen")
tests = [("s3://test/test.json", None), ("http://test/test.json", None)]
for template_url, expected_message in tests:
config_parser_dict = {"cluster default": {"template_url": template_url}}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
# Test S3 URI in custom_chef_cookbook.
tests = [
(
"s3://test/cookbook.tgz",
None,
MockedBoto3Request(
method="head_object",
response=head_object_response,
expected_params={"Bucket": "test", "Key": "cookbook.tgz"},
),
),
(
"s3://failure/cookbook.tgz",
(
"WARNING: The configuration parameter 'custom_chef_cookbook' generated the following warnings:\n"
"The S3 object does not exist or you do not have access to it.\n"
"Please make sure the cluster nodes have access to it."
),
MockedBoto3Request(
method="head_object",
response=head_object_response,
expected_params={"Bucket": "failure", "Key": "cookbook.tgz"},
generate_error=True,
error_code=404,
),
),
]
for custom_chef_cookbook_url, expected_message, mocked_request in tests:
boto3_stubber("s3", mocked_request)
mocker.patch("pcluster.config.validators.urllib.request.urlopen")
config_parser_dict = {
"cluster default": {
"scheduler": "slurm",
"s3_read_resource": "arn:aws:s3:::test*",
"custom_chef_cookbook": custom_chef_cookbook_url,
}
}
utils.assert_param_validator(mocker, config_parser_dict, capsys=capsys, expected_warning=expected_message)
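# The cases below exercise auto_import_policy validation: head_bucket checks bucket existence/permissions,
# then get_bucket_location is stubbed so the first case hits the cross-region error while the remaining cases
# cover the NoSuchBucket and AccessDenied error paths.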
@pytest.mark.parametrize(
"config, num_calls, error_code, bucket, expected_message",
[
(
{
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"import_path": "s3://test/test1/test2",
"export_path": "s3://test/test1/test2",
"auto_import_policy": "NEW",
},
},
2,
None,
{"Bucket": "test"},
"AutoImport is not supported for cross-region buckets.",
),
(
{
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"import_path": "s3://test/test1/test2",
"export_path": "s3://test/test1/test2",
"auto_import_policy": "NEW",
},
},
2,
"NoSuchBucket",
{"Bucket": "test"},
"The S3 bucket 'test' does not appear to exist.",
),
(
{
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"import_path": "s3://test/test1/test2",
"export_path": "s3://test/test1/test2",
"auto_import_policy": "NEW",
},
},
2,
"AccessDenied",
{"Bucket": "test"},
"You do not have access to the S3 bucket",
),
],
)
def test_auto_import_policy_validator(mocker, boto3_stubber, config, num_calls, error_code, bucket, expected_message):
os.environ["AWS_DEFAULT_REGION"] = "eu-west-1"
head_bucket_response = {
"ResponseMetadata": {
"AcceptRanges": "bytes",
"ContentType": "text/html",
"LastModified": "Thu, 16 Apr 2015 18:19:14 GMT",
"ContentLength": 77,
"VersionId": "null",
"ETag": '"30a6ec7e1a9ad79c203d05a589c8b400"',
"Metadata": {},
}
}
get_bucket_location_response = {
"ResponseMetadata": {
"LocationConstraint": "af-south1",
}
}
mocked_requests = []
for _ in range(num_calls):
mocked_requests.append(
MockedBoto3Request(method="head_bucket", response=head_bucket_response, expected_params=bucket)
)
if error_code is None:
mocked_requests.append(
MockedBoto3Request(
method="get_bucket_location", response=get_bucket_location_response, expected_params=bucket
)
)
else:
mocked_requests.append(
MockedBoto3Request(
method="get_bucket_location",
response=get_bucket_location_response,
expected_params=bucket,
generate_error=error_code is not None,
error_code=error_code,
)
)
boto3_stubber("s3", mocked_requests)
utils.assert_param_validator(mocker, config, expected_message)
@pytest.mark.parametrize(
"config, num_calls, bucket, expected_message",
[
(
{
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"import_path": "s3://test/test1/test2",
"export_path": "s3://test/test1/test2",
},
},
2,
{"Bucket": "test"},
None,
),
(
{
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"import_path": "http://test/test.json",
"export_path": "s3://test/test1/test2",
},
},
1,
{"Bucket": "test"},
"The value 'http://test/test.json' used for the parameter 'import_path' is not a valid S3 URI.",
),
],
)
def test_s3_validator(mocker, boto3_stubber, config, num_calls, bucket, expected_message):
if bucket:
_head_bucket_stubber(mocker, boto3_stubber, bucket, num_calls)
utils.assert_param_validator(mocker, config, expected_message)
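# cluster_resource_bucket must be in the same region as the cluster: get_bucket_location is stubbed below and
# its LocationConstraint is compared against the cluster region (us-east-1 buckets return no LocationConstraint).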
@pytest.mark.parametrize(
"bucket, region, error_code, expected_message, client_error",
[
(
"bucket",
"us-east-1",
None,
None,
False,
),
(
"bucket",
"us-west-1",
None,
None,
False,
),
(
"bucket",
"eu-west-1",
None,
"cluster_resource_bucket must be in the same region of the cluster.",
False,
),
(
"not_existed_bucket",
"af-south-1",
"NoSuchBucket",
"The S3 bucket 'not_existed_bucket' does not appear to exist",
True,
),
(
"access_denied_bucket",
"af-south-1",
"AccessDenied",
"You do not have access to the S3 bucket 'access_denied_bucket'",
True,
),
(
"unexpected_error_bucket",
"af-south-1",
None,
"Unexpected error for S3 bucket",
True,
),
],
)
def test_s3_bucket_region_validator(mocker, boto3_stubber, error_code, bucket, region, client_error, expected_message):
os.environ["AWS_DEFAULT_REGION"] = "us-west-1" if region == "us-west-1" else "us-east-1"
if region == "us-east-1":
# The actual response when the region is us-east-1 is
# {'ResponseMetadata': {...}, 'LocationConstraint': None},
# but botocore does not support stubbing a None response, so we mock the return value as follows.
get_bucket_location_response = {
"ResponseMetadata": {},
}
else:
get_bucket_location_response = {
"ResponseMetadata": {},
"LocationConstraint": region,
}
mocked_requests = []
if error_code is None:
mocked_requests.append(
MockedBoto3Request(
method="get_bucket_location",
response=get_bucket_location_response,
expected_params={"Bucket": bucket},
generate_error=client_error is True,
)
)
else:
mocked_requests.append(
MockedBoto3Request(
method="get_bucket_location",
response=get_bucket_location_response,
expected_params={"Bucket": bucket},
generate_error=error_code is not None,
error_code=error_code,
)
)
boto3_stubber("s3", mocked_requests)
config = {
"cluster default": {"cluster_resource_bucket": bucket},
}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
errors, warnings = s3_bucket_region_validator("cluster_resource_bucket", bucket, pcluster_config)
if expected_message:
assert_that(errors[0]).contains(expected_message)
else:
assert_that(errors).is_empty()
def test_ec2_vpc_id_validator(mocker, boto3_stubber):
mocked_requests = []
# mock describe_vpc boto3 call
describe_vpc_response = {
"Vpcs": [
{
"VpcId": "vpc-12345678",
"InstanceTenancy": "default",
"Tags": [{"Value": "Default VPC", "Key": "Name"}],
"State": "available",
"DhcpOptionsId": "dopt-4ef69c2a",
"CidrBlock": "172.31.0.0/16",
"IsDefault": True,
}
]
}
mocked_requests.append(
MockedBoto3Request(
method="describe_vpcs", response=describe_vpc_response, expected_params={"VpcIds": ["vpc-12345678"]}
)
)
# mock describe_vpc_attribute boto3 call
describe_vpc_attribute_response = {
"VpcId": "vpc-12345678",
"EnableDnsSupport": {"Value": True},
"EnableDnsHostnames": {"Value": True},
}
mocked_requests.append(
MockedBoto3Request(
method="describe_vpc_attribute",
response=describe_vpc_attribute_response,
expected_params={"VpcId": "vpc-12345678", "Attribute": "enableDnsSupport"},
)
)
mocked_requests.append(
MockedBoto3Request(
method="describe_vpc_attribute",
response=describe_vpc_attribute_response,
expected_params={"VpcId": "vpc-12345678", "Attribute": "enableDnsHostnames"},
)
)
boto3_stubber("ec2", mocked_requests)
# TODO mock and test invalid vpc-id
for vpc_id, expected_message in [("vpc-12345678", None)]:
config_parser_dict = {"cluster default": {"vpc_settings": "default"}, "vpc default": {"vpc_id": vpc_id}}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
def test_ec2_subnet_id_validator(mocker, boto3_stubber):
describe_subnets_response = {
"Subnets": [
{
"AvailabilityZone": "us-east-2c",
"AvailabilityZoneId": "use2-az3",
"AvailableIpAddressCount": 248,
"CidrBlock": "10.0.1.0/24",
"DefaultForAz": False,
"MapPublicIpOnLaunch": False,
"State": "available",
"SubnetId": "subnet-12345678",
"VpcId": "vpc-06e4ab6c6cEXAMPLE",
"OwnerId": "111122223333",
"AssignIpv6AddressOnCreation": False,
"Ipv6CidrBlockAssociationSet": [],
"Tags": [{"Key": "Name", "Value": "MySubnet"}],
"SubnetArn": "arn:aws:ec2:us-east-2:111122223333:subnet/subnet-12345678",
}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_subnets",
response=describe_subnets_response,
expected_params={"SubnetIds": ["subnet-12345678"]},
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid key
config_parser_dict = {
"cluster default": {"vpc_settings": "default"},
"vpc default": {"master_subnet_id": "subnet-12345678"},
}
utils.assert_param_validator(mocker, config_parser_dict)
def test_ec2_security_group_validator(mocker, boto3_stubber):
describe_security_groups_response = {
"SecurityGroups": [
{
"IpPermissionsEgress": [],
"Description": "My security group",
"IpPermissions": [
{
"PrefixListIds": [],
"FromPort": 22,
"IpRanges": [{"CidrIp": "203.0.113.0/24"}],
"ToPort": 22,
"IpProtocol": "tcp",
"UserIdGroupPairs": [],
}
],
"GroupName": "MySecurityGroup",
"OwnerId": "123456789012",
"GroupId": "sg-12345678",
}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_security_groups",
response=describe_security_groups_response,
expected_params={"GroupIds": ["sg-12345678"]},
)
]
boto3_stubber("ec2", mocked_requests)
# TODO test with invalid key
config_parser_dict = {
"cluster default": {"vpc_settings": "default"},
"vpc default": {"vpc_security_group_id": "sg-12345678"},
}
utils.assert_param_validator(mocker, config_parser_dict)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
(
{"throughput_mode": "bursting", "provisioned_throughput": 1024},
"When specifying 'provisioned_throughput', the 'throughput_mode' must be set to 'provisioned'",
),
({"throughput_mode": "provisioned", "provisioned_throughput": 1024}, None),
({"shared_dir": "NONE"}, "NONE cannot be used as a shared directory"),
({"shared_dir": "/NONE"}, "/NONE cannot be used as a shared directory"),
({"shared_dir": "/efs"}, None),
],
)
def test_efs_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": {"efs_settings": "default"}, "efs default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"volume_type": "io1", "volume_size": 20, "volume_iops": 120}, None),
(
{"volume_type": "io1", "volume_size": 20, "volume_iops": 90},
"IOPS rate must be between 100 and 64000 when provisioning io1 volumes.",
),
(
{"volume_type": "io1", "volume_size": 20, "volume_iops": 64001},
"IOPS rate must be between 100 and 64000 when provisioning io1 volumes.",
),
({"volume_type": "io1", "volume_size": 20, "volume_iops": 1001}, "IOPS to volume size ratio of .* is too high"),
({"volume_type": "io2", "volume_size": 20, "volume_iops": 120}, None),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 90},
"IOPS rate must be between 100 and 256000 when provisioning io2 volumes.",
),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 256001},
"IOPS rate must be between 100 and 256000 when provisioning io2 volumes.",
),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 20001},
"IOPS to volume size ratio of .* is too high",
),
({"volume_type": "gp3", "volume_size": 20, "volume_iops": 3000}, None),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 2900},
"IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.",
),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 16001},
"IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.",
),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 10001},
"IOPS to volume size ratio of .* is too high",
),
],
)
def test_raid_validators(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": {"raid_settings": "default"}, "raid default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"kms_key_id, expected_message",
[
("9e8a129be-0e46-459d-865b-3a5bf974a22k", None),
(
"9e7a129be-0e46-459d-865b-3a5bf974a22k",
"Key 'arn:aws:kms:us-east-1:12345678:key/9e7a129be-0e46-459d-865b-3a5bf974a22k' does not exist",
),
],
)
def test_kms_key_validator(mocker, boto3_stubber, kms_key_id, expected_message):
_kms_key_stubber(mocker, boto3_stubber, kms_key_id, expected_message, 1)
config_parser_dict = {
"cluster default": {"fsx_settings": "fsx"},
"fsx fsx": {
"storage_capacity": 1200,
"fsx_kms_key_id": kms_key_id,
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 50,
},
}
utils.assert_param_validator(
mocker, config_parser_dict, expected_error=expected_message if expected_message else None
)
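# Helper that registers `num_calls` identical describe_key stubs on the kms client; when `expected_message` is
# set, the stub raises a client error instead of returning the key metadata.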
def _kms_key_stubber(mocker, boto3_stubber, kms_key_id, expected_message, num_calls):
describe_key_response = {
"KeyMetadata": {
"AWSAccountId": "1234567890",
"Arn": "arn:aws:kms:us-east-1:1234567890:key/{0}".format(kms_key_id),
"CreationDate": datetime.datetime(2019, 1, 10, 11, 25, 59, 128000),
"Description": "",
"Enabled": True,
"KeyId": kms_key_id,
"KeyManager": "CUSTOMER",
"KeyState": "Enabled",
"KeyUsage": "ENCRYPT_DECRYPT",
"Origin": "AWS_KMS",
}
}
mocked_requests = [
MockedBoto3Request(
method="describe_key",
response=expected_message if expected_message else describe_key_response,
expected_params={"KeyId": kms_key_id},
generate_error=True if expected_message else False,
)
] * num_calls
boto3_stubber("kms", mocked_requests)
@pytest.mark.parametrize(
"section_dict, bucket, expected_error, num_calls",
[
(
{"imported_file_chunk_size": 1024, "import_path": "s3://test", "storage_capacity": 1200},
{"Bucket": "test"},
None,
1,
),
(
{"imported_file_chunk_size": 1024, "storage_capacity": 1200},
None,
"When specifying 'imported_file_chunk_size', the 'import_path' option must be specified",
0,
),
(
{"export_path": "s3://test", "import_path": "s3://test", "storage_capacity": 1200},
{"Bucket": "test"},
None,
2,
),
(
{"export_path": "s3://test", "storage_capacity": 1200},
{"Bucket": "test"},
"When specifying 'export_path', the 'import_path' option must be specified",
0,
),
({"shared_dir": "NONE", "storage_capacity": 1200}, None, "NONE cannot be used as a shared directory", 0),
({"shared_dir": "/NONE", "storage_capacity": 1200}, None, "/NONE cannot be used as a shared directory", 0),
({"shared_dir": "/fsx"}, None, "the 'storage_capacity' option must be specified", 0),
({"shared_dir": "/fsx", "storage_capacity": 1200}, None, None, 0),
(
{
"deployment_type": "PERSISTENT_1",
"fsx_kms_key_id": "9e8a129be-0e46-459d-865b-3a5bf974a22k",
"storage_capacity": 1200,
"per_unit_storage_throughput": 50,
},
None,
None,
0,
),
(
{"deployment_type": "PERSISTENT_1", "per_unit_storage_throughput": 200, "storage_capacity": 1200},
None,
None,
0,
),
(
{
"deployment_type": "SCRATCH_2",
"fsx_kms_key_id": "9e8a129be-0e46-459d-865b-3a5bf974a22k",
"storage_capacity": 1200,
},
None,
"'fsx_kms_key_id' can only be used when 'deployment_type = PERSISTENT_1'",
1,
),
(
{"deployment_type": "SCRATCH_1", "per_unit_storage_throughput": 200, "storage_capacity": 1200},
None,
"'per_unit_storage_throughput' can only be used when 'deployment_type = PERSISTENT_1'",
0,
),
(
{"deployment_type": "PERSISTENT_1", "storage_capacity": 1200},
None,
"'per_unit_storage_throughput' must be specified when 'deployment_type = PERSISTENT_1'",
0,
),
(
{
"storage_capacity": 1200,
"per_unit_storage_throughput": "50",
"deployment_type": "PERSISTENT_1",
"automatic_backup_retention_days": 2,
},
None,
None,
0,
),
(
{
"storage_capacity": 1200,
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": "50",
"automatic_backup_retention_days": 2,
"daily_automatic_backup_start_time": "03:00",
"copy_tags_to_backups": True,
},
None,
None,
0,
),
(
{"automatic_backup_retention_days": 2, "deployment_type": "SCRATCH_1"},
None,
"FSx automatic backup features can be used only with 'PERSISTENT_1' file systems",
0,
),
(
{"daily_automatic_backup_start_time": "03:00"},
None,
"When specifying 'daily_automatic_backup_start_time', "
"the 'automatic_backup_retention_days' option must be specified",
0,
),
(
{"storage_capacity": 1200, "deployment_type": "PERSISTENT_1", "copy_tags_to_backups": True},
None,
"When specifying 'copy_tags_to_backups', the 'automatic_backup_retention_days' option must be specified",
0,
),
(
{"storage_capacity": 1200, "deployment_type": "PERSISTENT_1", "copy_tags_to_backups": False},
None,
"When specifying 'copy_tags_to_backups', the 'automatic_backup_retention_days' option must be specified",
0,
),
(
{"daily_automatic_backup_start_time": "03:00", "copy_tags_to_backups": True},
None,
"When specifying 'daily_automatic_backup_start_time', "
"the 'automatic_backup_retention_days' option must be specified",
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"automatic_backup_retention_days": 2,
"imported_file_chunk_size": 1024,
"export_path": "s3://test",
"import_path": "s3://test",
"storage_capacity": 1200,
},
{"Bucket": "test"},
"Backups cannot be created on S3-linked file systems",
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"automatic_backup_retention_days": 2,
"export_path": "s3://test",
"import_path": "s3://test",
"storage_capacity": 1200,
},
{"Bucket": "test"},
"Backups cannot be created on S3-linked file systems",
0,
),
(
{
"deployment_type": "SCRATCH_1",
"storage_type": "HDD",
"per_unit_storage_throughput": 12,
"storage_capacity": 1200,
"drive_cache_type": "READ",
},
None,
"For HDD filesystems, 'deployment_type' must be 'PERSISTENT_1'",
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"storage_type": "HDD",
"per_unit_storage_throughput": 50,
"storage_capacity": 1200,
"drive_cache_type": "READ",
},
None,
"For HDD filesystems, 'per_unit_storage_throughput' can only have the following values: {0}".format(
FSX_HDD_THROUGHPUT
),
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"storage_type": "SSD",
"per_unit_storage_throughput": 12,
"storage_capacity": 1200,
},
None,
"For SSD filesystems, 'per_unit_storage_throughput' can only have the following values: {0}".format(
FSX_SSD_THROUGHPUT
),
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"storage_type": "SSD",
"per_unit_storage_throughput": 50,
"storage_capacity": 1200,
"drive_cache_type": "NONE",
},
None,
"The configuration parameter 'drive_cache_type' has an invalid value 'NONE'",
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"storage_type": "SSD",
"per_unit_storage_throughput": 50,
"storage_capacity": 1200,
},
None,
None,
0,
),
(
{
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 50,
"storage_capacity": 1200,
"drive_cache_type": "READ",
},
None,
"'drive_cache_type' features can be used only with HDD filesystems",
0,
),
(
{
"data_compression_type": "LZ4",
"fsx_backup_id": "backup-12345678",
},
None,
"FSx data compression option (LZ4) cannot be specified when creating a filesystem from backup",
0,
),
(
{
"data_compression_type": "NONE",
"fsx_backup_id": "backup-12345678",
},
None,
"The configuration parameter 'data_compression_type' has an invalid value 'NONE'",
0,
),
(
{
"data_compression_type": "LZ4",
"storage_capacity": 1200,
},
None,
None,
0,
),
],
)
def test_fsx_validator(mocker, boto3_stubber, section_dict, bucket, expected_error, num_calls):
if bucket:
_head_bucket_stubber(mocker, boto3_stubber, bucket, num_calls)
if "fsx_kms_key_id" in section_dict:
_kms_key_stubber(mocker, boto3_stubber, section_dict.get("fsx_kms_key_id"), None, 0 if expected_error else 1)
config_parser_dict = {"cluster default": {"fsx_settings": "default"}, "fsx default": section_dict}
if expected_error:
expected_error = re.escape(expected_error)
utils.assert_param_validator(mocker, config_parser_dict, expected_error=expected_error)
@pytest.mark.parametrize(
"section_dict, expected_error, expected_warning",
[
(
{"storage_capacity": 1, "deployment_type": "SCRATCH_1"},
"Capacity for FSx SCRATCH_1 filesystem is 1,200 GB, 2,400 GB or increments of 3,600 GB",
None,
),
({"storage_capacity": 1200, "deployment_type": "SCRATCH_1"}, None, None),
({"storage_capacity": 2400, "deployment_type": "SCRATCH_1"}, None, None),
({"storage_capacity": 3600, "deployment_type": "SCRATCH_1"}, None, None),
(
{"storage_capacity": 3600, "deployment_type": "SCRATCH_2"},
"Capacity for FSx SCRATCH_2 and PERSISTENT_1 filesystems is 1,200 GB or increments of 2,400 GB",
None,
),
(
{"storage_capacity": 3600, "deployment_type": "PERSISTENT_1", "per_unit_storage_throughput": 50},
"Capacity for FSx SCRATCH_2 and PERSISTENT_1 filesystems is 1,200 GB or increments of 2,400 GB",
None,
),
(
{"storage_capacity": 3601, "deployment_type": "PERSISTENT_1", "per_unit_storage_throughput": 50},
"Capacity for FSx SCRATCH_2 and PERSISTENT_1 filesystems is 1,200 GB or increments of 2,400 GB",
None,
),
({"storage_capacity": 7200}, None, None),
(
{"deployment_type": "SCRATCH_1"},
"When specifying 'fsx' section, the 'storage_capacity' option must be specified",
None,
),
(
{
"storage_type": "HDD",
"deployment_type": "PERSISTENT_1",
"storage_capacity": 1801,
"per_unit_storage_throughput": 40,
},
"Capacity for FSx PERSISTENT HDD 40 MB/s/TiB file systems is increments of 1,800 GiB",
None,
),
(
{
"storage_type": "HDD",
"deployment_type": "PERSISTENT_1",
"storage_capacity": 6001,
"per_unit_storage_throughput": 12,
},
"Capacity for FSx PERSISTENT HDD 12 MB/s/TiB file systems is increments of 6,000 GiB",
None,
),
(
{
"storage_type": "HDD",
"deployment_type": "PERSISTENT_1",
"storage_capacity": 1800,
"per_unit_storage_throughput": 40,
},
None,
None,
),
(
{
"storage_type": "HDD",
"deployment_type": "PERSISTENT_1",
"storage_capacity": 6000,
"per_unit_storage_throughput": 12,
},
None,
None,
),
],
)
def test_fsx_storage_capacity_validator(mocker, boto3_stubber, capsys, section_dict, expected_error, expected_warning):
config_parser_dict = {"cluster default": {"fsx_settings": "default"}, "fsx default": section_dict}
utils.assert_param_validator(
mocker, config_parser_dict, capsys=capsys, expected_error=expected_error, expected_warning=expected_warning
)
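# Helper that registers `num_calls` identical head_bucket stubs on the s3 client and patches
# urllib.request.urlopen so that no real HTTP request is made during validation.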
def _head_bucket_stubber(mocker, boto3_stubber, bucket, num_calls):
head_bucket_response = {
"ResponseMetadata": {
"AcceptRanges": "bytes",
"ContentType": "text/html",
"LastModified": "Thu, 16 Apr 2015 18:19:14 GMT",
"ContentLength": 77,
"VersionId": "null",
"ETag": '"30a6ec7e1a9ad79c203d05a589c8b400"',
"Metadata": {},
}
}
mocked_requests = [
MockedBoto3Request(method="head_bucket", response=head_bucket_response, expected_params=bucket)
] * num_calls
boto3_stubber("s3", mocked_requests)
mocker.patch("pcluster.config.validators.urllib.request.urlopen")
@pytest.mark.parametrize(
"fsx_vpc, ip_permissions, network_interfaces, expected_message",
[
( # working case, right vpc and sg, multiple network interfaces
"vpc-06e4ab6c6cEXAMPLE",
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
["eni-09b9460295ddd4e5f", "eni-001b3cef7c78b45c4"],
None,
),
( # working case, right vpc and sg, single network interface
"vpc-06e4ab6c6cEXAMPLE",
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
["eni-09b9460295ddd4e5f"],
None,
),
( # not working case --> no network interfaces
"vpc-06e4ab6c6cEXAMPLE",
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
[],
"doesn't have Elastic Network Interfaces attached",
),
( # not working case --> wrong vpc
"vpc-06e4ab6c6ccWRONG",
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
["eni-09b9460295ddd4e5f"],
"only support using FSx file system that is in the same VPC as the stack",
),
( # not working case --> wrong ip permissions in security group
"vpc-06e4ab6c6cWRONG",
[
{
"PrefixListIds": [],
"FromPort": 22,
"IpRanges": [{"CidrIp": "203.0.113.0/24"}],
"ToPort": 22,
"IpProtocol": "tcp",
"UserIdGroupPairs": [],
}
],
["eni-09b9460295ddd4e5f"],
"does not satisfy mounting requirement",
),
],
)
def test_fsx_id_validator(mocker, boto3_stubber, fsx_vpc, ip_permissions, network_interfaces, expected_message):
describe_file_systems_response = {
"FileSystems": [
{
"VpcId": fsx_vpc,
"NetworkInterfaceIds": network_interfaces,
"SubnetIds": ["subnet-12345678"],
"FileSystemType": "LUSTRE",
"CreationTime": 1567636453.038,
"ResourceARN": "arn:aws:fsx:us-west-2:111122223333:file-system/fs-0ff8da96d57f3b4e3",
"StorageCapacity": 3600,
"LustreConfiguration": {"WeeklyMaintenanceStartTime": "4:07:00"},
"FileSystemId": "fs-0ff8da96d57f3b4e3",
"DNSName": "fs-0ff8da96d57f3b4e3.fsx.us-west-2.amazonaws.com",
"OwnerId": "059623208481",
"Lifecycle": "AVAILABLE",
}
]
}
fsx_mocked_requests = [
MockedBoto3Request(
method="describe_file_systems",
response=describe_file_systems_response,
expected_params={"FileSystemIds": ["fs-0ff8da96d57f3b4e3"]},
)
]
boto3_stubber("fsx", fsx_mocked_requests)
describe_subnets_response = {
"Subnets": [
{
"AvailabilityZone": "us-east-2c",
"AvailabilityZoneId": "use2-az3",
"AvailableIpAddressCount": 248,
"CidrBlock": "10.0.1.0/24",
"DefaultForAz": False,
"MapPublicIpOnLaunch": False,
"State": "available",
"SubnetId": "subnet-12345678",
"VpcId": "vpc-06e4ab6c6cEXAMPLE",
"OwnerId": "111122223333",
"AssignIpv6AddressOnCreation": False,
"Ipv6CidrBlockAssociationSet": [],
"Tags": [{"Key": "Name", "Value": "MySubnet"}],
"SubnetArn": "arn:aws:ec2:us-east-2:111122223333:subnet/subnet-12345678",
}
]
}
ec2_mocked_requests = [
MockedBoto3Request(
method="describe_subnets",
response=describe_subnets_response,
expected_params={"SubnetIds": ["subnet-12345678"]},
)
] * 2
if network_interfaces:
network_interfaces_in_response = []
for network_interface in network_interfaces:
network_interfaces_in_response.append(
{
"Association": {
"AllocationId": "eipalloc-01564b674a1a88a47",
"AssociationId": "eipassoc-02726ee370e175cea",
"IpOwnerId": "111122223333",
"PublicDnsName": "ec2-34-248-114-123.eu-west-1.compute.amazonaws.com",
"PublicIp": "34.248.114.123",
},
"Attachment": {
"AttachmentId": "ela-attach-0cf98331",
"DeleteOnTermination": False,
"DeviceIndex": 1,
"InstanceOwnerId": "amazon-aws",
"Status": "attached",
},
"AvailabilityZone": "eu-west-1a",
"Description": "Interface for NAT Gateway nat-0a8b0e0d28266841f",
"Groups": [{"GroupName": "default", "GroupId": "sg-12345678"}],
"InterfaceType": "nat_gateway",
"Ipv6Addresses": [],
"MacAddress": "0a:e5:8a:82:fd:24",
"NetworkInterfaceId": network_interface,
"OwnerId": "111122223333",
"PrivateDnsName": "ip-10-0-124-85.eu-west-1.compute.internal",
"PrivateIpAddress": "10.0.124.85",
"PrivateIpAddresses": [
{
"Association": {
"AllocationId": "eipalloc-01564b674a1a88a47",
"AssociationId": "eipassoc-02726ee370e175cea",
"IpOwnerId": "111122223333",
"PublicDnsName": "ec2-34-248-114-123.eu-west-1.compute.amazonaws.com",
"PublicIp": "34.248.114.123",
},
"Primary": True,
"PrivateDnsName": "ip-10-0-124-85.eu-west-1.compute.internal",
"PrivateIpAddress": "10.0.124.85",
}
],
"RequesterId": "036872051663",
"RequesterManaged": True,
"SourceDestCheck": False,
"Status": "in-use",
"SubnetId": "subnet-12345678",
"TagSet": [],
"VpcId": fsx_vpc,
}
)
describe_network_interfaces_response = {"NetworkInterfaces": network_interfaces_in_response}
ec2_mocked_requests.append(
MockedBoto3Request(
method="describe_network_interfaces",
response=describe_network_interfaces_response,
expected_params={"NetworkInterfaceIds": network_interfaces},
)
)
if fsx_vpc == "vpc-06e4ab6c6cEXAMPLE":
# The describe_security_groups call is performed only if the VPC of the network interface is the same as the FSx file system's VPC
describe_security_groups_response = {
"SecurityGroups": [
{
"IpPermissionsEgress": ip_permissions,
"Description": "My security group",
"IpPermissions": ip_permissions,
"GroupName": "MySecurityGroup",
"OwnerId": "123456789012",
"GroupId": "sg-12345678",
}
]
}
ec2_mocked_requests.append(
MockedBoto3Request(
method="describe_security_groups",
response=describe_security_groups_response,
expected_params={"GroupIds": ["sg-12345678"]},
)
)
boto3_stubber("ec2", ec2_mocked_requests)
fsx_spy = mocker.patch(
"pcluster.config.cfn_param_types.get_fsx_info",
return_value={"DNSName": "my.fsx.dns.name", "LustreConfiguration": {"MountName": "somemountname"}},
)
config_parser_dict = {
"cluster default": {"fsx_settings": "default", "vpc_settings": "default"},
"vpc default": {"master_subnet_id": "subnet-12345678"},
"fsx default": {"fsx_fs_id": "fs-0ff8da96d57f3b4e3"},
}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
fsx_spy.assert_called_with("fs-0ff8da96d57f3b4e3")
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"enable_intel_hpc_platform": "true", "base_os": "centos7"}, None),
({"enable_intel_hpc_platform": "true", "base_os": "centos8"}, None),
({"enable_intel_hpc_platform": "true", "base_os": "alinux2"}, "it is required to set the 'base_os'"),
({"enable_intel_hpc_platform": "true", "base_os": "ubuntu1804"}, "it is required to set the 'base_os'"),
# Intel HPC platform disabled: any OS can be used
({"enable_intel_hpc_platform": "false", "base_os": "alinux2"}, None),
],
)
def test_intel_hpc_os_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
(
{"disable_hyperthreading": True, "extra_json": '{"cluster": {"cfn_scheduler_slots": "vcpus"}}'},
"cfn_scheduler_slots cannot be set in addition to disable_hyperthreading = true",
),
(
{"disable_hyperthreading": True, "extra_json": '{"cluster": {"cfn_scheduler_slots": "cores"}}'},
"cfn_scheduler_slots cannot be set in addition to disable_hyperthreading = true",
),
(
{"disable_hyperthreading": True, "extra_json": '{"cluster": {"cfn_scheduler_slots": 3}}'},
"cfn_scheduler_slots cannot be set in addition to disable_hyperthreading = true",
),
({"disable_hyperthreading": True, "extra_json": '{"cluster": {"other_param": "fake_value"}}'}, None),
({"disable_hyperthreading": True}, None),
({"disable_hyperthreading": False, "extra_json": '{"cluster": {"cfn_scheduler_slots": "vcpus"}}'}, None),
({"disable_hyperthreading": False, "extra_json": '{"cluster": {"cfn_scheduler_slots": "cores"}}'}, None),
({"disable_hyperthreading": False, "extra_json": '{"cluster": {"cfn_scheduler_slots": 3}}'}, None),
],
)
def test_disable_hyperthreading_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, bucket, expected_message",
[
(
{"imported_file_chunk_size": 0, "import_path": "s3://test-import", "storage_capacity": 1200},
None,
"has a minimum size of 1 MiB, and max size of 512,000 MiB",
),
(
{"imported_file_chunk_size": 1, "import_path": "s3://test-import", "storage_capacity": 1200},
{"Bucket": "test-import"},
None,
),
(
{"imported_file_chunk_size": 10, "import_path": "s3://test-import", "storage_capacity": 1200},
{"Bucket": "test-import"},
None,
),
(
{"imported_file_chunk_size": 512000, "import_path": "s3://test-import", "storage_capacity": 1200},
{"Bucket": "test-import"},
None,
),
(
{"imported_file_chunk_size": 512001, "import_path": "s3://test-import", "storage_capacity": 1200},
None,
"has a minimum size of 1 MiB, and max size of 512,000 MiB",
),
],
)
def test_fsx_imported_file_chunk_size_validator(mocker, boto3_stubber, section_dict, bucket, expected_message):
if bucket:
_head_bucket_stubber(mocker, boto3_stubber, bucket, num_calls=1)
config_parser_dict = {"cluster default": {"fsx_settings": "default"}, "fsx default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
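# The efa validator looks up EFA-capable instance types: describe_instance_types is stubbed to report t2.large
# as EFA-supported, while the additional-instance-type case provides its EFA support through instance_types_data.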
@pytest.mark.parametrize(
"section_dict, expected_error, expected_warning",
[
({"enable_efa": "NONE"}, "invalid value", None),
({"enable_efa": "compute", "scheduler": "sge"}, "is required to set the 'compute_instance_type'", None),
(
{"enable_efa": "compute", "compute_instance_type": "t2.large", "scheduler": "sge"},
None,
"You may see better performance using a cluster placement group",
),
(
{
"enable_efa": "compute",
"compute_instance_type": "t2.large",
"base_os": "alinux2",
"scheduler": "awsbatch",
},
"it is required to set the 'scheduler'",
None,
),
(
{
"enable_efa": "compute",
"compute_instance_type": "t2.large",
"base_os": "centos7",
"scheduler": "sge",
"placement_group": "DYNAMIC",
},
None,
None,
),
(
{
"enable_efa": "compute",
"compute_instance_type": "t2.large",
"base_os": "alinux2",
"scheduler": "sge",
"placement_group": "DYNAMIC",
},
None,
None,
),
# Additional instance type
(
{
"enable_efa": "compute",
"compute_instance_type": "additional-instance-type",
"base_os": "alinux2",
"scheduler": "sge",
"placement_group": "DYNAMIC",
"instance_types_data": json.dumps(
{
"additional-instance-type": {
"InstanceType": "additional-instance-type",
"NetworkInfo": {"EfaSupported": True},
}
}
),
},
None,
None,
),
],
)
def test_efa_validator(boto3_stubber, mocker, capsys, section_dict, expected_error, expected_warning):
if section_dict.get("enable_efa") != "NONE":
mocked_requests = [
MockedBoto3Request(
method="describe_instance_types",
response={"InstanceTypes": [{"InstanceType": "t2.large"}]},
expected_params={"Filters": [{"Name": "network-info.efa-supported", "Values": ["true"]}]},
)
]
boto3_stubber("ec2", mocked_requests)
config_parser_dict = {"cluster default": section_dict}
# Patch to prevent instance type validators from failing with the additional instance type
extra_patches = {
"pcluster.config.validators.get_supported_instance_types": ["t2.large", "additional-instance-type"],
}
utils.assert_param_validator(
mocker,
config_parser_dict,
expected_error,
capsys,
expected_warning,
extra_patches=extra_patches,
use_mock_instance_type_info=False,
)
@pytest.mark.parametrize(
"cluster_dict, expected_error",
[
# EFAGDR without EFA
(
{"enable_efa_gdr": "compute"},
"The parameter 'enable_efa_gdr' can be used only in combination with 'enable_efa'",
),
# EFAGDR with EFA
({"enable_efa": "compute", "enable_efa_gdr": "compute"}, None),
# EFA without EFAGDR
({"enable_efa": "compute"}, None),
],
)
def test_efa_gdr_validator(cluster_dict, expected_error):
config_parser_dict = {
"cluster default": cluster_dict,
}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
enable_efa_gdr_value = pcluster_config.get_section("cluster").get_param_value("enable_efa_gdr")
errors, warnings = efa_gdr_validator("enable_efa_gdr", enable_efa_gdr_value, pcluster_config)
if expected_error:
assert_that(errors[0]).matches(expected_error)
else:
assert_that(errors).is_empty()
@pytest.mark.parametrize(
"ip_permissions, ip_permissions_egress, expected_message",
[
([], [], "must allow all traffic in and out from itself"),
(
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
[],
"must allow all traffic in and out from itself",
),
(
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
[{"IpProtocol": "-1", "UserIdGroupPairs": [{"UserId": "123456789012", "GroupId": "sg-12345678"}]}],
None,
),
(
[
{
"PrefixListIds": [],
"FromPort": 22,
"IpRanges": [{"CidrIp": "203.0.113.0/24"}],
"ToPort": 22,
"IpProtocol": "tcp",
"UserIdGroupPairs": [],
}
],
[],
"must allow all traffic in and out from itself",
),
],
)
def test_efa_validator_with_vpc_security_group(
boto3_stubber, mocker, ip_permissions, ip_permissions_egress, expected_message
):
describe_security_groups_response = {
"SecurityGroups": [
{
"IpPermissionsEgress": ip_permissions_egress,
"Description": "My security group",
"IpPermissions": ip_permissions,
"GroupName": "MySecurityGroup",
"OwnerId": "123456789012",
"GroupId": "sg-12345678",
}
]
}
mocked_requests = [
MockedBoto3Request(
method="describe_security_groups",
response=describe_security_groups_response,
expected_params={"GroupIds": ["sg-12345678"]},
),
MockedBoto3Request(
method="describe_instance_types",
response={"InstanceTypes": [{"InstanceType": "t2.large"}]},
expected_params={"Filters": [{"Name": "network-info.efa-supported", "Values": ["true"]}]},
),
MockedBoto3Request(
method="describe_security_groups",
response=describe_security_groups_response,
expected_params={"GroupIds": ["sg-12345678"]},
),  # called twice: once for vpc_security_group_id validation and once for EFA validation
]
boto3_stubber("ec2", mocked_requests)
config_parser_dict = {
"cluster default": {
"enable_efa": "compute",
"compute_instance_type": "t2.large",
"placement_group": "DYNAMIC",
"vpc_settings": "default",
"scheduler": "sge",
},
"vpc default": {"vpc_security_group_id": "sg-12345678"},
}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"cluster_section_dict, ebs_section_dict, expected_message",
[
(
{"ebs_settings": "vol1, vol2, vol3, vol4, vol5, vol6"},
{
"vol1": {"shared_dir": "/vol1"},
"vol2": {"shared_dir": "/vol2"},
"vol3": {"shared_dir": "/vol3"},
"vol4": {"shared_dir": "/vol4"},
"vol5": {"shared_dir": "/vol5"},
"vol6": {"shared_dir": "/vol6"},
},
"Invalid number of 'ebs' sections specified. Max 5 expected.",
),
(
{"ebs_settings": "vol1, vol2 "},
{"vol1": {"shared_dir": "vol1"}, "vol2": {"volume_type": "io1"}},
"When using more than 1 EBS volume, shared_dir is required under each EBS section",
),
(
{"ebs_settings": "vol1,vol2"},
{"vol1": {"shared_dir": "/NONE"}, "vol2": {"shared_dir": "vol2"}},
"/NONE cannot be used as a shared directory",
),
(
{"ebs_settings": "vol1, vol2 "},
{"vol1": {"shared_dir": "/vol1"}, "vol2": {"shared_dir": "NONE"}},
"NONE cannot be used as a shared directory",
),
],
)
def test_ebs_settings_validator(mocker, cluster_section_dict, ebs_section_dict, expected_message):
config_parser_dict = {"cluster default": cluster_section_dict}
if ebs_section_dict:
for vol in ebs_section_dict:
config_parser_dict["ebs {0}".format(vol)] = ebs_section_dict.get(vol)
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"shared_dir": "NONE"}, "NONE cannot be used as a shared directory"),
({"shared_dir": "/NONE"}, "/NONE cannot be used as a shared directory"),
({"shared_dir": "/NONEshared"}, None),
],
)
def test_shared_dir_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"base_os, instance_type, access_from, expected_error, expected_warning",
[
("centos7", "t2.medium", None, None, None),
("centos8", "t2.medium", None, None, None),
("ubuntu1804", "t2.medium", None, None, None),
("ubuntu1804", "t2.medium", "1.2.3.4/32", None, None),
("centos7", "t2.medium", "0.0.0.0/0", None, None),
("centos8", "t2.medium", "0.0.0.0/0", None, None),
("alinux2", "t2.medium", None, None, None),
("alinux2", "t2.nano", None, None, "is recommended to use an instance type with at least"),
("alinux2", "t2.micro", None, None, "is recommended to use an instance type with at least"),
("ubuntu1804", "m6g.xlarge", None, None, None),
("alinux2", "m6g.xlarge", None, None, None),
("centos7", "m6g.xlarge", None, None, None),
("centos8", "m6g.xlarge", None, None, None),
],
)
def test_dcv_enabled_validator(
mocker, base_os, instance_type, expected_error, expected_warning, access_from, caplog, capsys
):
config_parser_dict = {
"cluster default": {"base_os": base_os, "dcv_settings": "dcv"},
"dcv dcv": {"enable": "master"},
}
if access_from:
config_parser_dict["dcv dcv"]["access_from"] = access_from
architectures = ["x86_64"] if instance_type.startswith("t2") else ["arm64"]
extra_patches = {
"pcluster.config.validators.get_supported_instance_types": ["t2.nano", "t2.micro", "t2.medium", "m6g.xlarge"],
"pcluster.config.validators.get_supported_architectures_for_instance_type": architectures,
"pcluster.config.cfn_param_types.get_supported_architectures_for_instance_type": architectures,
}
utils.assert_param_validator(
mocker, config_parser_dict, expected_error, capsys, expected_warning, extra_patches=extra_patches
)
access_from_error_msg = DCV_MESSAGES["warnings"]["access_from_world"].format(port=8443)
assert_that(access_from_error_msg in caplog.text).is_equal_to(not access_from or access_from == "0.0.0.0/0")
@pytest.mark.parametrize(
"architecture, base_os, expected_message",
[
# Supported combinations
("x86_64", "alinux2", None),
("x86_64", "centos7", None),
("x86_64", "centos8", None),
("x86_64", "ubuntu1804", None),
("arm64", "ubuntu1804", None),
("arm64", "alinux2", None),
("arm64", "centos7", None),
("arm64", "centos8", None),
# Unsupported combinations
(
"UnsupportedArchitecture",
"alinux2",
FSX_MESSAGES["errors"]["unsupported_architecture"].format(
supported_architectures=list(FSX_SUPPORTED_ARCHITECTURES_OSES.keys())
),
),
],
)
def test_fsx_architecture_os_validator(mocker, architecture, base_os, expected_message):
config_parser_dict = {
"cluster default": {"base_os": base_os, "fsx_settings": "fsx"},
"fsx fsx": {"storage_capacity": 3200},
}
expected_message = re.escape(expected_message) if expected_message else None
extra_patches = {
"pcluster.config.cfn_param_types.get_supported_architectures_for_instance_type": [architecture],
"pcluster.config.validators.get_supported_architectures_for_instance_type": [architecture],
}
utils.assert_param_validator(mocker, config_parser_dict, expected_message, extra_patches=extra_patches)
@pytest.mark.parametrize(
"section_dict, expected_message",
[
(
{"initial_queue_size": "0", "maintain_initial_size": True},
"maintain_initial_size cannot be set to true if initial_queue_size is 0",
),
(
{"scheduler": "awsbatch", "maintain_initial_size": True},
"maintain_initial_size is not supported when using awsbatch as scheduler",
),
],
)
def test_maintain_initial_size_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"cluster_section_dict, expected_message",
[
# SIT cluster, perfectly fine
({"scheduler": "slurm"}, None),
# HIT cluster with one queue
({"scheduler": "slurm", "queue_settings": "queue1"}, None),
({"scheduler": "slurm", "queue_settings": "queue1,queue2,queue3,queue4,queue5"}, None),
({"scheduler": "slurm", "queue_settings": "queue1, queue2"}, None),
(
{"scheduler": "slurm", "queue_settings": "queue1,queue2,queue3,queue4,queue5,queue6"},
"Invalid number of 'queue' sections specified. Max 5 expected.",
),
(
{"scheduler": "slurm", "queue_settings": "queue_1"},
(
"Invalid queue name 'queue_1'. Queue section names can be at most 30 chars long, must begin with"
" a letter and only contain lowercase letters, digits and hyphens. It is forbidden to use"
" 'default' as a queue section name."
),
),
(
{"scheduler": "slurm", "queue_settings": "default"},
(
"Invalid queue name 'default'. Queue section names can be at most 30 chars long, must begin with"
" a letter and only contain lowercase letters, digits and hyphens. It is forbidden to use"
" 'default' as a queue section name."
),
),
(
{"scheduler": "slurm", "queue_settings": "queue1, default"},
(
"Invalid queue name '.*'. Queue section names can be at most 30 chars long, must begin with"
" a letter and only contain lowercase letters, digits and hyphens. It is forbidden to use"
" 'default' as a queue section name."
),
),
(
{"scheduler": "slurm", "queue_settings": "QUEUE"},
(
"Invalid queue name 'QUEUE'. Queue section names can be at most 30 chars long, must begin with"
" a letter and only contain lowercase letters, digits and hyphens. It is forbidden to use"
" 'default' as a queue section name."
),
),
(
{"scheduler": "slurm", "queue_settings": "aQUEUEa"},
(
"Invalid queue name 'aQUEUEa'. Queue section names can be at most 30 chars long, must begin with"
" a letter and only contain lowercase letters, digits and hyphens. It is forbidden to use"
" 'default' as a queue section name."
),
),
({"scheduler": "slurm", "queue_settings": "my-default-queue"}, None),
],
)
def test_queue_settings_validator(mocker, cluster_section_dict, expected_message):
config_parser_dict = {"cluster default": cluster_section_dict}
if cluster_section_dict.get("queue_settings"):
for i, queue_name in enumerate(cluster_section_dict["queue_settings"].split(",")):
config_parser_dict["queue {0}".format(queue_name.strip())] = {
"compute_resource_settings": "cr{0}".format(i),
"disable_hyperthreading": True,
"enable_efa": True,
}
config_parser_dict["compute_resource cr{0}".format(i)] = {"instance_type": "t2.micro"}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
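# queue_validator checks that compute resources in the same queue use different instance types, warns when EFA
# or EFA GDR is enabled on instance types that do not support it, and rejects enable_efa, enable_efa_gdr and
# disable_hyperthreading when they are set in both the cluster and the queue section.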
@pytest.mark.parametrize(
"cluster_dict, queue_dict, expected_error_messages, expected_warning_messages",
[
(
{"queue_settings": "default"},
{"compute_resource_settings": "cr1,cr2", "enable_efa": True, "disable_hyperthreading": True},
[
"Duplicate instance type 't2.micro' found in queue 'default'. "
"Compute resources in the same queue must use different instance types"
],
[
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr1 does not support EFA.",
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr2 does not support EFA.",
],
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "cr3,cr4", "enable_efa": True, "disable_hyperthreading": True},
[
"Duplicate instance type 'c4.xlarge' found in queue 'default'. "
"Compute resources in the same queue must use different instance types"
],
[
"EFA was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr3 does not support EFA.",
"EFA was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr4 does not support EFA.",
],
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "cr1,cr3", "enable_efa": True, "disable_hyperthreading": True},
None,
[
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr1 does not support EFA.",
"EFA was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr3 does not support EFA.",
],
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "cr2,cr4", "enable_efa": True, "disable_hyperthreading": True},
None,
[
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr2 does not support EFA.",
"EFA was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr4 does not support EFA.",
],
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "cr2,cr4", "enable_efa": True, "enable_efa_gdr": True},
None,
[
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr2 does not support EFA.",
"EFA GDR was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr2 does not support EFA GDR.",
"EFA was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr4 does not support EFA.",
"EFA GDR was enabled on queue 'default', but instance type 'c4.xlarge' "
"defined in compute resource settings cr4 does not support EFA GDR.",
],
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "efa_instance", "enable_efa_gdr": True},
["The parameter 'enable_efa_gdr' can be used only in combination with 'enable_efa'"],
None,
),
({"queue_settings": "default"}, {"compute_resource_settings": "cr1"}, None, None),
(
{"queue_settings": "default", "enable_efa": "compute", "disable_hyperthreading": True},
{"compute_resource_settings": "cr1", "enable_efa": True, "disable_hyperthreading": True},
[
"Parameter 'enable_efa' can be used only in 'cluster' or in 'queue' section",
"Parameter 'disable_hyperthreading' can be used only in 'cluster' or in 'queue' section",
],
[
"EFA was enabled on queue 'default', but instance type 't2.micro' "
"defined in compute resource settings cr1 does not support EFA."
],
),
(
{
"queue_settings": "default",
"enable_efa": "compute",
"enable_efa_gdr": "compute",
"disable_hyperthreading": True,
},
{
"compute_resource_settings": "cr1",
"enable_efa": False,
"enable_efa_gdr": False,
"disable_hyperthreading": False,
},
[
"Parameter 'enable_efa' can be used only in 'cluster' or in 'queue' section",
"Parameter 'enable_efa_gdr' can be used only in 'cluster' or in 'queue' section",
"Parameter 'disable_hyperthreading' can be used only in 'cluster' or in 'queue' section",
],
None,
),
(
{"queue_settings": "default"},
{"compute_resource_settings": "efa_instance", "enable_efa": True},
None,
None,
),
],
)
def test_queue_validator(cluster_dict, queue_dict, expected_error_messages, expected_warning_messages):
config_parser_dict = {
"cluster default": cluster_dict,
"queue default": queue_dict,
"compute_resource cr1": {"instance_type": "t2.micro"},
"compute_resource cr2": {"instance_type": "t2.micro"},
"compute_resource cr3": {"instance_type": "c4.xlarge"},
"compute_resource cr4": {"instance_type": "c4.xlarge"},
"compute_resource efa_instance": {"instance_type": "p3dn.24xlarge"},
}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
efa_instance_compute_resource = pcluster_config.get_section("compute_resource", "efa_instance")
if efa_instance_compute_resource:
# Override the `enable_efa` and `enable_efa_gdr` default values for the instance type with EFA support
efa_instance_compute_resource.get_param("enable_efa").value = True
efa_instance_compute_resource.get_param("enable_efa_gdr").value = True
errors, warnings = queue_validator("queue", "default", pcluster_config)
if expected_error_messages:
assert_that(expected_error_messages).is_equal_to(errors)
else:
assert_that(errors).is_empty()
if expected_warning_messages:
assert_that(expected_warning_messages).is_equal_to(warnings)
else:
assert_that(warnings).is_empty()
@pytest.mark.parametrize(
"param_value, expected_message",
[
(
"section1!2",
"Invalid label 'section1!2' in param 'queue_settings'. "
"Section labels can only contain alphanumeric characters, dashes or underscores.",
),
(
"section!123456789abcdefghijklmnopqrstuvwxyz_123456789abcdefghijklmnopqrstuvwxyz_",
"Invalid label 'section!123456789...' in param 'queue_settings'. "
"Section labels can only contain alphanumeric characters, dashes or underscores.",
),
("section-1", None),
("section_1", None),
(
"section_123456789abcdefghijklmnopqrstuvwxyz_123456789abcdefghijklmnopqrstuvwxyz_",
"Invalid label 'section_123456789...' in param 'queue_settings'. "
"The maximum length allowed for section labels is 64 characters",
),
],
)
def test_settings_validator(param_value, expected_message):
errors, warnings = settings_validator("queue_settings", param_value, None)
if expected_message:
assert_that(errors and len(errors) == 1).is_true()
assert_that(errors[0]).is_equal_to(expected_message)
else:
assert_that(errors).is_empty()
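# compute_resource_validator enforces the relationships between min_count, max_count, initial_count and
# spot_price: non-negative values, max_count >= min_count and min_count <= initial_count <= max_count.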
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"min_count": -1, "initial_count": -1}, "Parameter 'min_count' must be 0 or greater than 0"),
(
{"min_count": 0, "initial_count": 1, "spot_price": -1.1},
"Parameter 'spot_price' must be 0 or greater than 0",
),
(
{"min_count": 1, "max_count": 0, "initial_count": 1},
"Parameter 'max_count' must be greater than or equal to 'min_count'",
),
({"min_count": 0, "max_count": 0, "initial_count": 0}, "Parameter 'max_count' must be 1 or greater than 1"),
({"min_count": 1, "max_count": 2, "spot_price": 1.5, "initial_count": 1}, None),
(
{"min_count": 2, "max_count": 4, "initial_count": 1},
"Parameter 'initial_count' must be greater than or equal to 'min_count'",
),
(
{"min_count": 2, "max_count": 4, "initial_count": 5},
"Parameter 'initial_count' must be lower than or equal to 'max_count'",
),
],
)
def test_compute_resource_validator(mocker, section_dict, expected_message):
config_parser_dict = {
"cluster default": {"queue_settings": "default"},
"queue default": {"compute_resource_settings": "default"},
"compute_resource default": section_dict,
}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
mocker.patch(
"pcluster.config.cfn_param_types.get_supported_architectures_for_instance_type", return_value=["x86_64"]
)
instance_type_info_mock = mocker.MagicMock()
mocker.patch(
"pcluster.config.cfn_param_types.InstanceTypeInfo.init_from_instance_type", return_value=instance_type_info_mock
)
instance_type_info_mock.max_network_interface_count.return_value = 1
mocker.patch("pcluster.config.validators.get_supported_architectures_for_instance_type", return_value=["x86_64"])
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False)
errors, warnings = compute_resource_validator("compute_resource", "default", pcluster_config)
if expected_message:
assert_that(errors).contains(expected_message)
else:
assert_that(errors).is_empty()
@pytest.mark.parametrize(
"cluster_section_dict, sections_dict, expected_message",
[
(
{"vpc_settings": "vpc1, vpc2"},
{"vpc vpc1": {}, "vpc vpc2": {}},
"The value of 'vpc_settings' parameter is invalid. It can only contain a single vpc section label",
),
(
{"efs_settings": "efs1, efs2"},
{"efs efs1": {}, "efs efs2": {}},
"The value of 'efs_settings' parameter is invalid. It can only contain a single efs section label",
),
],
)
def test_single_settings_validator(mocker, cluster_section_dict, sections_dict, expected_message):
config_parser_dict = {"cluster default": cluster_section_dict}
if sections_dict:
for key, section in sections_dict.items():
config_parser_dict[key] = section
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
#########
#
# architecture validator tests
#
# Two things make it difficult to test validators that key on architecture in the same way as the other validators:
# 1) architecture is a derived parameter and cannot be configured directly via the config file
# 2) many validators key on the architecture, which makes it impossible to test some combinations of
# parameters for validators that run later than others, because those that run earlier will have
# already raised exceptions.
#
# Thus, the following code mocks the pcluster_config object passed to the validator functions
# and calls those functions directly (as opposed to patching functions and instantiating a config
# as would be done when running `pcluster create/update`).
#
#########
def get_default_pcluster_sections_dict():
"""Return a dict similar in structure to that of a cluster config file."""
default_pcluster_sections_dict = {}
for section_default_dict in DefaultDict:
if section_default_dict.name == "pcluster": # Get rid of the extra layer in this case
default_pcluster_sections_dict["cluster"] = section_default_dict.value.get("cluster")
else:
default_pcluster_sections_dict[section_default_dict.name] = section_default_dict.value
return default_pcluster_sections_dict
def make_pcluster_config_mock(mocker, config_dict):
"""Mock the calls that made on a pcluster_config by validator functions."""
cluster_config_dict = get_default_pcluster_sections_dict()
for section_key in config_dict:
cluster_config_dict = utils.merge_dicts(cluster_config_dict.get(section_key), config_dict.get(section_key))
section_to_mocks = {}
for section_key, section_dict in config_dict.items():
section_mock = mocker.MagicMock()
section_mock.get_param_value.side_effect = lambda param: section_dict.get(param)
section_to_mocks[section_key] = section_mock
pcluster_config_mock = mocker.MagicMock()
pcluster_config_mock.get_section.side_effect = lambda section: section_to_mocks.get(section)
return pcluster_config_mock
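# Minimal usage sketch of the mock above (hypothetical values, not part of the original
# tests): the returned object answers the two calls the architecture validators rely on.
#   cfg = make_pcluster_config_mock(mocker, {"cluster": {"architecture": "arm64"}})
#   cfg.get_section("cluster").get_param_value("architecture")  # -> "arm64"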
def run_architecture_validator_test(
mocker,
config,
constrained_param_section,
constrained_param_name,
param_name,
param_val,
validator,
expected_warnings,
expected_errors,
):
"""Run a test for a validator that's concerned with the architecture param."""
mocked_pcluster_config = make_pcluster_config_mock(mocker, config)
errors, warnings = validator(param_name, param_val, mocked_pcluster_config)
mocked_pcluster_config.get_section.assert_called_once_with(constrained_param_section)
mocked_pcluster_config.get_section.side_effect(constrained_param_section).get_param_value.assert_called_with(
constrained_param_name
)
assert_that(len(warnings)).is_equal_to(len(expected_warnings))
for warning, expected_warning in zip(warnings, expected_warnings):
assert_that(warning).matches(re.escape(expected_warning))
assert_that(len(errors)).is_equal_to(len(expected_errors))
for error, expected_error in zip(errors, expected_errors):
assert_that(error).matches(re.escape(expected_error))
@pytest.mark.parametrize(
"enabled, architecture, expected_errors",
[
(True, "x86_64", []),
(True, "arm64", ["instance types and an AMI that support these architectures"]),
(False, "x86_64", []),
(False, "arm64", []),
],
)
def test_intel_hpc_architecture_validator(mocker, enabled, architecture, expected_errors):
"""Verify that setting enable_intel_hpc_platform is invalid when architecture != x86_64."""
config_dict = {"cluster": {"enable_intel_hpc_platform": enabled, "architecture": architecture}}
run_architecture_validator_test(
mocker,
config_dict,
"cluster",
"architecture",
"enable_intel_hpc_platform",
enabled,
intel_hpc_architecture_validator,
[],
expected_errors,
)
@pytest.mark.parametrize(
"base_os, architecture, expected_warnings, expected_errors",
[
# All OSes supported for x86_64
("alinux2", "x86_64", [], []),
("centos7", "x86_64", [], []),
("centos8", "x86_64", [], []),
("ubuntu1804", "x86_64", [], []),
# Only a subset of OSes supported for arm64
("alinux2", "arm64", [], []),
(
"centos7",
"arm64",
[
"Warning: The aarch64 CentOS 7 OS is not validated for the 6th generation aarch64 instances "
"(M6g, C6g, etc.). To proceed please provide a custom_ami, "
"for more info see: https://wiki.centos.org/Cloud/AWS#aarch64_notes"
],
[],
),
("centos8", "arm64", [], []),
("ubuntu1804", "arm64", [], []),
],
)
def test_architecture_os_validator(mocker, base_os, architecture, expected_warnings, expected_errors):
"""Verify that the correct set of OSes is supported for each supported architecture."""
config_dict = {"cluster": {"base_os": base_os, "architecture": architecture}}
run_architecture_validator_test(
mocker,
config_dict,
"cluster",
"architecture",
"base_os",
base_os,
architecture_os_validator,
expected_warnings,
expected_errors,
)
@pytest.mark.parametrize(
"disable_hyperthreading, architecture, expected_errors",
[
(True, "x86_64", []),
(False, "x86_64", []),
(
True,
"arm64",
["disable_hyperthreading is only supported on instance types that support these architectures"],
),
(False, "arm64", []),
],
)
def test_disable_hyperthreading_architecture_validator(mocker, disable_hyperthreading, architecture, expected_errors):
config_dict = {"cluster": {"architecture": architecture, "disable_hyperthreading": disable_hyperthreading}}
run_architecture_validator_test(
mocker,
config_dict,
"cluster",
"architecture",
"disable_hyperthreading",
disable_hyperthreading,
disable_hyperthreading_architecture_validator,
[],
expected_errors,
)
@pytest.mark.parametrize(
"head_node_architecture, compute_architecture, compute_instance_type, expected_errors",
[
# Single compute_instance_type
("x86_64", "x86_64", "c5.xlarge", []),
(
"x86_64",
"arm64",
"m6g.xlarge",
["none of which are compatible with the architecture supported by the master_instance_type"],
),
(
"arm64",
"x86_64",
"c5.xlarge",
["none of which are compatible with the architecture supported by the master_instance_type"],
),
("arm64", "arm64", "m6g.xlarge", []),
("x86_64", "x86_64", "optimal", []),
# The function to get supported architectures shouldn't be called because these
# compute_instance_type values are instance families.
("x86_64", None, "m6g", []),
("x86_64", None, "c5", []),
# The validator must handle the case where compute_instance_type is a CSV list
("arm64", "arm64", "m6g.xlarge,r6g.xlarge", []),
(
"x86_64",
"arm64",
"m6g.xlarge,r6g.xlarge",
["none of which are compatible with the architecture supported by the master_instance_type"] * 2,
),
],
)
def test_instances_architecture_compatibility_validator(
mocker, caplog, head_node_architecture, compute_architecture, compute_instance_type, expected_errors
):
def internal_is_instance_type(itype):
return "." in itype or itype == "optimal"
supported_architectures_patch = mocker.patch(
"pcluster.config.validators.get_supported_architectures_for_instance_type", return_value=[compute_architecture]
)
is_instance_type_patch = mocker.patch(
"pcluster.config.validators.is_instance_type_format", side_effect=internal_is_instance_type
)
logger_patch = mocker.patch.object(LOGFILE_LOGGER, "debug")
run_architecture_validator_test(
mocker,
{"cluster": {"architecture": head_node_architecture}},
"cluster",
"architecture",
"compute_instance_type",
compute_instance_type,
instances_architecture_compatibility_validator,
[],
expected_errors,
)
compute_instance_types = compute_instance_type.split(",")
non_instance_families = [
instance_type for instance_type in compute_instance_types if internal_is_instance_type(instance_type)
]
assert_that(supported_architectures_patch.call_count).is_equal_to(len(non_instance_families))
assert_that(logger_patch.call_count).is_equal_to(len(compute_instance_types) - len(non_instance_families))
assert_that(is_instance_type_patch.call_count).is_equal_to(len(compute_instance_types))
@pytest.mark.parametrize(
"section_dict, bucket, num_calls, expected_error",
[
(
{
"fsx_backup_id": "backup-0ff8da96d57f3b4e3",
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 50,
},
None,
0,
"When restoring an FSx Lustre file system from backup, 'deployment_type' cannot be specified.",
),
(
{"fsx_backup_id": "backup-0ff8da96d57f3b4e3", "storage_capacity": 7200},
None,
0,
"When restoring an FSx Lustre file system from backup, 'storage_capacity' cannot be specified.",
),
(
{
"fsx_backup_id": "backup-0ff8da96d57f3b4e3",
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 100,
},
None,
0,
"When restoring an FSx Lustre file system from backup, 'per_unit_storage_throughput' cannot be specified.",
),
(
{
"fsx_backup_id": "backup-0ff8da96d57f3b4e3",
"imported_file_chunk_size": 1024,
"export_path": "s3://test",
"import_path": "s3://test",
},
{"Bucket": "test"},
2,
"When restoring an FSx Lustre file system from backup, 'imported_file_chunk_size' cannot be specified.",
),
(
{
"fsx_backup_id": "backup-0ff8da96d57f3b4e3",
"fsx_kms_key_id": "somekey",
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 50,
},
None,
0,
"When restoring an FSx Lustre file system from backup, 'fsx_kms_key_id' cannot be specified.",
),
(
{
"fsx_backup_id": "backup-00000000000000000",
"deployment_type": "PERSISTENT_1",
"per_unit_storage_throughput": 50,
},
None,
0,
"Failed to retrieve backup with Id 'backup-00000000000000000'",
),
],
)
def test_fsx_lustre_backup_validator(mocker, boto3_stubber, section_dict, bucket, num_calls, expected_error):
valid_key_id = "backup-0ff8da96d57f3b4e3"
describe_backups_response = {
"Backups": [
{
"BackupId": valid_key_id,
"Lifecycle": "AVAILABLE",
"Type": "USER_INITIATED",
"CreationTime": 1594159673.559,
"FileSystem": {
"StorageCapacity": 7200,
"StorageType": "SSD",
"LustreConfiguration": {"DeploymentType": "PERSISTENT_1", "PerUnitStorageThroughput": 200},
},
}
]
}
if bucket:
_head_bucket_stubber(mocker, boto3_stubber, bucket, num_calls)
generate_describe_backups_error = section_dict.get("fsx_backup_id") != valid_key_id
fsx_mocked_requests = [
MockedBoto3Request(
method="describe_backups",
response=expected_error if generate_describe_backups_error else describe_backups_response,
expected_params={"BackupIds": [section_dict.get("fsx_backup_id")]},
generate_error=generate_describe_backups_error,
)
]
boto3_stubber("fsx", fsx_mocked_requests)
if "fsx_kms_key_id" in section_dict:
describe_key_response = {"KeyMetadata": {"KeyId": section_dict.get("fsx_kms_key_id")}}
kms_mocked_requests = [
MockedBoto3Request(
method="describe_key",
response=describe_key_response,
expected_params={"KeyId": section_dict.get("fsx_kms_key_id")},
)
]
boto3_stubber("kms", kms_mocked_requests)
config_parser_dict = {"cluster default": {"fsx_settings": "default"}, "fsx default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_error=expected_error)
#########
#
# ignored FSx params validator test
#
# Testing a validator that requires the fsx_fs_id parameter to be specified requires a lot of
# boto3 stubbing due to the complexity contained in the fsx_id_validator.
#
# Thus, the following code mocks the pcluster_config object passed to the validator functions
# and calls the validator directly.
#
#########
@pytest.mark.parametrize(
"section_dict, expected_error",
[
({"fsx_fs_id": "fs-0123456789abcdef0", "shared_dir": "/fsx"}, None),
(
{"fsx_fs_id": "fs-0123456789abcdef0", "shared_dir": "/fsx", "storage_capacity": 3600},
"storage_capacity is ignored when specifying an existing Lustre file system",
),
],
)
def test_fsx_ignored_parameters_validator(mocker, section_dict, expected_error):
mocked_pcluster_config = utils.get_mocked_pcluster_config(mocker)
fsx_section = CfnSection(FSX, mocked_pcluster_config, "default")
for param_key, param_value in section_dict.items():
param = FSX.get("params").get(param_key).get("type", CfnParam)
param.value = param_value
fsx_section.set_param(param_key, param)
mocked_pcluster_config.add_section(fsx_section)
errors, warnings = fsx_ignored_parameters_validator("fsx", "default", mocked_pcluster_config)
assert_that(warnings).is_empty()
if expected_error:
assert_that(errors[0]).matches(expected_error)
else:
assert_that(errors).is_empty()
@pytest.mark.parametrize(
"section_dict, expected_error",
[
({"volume_type": "standard", "volume_size": 15}, None),
({"volume_type": "standard", "volume_size": 0}, "The size of standard volumes must be at least 1 GiB"),
({"volume_type": "standard", "volume_size": 1025}, "The size of standard volumes can not exceed 1024 GiB"),
({"volume_type": "io1", "volume_size": 15}, None),
({"volume_type": "io1", "volume_size": 3}, "The size of io1 volumes must be at least 4 GiB"),
({"volume_type": "io1", "volume_size": 16385}, "The size of io1 volumes can not exceed 16384 GiB"),
({"volume_type": "io2", "volume_size": 15}, None),
({"volume_type": "io2", "volume_size": 3}, "The size of io2 volumes must be at least 4 GiB"),
({"volume_type": "io2", "volume_size": 65537}, "The size of io2 volumes can not exceed 65536 GiB"),
({"volume_type": "gp2", "volume_size": 15}, None),
({"volume_type": "gp2", "volume_size": 0}, "The size of gp2 volumes must be at least 1 GiB"),
({"volume_type": "gp2", "volume_size": 16385}, "The size of gp2 volumes can not exceed 16384 GiB"),
({"volume_type": "gp3", "volume_size": 15}, None),
({"volume_type": "gp3", "volume_size": 0}, "The size of gp3 volumes must be at least 1 GiB"),
({"volume_type": "gp3", "volume_size": 16385}, "The size of gp3 volumes can not exceed 16384 GiB"),
({"volume_type": "st1", "volume_size": 500}, None),
({"volume_type": "st1", "volume_size": 20}, "The size of st1 volumes must be at least 500 GiB"),
({"volume_type": "st1", "volume_size": 16385}, "The size of st1 volumes can not exceed 16384 GiB"),
({"volume_type": "sc1", "volume_size": 500}, None),
({"volume_type": "sc1", "volume_size": 20}, "The size of sc1 volumes must be at least 500 GiB"),
({"volume_type": "sc1", "volume_size": 16385}, "The size of sc1 volumes can not exceed 16384 GiB"),
],
)
def test_ebs_volume_type_size_validator(mocker, section_dict, caplog, expected_error):
config_parser_dict = {"cluster default": {"ebs_settings": "default"}, "ebs default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_error)
def test_ebs_allowed_values_all_have_volume_size_bounds():
"""Ensure that all known EBS volume types are accounted for by the volume size validator."""
allowed_values_all_have_volume_size_bounds = set(ALLOWED_VALUES["volume_types"]) <= set(
EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS.keys()
)
assert_that(allowed_values_all_have_volume_size_bounds).is_true()
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"volume_type": "io1", "volume_size": 20, "volume_iops": 120}, None),
(
{"volume_type": "io1", "volume_size": 20, "volume_iops": 90},
"IOPS rate must be between 100 and 64000 when provisioning io1 volumes.",
),
(
{"volume_type": "io1", "volume_size": 20, "volume_iops": 64001},
"IOPS rate must be between 100 and 64000 when provisioning io1 volumes.",
),
({"volume_type": "io1", "volume_size": 20, "volume_iops": 1001}, "IOPS to volume size ratio of .* is too high"),
({"volume_type": "io2", "volume_size": 20, "volume_iops": 120}, None),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 90},
"IOPS rate must be between 100 and 256000 when provisioning io2 volumes.",
),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 256001},
"IOPS rate must be between 100 and 256000 when provisioning io2 volumes.",
),
(
{"volume_type": "io2", "volume_size": 20, "volume_iops": 20001},
"IOPS to volume size ratio of .* is too high",
),
({"volume_type": "gp3", "volume_size": 20, "volume_iops": 3000}, None),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 2900},
"IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.",
),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 16001},
"IOPS rate must be between 3000 and 16000 when provisioning gp3 volumes.",
),
(
{"volume_type": "gp3", "volume_size": 20, "volume_iops": 10001},
"IOPS to volume size ratio of .* is too high",
),
],
)
def test_ebs_volume_iops_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": {"ebs_settings": "default"}, "ebs default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"section_dict, snapshot_size, state, partition, expected_warning, expected_error, "
"raise_error_when_getting_snapshot_info",
[
(
{"volume_size": 100, "ebs_snapshot_id": "snap-1234567890abcdef0"},
50,
"completed",
"aws-cn",
"The specified volume size is larger than snapshot size. In order to use the full capacity of the "
"volume, you'll need to manually resize the partition "
"according to this doc: "
"https://docs.amazonaws.cn/AWSEC2/latest/UserGuide/recognize-expanded-volume-linux.html",
None,
False,
),
(
{"volume_size": 100, "ebs_snapshot_id": "snap-1234567890abcdef0"},
50,
"completed",
"aws-us-gov",
"The specified volume size is larger than snapshot size. In order to use the full capacity of the "
"volume, you'll need to manually resize the partition "
"according to this doc: "
"https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/recognize-expanded-volume-linux.html",
None,
False,
),
(
{"volume_size": 100, "ebs_snapshot_id": "snap-1234567890abcdef0"},
50,
"incompleted",
"aws-us-gov",
"Snapshot snap-1234567890abcdef0 is in state 'incompleted' not 'completed'",
None,
False,
),
({"ebs_snapshot_id": "snap-1234567890abcdef0"}, 50, "completed", "partition", None, None, False),
(
{"volume_size": 100, "ebs_snapshot_id": "snap-1234567891abcdef0"},
120,
"completed",
"aws-us-gov",
None,
"The EBS volume size of the section 'default' must not be smaller than 120, because it is the size of the "
"provided snapshot snap-1234567891abcdef0",
False,
),
(
{"volume_size": 100, "ebs_snapshot_id": "snap-1234567890abcdef0"},
None,
"completed",
"aws-cn",
None,
"Unable to get volume size for snapshot snap-1234567890abcdef0",
False,
),
(
{"ebs_snapshot_id": "snap-1234567890abcdef0"},
20,
"completed",
"aws",
None,
"some message",
True,
),
],
)
def test_ebs_volume_size_snapshot_validator(
section_dict,
snapshot_size,
state,
partition,
mocker,
expected_warning,
expected_error,
raise_error_when_getting_snapshot_info,
capsys,
):
ebs_snapshot_id = section_dict["ebs_snapshot_id"]
describe_snapshots_response = {
"Description": "This is my snapshot",
"Encrypted": False,
"VolumeId": "vol-049df61146c4d7901",
"State": state,
"VolumeSize": snapshot_size,
"StartTime": "2014-02-28T21:28:32.000Z",
"Progress": "100%",
"OwnerId": "012345678910",
"SnapshotId": ebs_snapshot_id,
}
mocker.patch("pcluster.config.cfn_param_types.get_ebs_snapshot_info", return_value=describe_snapshots_response)
if raise_error_when_getting_snapshot_info:
mocker.patch("pcluster.config.validators.get_ebs_snapshot_info", side_effect=Exception(expected_error))
else:
mocker.patch("pcluster.config.validators.get_ebs_snapshot_info", return_value=describe_snapshots_response)
mocker.patch(
"pcluster.config.validators.get_partition", return_value="aws-cn" if partition == "aws-cn" else "aws-us-gov"
)
config_parser_dict = {"cluster default": {"ebs_settings": "default"}, "ebs default": section_dict}
utils.assert_param_validator(
mocker, config_parser_dict, expected_error=expected_error, capsys=capsys, expected_warning=expected_warning
)
@pytest.mark.parametrize(
"cluster_section_dict, ebs_section_dict1, ebs_section_dict2, expected_message",
[
(
{"shared_dir": "shared_directory", "ebs_settings": "vol1"},
{"volume_size": 30},
{},
None,
),
(
{"shared_dir": "shared_directory", "ebs_settings": "vol1"},
{"shared_dir": "shared_directory1"},
{},
"'shared_dir' can not be specified both in cluster section and EBS section",
),
(
{"shared_dir": "shared_directory", "ebs_settings": "vol1, vol2"},
{"shared_dir": "shared_directory1", "volume_size": 30},
{"shared_dir": "shared_directory2", "volume_size": 30},
"'shared_dir' can not be specified in cluster section when using multiple EBS volumes",
),
(
{"ebs_settings": "vol1, vol2"},
{"shared_dir": "shared_directory1", "volume_size": 30},
{"shared_dir": "shared_directory2", "volume_size": 30},
None,
),
(
{"ebs_settings": "vol1"},
{"volume_size": 30},
{},
None,
),
(
{"ebs_settings": "vol1"},
{},
{},
None,
),
(
{"shared_dir": "shared_directory"},
{},
{},
None,
),
],
)
def test_duplicate_shared_dir_validator(
mocker, cluster_section_dict, ebs_section_dict1, ebs_section_dict2, expected_message
):
config_parser_dict = {
"cluster default": cluster_section_dict,
"ebs vol1": ebs_section_dict1,
"ebs vol2": ebs_section_dict2,
}
utils.assert_param_validator(mocker, config_parser_dict, expected_error=expected_message)
@pytest.mark.parametrize(
"extra_json, expected_message",
[
(
{"extra_json": {"cluster": {"cfn_scheduler_slots": "1"}}},
"It is highly recommended to use the disable_hyperthreading parameter in order to control the "
"hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json",
),
(
{"extra_json": {"cluster": {"cfn_scheduler_slots": "vcpus"}}},
"It is highly recommended to use the disable_hyperthreading parameter in order to control the "
"hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json",
),
(
{"extra_json": {"cluster": {"cfn_scheduler_slots": "cores"}}},
"It is highly recommended to use the disable_hyperthreading parameter in order to control the "
"hyper-threading configuration in the cluster rather than using cfn_scheduler_slots in extra_json",
),
],
)
def test_extra_json_validator(mocker, capsys, extra_json, expected_message):
config_parser_dict = {"cluster default": extra_json}
utils.assert_param_validator(mocker, config_parser_dict, capsys=capsys, expected_warning=expected_message)
@pytest.mark.parametrize(
"cluster_dict, architecture, expected_error",
[
({"base_os": "alinux2", "enable_efa": "compute"}, "x86_64", None),
({"base_os": "alinux2", "enable_efa": "compute"}, "arm64", None),
({"base_os": "centos8", "enable_efa": "compute"}, "x86_64", None),
({"base_os": "centos8"}, "x86_64", None),
(
{"base_os": "centos8", "enable_efa": "compute"},
"arm64",
"EFA currently not supported on centos8 for arm64 architecture",
),
({"base_os": "centos8"}, "arm64", None), # must not fail because by default EFA is disabled
({"base_os": "ubuntu1804", "enable_efa": "compute"}, "x86_64", None),
({"base_os": "ubuntu1804", "enable_efa": "compute"}, "arm64", None),
],
)
def test_efa_os_arch_validator(mocker, cluster_dict, architecture, expected_error):
mocker.patch(
"pcluster.config.cfn_param_types.BaseOSCfnParam.get_instance_type_architecture", return_value=architecture
)
config_parser_dict = {"cluster default": cluster_dict}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
pcluster_config.get_section("cluster").get_param("architecture").value = architecture
enable_efa_value = pcluster_config.get_section("cluster").get_param_value("enable_efa")
errors, warnings = efa_os_arch_validator("enable_efa", enable_efa_value, pcluster_config)
if expected_error:
assert_that(errors[0]).matches(expected_error)
else:
assert_that(errors).is_empty()
@pytest.mark.parametrize(
"section_dict, expected_message",
[
({"volume_type": "gp3", "volume_throughput": 125}, None),
(
{"volume_type": "gp3", "volume_throughput": 100},
"Throughput must be between 125 MB/s and 1000 MB/s when provisioning gp3 volumes.",
),
(
{"volume_type": "gp3", "volume_throughput": 1001},
"Throughput must be between 125 MB/s and 1000 MB/s when provisioning gp3 volumes.",
),
({"volume_type": "gp3", "volume_throughput": 125, "volume_iops": 3000}, None),
(
{"volume_type": "gp3", "volume_throughput": 760, "volume_iops": 3000},
"Throughput to IOPS ratio of .* is too high",
),
({"volume_type": "gp3", "volume_throughput": 760, "volume_iops": 10000}, None),
],
)
def test_ebs_volume_throughput_validator(mocker, section_dict, expected_message):
config_parser_dict = {"cluster default": {"ebs_settings": "default"}, "ebs default": section_dict}
utils.assert_param_validator(mocker, config_parser_dict, expected_message)
@pytest.mark.parametrize(
"region, expected_message",
[
("invalid-region", "Region 'invalid-region' is not yet officially supported "),
("us-east-1", None),
],
)
def test_region_validator(mocker, region, expected_message):
pcluster_config = utils.get_mocked_pcluster_config(mocker)
pcluster_config.region = region
errors, warnings = region_validator("aws", None, pcluster_config)
if expected_message:
assert_that(len(errors)).is_greater_than(0)
assert_that(errors[0]).matches(expected_message)
else:
assert_that(errors).is_empty()
@pytest.mark.parametrize(
"usage_class, supported_usage_classes, expected_error_message, expected_warning_message",
[
("ondemand", ["ondemand", "spot"], None, None),
("spot", ["ondemand", "spot"], None, None),
("ondemand", ["ondemand"], None, None),
("spot", ["spot"], None, None),
("spot", [], None, "Could not check support for usage class 'spot' with instance type 'instance-type'"),
("ondemand", [], None, "Could not check support for usage class 'ondemand' with instance type 'instance-type'"),
("spot", ["ondemand"], "Usage type 'spot' not supported with instance type 'instance-type'", None),
("ondemand", ["spot"], "Usage type 'ondemand' not supported with instance type 'instance-type'", None),
],
)
def test_check_usage_class(
mocker, usage_class, supported_usage_classes, expected_error_message, expected_warning_message
):
# This test checks the common logic triggered from cluster_type_validator and queue_compute_type_validator.
instance_type_info_mock = mocker.MagicMock()
mocker.patch(
"pcluster.config.cfn_param_types.InstanceTypeInfo.init_from_instance_type", return_value=instance_type_info_mock
)
instance_type_info_mock.supported_usage_classes.return_value = supported_usage_classes
errors = []
warnings = []
check_usage_class("instance-type", usage_class, errors, warnings)
if expected_error_message:
assert_that(errors).contains(expected_error_message)
else:
assert_that(errors).is_empty()
if expected_warning_message:
assert_that(warnings).contains(expected_warning_message)
else:
assert_that(warnings).is_empty()
@pytest.mark.parametrize(
"scheduler, expected_usage_class_check", [("sge", True), ("torque", True), ("slurm", True), ("awsbatch", False)]
)
def test_cluster_type_validator(mocker, scheduler, expected_usage_class_check):
# Usage class validation logic is tested in `test_check_usage_class`.
# This test only makes sure that the logic is triggered from validator.
mock = mocker.patch("pcluster.config.validators.check_usage_class", return_value=None)
cluster_dict = {"compute_instance_type": "t2.micro", "scheduler": scheduler}
config_parser_dict = {"cluster default": cluster_dict}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
errors, warnings = cluster_type_validator("compute_type", "spot", pcluster_config)
if expected_usage_class_check:
mock.assert_called_with("t2.micro", "spot", [], [])
else:
mock.assert_not_called()
assert_that(errors).is_equal_to([])
assert_that(warnings).is_equal_to([])
@pytest.mark.parametrize("compute_type", [("ondemand"), ("spot")])
def test_queue_compute_type_validator(mocker, compute_type):
# Usage class validation logic is tested in `test_check_usage_class`.
# This test only makes sure that the logic is triggered from validator.
mock = mocker.patch("pcluster.config.validators.check_usage_class", return_value=None)
config_parser_dict = {
"cluster default": {
"queue_settings": "q1",
},
"queue q1": {"compute_resource_settings": "q1cr1, q1cr2", "compute_type": compute_type},
"compute_resource q1cr1": {"instance_type": "q1cr1_instance_type"},
"compute_resource q1cr2": {"instance_type": "q1cr2_instance_type"},
}
config_parser = configparser.ConfigParser()
config_parser.read_dict(config_parser_dict)
pcluster_config = utils.init_pcluster_config_from_configparser(config_parser, False, auto_refresh=False)
errors, warnings = queue_compute_type_validator("queue", "q1", pcluster_config)
mock.assert_has_calls(
[
mocker.call("q1cr1_instance_type", compute_type, [], []),
mocker.call("q1cr2_instance_type", compute_type, [], []),
],
any_order=True,
)
assert_that(errors).is_equal_to([])
assert_that(warnings).is_equal_to([])
72656ef10a55622587068a8e047a20f959778ca6 | 2,583 | py | Python | pycbc/config.py | mchestr/pycbc | c215c1f177fe383ec6e797437fa2d5f4727eb9f3 | ["Unlicense"]
import os
from functools import reduce
import boto3
import yaml
from copy import deepcopy
from cryptography.fernet import Fernet
from pycbc import json
from pycbc.utils import AttrDict as d
s3 = boto3.client('s3')
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
_DEFAULTS = d({
'users': [],
'encrypt_key': Fernet.generate_key().decode('utf-8'),
'api_gateway': None,
'sender_email': None,
'logging': d({
'version': 1,
'formatters': d({
'default': d({
'format': '%(asctime)-15s - %(levelname)-7s - %(message)s',
}),
}),
'handlers': d({
'console': d({
'class': 'logging.StreamHandler',
'formatter': 'default',
'level': 'DEBUG',
'stream': 'ext://sys.stderr',
}),
}),
'loggers': d({
'pycbc': d({
'handlers': ['console'],
'level': 'INFO',
})
})
})
})
def load(event):
event_override = event.get('config', d())
env_prefix = event_override.get(
'env_prefix', os.getenv('ENV_PREFIX', 'PYCBC_'))
s3_bucket = event_override.get(
's3_bucket', os.getenv(f'{env_prefix}S3_BUCKET', 'pycbc'))
s3_filename = event_override.get(
's3_filename',
os.getenv(f'{env_prefix}S3_FILENAME', 'pycbc-config.yaml')
)
return json.loads(json.dumps(reduce(
_merge,
[
deepcopy(_DEFAULTS),
_from_s3(s3_bucket, s3_filename),
_from_env(env_prefix),
event_override,
{'s3_bucket': s3_bucket, 's3_filename': s3_filename}
])
))
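# Precedence sketch (hypothetical values): because reduce(_merge, ...) applies the
# dictionaries in order and later entries overwrite earlier ones, a sender_email
# defined in the S3 YAML is overridden by a PYCBC_SENDER_EMAIL environment variable,
# which in turn is overridden by event['config']['sender_email'].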
def _merge(a, b, path=None):
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
_merge(a[key], b[key], path + [str(key)])
else:
a[key] = b[key]
else:
a[key] = b[key]
return a
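# Example of the deep-merge behaviour (hypothetical inputs):
#   _merge({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
#   -> {'a': {'x': 1, 'y': 2}, 'b': 3}  # nested dicts are merged, values from b win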
def _yaml_load(data):
yaml.add_constructor(
_mapping_tag,
lambda loader, node: d(loader.construct_pairs(node)),
)
return yaml.load(data, Loader=yaml.FullLoader)
def _from_env(prefix):
env_vars = (k for k in os.environ if k.startswith(prefix))
return d({
k[len(prefix):].lower(): os.environ[k] for k in env_vars
})
def _from_s3(bucket, filename):
fileobj = s3.get_object(
Bucket=bucket,
Key=filename,
)
return _yaml_load(fileobj['Body'].read())
72676d2137788d26c0fcf85bc5ff2c5f1b9c272c | 2,893 | py | Python | 05-Intro-to-SpaCy/scripts/choropleth.py | henchc/Rediscovering-Text-as-Data | 3e14fa7a4bd82899ea564d4f7857a5dbdc616a4f | ["MIT"] | 15 stars | 23 forks
def us_choropleth(t):
import matplotlib.cm
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
import shapefile
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
import random
import pandas as pd
from collections import Counter
plt.title("NER", fontsize=12)
us_locations_map = Basemap(
resolution="l",
llcrnrlon=-128.94,
llcrnrlat=23.52,
urcrnrlon=-60.12,
urcrnrlat=50.93,
lat_0=37.26,
lon_0=-94.53)
us_locations_map.drawmapboundary(
fill_color="#46bcec") # Fills in the oceans
us_locations_map.fillcontinents(
color="#eabc77",
lake_color="#46bcec") # Defines the continents
us_locations_map.drawcoastlines()
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(15.5, 12.5) # Sets the size of the map
# Converts the coordinates to map points
lons, lats = us_locations_map(t["longitude"], t["latitude"])
us_locations_map.scatter(
lons,
lats,
color="black",
zorder=10) # Draws the points on the map
# Labels each point with the location name
for i in range(t.num_rows):
lat_lon = (
t.row(i).item("longitude") + .2,
t.row(i).item("latitude") - .1)
plt.annotate(np.array(t.row(i).item("name")), lat_lon, fontsize=10)
# Here we are reading in a shape file, which places state boundary
# information for our Basemap
us_locations_map.readshapefile(
"data/us_shapefiles/cb_2016_us_state_20m", "us_states")
state_names = []
for shape_dict in us_locations_map.us_states_info:
state_names.append(shape_dict['NAME'])
ax = plt.gca() # get current axes instance
cmap = plt.get_cmap('Reds')
names = []
shapes = []
counts = []
state_counts = Counter(t["state"])
for index, state in enumerate(state_names):
seg = us_locations_map.us_states[index]
poly = Polygon(seg)
names.append(state)
shapes.append(poly)
if state in t['state']:
counts.append(state_counts[state])
else:
counts.append(0)
# Loading our lists into the DataFrame
shape_table = pd.DataFrame()
shape_table["State Name"] = np.array(names)
shape_table["Shapes"] = np.array(shapes)
shape_table["Count"] = np.array(counts)
pc = PatchCollection(shape_table["Shapes"], zorder=2)
norm = Normalize()
pc.set_facecolor(cmap(norm(shape_table['Count'].fillna(0).values)))
pc.set_edgecolor("black")
ax.add_collection(pc)
# Adds colorbar showing the scale
mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
mapper.set_array(shape_table['Count'])
plt.colorbar(mapper, shrink=0.4)
726777c55df7d3f2aede322d72f4954164b655c1 | 2,348 | py | Python | take_day_and_night_pictures.py | ntmoore/skycamera | c8c67970b0e3a52ce008dbd6b34df20cdda786b7 | ["MIT"]
import time
import os
#parameters
sunset_hr=8
dawn_hr=7
daytime_period_min=60
nighttime_period_min=1
print("program starts at ", time.localtime())
while(1):
#Is it day or night?
hour = time.localtime()[3]
minute = time.localtime()[4]
hour_float = 1.0*hour+minute/60.0
if( hour_float>(sunset_hr+12) or hour_float<dawn_hr ):
daytime=0
else :
daytime=1
print("Is it day? ",daytime)
# night
if( daytime==0): # night
filename='sky-{:d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}.jpg'.format(
time.localtime()[0], # year
time.localtime()[1], # month
time.localtime()[2], # day of month
time.localtime()[3], # hr
time.localtime()[4], # min
time.localtime()[5] # sec
)
path="/home/pi/skyphotos/data/night/"
command = ("raspistill --shutter 30000000 --analoggain 12.0" +
" --digitalgain 1.0 --nopreview --mode 3 "+
" --annotate "+filename+" -o "+path+filename )
print("running command: ",command)
os.system(command)
print("took picture ",filename)
command = "rclone copy " +path+filename+ " wsu-physics-skycamera:23817_camera/night/ "
os.system(command)
print("uploaded picture ",filename)
if(time.localtime()[3]>sunset_hr) :
time.sleep(30*60) # wait 30 min if its before midnight
# normal wait
time.sleep(nighttime_period_min*60)
# day
if(daytime==1): #implicit else
filename='sky-{:d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}.jpg'.format(
time.localtime()[0], # year
time.localtime()[1], # month
time.localtime()[2], # day of month
time.localtime()[3], # hr
time.localtime()[4], # min
time.localtime()[5] # sec
)
path="/home/pi/skyphotos/data/day/"
command="raspistill -annotate "+filename+" --nopreview --mode 3 -o " + path + filename
os.system(command)
print("took picture ",filename)
command = "rclone copy " +path+filename+ " wsu-physics-skycamera:23817_camera/day/ "
os.system(command)
print("uploaded picture ",filename)
time.sleep(daytime_period_min*60)
# program (never) ends
7269858ddc95892c083fd9a632926838c559c8a0 | 7,344 | py | Python | malchive/utilities/comguidtoyara.py | 6un9-h0-Dan/malchive | 1d150430559a307cdfee49d47799c95caea47415 | ["Apache-2.0"] | 59 stars | 1 issue | 10 forks
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2021 The MITRE Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import sys
import struct
import binascii
import logging
import argparse
import progressbar
from datetime import datetime
from Registry import Registry
__version__ = "1.0.0"
__author__ = "Jason Batchelor"
log = logging.getLogger(__name__)
def iid_text_to_bin(iid):
"""
Process an IID and convert to a YARA compliant search string.
Below describes the GUID structure used to describe an identifier
for a MAPI interface:
https://msdn.microsoft.com/en-us/library/office/cc815892.aspx
:param str iid: Name of the IID to convert
:return: bin_yara
:rtype: str
"""
# remove begin and end brackets
guid = re.sub('[{}-]', '', iid)
# convert to binary representation
bin_struc = struct.unpack("IHH8B", binascii.a2b_hex(guid))
bin_str = '%.8X%.4X%.4X%s' % \
(bin_struc[0], bin_struc[1], bin_struc[2],
(''.join('{:02X}'.format(x) for x in bin_struc[3:])))
# create YARA compliant search string
bin_yara = '{ ' + ' '.join(a + b for a, b in
zip(bin_str[::2], bin_str[1::2])) + ' }'
return bin_yara
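# Worked example (example IID, little-endian host assumed; not from the original module):
#   iid_text_to_bin('{00020970-0000-0000-C000-000000000046}')
#   -> '{ 70 09 02 00 00 00 00 00 C0 00 00 00 00 00 00 46 }'
# The first three GUID fields come out byte-swapped because struct.unpack uses the
# machine's native little-endian layout, matching how the IID appears in memory.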
def enumerate_com_interfaces(reg_keys, show_bar=False):
"""
Iterate through registry keys and retrieve unique interface identifiers
and their name.
:param list reg_keys: List of registry key objects from python-registry
module.
:param bool show_bar: Show a progress bar while registry keys are processed.
:return: com
:rtype: dict
"""
total_iters = 0
counter = 0
com = {}
for key in reg_keys:
total_iters += len(key.subkeys())
if show_bar:
print('Processing %s results...' % total_iters)
bar = progressbar.ProgressBar(redirect_stdout=True,
max_value=total_iters)
for key in reg_keys:
for subkey in key.subkeys():
for v in list(subkey.values()):
# Per MS documentation, Interface names must start with the
# 'I' prefix, so we limit our values here as well.
# Not doing so can lead to some crazy names and conflicting
# results!
# https://docs.microsoft.com/en-us/dotnet/standard/design-guidelines/names-of-classes-structs-and-interfaces
if v.value_type() == Registry.RegSZ \
and v.name() == '(default)' \
and v.value().startswith('I'):
bin_guid = iid_text_to_bin(subkey.name())
# Names with special characters/spaces are truncated
stop_chars = ['_', '<', '[', ' ']
index = min(v.value().find(i)
if i in v.value()
else
len(v.value())
for i in stop_chars)
value = v.value()[:index]
if value not in com:
com[value] = [bin_guid]
elif bin_guid not in com[value]:
com[value].append(bin_guid)
if show_bar:
bar.update(counter)
counter += 1
if show_bar:
bar.finish()
return com
def initialize_parser():
parser = argparse.ArgumentParser(
description="Crawls windows registry to hunt for and convert IIDs for "
"COM interfaces to binary YARA signatures. The submitted "
"hives must be from HKLM\\SOFTWARE. Make copies of "
"these files off an active Windows OS using the command "
"'reg save HKLM\\SOFTWARE hklm_sft.hiv' when running as "
"administrator.")
parser.add_argument('hive', metavar='FILE', nargs='*',
help='Full path to the registry hive to be processed.')
parser.add_argument('-o', '--output-filename', type=str,
default='com_interface_ids.yara',
help='Filename to write YARA signatures '
'to (default: com_interface_ids.yara)')
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Output additional information when processing '
'(mostly for debugging purposes).')
return parser
def main():
p = initialize_parser()
args = p.parse_args()
root = logging.getLogger()
logging.basicConfig()
if args.verbose:
root.setLevel(logging.DEBUG)
else:
root.setLevel(logging.WARNING)
if len(args.hive) == 0:
p.print_help()
sys.exit(2)
keys = []
for hive in args.hive:
print('Collecting IIDs from %s...' % hive)
if not os.path.isfile(hive):
log.warning('Failed to find file %s. Skipping...' % hive)
continue
try:
reg = Registry.Registry(hive)
except Registry.RegistryParse.ParseException:
log.warning('Error parsing %s. Skipping...' % hive)
continue
try:
keys.append(reg.open("Classes\\Interface"))
except Registry.RegistryKeyNotFoundException:
log.warning("Couldn't find 'Classes\\Interface' key in %s." % hive)
try:
keys.append(reg.open("Classes\\Wow6432Node\\Interface"))
except Registry.RegistryKeyNotFoundException:
log.warning("Couldn't find 'Classes\\Wow6432Node\\Interface\\ "
"key in %s." % hive)
com_signatures = enumerate_com_interfaces(keys, True)
counter = 0
total_rules = len(com_signatures)
print('Generating %s YARA signatures...' % total_rules)
bar = progressbar.ProgressBar(redirect_stdout=True, max_value=total_rules)
yara_rule = '// %s\n// COM IID YARA sig collection.\n// ' \
'Autogenerated on %s\n\n' % (__author__, datetime.now())
for name, rules in com_signatures.items():
yara_rule += 'rule %s\n{\n\t' \
'strings:' % name
if len(rules) > 1:
for i in range(0, len(rules)):
yara_rule += '\n\t\t$%s_%s = %s' % (name, i, rules[i])
else:
yara_rule += '\n\t\t$%s = %s' % (name, rules[0])
yara_rule += '\n\tcondition:\n\t\tany of them\n}\n'
bar.update(counter)
counter += 1
bar.finish()
print('Writing YARA rules to %s' % args.output_filename)
with open(args.output_filename, 'w') as f:
f.write(yara_rule)
f.close()
if __name__ == '__main__':
main()
726b36a2e85a950a7d407068d5aa12a5d50355a1 | 4,089 | py | Python | main.py | tani-cat/point_maximizer | c9ff868377bbeed4727914d7be258457dc8295a3 | ["MIT"] | 1 star
import csv
import os
from collections import deque
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
INPUT_PATH = os.path.join(BASE_DIR, 'goods_source.csv')
OUTPUT_PATH = os.path.join(BASE_DIR, 'result.csv')
FILE_ENCODE = 'shift_jis'
INPUT_COLS = ('id', 'goods_name', 'price')
def import_csv():
"""入力データの読み込み
"""
try:
data_l = list()
with open(INPUT_PATH, mode='r', encoding=FILE_ENCODE, newline='') as csvf:
reader = csv.DictReader(csvf)
for dic in reader:
dic['id'] = int(dic['id'])
dic['price'] = int(dic['price'])
data_l.append(dic)
for col in INPUT_COLS:
if col not in data_l[0]:
raise IndexError(col)
return data_l
except FileNotFoundError:
print('goods_source.csv not found')
return list()
except IndexError as e:
print('Missing required column: ' + str(e))
return list()
def func(init, old_que, threshold=50):
keep = dict()
new_que = deque(list())
while old_que:
last = old_que.pop()
if init['mod'] + last['mod'] >= threshold:
if keep:
new_que.appendleft(keep)
keep = last
else:
new_que.appendleft(last)
break
return init, keep, old_que, new_que
def calculate(data_l):
"""アルゴリズム
1. 50未満の中でペアにできるものを探す
1-1. queの末端でペアを作れる場合、左端を固定し和が50以上で最小になるように右を選んでペアにする
1-2. queの末端でペアを作れない場合、末端2つを取り出した上で3個以上の組み合わせで消化する
1-2-1. 右末端で和が50以上なら右から左に探索して和が50以上になる最小値を得る->組にして除外
1-2-2. 右末端でも和が50にならないなら右末端をして1-2に戻る
-> 全部を消化しても50にならないならそのまま全部を足してしまう
2. 1と同じことを全体かつ閾値150で行う
"""
# Only items whose remainder is below 50 go into the pairing step
under_que = list()
over_que = list()
for i in range(len(data_l)):
_mod = data_l[i]['price'] % 100
data_l[i]['set'] = 0
dic = {
'id': [i],
'mod': _mod,
}
if _mod < 50:
under_que.append(dic)
else:
over_que.append(dic)
under_que.sort(key=lambda x: x['mod'])
under_que = deque(under_que)
while under_que:
init = under_que.popleft()
while under_que:
init, keep, under_que, last_que = func(init, under_que)
# At this point last_que has at least one element
if not keep:
keep = last_que.pop()
init = {
'id': init['id'] + keep['id'],
'mod': init['mod'] + keep['mod'],
}
if last_que:
over_que.append(init)
under_que.extend(last_que)
break
else:
over_que.append(init)
break
# Among the items whose remainder is 50 or more, add together those whose sum reaches 150 or more
# (this minimizes the number of purchases)
# final_que: the final combinations
over_que = deque(sorted(over_que, key=lambda x: x['mod']))
final_que = list()
while over_que:
init = over_que.popleft()
init, keep, over_que, last_que = func(init, over_que, 150)
if keep:
init = {
'id': init['id'] + keep['id'],
'mod': (init['mod'] + keep['mod']) % 100,
}
over_que.appendleft(init)
else:
final_que.append(init)
over_que.extend(last_que)
sum_p = 0
# Output the calculation results
for cnt, que in enumerate(final_que):
point = 0
for id in que['id']:
data_l[id]['set'] = cnt + 1
point += data_l[id]['price']
print(f'set{cnt + 1} {round(point / 100)} P')
sum_p += round(point / 100)
print(f'total: {sum_p} P')
return data_l
def main():
# Read the input file
data_l = import_csv()
if not data_l:
print('Aborting.')
return False
# Run the calculation
data_l = calculate(data_l)
# Write the results to a file
data_l.sort(key=lambda x: (x['set'], x['id']))
with open(OUTPUT_PATH, mode='w', encoding=FILE_ENCODE, newline='') as csvf:
writer = csv.DictWriter(csvf, data_l[0].keys())
writer.writeheader()
writer.writerows(data_l)
print('Done')
if __name__ == '__main__':
main()
726f55adcb3a541f1d732e7444389c568ce9cca4 | 4,059 | py | Python | src/main/python/hydra/lib/cli.py | bopopescu/hydra | ec0793f8c1f49ceb93bf1f1a9789085b68d55f08 | ["Apache-2.0"] | 10 stars | 17 issues | 5 forks
"""hydra cli.
Usage:
hydra cli ls slaves
hydra cli ls apps
hydra cli ls task <app>
hydra cli [force] stop <app>
hydra cli scale <app> <scale>
hydra cli (-h | --help)
hydra cli --version
Options:
-h --help Show this screen.
--version Show version.
"""
__author__ = 'sushil'
from docopt import docopt
from pprint import pprint, pformat # NOQA
from hydra.lib import util, mmapi
import os
import sys
import logging
try:
# Python 2.x
from ConfigParser import ConfigParser
except ImportError:
# Python 3.x
from configparser import ConfigParser
l = util.createlogger('cli', logging.INFO)
# l.setLevel(logging.DEBUG)
def cli(argv):
config = ConfigParser()
config_file_name = 'hydra.ini'
if len(argv) >= 2 and argv[1].find('.ini') != -1:
config_file_name = argv[1]
del argv[1]
if not os.path.isfile(config_file_name):
l.error("Unable to open config file %s" % config_file_name)
sys.exit(1)
config.read(config_file_name)
mesos_addr = 'http://' + config.get('mesos', 'ip') + ':' + \
config.get('mesos', 'port')
marathon_addr = 'http://' + config.get('marathon', 'ip') + ':' + \
config.get('marathon', 'port')
argv[0] = 'cli'
args = docopt(__doc__, argv=argv, version='hydra 0.1.0', )
# pprint (args)
if args['ls']:
if args['slaves']:
mesos = mmapi.MesosIF(mesos_addr)
mesos.print_slaves()
elif args['apps']:
mt = mmapi.MarathonIF(marathon_addr, '127.0.0.1', None)
apps = mt.get_apps()
for app in apps:
st = "App:" + app.id
st += " CPU:" + str(app.cpus)
st += " MEM:" + str(app.mem)
st += " Instances:" + str(app.instances)
if len(app.constraints):
st += " Constraints:" + pformat(app.constraints)
l.info(st)
elif args['task']:
mt = mmapi.MarathonIF(marathon_addr, '127.0.0.1', None)
app = mt.get_app(args['<app>'])
st = "App:" + args['<app>']
st += " CPU:" + str(app.cpus)
st += " MEM:" + str(app.mem)
st += " Instances:" + str(app.instances)
if len(app.constraints):
st += " Constraints:" + pformat(app.constraints)
l.info(st)
st = "CMD:" + app.cmd
l.info(st)
st = "ID:" + app.id
st += " task_running:" + str(app.tasks_running)
st += " task_staged:" + str(app.tasks_staged)
l.info(st)
tasks = app.tasks
for task in tasks:
st = "\tTASK ID:" + task.id + " host:" + task.host
if len(task.ports):
st += " ports:" + pformat(task.ports)
if len(task.service_ports):
st += " service_ports:" + pformat(task.service_ports)
l.info(st)
elif args['stop']:
mt = mmapi.MarathonIF(marathon_addr, '127.0.0.1', None)
l.info("Deleting app:" + args['<app>'])
mt.delete_app(args['<app>'], args['force'])
l.info("Waiting for app removal to complete")
mt.wait_app_removal(args['<app>'])
elif args['scale']:
mt = mmapi.MarathonIF(marathon_addr, '127.0.0.1', None)
app = args['<app>']
scale = int(args['<scale>'])
l.info("Scaling app:" + app + " to scale:" + str(scale))
mt.scale_app(app, scale)
l.info("Waiting for app scale to complete")
mt.wait_app_ready(app, scale)
# SK:Tried to add log collection but no luck so far.
# elif args['logs']:
# path = "/tmp/mesos/slaves/"
# #11323ada-daab-4d76-8749-3113b5448bed-S0/
# path += "/frameworks/
# # #11323ada-daab-4d76-8749-3113b5448bed-0007
# path += "/executors/"
# #zst-pub.4bdec0e2-e7e3-11e5-a874-fe2077b92eeb
# path += "/runs/"
# # d00620ea-8f3e-427d-9404-6f6b9701f64f/
# app = args['<app>']
726f6ae6d6811d44c154cb87d7ac33570c6e67c7 | 1,378 | py | Python | azure-devops/azext_devops/test/common/test_format.py | doggy8088/azure-devops-cli-extension | 2f6b1a6ffbc49ae454df640a8bb00dac991d6514 | ["MIT"] | 326 stars | 562 issues | 166 forks
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azext_devops.dev.common.format import trim_for_display, date_time_to_only_date
class TestFormatMethods(unittest.TestCase):
def test_trim_for_display(self):
input = 'Gallery extensions for Portal Extension'
output = trim_for_display(input, 20)
self.assertEqual(output, 'Gallery extensions f...')
input = 'Aex platform'
output = trim_for_display(input, 20)
self.assertEqual(output, input)
input = ''
output = trim_for_display(input, 20)
self.assertEqual(output, input)
input = None
output = trim_for_display(input, 20)
self.assertEqual(output, input)
def test_date_time_to_only_date(self):
input = '2019-02-24T02:45:41.277000+00:00'
output = date_time_to_only_date(input)
self.assertEqual(output, '2019-02-24')
input = 'Aex platform'
output = date_time_to_only_date(input)
self.assertEqual(output, input)
if __name__ == '__main__':
unittest.main()
7271b55252042ba4d3337da31bedcd35a08671cd | 7,464 | py | Python | github/GitReleaseAsset.py | aantr/WindowsHostManager | 75d248fc8991d471c6802fa79e7dee44a5708c65 | ["CNRI-Python-GPL-Compatible"] | 1 star | also in: rongshaoshuai/blogs, Corionis/Knobs-And-Scripts
############################ Copyrights and license ############################
# #
# Copyright 2017 Chris McBride <thehighlander@users.noreply.github.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import github.GithubObject
class GitReleaseAsset(github.GithubObject.CompletableGithubObject):
"""
This class represents GitReleaseAssets. The reference can be found here https://developer.github.com/v3/repos/releases/#get-a-single-release-asset
"""
def __repr__(self):
return self.get__repr__({"url": self.url})
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def label(self):
"""
:type: string
"""
self._completeIfNotSet(self._label)
return self._label.value
@property
def content_type(self):
"""
:type: string
"""
self._completeIfNotSet(self._content_type)
return self._content_type.value
@property
def state(self):
"""
:type: string
"""
self._completeIfNotSet(self._state)
return self._state.value
@property
def size(self):
"""
:type: integer
"""
self._completeIfNotSet(self._size)
return self._size.value
@property
def download_count(self):
"""
:type: integer
"""
self._completeIfNotSet(self._download_count)
return self._download_count.value
@property
def created_at(self):
"""
:type: datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def updated_at(self):
"""
:type: datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def browser_download_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._browser_download_url)
return self._browser_download_url.value
@property
def uploader(self):
"""
:type: github.NamedUser.NamedUser
"""
self._completeIfNotSet(self._uploader)
return self._uploader.value
def delete_asset(self):
"""
Delete asset from the release.
:rtype: bool
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
return True
def update_asset(self, name, label=""):
"""
Update asset metadata.
:rtype: github.GitReleaseAsset.GitReleaseAsset
"""
assert isinstance(name, str), name
assert isinstance(label, str), label
post_parameters = {"name": name, "label": label}
headers, data = self._requester.requestJsonAndCheck(
"PATCH", self.url, input=post_parameters
)
return GitReleaseAsset(self._requester, headers, data, completed=True)
def _initAttributes(self):
self._url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._label = github.GithubObject.NotSet
self._uploader = github.GithubObject.NotSet
self._content_type = github.GithubObject.NotSet
self._state = github.GithubObject.NotSet
self._size = github.GithubObject.NotSet
self._download_count = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._browser_download_url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "label" in attributes: # pragma no branch
self._label = self._makeStringAttribute(attributes["label"])
if "uploader" in attributes: # pragma no branch
self._uploader = self._makeClassAttribute(
github.NamedUser.NamedUser, attributes["uploader"]
)
if "content_type" in attributes: # pragma no branch
self._content_type = self._makeStringAttribute(attributes["content_type"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "size" in attributes: # pragma no branch
self._size = self._makeIntAttribute(attributes["size"])
if "download_count" in attributes: # pragma no branch
self._download_count = self._makeIntAttribute(attributes["download_count"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "browser_download_url" in attributes: # pragma no branch
self._browser_download_url = self._makeStringAttribute(
attributes["browser_download_url"]
)
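# Editor's note: a minimal, hypothetical usage sketch appended by the editor (not part of
# the original module); the access token and repository name are placeholders.
if __name__ == "__main__":
    from github import Github

    gh = Github("<personal-access-token>")
    release = gh.get_repo("octocat/Hello-World").get_latest_release()
    for asset in release.get_assets():
        print(asset.name, asset.size, asset.download_count)
        # asset.update_asset("renamed.zip", label="nightly")  # PATCH the asset metadata
        # asset.delete_asset()                                 # remove it from the release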
| 37.888325
| 150
| 0.554126
| 692
| 7,464
| 5.787572
| 0.236994
| 0.062921
| 0.07191
| 0.059925
| 0.262422
| 0.225968
| 0.090886
| 0
| 0
| 0
| 0
| 0.003664
| 0.341774
| 7,464
| 196
| 151
| 38.081633
| 0.81152
| 0.321543
| 0
| 0.114286
| 0
| 0
| 0.048019
| 0
| 0
| 0
| 0
| 0
| 0.019048
| 1
| 0.161905
| false
| 0
| 0.009524
| 0.009524
| 0.32381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7272f878dcec10a25f78ca45ec857fd8f9c8248c
| 3,092
|
py
|
Python
|
indicoio/utils/__init__.py
|
JoseRoman/IndicoIo-python
|
4fe2952df45c26392f36acd8b43391dfc50e140b
|
[
"MIT"
] | 1
|
2021-05-26T09:03:15.000Z
|
2021-05-26T09:03:15.000Z
|
indicoio/utils/__init__.py
|
JoseRoman/IndicoIo-python
|
4fe2952df45c26392f36acd8b43391dfc50e140b
|
[
"MIT"
] | null | null | null |
indicoio/utils/__init__.py
|
JoseRoman/IndicoIo-python
|
4fe2952df45c26392f36acd8b43391dfc50e140b
|
[
"MIT"
] | null | null | null |
import inspect
import numpy as np
class TypeCheck(object):
"""
Decorator that performs a typecheck on the input to a function
"""
def __init__(self, accepted_structures, arg_name):
"""
When initialized, include list of accepted datatypes and the
arg_name to enforce the check on. Can totally be daisy-chained.
"""
self.accepted_structures = accepted_structures
self.is_accepted = lambda x: type(x) in accepted_structures
self.arg_name = arg_name
def __call__(self, fn):
def check_args(*args, **kwargs):
arg_dict = dict(zip(inspect.getfullargspec(fn).args, args))
full_args = dict(list(arg_dict.items()) + list(kwargs.items()))  # dict views must be materialised on Python 3
if not self.is_accepted(full_args[self.arg_name]):
raise DataStructureException(
fn,
full_args[self.arg_name],
self.accepted_structures
)
return fn(*args, **kwargs)
return check_args
class DataStructureException(Exception):
"""
If a non-accepted datastructure is passed, throws an exception
"""
def __init__(self, callback, passed_structure, accepted_structures):
self.callback = callback.__name__
self.structure = str(type(passed_structure))
self.accepted = [str(structure) for structure in accepted_structures]
def __str__(self):
return """
function %s does not accept %s, accepted types are: %s
""" % (self.callback, self.structure, str(self.accepted))
@TypeCheck((list, dict, np.ndarray), 'array')
def normalize(array, distribution=1, norm_range=(0, 1), **kwargs):
"""
First arg is an array, whether that's in the form of a numpy array,
a list, or a dictionary that contains the data in its values.
Second arg is the desired distribution which would be applied before
normalization.
Supports linear, exponential, logarithmic and raising to whatever
power specified (in which case you just put a number)
Third arg is the range across which you want the data normalized
"""
# Handling dictionary array input
# Note: lists and numpy arrays behave the same in this program
dict_array = isinstance(array, dict)
if dict_array:
keys = list(array.keys())
array = np.array(list(array.values())).astype('float')
else: # Decorator errors if this isn't a list or a numpy array
array = np.array(array).astype('float')
# Handling various distributions
if type(distribution) in [float, int]:
array = np.power(array, distribution)
else:
array = getattr(np, distribution)(array, **kwargs)
# Prep for normalization
x_max, x_min = (np.max(array), np.min(array))
def norm(element,x_min,x_max):
base_span = (element - x_min)*(norm_range[-1] - norm_range[0])
return norm_range[0] + base_span / (x_max - x_min)
norm_array = np.vectorize(norm)(array, x_min, x_max)
if dict_array:
return dict(zip(keys, norm_array))
return norm_array
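# Editor's note: illustrative calls appended by the editor (not part of the original
# module); the sample values are invented and assume the Python 3 fixes above.
if __name__ == '__main__':
    print(normalize([1.0, 2.0, 3.0, 4.0]))                 # -> [0. 0.333... 0.666... 1.]
    print(normalize([1.0, 2.0, 3.0], norm_range=(0, 10)))  # rescaled into (0, 10)
    print(normalize({'a': 1.0, 'b': 3.0}))                 # dict in -> dict out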
| 35.54023
| 77
| 0.647477
| 404
| 3,092
| 4.794554
| 0.346535
| 0.065049
| 0.034073
| 0.015488
| 0.019618
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002618
| 0.258732
| 3,092
| 86
| 78
| 35.953488
| 0.842496
| 0.278784
| 0
| 0.083333
| 0
| 0
| 0.040941
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145833
| false
| 0.041667
| 0.041667
| 0.020833
| 0.354167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
727435e62860b2aa00ad17f363ac314751d38249
| 3,770
|
py
|
Python
|
openprocurement/tender/openuadefense/tests/tender.py
|
ProzorroUKR/openprocurement.tender.openuadefense
|
5d6a7433839178edba35015ae614ba3e36b29d0b
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/tender/openuadefense/tests/tender.py
|
ProzorroUKR/openprocurement.tender.openuadefense
|
5d6a7433839178edba35015ae614ba3e36b29d0b
|
[
"Apache-2.0"
] | 5
|
2018-08-14T19:41:27.000Z
|
2018-12-28T13:17:00.000Z
|
openprocurement/tender/openuadefense/tests/tender.py
|
ProzorroUKR/openprocurement.tender.openuadefense
|
5d6a7433839178edba35015ae614ba3e36b29d0b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from openprocurement.api.tests.base import snitch
from openprocurement.api.tests.base import BaseWebTest
from openprocurement.tender.belowthreshold.tests.base import test_lots
from openprocurement.tender.belowthreshold.tests.tender import TenderResourceTestMixin
from openprocurement.tender.belowthreshold.tests.tender_blanks import (
# TenderUAProcessTest
invalid_tender_conditions,
)
from openprocurement.tender.openua.tests.tender import TenderUaProcessTestMixin
from openprocurement.tender.openua.tests.tender_blanks import (
# TenderUAResourceTest
empty_listing,
create_tender_generated,
tender_with_main_procurement_category,
tender_finance_milestones,
)
from openprocurement.tender.openuadefense.tests.base import (
BaseTenderUAWebTest,
test_tender_data,
)
from openprocurement.tender.openuadefense.tests.tender_blanks import (
# TenderUATest
simple_add_tender,
# TenderUAResourceTest
create_tender_invalid,
patch_tender,
patch_tender_ua,
# TenderUAProcessTest
one_valid_bid_tender_ua,
one_invalid_bid_tender,
)
class TenderUATest(BaseWebTest):
initial_data = test_tender_data
test_simple_add_tender = snitch(simple_add_tender)
class TenderUAResourceTest(BaseTenderUAWebTest, TenderResourceTestMixin):
test_lots_data = test_lots # TODO: change attribute identifier
initial_data = test_tender_data
test_empty_listing = snitch(empty_listing)
test_create_tender_invalid = snitch(create_tender_invalid)
test_create_tender_generated = snitch(create_tender_generated)
test_patch_tender = snitch(patch_tender)
test_patch_tender_ua = snitch(patch_tender_ua)
test_tender_with_main_procurement_category = snitch(tender_with_main_procurement_category)
test_tender_finance_milestones = snitch(tender_finance_milestones)
class TenderUAProcessTest(BaseTenderUAWebTest, TenderUaProcessTestMixin):
initial_data = test_tender_data
test_invalid_tender_conditions = snitch(invalid_tender_conditions)
test_one_valid_bid_tender_ua = snitch(one_valid_bid_tender_ua)
test_one_invalid_bid_tender = snitch(one_invalid_bid_tender)
def test_patch_not_author(self):
response = self.app.post_json('/tenders', {'data': test_tender_data})
self.assertEqual(response.status, '201 Created')
tender = response.json['data']
owner_token = response.json['access']['token']
authorization = self.app.authorization
self.app.authorization = ('Basic', ('bot', 'bot'))
response = self.app.post('/tenders/{}/documents'.format(tender['id']),
upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.app.authorization = authorization
response = self.app.patch_json('/tenders/{}/documents/{}?acc_token={}'.format(tender['id'], doc_id, owner_token),
{"data": {"description": "document description"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can update document only author")
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderUAProcessTest))
suite.addTest(unittest.makeSuite(TenderUAResourceTest))
suite.addTest(unittest.makeSuite(TenderUATest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 37.326733
| 121
| 0.748276
| 412
| 3,770
| 6.536408
| 0.245146
| 0.063498
| 0.064983
| 0.026736
| 0.299666
| 0.193465
| 0.036391
| 0
| 0
| 0
| 0
| 0.004403
| 0.156499
| 3,770
| 100
| 122
| 37.7
| 0.842453
| 0.039788
| 0
| 0.097222
| 0
| 0
| 0.081949
| 0.016058
| 0
| 0
| 0
| 0.01
| 0.097222
| 1
| 0.027778
| false
| 0
| 0.138889
| 0
| 0.430556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7274cfef52d1610735171f4f6183581cfb8fe6dd
| 3,791
|
py
|
Python
|
fabry/tools/file_io.py
|
jmilhone/fabry_perot
|
cd3cb7a1dbcaa3c9382f9f2dbd3407d95447b3ce
|
[
"MIT"
] | 1
|
2020-03-29T20:39:31.000Z
|
2020-03-29T20:39:31.000Z
|
fabry/tools/file_io.py
|
jmilhone/fabry_perot
|
cd3cb7a1dbcaa3c9382f9f2dbd3407d95447b3ce
|
[
"MIT"
] | null | null | null |
fabry/tools/file_io.py
|
jmilhone/fabry_perot
|
cd3cb7a1dbcaa3c9382f9f2dbd3407d95447b3ce
|
[
"MIT"
] | 2
|
2020-04-16T15:05:23.000Z
|
2020-12-05T18:19:10.000Z
|
from __future__ import print_function, division
import os
import numpy as np
import h5py
def dict_2_h5(fname, dic, append=False):
'''Writes a dictionary to a hdf5 file with given filename
It will use lzf compression for all numpy arrays
Args:
fname (str): filename to write to
dic (dict): dictionary to write
append (bool): if true, will append to file instead of overwriting, default=False
'''
if append:
method = 'r+'
else:
method = 'w'
with h5py.File(fname, method) as h5:
recursive_save_dict_to_h5(h5, '/', dic)
def h5_2_dict(fname):
'''Reads a dictionary from a hdf5 file with given filename
Args:
fname (str): hdf5 filename to read
Returns:
dict: dictionary of hdf5 keys
'''
with h5py.File(fname, 'r') as h5:
return recursive_load_dict_from_h5(h5, '/')
def prep_folder(path):
'''Checks if folder exists and recursively creates folders
to ensure the path is valid
Args:
path (str): path to folder
'''
if os.path.isdir(path):
return
else:
os.makedirs(path)
def recursive_save_dict_to_h5(h5, path, dic):
''' function used in save_dict_to_h5 in order to get recursion
'''
for key, item in dic.items():
if path + key in h5: ### overwrites pre-existing keys with same name
del h5[path + key]
if type(item) in [np.ndarray, np.generic]:
h5.create_dataset(path + key, data=item, compression='lzf')
elif type(item) != dict:
try:
h5.create_dataset(path + key, data=item)
except TypeError:
raise ValueError('Cannot save %s type' % type(item))
else:
recursive_save_dict_to_h5(h5, path + key + '/', item)
def recursive_load_dict_from_h5(h5, path):
''' function used in load_h5_to_dict in order to get recursion
'''
out_dict = {}
for key, item in h5[path].items():
# if type(item) == h5py._hl.dataset.Dataset:
if isinstance(item, h5py.Dataset):
out_dict[key] = item[()]  # Dataset.value was removed in h5py 3.x
# elif type(item) == h5py._hl.group.Group:
elif isinstance(item, h5py.Group):
out_dict[key] = recursive_load_dict_from_h5(h5, path + key + '/')
return out_dict
def read_Ld_results(Ld_directory):
'''Reads L and d histogram data from multinest run
Args:
Ld_directory (str): path to multinest save directory
Returns:
Tuple (np.ndarray, np.ndarray) L histogram values (in pixels), d histogram values (in mm)
'''
try:
fname = os.path.join(Ld_directory, "Ld_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
except IOError:
fname = os.path.join(Ld_directory, "Ld_solver_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
L = post[:, 0]
d = post[:, 1]
return L, d
def read_match_finesse_results(finesse_directory, errtemp=False):
fname = os.path.join(finesse_directory, "F_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
F = post[:, 0]
V = post[:, 1]
T = post[:, 2]
if errtemp:
E = post[:, 3]
return F, V, T, E
else:
return F, V, T
def read_finesse_results(finesse_directory):
fname = os.path.join(finesse_directory, "finesse_post_equal_weights.dat")
post = np.loadtxt(fname, ndmin=2)
F = post[:, 0]
A = post[:, 1]
Arel = post[:, 2]
Ti = post[:, 3]
return F, A, Arel, Ti
def read_lyon_temp_results(temp_directory):
fname = os.path.join(temp_directory, 'temp_post_equal_weights.dat')
post = np.loadtxt(fname, ndmin=2)
T = post[:, 0]
V = post[:, 1]
# A = post[:,2]
# O = post[:,3]
return T, V # ,A#,O
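# Editor's note: a minimal, hypothetical round-trip example appended by the editor (not
# part of the original module); the file name and payload are invented.
if __name__ == '__main__':
    sample = {'scan': {'signal': np.arange(5), 'label': 'test run'}, 'shots': 3}
    dict_2_h5('example.h5', sample)             # nested dicts become HDF5 groups
    restored = h5_2_dict('example.h5')
    print(restored['shots'], restored['scan']['signal'])   # -> 3 [0 1 2 3 4]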
| 28.291045
| 97
| 0.611976
| 545
| 3,791
| 4.102752
| 0.26055
| 0.010733
| 0.024598
| 0.033542
| 0.317084
| 0.275939
| 0.203488
| 0.101521
| 0.101521
| 0.101521
| 0
| 0.019971
| 0.273543
| 3,791
| 133
| 98
| 28.503759
| 0.791939
| 0.277499
| 0
| 0.2
| 0
| 0
| 0.064269
| 0.052793
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.053333
| 0
| 0.28
| 0.013333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72789eb5c6f77f3e712cea1375e6fa57c2e3d189
| 1,170
|
py
|
Python
|
trapping_rain_water/solution.py
|
haotianzhu/C_Questions_Solutions
|
2677b6d26bedb9bc6c6137a2392d0afaceb91ec2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
trapping_rain_water/solution.py
|
haotianzhu/C_Questions_Solutions
|
2677b6d26bedb9bc6c6137a2392d0afaceb91ec2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
trapping_rain_water/solution.py
|
haotianzhu/C_Questions_Solutions
|
2677b6d26bedb9bc6c6137a2392d0afaceb91ec2
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
class Solution:
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
if not height:
return 0
left = 0
right = len(height)-1
total_area = 0
if height[left] <= height[right]:
m = left
else:
m = right
while(left < right):
if height[left] <= height[right]:
# move m from left to right
m += 1
if height[m] >= height[left]:
# found a local concave shape
left = m # search the remainder part from [m,right]
m = left if height[left] <= height[right] else right # reset m as min hight between left and right
else:
# since the right wall is at least as high as the left one, every index in the
# interval (left, right) adds height[left] - height[m] to the trapped water area
total_area += height[left]-height[m]
else:
# move m from right to left
m-=1
if height[m] >= height[right]:
# found a local concave shape
right = m
m = left if height[left] <= height[right] else right
else:
# same as left part above
total_area += height[right]-height[m]
return total_area
if __name__ == '__main__':
res = Solution().trap([])
print(res)
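# Editor's note: an extra illustrative check appended by the editor (not part of the
# original file); for the classic example below the expected answer is 6 units of water.
print(Solution().trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]))  # -> 6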
| 24.375
| 103
| 0.616239
| 175
| 1,170
| 4.051429
| 0.365714
| 0.098731
| 0.135402
| 0.101551
| 0.282087
| 0.152327
| 0.104372
| 0.104372
| 0.104372
| 0
| 0
| 0.007026
| 0.270085
| 1,170
| 48
| 104
| 24.375
| 0.823185
| 0.346154
| 0
| 0.266667
| 0
| 0
| 0.010753
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0
| 0
| 0.133333
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72792ffbb67fb0954aabb51561db8a143501fbea
| 607
|
py
|
Python
|
ABC/178/D.py
|
yu9824/AtCoder
|
50a209059c005efadc1c912e443ec41365381c16
|
[
"MIT"
] | null | null | null |
ABC/178/D.py
|
yu9824/AtCoder
|
50a209059c005efadc1c912e443ec41365381c16
|
[
"MIT"
] | null | null | null |
ABC/178/D.py
|
yu9824/AtCoder
|
50a209059c005efadc1c912e443ec41365381c16
|
[
"MIT"
] | null | null | null |
# list(map(int, input().split()))
# int(input())
import sys
sys.setrecursionlimit(10 ** 9)
'''
DP
A[n] = A[n-3] + A[n-4] + ... + A[0] (O(S**2))
Here, since A[n-1] = A[n-4] + A[n-5] + ... + A[0],
A[n] can also be written as A[n-3] + A[n-1]. (O(S), which is faster.)
'''
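# Editor's note: a worked instance of the recurrence above, added by the editor (not part
# of the original submission). For S = 7: A = [1, 0, 0, 1, 1, 1, 2, 3], so the answer is
# A[7] = 3, counting the decompositions 7, 3 + 4 and 4 + 3.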
mod = 10 ** 9 + 7
def main(*args):
S = args[0]
A = [0 for s in range(S+1)]
A[0] = 1 # add nothing (represents the single way consisting of S itself)
s = 3
while s <= S:
# A[s] = sum(A[:(s-3)+1]) % mod # either form works; the one below is faster
A[s] = (A[s-3] + A[s-1]) % mod
s += 1
print(A[S])
if __name__ == '__main__':
args = [int(input())]
main(*args)
| 18.96875
| 59
| 0.444811
| 112
| 607
| 2.339286
| 0.375
| 0.068702
| 0.022901
| 0.030534
| 0.053435
| 0.053435
| 0.053435
| 0
| 0
| 0
| 0
| 0.066514
| 0.281713
| 607
| 32
| 60
| 18.96875
| 0.534404
| 0.202636
| 0
| 0
| 0
| 0
| 0.023739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.133333
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
727a16aa07e94a6fe76bbd40cc0c276c9724b76b
| 2,517
|
py
|
Python
|
contrib/analysis_server/src/analysis_server/factory.py
|
OzanCKN/OpenMDAO-Framework
|
05e9d4b9bc41d0ec00a7073545146c925cd33b0b
|
[
"Apache-2.0"
] | 1
|
2015-11-05T11:14:45.000Z
|
2015-11-05T11:14:45.000Z
|
contrib/analysis_server/src/analysis_server/factory.py
|
janus/OpenMDAO-Framework
|
05e9d4b9bc41d0ec00a7073545146c925cd33b0b
|
[
"Apache-2.0"
] | null | null | null |
contrib/analysis_server/src/analysis_server/factory.py
|
janus/OpenMDAO-Framework
|
05e9d4b9bc41d0ec00a7073545146c925cd33b0b
|
[
"Apache-2.0"
] | 1
|
2020-07-15T02:45:54.000Z
|
2020-07-15T02:45:54.000Z
|
from openmdao.main.factory import Factory
from analysis_server import client, proxy, server
class ASFactory(Factory):
"""
Factory for components running under an AnalysisServer.
An instance would typically be passed to
:meth:`openmdao.main.factorymanager.register_class_factory`.
host: string
Host name or IP address of the AnalysisServer to connect to.
port: int
Port number of the AnalysisServer to connect to.
"""
def __init__(self, host='localhost', port=server.DEFAULT_PORT):
super(ASFactory, self).__init__()
self._host = host
self._port = port
self._client = client.Client(host, port)
def create(self, typname, version=None, server=None,
res_desc=None, **ctor_args):
"""
Create a `typname` object.
typname: string
Type of object to create.
version: string or None
Version of `typname` to create.
server:
Not used.
res_desc: dict or None
Not used.
ctor_args: dict
Other constructor arguments. Not used.
"""
for typ, ver in self.get_available_types():
if typ == typname:
if version is None or ver == version:
return proxy.ComponentProxy(typname, self._host, self._port)
return None
def get_available_types(self, groups=None):
"""
Returns a set of tuples of the form ``(typname, version)``,
one for each available component type.
groups: list[string]
OpenMDAO entry point groups.
Only 'openmdao.component' is supported.
"""
if groups is not None and 'openmdao.component' not in groups:
return []
types = []
self._list('', types)
return types
def _list(self, category, types):
""" List components in `category` and sub-categories. """
if category:
category += '/'
for comp in self._client.list_components(category):
comp = '%s%s' % (category, comp)
try:
versions = self._client.versions(comp)
except RuntimeError:
types.append((comp, ''))
else:
for version in versions:
types.append((comp, version))
for sub in self._client.list_categories(category):
sub = '%s%s' % (category, sub)
self._list(sub, types)
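# Editor's note: a hypothetical usage sketch appended by the editor (not part of the
# original module); the host name is a placeholder and a running AnalysisServer is assumed.
if __name__ == '__main__':
    factory = ASFactory(host='analysis-server.example.com', port=server.DEFAULT_PORT)
    for typname, version in factory.get_available_types():
        print(typname, version)
    comp = factory.create('some.Component')  # ComponentProxy, or None if the type is unknown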
| 28.602273
| 80
| 0.573699
| 282
| 2,517
| 5.003546
| 0.340426
| 0.028349
| 0.026931
| 0.029766
| 0.042523
| 0.042523
| 0
| 0
| 0
| 0
| 0
| 0
| 0.337306
| 2,517
| 87
| 81
| 28.931034
| 0.845923
| 0.320222
| 0
| 0
| 0
| 0
| 0.023904
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.055556
| 0
| 0.305556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
727c127931f6990427efb822c10447cc3dcfa23b
| 1,726
|
bzl
|
Python
|
antlir/bzl/image_actions/tarball.bzl
|
SaurabhAgarwala/antlir
|
d9513d35d3eaa9d28717a40057a14d099c6ec775
|
[
"MIT"
] | null | null | null |
antlir/bzl/image_actions/tarball.bzl
|
SaurabhAgarwala/antlir
|
d9513d35d3eaa9d28717a40057a14d099c6ec775
|
[
"MIT"
] | null | null | null |
antlir/bzl/image_actions/tarball.bzl
|
SaurabhAgarwala/antlir
|
d9513d35d3eaa9d28717a40057a14d099c6ec775
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
load("//antlir/bzl:maybe_export_file.bzl", "maybe_export_file")
load("//antlir/bzl:shape.bzl", "shape")
load(
"//antlir/bzl:target_tagger.bzl",
"image_source_as_target_tagged_shape",
"new_target_tagger",
"target_tagged_image_source_shape",
"target_tagger_to_feature",
)
tarball_t = shape.shape(
force_root_ownership = shape.field(bool, optional = True),
into_dir = shape.path(),
source = target_tagged_image_source_shape,
)
def image_tarball(source, dest, force_root_ownership = False):
"""
`image.tarball("files/xyz.tar", "/a/b")` extracts tarball located at `files/xyz.tar` to `/a/b` in the image --
- `source` is one of:
- an `image.source` (docs in `image_source.bzl`), or
- the path of a target outputting a tarball target path,
e.g. an `export_file` or a `genrule`
- `dest` is the destination of the unpacked tarball in the image.
This is an image-absolute path to a directory that must be created
by another `feature_new` item.
"""
target_tagger = new_target_tagger()
tarball = shape.new(
tarball_t,
force_root_ownership = force_root_ownership,
into_dir = dest,
source = image_source_as_target_tagged_shape(
target_tagger,
maybe_export_file(source),
),
)
return target_tagger_to_feature(
target_tagger,
items = struct(tarballs = [tarball]),
# The `fake_macro_library` docblock explains this self-dependency
extra_deps = ["//antlir/bzl/image_actions:tarball"],
)
| 34.52
| 110
| 0.685979
| 239
| 1,726
| 4.711297
| 0.380753
| 0.085258
| 0.063943
| 0.031972
| 0.10302
| 0.053286
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206837
| 1,726
| 49
| 111
| 35.22449
| 0.822498
| 0.406721
| 0
| 0.066667
| 0
| 0
| 0.246792
| 0.208292
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
727d45160be9a8707217c7303a6e3540147bb34c
| 457
|
py
|
Python
|
submit.py
|
young-geng/UVaClient
|
8ff4a368ac8f0395248292a0d903047a074752ed
|
[
"BSD-2-Clause"
] | 2
|
2017-09-07T07:01:53.000Z
|
2018-04-26T08:08:12.000Z
|
submit.py
|
young-geng/UVaClient
|
8ff4a368ac8f0395248292a0d903047a074752ed
|
[
"BSD-2-Clause"
] | null | null | null |
submit.py
|
young-geng/UVaClient
|
8ff4a368ac8f0395248292a0d903047a074752ed
|
[
"BSD-2-Clause"
] | null | null | null |
import requests
from sys import stderr
import re
def submit(session, problem_id, language, source):
language_code = {
'c': 1,
'java': 2,
'c++': 3,
'pascal': 4,
'c++11': 5
}
url = "http://uva.onlinejudge.org/index.php?option=com_onlinejudge&Itemid=25&page=save_submission"
data = {
'problemid': '',
'category': '',
'localid': problem_id,
'language': language_code[language],
'code': source
}
session.post(url, data=data)
| 19.041667
| 99
| 0.641138
| 60
| 457
| 4.783333
| 0.7
| 0.125436
| 0.118467
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024064
| 0.181619
| 457
| 23
| 100
| 19.869565
| 0.743316
| 0
| 0
| 0
| 0
| 0.05
| 0.317982
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.15
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
727ddb1308f9ba28efd3e85b5a607205219bd3f3
| 5,028
|
py
|
Python
|
FIGURE4/eddymoc_scripts/noresm_cesm_eddymoc_150yrs.py
|
adagj/ECS_SOconvection
|
d1bb935b37380f11e021a463c6a807d7527220a6
|
[
"MIT"
] | 1
|
2021-11-26T00:29:28.000Z
|
2021-11-26T00:29:28.000Z
|
FIGURE4/eddymoc_scripts/noresm_cesm_eddymoc_150yrs.py
|
adagj/ECS_SOconvection
|
d1bb935b37380f11e021a463c6a807d7527220a6
|
[
"MIT"
] | null | null | null |
FIGURE4/eddymoc_scripts/noresm_cesm_eddymoc_150yrs.py
|
adagj/ECS_SOconvection
|
d1bb935b37380f11e021a463c6a807d7527220a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Ada Gjermundsen
year: 2019 - 2021
This script is used to calculate the eddy-induced overturning in CESM2 and NorESM2 (LM and MM) south of 50S
for the CMIP experiments piControl and abrupt-4xCO2 after 150 years;
the averaging period is 30 years
The result is used in FIGURE 4
"""
import sys
sys.path.insert(1, '/scratch/adagj/CMIP6/CLIMSENS/CMIP6_UTILS')
import CMIP6_ATMOS_UTILS as atmos
import CMIP6_SEAICE_UTILS as ocean
from read_modeldata_cmip6 import ecs_models_cmip6, make_filelist_cmip6, Modelinfo
import numpy as np
from dask.diagnostics import ProgressBar
import warnings
warnings.simplefilter('ignore')
import xarray as xr
xr.set_options(enable_cftimeindex=True)
def make_attributes(da, var, expid):
da.attrs['long_name']='Global Ocean Meridional Overturning Mass Streamfunction Due to Parameterized Mesoscale Advection'
da.attrs['name']='eddymoc'
da.attrs['units']='kg s-1'
da.attrs['standard_name']='global_ocean_meridional_overturning_mass_streamfunction_due_to_parameterized_mesoscale_eddy_advection'
da.attrs['expid']=expid
ds = da.to_dataset(name = var)
return ds
def extract_global_moc(modelname, da, dac, var):
if 'sector' in da.coords:
da = da.drop('sector')
if 'sector' in dac.coords:
dac = dac.drop('sector')
da = da.isel(basin=-1)
dac = dac.isel(basin=-1)
return da, dac
def make_reference_slice(model, ds, var, endyr):
ds = ocean.consistent_naming(ds)
ds = atmos.fix_time(ds, 1)
return ds
def make_yearly_avg(model, ds, var, endyr):
da = atmos.yearly_avg(ds[var])
if model.expid in ['piControl']:
da = da.isel(year=slice(model.branchtime_year+endyr-30, model.branchtime_year+endyr))
else:
da = da.isel(year=slice(endyr-30, endyr))
da = da.mean(dim='year')
return da
def make_modelobj(modelname, expinfo, expid='piControl'):
model = Modelinfo(name = modelname, institute = expinfo['institute'], expid = expid, realm = 'Omon',
realiz=expinfo['variant_labels'][0], grid_atmos = expinfo['grid_label_atmos'][0], grid_ocean = expinfo['grid_label_ocean'], branchtime_year=expinfo['branch_yr'])
return model
def read_files(model, var):
if model.name in ['NorESM2-LM', 'NorESM2-MM']:
make_filelist_cmip6(model, var, component = 'ocean', activity_id='CMIP',path_to_data = '/projects/NS9034K/CMIP6/')
else:
make_filelist_cmip6(model, var, component = 'ocean')
print(model.filenames)
if model.filenames:
if len(model.filenames)>1:
ds = xr.open_mfdataset(model.filenames, combine='nested', concat_dim='time', parallel=True, chunks={"time":1})
else:
ds = xr.open_dataset(model.filenames[0], chunks={"time":1})
print('%s loaded for model: %s, experiment: piControl. Length of simulation: %.1f years' % (var, model.name, len(ds[var].time.values)/12))
else:
print('%s not loaded for model %s, experiment: piControl. Skipping model! Please check!'%(var,model.name))
return ds
def make_last_30yrs_avg(models, var, outpath, endyr=150):
print('global eddy moc: \n')
for modelname,expinfo in models.items():
print(modelname)
if var in ['msftmzsmpa'] and modelname in ['NorESM2-LM']:
continue
modelctrl = make_modelobj(modelname, expinfo, expid='piControl')
dsc = read_files(modelctrl, var)
dsc = make_reference_slice(modelctrl, dsc, var, endyr)
model4xco2 = make_modelobj(modelname, expinfo, expid='abrupt-4xCO2')
ds = read_files(model4xco2, var)
ds = make_reference_slice(model4xco2, ds, var, endyr)
ds, dsc = extract_global_moc(modelname, ds, dsc, var)
da = make_yearly_avg(model4xco2, ds, var, endyr)
dac = make_yearly_avg(modelctrl, dsc, var, endyr)
dsout_ctrl = make_attributes(dac, var, 'piControl')
dsout_case = make_attributes(da, var, 'abrupt-4xCO2')
print(dsout_ctrl)
print(dsout_case)
dsout_ctrl = dsout_ctrl.to_netcdf(outpath + var +'_' + modelctrl.realm +'_' + modelctrl.name + '_' + modelctrl.expid + '_' + modelctrl.realiz + '_'+str(endyr) + '_30yravg.nc', compute=False)
dsout_case = dsout_case.to_netcdf(outpath + var +'_' + model4xco2.realm +'_' + model4xco2.name + '_' + model4xco2.expid + '_' + model4xco2.realiz + '_'+str(endyr) + '_30yravg.nc', compute=False)
with ProgressBar():
result = dsout_ctrl.compute()
result = dsout_case.compute()
del model4xco2, modelctrl, dsc, ds, dac, da, dsout_ctrl, dsout_case
if __name__ == '__main__':
outpath = 'path_to_outdata/'
models = ecs_models_cmip6()
models = {'NorESM2-LM':models['NorESM2-LM'], 'CESM2':models['CESM2']}
for var in ['msftmzsmpa', 'msftmzmpa']:
make_last_30yrs_avg(models, var=var, outpath=outpath, endyr=150)
| 45.297297
| 202
| 0.666468
| 664
| 5,028
| 4.865964
| 0.295181
| 0.009285
| 0.01238
| 0.025998
| 0.179201
| 0.158465
| 0.095946
| 0.050139
| 0.050139
| 0.050139
| 0
| 0.024605
| 0.207836
| 5,028
| 110
| 203
| 45.709091
| 0.786593
| 0.063047
| 0
| 0.078652
| 0
| 0
| 0.176521
| 0.035304
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078652
| false
| 0
| 0.089888
| 0
| 0.235955
| 0.078652
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7280fea22c910a6288443234b126b5c1c0f5d8b9
| 4,161
|
py
|
Python
|
wings/planner.py
|
KnowledgeCaptureAndDiscovery/wings-client
|
af1d068f4adc07d9060afa94dc99e0b2565be088
|
[
"Apache-2.0"
] | null | null | null |
wings/planner.py
|
KnowledgeCaptureAndDiscovery/wings-client
|
af1d068f4adc07d9060afa94dc99e0b2565be088
|
[
"Apache-2.0"
] | 8
|
2019-07-28T17:04:38.000Z
|
2019-08-06T23:57:08.000Z
|
wings/planner.py
|
KnowledgeCaptureAndDiscovery/wings-client
|
af1d068f4adc07d9060afa94dc99e0b2565be088
|
[
"Apache-2.0"
] | 1
|
2019-07-29T22:53:41.000Z
|
2019-07-29T22:53:41.000Z
|
import json
import re
class Planner(object):
def __init__(self, api_client):
self.api_client = api_client
def set_template(self, template):
self.wflowns = self.api_client.get_export_url() + "workflows/" + template + ".owl#"
self.wflowid = self.wflowns + template
def _set_bindings(self, invar, val, data_bindings, parameter_bindings, parameter_types):
if isinstance(val, basestring) and val.startswith('file:'):
data = data_bindings.get(self.wflowns + invar, [])
data.append(self.api_client.libns + val[5:])
data_bindings[self.wflowns + invar] = data
else:
parameter_bindings[self.wflowns + invar] = val
typeid = self.api_client.xsdns + "string"
if type(val) is int:
typeid = self.api_client.xsdns + "integer"
elif type(val) is float:
typeid = self.api_client.xsdns + "float"
elif type(val) is bool:
typeid = self.api_client.xsdns + "boolean"
parameter_types[self.wflowns + invar] = typeid
def get_expansions(self, inputs):
postdata = [('templateId', self.wflowid),
('componentBindings', '{}'), ('parameterBindings', '{}')]
data_bindings = dict()
parameter_bindings = dict()
parameter_types = dict()
for invar in inputs:
if type(inputs[invar]) is list:
for val in inputs[invar]:
self._set_bindings(
invar, val, data_bindings, parameter_bindings, parameter_types)
else:
self._set_bindings(
invar, inputs[invar], data_bindings, parameter_bindings, parameter_types)
postdata = {
"templateId": self.wflowid,
"dataBindings": data_bindings,
"parameterBindings": parameter_bindings,
"parameter_types": parameter_types,
"componentBindings": dict()
}
resp = self.api_client.session.post(
self.api_client.get_request_url() + 'plan/getExpansions', json=postdata)
return resp.json()
def select_template(self, templates):
from sys import version_info
py3 = version_info[0] > 2
i = 1
num = len(templates)
for tpl in templates:
print("%s. %s" %
(i, self.api_client.get_template_description(tpl['template'])))
i += 1
index = 0
while True:
if py3:
index = int(input("Please enter your selection: "))
else:
index = int(raw_input("Please enter your selection: "))
if index < 1 or index > num:
print("Invalid Selection. Try again")
else:
break
return templates[index - 1]
def get_template_description(self, template):
regex = re.compile(r"^.*#")
components = {}
for nodeid in template['Nodes']:
node = template['Nodes'][nodeid]
comp = regex.sub("", node['componentVariable']['binding']['id'])
if comp in components:
components[comp] += 1
else:
components[comp] = 1
description = regex.sub("", template['id']) + " ( "
i = 0
for comp in components:
if i > 0:
description += ", "
description += str(components[comp]) + " " + comp
i += 1
description += " )"
return description
def run_workflow(self, template, seed):
postdata = {
'template_id': seed["template"]["id"],
'json': json.dumps(template["template"]),
'constraints_json': json.dumps(template["constraints"]),
'seed_json': json.dumps(seed["template"]),
'seed_constraints_json': json.dumps(seed["constraints"])
}
resp = self.api_client.session.post(self.api_client.get_request_url(
) + 'executions/runWorkflow', data=postdata)
regex = re.compile(r"^.*#")
return regex.sub("", resp.text)
| 37.827273
| 93
| 0.550829
| 426
| 4,161
| 5.225352
| 0.274648
| 0.056604
| 0.075921
| 0.028751
| 0.182839
| 0.113657
| 0.09434
| 0.09434
| 0.048518
| 0.048518
| 0
| 0.005392
| 0.331411
| 4,161
| 109
| 94
| 38.174312
| 0.794752
| 0
| 0
| 0.132653
| 0
| 0
| 0.112233
| 0.010334
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.030612
| 0
| 0.153061
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7283a25bff5f7d4878aa3e626f36db48a3d5a96f
| 1,065
|
py
|
Python
|
scripts/run_d435.py
|
suet-lee/mycelium
|
db83cd3ab00697f28b2def2cebcdef52698fdd92
|
[
"Apache-2.0"
] | 6
|
2021-05-23T17:36:02.000Z
|
2022-01-21T20:34:17.000Z
|
scripts/run_d435.py
|
suet-lee/mycelium
|
db83cd3ab00697f28b2def2cebcdef52698fdd92
|
[
"Apache-2.0"
] | null | null | null |
scripts/run_d435.py
|
suet-lee/mycelium
|
db83cd3ab00697f28b2def2cebcdef52698fdd92
|
[
"Apache-2.0"
] | 1
|
2021-06-17T20:35:10.000Z
|
2021-06-17T20:35:10.000Z
|
#!/usr/bin/env python3
from mycelium import CameraD435
from mycelium_utils import Scripter
class ScripterExt(Scripter):
def run_main(self):
self.camera = CameraD435(
configuration_mode=self.cfg.d435['configuration_mode'],
enable_rgb_stream=self.cfg.d435['enable_rgb_stream'],
enable_depth_stream=self.cfg.d435['enable_depth_stream'],
enable_infrared_stream=self.cfg.d435['enable_infrared_stream'],
save_rgb_frames=self.cfg.d435['save_rgb_frames'],
save_depth_frames=self.cfg.d435['save_depth_frames'],
save_infrared_frames=self.cfg.d435['save_infrared_frames'])
self.camera.start()
def _sigint_handler(self, sig, frame):
self.camera.exit_threads = True
def _sigterm_handler(self, sig, frame):
self.camera.exit_threads = True
self.exit_code = 0
def close_script(self):
try:
self.camera.stop()
except:
pass
scripter = ScripterExt(log_source="run_d435")
scripter.run()
| 29.583333
| 75
| 0.661033
| 131
| 1,065
| 5.083969
| 0.358779
| 0.073574
| 0.115616
| 0.076577
| 0.33033
| 0.132132
| 0.132132
| 0.132132
| 0.132132
| 0
| 0
| 0.039312
| 0.235681
| 1,065
| 35
| 76
| 30.428571
| 0.77887
| 0.019718
| 0
| 0.08
| 0
| 0
| 0.130393
| 0.021093
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0.04
| 0.08
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7285e9ab42c1215b9cfd19e9d7ea2bd99678e38c
| 2,789
|
py
|
Python
|
evology/research/MCarloLongRuns/Exp1_WSvsReturn.py
|
aymericvie/evology
|
8f00d94dee7208be5a5bdd0375a9d6ced25097f4
|
[
"Apache-2.0"
] | null | null | null |
evology/research/MCarloLongRuns/Exp1_WSvsReturn.py
|
aymericvie/evology
|
8f00d94dee7208be5a5bdd0375a9d6ced25097f4
|
[
"Apache-2.0"
] | 2
|
2022-01-10T02:10:56.000Z
|
2022-01-14T03:41:42.000Z
|
evology/research/MCarloLongRuns/Exp1_WSvsReturn.py
|
aymericvie/evology
|
8f00d94dee7208be5a5bdd0375a9d6ced25097f4
|
[
"Apache-2.0"
] | null | null | null |
# Imports
import numpy as np
import pandas as pd
import sys
import tqdm
import warnings
import time
import ternary
from ternary.helpers import simplex_iterator
import multiprocessing as mp
warnings.simplefilter("ignore")
if sys.platform == "darwin":
sys.path.append("/Users/aymericvie/Documents/GitHub/evology/evology/code")
# Needs to be executed from the MCarloLongRuns directory
if sys.platform == "linux":
sys.path.append("/home/vie/Documents/GitHub/evology/evology/code")
from main import main as evology
startTime = time.time()
TimeHorizon = 252 * 5
PopulationSize = 3
def job(coords):
np.random.seed()
try:
df, pop = evology(
space="scholl",
solver="esl.true",
wealth_coordinates=coords,
POPULATION_SIZE=PopulationSize,
MAX_GENERATIONS=TimeHorizon,
PROBA_SELECTION=0,
MUTATION_RATE=0,
ReinvestmentRate=1.0,
InvestmentHorizon=21,
InvestorBehavior="profit",
tqdm_display=True,
reset_wealth=True,
)
result = [
coords[0],
coords[1],
coords[2],
df["NT_returns"].mean(),
df["VI_returns"].mean(),
df["TF_returns"].mean(),
df["NT_returns"].std(),
df["VI_returns"].std(),
df["TF_returns"].std(),
df["HighestT"].mean(),
df["AvgAbsT"].mean(),
]
return result
except Exception as e:
print(e)
print("Failed run" + str(coords) + str(e))
result = [coords[0], coords[1], coords[2]]
for _ in range(8):
result.append(0)
return result
# Define the domains
def GenerateCoords(reps, scale):
param = []
for (i, j, k) in simplex_iterator(scale):
for _ in range(reps):
param.append([i / scale, j / scale, k / scale])
return param
reps = 10
scale = 50 # increment = 1/scale
param = GenerateCoords(reps, scale)
# print(param)
print(len(param))
# Run experiment
def main():
p = mp.Pool()
data = p.map(job, tqdm.tqdm(param))
p.close()
data = np.array(data)
return data
if __name__ == "__main__":
data = main()
df = pd.DataFrame()
# Inputs
df["WS_NT"] = data[:, 0]
df["WS_VI"] = data[:, 1]
df["WS_TF"] = data[:, 2]
# Outputs
df["NT_returns_mean"] = data[:, 3]
df["VI_returns_mean"] = data[:, 4]
df["TF_returns_mean"] = data[:, 5]
df["NT_returns_std"] = data[:, 6]
df["VI_returns_std"] = data[:, 7]
df["TF_returns_std"] = data[:, 8]
df["HighestT"] = data[:, 9]
df["AvgAbsT"] = data[:, 10]
print(df)
df.to_csv("data/data1.csv")
print("Completion time: " + str(time.time() - startTime))
| 24.043103
| 78
| 0.570097
| 345
| 2,789
| 4.489855
| 0.388406
| 0.042608
| 0.028405
| 0.037444
| 0.077469
| 0.034861
| 0.034861
| 0
| 0
| 0
| 0
| 0.018556
| 0.285048
| 2,789
| 115
| 79
| 24.252174
| 0.758275
| 0.048404
| 0
| 0.022472
| 0
| 0
| 0.143667
| 0.038563
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033708
| false
| 0
| 0.11236
| 0
| 0.191011
| 0.05618
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72881bbcc670bbaede8d42280fea146e0bccefea
| 702
|
py
|
Python
|
piglatin_microservice/views/main.py
|
Curly-Mo/piglatin
|
9ea4a7533675bcb5b28f708beda18f175e0a9fe4
|
[
"MIT"
] | null | null | null |
piglatin_microservice/views/main.py
|
Curly-Mo/piglatin
|
9ea4a7533675bcb5b28f708beda18f175e0a9fe4
|
[
"MIT"
] | null | null | null |
piglatin_microservice/views/main.py
|
Curly-Mo/piglatin
|
9ea4a7533675bcb5b28f708beda18f175e0a9fe4
|
[
"MIT"
] | null | null | null |
from flask import request, jsonify, Blueprint
from .. import piglatin
main = Blueprint('main', __name__)
@main.route('/', methods=['GET', 'POST'])
def index():
response = """
Please use the endpoint /translate to access this api.
Usage: "{}translate?text=Translate+this+text+into+Piglatin."
""".format(request.url)
return response
@main.route('/translate', methods=['GET'])
def translate():
text = request.args.get('text')
if not text:
message = 'Invalid parameter text={}'.format(text)
return jsonify(error=500, text=str(message)), 500
pig_text = piglatin.translate(text)
response = {'text': pig_text}
return jsonify(response)
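# Editor's note: a small smoke-test helper added by the editor (not part of the original
# module); it assumes the blueprint can be registered on a bare Flask app for testing.
def _smoke_test():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(main)
    with app.test_client() as client:
        print(client.get('/translate?text=hello world').get_json())
        print(client.get('/translate').get_json())  # missing text -> 500 error payload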
| 23.4
| 68
| 0.650997
| 84
| 702
| 5.369048
| 0.488095
| 0.086475
| 0.075388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010638
| 0.196581
| 702
| 29
| 69
| 24.206897
| 0.789007
| 0
| 0
| 0
| 0
| 0
| 0.277778
| 0.075499
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.368421
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7289763d0bdfbe7696c866a5439b0ea41f2358eb
| 1,240
|
py
|
Python
|
projects/pong-game/pong.py
|
sumanentc/Python-Projects
|
11c763fcbe4e088928bd56c28f767b93ae73984d
|
[
"MIT"
] | null | null | null |
projects/pong-game/pong.py
|
sumanentc/Python-Projects
|
11c763fcbe4e088928bd56c28f767b93ae73984d
|
[
"MIT"
] | null | null | null |
projects/pong-game/pong.py
|
sumanentc/Python-Projects
|
11c763fcbe4e088928bd56c28f767b93ae73984d
|
[
"MIT"
] | null | null | null |
from turtle import Screen
from paddle import Paddle
from ball import Ball
import time
from scoreboard import ScoreBoard
screen = Screen()
screen.bgcolor('black')
screen.setup(width=800, height=600)
screen.title('pong')
# Turn off animation to show paddle after it has been shifted
screen.tracer(0)
right_paddle = Paddle(350, 0)
left_paddle = Paddle(-350, 0)
ball = Ball()
score = ScoreBoard()
screen.listen()
screen.onkey(right_paddle.go_up, 'Up')
screen.onkey(right_paddle.go_down, 'Down')
screen.onkey(left_paddle.go_up, 'w')
screen.onkey(left_paddle.go_down, 's')
game_is_on = True
while game_is_on:
time.sleep(ball.ball_speed)
screen.update()
ball.move()
# bounce when the ball hit the wall
if ball.ycor() > 280 or ball.ycor() < -280:
ball.bounce_y()
# detect collision with the paddle
if (ball.distance(right_paddle) < 50 and ball.xcor() > 320) or (
ball.distance(left_paddle) < 50 and ball.xcor() < -320):
ball.bounce_x()
# detect R paddle miss
if ball.xcor() > 380:
ball.reset_pos()
score.increase_l_point()
# detect L paddle miss
if ball.xcor() < -380:
ball.reset_pos()
score.increase_r_point()
screen.exitonclick()
| 22.962963
| 68
| 0.679032
| 185
| 1,240
| 4.416216
| 0.416216
| 0.053856
| 0.03672
| 0.039168
| 0.286414
| 0.171359
| 0.117503
| 0.117503
| 0.117503
| 0.117503
| 0
| 0.037336
| 0.200806
| 1,240
| 53
| 69
| 23.396226
| 0.787084
| 0.135484
| 0
| 0.055556
| 0
| 0
| 0.015947
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.138889
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
728a356d657b32fde675735de36842cf48062bf3
| 1,579
|
py
|
Python
|
ExPy/ExPy/module20.py
|
brad-h/expy
|
d3f3dfbbdae31ab8c7e134a5ce9d5f6adf94b516
|
[
"MIT"
] | null | null | null |
ExPy/ExPy/module20.py
|
brad-h/expy
|
d3f3dfbbdae31ab8c7e134a5ce9d5f6adf94b516
|
[
"MIT"
] | null | null | null |
ExPy/ExPy/module20.py
|
brad-h/expy
|
d3f3dfbbdae31ab8c7e134a5ce9d5f6adf94b516
|
[
"MIT"
] | null | null | null |
""" Multistate Sales Tax Calculator """
import os
from decimal import Decimal
from decimal import InvalidOperation
def prompt_decimal(prompt):
""" Using the prompt, attempt to get a decimal from the user """
while True:
try:
return Decimal(input(prompt))
except InvalidOperation:
print('Enter a valid number')
def dollar(amount):
""" Given an amount as a number
Return a string formatted as a dollar amount
"""
amount = round(amount, 2)
return '${0:0.2f}'.format(amount)
STATE_RATES = {
'ILLINOIS': Decimal('0.08'),
'IL': Decimal('0.08'),
'WISCONSIN': Decimal('0.05'),
'WI': Decimal('0.05'),
}
WISCONSIN_RATES = {
'EAU CLAIRE': Decimal('0.005'),
'DUNN': Decimal('0.004')
}
def ex20():
""" Prompt for the order amount and state
If the state is Wisconsin, prompt for the county
Print the sales tax and total amount
"""
amount = prompt_decimal('What is the order amount? ')
state = input('What state do you live in? ')
if state.upper() in STATE_RATES:
rate = STATE_RATES[state.upper()]
else:
rate = Decimal(0)
if state.upper() == 'WISCONSIN':
county = input('What county do you live in? ')
if county.upper() in WISCONSIN_RATES:
rate += WISCONSIN_RATES[county.upper()]
tax = amount * rate
total = tax + amount
output = os.linesep.join([
'The tax is {}'.format(dollar(tax)),
'The total is {}'.format(dollar(total))])
print(output)
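# Editor's note: a non-interactive walk-through added by the editor (not part of the
# original module); a $10.00 order in Eau Claire, Wisconsin is taxed at 5% + 0.5% = 5.5%.
def _example():
    amount = Decimal('10.00')
    rate = STATE_RATES['WI'] + WISCONSIN_RATES['EAU CLAIRE']
    print(dollar(amount * rate), dollar(amount + amount * rate))  # -> $0.55 $10.55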
if __name__ == '__main__':
ex20()
| 25.467742
| 68
| 0.609246
| 206
| 1,579
| 4.592233
| 0.359223
| 0.059197
| 0.035941
| 0.023256
| 0.027484
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024765
| 0.258391
| 1,579
| 61
| 69
| 25.885246
| 0.783091
| 0.181127
| 0
| 0
| 0
| 0
| 0.173494
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0.073171
| 0
| 0.195122
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
728a6a081e2856ad1af42a97ac6d7dc3898ac287
| 2,650
|
py
|
Python
|
pyvdms/util/verify.py
|
psmsmets/pyVDMS
|
cb3db93b655d3a02ae3aa1fdd418ae70dd249271
|
[
"MIT"
] | 1
|
2020-04-22T14:38:23.000Z
|
2020-04-22T14:38:23.000Z
|
pyvdms/util/verify.py
|
psmsmets/PyVDMS
|
cb3db93b655d3a02ae3aa1fdd418ae70dd249271
|
[
"MIT"
] | null | null | null |
pyvdms/util/verify.py
|
psmsmets/PyVDMS
|
cb3db93b655d3a02ae3aa1fdd418ae70dd249271
|
[
"MIT"
] | null | null | null |
r"""
:mod:`util.verify` -- Input verification
========================================
Common input verification methods.
"""
# Mandatory imports
import numpy as np
__all__ = ['verify_tuple_range']
def verify_tuple_range(
input_range: tuple, allow_none: bool = True, name: str = None,
step: bool = None, unit: bool = None, todict: bool = False
):
"""
Verify whether the input range tuple fulfils the requirements.
An error is raised if any criterion is not met.
"""
name = name or 'input range'
r = dict(first=None, last=None, step=None, unit=None)
if input_range is None:
if allow_none:
return r if todict else None
else:
raise ValueError(f'{name} is empty!')
if not isinstance(input_range, tuple):
raise TypeError(f'{name} should be a tuple!')
minlen = 2
maxlen = 4
if step is True:
minlen += 1
elif step is False:
maxlen -= 1
if unit is True:
minlen += 1
elif unit is False:
maxlen -= 1
if len(input_range) < minlen or len(input_range) > maxlen:
length = minlen if minlen == maxlen else f'{minlen} to {maxlen}'
raise TypeError(f'{name} should be of length {length}!')
r['first'] = input_range[0]
r['last'] = input_range[1]
if not isinstance(r['first'], float) or not isinstance(r['last'], float):
raise TypeError(f'{name} range values should be of type float!')
if step is not False:
if step: # required
r['step'] = input_range[2]
if not isinstance(r['step'], float):
raise TypeError(f'{name} step should be of type float!')
else: # optional
r['step'] = input_range[2] if len(input_range) > minlen else None
r['step'] = r['step'] if isinstance(r['step'], float) else None
if r['step']:
if r['step'] == 0.:
raise ValueError(f'{name} step cannot be zero!')
if np.sign(r['last'] - r['first']) != np.sign(r['step']):
raise ValueError(f'{name} range and step signs should be equal!')
else:
if r['last'] <= r['first']:
raise ValueError(f'{name} range should be incremental!')
if unit is not False:
if unit: # required
r['unit'] = input_range[-1]
if not isinstance(r['unit'], str):
raise TypeError(f'{name} unit should be of type string!')
else: # optional
r['unit'] = input_range[-1] if len(input_range) > minlen else None
r['unit'] = r['unit'] if isinstance(r['unit'], str) else None
return r if todict else None
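# Editor's note: illustrative calls appended by the editor (not part of the original
# module), showing one accepted and one rejected range.
if __name__ == '__main__':
    print(verify_tuple_range((0., 10., 2., 'Hz'), name='frequency', todict=True))
    # -> {'first': 0.0, 'last': 10.0, 'step': 2.0, 'unit': 'Hz'}
    try:
        verify_tuple_range((10., 0.), name='frequency')
    except ValueError as exc:
        print(exc)  # frequency range should be incremental!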
| 30.45977
| 78
| 0.570189
| 366
| 2,650
| 4.065574
| 0.213115
| 0.100806
| 0.050403
| 0.063844
| 0.334677
| 0.200941
| 0.112903
| 0.040323
| 0
| 0
| 0
| 0.006904
| 0.289434
| 2,650
| 86
| 79
| 30.813953
| 0.783324
| 0.103774
| 0
| 0.172414
| 0
| 0
| 0.190009
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017241
| false
| 0
| 0.017241
| 0
| 0.068966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
728cb69af3c08ef7582f9c410b1fc5f27c722c62
| 2,349
|
py
|
Python
|
api/image_similarity.py
|
reneraab/librephotos
|
a3972ab520586e721c67f283b1a50ccb7abe2b01
|
[
"MIT"
] | null | null | null |
api/image_similarity.py
|
reneraab/librephotos
|
a3972ab520586e721c67f283b1a50ccb7abe2b01
|
[
"MIT"
] | null | null | null |
api/image_similarity.py
|
reneraab/librephotos
|
a3972ab520586e721c67f283b1a50ccb7abe2b01
|
[
"MIT"
] | null | null | null |
import numpy as np
import requests
from django.db.models import Q
from api.models import Photo, User
from api.util import logger
from ownphotos.settings import IMAGE_SIMILARITY_SERVER
def search_similar_embedding(user, emb, result_count=100, threshold=27):
if type(user) == int:
user_id = user
else:
user_id = user.id
image_embedding = np.array(emb, dtype=np.float32)
post_data = {
"user_id": user_id,
"image_embedding": image_embedding.tolist(),
"n": result_count,
"threshold": threshold,
}
res = requests.post(IMAGE_SIMILARITY_SERVER + "/search/", json=post_data)
if res.status_code == 200:
return res.json()["result"]
else:
logger.error("error retrieving similar embeddings for user {}".format(user_id))
return []
def search_similar_image(user, photo):
if type(user) == int:
user_id = user
else:
user_id = user.id
if photo.clip_embeddings == None:
photo._generate_clip_embeddings()
if photo.clip_embeddings == None:
return []
image_embedding = np.array(photo.clip_embeddings, dtype=np.float32)
post_data = {"user_id": user_id, "image_embedding": image_embedding.tolist()}
res = requests.post(IMAGE_SIMILARITY_SERVER + "/search/", json=post_data)
if res.status_code == 200:
return res.json()
else:
logger.error(
"error retrieving similar photos to {} belonging to user {}".format(
photo.image_hash, user.username
)
)
return []
def build_image_similarity_index(user):
logger.info("builing similarity index for user {}".format(user.username))
photos = (
Photo.objects.filter(Q(hidden=False) & Q(owner=user))
.exclude(clip_embeddings=None)
.only("clip_embeddings")
)
image_hashes = []
image_embeddings = []
for photo in photos:
image_hashes.append(photo.image_hash)
image_embedding = np.array(photo.clip_embeddings, dtype=np.float32)
image_embeddings.append(image_embedding.tolist())
post_data = {
"user_id": user.id,
"image_hashes": image_hashes,
"image_embeddings": image_embeddings,
}
res = requests.post(IMAGE_SIMILARITY_SERVER + "/build/", json=post_data)
return res.json()
| 29
| 87
| 0.648361
| 289
| 2,349
| 5.055363
| 0.259516
| 0.053388
| 0.047912
| 0.041068
| 0.463381
| 0.429158
| 0.344285
| 0.327173
| 0.327173
| 0.327173
| 0
| 0.009524
| 0.240102
| 2,349
| 80
| 88
| 29.3625
| 0.808964
| 0
| 0
| 0.384615
| 0
| 0
| 0.116645
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046154
| false
| 0
| 0.092308
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
728d2b759b651322512d149b383118385699c3b6
| 621
|
py
|
Python
|
WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/struct/struct_endianness.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/struct/struct_endianness.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/struct/struct_endianness.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | null | null | null |
#
"""
"""
# end_pymotw_header
import struct
import binascii
values = (1, "ab".encode("utf-8"), 2.7)
print("Original values:", values)
endianness = [
("@", "native, native"),
("=", "native, standard"),
("<", "little-endian"),
(">", "big-endian"),
("!", "network"),
]
for code, name in endianness:
s = struct.Struct(code + " I 2s f")
packed_data = s.pack(*values)
print()
print("Format string :", s.format, "for", name)
print("Uses :", s.size, "bytes")
print("Packed Value :", binascii.hexlify(packed_data))
print("Unpacked Value :", s.unpack(packed_data))
| 21.413793
| 60
| 0.566828
| 73
| 621
| 4.753425
| 0.589041
| 0.086455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010267
| 0.215781
| 621
| 28
| 61
| 22.178571
| 0.702259
| 0.027375
| 0
| 0
| 0
| 0
| 0.281145
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.105263
| 0
| 0.105263
| 0.315789
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
728dfcc1ec3bf1913efacef3f37dc134c73588dd
| 7,307
|
py
|
Python
|
pydov/util/net.py
|
GuillaumeVandekerckhove/pydov
|
b51f77bf93d1f9e96dd39edf564d95426da04126
|
[
"MIT"
] | 32
|
2017-03-17T16:36:40.000Z
|
2022-02-18T13:10:50.000Z
|
pydov/util/net.py
|
GuillaumeVandekerckhove/pydov
|
b51f77bf93d1f9e96dd39edf564d95426da04126
|
[
"MIT"
] | 240
|
2017-01-03T12:32:15.000Z
|
2022-03-30T11:52:02.000Z
|
pydov/util/net.py
|
GuillaumeVandekerckhove/pydov
|
b51f77bf93d1f9e96dd39edf564d95426da04126
|
[
"MIT"
] | 17
|
2017-01-09T21:00:36.000Z
|
2022-03-01T15:04:21.000Z
|
# -*- coding: utf-8 -*-
"""Module grouping network-related utilities and functions."""
from queue import Empty, Queue
from threading import Thread
import requests
import urllib3
from requests.adapters import HTTPAdapter
import pydov
request_timeout = 300
class TimeoutHTTPAdapter(HTTPAdapter):
"""HTTPAdapter which adds a default timeout to requests. Allows timeout
to be overridden on a per-request basis.
"""
def __init__(self, *args, **kwargs):
"""Initialisation."""
self.timeout = request_timeout
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
del kwargs["timeout"]
super().__init__(*args, **kwargs)
def send(self, request, **kwargs):
"""Sends PreparedRequest object. Returns Response object.
Parameters
----------
request : requests.PreparedRequest
The PreparedRequest being sent.
Returns
-------
requests.Response
The Response of the request.
"""
timeout = kwargs.get("timeout")
if timeout is None:
kwargs["timeout"] = self.timeout
return super().send(request, **kwargs)
class SessionFactory:
"""Class for generating pydov configured requests Sessions. They are used
to send HTTP requests using our user-agent and with added retry-logic.
One global session is used for all requests, and additionally one
session is used per thread executing XML requests in parallel.
"""
@staticmethod
def get_session():
"""Request a new session.
Returns
-------
requests.Session
pydov configured requests Session.
"""
session = requests.Session()
session.headers.update(
{'user-agent': 'pydov/{}'.format(pydov.__version__)})
try:
retry = urllib3.util.Retry(
total=10, connect=10, read=10, redirect=5, backoff_factor=1,
allowed_methods=set(
['HEAD', 'GET', 'POST', 'PUT', 'OPTIONS']))
except TypeError:
# urllib3 < 1.26.0 used method_whitelist instead
retry = urllib3.util.Retry(
total=10, connect=10, read=10, redirect=5, backoff_factor=1,
method_whitelist=set(
['HEAD', 'GET', 'POST', 'PUT', 'OPTIONS']))
adapter = TimeoutHTTPAdapter(timeout=request_timeout,
max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
class LocalSessionThreadPool:
"""Thread pool of LocalSessionThreads used to perform XML I/O operations
in parallel.
"""
def __init__(self, workers=4):
"""Initialisation.
Set up the pool and start all workers.
Parameters
----------
workers : int, optional
Number of worker threads to use, defaults to 4.
"""
self.workers = []
self.input_queue = Queue(maxsize=100)
self.result_queue = Queue()
for i in range(workers):
self.workers.append(LocalSessionThread(self.input_queue))
self._start()
def _start(self):
"""Start all worker threads. """
for w in self.workers:
w.start()
def stop(self):
"""Stop all worker threads. """
for w in self.workers:
w.stop()
def execute(self, fn, args):
"""Execute the given function with its arguments in a worker thread.
This will add the job to the queue and will not wait for the result.
Use join() to retrieve the result.
Parameters
----------
fn : function
Function to execute.
args : tuple
Arguments that will be passed to the function.
"""
r = WorkerResult()
self.input_queue.put((fn, args, r))
self.result_queue.put(r)
def join(self):
"""Wait for all the jobs to be executed and return the results of all
jobs in a list.
Yields
------
WorkerResult
Results of the executed functions in the order they were
submitted.
"""
self.input_queue.join()
self.stop()
while not self.result_queue.empty():
yield self.result_queue.get()
class WorkerResult:
"""Class for storing the result of a job execution in the result queue.
This allows putting a result instance in the queue on job submission and
fill in the result later when the job completes. This ensures the result
output is in the same order as the jobs were submitted.
"""
def __init__(self):
"""Initialisation. """
self.result = None
self.error = None
def set_result(self, value):
"""Set the result of this job.
Parameters
----------
value : any
The result of the execution of the job.
"""
self.result = value
def get_result(self):
"""Retrieve the result of this job.
Returns
-------
any
The result of the execution of the job.
"""
return self.result
def set_error(self, error):
"""Set the error, in case the jobs fails with an exception.
Parameters
----------
error : Exception
The exception raised while executing this job.
"""
self.error = error
def get_error(self):
"""Retrieve the error, if any, of this job.
Returns
-------
Exception
The exception raised while executing this job.
"""
return self.error
class LocalSessionThread(Thread):
"""Worker thread using a local Session to execute functions. """
def __init__(self, input_queue):
"""Initialisation.
Bind to the input queue and create a Session.
Parameters
----------
input_queue : queue.Queue
Queue to poll for input, this should be in the form of a tuple with
3 items: function to call, list with arguments and WorkerResult
instance to store the output. The list with arguments will be
automatically extended with the local Session instance.
"""
super().__init__()
self.input_queue = input_queue
self.stopping = False
self.session = SessionFactory.get_session()
def stop(self):
"""Stop the worker thread at the next occasion. This can take up to
500 ms. """
self.stopping = True
def run(self):
"""Executed while the thread is running. This is called implicitly
when starting the thread. """
while not self.stopping:
try:
fn, args, r = self.input_queue.get(timeout=0.5)
args = list(args)
args.append(self.session)
try:
result = fn(*args)
except BaseException as e:
r.set_error(e)
else:
r.set_result(result)
finally:
self.input_queue.task_done()
except Empty:
pass
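# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of pydov/util/net.py): driving the thread
# pool defined above. Each submitted function receives the thread-local
# requests Session as an extra argument that LocalSessionThread appends
# automatically. The URL below is a placeholder, not something pydov uses.
if __name__ == "__main__":
    def fetch_status(url, session):
        # `session` is the per-thread requests.Session appended by the worker
        return session.get(url).status_code

    pool = LocalSessionThreadPool(workers=2)
    for _ in range(4):
        pool.execute(fetch_status, ("https://example.com",))
    for result in pool.join():  # WorkerResult objects in submission order
        print(result.get_result(), result.get_error())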
| 28.542969
| 79
| 0.569454
| 824
| 7,307
| 4.974515
| 0.283981
| 0.026836
| 0.027324
| 0.010246
| 0.109295
| 0.100512
| 0.088802
| 0.088802
| 0.065382
| 0.032203
| 0
| 0.008051
| 0.337074
| 7,307
| 255
| 80
| 28.654902
| 0.83815
| 0.407281
| 0
| 0.131313
| 0
| 0
| 0.030378
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161616
| false
| 0.010101
| 0.060606
| 0
| 0.313131
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
728ed3b7ad18763967710681b9cc5dc917ab6fd6
| 11,470
|
py
|
Python
|
SBOL2Excel/utils/sbol2excel.py
|
abamaj/SBOL-to-Excel
|
790ef5242990c06b20dcb8e207def8e4527aea02
|
[
"BSD-3-Clause"
] | null | null | null |
SBOL2Excel/utils/sbol2excel.py
|
abamaj/SBOL-to-Excel
|
790ef5242990c06b20dcb8e207def8e4527aea02
|
[
"BSD-3-Clause"
] | null | null | null |
SBOL2Excel/utils/sbol2excel.py
|
abamaj/SBOL-to-Excel
|
790ef5242990c06b20dcb8e207def8e4527aea02
|
[
"BSD-3-Clause"
] | null | null | null |
import sbol2
import pandas as pd
import os
import logging
from openpyxl import load_workbook
from openpyxl.worksheet.table import Table, TableStyleInfo
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.styles import Font, PatternFill, Border, Side
from requests_html import HTMLSession
#wasderivedfrom: source
#remove identity, persistentIdentity, displayID, version
#remove attachment (if empty)
#add library sheets
#add postprocessing function to remove unnecessary entries
class seqFile:
def __init__(self, file_path_in, output_path):
# global variables for homespace, document, and sheet
self.homeSpace = 'https://sys-bio.org'
self.document = file_path_in
self.file_location_path = os.path.dirname(__file__)
self.sheet = os.path.join(self.file_location_path, 'ontologies.xlsx')
self.output_template = os.path.join(self.file_location_path, 'Template_to_Output_Into_v001.xlsx')
self.output_path = output_path
def roleVariables(self):
# set Excel file into a dataframe
df = pd.read_excel(self.sheet, index_col=0,
sheet_name=1, usecols=[1, 2])
# convert the dataframe into a dictionary
roleConvertDict = df.to_dict()
# set dictionary indices and values (use column 'URI' in excel sheet)
roleName = roleConvertDict['URI']
# switch the indices' and values' positions
roleDictionary = {uri: role for role, uri in roleName.items()}
return roleDictionary
def orgVariables(self):
# set Excel file into a dataframe
df = pd.read_excel(self.sheet, index_col=0,
sheet_name=2, usecols=[0, 1])
# convert the dataframe into a dictionary
organismConvertDict = df.to_dict()
# set dictionary indices and values (use column 'txid' in excel sheet)
organismName = organismConvertDict['txid']
# switch the indices' and values' positions
organismDictionary = {str(txid): organism for organism, txid in organismName.items()}
return organismDictionary
# def inspectDocInfo(self):
# # declare homespace
# sbol2.setHomespace(self.homeSpace)
# doc = sbol2.Document()
# doc.read('../tests/test_files/' + self.document)
# # doc.read(self.document)
# # print document information
# print(doc)
# def printDocContents(self):
# # declare homespace
# sbol2.setHomespace(self.homeSpace)
# doc = sbol2.Document()
# doc.read('../tests/test_files/' + self.document)
# # doc.read(self.document)
# # print document contents
# for obj in doc:
# print(obj)
def readDocChart(self):
# declare homespace
sbol2.setHomespace(self.homeSpace)
doc = sbol2.Document()
doc.read(self.document)
# create a dictionary to hold all the component definitions' information
componentDefinitions = {}
# iterate through the component definitions
roleDict = self.roleVariables()
orgDict = self.orgVariables()
for cd in doc.componentDefinitions:
cdType = cd.type
# create a dictionary that has a key for the
# component definition's identity,
# and a value for all of its features
componentFeatures = {}
persistentIdentity = cd.properties['http://sbols.org/v2#persistentIdentity'][0]
# iterate through the properties of the component definitions
# and set them equal to propValue variable
for prop in cd.properties:
try:
propValue = cd.properties[prop][0]
except (IndexError):
propValue = cd.properties[prop]
# extract attribute property type
if propValue == []:
propValue = ''
prop = self.prop_convert(prop)
propValue = columnMethods(prop, propValue, doc, cdType,
roleDict, orgDict).colV
componentFeatures[prop] = str(propValue)
# append each componentFeatures dictionary as a
# value into the componentDefinitions
# dictionary with the 'persistentIdentity' serving as the key
componentDefinitions[persistentIdentity] = componentFeatures
# return the dictionary of information (temporary, maybe
# return true if read in correctly)
doc_chart = pd.DataFrame.from_dict(componentDefinitions, orient="index")
return doc_chart
def prop_convert(self, prop):
if type(prop) is str:
idx = prop.find('#')
# if parsing conditions meet, append them into the
# componentFeatures dictionary as necessary
if idx >= 1:
prop = prop[idx + 1:]
if prop == 'type':
prop = 'types'
if prop == 'http://purl.org/dc/terms/title':
prop = 'title'
if prop == 'http://purl.org/dc/terms/description':
prop = 'description'
if prop == 'http://purl.obolibrary.org/obo/OBI_0001617':
prop = 'OBI_0001617'
return (prop)
else:
raise ValueError()
def displayDocChart(self):
#display the dataframe
return pd.DataFrame.from_dict(self.readDocChart(), orient = "index")
def TEMP_readDocChart1(self):
#demo of table column names
columnNames = ['Part Name',
'Role',
'Design Notes',
'Altered Sequence',
'Part Description',
'Data Source Prefix',
'Data Source',
'Source Organism',
'Target Organism',
'Circular',
'length (bp)',
'Sequence',
'Data Source',
'Composite']
#import dataframe dictionary
#convert dictionary to dataframe
df = self.displayDocChart()
#type cast the dataframe to a set
dfSet = set(df)
#type cast the column names to a set
columnNameOrder = set(columnNames)
#check difference between the dataframe set and the column name order
dfSetDifference = dfSet.difference(columnNameOrder)
#check intersection between the dataframe set and the column name order
dfSetIntersection = dfSet.intersection(columnNameOrder)
#combine the type-cast difference and intersection
finalSetList = list(dfSetIntersection) + list(dfSetDifference)
#return the combined list of column names
return finalSetList
# def displayDocChart(self):
# # display the dataframe
# return pd.DataFrame.from_dict(self.readDocChart(), orient="index")
def columnString(self, n):
# loop through column length in order to get string appropriate
# values for excel sheet rows and columns
string = ""
while n > 0:
n, remainder = divmod(n - 1, 26)
string = chr(65 + remainder) + string
return string
def returnExcelChart(self):
start_row = 18
start_cell = f'A{start_row}'
# load a workbook
wb = load_workbook(self.output_template)
ws = wb.active
# load raw dataframe to df
df = self.readDocChart()
# set font features
ft1 = Font(name='Arial', size=12, color='548235')
ft2 = Font(name='Calibri', size=11, bold=True)
hold = dataframe_to_rows(df, index=False, header=True)
# counter = 0
# loop through worksheet
ws[start_cell].value = ''
for r in hold:
# if a specific cell is empty, continue to loop past it
if r == [None]:
continue
ws.append(r)
# counter += 1
# set table features
tab = Table(displayName="Parts_Lib", ref=f"A{start_row +1}:{self.columnString(len(df.columns))}{(len(df) * 2) - 2}")
style = TableStyleInfo(name="TableStyleLight7", showFirstColumn=False,
showLastColumn=False, showRowStripes=True,
showColumnStripes=False)
cellColor = PatternFill(patternType='solid',
fgColor='DDEBF7')
cellBorder = Side(border_style='medium', color="000000")
# cellIndex = len(x)
# gives cells within specified range their table attributes
for col in range(1, len(df.columns) + 1):
alpha = self.columnString(col)
ws[f'{alpha}{start_row+1}'].fill = cellColor
ws[f'{alpha}{start_row+1}'].border = Border(top=cellBorder)
tab.tableStyleInfo = style
ws.add_table(tab)
# counter = 0
# gives cells within specified range their font attributes
for row in range(len(df) - 1, (len(df) * 2 - 1)):
# counter = counter + 1
for cell in ws[row]:
cell.font = ft1
# gives cells within specified range their font attributes
# (these are special features for the title)
num_rows = len(df)
if num_rows % 2 > 0:
num_rows = num_rows - 1
for j in range(19, num_rows):
for x in ws[j]:
x.font = ft2
# output the file
wb.save(self.output_path)
wb.close()
logging.warning(f'Your converted file has been output at {self.output_path}')
class columnMethods:
def __init__(self, colN, colV, doc, cdType, roleDict, orgDict):
# global variables for the dataframe switch statements
self.colN = colN
self.colV = colV
self.doc = doc
self.cdType = cdType
self.roleDict = roleDict
self.orgDict = orgDict
# if the column name matches the function name, call the function
try:
return getattr(self, self.colN)()
# if the column name does not match the function name, call 'no_change'
except AttributeError:
return getattr(self, 'no_change')()
def no_change(self):
pass
# if the specified column role value is within the role column
def role(self):
roleVal = str(self.colV)
if roleVal in self.roleDict:
self.colV = self.roleDict[roleVal]
def types(self):
self.colV = self.colV.split('#')[-1]
def sequence(self):
self.colV = self.doc.getSequence(self.colV).elements
def sourceOrganism(self):
orgVal = str(self.colV)
orgVal = orgVal.split('=')[-1]
txid = self.colV.split('=')[-1]
if orgVal in self.orgDict:
self.colV = self.orgDict[orgVal]
else:
session = HTMLSession()
r = session.get(self.colV)
v = r.html.find('strong', first=True)
self.colV = v.text
self.orgDict[txid] = self.colV
def targetOrganism(self):
orgVal = str(self.colV)
orgVal = orgVal.split('=')[-1]
txid = self.colV.split('=')[-1]
if orgVal in self.orgDict:
self.colV = self.orgDict[orgVal]
else:
session = HTMLSession()
r = session.get(self.colV)
v = r.html.find('strong', first=True)
self.colV = v.text
self.orgDict[txid] = self.colV
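# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of sbol2excel.py): converting an SBOL2 file
# into the bundled Excel template. Both file names are hypothetical; the
# class reads the SBOL document given as the first argument and writes the
# populated workbook to the second.
if __name__ == "__main__":
    converter = seqFile("example_parts.xml", "parts_library_out.xlsx")
    converter.returnExcelChart()  # reads the document and saves the workbook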
| 38.619529
| 124
| 0.588317
| 1,268
| 11,470
| 5.260252
| 0.265773
| 0.022789
| 0.011244
| 0.008996
| 0.244678
| 0.233883
| 0.206147
| 0.189955
| 0.175262
| 0.175262
| 0
| 0.01181
| 0.320837
| 11,470
| 296
| 125
| 38.75
| 0.844416
| 0.269486
| 0
| 0.15508
| 0
| 0.005348
| 0.084701
| 0.009894
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085562
| false
| 0.005348
| 0.048128
| 0.005348
| 0.192513
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
728f92f6b83a6233b200b60652e9836df6ba2fba
| 17,145
|
py
|
Python
|
ocean_lib/models/data_token.py
|
akshay-ap/ocean.py
|
1dab70d164ca36a6cff284e8be82ae04344ad13f
|
[
"Apache-2.0"
] | null | null | null |
ocean_lib/models/data_token.py
|
akshay-ap/ocean.py
|
1dab70d164ca36a6cff284e8be82ae04344ad13f
|
[
"Apache-2.0"
] | null | null | null |
ocean_lib/models/data_token.py
|
akshay-ap/ocean.py
|
1dab70d164ca36a6cff284e8be82ae04344ad13f
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
import json
import os
import time
from collections import namedtuple
import requests
from eth_utils import remove_0x_prefix
from ocean_lib.data_provider.data_service_provider import DataServiceProvider
from ocean_lib.enforce_typing_shim import enforce_types_shim
from ocean_lib.ocean.util import from_base_18, to_base_18
from ocean_lib.web3_internal.contract_base import ContractBase
from ocean_lib.web3_internal.event_filter import EventFilter
from ocean_lib.web3_internal.wallet import Wallet
from ocean_lib.web3_internal.web3_provider import Web3Provider
from ocean_utils.http_requests.requests_session import get_requests_session
from web3 import Web3
from web3.exceptions import MismatchedABI
from web3.utils.events import get_event_data
from websockets import ConnectionClosed
OrderValues = namedtuple(
"OrderValues",
("consumer", "amount", "serviceId", "startedAt", "marketFeeCollector", "marketFee"),
)
@enforce_types_shim
class DataToken(ContractBase):
CONTRACT_NAME = "DataTokenTemplate"
DEFAULT_CAP = 1000.0
DEFAULT_CAP_BASE = to_base_18(DEFAULT_CAP)
ORDER_STARTED_EVENT = "OrderStarted"
ORDER_FINISHED_EVENT = "OrderFinished"
OPF_FEE_PERCENTAGE = 0.001
MAX_MARKET_FEE_PERCENTAGE = 0.001
def get_event_signature(self, event_name):
try:
e = getattr(self.events, event_name)
except MismatchedABI:
raise ValueError(
f"Event {event_name} not found in {self.CONTRACT_NAME} contract."
)
abi = e().abi
types = [param["type"] for param in abi["inputs"]]
sig_str = f'{event_name}({",".join(types)})'
return Web3.sha3(text=sig_str).hex()
def get_start_order_logs(
self,
web3,
consumer_address=None,
from_block=0,
to_block="latest",
from_all_tokens=False,
):
topic0 = self.get_event_signature(self.ORDER_STARTED_EVENT)
topics = [topic0]
if consumer_address:
topic1 = f"0x000000000000000000000000{consumer_address[2:].lower()}"
topics = [topic0, None, topic1]
filter_params = {"fromBlock": from_block, "toBlock": to_block, "topics": topics}
if not from_all_tokens:
# get logs only for this token address
filter_params["address"] = self.address
e = getattr(self.events, self.ORDER_STARTED_EVENT)
event_abi = e().abi
logs = web3.eth.getLogs(filter_params)
parsed_logs = []
for lg in logs:
parsed_logs.append(get_event_data(event_abi, lg))
return parsed_logs
def get_transfer_events_in_range(self, from_block, to_block):
name = "Transfer"
event = getattr(self.events, name)
return self.getLogs(
event, Web3Provider.get_web3(), fromBlock=from_block, toBlock=to_block
)
def get_all_transfers_from_events(
self, start_block: int, end_block: int, chunk: int = 1000
) -> tuple:
_from = start_block
_to = _from + chunk - 1
transfer_records = []
error_count = 0
_to = min(_to, end_block)
while _from <= end_block:
try:
logs = self.get_transfer_events_in_range(_from, _to)
transfer_records.extend(
[
(
lg.args["from"],
lg.args.to,
lg.args.value,
lg.blockNumber,
lg.transactionHash.hex(),
lg.logIndex,
lg.transactionIndex,
)
for lg in logs
]
)
_from = _to + 1
_to = min(_from + chunk - 1, end_block)
error_count = 0
if (_from - start_block) % chunk == 0:
print(
f" So far processed {len(transfer_records)} Transfer events from {_from-start_block} blocks."
)
except requests.exceptions.ReadTimeout as err:
print(f"ReadTimeout ({_from}, {_to}): {err}")
error_count += 1
if error_count > 1:
break
return transfer_records, min(_to, end_block) # can have duplicates
def get_transfer_event(self, block_number, sender, receiver):
event = getattr(self.events, "Transfer")
filter_params = {"from": sender, "to": receiver}
event_filter = EventFilter(
"Transfer",
event,
filter_params,
from_block=block_number - 1,
to_block=block_number + 10,
)
logs = event_filter.get_all_entries(max_tries=10)
if not logs:
return None
if len(logs) > 1:
raise AssertionError(
f"Expected a single transfer event at "
f"block {block_number}, but found {len(logs)} events."
)
return logs[0]
def verify_transfer_tx(self, tx_id, sender, receiver):
w3 = Web3Provider.get_web3()
tx = w3.eth.getTransaction(tx_id)
if not tx:
raise AssertionError("Transaction is not found, or is not yet verified.")
if tx["from"] != sender or tx["to"] != self.address:
raise AssertionError(
f"Sender and receiver in the transaction {tx_id} "
f"do not match the expected consumer and contract addresses."
)
_iter = 0
while tx["blockNumber"] is None:
time.sleep(0.1)
tx = w3.eth.getTransaction(tx_id)
_iter = _iter + 1
if _iter > 100:
break
tx_receipt = self.get_tx_receipt(tx_id)
if tx_receipt.status == 0:
raise AssertionError("Transfer transaction failed.")
logs = getattr(self.events, "Transfer")().processReceipt(tx_receipt)
transfer_event = logs[0] if logs else None
# transfer_event = self.get_transfer_event(tx['blockNumber'], sender, receiver)
if not transfer_event:
raise AssertionError(
f"Cannot find the event for the transfer transaction with tx id {tx_id}."
)
assert (
len(logs) == 1
), f"Multiple Transfer events in the same transaction !!! {logs}"
if (
transfer_event.args["from"] != sender
or transfer_event.args["to"] != receiver
):
raise AssertionError(
"The transfer event from/to do not match the expected values."
)
return tx, transfer_event
def get_event_logs(
self, event_name, filter_args=None, from_block=0, to_block="latest"
):
event = getattr(self.events, event_name)
filter_params = filter_args or {}
event_filter = EventFilter(
event_name, event, filter_params, from_block=from_block, to_block=to_block
)
logs = event_filter.get_all_entries(max_tries=10)
if not logs:
return []
return logs
def verify_order_tx(self, web3, tx_id, did, service_id, amount_base, sender):
event = getattr(self.events, self.ORDER_STARTED_EVENT)
try:
tx_receipt = self.get_tx_receipt(tx_id)
except ConnectionClosed:
# try again in this case
tx_receipt = self.get_tx_receipt(tx_id)
if tx_receipt is None:
raise AssertionError(
"Failed to get tx receipt for the `startOrder` transaction.."
)
if tx_receipt.status == 0:
raise AssertionError("order transaction failed.")
receiver = self.contract_concise.minter()
event_logs = event().processReceipt(tx_receipt)
order_log = event_logs[0] if event_logs else None
if not order_log:
raise AssertionError(
f"Cannot find the event for the order transaction with tx id {tx_id}."
)
assert (
len(event_logs) == 1
), f"Multiple order events in the same transaction !!! {event_logs}"
asset_id = remove_0x_prefix(did).lower()
assert (
asset_id == remove_0x_prefix(self.address).lower()
), "asset-id does not match the datatoken id."
if str(order_log.args.serviceId) != str(service_id):
raise AssertionError(
f"The asset id (DID) or service id in the event does "
f"not match the requested asset. \n"
f"requested: (did={did}, serviceId={service_id}\n"
f"event: (serviceId={order_log.args.serviceId}"
)
target_amount = amount_base - self.calculate_fee(
amount_base, self.OPF_FEE_PERCENTAGE
)
if order_log.args.mrktFeeCollector and order_log.args.marketFee > 0:
assert order_log.args.marketFee <= (
self.calculate_fee(amount_base, self.MAX_MARKET_FEE_PERCENTAGE) + 5
), (
f"marketFee {order_log.args.marketFee} exceeds the expected maximum "
f"of {self.calculate_fee(amount_base, self.MAX_MARKET_FEE_PERCENTAGE)} "
f"based on feePercentage={self.MAX_MARKET_FEE_PERCENTAGE} ."
)
target_amount = target_amount - order_log.args.marketFee
# verify sender of the tx using the Tx record
tx = web3.eth.getTransaction(tx_id)
if sender not in [order_log.args.consumer, order_log.args.payer]:
raise AssertionError(
"sender of order transaction is not the consumer/payer."
)
transfer_logs = self.events.Transfer().processReceipt(tx_receipt)
receiver_to_transfers = {}
for tr in transfer_logs:
if tr.args.to not in receiver_to_transfers:
receiver_to_transfers[tr.args.to] = []
receiver_to_transfers[tr.args.to].append(tr)
if receiver not in receiver_to_transfers:
raise AssertionError(
f"receiver {receiver} is not found in the transfer events."
)
transfers = sorted(receiver_to_transfers[receiver], key=lambda x: x.args.value)
total = sum(tr.args.value for tr in transfers)
if total < (target_amount - 5):
raise ValueError(
f"transferred value does meet the service cost: "
f"service.cost - fees={from_base_18(target_amount)}, "
f"transferred value={from_base_18(total)}"
)
return tx, order_log, transfers[-1]
def download(self, wallet: Wallet, tx_id: str, destination_folder: str):
url = self.blob()
download_url = (
f"{url}?"
f"consumerAddress={wallet.address}"
f"&dataToken={self.address}"
f"&transferTxId={tx_id}"
)
response = get_requests_session().get(download_url, stream=True)
file_name = f"file-{self.address}"
DataServiceProvider.write_file(response, destination_folder, file_name)
return os.path.join(destination_folder, file_name)
def token_balance(self, account: str):
return from_base_18(self.balanceOf(account))
def _get_url_from_blob(self, int_code):
try:
url_object = json.loads(self.blob())
except json.decoder.JSONDecodeError:
return None
assert (
url_object["t"] == int_code
), "This datatoken does not appear to have a direct consume url."
return url_object.get("url")
def get_metadata_url(self):
# grab the metadatastore URL from the DataToken contract (@token_address)
return self._get_url_from_blob(1)
def get_simple_url(self):
return self._get_url_from_blob(0)
# ============================================================
# Token transactions using amount of tokens as a float instead of int
# amount of tokens will be converted to the base value before sending
# the transaction
def approve_tokens(
self, spender: str, value: float, from_wallet: Wallet, wait: bool = False
):
txid = self.approve(spender, to_base_18(value), from_wallet)
if wait:
self.get_tx_receipt(txid)
return txid
def mint_tokens(self, to_account: str, value: float, from_wallet: Wallet):
return self.mint(to_account, to_base_18(value), from_wallet)
def transfer_tokens(self, to: str, value: float, from_wallet: Wallet):
return self.transfer(to, to_base_18(value), from_wallet)
################
# Helpers
@staticmethod
def get_max_fee_percentage():
return DataToken.OPF_FEE_PERCENTAGE + DataToken.MAX_MARKET_FEE_PERCENTAGE
@staticmethod
def calculate_max_fee(amount):
return DataToken.calculate_fee(amount, DataToken.get_max_fee_percentage())
@staticmethod
def calculate_fee(amount, percentage):
return int(amount * to_base_18(percentage) / to_base_18(1.0))
@staticmethod
def calculate_balances(transfers):
_from = [t[0].lower() for t in transfers]
_to = [t[1].lower() for t in transfers]
_value = [t[2] for t in transfers]
a_to_value = dict()
a_to_value.update({a: 0 for a in _from})
a_to_value.update({a: 0 for a in _to})
for i, acc_f in enumerate(_from):
v = int(_value[i])
a_to_value[acc_f] -= v
a_to_value[_to[i]] += v
return a_to_value
def get_info(self, web3, from_block, to_block, include_holders=False):
contract = self.contract_concise
minter = contract.minter()
all_transfers, _ = self.get_all_transfers_from_events(from_block, to_block)
order_logs = self.get_start_order_logs(
web3, from_block=from_block, to_block=to_block
)
holders = []
if include_holders:
a_to_balance = DataToken.calculate_balances(all_transfers)
_min = to_base_18(0.000001)
holders = sorted(
[(a, from_base_18(b)) for a, b in a_to_balance.items() if b > _min],
key=lambda x: x[1],
reverse=True,
)
return {
"address": self.address,
"name": contract.name(),
"symbol": contract.symbol(),
"decimals": contract.decimals(),
"cap": from_base_18(contract.cap()),
"totalSupply": from_base_18(contract.totalSupply()),
"minter": minter,
"minterBalance": self.token_balance(minter),
"numHolders": len(holders),
"holders": holders,
"numOrders": len(order_logs),
}
# ============================================================
# reflect DataToken Solidity methods
def blob(self) -> str:
return self.contract_concise.blob()
def datatoken_name(self) -> str:
return self.contract_concise.name()
def symbol(self) -> str:
return self.contract_concise.symbol()
def cap(self) -> str:
return self.contract_concise.cap()
def decimals(self) -> str:
return self.contract_concise.decimals()
def totalSupply(self) -> str:
return self.contract_concise.totalSupply()
def allowance(self, owner_address: str, spender_address: str) -> str:
return self.contract_concise.allowance(owner_address, spender_address)
def balanceOf(self, account: str) -> int:
return self.contract_concise.balanceOf(account)
def mint(self, to_account: str, value_base: int, from_wallet: Wallet) -> str:
return self.send_transaction("mint", (to_account, value_base), from_wallet)
def approve(self, spender: str, value_base: int, from_wallet: Wallet) -> str:
return self.send_transaction("approve", (spender, value_base), from_wallet)
def transfer(self, to: str, value_base: int, from_wallet: Wallet) -> str:
return self.send_transaction("transfer", (to, value_base), from_wallet)
def proposeMinter(self, new_minter, from_wallet) -> str:
return self.send_transaction("proposeMinter", (new_minter,), from_wallet)
def approveMinter(self, from_wallet) -> str:
return self.send_transaction("approveMinter", (), from_wallet)
def startOrder(
self,
consumer: str,
amount: int,
serviceId: int,
mrktFeeCollector: str,
from_wallet: Wallet,
):
return self.send_transaction(
"startOrder", (consumer, amount, serviceId, mrktFeeCollector), from_wallet
)
def finishOrder(
self,
orderTxId: str,
consumer: str,
amount: int,
serviceId: int,
from_wallet: Wallet,
):
return self.send_transaction(
"finishOrder", (orderTxId, consumer, amount, serviceId), from_wallet
)
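# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of data_token.py): the fee and balance
# helpers are pure functions of their inputs, so they can be exercised
# without a web3 connection. Addresses and amounts below are made up;
# amounts are expressed in base-18 integer units via to_base_18.
if __name__ == "__main__":
    fee = DataToken.calculate_fee(to_base_18(10.0), DataToken.OPF_FEE_PERCENTAGE)
    transfers = [
        # (from, to, value, blockNumber, txHash, logIndex, txIndex)
        ("0xAAA", "0xBBB", to_base_18(4.0), 1, "0xhash1", 0, 0),
        ("0xBBB", "0xAAA", to_base_18(1.0), 2, "0xhash2", 0, 0),
    ]
    balances = DataToken.calculate_balances(transfers)
    print(fee, balances)  # net base-18 balance per (lower-cased) address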
| 36.324153
| 120
| 0.602858
| 2,018
| 17,145
| 4.886521
| 0.156095
| 0.020282
| 0.01582
| 0.020282
| 0.2662
| 0.189332
| 0.12017
| 0.084576
| 0.060744
| 0.047358
| 0
| 0.013726
| 0.298863
| 17,145
| 471
| 121
| 36.401274
| 0.806588
| 0.038903
| 0
| 0.159269
| 0
| 0
| 0.130564
| 0.027183
| 0
| 0
| 0.001581
| 0
| 0.044386
| 1
| 0.093995
| false
| 0
| 0.046997
| 0.060052
| 0.263708
| 0.005222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
728fbc5bfc6f1e3c71f6dc0d49a88fbd6f828cf5
| 18,655
|
py
|
Python
|
cgmodsel/prox.py
|
chrlen/cgmodsel
|
1d7336e173289468d55897b1aa044bf98c3c1a6b
|
[
"MIT"
] | null | null | null |
cgmodsel/prox.py
|
chrlen/cgmodsel
|
1d7336e173289468d55897b1aa044bf98c3c1a6b
|
[
"MIT"
] | null | null | null |
cgmodsel/prox.py
|
chrlen/cgmodsel
|
1d7336e173289468d55897b1aa044bf98c3c1a6b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Frank Nussbaum (frank.nussbaum@uni-jena.de), 2019
"""
import numpy as np
#import scipy
#import abc
#import time
from scipy.optimize import approx_fprime
from scipy.linalg import eigh
from scipy import optimize
from cgmodsel.utils import _logsumexp_condprobs_red
#from cgmodsel.utils import logsumexp
from cgmodsel.base_solver import BaseGradSolver
# pylint: disable=unbalanced-tuple-unpacking
# pylint: disable=W0511 # todos
# pylint: disable=R0914 # too many locals
###############################################################################
# prox for PLH objective
###############################################################################
class LikelihoodProx(BaseGradSolver):
"""
solve pseudo-log-likelihood proximal operator
"""
def __init__(self, cat_data, cont_data, meta):
""""must provide with dictionary meta"""
super().__init__() # Python 3 syntax
self.cat_data = cat_data
self.cont_data = cont_data
self.meta = meta
self._fold = np.inf
# overridden attributes
ltot = meta['ltot']
n_cg = meta['n_cg']
self.shapes = [
('Q', (ltot, ltot)),
('u', (ltot, 1)),
('R', (n_cg, ltot)),
('F2tiL', (n_cg, n_cg)), # construct Lambda = A * A.T
('alpha', (n_cg, 1))
]
self.n_params = sum([np.prod(shape[1]) for shape in self.shapes])
def clean_theta(self, theta):
"""
make pairwise parameter matrix feasible for likelihood prox solver
-> modifies Theta
"""
# copies upper triangle of Theta to lower triangle to symmetrize
# Furthermore, all elements on the block-diagonal of the discrete
# are set to zero, except diagonal elements
# since these correspond to univariate discrete sufficient statistics
optvars = self._theta_to_x(theta, np.zeros(self.meta['n_cg']))
return self._x_to_thetaalpha(optvars)[0]
###############################################################################
# Solver for Pseudo-likelihood Prox operator
###############################################################################
def callback_plh(self, optvars, handle_fg):
"""callback to check for potential bugs"""
fnew = handle_fg(optvars)[0]
if not fnew <= self._fold:
string = 'Potential scipy bug, fvalue increased in last iteration'
print('Warning(CG_base_ADMM.callback_plh): %s' % (string))
self._fold = fnew
def solve(self, mat_z, prox_param, old_thetaalpha):
""" solve proximal mapping of negative pseudo loglikelihood
min_{Theta, alpha} l_p(Theta, alpha) + 1 / (2mu) * ||Theta-Z||_F^2
known issue with ADMM:
not doing warm starts may cause problems if the solution is too inexact;
generally ADMM convergence requires very exact solutions
-> use ftol to control the tolerance, or refine to control #restarts
"""
# split Z (since in determining the prox objective
# the split components are used)
ltot = self.meta['ltot']
n_cg = self.meta['n_cg']
zmat_q = mat_z[:ltot, :ltot].copy()
zmat_r = mat_z[ltot:, :ltot]
zmat_b = mat_z[ltot:, ltot:].copy()
zbeta = np.diag(zmat_b).copy().reshape((n_cg, 1))
zmat_b -= np.diag(np.diag(zmat_b))
zvec_u = np.diag(zmat_q).copy().reshape((ltot, 1))
zmat_q -= np.diag(np.diag(zmat_q))
components_z = zmat_q, zvec_u, zmat_r, zmat_b, zbeta
handle_fg = lambda optvars: \
self.get_fval_and_grad(optvars, components_z, prox_param)
## solve proximal mapping
# x0 = self.get_rand_startingpoint()
x_init = self._theta_to_x(*old_thetaalpha)
# starting point as vector, save for input parameters
f_init = handle_fg(x_init)[0]
self._fold = f_init
## bounds that respect identifiability constraints
bnds = ltot**2 * [(-np.inf, np.inf)] # Q, only upper triangle is used
bnds += ltot * [(-np.inf, np.inf)] # u
# TODO(franknu) note: if use_u = 0 this is enforced in main ADMM updates
bnds += (n_cg * ltot + n_cg**2) * [(-np.inf, np.inf)] # R, fac_lambda
if self.opts['use_alpha']:
bnds += n_cg * [(-np.inf, np.inf)]
else:
bnds += n_cg * [(0, 0)]
# TODO(franknu): use zerobounds for block diagonal of Q?
## further solver properties
callback = lambda optvars: self.callback_plh(optvars, handle_fg)
correctionpairs = min(len(bnds) - 1, 10)
res = optimize.minimize(handle_fg,
x_init,
method='L-BFGS-B',
jac=True,
bounds=bnds,
options={
'maxcor': correctionpairs,
'maxiter': self.opts['maxiter'],
'ftol': self.opts['tol']
},
callback=callback)
if not res.message.startswith(b'CONV'): # solver did not converge
print('PLH_prox scipy-solver message:', res.message)
_, _, _, fac_lambda, _ = self.unpack(res.x)
if np.linalg.norm(fac_lambda) < 1e-5 and n_cg > 0:
# TODO(franknu): certificate for optimality?
print('Warning(solve): Lambda = F F^T with F ~ zero')
theta, alpha = self._x_to_thetaalpha(res.x)
return theta, alpha
def preprocess(self, optvars):
""" unpack parameters from vector x and preprocess
this modifies x (x is not safe for reuse)"""
glims = self.meta['cat_glims']
sizes = self.meta['sizes']
mat_q, vec_u, mat_r, fac_lambda, alpha = self.unpack(optvars) # pylint: disable=unbalanced-tuple-unpacking
for r in range(self.meta['n_cat']): # set block-diagonal to zero
mat_q[glims[r]:glims[r+1], glims[r]:glims[r+1]] = \
np.zeros((sizes[r], sizes[r]))
mat_q = np.triu(mat_q)
mat_q = mat_q + mat_q.T
return mat_q, vec_u, mat_r, fac_lambda, alpha
def get_fval_and_grad(self, optvars, components_z, prox_param, eps=1e-15):
"""calculate function value f and gradient g of
plh(Theta, alpha) + 1 / (2prox_param) ||Theta - Z||_F^2,
where Theta, alpha are contained in the vector x of parameters
"""
ltot = self.meta['ltot']
n_cg = self.meta['n_cg']
n_data = self.meta['n_data']
glims = self.meta['cat_glims']
sizes = self.meta['sizes']
## unpack parameters from vector optvars
mat_q, vec_u, mat_r, fac_lambda, alpha = \
self.preprocess(optvars)
mat_b, beta = self._faclambda_to_bbeta(fac_lambda)
beta += eps * np.ones(beta.shape)  # improve numerical stability
# this avoids a beta that contains zeros
# precision matrix = FLa*FLa.T + eps * eye(n_cg)
# initialize gradients
grad = np.zeros(self.n_params)
grad_q, grad_u, grad_r, grad_faclambda, grad_alpha = self.unpack(grad)
grad_tila = np.zeros((n_cg, n_cg))
grad_beta = np.zeros((n_cg, 1))
vec_ones = np.ones((n_data, 1))
## ** discrete node conditionals **
lh_cat = 0
mat_w = np.dot(self.cont_data, mat_r) + np.dot(self.cat_data, mat_q) \
+ np.dot(vec_ones, vec_u.T) # n_data by ltot
cond_probs = np.empty((n_data, ltot)) # conditional probs given data
for r in range(self.meta['n_cat']):
mat_wr = mat_w[:, glims[r]:glims[r + 1]] # view of W
mat_dr = self.cat_data[:, glims[r]:glims[r + 1]] # view
tmp_logsumexp, tmp_conditionalprobs = \
_logsumexp_condprobs_red(mat_wr)
# uses numerically stable exp
cond_probs[:, glims[r]:glims[r + 1]] = tmp_conditionalprobs
lh_catr = -np.sum(np.sum(np.multiply(mat_wr, mat_dr), axis=1) \
- tmp_logsumexp)
lh_cat += lh_catr
# print('lD', lh_cat/n_data)
# gradients
cond_probs = cond_probs - self.cat_data
grad_u = np.sum(cond_probs, 0) # Ltot by 1
grad_r = np.dot(self.cont_data.T, cond_probs)
grad_q = np.dot(self.cat_data.T, cond_probs)
# this is Phihat from the doc, later add transpose and zero out diagonal
## ** Gaussian node conditionals **
mat_m = np.dot(vec_ones, alpha.T) + np.dot(self.cat_data, mat_r.T) \
- np.dot(self.cont_data, mat_b) # n by dg, concatenation of mu_s
mat_delta = mat_m.copy()
for s in range(n_cg):
mat_delta[:, s] /= beta[s]
mat_delta -= self.cont_data # residual
tmp = np.dot(mat_delta, np.diag(np.sqrt(beta.flatten())))
lh_cont = - 0.5 * n_data * np.sum(np.log(beta)) \
+ 0.5 * np.linalg.norm(tmp, 'fro') ** 2
# print('lG', lh_cont/n_data)
# gradients
# grad_tila: n_cg by n_cg, later add transpose and zero out diagonal
grad_tila = -np.dot(self.cont_data.T, mat_delta)
grad_tila -= np.diag(np.diag(grad_tila))
grad_tila = 0.5 * (grad_tila + grad_tila.T)
for s in range(n_cg):
grad_beta[s] = -.5 * n_data / beta[s] + \
.5 * np.linalg.norm(mat_delta[:, s], 2) ** 2 \
- 1 / beta[s] * np.dot(mat_delta[:, s].T, mat_m[:, s])
grad_alpha = np.sum(mat_delta, 0).T # dg by 1
grad_r += np.dot(mat_delta.T, self.cat_data)
# scale gradients as likelihood
grad_q /= n_data
grad_u /= n_data
grad_r /= n_data
grad_tila /= n_data
grad_beta /= n_data
grad_alpha /= n_data
## add quad term 1/2mu * ||([Q+2diag(u)] & R^T \\ R &-Lambda)-Z||_F^2
zmat_q, zvec_u, zmat_r, zmat_b, zbeta = components_z
fsquare = 0
fsquare += np.sum(np.square(mat_q - zmat_q))
fsquare += np.sum(np.square(2 * vec_u - zvec_u))
# note that u is only half of discrete diagonal
fsquare += 2 * np.sum(np.square(mat_r - zmat_r))
fsquare += np.sum(np.square(-mat_b - zmat_b))
# remember neg sign of Lambda in Theta
fsquare += np.sum(np.square(-beta - zbeta))
fsquare /= 2 * prox_param
# print('fsquare', fsquare)
# gradients quadratic term
grad_q += (mat_q - zmat_q) / prox_param
grad_u = grad_u.reshape(
(ltot, 1)) # since with dc=0 gradu has shape (0,)
grad_u += 2 * (2 * vec_u - zvec_u) / prox_param
grad_r += 2 * (mat_r - zmat_r) / prox_param
grad_tila += (mat_b + zmat_b) / prox_param # has zero diagonal
grad_beta += (beta + zbeta) / prox_param
## gradients to only upper triangle
for r in range(self.meta['n_cat']): # set block-diagonal to zero
grad_q[glims[r]:glims[r+1], glims[r]:glims[r+1]] = \
np.zeros((sizes[r], sizes[r]))
grad_q = np.triu(grad_q) + np.tril(grad_q).T
grad_tila += np.diag(grad_beta.flatten()) # add gradient of diagonal
grad_faclambda = 2 * np.dot(grad_tila, fac_lambda)
# note that fac_lambda initialized at 0 always leads to 0 gradient
fval = 1 / n_data * (lh_cat + lh_cont) + fsquare
grad = self.pack((grad_q, grad_u, grad_r, grad_faclambda, grad_alpha))
return fval, grad.reshape(-1)
def callback(self, optvars, component_z, prox_param, approxgrad=1):
"""a callback function that serves primarily for debugging"""
fval, grad = self.get_fval_and_grad(optvars, component_z, prox_param)
print('f=', fval)
if approxgrad: # gradient check
func_handle_f = lambda optvars: \
self.get_fval_and_grad(optvars, component_z, prox_param)[0]
eps = np.sqrt(np.finfo(float).eps) # ~1.49E-08 at my machine
gprox = approx_fprime(optvars, func_handle_f, eps)
diff = grad - gprox
normdiff = np.linalg.norm(diff)
if normdiff > 1e-4:
print('g_exct', grad)
print('g_prox', gprox)
# print('g-gprox',self.unpack(diff))
# print('quot',g/proxg)
print('graddev=', np.linalg.norm(diff))
def _faclambda_to_bbeta(self, fac_lambda):
""" construct precision matrix, then extract diagonal """
mat_b = np.dot(fac_lambda, fac_lambda.T) # PSD precision matrix
beta = np.diag(mat_b).copy().reshape((self.meta['n_cg'], 1)) # diagonal
mat_b -= np.diag(np.diag(mat_b)) # off-diagonal elements
return mat_b, beta
def _theta_to_tuple(self, theta):
""" split Theta into its components
(safe: returns copies of the data in Theta, Theta is not modified)"""
ltot = self.meta['ltot']
glims = self.meta['cat_glims']
sizes = self.meta['sizes']
mat_q = theta[:ltot, :ltot].copy()
mat_r = theta[ltot:, :ltot].copy()
lbda = -theta[ltot:, ltot:]
# print(Lambda)
# FLa = np.linalg.cholesky(Lambda) # fails if not PD
if self.meta['n_cg'] > 0:
eig, mat_u = eigh(lbda)
# print('las', las)
eig[eig < 1e-16] = 0 # make more robust
fac_lambda = np.dot(mat_u, np.diag(np.sqrt(eig)))
# print('chol-error', np.linalg.norm(np.dot(FLa, FLa.T) - Lambda))
else:
fac_lambda = np.empty((0, 0))
vec_u = 0.5 * np.diag(mat_q).copy().reshape((ltot, 1))
for r in range(self.meta['n_cat']): # set block diagonal to zero
mat_q[glims[r]:glims[r+1], glims[r]:glims[r+1]] = \
np.zeros((sizes[r], sizes[r]))
mat_q = np.triu(mat_q) # use only upper triangle
mat_q = mat_q + mat_q.T
return mat_q, vec_u, mat_r, fac_lambda
def _theta_to_x(self, theta, alpha):
"""takes Theta, cleans it (symmetrize etc.) and pack into x
(safe: Theta is not modified)"""
return self.pack(list(self._theta_to_tuple(theta)) + [alpha])
def _x_to_thetaalpha(self, optvars):
""" convert vectorized x to parameter matrix Theta
(safe: optvars is not modified) """
mat_q, vec_u, mat_r, fac_lambda, alpha = self.unpack(optvars)
ltot = self.meta['ltot']
glims = self.meta['cat_glims']
sizes = self.meta['sizes']
dim = self.meta['dim']
# set parameters in upper triangle
theta = np.empty((dim, dim))
theta[:ltot, :ltot] = mat_q
for r in range(self.meta['n_cat']): # set block-diagonal to zero
theta[glims[r]:glims[r+1], glims[r]:glims[r+1]] = \
np.zeros((sizes[r], sizes[r]))
theta[:ltot, ltot:] = mat_r.T
## symmetric matrix from upper triangle
theta = np.triu(theta)
theta = theta + theta.T
## Lambda
mat_lbda = np.dot(fac_lambda, fac_lambda.T)
theta[ltot:, ltot:] = -mat_lbda
## add diagonal
theta[:ltot, :ltot] += 2 * np.diag(vec_u.flatten())
return theta, alpha
def get_rand_startingpoint(self):
""" not needed if using warm starts """
n_cg = self.meta['n_cg']
x_init = np.random.random(self.n_params)
x_init[self.n_params - n_cg:] = np.ones(n_cg)
return x_init
def plh(self, theta, alpha, cval=False):
""" return negative pseudo-log-likelihood function value
cval .. if True, calculate (node-wise) cross validation error"""
n_cg = self.meta['n_cg']
n_cat = self.meta['n_cat']
n_data = self.meta['n_data']
glims = self.meta['cat_glims']
if cval:
dis_errors = np.zeros(n_cat)
cts_errors = np.zeros(n_cg)
mat_q, vec_u, mat_r, fac_lambda = self._theta_to_tuple(theta) # save
mat_b, beta = self._faclambda_to_bbeta(fac_lambda)
fval = 0
## ** discrete node conditionals **
mat_w = np.dot(self.cont_data, mat_r) + np.dot(self.cat_data, mat_q) + \
np.dot(np.ones((n_data, 1)), vec_u.T) # n by Ltot
for r in range(n_cat):
mat_wr = mat_w[:, glims[r]:glims[r + 1]] # view of W
mat_dr = self.cat_data[:,
glims[r]:glims[r +
1]] # view of self.cat_data
tmp_logsumexp, tmp_conditionalprobs = \
_logsumexp_condprobs_red(mat_wr) # numerically more stable
if cval:
# sum of probabilities of missclassification
dis_errors[r] = n_data - \
np.sum(np.multiply(tmp_conditionalprobs, mat_dr))
# sum over both axes
lh_catr = - np.sum(np.sum(np.multiply(mat_wr, mat_dr), axis=1) \
- tmp_logsumexp)
fval += 1 / n_data * lh_catr
mat_m = np.dot(self.cat_data, mat_r.T) - \
np.dot(self.cont_data, mat_b) # n by dg, concatenation of mu_s
if n_cg > 0:
mat_m += np.outer(np.ones(n_data), alpha)
if cval:
for s in range(n_cg):
cts_errors[s] = np.linalg.norm(self.cont_data[:, s] \
- mat_m[:, s]/beta[s], 2) ** 2
mat_delta = mat_m.copy()
for s in range(n_cg):
mat_delta[:, s] /= beta[s]
mat_delta -= self.cont_data # residual
tmp = np.dot(mat_delta, np.diag(np.sqrt(beta.flatten())))
lh_cont = - 0.5 * n_data * np.sum(np.log(beta)) \
+ 0.5 * np.linalg.norm(tmp, 'fro') ** 2
fval += 1 / n_data * lh_cont
if cval:
return dis_errors, cts_errors, fval
return fval
def crossvalidate(self, theta, alpha):
"""perform cross validation (drop test data) """
n_cg = self.meta['n_cg']
n_cat = self.meta['n_cat']
n_data = self.meta['n_data']
dis_errors, cts_errors, _ = self.plh(theta, alpha, cval=True)
avg_dis_error = 1 / n_data * np.sum(dis_errors)
avg_cts_error = np.sum([np.sqrt(es / n_data) for es in cts_errors
]) # mean RMSEs
cvalerror = avg_dis_error + avg_cts_error
if n_cg > 0:
avg_cts_error /= n_cg
if n_cat > 0:
avg_dis_error /= n_cat
return cvalerror
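# ---------------------------------------------------------------------------
# Hedged standalone sketch (not part of prox.py): how crossvalidate() above
# aggregates the node-wise errors returned by plh(..., cval=True). The
# numbers are made up purely to show the arithmetic.
if __name__ == "__main__":
    import numpy as np

    n_data = 100
    dis_errors = np.array([12.0, 7.5])         # one entry per categorical node
    cts_errors = np.array([30.0, 18.0, 22.0])  # one entry per continuous node
    avg_dis_error = dis_errors.sum() / n_data
    avg_cts_error = sum(np.sqrt(e / n_data) for e in cts_errors)  # sum of per-node RMSEs
    cvalerror = avg_dis_error + avg_cts_error
    print(cvalerror)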
| 38.071429
| 114
| 0.557438
| 2,584
| 18,655
| 3.828947
| 0.164861
| 0.012432
| 0.016374
| 0.015767
| 0.312412
| 0.255407
| 0.240752
| 0.228623
| 0.219426
| 0.193046
| 0
| 0.010336
| 0.305012
| 18,655
| 489
| 115
| 38.149284
| 0.752796
| 0.23554
| 0
| 0.270833
| 0
| 0
| 0.031859
| 0.002575
| 0
| 0
| 0
| 0.00409
| 0
| 1
| 0.048611
| false
| 0
| 0.020833
| 0
| 0.114583
| 0.024306
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7290644aa942c45a2f6b6c965febcc5dc6a44a31
| 17,758
|
py
|
Python
|
openmdao.main/src/openmdao/main/linearsolver.py
|
MrShoks/OpenMDAO-Framework
|
412f34ffe31a95631fbe55ca7d75b84669ae8f8c
|
[
"Apache-2.0"
] | 1
|
2020-06-28T20:38:56.000Z
|
2020-06-28T20:38:56.000Z
|
openmdao.main/src/openmdao/main/linearsolver.py
|
MrShoks/OpenMDAO-Framework
|
412f34ffe31a95631fbe55ca7d75b84669ae8f8c
|
[
"Apache-2.0"
] | null | null | null |
openmdao.main/src/openmdao/main/linearsolver.py
|
MrShoks/OpenMDAO-Framework
|
412f34ffe31a95631fbe55ca7d75b84669ae8f8c
|
[
"Apache-2.0"
] | null | null | null |
""" Linear solvers that are used to solve for the gradient of an OpenMDAO System.
(Not to be confused with the OpenMDAO Solver classes.)
"""
# pylint: disable=E0611, F0401
import numpy as np
from scipy.sparse.linalg import gmres, LinearOperator
from openmdao.main.mpiwrap import MPI
from openmdao.util.graph import fix_single_tuple
from openmdao.util.log import logger
if MPI:
from petsc4py import PETSc
else:
class PETSc(object):
# Dummy class so things parse.
pass
class LinearSolver(object):
""" A base class for linear solvers """
def __init__(self, system):
""" Set up any LinearSolver object """
self._system = system
self.options = system.options
def _norm(self):
""" Computes the norm of the linear residual """
system = self._system
system.rhs_vec.array[:] = 0.0
system.applyJ(system.vector_vars.keys())
system.rhs_vec.array[:] *= -1.0
system.rhs_vec.array[:] += system.rhs_buf[:]
if MPI:
system.rhs_vec.petsc.assemble()
return system.rhs_vec.petsc.norm()
else:
return np.linalg.norm(system.rhs_vec.array)
class ScipyGMRES(LinearSolver):
""" Scipy's GMRES Solver. This is a serial solver, so
it should never be used in an MPI setting.
"""
def __init__(self, system):
""" Set up ScipyGMRES object """
super(ScipyGMRES, self).__init__(system)
n_edge = system.vec['f'].array.size
system.rhs_buf = np.zeros((n_edge, ))
system.sol_buf = np.zeros((n_edge, ))
self.A = LinearOperator((n_edge, n_edge),
matvec=self.mult,
dtype=float)
def calc_gradient(self, inputs, outputs, return_format='array'):
""" Run GMRES solver to return a Jacobian of outputs
with respect to inputs.
"""
system = self._system
RHS = system.rhs_buf
A = self.A
# Size the problem
num_input = system.get_size(inputs)
num_output = system.get_size(outputs)
if return_format == 'dict':
J = {}
for okey in outputs:
J[okey] = {}
for ikey in inputs:
if isinstance(ikey, tuple):
ikey = ikey[0]
J[okey][ikey] = None
else:
J = np.zeros((num_output, num_input))
if system.mode == 'adjoint':
outputs, inputs = inputs, outputs
# If Forward mode, solve linear system for each parameter
# If Adjoint mode, solve linear system for each requested output
j = 0
for param in inputs:
if isinstance(param, tuple):
param = param[0]
in_indices = system.vec['u'].indices(system.scope, param)
jbase = j
for irhs in in_indices:
RHS[irhs] = 1.0
# Call GMRES to solve the linear system
dx = self.solve(RHS)
RHS[irhs] = 0.0
i = 0
for item in outputs:
if isinstance(item, tuple):
item = item[0]
out_indices = system.vec['u'].indices(system.scope, item)
nk = len(out_indices)
if return_format == 'dict':
if system.mode == 'forward':
if J[item][param] is None:
J[item][param] = np.zeros((nk, len(in_indices)))
J[item][param][:, j-jbase] = dx[out_indices]
else:
if J[param][item] is None:
J[param][item] = np.zeros((len(in_indices), nk))
J[param][item][j-jbase, :] = dx[out_indices]
else:
if system.mode == 'forward':
J[i:i+nk, j] = dx[out_indices]
else:
J[j, i:i+nk] = dx[out_indices]
i += nk
j += 1
#print inputs, '\n', outputs, '\n', J
return J
def solve(self, arg):
""" Solve the coupled equations for a new state vector that nulls the
residual. Used by the Newton solvers."""
system = self._system
options = self.options
A = self.A
#print system.name, 'Linear solution start vec', system.rhs_vec.array
# Call GMRES to solve the linear system
dx, info = gmres(A, arg,
tol=options.atol,
maxiter=options.maxiter)
if info > 0:
msg = "ERROR in calc_gradient in '%s': gmres failed to converge " \
"after %d iterations"
logger.error(msg, system.name, info)
elif info < 0:
msg = "ERROR in calc_gradient in '%s': gmres failed"
logger.error(msg, system.name)
#print system.name, 'Linear solution vec', -dx
return dx
def mult(self, arg):
""" GMRES Callback: applies Jacobian matrix. Mode is determined by the
system."""
system = self._system
system.sol_vec.array[:] = arg[:]
# Start with a clean slate
system.rhs_vec.array[:] = 0.0
system.clear_dp()
if system._parent_system:
vnames = system._parent_system._relevant_vars
else:
vnames = system.flat_vars.keys()
system.applyJ(vnames)
#print system.name, 'mult: arg, result', arg, system.rhs_vec.array[:]
#print system.rhs_vec.keys()
return system.rhs_vec.array[:]
class PETSc_KSP(LinearSolver):
""" PETSc's KSP solver with preconditioning. MPI is supported."""
def __init__(self, system):
""" Set up KSP object """
super(PETSc_KSP, self).__init__(system)
lsize = np.sum(system.local_var_sizes[system.mpi.rank, :])
size = np.sum(system.local_var_sizes)
jac_mat = PETSc.Mat().createPython([(lsize, size), (lsize, size)],
comm=system.mpi.comm)
jac_mat.setPythonContext(self)
jac_mat.setUp()
self.ksp = PETSc.KSP().create(comm=system.mpi.comm)
self.ksp.setOperators(jac_mat)
self.ksp.setType('fgmres')
self.ksp.setGMRESRestart(1000)
self.ksp.setPCSide(PETSc.PC.Side.RIGHT)
pc_mat = self.ksp.getPC()
pc_mat.setType('python')
pc_mat.setPythonContext(self)
# # Set these in the system
# #mpiprint("KSP: creating sol buf, size %d" % lsize)
system.sol_buf = PETSc.Vec().createWithArray(np.zeros(lsize),
comm=system.mpi.comm)
# #mpiprint("KSP: creating rhs buf, size %d" % lsize)
system.rhs_buf = PETSc.Vec().createWithArray(np.zeros(lsize),
comm=system.mpi.comm)
def calc_gradient(self, inputs, outputs, return_format='dict'):
"""Returns a nested dict of sensitivities if return_format == 'dict'.
"""
if return_format == 'dict':
return self._J_dict_solve(inputs, outputs)
else:
raise RuntimeError("unsupported solve return_format '%s'" % return_format)
def _J_dict_solve(self, inputs, outputs):
"""Returns a dict of sensitivities for given
inputs and outputs.
"""
system = self._system
options = self.options
name2collapsed = system.scope.name2collapsed
inputs = [fix_single_tuple(x) for x in inputs]
outputs = [fix_single_tuple(x) for x in outputs]
J = {}
for okey in outputs:
J[okey] = {}
for ikey in inputs:
J[okey][ikey] = None
if system.mode == 'adjoint':
outputs, inputs = inputs, outputs
self.ksp.setTolerances(max_it=options.maxiter,
atol=options.atol,
rtol=options.rtol)
j = 0
for param in inputs:
param_tup = name2collapsed[param]
param_size = system.get_size(param)
jbase = j
for irhs in xrange(param_size):
solvec = system._compute_derivatives(param_tup, irhs)
for out in outputs:
out_size = system.get_size(out)
if system.mode == 'forward':
if out in solvec:
if J[out][param] is None:
J[out][param] = np.zeros((out_size, param_size))
J[out][param][:, j-jbase] = solvec[out]
else:
del J[out][param]
else:
if out in solvec:
if J[param][out] is None:
J[param][out] = np.zeros((out_size, param_size))
J[param][out][j-jbase, :] = solvec[out]
else:
del J[param][out]
j += 1
return J
def newton(self):
""" Solve the coupled equations for a new state vector that nulls the
residual. Used by the Newton solvers."""
system = self._system
options = self.options
self.ksp.setTolerances(max_it=options.maxiter,
atol=options.atol,
rtol=options.rtol)
system.rhs_vec.array[:] = system.vec['f'].array[:]
#print 'newton start vec', system.vec['f'].array[:]
system.sol_buf.array[:] = system.sol_vec.array[:]
system.rhs_buf.array[:] = system.rhs_vec.array[:]
system.ln_solver.ksp.solve(system.rhs_buf, system.sol_buf)
system.vec['df'].array[:] = -system.sol_buf.array[:]
#print 'newton solution vec', system.vec['df'].array[:]
def mult(self, mat, sol_vec, rhs_vec):
""" KSP Callback: applies Jacobian matrix. Mode is determined by the
system."""
system = self._system
system.sol_vec.array[:] = sol_vec.array[:]
# Start with a clean slate
system.rhs_vec.array[:] = 0.0
system.clear_dp()
system.applyJ(system.vector_vars.keys())
rhs_vec.array[:] = system.rhs_vec.array[:]
# mpiprint('names = %s' % system.sol_vec.keys())
#mpiprint('arg = %s, result=%s' % (sol_vec.array, rhs_vec.array))
#mpiprint('df, du, dp', system.vec['df'].array, system.vec['du'].array, system.vec['dp'].array)
def apply(self, mat, sol_vec, rhs_vec):
""" Applies preconditioner """
#system = self._system
# TODO - Preconditioning is not supported yet, so mimic an Identity
# matrix.
rhs_vec.array[:] = sol_vec.array[:]
#system.rhs_vec.array[:] = sol_vec.array[:]
#system.solve_precon()
#rhs_vec.array[:] = system.sol_vec.array[:]
class LinearGS(LinearSolver):
""" Linear block Gauss Seidel. MPI is not supported yet.
Serial block solve of D x = b - (L+U) x """
def __init__(self, system):
""" Set up LinearGS object """
super(LinearGS, self).__init__(system)
lsize = np.sum(system.local_var_sizes[system.mpi.rank, :])
system.sol_buf = np.zeros(lsize)
system.rhs_buf = np.zeros(lsize)
    def calc_gradient(self, inputs, outputs, return_format='array'):
        """ Run the linear Gauss-Seidel solver to return a Jacobian of outputs
        with respect to inputs.
        """

        system = self._system

        # Size the problem
        # TODO - Support for array slice inputs/outputs
        try:
            num_input = system.get_size(inputs)
            num_output = system.get_size(outputs)
        except KeyError as exc:
            if '[' in str(exc):
                msg = 'Array slice inputs and outputs currently not supported.'
                raise RuntimeError(msg)
            else:
                raise

        n_edge = system.vec['f'].array.size

        if return_format == 'dict':
            J = {}
            for okey in outputs:
                J[okey] = {}
                for ikey in inputs:
                    if isinstance(ikey, tuple):
                        ikey = ikey[0]
                    J[okey][ikey] = None
        else:
            J = np.zeros((num_output, num_input))

        if system.mode == 'adjoint':
            outputs, inputs = inputs, outputs

        # If Forward mode, solve linear system for each parameter
        # If Reverse mode, solve linear system for each requested output
        j = 0
        for param in inputs:

            if isinstance(param, tuple):
                param = param[0]

            in_indices = system.vec['u'].indices(system.scope, param)
            jbase = j

            for irhs in in_indices:

                system.clear_dp()
                system.sol_vec.array[:] = 0.0
                system.rhs_vec.array[:] = 0.0
                system.rhs_vec.array[irhs] = 1.0

                # Perform LinearGS solve
                dx = self.solve(system.rhs_vec.array)

                i = 0
                for item in outputs:

                    if isinstance(item, tuple):
                        item = item[0]

                    out_indices = system.vec['u'].indices(system.scope, item)
                    nk = len(out_indices)

                    if return_format == 'dict':
                        if system.mode == 'forward':
                            if J[item][param] is None:
                                J[item][param] = np.zeros((nk, len(in_indices)))
                            J[item][param][:, j - jbase] = dx[out_indices]
                        else:
                            if J[param][item] is None:
                                J[param][item] = np.zeros((len(in_indices), nk))
                            J[param][item][j - jbase, :] = dx[out_indices]
                    else:
                        if system.mode == 'forward':
                            J[i:i + nk, j] = dx[out_indices]
                        else:
                            J[j, i:i + nk] = dx[out_indices]

                    i += nk

                j += 1

        return J
    def solve(self, arg):
        """ Executes an iterative solver """

        system = self._system

        system.rhs_buf[:] = arg[:]
        system.sol_buf[:] = system.sol_vec.array[:]
        options = self.options

        norm0, norm = 1.0, 1.0
        counter = 0
        while (counter < options.maxiter and norm > options.atol and
               norm / norm0 > options.rtol):

            if system.mode == 'forward':
                for subsystem in system.subsystems(local=True):
                    system.scatter('du', 'dp', subsystem=subsystem)
                    system.rhs_vec.array[:] = 0.0
                    subsystem.applyJ(system.vector_vars.keys())
                    system.rhs_vec.array[:] *= -1.0
                    system.rhs_vec.array[:] += system.rhs_buf[:]
                    sub_options = (options if subsystem.options is None
                                   else subsystem.options)
                    subsystem.solve_linear(sub_options)

            elif system.mode == 'adjoint':
                rev_systems = [item for item in
                               reversed(system.subsystems(local=True))]

                for subsystem in rev_systems:
                    system.sol_buf[:] = system.rhs_buf[:]

                    for subsystem2 in rev_systems:
                        if subsystem is not subsystem2:
                            system.rhs_vec.array[:] = 0.0
                            args = subsystem.vector_vars.keys()
                            subsystem2.applyJ(args)
                            system.scatter('du', 'dp', subsystem=subsystem2)
                            system.vec['dp'].array[:] = 0.0
                            system.sol_buf[:] -= system.rhs_vec.array[:]

                    system.rhs_vec.array[:] = system.sol_buf[:]
                    subsystem.solve_linear(options)

            norm = self._norm()
            counter += 1

        return system.sol_vec.array
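
# Editor's sketch (not part of the original file): the calc_gradient routines
# above build the Jacobian column-by-column (forward) or row-by-row (adjoint)
# by seeding a unit right-hand side for each variable index and running one
# linear solve per seed. A minimal self-contained NumPy illustration, assuming
# a known square system matrix A in place of the matrix-free applyJ operator:

import numpy as np

def unit_seed_jacobian(A, rhs_indices, out_indices):
    """Return selected derivatives by solving A*dx = e_i for each seeded i."""
    n = A.shape[0]
    J = np.zeros((len(out_indices), len(rhs_indices)))
    for col, irhs in enumerate(rhs_indices):
        rhs = np.zeros(n)
        rhs[irhs] = 1.0                # unit seed, as in the loops above
        dx = np.linalg.solve(A, rhs)   # stand-in for the GMRES/KSP/GS solve
        J[:, col] = dx[out_indices]    # scatter the solution into one column
    return J

if __name__ == '__main__':
    A = np.array([[2.0, 1.0], [0.0, 3.0]])
    print(unit_seed_jacobian(A, rhs_indices=[0], out_indices=[0, 1]))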
| 35.374502
| 121
| 0.508954
| 2,029
| 17,758
| 4.341055
| 0.136028
| 0.037239
| 0.036217
| 0.048252
| 0.584582
| 0.513965
| 0.472071
| 0.434151
| 0.40361
| 0.40361
| 0
| 0.007461
| 0.373578
| 17,758
| 501
| 122
| 35.44511
| 0.78434
| 0.207456
| 0
| 0.556667
| 0
| 0
| 0.02513
| 0
| 0
| 0
| 0
| 0.003992
| 0
| 1
| 0.05
| false
| 0.003333
| 0.02
| 0
| 0.116667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7290f4e2e5f1b65887b615200ae0e5b25c830766
| 4,134
|
py
|
Python
|
src/transform.py
|
Andres-CS/wallet-analysis
|
822b8b900a91ab7a2fd76743f174d320e45e98c9
|
[
"Apache-2.0"
] | null | null | null |
src/transform.py
|
Andres-CS/wallet-analysis
|
822b8b900a91ab7a2fd76743f174d320e45e98c9
|
[
"Apache-2.0"
] | null | null | null |
src/transform.py
|
Andres-CS/wallet-analysis
|
822b8b900a91ab7a2fd76743f174d320e45e98c9
|
[
"Apache-2.0"
] | null | null | null |
import csv
import re
'''
Delete a char in a substring of the original string.

Use this function when you want to delete a character
inside a substring but not in the rest of the original
string.

Returns a string.

-- PARAMETERS --
text:  original string
start: start of the substring
end:   end of the substring
char:  char to delete, default is ','.
'''
def deleteInSubString(text, start, end, char=','):
    subText = text[start:(end + 1)]
    commaPos = subText.find(char)

    if commaPos >= 0:
        # Drop only the first occurrence of char inside the substring.
        subText = subText[:commaPos] + subText[commaPos + 1:]
        text = text[:start] + subText + text[end + 1:]
        return text

    return text
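
# Illustrative examples (not in the original file):
#   deleteInSubString("ab,cd,ef", 3, 7)       -> "ab,cdef"  (comma inside "cd,ef" removed)
#   deleteInSubString("ab,cd,ef", 3, 7, ';')  -> "ab,cd,ef" (no ';' in the substring)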
'''
Get the position of the Description column.

Loops through the string and finds the first pair
of enclosing double quotes.

Returns a list with the opening and closing positions.

-- PARAMETERS --
txt: string to loop over
'''
def DescriptionColumn_Range(txt):
    count = 0
    pos = list()
    for i in range(len(txt)):
        if txt[i] == '"':
            pos.append(i)
            count += 1
            if count == 2:
                return pos
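
# Illustrative example (not in the original file):
#   DescriptionColumn_Range('a,"hello, world",b')  ->  [2, 15]
# Note: the function returns None if the line contains fewer than two quotes.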
'''
Adds a delimiter.

Returns a new string with the delimiter added.

-- PARAMETERS --
text:      string to be modified
delimiter: char or string to be inserted
flag:      b - before target
           a - after target
target:    substring where the delimiter will be inserted
'''
def addDelimiter(text, delimiter, flag, target):
    pos = text.find(target)

    if pos != -1:
        if flag == "b":
            text = text[:pos] + delimiter + text[pos:]
        else:
            # Insert the delimiter one character past the end of target.
            offset = pos + len(target)
            text = text[:offset + 1] + delimiter + text[offset + 1:]

    return text
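
# Illustrative examples (not in the original file):
#   addDelimiter("foo bar baz", ",", "b", "bar")  ->  "foo ,bar baz"
#   addDelimiter("foo bar baz", ",", "a", "bar")  ->  "foo bar ,baz"
#   (with flag "a" the delimiter lands one character past the end of target)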
'''
Clean up of the Description column.

Initial pass of data clean-up on the description
column: removes extra commas and 'garbage' data.

Returns a string.

-- PARAMETERS --
data: string
'''
def clean_Description_Column(data):
    # Replace dates of the form 00/00 with a comma.
    data = re.sub(r"[0-9]{2}\/[0-9]{2}", ",", data)

    for i in ["'", ",/20", ",/21"]:
        data = data.replace(i, "")

    wordBank = {
        'c': ["CREDITS", "check", "Check", "CHARGE", "CONSUMER"],
        'd': ["DEPOSIT", "DEBITS"],
        'f': ["Fee", "FEE", "Funds"],
        'o': ["OVERDRAFT"],
        'p': ["PURCHASE", "PAY", "pymt", "PMT", "PMNT", "Payment", "PAYMENT",
              "payment", "PAYROLL"],
        'r': ["REFUND"],
        't': ["TAX", "Transfer", "transfer", "TRANSFER"],
        'w': ["WITHDRWL", "withdrawal", "withdrwl"]
    }

    # Surround every known keyword with commas (the input line is lower-cased
    # upstream in cleanData).
    for k in wordBank:
        for i in wordBank[k]:
            i = i.lower()
            if i in data:
                data = addDelimiter(data, ",", "b", i)
                data = addDelimiter(data, ",", "a", i)

    # Drop reference numbers and get rid of repeating commas.
    data = re.sub(r"#[0-9]+", "", data)
    data = re.sub(r'(,\s*,)',
                  ',',
                  re.sub(r'(,{1,10}|,\s*,\b)', ",", data))

    # Put a comma before a trailing two-letter token.
    for match in re.finditer(r"\s[a-zA-Z]{2}$", data):
        data = addDelimiter(data, ',', 'b', data[match.start():match.end() + 1])

    return data
'''
Re-arranges a nested list into a flat (1-level) list.

The Description column, item 1 in the row, holds a
comma-separated list; its items are moved up one level
so the result is a single list rather than a list of lists.

Returns a list.

-- PARAMETERS --
data: list
'''
def addNewColumns(data):
    newR = list()
    for R in range(len(data)):
        if R == 1:
            for subr in data[R].split(","):
                newR.append(subr)
        else:
            newR.append(data[R])
    return newR
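
# Illustrative example (not in the original file):
#   addNewColumns(["01/02", "a,b,c", "9.99"])  ->  ["01/02", "a", "b", "c", "9.99"]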
'''
Drives the data clean-up process.

Returns the 'idea' of a clean dataframe (a list of rows).

-- PARAMETERS --
srcF: path of the raw file to clean up
'''
def cleanData(srcF):
    dataframe = list()

    with open(srcF, 'r') as src:
        for line in src:
            line = line.lower()
            rg = DescriptionColumn_Range(line)
            row = deleteInSubString(line, rg[0], rg[1])
            row = deleteInSubString(row, rg[0], rg[1], ';')
            row = row.replace('"', "").split(',')
            row[1] = clean_Description_Column(row[1])
            row[3] = deleteInSubString(row[3], 0, len(row[3]), "\n")
            dataframe.append(addNewColumns(row))

    return dataframe
# Save to CSV file
def saveToFile(data, trgFile):
    # newline='' avoids blank lines between rows on Windows (per csv module docs).
    with open(trgFile, 'w', newline='') as trg:
        write = csv.writer(trg)
        write.writerows(data)
if __name__ == "__main__":
    sourceFile = "/home/delphinus/Devlp/WalletAnalysis/app/data/raw/stmt.csv"
    targetFile = "/home/delphinus/Devlp/WalletAnalysis/app/data/modify/modf.csv"

    dataFrame = cleanData(sourceFile)
    saveToFile(dataFrame, targetFile)
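
# Editor's sketch (not part of the original file): a minimal end-to-end run of
# the clean-up pipeline on a single in-memory statement line, using a temporary
# file instead of the hard-coded paths above. The sample line is made up.

import os
import tempfile

def _demo_cleanup():
    raw = '01/02/2021,"PURCHASE 01/02 COFFEE SHOP #42 SEATTLE WA",-4.50,100.00\n'
    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as tmp:
        tmp.write(raw)
        src = tmp.name
    try:
        for row in cleanData(src):
            print(row)
    finally:
        os.remove(src)

# _demo_cleanup()  # uncomment to run the sketch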
| 20.773869
| 85
| 0.647315
| 601
| 4,134
| 4.429285
| 0.324459
| 0.031931
| 0.006762
| 0.018032
| 0.06311
| 0.029301
| 0
| 0
| 0
| 0
| 0
| 0.013049
| 0.184325
| 4,134
| 198
| 86
| 20.878788
| 0.776394
| 0.01911
| 0
| 0.057471
| 0
| 0
| 0.149533
| 0.044486
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08046
| false
| 0
| 0.022989
| 0
| 0.183908
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
72911e6ae35f06f9d987c47bf7d9ffd692afda2e
| 672
|
py
|
Python
|
game/views/credits_view.py
|
fisher60/pyweek-2021
|
294b45d768a7e0d85ac67dc4b12384e68fc4f399
|
[
"MIT"
] | null | null | null |
game/views/credits_view.py
|
fisher60/pyweek-2021
|
294b45d768a7e0d85ac67dc4b12384e68fc4f399
|
[
"MIT"
] | null | null | null |
game/views/credits_view.py
|
fisher60/pyweek-2021
|
294b45d768a7e0d85ac67dc4b12384e68fc4f399
|
[
"MIT"
] | null | null | null |
import arcade

from .menu_view import MenuView

TEXT_COLOR = arcade.csscolor.WHITE


class CreditsView(MenuView):
    def __init__(self, parent_view):
        super().__init__()
        self.parent_view = parent_view

    def on_draw(self):
        arcade.start_render()
        arcade.draw_text(
            "Credits",
            self.width // 2,
            self.height * 0.75,
            TEXT_COLOR,
            20,
            anchor_x="center",
        )
        self.draw_information_text(TEXT_COLOR, back=True, nav=True)

    def on_key_press(self, symbol, modifiers):
        if symbol == arcade.key.ESCAPE:
            self.window.show_view(self.parent_view)
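
# Editor's note (not part of the original file): typical usage, assuming
# MenuView supplies .width/.height and draw_information_text() as used above:
#
#     window = arcade.Window(800, 600, "pyweek-2021")
#     window.show_view(CreditsView(parent_view=some_menu_view))
#     arcade.run()
#
# Pressing ESC hands control back to parent_view via on_key_press above.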
| 22.4
| 67
| 0.596726
| 80
| 672
| 4.7
| 0.5375
| 0.106383
| 0.111702
| 0.095745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012848
| 0.30506
| 672
| 29
| 68
| 23.172414
| 0.792291
| 0
| 0
| 0
| 0
| 0
| 0.019345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.095238
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|