max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
schedules/serializers.py
|
sevenstar77/coin_dev
| 0
|
6629251
|
from rest_framework.serializers import ModelSerializer, CharField
from .models import Emailschedule, Schedulemaster
class ScheduleSerializer(ModelSerializer):
    """Write serializer used to create Schedulemaster rows (price / percent
    alert schedules)."""

    class Meta:
        model = Schedulemaster
        #fields = '__all__'
        # Only the client-supplied columns are writable; remaining model
        # fields (ids, timestamps, ...) are managed server-side.
        fields = ('name', 'email', 'is_email_receive', 'schedule_type', 'currency', 'from_date', 'to_date',
                  'price', 'target_up_price', 'target_down_price', 'target_up_percent', 'target_down_percent')

    def create(self, validated_data):
        # NOTE(review): delegates to a custom manager method `create_object`
        # rather than the default objects.create() -- presumably it performs
        # extra setup; confirm against the Schedulemaster manager.
        schedule = Schedulemaster.objects.create_object(**validated_data)
        return schedule
class ScheduleDetailSerializer(ModelSerializer):
    """Read serializer exposing every Schedulemaster field (detail view)."""

    class Meta:
        model = Schedulemaster
        fields = '__all__'
class EmailSchedulesSerializer(ModelSerializer):
    """Write serializer used to create Emailschedule rows."""

    class Meta:
        model = Emailschedule
        fields = ('user_no', 'name', 'email', 'is_email_receive', 'schedule_type', 'currency', 'from_date'
                  , 'to_date')

    # def validate_mail_currency_schedule_type(self, value):
    #     pass

    def create(self, validated_data):
        # Delegates to a custom manager method instead of objects.create().
        emailschedule = Emailschedule.objects.create_object(**validated_data)
        return emailschedule
class AlarmSchedulesSerializer(ModelSerializer):
    """Placeholder for a future alarm-schedule serializer (not implemented)."""
    pass
|
from rest_framework.serializers import ModelSerializer, CharField
from .models import Emailschedule, Schedulemaster
class ScheduleSerializer(ModelSerializer):
class Meta:
model = Schedulemaster
#fields = '__all__'
fields = ('name', 'email', 'is_email_receive', 'schedule_type', 'currency', 'from_date', 'to_date',
'price', 'target_up_price', 'target_down_price', 'target_up_percent', 'target_down_percent')
def create(self, validated_data):
schedule = Schedulemaster.objects.create_object(**validated_data)
return schedule
class ScheduleDetailSerializer(ModelSerializer):
class Meta:
model = Schedulemaster
fields = '__all__'
class EmailSchedulesSerializer(ModelSerializer):
class Meta:
model = Emailschedule
fields = ('user_no', 'name', 'email', 'is_email_receive', 'schedule_type', 'currency', 'from_date'
, 'to_date')
# def validate_mail_currency_schedule_type(self, value):
# pass
def create(self, validated_data):
emailschedule = Emailschedule.objects.create_object(**validated_data)
return emailschedule
class AlarmSchedulesSerializer(ModelSerializer):
pass
|
en
| 0.397099
|
#fields = '__all__' # def validate_mail_currency_schedule_type(self, value): # pass
| 2.158508
| 2
|
luggage/run.py
|
moshez/luggage
| 0
|
6629252
|
<reponame>moshez/luggage
import os
import shutil
import sys
from ncolony import ctllib
def calcCommandline():
    """Return a command-line prefix that will re-run this program.

    If the program was launched as a plain script, that script path is the
    prefix; if it was launched via ``python -m package`` (argv[0] ends in
    ``__main__.py``), reconstruct the ``-m`` invocation instead.

    :rettype: list of strings
    :raises RuntimeError: if no sys.path entry is a parent of argv[0]
    """
    argv0 = sys.argv[0]
    if not argv0.endswith('__main__.py'):
        return [argv0]
    prefix = os.path.dirname(argv0)
    # BUG FIX: on Python 3, map() returns a one-shot iterator; the first
    # membership test below exhausted it, so every later loop iteration saw
    # an empty sequence and the search always failed. Materialize a list.
    path = [os.path.abspath(element) for element in sys.path]
    # Walk up the directory tree until we hit an entry of sys.path; the
    # remainder of argv0 below that point is the dotted module path.
    while prefix not in path:
        up = os.path.dirname(prefix)
        if up == prefix:
            raise RuntimeError('Could not find prefix', argv0)
        prefix = up
    module = '.'.join(argv0[len(prefix):].split('/')[1:-1])
    return [sys.executable, '-m', module]
def mkconfig(dirname):
    """create an NColony configuration

    WARNING: if *dirname* already exists it is deleted recursively first.

    :param dirname: directory in which to create the configuration
    :type dirname: string
    :rettype: ncolony.ctllib.Places
    """
    place = os.path.abspath(dirname)
    # Start from a clean slate: wipe any previous configuration tree.
    if os.path.exists(place):
        shutil.rmtree(place)
    os.mkdir(place)
    config = os.path.join(place, 'config')
    messages = os.path.join(place, 'messages')
    # Places iterates over its member directory paths, so each one
    # (config, messages) gets created below.
    places = ctllib.Places(config=config, messages=messages)
    for dr in places:
        os.mkdir(dr)
    return places
|
import os
import shutil
import sys
from ncolony import ctllib
def calcCommandline():
"""return a command-line prefix that will run me
:rettype: list of strings
"""
argv0 = sys.argv[0]
if not argv0.endswith('__main__.py'):
return [argv0]
prefix = os.path.dirname(argv0)
path = map(os.path.abspath, sys.path)
while prefix not in path:
up = os.path.dirname(prefix)
if up == prefix:
raise RuntimeError('Could not find prefix', argv0)
prefix = up
module = '.'.join(argv0[len(prefix):].split('/')[1:-1])
return [sys.executable, '-m', module]
def mkconfig(dirname):
"""create an NColony configuration
:param dirname: directory in which to create the configuration
:type dirname: string
:rettype: ncolony.ctllib.Places
"""
place = os.path.abspath(dirname)
if os.path.exists(place):
shutil.rmtree(place)
os.mkdir(place)
config = os.path.join(place, 'config')
messages = os.path.join(place, 'messages')
places = ctllib.Places(config=config, messages=messages)
for dr in places:
os.mkdir(dr)
return places
|
en
| 0.588093
|
return a command-line prefix that will run me :rettype: list of strings create an NColony configuration :param dirname: directory in which to create the configuration :type dirname: string :rettype: ncolony.ctllib.Places
| 2.76292
| 3
|
lino/modlib/extjs/elems.py
|
khchine5/lino
| 1
|
6629253
|
<filename>lino/modlib/extjs/elems.py<gh_stars>1-10
# Module intentionally gutted: importing it always fails, pointing callers
# at the new location of the code.
raise Exception("20180212 moved to lino.core.elems")
|
<filename>lino/modlib/extjs/elems.py<gh_stars>1-10
raise Exception("20180212 moved to lino.core.elems")
|
none
| 1
| 1.256195
| 1
|
|
scGNNsp_space/gae/utils.py
|
CyanStarNight/single_cell_spatial_image
| 5
|
6629254
|
<gh_stars>1-10
import pickle as pkl
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
from sklearn.metrics import roc_auc_score, average_precision_score
def sample_mask(idx, l):
    """Create a boolean mask of length *l* that is True at positions *idx*.

    Parameters
    ----------
    idx : array-like of int
        positions to set to True.
    l : int
        total length of the mask.

    Returns
    -------
    numpy bool array of shape (l,).
    """
    mask = np.zeros(l)
    mask[idx] = 1
    # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the equivalent, supported dtype.
    return np.array(mask, dtype=bool)
def load_data(dataset):
    """Load a planetoid-style citation dataset from ./data/ind.<dataset>.*.

    Returns (adj, features, y_test, tx, ty, test_mask, labels_argmax).
    """
    # load the data: x, tx, allx, graph
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        '''
        fix Pickle incompatibility of numpy arrays between Python 2 and 3
        https://stackoverflow.com/questions/11305790/pickle-incompatibility-of-numpy-arrays-between-python-2-and-3
        '''
        with open("data/ind.{}.{}".format(dataset, names[i]), 'rb') as rf:
            u = pkl._Unpickler(rf)
            u.encoding = 'latin1'
            cur_data = u.load()
            objects.append(cur_data)
        # objects.append(
        #     pkl.load(open("data/ind.{}.{}".format(dataset, names[i]), 'rb')))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file(
        "data/ind.{}.test.index".format(dataset))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(
            min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range - min(test_idx_range), :] = ty
        ty = ty_extended
    # Stack train + test features, then restore the test rows to their
    # original (unshuffled) order.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    # features = torch.DoubleTensor(np.array(features.todense()))
    features = torch.FloatTensor(np.array(features.todense()))
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    # Split: first len(y) nodes train, next 500 validation, reordered
    # tail for test.
    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y) + 500)
    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    # Per-split one-hot label matrices (rows outside the split stay zero).
    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    return adj, features, y_test, tx, ty, test_mask, np.argmax(labels,1)
# Load data in GAT
# def load_data(path="./data/cora/", dataset="cora"):
def load_data_GAT(path="/Users/juexinwang/workspace/pyGAT/data/cora/", dataset="cora"):
    """Load citation network dataset (cora only for now).

    Reads ``<path><dataset>.content`` (node id, features..., class label)
    and ``<path><dataset>.cites`` (citing, cited pairs), builds a
    symmetrically-normalized adjacency matrix, and returns torch tensors:
    (adj, features, labels, idx_train, idx_val, idx_test).
    """
    def _encode_onehot(labels):
        # One-hot encode string class labels. Classes are sorted first so
        # the encoding is stable across runs (class i -> identity row i).
        classes = sorted(set(labels))
        classes_dict = {c: np.identity(len(classes))[i, :]
                        for i, c in enumerate(classes)}
        return np.array(list(map(classes_dict.get, labels)), dtype=np.int32)

    print('Loading {} dataset...'.format(dataset))
    idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset), dtype=np.dtype(str))
    features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
    # BUG FIX: encode_onehot was referenced but only existed as commented-out
    # code in this module (NameError at runtime); use the local helper.
    labels = _encode_onehot(idx_features_labels[:, -1])
    # build graph
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset), dtype=np.int32)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())), dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])), shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)
    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    features = normalize_features(features)
    adj = normalize_adj(adj + sp.eye(adj.shape[0]))
    # Standard GAT cora split: 140 train / 300 val / 1000 test.
    idx_train = range(140)
    idx_val = range(200, 500)
    idx_test = range(500, 1500)
    adj = torch.FloatTensor(np.array(adj.todense()))
    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(np.where(labels)[1])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)
    return adj, features, labels, idx_train, idx_val, idx_test
def parse_index_file(filename):
    """Parse an index file containing one integer per line.

    Returns the integers as a list, in file order. The file handle is
    closed deterministically (the original implementation leaked it).
    """
    with open(filename) as fh:
        return [int(line.strip()) for line in fh]
def sparse_to_tuple(sparse_mx):
    """Decompose a scipy sparse matrix into (coords, values, shape)."""
    # Work on a COO view so row/col index arrays are directly available.
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    coords = np.vstack((coo.row, coo.col)).transpose()
    return coords, coo.data, coo.shape
#Updated
def mask_test_edges(adj):
    """Split the graph's edges into train/val/test positives and sample
    matching negative (non-)edges for val/test.

    Returns (adj_train, train_edges, val_edges, val_edges_false,
    test_edges, test_edges_false).
    """
    # Function to build test set with 10% positive links
    # NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper.
    # TODO: Clean up.

    # Remove diagonal elements
    adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
    adj.eliminate_zeros()
    # Check that diag is zero:
    assert np.diag(adj.todense()).sum() == 0

    # Use the upper triangle so each undirected edge is counted once.
    adj_triu = sp.triu(adj)
    adj_tuple = sparse_to_tuple(adj_triu)
    edges = adj_tuple[0]
    edges_all = sparse_to_tuple(adj)[0]
    # 10% of edges go to test, 5% to validation.
    num_test = int(np.floor(edges.shape[0] / 10.))
    num_val = int(np.floor(edges.shape[0] / 20.))
    all_edge_idx = np.arange(edges.shape[0])
    np.random.shuffle(all_edge_idx)
    val_edge_idx = all_edge_idx[:num_val]
    test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]
    test_edges = edges[test_edge_idx]
    val_edges = edges[val_edge_idx]
    train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)

    def ismember(a, b, tol=5):
        # Row-wise membership: is row `a` present in matrix `b`
        # (compared up to `tol` decimal places)?
        rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)
        return np.any(rows_close)

    # Rejection-sample negative edges for the test set: random (i, j) pairs
    # that are not self-loops, not real edges, and not already sampled.
    test_edges_false = []
    while len(test_edges_false) < len(test_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], edges_all):
            continue
        if test_edges_false:
            if ismember([idx_j, idx_i], np.array(test_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(test_edges_false)):
                continue
        test_edges_false.append([idx_i, idx_j])

    # Same sampling for validation negatives; additionally reject pairs that
    # are train/val positives in either direction.
    val_edges_false = []
    while len(val_edges_false) < len(val_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], train_edges):
            continue
        if ismember([idx_j, idx_i], train_edges):
            continue
        if ismember([idx_i, idx_j], val_edges):
            continue
        if ismember([idx_j, idx_i], val_edges):
            continue
        if val_edges_false:
            if ismember([idx_j, idx_i], np.array(val_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(val_edges_false)):
                continue
        if ~ismember([idx_i,idx_j],edges_all) and ~ismember([idx_j,idx_i],edges_all):
            val_edges_false.append([idx_i, idx_j])
        else:
            # Debug
            print(str(idx_i)+" "+str(idx_j))
            # Original:
            # val_edges_false.append([idx_i, idx_j])

    #TODO: temporary disable for ismember function may require huge memory.
    # assert ~ismember(test_edges_false, edges_all)
    # assert ~ismember(val_edges_false, edges_all)
    # assert ~ismember(val_edges, train_edges)
    # assert ~ismember(test_edges, train_edges)
    # assert ~ismember(val_edges, test_edges)

    data = np.ones(train_edges.shape[0])
    # Re-build adj matrix
    adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape)
    adj_train = adj_train + adj_train.T
    # NOTE: these edge lists only contain single direction of edge!
    return adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false
def preprocess_graph(adj):
    """Add self-loops, symmetrically normalize, and return the adjacency
    matrix as a torch sparse tensor."""
    adj_coo = sp.coo_matrix(adj)
    adj_self = adj_coo + sp.eye(adj_coo.shape[0])
    degrees = np.array(adj_self.sum(1))
    d_inv_sqrt = sp.diags(np.power(degrees, -0.5).flatten())
    # Same composition as the original: ((A+I) D^-1/2)^T D^-1/2.
    normalized = adj_self.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()
    # return sparse_to_tuple(normalized)
    return sparse_mx_to_torch_sparse_tensor(normalized)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse (COO) tensor.

    Parameters
    ----------
    sparse_mx : scipy sparse matrix (any format).

    Returns
    -------
    torch sparse COO tensor with float32 values.
    """
    # sparse_mx = sparse_mx.tocoo().astype(np.float64)
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    # torch.sparse.FloatTensor is deprecated; torch.sparse_coo_tensor is the
    # supported constructor and yields an equivalent float32 sparse tensor.
    return torch.sparse_coo_tensor(indices, values, shape)
def get_roc_score(emb, adj_orig, edges_pos, edges_neg):
    """Score link prediction with ROC-AUC and average precision.

    Parameters
    ----------
    emb : node embeddings (n x d); an edge (i, j) is scored as
        sigmoid(emb[i] . emb[j]).
    adj_orig : ground-truth adjacency, indexable as adj_orig[i, j].
    edges_pos, edges_neg : iterables of (i, j) positive / negative edges.

    Returns
    -------
    (roc_score, ap_score)
    """
    def sigmoid(x):
        return 1 / (1 + np.exp(-x))
    # Predict on test set of edges
    adj_rec = np.dot(emb, emb.T)
    preds = []
    pos = []
    for e in edges_pos:
        preds.append(sigmoid(adj_rec[e[0], e[1]]))
        pos.append(adj_orig[e[0], e[1]])
    preds_neg = []
    neg = []
    for e in edges_neg:
        preds_neg.append(sigmoid(adj_rec[e[0], e[1]]))
        neg.append(adj_orig[e[0], e[1]])
    preds_all = np.hstack([preds, preds_neg])
    # BUG FIX: the zero-label count must match len(preds_neg); the original
    # used len(preds), which misaligns labels whenever the positive and
    # negative edge lists differ in length.
    labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds_neg))])
    roc_score = roc_auc_score(labels_all, preds_all)
    ap_score = average_precision_score(labels_all, preds_all)
    return roc_score, ap_score
## This part is from GAT, need to update
# def encode_onehot(labels):
# # The classes must be sorted before encoding to enable static class encoding.
# # In other words, make sure the first class always maps to index 0.
# classes = sorted(list(set(labels)))
# classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
# labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)
# return labels_onehot
def normalize_adj(mx):
    """Symmetrically normalize a sparse matrix: D^-1/2 applied on both
    sides, where D holds the row sums (zero-degree rows stay zero)."""
    degrees = np.array(mx.sum(1)).flatten()
    inv_sqrt = np.power(degrees, -0.5)
    # Rows that sum to zero produce inf; zero them out instead.
    inv_sqrt[np.isinf(inv_sqrt)] = 0.
    d_inv_sqrt = sp.diags(inv_sqrt)
    return mx.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt)
def normalize_features(mx):
    """Scale each row of a sparse matrix to sum to 1 (empty rows stay 0)."""
    row_totals = np.array(mx.sum(1)).flatten()
    inv = np.power(row_totals, -1.)
    # Zero-sum rows produce inf; replace with 0 so those rows stay empty.
    inv[np.isinf(inv)] = 0.
    return sp.diags(inv).dot(mx)
def accuracy(output, labels):
    """Fraction of rows in `output` whose argmax equals `labels`."""
    predicted = output.max(1)[1].type_as(labels)
    hits = predicted.eq(labels).double().sum()
    return hits / len(labels)
|
import pickle as pkl
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
from sklearn.metrics import roc_auc_score, average_precision_score
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
def load_data(dataset):
# load the data: x, tx, allx, graph
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
'''
fix Pickle incompatibility of numpy arrays between Python 2 and 3
https://stackoverflow.com/questions/11305790/pickle-incompatibility-of-numpy-arrays-between-python-2-and-3
'''
with open("data/ind.{}.{}".format(dataset, names[i]), 'rb') as rf:
u = pkl._Unpickler(rf)
u.encoding = 'latin1'
cur_data = u.load()
objects.append(cur_data)
# objects.append(
# pkl.load(open("data/ind.{}.{}".format(dataset, names[i]), 'rb')))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file(
"data/ind.{}.test.index".format(dataset))
test_idx_range = np.sort(test_idx_reorder)
if dataset == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(
min(test_idx_reorder), max(test_idx_reorder) + 1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range - min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range - min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
# features = torch.DoubleTensor(np.array(features.todense()))
features = torch.FloatTensor(np.array(features.todense()))
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y) + 500)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_test, tx, ty, test_mask, np.argmax(labels,1)
# Load data in GAT
# def load_data(path="./data/cora/", dataset="cora"):
def load_data_GAT(path="/Users/juexinwang/workspace/pyGAT/data/cora/", dataset="cora"):
"""Load citation network dataset (cora only for now)"""
print('Loading {} dataset...'.format(dataset))
idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset), dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
labels = encode_onehot(idx_features_labels[:, -1])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset), dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())), dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])), shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
features = normalize_features(features)
adj = normalize_adj(adj + sp.eye(adj.shape[0]))
idx_train = range(140)
idx_val = range(200, 500)
idx_test = range(500, 1500)
adj = torch.FloatTensor(np.array(adj.todense()))
features = torch.FloatTensor(np.array(features.todense()))
labels = torch.LongTensor(np.where(labels)[1])
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adj, features, labels, idx_train, idx_val, idx_test
def parse_index_file(filename):
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sparse_to_tuple(sparse_mx):
if not sp.isspmatrix_coo(sparse_mx):
sparse_mx = sparse_mx.tocoo()
coords = np.vstack((sparse_mx.row, sparse_mx.col)).transpose()
values = sparse_mx.data
shape = sparse_mx.shape
return coords, values, shape
#Updated
def mask_test_edges(adj):
# Function to build test set with 10% positive links
# NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper.
# TODO: Clean up.
# Remove diagonal elements
adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
adj.eliminate_zeros()
# Check that diag is zero:
assert np.diag(adj.todense()).sum() == 0
adj_triu = sp.triu(adj)
adj_tuple = sparse_to_tuple(adj_triu)
edges = adj_tuple[0]
edges_all = sparse_to_tuple(adj)[0]
num_test = int(np.floor(edges.shape[0] / 10.))
num_val = int(np.floor(edges.shape[0] / 20.))
all_edge_idx = np.arange(edges.shape[0])
np.random.shuffle(all_edge_idx)
val_edge_idx = all_edge_idx[:num_val]
test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]
test_edges = edges[test_edge_idx]
val_edges = edges[val_edge_idx]
train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)
def ismember(a, b, tol=5):
rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)
return np.any(rows_close)
test_edges_false = []
while len(test_edges_false) < len(test_edges):
idx_i = np.random.randint(0, adj.shape[0])
idx_j = np.random.randint(0, adj.shape[0])
if idx_i == idx_j:
continue
if ismember([idx_i, idx_j], edges_all):
continue
if test_edges_false:
if ismember([idx_j, idx_i], np.array(test_edges_false)):
continue
if ismember([idx_i, idx_j], np.array(test_edges_false)):
continue
test_edges_false.append([idx_i, idx_j])
val_edges_false = []
while len(val_edges_false) < len(val_edges):
idx_i = np.random.randint(0, adj.shape[0])
idx_j = np.random.randint(0, adj.shape[0])
if idx_i == idx_j:
continue
if ismember([idx_i, idx_j], train_edges):
continue
if ismember([idx_j, idx_i], train_edges):
continue
if ismember([idx_i, idx_j], val_edges):
continue
if ismember([idx_j, idx_i], val_edges):
continue
if val_edges_false:
if ismember([idx_j, idx_i], np.array(val_edges_false)):
continue
if ismember([idx_i, idx_j], np.array(val_edges_false)):
continue
if ~ismember([idx_i,idx_j],edges_all) and ~ismember([idx_j,idx_i],edges_all):
val_edges_false.append([idx_i, idx_j])
else:
# Debug
print(str(idx_i)+" "+str(idx_j))
# Original:
# val_edges_false.append([idx_i, idx_j])
#TODO: temporary disable for ismember function may require huge memory.
# assert ~ismember(test_edges_false, edges_all)
# assert ~ismember(val_edges_false, edges_all)
# assert ~ismember(val_edges, train_edges)
# assert ~ismember(test_edges, train_edges)
# assert ~ismember(val_edges, test_edges)
data = np.ones(train_edges.shape[0])
# Re-build adj matrix
adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape)
adj_train = adj_train + adj_train.T
# NOTE: these edge lists only contain single direction of edge!
return adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false
def preprocess_graph(adj):
adj = sp.coo_matrix(adj)
adj_ = adj + sp.eye(adj.shape[0])
rowsum = np.array(adj_.sum(1))
degree_mat_inv_sqrt = sp.diags(np.power(rowsum, -0.5).flatten())
adj_normalized = adj_.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt).tocoo()
# return sparse_to_tuple(adj_normalized)
return sparse_mx_to_torch_sparse_tensor(adj_normalized)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
# sparse_mx = sparse_mx.tocoo().astype(np.float64)
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
# return torch.sparse.DoubleTensor(indices, values, shape)
return torch.sparse.FloatTensor(indices, values, shape)
def get_roc_score(emb, adj_orig, edges_pos, edges_neg):
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# Predict on test set of edges
adj_rec = np.dot(emb, emb.T)
preds = []
pos = []
for e in edges_pos:
preds.append(sigmoid(adj_rec[e[0], e[1]]))
pos.append(adj_orig[e[0], e[1]])
preds_neg = []
neg = []
for e in edges_neg:
preds_neg.append(sigmoid(adj_rec[e[0], e[1]]))
neg.append(adj_orig[e[0], e[1]])
preds_all = np.hstack([preds, preds_neg])
labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds))])
roc_score = roc_auc_score(labels_all, preds_all)
ap_score = average_precision_score(labels_all, preds_all)
return roc_score, ap_score
## This part is from GAT, need to update
# def encode_onehot(labels):
# # The classes must be sorted before encoding to enable static class encoding.
# # In other words, make sure the first class always maps to index 0.
# classes = sorted(list(set(labels)))
# classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
# labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)
# return labels_onehot
def normalize_adj(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv_sqrt = np.power(rowsum, -0.5).flatten()
r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.
r_mat_inv_sqrt = sp.diags(r_inv_sqrt)
return mx.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt)
def normalize_features(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
|
en
| 0.584828
|
Create mask. # load the data: x, tx, allx, graph fix Pickle incompatibility of numpy arrays between Python 2 and 3 https://stackoverflow.com/questions/11305790/pickle-incompatibility-of-numpy-arrays-between-python-2-and-3 # objects.append( # pkl.load(open("data/ind.{}.{}".format(dataset, names[i]), 'rb'))) # Fix citeseer dataset (there are some isolated nodes in the graph) # Find isolated nodes, add them as zero-vecs into the right position # features = torch.DoubleTensor(np.array(features.todense())) # Load data in GAT # def load_data(path="./data/cora/", dataset="cora"): Load citation network dataset (cora only for now) # build graph # build symmetric adjacency matrix #Updated # Function to build test set with 10% positive links # NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper. # TODO: Clean up. # Remove diagonal elements # Check that diag is zero: # Debug # Original: # val_edges_false.append([idx_i, idx_j]) #TODO: temporary disable for ismember function may require huge memory. # assert ~ismember(test_edges_false, edges_all) # assert ~ismember(val_edges_false, edges_all) # assert ~ismember(val_edges, train_edges) # assert ~ismember(test_edges, train_edges) # assert ~ismember(val_edges, test_edges) # Re-build adj matrix # NOTE: these edge lists only contain single direction of edge! # return sparse_to_tuple(adj_normalized) Convert a scipy sparse matrix to a torch sparse tensor. # sparse_mx = sparse_mx.tocoo().astype(np.float64) # return torch.sparse.DoubleTensor(indices, values, shape) # Predict on test set of edges ## This part is from GAT, need to update # def encode_onehot(labels): # # The classes must be sorted before encoding to enable static class encoding. # # In other words, make sure the first class always maps to index 0. 
# classes = sorted(list(set(labels))) # classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)} # labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32) # return labels_onehot Row-normalize sparse matrix Row-normalize sparse matrix
| 2.30485
| 2
|
cwbrowser/cw_connection.py
|
neurospin/rql_download
| 0
|
6629255
|
<filename>cwbrowser/cw_connection.py
##########################################################################
# NSAp - Copyright (C) CEA, 2013 - 2018
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
A module to connect a CubicWeb service and send requests.
"""
# System import
from __future__ import print_function
import os
import sys
import json
import time
import stat
import glob
import csv
if sys.version_info[0] > 2:
basestring = str
from io import StringIO
else:
from StringIO import StringIO
# Third party import
import requests
import numpy
import paramiko
def load_csv(text, delimiter=";"):
    """ Load a csv.

    Parameters
    ----------
    text: string (mandatory)
        the csv text.
    delimiter: string (optional default ';')
        the column separator.

    Returns
    -------
    csv_lines: list
        a list containing all the csv lines.
    """
    return [row for row in csv.reader(StringIO(text), delimiter=delimiter)]
class CWInstanceConnection(object):
    """ Tool to dump the data stored in a cw instance.

    .. code-block:: python

        # Import Connection module
        from cwbrowser.cw_connection import CWInstanceConnection

        # Create dummy rqls
        rql1 = ("Any C, G Where X is Subject, X code_in_study C, "
                "X handedness 'ambidextrous', X gender G")
        rql2 = ("Any S WHERE S is Scan, S has_data A, A field '3T', "
                "S in_assessment B, B timepoint 'V1', S format 'GIS', "
                "S in_assessment C, C concerns D, D code_in_study 'ab100207'")

        # HTTP test
        url = @HTTPURL; login = @LOGIN; password = @PWD
        connection = CWInstanceConnection(url, login, password, port=9191)
        connection.execute(rql1, export_type="json")
        connection.execute_with_sync(rql2, "/tmp/fuse", timer=1)

        # HTTPS test
        url = @HTTPSURL; login = @LOGIN; password = @PWD
        connection = CWInstanceConnection(url, login, password,
                                          server_root="/home/$login")
        connection.execute(rql)

    Attributes
    ----------
    url : str
        the url to the cw instance.
    login : str
        the cw login.
    opener: OpenerDirector
        object that contains the connexion to the cw instance.
    """
    # Global variable that specify the supported export cw formats
    _EXPORT_TYPES = ["json", "csv", "cw"]
    # Map each export type to the parser used to decode the HTTP response body.
    importers = {
        "json": json.loads,
        "csv": load_csv,
        "cw": json.loads,
        "cwsearch": json.loads
    }
def __init__(self, url, login, password, port=22, server_root=os.path.sep,
             verify=True, verbosity=0):
    """ Initialize the HTTPConnection class.

    Parameters
    ----------
    url: str (mandatory)
        the url to the cw instance.
    login: str (mandatory)
        the cw login.
    password: str (mandatory)
        the cw password.
    port: int (optional default 22)
        the sftp port.
    server_root: str (optional default '/')
        the server root directory where the user mount points (chroot) are
        mapped.
    verify: bool (optional, default True)
        if unset, disable the security certificate check.
    verbosity: int (optional default 0)
        the verbosity level.
    """
    # Class parameters
    # Refuse plain-http endpoints: credentials travel with every request.
    if not url.startswith("https"):
        raise ValueError(
            "Authentication was requested on a non secured URL ({0})."
            "Request has been blocked for security reasons.".format(url))
    self.url = url
    self.login = login
    self.password = password
    # Host part of 'https://host:port/...' -> third '/'-separated chunk.
    self.host = self.url.split("/")[2].split(":")[0]
    self.port = port
    self.server_root = server_root
    self.verify = verify
    self.verbosity = verbosity
###########################################################################
# Public Members
###########################################################################
def execute(self, rql, export_type="json", nb_tries=2):
    """ Method that loads the rset from a rql request.

    Parameters
    ----------
    rql: str (mandatory)
        the rql request that will be executed on the cw instance.
    export_type: str (optional default 'json')
        the result set export format: one defined in '_EXPORT_TYPES'.
    nb_tries: int (optional default 2)
        number of times a request will be repeated if it fails.

    Returns
    -------
    rset: list of list of str
        a list that contains the requested entity parameters.
    """
    # Debug message
    if self.verbosity > 2:
        # BUG FIX: print() does not interpolate '%s'; format explicitly.
        print("Executing rql: '{0}'".format(rql))
        print("Exporting in: '{0}'".format(export_type))

    # Check export type
    if export_type not in self._EXPORT_TYPES:
        raise Exception("Unknown export type '{0}', expect one in "
                        "'{1}'.".format(export_type, self._EXPORT_TYPES))

    # Create a dictionary with the request meta information
    data = {
        "__login": self.login,
        # BUG FIX: the anonymized '<PASSWORD>' placeholder was not valid
        # Python; the request must carry the stored credential.
        "__password": self.password,
        "rql": rql,
        "vid": export_type + "export",
        "_binary": 1
    }
    if export_type == "cw":
        del data["_binary"]

    # Get the result set: try up to 'nb_tries' times, sleeping one second
    # between attempts.
    try_count = 0
    while True:
        try:
            try_count += 1
            response = requests.post(
                self.url, data=data, verify=self.verify,
                auth=(self.login, self.password))
            if not response.ok:
                raise ValueError(response.reason)
            rset = self.importers[export_type](
                response.content.decode("utf-8"))
            break
        except Exception as e:
            if try_count >= nb_tries:
                # BUG FIX: Python 3 exceptions have no '.message'; re-raise
                # the same exception type with the extra context appended.
                raise type(e)(
                    "{0}\nFailed to get data after {1} tries.\n"
                    "Request: {2}".format(e, nb_tries, data["rql"]))
            time.sleep(1)  # wait 1 second before retrying

    # Debug message
    if self.verbosity > 2:
        print("RQL result: '{0}'".format(rset))
    return rset
def execute_with_sync(self, rql, sync_dir, timer=3, nb_tries=3):
""" Method that loads the rset from a rql request through sftp protocol
using the CWSearch mechanism.
Parameters
----------
rql: str (mandatory)
the rql rquest that will be executed on the cw instance.
sync_dir: str (mandatory)
the destination folder where the rql data are synchronized.
timer: int (optional default 3)
the time in seconds we are waiting for the fuse or twisted
server update.
nb_tries: int (optional default 3)
if the update has not been detected after 'nb_of_try' trials
raise an exception.
Returns
-------
rset: list of list or list of dict
a list that contains the requested cubicweb database parameters
when a json rset is generated, a list of dictionaries if a csv
rset is generated.
"""
# Create the CWSearch
self._create_cwsearch(rql)
# Wait for the update: use double quote in rql
try_nb = 1
cwsearch_title = None
rql = rql.replace("'", '"')
while try_nb <= nb_tries:
# Timer
if self.verbosity > 2:
print("Sleeping: '%i sec'", timer)
time.sleep(timer)
# Get all the user CWSearch in the database
rset = self.execute(
"Any S, T, P Where S is CWSearch, S title T, S path P")
# Check if the cubicweb update has been done.
# If true, get the associated CWSearch title
for item in rset:
if item[2].replace("'", '"') == rql:
cwsearch_title = item[1]
break
if cwsearch_title is not None:
break
# Increment
try_nb += 1
# If the search is not created
if try_nb == (nb_tries + 1):
raise IOError("The search has not been created properly.")
# Get instance parameters
cw_params = self.execute(rql="", export_type="cw")
if self.verbosity > 2:
print("Autodetected sync parameters: '%s'", str(cw_params))
# Copy the data with the sftp fuse mount point
self._get_server_dataset(sync_dir, cwsearch_title, cw_params)
# Load the rset
local_dir = os.path.join(sync_dir, cwsearch_title)
rset_file = glob.glob(os.path.join(local_dir, "request_result.*"))
if self.verbosity > 2:
print("Autodetected json rset file at location '{0}'".format(
rset_file))
if len(rset_file) != 1:
raise IOError("'{0}' rset file not supported, expect a single "
"rset file.".format(rset_json_file))
rset_file = rset_file[0]
filext = os.path.splitext(rset_file)[1]
# > deal with json file
if filext == ".json":
with open(rset_file) as json_data:
rset = json.load(json_data)
# Tune the rset files in order to point in the local filesystem
if not local_dir.endswith(os.path.sep):
local_dir += os.path.sep
if not cw_params["basedir"].endswith(os.path.sep):
cw_params["basedir"] += os.path.sep
for rset_items in rset:
for item_index in range(len(rset_items)):
item = rset_items[item_index]
if (isinstance(item, basestring) and
item.startswith(cw_params["basedir"])):
rset_items[item_index] = item.replace(
cw_params["basedir"], local_dir, 1)
# > deal with csv file
elif filext == ".csv":
with open(rset_file) as csv_data:
data = csv.DictReader(csv_data, delimiter=";", quotechar="|")
rset = [item for item in data]
# > raise an error when the file extension is not supported
else:
raise IOError("Unknown '{0}' rset extension.".format(rset_file))
# Debug message
if self.verbosity > 2:
print("RQL result: '%s'", rset)
return rset
def get_genotype_measure(self, gene_name, genomic_measure, nb_tries=3):
""" Method that loads the genomic measures stored in PLINK format.
Parameters
----------
gene_name: str (mandatory)
a gene name used to limit the number of measures that will be
loaded.
genomic_measure: str (mandatory)
the genomic measure name associated to PLINK files.
nb_tries: int (optional default 3)
if the update has not been detected after 'nb_of_try' trials
raise an exception.
Returns
-------
rset: dict
dictionary with 'labels' and 'records' (that contains
the requested cubicweb database parameters).
"""
# Debug message
if self.verbosity > 2:
print("Genotype extraction: '{0}', '{1}'".format(
genomic_measure, gene_name))
# Create a dictionary with the request meta information
data = {
"__login": self.login,
"__password": <PASSWORD>,
"vid": "metagen-search-json",
"measure": genomic_measure,
"gene": gene_name,
"export": "data"
}
try_count = 0
while True:
try: # Get the result set, it will always try at least once
try_count += 1
response = requests.post(
self.url, data=data, verify=self.verify,
auth=(self.login, self.password))
if not response.ok:
raise ValueError(response.reason)
rset = self.importers["json"](response.content.decode("utf-8"))
break
except Exception as e:
if try_count >= nb_tries:
# keep original message of e and add infos
e.message += ("\nFailed to get data after {} tries.\n"
"Request: {}").format(nb_tries, data["rql"])
raise e
time.sleep(1) # wait 1 second before retrying
# Debug message
if self.verbosity > 2:
print("Genotype result: '%s'", rset)
return rset
###########################################################################
# Private Members
###########################################################################
def _get_server_dataset(self, sync_dir, cwsearch_title, cw_params):
""" Download the CWSearch result trough a sftp connection.
.. note::
If a folder 'sync_dir' + 'cwsearch_title' is detected on the local
machine, no download is run. We assume that the CWSearch has already
been downloaded properly.
Parameters
----------
sync_dir: str (mandatory)
the destination folder where the rql data are synchronized.
cwsearch_title: str (mandatory)
the title of the CWSearch that will be downloaded.
cw_params: dict (mandatory)
a dictionary containing cw/fuse parameters.
"""
# Build the mount point
mount_point = os.path.join(
self.server_root, cw_params["instance_name"])
# Get the virtual folder to sync
virtual_dir_to_sync = os.path.join(mount_point, cwsearch_title)
if self.verbosity > 2:
print("Autodetected sync directory: '%s'", virtual_dir_to_sync)
# Get the local folder
local_dir = os.path.join(sync_dir, cwsearch_title)
if os.path.isdir(local_dir):
print("The CWSearch '{0}' has been found at location "
"'{1}'. Do not download the data again.".format(
cwsearch_title, local_dir))
# Rsync via paramiko and sftp
else:
transport = paramiko.Transport((self.host, self.port))
transport.connect(username=self.login, password=self.password)
sftp = paramiko.SFTPClient.from_transport(transport)
if self.verbosity > 2:
print("Downloading: '%s' to '%s'", virtual_dir_to_sync,
local_dir)
self._sftp_get_recursive(virtual_dir_to_sync, local_dir, sftp)
if self.verbosity > 2:
print("Downloading done")
sftp.close()
transport.close()
def _sftp_get_recursive(self, path, dest, sftp):
""" Recursive download of the data through a sftp connection.
Parameters
----------
path: str (mandatory)
the sftp path to download.
dest: str (mandatory)
the destination folder on the local machine.
sftp: paramiko sftp connection (mandatory)
"""
# Go through the current sftp folder content
dir_items = sftp.listdir(path)
os.makedirs(dest)
for item in dir_items:
# Construct the item absolute path
item_path = os.path.join(path, item)
dest_path = os.path.join(dest, item)
# If a directory is found
if self._sftp_isdir(item_path, sftp):
self._sftp_get_recursive(item_path, dest_path, sftp)
# Otherwise transfer the data
else:
sftp.get(item_path, dest_path)
def _sftp_isdir(self, path, sftp):
""" Check if a distant path is a directory through a sftp connection.
Parameters
----------
path: str (mandatory)
the sftp path to download.
sftp: paramiko sftp connection (mandatory)
"""
try:
return stat.S_ISDIR(sftp.stat(path).st_mode)
#Path does not exist, so by definition not a directory
except IOError:
return False
def _create_cwsearch(self, rql, export_type="cwsearch"):
""" Method that creates a CWSearch entity from a rql.
.. note::
The CWSearch title has to be unique, build automatically title
of the form 'auto_generated_title_x' where x is incremented
each time an element is inserted in the data base.
Parameters
----------
rql: str (mandatory)
the rql rquest that will be executed on the cw instance.
"""
# Debug message
if self.verbosity > 2:
print("Executing rql: '%s'", rql)
print("Exporting in: '%s'", export_type)
# Create a dictionary with the request meta information
data = {
"__login": self.login,
"__password": <PASSWORD>,
"path": rql,
"vid": export_type + "export"
}
# Get the result set
response = requests.post(self.url, data=data, verify=self.verify,
auth=(self.login, self.password))
if not response.ok:
raise ValueError(response.reason)
status = self.importers[export_type](response.content.decode("utf-8"))
if status["exitcode"] != 0:
raise ValueError("Can't create 'CWSearch' from RQL '{0}': "
"{1}.".format(rql, status["stderr"]))
|
<filename>cwbrowser/cw_connection.py
##########################################################################
# NSAp - Copyright (C) CEA, 2013 - 2018
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
A module to connect a CubicWeb service and send requests.
"""
# System import
from __future__ import print_function
import os
import sys
import json
import time
import stat
import glob
import csv
if sys.version_info[0] > 2:
basestring = str
from io import StringIO
else:
from StringIO import StringIO
# Third party import
import requests
import numpy
import paramiko
def load_csv(text, delimiter=";"):
    """ Parse a csv document given as a single string.

    Parameters
    ----------
    text: string (mandatory)
        the csv text.
    delimiter: string (optional, default ';')
        the csv field separator.

    Returns
    -------
    csv_lines: list
        a list containing all the csv lines.
    """
    stream = StringIO(text)
    return list(csv.reader(stream, delimiter=delimiter))
class CWInstanceConnection(object):
    """ Tool to dump the data stored in a cw instance.

    .. code-block:: python

        # Import Connection module
        from cwbrowser.cw_connection import CWInstanceConnection

        # Create dummy rqls
        rql1 = ("Any C, G Where X is Subject, X code_in_study C, "
                "X handedness 'ambidextrous', X gender G")
        rql2 = ("Any S WHERE S is Scan, S has_data A, A field '3T', "
                "S in_assessment B, B timepoint 'V1', S format 'GIS', "
                "S in_assessment C, C concerns D, D code_in_study 'ab100207'")

        # HTTP test
        url = @HTTPURL; login = @LOGIN; password = @PWD
        connection = CWInstanceConnection(url, login, password, port=9191)
        connection.execute(rql1, export_type="json")
        connection.execute_with_sync(rql2, "/tmp/fuse", timer=1)

        # HTTPS test
        url = @HTTPSURL; login = @LOGIN; password = @PWD
        connection = CWInstanceConnection(url, login, password,
                                          server_root="/home/$login")
        connection.execute(rql)

    Attributes
    ----------
    url : str
        the url to the cw instance.
    login : str
        the cw login.
    password : str
        the cw password used for both http and sftp authentication.
    """
    # Global variable that specify the supported export cw formats
    _EXPORT_TYPES = ["json", "csv", "cw"]
    # Map each export format to the callable that decodes the server answer
    importers = {
        "json": json.loads,
        "csv": load_csv,
        "cw": json.loads,
        "cwsearch": json.loads
    }

    def __init__(self, url, login, password, port=22, server_root=os.path.sep,
                 verify=True, verbosity=0):
        """ Initialize the CWInstanceConnection class.

        Parameters
        ----------
        url: str (mandatory)
            the url to the cw instance.
        login: str (mandatory)
            the cw login.
        password: str (mandatory)
            the cw password.
        port: int (optional, default 22)
            the sftp port.
        server_root: str (optional, default '/')
            the server root directory where the user mount points (chroot)
            are mapped.
        verify: bool (optional, default True)
            if unset, disable the security certificate check.
        verbosity: int (optional, default 0)
            the verbosity level.
        """
        # Class parameters: refuse to send credentials over clear http
        if not url.startswith("https"):
            raise ValueError(
                "Authentication was requested on a non secured URL ({0})."
                "Request has been blocked for security reasons.".format(url))
        self.url = url
        self.login = login
        self.password = password
        # Extract the bare host name from 'https://host[:port]/...'
        self.host = self.url.split("/")[2].split(":")[0]
        self.port = port
        self.server_root = server_root
        self.verify = verify
        self.verbosity = verbosity

    ###########################################################################
    # Public Members
    ###########################################################################

    def execute(self, rql, export_type="json", nb_tries=2):
        """ Load the rset produced by a rql request.

        Parameters
        ----------
        rql: str (mandatory)
            the rql request that will be executed on the cw instance.
        export_type: str (optional, default 'json')
            the result set export format: one defined in '_EXPORT_TYPES'.
        nb_tries: int (optional, default 2)
            number of times a request will be repeated if it fails.

        Returns
        -------
        rset: list of list of str
            a list that contains the requested entity parameters.
        """
        # Debug message
        if self.verbosity > 2:
            print("Executing rql: '{0}'".format(rql))
            print("Exporting in: '{0}'".format(export_type))
        # Check export type
        if export_type not in self._EXPORT_TYPES:
            raise Exception("Unknown export type '{0}', expect one in "
                            "'{1}'.".format(export_type, self._EXPORT_TYPES))
        # Create a dictionary with the request meta information
        data = {
            "__login": self.login,
            "__password": self.password,
            "rql": rql,
            "vid": export_type + "export",
            "_binary": 1
        }
        # The 'cw' view does not accept the binary flag
        if export_type == "cw":
            del data["_binary"]
        try_count = 0
        while True:
            try:  # Get the result set, it will always try at least once
                try_count += 1
                response = requests.post(
                    self.url, data=data, verify=self.verify,
                    auth=(self.login, self.password))
                if not response.ok:
                    raise ValueError(response.reason)
                rset = self.importers[export_type](
                    response.content.decode("utf-8"))
                break
            except Exception as e:
                if try_count >= nb_tries:
                    # Keep the original error type/traceback but augment the
                    # message with retry information. Bug fix: Python 3
                    # exceptions have no '.message' attribute, use '.args'.
                    e.args = (
                        ("{0}\nFailed to get data after {1} tries.\n"
                         "Request: {2}").format(e, nb_tries, rql),)
                    raise
                time.sleep(1)  # wait 1 second before retrying
        # Debug message
        if self.verbosity > 2:
            print("RQL result: '{0}'".format(rset))
        return rset

    def execute_with_sync(self, rql, sync_dir, timer=3, nb_tries=3):
        """ Load the rset of a rql request through the sftp protocol using
        the CWSearch mechanism.

        Parameters
        ----------
        rql: str (mandatory)
            the rql request that will be executed on the cw instance.
        sync_dir: str (mandatory)
            the destination folder where the rql data are synchronized.
        timer: int (optional, default 3)
            the time in seconds we are waiting for the fuse or twisted
            server update.
        nb_tries: int (optional, default 3)
            if the update has not been detected after 'nb_tries' trials
            raise an exception.

        Returns
        -------
        rset: list of list or list of dict
            a list that contains the requested cubicweb database parameters
            when a json rset is generated, a list of dictionaries if a csv
            rset is generated.
        """
        # Create the CWSearch
        self._create_cwsearch(rql)
        # Wait for the update: the server stores rql with double quotes,
        # so normalize quotes on both sides before comparing.
        try_nb = 1
        cwsearch_title = None
        rql = rql.replace("'", '"')
        while try_nb <= nb_tries:
            # Let the fuse/twisted server catch up before polling
            if self.verbosity > 2:
                print("Sleeping: '{0} sec'".format(timer))
            time.sleep(timer)
            # Get all the user CWSearch entities in the database
            rset = self.execute(
                "Any S, T, P Where S is CWSearch, S title T, S path P")
            # Check if the cubicweb update has been done.
            # If true, get the associated CWSearch title.
            for item in rset:
                if item[2].replace("'", '"') == rql:
                    cwsearch_title = item[1]
                    break
            if cwsearch_title is not None:
                break
            # Increment
            try_nb += 1
        # If the search is not created
        if try_nb == (nb_tries + 1):
            raise IOError("The search has not been created properly.")
        # Get instance parameters
        cw_params = self.execute(rql="", export_type="cw")
        if self.verbosity > 2:
            print("Autodetected sync parameters: '{0}'".format(cw_params))
        # Copy the data with the sftp fuse mount point
        self._get_server_dataset(sync_dir, cwsearch_title, cw_params)
        # Load the rset: exactly one 'request_result.*' file is expected
        local_dir = os.path.join(sync_dir, cwsearch_title)
        rset_file = glob.glob(os.path.join(local_dir, "request_result.*"))
        if self.verbosity > 2:
            print("Autodetected rset file at location '{0}'".format(
                rset_file))
        if len(rset_file) != 1:
            # Bug fix: the message used to reference the undefined name
            # 'rset_json_file', raising a NameError instead of this IOError.
            raise IOError("'{0}' rset file not supported, expect a single "
                          "rset file.".format(rset_file))
        rset_file = rset_file[0]
        filext = os.path.splitext(rset_file)[1]
        # > deal with json file
        if filext == ".json":
            with open(rset_file) as json_data:
                rset = json.load(json_data)
            # Tune the rset files in order to point in the local filesystem
            if not local_dir.endswith(os.path.sep):
                local_dir += os.path.sep
            if not cw_params["basedir"].endswith(os.path.sep):
                cw_params["basedir"] += os.path.sep
            for rset_items in rset:
                for item_index in range(len(rset_items)):
                    item = rset_items[item_index]
                    if (isinstance(item, basestring) and
                            item.startswith(cw_params["basedir"])):
                        rset_items[item_index] = item.replace(
                            cw_params["basedir"], local_dir, 1)
        # > deal with csv file
        elif filext == ".csv":
            with open(rset_file) as csv_data:
                data = csv.DictReader(csv_data, delimiter=";", quotechar="|")
                rset = [item for item in data]
        # > raise an error when the file extension is not supported
        else:
            raise IOError("Unknown '{0}' rset extension.".format(rset_file))
        # Debug message
        if self.verbosity > 2:
            print("RQL result: '{0}'".format(rset))
        return rset

    def get_genotype_measure(self, gene_name, genomic_measure, nb_tries=3):
        """ Load the genomic measures stored in PLINK format.

        Parameters
        ----------
        gene_name: str (mandatory)
            a gene name used to limit the number of measures that will be
            loaded.
        genomic_measure: str (mandatory)
            the genomic measure name associated to PLINK files.
        nb_tries: int (optional, default 3)
            number of times the request will be repeated if it fails.

        Returns
        -------
        rset: dict
            dictionary with 'labels' and 'records' (that contains
            the requested cubicweb database parameters).
        """
        # Debug message
        if self.verbosity > 2:
            print("Genotype extraction: '{0}', '{1}'".format(
                genomic_measure, gene_name))
        # Create a dictionary with the request meta information
        data = {
            "__login": self.login,
            "__password": self.password,
            "vid": "metagen-search-json",
            "measure": genomic_measure,
            "gene": gene_name,
            "export": "data"
        }
        try_count = 0
        while True:
            try:  # Get the result set, it will always try at least once
                try_count += 1
                response = requests.post(
                    self.url, data=data, verify=self.verify,
                    auth=(self.login, self.password))
                if not response.ok:
                    raise ValueError(response.reason)
                rset = self.importers["json"](response.content.decode("utf-8"))
                break
            except Exception as e:
                if try_count >= nb_tries:
                    # Bug fix: the message used to read data["rql"], a key
                    # this request never sets (KeyError); Python 3 exceptions
                    # also have no '.message', use '.args' instead.
                    e.args = (
                        ("{0}\nFailed to get data after {1} tries.\n"
                         "Measure: {2}, gene: {3}").format(
                            e, nb_tries, genomic_measure, gene_name),)
                    raise
                time.sleep(1)  # wait 1 second before retrying
        # Debug message
        if self.verbosity > 2:
            print("Genotype result: '{0}'".format(rset))
        return rset

    ###########################################################################
    # Private Members
    ###########################################################################

    def _get_server_dataset(self, sync_dir, cwsearch_title, cw_params):
        """ Download the CWSearch result through a sftp connection.

        .. note::
            If a folder 'sync_dir' + 'cwsearch_title' is detected on the
            local machine, no download is run. We assume that the CWSearch
            has already been downloaded properly.

        Parameters
        ----------
        sync_dir: str (mandatory)
            the destination folder where the rql data are synchronized.
        cwsearch_title: str (mandatory)
            the title of the CWSearch that will be downloaded.
        cw_params: dict (mandatory)
            a dictionary containing cw/fuse parameters.
        """
        # Build the mount point
        mount_point = os.path.join(
            self.server_root, cw_params["instance_name"])
        # Get the virtual folder to sync
        virtual_dir_to_sync = os.path.join(mount_point, cwsearch_title)
        if self.verbosity > 2:
            print("Autodetected sync directory: '{0}'".format(
                virtual_dir_to_sync))
        # Get the local folder
        local_dir = os.path.join(sync_dir, cwsearch_title)
        if os.path.isdir(local_dir):
            print("The CWSearch '{0}' has been found at location "
                  "'{1}'. Do not download the data again.".format(
                      cwsearch_title, local_dir))
        # Rsync via paramiko and sftp
        else:
            transport = paramiko.Transport((self.host, self.port))
            try:
                transport.connect(username=self.login, password=self.password)
                sftp = paramiko.SFTPClient.from_transport(transport)
                try:
                    if self.verbosity > 2:
                        print("Downloading: '{0}' to '{1}'".format(
                            virtual_dir_to_sync, local_dir))
                    self._sftp_get_recursive(
                        virtual_dir_to_sync, local_dir, sftp)
                    if self.verbosity > 2:
                        print("Downloading done")
                finally:
                    sftp.close()
            finally:
                # Bug fix: the transport used to leak when the download
                # raised; always release the ssh connection.
                transport.close()

    def _sftp_get_recursive(self, path, dest, sftp):
        """ Recursively download a remote folder through a sftp connection.

        Parameters
        ----------
        path: str (mandatory)
            the remote sftp folder to download.
        dest: str (mandatory)
            the destination folder on the local machine.
        sftp: paramiko sftp connection (mandatory)
        """
        # List the remote folder first, then create its local mirror
        dir_items = sftp.listdir(path)
        os.makedirs(dest)
        for item in dir_items:
            # Construct the item absolute paths
            item_path = os.path.join(path, item)
            dest_path = os.path.join(dest, item)
            # If a directory is found, recurse
            if self._sftp_isdir(item_path, sftp):
                self._sftp_get_recursive(item_path, dest_path, sftp)
            # Otherwise transfer the data
            else:
                sftp.get(item_path, dest_path)

    def _sftp_isdir(self, path, sftp):
        """ Tell whether a remote path is a directory over a sftp connection.

        Parameters
        ----------
        path: str (mandatory)
            the remote sftp path to inspect.
        sftp: paramiko sftp connection (mandatory)
        """
        try:
            return stat.S_ISDIR(sftp.stat(path).st_mode)
        except IOError:
            # Path does not exist, so by definition not a directory
            return False

    def _create_cwsearch(self, rql, export_type="cwsearch"):
        """ Create a CWSearch entity from a rql.

        .. note::
            The CWSearch title has to be unique: the server automatically
            builds a title of the form 'auto_generated_title_x' where x is
            incremented each time an element is inserted in the data base.

        Parameters
        ----------
        rql: str (mandatory)
            the rql request that will be executed on the cw instance.
        export_type: str (optional, default 'cwsearch')
            the server view used to create the search.

        Raises
        ------
        ValueError: if the server rejects the request or the CWSearch
            creation reports a non-zero exit code.
        """
        # Debug message
        if self.verbosity > 2:
            print("Executing rql: '{0}'".format(rql))
            print("Exporting in: '{0}'".format(export_type))
        # Create a dictionary with the request meta information
        data = {
            "__login": self.login,
            "__password": self.password,
            "path": rql,
            "vid": export_type + "export"
        }
        # Get the result set
        response = requests.post(self.url, data=data, verify=self.verify,
                                 auth=(self.login, self.password))
        if not response.ok:
            raise ValueError(response.reason)
        status = self.importers[export_type](response.content.decode("utf-8"))
        if status["exitcode"] != 0:
            raise ValueError("Can't create 'CWSearch' from RQL '{0}': "
                             "{1}.".format(rql, status["stderr"]))
|
en
| 0.603077
|
########################################################################## # NSAp - Copyright (C) CEA, 2013 - 2018 # Distributed under the terms of the CeCILL-B license, as published by # the CEA-CNRS-INRIA. Refer to the LICENSE file or to # http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html # for details. ########################################################################## A module to connect a CubicWeb service and send requests. # System import # Third party import Load a csv. Parameters ---------- text: string (mandatory) the csv text. Returns ------- csv_lines: list a list containing all the csv lines. Tool to dump the data stored in a cw instance. .. code-block:: python # Import Connection module from cwbrowser.cw_connection import CWInstanceConnection # Create dummy rqls rql1 = ("Any C, G Where X is Subject, X code_in_study C, " "X handedness 'ambidextrous', X gender G") rql2 = ("Any S WHERE S is Scan, S has_data A, A field '3T', " "S in_assessment B, B timepoint 'V1', S format 'GIS', " "S in_assessment C, C concerns D, D code_in_study 'ab100207'") # HTTP test url = @HTTPURL; login = @LOGIN; password = @PWD connection = CWInstanceConnection(url, login, password, port=9191) connection.execute(rql1, export_type="json") connection.execute_with_sync(rql2, "/tmp/fuse", timer=1) # HTTPS test url = @HTTPSURL; login = @LOGIN; password = @PWD connection = CWInstanceConnection(url, login, password, server_root="/home/$login") connection.execute(rql) Attributes ---------- url : str the url to the cw instance. login : str the cw login. opener: OpenerDirector object that contains the connexion to the cw instance. # Global variable that specify the supported export cw formats Initilize the HTTPConnection class. Parameters ---------- url: str (mandatory) the url to the cw instance. login: str (mandatory) the cw login. password: str (mandatory) the <PASSWORD>. port: int (optional default 22) the sftp port. 
server_root: str (optional default '/') the server root directory where the user mount points (chroot) are mapped. verify: bool (optional, default True) if unset, disable the security certificate check. verbosity: int (optional default 0) the verbosity level. # Class parameters ########################################################################### # Public Members ########################################################################### Method that loads the rset from a rql request. Parameters ---------- rql: str (mandatory) the rql rquest that will be executed on the cw instance. export_type: str (optional default 'json') the result set export format: one defined in '_EXPORT_TYPES'. nb_tries: int (optional default 2) number of times a request will be repeated if it fails. Returns ------- rset: list of list of str a list that contains the requested entity parameters. # Debug message # Check export type # Create a dictionary with the request meta information # Get the result set, it will always try at least once # keep original message of e and add infos # wait 1 second before retrying # Debug message Method that loads the rset from a rql request through sftp protocol using the CWSearch mechanism. Parameters ---------- rql: str (mandatory) the rql rquest that will be executed on the cw instance. sync_dir: str (mandatory) the destination folder where the rql data are synchronized. timer: int (optional default 3) the time in seconds we are waiting for the fuse or twisted server update. nb_tries: int (optional default 3) if the update has not been detected after 'nb_of_try' trials raise an exception. Returns ------- rset: list of list or list of dict a list that contains the requested cubicweb database parameters when a json rset is generated, a list of dictionaries if a csv rset is generated. # Create the CWSearch # Wait for the update: use double quote in rql # Timer # Get all the user CWSearch in the database # Check if the cubicweb update has been done. 
# If true, get the associated CWSearch title # Increment # If the search is not created # Get instance parameters # Copy the data with the sftp fuse mount point # Load the rset # > deal with json file # Tune the rset files in order to point in the local filesystem # > deal with csv file # > raise an error when the file extension is not supported # Debug message Method that loads the genomic measures stored in PLINK format. Parameters ---------- gene_name: str (mandatory) a gene name used to limit the number of measures that will be loaded. genomic_measure: str (mandatory) the genomic measure name associated to PLINK files. nb_tries: int (optional default 3) if the update has not been detected after 'nb_of_try' trials raise an exception. Returns ------- rset: dict dictionary with 'labels' and 'records' (that contains the requested cubicweb database parameters). # Debug message # Create a dictionary with the request meta information # Get the result set, it will always try at least once # keep original message of e and add infos # wait 1 second before retrying # Debug message ########################################################################### # Private Members ########################################################################### Download the CWSearch result trough a sftp connection. .. note:: If a folder 'sync_dir' + 'cwsearch_title' is detected on the local machine, no download is run. We assume that the CWSearch has already been downloaded properly. Parameters ---------- sync_dir: str (mandatory) the destination folder where the rql data are synchronized. cwsearch_title: str (mandatory) the title of the CWSearch that will be downloaded. cw_params: dict (mandatory) a dictionary containing cw/fuse parameters. # Build the mount point # Get the virtual folder to sync # Get the local folder # Rsync via paramiko and sftp Recursive download of the data through a sftp connection. Parameters ---------- path: str (mandatory) the sftp path to download. 
dest: str (mandatory) the destination folder on the local machine. sftp: paramiko sftp connection (mandatory) # Go through the current sftp folder content # Construct the item absolute path # If a directory is found # Otherwise transfer the data Check if a distant path is a directory through a sftp connection. Parameters ---------- path: str (mandatory) the sftp path to download. sftp: paramiko sftp connection (mandatory) #Path does not exist, so by definition not a directory Method that creates a CWSearch entity from a rql. .. note:: The CWSearch title has to be unique, build automatically title of the form 'auto_generated_title_x' where x is incremented each time an element is inserted in the data base. Parameters ---------- rql: str (mandatory) the rql rquest that will be executed on the cw instance. # Debug message # Create a dictionary with the request meta information # Get the result set
| 2.671197
| 3
|
cloudroast/objectstorage/smoke/account_smoke.py
|
RULCSoft/cloudroast
| 0
|
6629256
|
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
# Template used to build assertion messages on unexpected status codes.
STATUS_CODE_MSG = ("{method} expected status code {expected}"
                   " received status code {received}")
# Base name shared by all containers created for this smoke test.
CONTAINER_NAME = 'account_smoke_test_container'
# Expected 'content-type' response headers for each listing format.
C_TYPE_TEXT = 'text/plain; charset=utf-8'
C_TYPE_JSON = 'application/json; charset=utf-8'
C_TYPE_XML = 'application/xml; charset=utf-8'
# HTTP success status returned by the account listing endpoint.
HTTP_OK = 200
class AccountSmokeTest(ObjectStorageFixture):
    """Smoke tests for account-level container listing and metadata.

    Each listing test issues the request with a different format/Accept/query
    combination and verifies both the response status code and the
    'content-type' response header.
    """

    @classmethod
    def setUpClass(cls):
        """Create three containers so listing/marker tests have data."""
        super(AccountSmokeTest, cls).setUpClass()
        cls.container_names = ['a_{0}'.format(CONTAINER_NAME),
                               'b_{0}'.format(CONTAINER_NAME),
                               'c_{0}'.format(CONTAINER_NAME)]
        for container_name in cls.container_names:
            cls.client.create_container(container_name)

    @classmethod
    def tearDownClass(cls):
        """Delete the containers created in setUpClass.

        Bug fix: the original called super().setUpClass() here, re-running
        class setup instead of tearing the fixture down.
        """
        super(AccountSmokeTest, cls).tearDownClass()
        for container_name in cls.container_names:
            cls.behaviors.force_delete_containers([container_name])

    def _assert_list_response(self, response, method, expected_c_type):
        """Assert a container-list response is HTTP 200 with the expected
        'content-type' header.

        Extracted from twelve near-identical test bodies; also fixes the
        assertion message, which previously mislabeled the content-type
        check as 'x-account-object-count'.
        """
        expected = HTTP_OK
        received = response.status_code
        self.assertEqual(
            expected,
            received,
            msg=STATUS_CODE_MSG.format(
                method=method,
                expected=expected,
                received=str(received)))
        expected = expected_c_type
        received = response.headers.get('content-type')
        self.assertEqual(
            expected,
            received,
            msg="'content-type' header value expected: {0} received:"
                " {1}".format(expected, received))

    def test_container_list(self):
        response = self.client.list_containers()
        self._assert_list_response(response, "list containers", C_TYPE_TEXT)

    def test_container_list_with_format_json_query_parameter(self):
        response = self.client.list_containers(params={"format": "json"})
        self._assert_list_response(
            response, "list containers using content-type json", C_TYPE_JSON)

    def test_container_list_with_format_xml_query_parameter(self):
        response = self.client.list_containers(params={"format": "xml"})
        self._assert_list_response(
            response, "list containers using content-type xml", C_TYPE_XML)

    def test_container_list_with_accept_header(self):
        response = self.client.list_containers(headers={"Accept": "*/*"})
        self._assert_list_response(
            response, "list containers using accept */*", C_TYPE_TEXT)

    def test_container_list_with_text_accept_header(self):
        response = self.client.list_containers(
            headers={"Accept": "text/plain"})
        self._assert_list_response(
            response, "list containers using accept text/plain", C_TYPE_TEXT)

    def test_container_list_with_json_accept_header(self):
        response = self.client.list_containers(
            headers={"Accept": "application/json"})
        self._assert_list_response(
            response, "list containers using accept application/json",
            C_TYPE_JSON)

    def test_container_list_with_xml_accept_header(self):
        response = self.client.list_containers(
            headers={"Accept": "application/xml"})
        self._assert_list_response(
            response, "list containers using accept application/xml",
            C_TYPE_XML)

    def test_container_list_with_limit_query_parameter(self):
        response = self.client.list_containers(params={"limit": "10"})
        self._assert_list_response(
            response, "list containers using limit query parameter",
            C_TYPE_TEXT)

    def test_container_list_with_marker_query_parameter(self):
        response = self.client.list_containers(params={"marker": "a"})
        self._assert_list_response(
            response, "list containers using marker query parameter",
            C_TYPE_TEXT)

    def test_container_list_with_limit_and_marker_query_parameters(self):
        response = self.client.list_containers(
            params={"limit": "3", "marker": "a"})
        self._assert_list_response(
            response,
            "list containers using limit and marker query parameters",
            C_TYPE_TEXT)

    def test_container_list_with_limit_marker_format_json(self):
        response = self.client.list_containers(
            params={"limit": "3", "marker": "a", "format": "json"})
        self._assert_list_response(
            response,
            "list containers using limit, marker, and format json query"
            " parameters",
            C_TYPE_JSON)

    def test_container_list_with_limit_marker_format_xml(self):
        response = self.client.list_containers(
            params={"limit": "3", "marker": "a", "format": "xml"})
        self._assert_list_response(
            response,
            "list containers using limit, marker, and format xml query"
            " parameters",
            C_TYPE_XML)

    def test_metadata_retrieval_with_existing_account(self):
        response = self.client.get_account_metadata()
        method = "account metadata retrieval"
        expected = 204
        received = response.status_code
        self.assertEqual(
            expected,
            received,
            msg=STATUS_CODE_MSG.format(
                method=method,
                expected=expected,
                received=str(received)))
        # Headers that must always be present on an account metadata response.
        self.assertIn('x-account-bytes-used', response.headers)
        self.assertIn('date', response.headers)
        self.assertIn('x-timestamp', response.headers)
        self.assertIn('x-account-container-count', response.headers)
        self.assertIn('x-account-object-count', response.headers)
        expected = 'bytes'
        received = response.headers.get('accept-ranges')
        self.assertEqual(
            expected,
            received,
            msg="'accept-ranges' header value expected: {0} received"
                " {1}".format(expected, received))
        expected = 0
        received = int(response.headers.get('content-length', 0))
        self.assertEqual(
            expected,
            received,
            msg="'content-length' header value expected: {0} received:"
                " {1}".format(expected, received))
        expected = C_TYPE_TEXT
        received = response.headers.get('content-type')
        self.assertEqual(
            expected,
            received,
            msg="'content-type' header value expected: {0} received:"
                " {1}".format(expected, received))
|
"""
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
# Template for assertion-failure messages on unexpected HTTP status codes.
STATUS_CODE_MSG = ("{method} expected status code {expected}"
                   " received status code {received}")
# Base name shared by all containers created for this smoke test run.
CONTAINER_NAME = 'account_smoke_test_container'
# Expected 'content-type' response-header values for each listing format.
C_TYPE_TEXT = 'text/plain; charset=utf-8'
C_TYPE_JSON = 'application/json; charset=utf-8'
C_TYPE_XML = 'application/xml; charset=utf-8'
# HTTP status code for a successful listing request.
HTTP_OK = 200
class AccountSmokeTest(ObjectStorageFixture):
    """Smoke tests for account-level container listing and metadata.

    Each listing test issues the request with a different format/Accept/query
    combination and verifies both the response status code and the
    'content-type' response header.
    """

    @classmethod
    def setUpClass(cls):
        """Create three containers so listing/marker tests have data."""
        super(AccountSmokeTest, cls).setUpClass()
        cls.container_names = ['a_{0}'.format(CONTAINER_NAME),
                               'b_{0}'.format(CONTAINER_NAME),
                               'c_{0}'.format(CONTAINER_NAME)]
        for container_name in cls.container_names:
            cls.client.create_container(container_name)

    @classmethod
    def tearDownClass(cls):
        """Delete the containers created in setUpClass.

        Bug fix: the original called super().setUpClass() here, re-running
        class setup instead of tearing the fixture down.
        """
        super(AccountSmokeTest, cls).tearDownClass()
        for container_name in cls.container_names:
            cls.behaviors.force_delete_containers([container_name])

    def _assert_list_response(self, response, method, expected_c_type):
        """Assert a container-list response is HTTP 200 with the expected
        'content-type' header.

        Extracted from twelve near-identical test bodies; also fixes the
        assertion message, which previously mislabeled the content-type
        check as 'x-account-object-count'.
        """
        expected = HTTP_OK
        received = response.status_code
        self.assertEqual(
            expected,
            received,
            msg=STATUS_CODE_MSG.format(
                method=method,
                expected=expected,
                received=str(received)))
        expected = expected_c_type
        received = response.headers.get('content-type')
        self.assertEqual(
            expected,
            received,
            msg="'content-type' header value expected: {0} received:"
                " {1}".format(expected, received))

    def test_container_list(self):
        response = self.client.list_containers()
        self._assert_list_response(response, "list containers", C_TYPE_TEXT)

    def test_container_list_with_format_json_query_parameter(self):
        response = self.client.list_containers(params={"format": "json"})
        self._assert_list_response(
            response, "list containers using content-type json", C_TYPE_JSON)

    def test_container_list_with_format_xml_query_parameter(self):
        response = self.client.list_containers(params={"format": "xml"})
        self._assert_list_response(
            response, "list containers using content-type xml", C_TYPE_XML)

    def test_container_list_with_accept_header(self):
        response = self.client.list_containers(headers={"Accept": "*/*"})
        self._assert_list_response(
            response, "list containers using accept */*", C_TYPE_TEXT)

    def test_container_list_with_text_accept_header(self):
        response = self.client.list_containers(
            headers={"Accept": "text/plain"})
        self._assert_list_response(
            response, "list containers using accept text/plain", C_TYPE_TEXT)

    def test_container_list_with_json_accept_header(self):
        response = self.client.list_containers(
            headers={"Accept": "application/json"})
        self._assert_list_response(
            response, "list containers using accept application/json",
            C_TYPE_JSON)

    def test_container_list_with_xml_accept_header(self):
        response = self.client.list_containers(
            headers={"Accept": "application/xml"})
        self._assert_list_response(
            response, "list containers using accept application/xml",
            C_TYPE_XML)

    def test_container_list_with_limit_query_parameter(self):
        response = self.client.list_containers(params={"limit": "10"})
        self._assert_list_response(
            response, "list containers using limit query parameter",
            C_TYPE_TEXT)

    def test_container_list_with_marker_query_parameter(self):
        response = self.client.list_containers(params={"marker": "a"})
        self._assert_list_response(
            response, "list containers using marker query parameter",
            C_TYPE_TEXT)

    def test_container_list_with_limit_and_marker_query_parameters(self):
        response = self.client.list_containers(
            params={"limit": "3", "marker": "a"})
        self._assert_list_response(
            response,
            "list containers using limit and marker query parameters",
            C_TYPE_TEXT)

    def test_container_list_with_limit_marker_format_json(self):
        response = self.client.list_containers(
            params={"limit": "3", "marker": "a", "format": "json"})
        self._assert_list_response(
            response,
            "list containers using limit, marker, and format json query"
            " parameters",
            C_TYPE_JSON)

    def test_container_list_with_limit_marker_format_xml(self):
        response = self.client.list_containers(
            params={"limit": "3", "marker": "a", "format": "xml"})
        self._assert_list_response(
            response,
            "list containers using limit, marker, and format xml query"
            " parameters",
            C_TYPE_XML)

    def test_metadata_retrieval_with_existing_account(self):
        response = self.client.get_account_metadata()
        method = "account metadata retrieval"
        expected = 204
        received = response.status_code
        self.assertEqual(
            expected,
            received,
            msg=STATUS_CODE_MSG.format(
                method=method,
                expected=expected,
                received=str(received)))
        # Headers that must always be present on an account metadata response.
        self.assertIn('x-account-bytes-used', response.headers)
        self.assertIn('date', response.headers)
        self.assertIn('x-timestamp', response.headers)
        self.assertIn('x-account-container-count', response.headers)
        self.assertIn('x-account-object-count', response.headers)
        expected = 'bytes'
        received = response.headers.get('accept-ranges')
        self.assertEqual(
            expected,
            received,
            msg="'accept-ranges' header value expected: {0} received"
                " {1}".format(expected, received))
        expected = 0
        received = int(response.headers.get('content-length', 0))
        self.assertEqual(
            expected,
            received,
            msg="'content-length' header value expected: {0} received:"
                " {1}".format(expected, received))
        expected = C_TYPE_TEXT
        received = response.headers.get('content-type')
        self.assertEqual(
            expected,
            received,
            msg="'content-type' header value expected: {0} received:"
                " {1}".format(expected, received))
|
en
| 0.852152
|
Copyright 2013 Rackspace Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| 1.710023
| 2
|
xt/algorithm/impala/default_config.py
|
ZZHsunsky/xingtian
| 1
|
6629257
|
<gh_stars>1-10
"""
Static variables (default hyperparameters) for the IMPALA algorithm.
"""
GAMMA = 0.99  # presumably the RL discount factor — confirm against trainer usage
BATCH_SIZE = 512  # samples per training batch — confirm against trainer usage
|
"""
Static variables (default hyperparameters) for the IMPALA algorithm.
"""
GAMMA = 0.99  # presumably the RL discount factor — confirm against trainer usage
BATCH_SIZE = 512  # samples per training batch — confirm against trainer usage
|
en
| 0.821299
|
Static Variable in impala
| 0.691563
| 1
|
recommenders/knn_collaborative_user.py
|
edervishaj/spotify-recsys-challenge
| 3
|
6629258
|
<reponame>edervishaj/spotify-recsys-challenge<gh_stars>1-10
import time
from recommenders.recommender import Recommender
from recommenders.similarity.similarity import *
class Knn_collabrative_user(Recommender):
    """User-based collaborative-filtering KNN recommender.

    Builds a similarity model from the URM with one of several similarity
    measures, then estimates ratings by multiplying model and URM.
    """

    def __init__(self):
        # Bug fix: the original called bare `super()`, which only returns the
        # proxy object and never runs the base initializer.
        super().__init__()

    def compute_model(self, top_k=50, sm_type="cosine", shrink=0, alpha=0, beta=0, threshold=0, verbose=False, binary=True):
        """
        :param top_k: int, elements to keep per row after model computation
        :param sm_type: string, similarity to use (use constants to specify)
        :param shrink: float, shrink term for the similarity
        :param alpha: float, parameter used for asymmetric cosine, p3alpha, rp3beta and tversky
        :param beta: float, parameter used for rp3beta and tversky
        :param threshold: float, threshold to cut similarity values after computation
        :param verbose: boolean, if true print debug information
        :param binary: boolean, if true binarize interactions before computing similarity
        :return: sparse matrix, model for all the similarities
        """
        #TODO: remove after update
        import warnings
        warnings.warn('This function still use the old version of the metrics, not the s_plus ones')
        if verbose:
            print("[ Creating model with " + sm_type + " similarity ]")
            start_time = time.time()
        if sm_type == COSINE:
            self.model = cosine(self.urm, k=top_k, shrink=shrink, threshold=threshold, binary=binary)
        elif sm_type == JACCARD:
            self.model = jaccard(self.urm, k=top_k, shrink=shrink, threshold=threshold, binary=binary)
        elif sm_type == TANIMOTO:
            self.model = tanimoto(self.urm, k=top_k, shrink=shrink, threshold=threshold, binary=binary)
        elif sm_type == AS_COSINE:
            # NOTE(review): unlike the other branches this one does not pass
            # threshold/binary — confirm whether that asymmetry is intended.
            self.model = cosine(self.urm, alpha=alpha, k=top_k, shrink=shrink)
        elif sm_type == DICE:
            self.model = dice(self.urm, k=top_k, shrink=shrink, threshold=threshold, binary=binary)
        elif sm_type == TVERSKY:
            self.model = tversky(self.urm, alpha=alpha, beta=beta, shrink=shrink)
        if verbose:
            print("time: " + str(int(time.time() - start_time) / 60))

    def compute_rating(self, top_k=500, verbose=False, small=False):
        """
        :param top_k: int, elements to keep per row after the fitting process
        :param verbose: boolean, if true print debug information
        :param small: boolean, if true restrict model/urm to self.pid rows
        :return: sparse matrix, estimated urm
        """
        if small:
            self.model = sp.csr_matrix(self.model)[self.pid]
        self.urm = sp.csr_matrix(self.urm)
        self.model = sp.csr_matrix(self.model)
        if verbose:
            print("[ Compute ratings ]")
        # (removed dead statement `self.model = self.model` — a no-op)
        start_time = time.time()
        print(self.model.shape, self.urm.T.shape)
        self.eurm = dot(self.model, self.urm, k=top_k)
        print("eurm shape: " + str(self.eurm.shape))
        if small:
            self.urm = sp.csr_matrix(self.urm)[self.pid]
        # Zero out already-seen interactions in the estimated urm.
        tmp = self.urm.tocoo()
        row = tmp.row
        col = tmp.col
        self.eurm = sp.lil_matrix(self.eurm)
        self.eurm[row, col]=0
        self.eurm = sp.csr_matrix(self.eurm)
        if verbose:
            print("time: " + str(int(time.time() - start_time) / 60))
        #TODO: already seen elimination
        return self.eurm
|
import time
from recommenders.recommender import Recommender
from recommenders.similarity.similarity import *
class Knn_collabrative_user(Recommender):
    """User-based collaborative-filtering KNN recommender.

    Builds a similarity model from the URM with one of several similarity
    measures, then estimates ratings by multiplying model and URM.
    """

    def __init__(self):
        # Bug fix: the original called bare `super()`, which only returns the
        # proxy object and never runs the base initializer.
        super().__init__()

    def compute_model(self, top_k=50, sm_type="cosine", shrink=0, alpha=0, beta=0, threshold=0, verbose=False, binary=True):
        """
        :param top_k: int, elements to keep per row after model computation
        :param sm_type: string, similarity to use (use constants to specify)
        :param shrink: float, shrink term for the similarity
        :param alpha: float, parameter used for asymmetric cosine, p3alpha, rp3beta and tversky
        :param beta: float, parameter used for rp3beta and tversky
        :param threshold: float, threshold to cut similarity values after computation
        :param verbose: boolean, if true print debug information
        :param binary: boolean, if true binarize interactions before computing similarity
        :return: sparse matrix, model for all the similarities
        """
        #TODO: remove after update
        import warnings
        warnings.warn('This function still use the old version of the metrics, not the s_plus ones')
        if verbose:
            print("[ Creating model with " + sm_type + " similarity ]")
            start_time = time.time()
        if sm_type == COSINE:
            self.model = cosine(self.urm, k=top_k, shrink=shrink, threshold=threshold, binary=binary)
        elif sm_type == JACCARD:
            self.model = jaccard(self.urm, k=top_k, shrink=shrink, threshold=threshold, binary=binary)
        elif sm_type == TANIMOTO:
            self.model = tanimoto(self.urm, k=top_k, shrink=shrink, threshold=threshold, binary=binary)
        elif sm_type == AS_COSINE:
            # NOTE(review): unlike the other branches this one does not pass
            # threshold/binary — confirm whether that asymmetry is intended.
            self.model = cosine(self.urm, alpha=alpha, k=top_k, shrink=shrink)
        elif sm_type == DICE:
            self.model = dice(self.urm, k=top_k, shrink=shrink, threshold=threshold, binary=binary)
        elif sm_type == TVERSKY:
            self.model = tversky(self.urm, alpha=alpha, beta=beta, shrink=shrink)
        if verbose:
            print("time: " + str(int(time.time() - start_time) / 60))

    def compute_rating(self, top_k=500, verbose=False, small=False):
        """
        :param top_k: int, elements to keep per row after the fitting process
        :param verbose: boolean, if true print debug information
        :param small: boolean, if true restrict model/urm to self.pid rows
        :return: sparse matrix, estimated urm
        """
        if small:
            self.model = sp.csr_matrix(self.model)[self.pid]
        self.urm = sp.csr_matrix(self.urm)
        self.model = sp.csr_matrix(self.model)
        if verbose:
            print("[ Compute ratings ]")
        # (removed dead statement `self.model = self.model` — a no-op)
        start_time = time.time()
        print(self.model.shape, self.urm.T.shape)
        self.eurm = dot(self.model, self.urm, k=top_k)
        print("eurm shape: " + str(self.eurm.shape))
        if small:
            self.urm = sp.csr_matrix(self.urm)[self.pid]
        # Zero out already-seen interactions in the estimated urm.
        tmp = self.urm.tocoo()
        row = tmp.row
        col = tmp.col
        self.eurm = sp.lil_matrix(self.eurm)
        self.eurm[row, col]=0
        self.eurm = sp.csr_matrix(self.eurm)
        if verbose:
            print("time: " + str(int(time.time() - start_time) / 60))
        #TODO: already seen elimination
        return self.eurm
|
en
| 0.475626
|
:param matrix: sparse matrix, urm for knn item, p3alpha, p3beta, urm.T for knn user :param top_k: int, element to take for each row after model computation problem :param sm_type: string, similarity to use (use constant in this class to specify) :param shrink: float, shrink term for the similarity :param alpha: float, parameter used for asimmetric cosine, p3alpha, rp3beta and tversky :param beta: float, parameter used rp3beta and tversky :param threshold: float, threshold to cut similarity value after computation :param verbose: boolena, if true print debug information :return: sparse matrix, model for all the similarity #TODO: remove after update :param urm: sparse matrix :param model: sparse matrix :param top_k: int, element to take for each row after fitting process :param verbose: boolean, if true print debug information :return: sparse matrix, estimated urm #TODO: already seen elimination
| 2.484563
| 2
|
setup.py
|
chr6192/n-beats
| 0
|
6629259
|
import os
from setuptools import setup
BASE_VERSION = '1.2.0'  # update regardless whether you update keras or pytorch or both.
FRAMEWORK = os.getenv('FRAMEWORK', 'keras')  # keras, pytorch.

# common packages.
INSTALL_REQUIRES = [
    'numpy==1.16.2',
    'pandas>=0.25.3',
    'matplotlib>=3.0'
]

if FRAMEWORK == 'keras':
    LIB_PACKAGE = ['nbeats_keras']
    INSTALL_REQUIRES.extend([
        'keras',
        'tensorflow'
    ])
elif FRAMEWORK == 'pytorch':
    LIB_PACKAGE = ['nbeats_pytorch']
    INSTALL_REQUIRES.extend([
        'torch',
        'torchvision'
    ])
else:
    raise ValueError('Unknown framework.')

# Read the long description with a context manager and explicit encoding:
# the original `open('README.md').read()` left the file handle unclosed.
with open('README.md', encoding='utf-8') as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    name=f'nbeats-{FRAMEWORK}',
    version=BASE_VERSION,
    description='N-Beats',
    author='<NAME> (Pytorch), <NAME> (Keras)',
    license='MIT',
    long_description_content_type='text/markdown',
    long_description=LONG_DESCRIPTION,
    packages=LIB_PACKAGE,
    install_requires=INSTALL_REQUIRES
)
|
import os
from setuptools import setup
BASE_VERSION = '1.2.0'  # update regardless whether you update keras or pytorch or both.
FRAMEWORK = os.getenv('FRAMEWORK', 'keras')  # keras, pytorch.

# common packages.
INSTALL_REQUIRES = [
    'numpy==1.16.2',
    'pandas>=0.25.3',
    'matplotlib>=3.0'
]

if FRAMEWORK == 'keras':
    LIB_PACKAGE = ['nbeats_keras']
    INSTALL_REQUIRES.extend([
        'keras',
        'tensorflow'
    ])
elif FRAMEWORK == 'pytorch':
    LIB_PACKAGE = ['nbeats_pytorch']
    INSTALL_REQUIRES.extend([
        'torch',
        'torchvision'
    ])
else:
    raise ValueError('Unknown framework.')

# Read the long description with a context manager and explicit encoding:
# the original `open('README.md').read()` left the file handle unclosed.
with open('README.md', encoding='utf-8') as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    name=f'nbeats-{FRAMEWORK}',
    version=BASE_VERSION,
    description='N-Beats',
    author='<NAME> (Pytorch), <NAME> (Keras)',
    license='MIT',
    long_description_content_type='text/markdown',
    long_description=LONG_DESCRIPTION,
    packages=LIB_PACKAGE,
    install_requires=INSTALL_REQUIRES
)
|
en
| 0.319613
|
# update regardless whether you update keras or pytorch or both. # keras, pytorch. # common packages.
| 1.638289
| 2
|
config.py
|
aws-the-right-way/stock-of-the-day
| 0
|
6629260
|
import os
class Config(object):
    """Application configuration sourced from environment variables.

    NOTE(review): the *_URL attributes are built with `+` at class-creation
    time, so importing this module raises TypeError if the *_HOST/*_PORT
    variables are unset — presumably they are injected by the deployment
    environment; confirm before importing elsewhere.
    """
    # Host/port of the stock-symbol backend service.
    STOCK_SYMBOL_SERVICE_HOST=os.environ.get('STOCK_SYMBOL_SERVICE_SERVICE_SERVICE_HOST')
    STOCK_SYMBOL_SERVICE_PORT=os.environ.get('STOCK_SYMBOL_SERVICE_SERVICE_SERVICE_PORT')
    # Host/port of the logo-resizer backend service.
    LOGO_RESIZER_SERVICE_HOST = os.environ.get('LOGO_RESIZER_SERVICE_SERVICE_SERVICE_HOST')
    LOGO_RESIZER_SERVICE_PORT = os.environ.get('LOGO_RESIZER_SERVICE_SERVICE_SERVICE_PORT')
    # Secret key with a hard-coded fallback when SECRET_KEY is unset.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'not-today'
    # Fully qualified endpoints derived from the host/port values above.
    STOCK_SYMBOL_SERVICE_URL = 'http://' + STOCK_SYMBOL_SERVICE_HOST + ':' + STOCK_SYMBOL_SERVICE_PORT + '/api/stocks'
    LOGO_RESIZER_SERVICE_URL = 'http://' + LOGO_RESIZER_SERVICE_HOST + ':' + LOGO_RESIZER_SERVICE_PORT + '/api/upload_image'
    # When set, presumably routes logging to stdout — confirm in the app factory.
    LOG_TO_STDOUT = os.environ.get('LOG_TO_STDOUT')
|
import os
class Config(object):
    """Application configuration sourced from environment variables.

    NOTE(review): the *_URL attributes are built with `+` at class-creation
    time, so importing this module raises TypeError if the *_HOST/*_PORT
    variables are unset — presumably they are injected by the deployment
    environment; confirm before importing elsewhere.
    """
    # Host/port of the stock-symbol backend service.
    STOCK_SYMBOL_SERVICE_HOST=os.environ.get('STOCK_SYMBOL_SERVICE_SERVICE_SERVICE_HOST')
    STOCK_SYMBOL_SERVICE_PORT=os.environ.get('STOCK_SYMBOL_SERVICE_SERVICE_SERVICE_PORT')
    # Host/port of the logo-resizer backend service.
    LOGO_RESIZER_SERVICE_HOST = os.environ.get('LOGO_RESIZER_SERVICE_SERVICE_SERVICE_HOST')
    LOGO_RESIZER_SERVICE_PORT = os.environ.get('LOGO_RESIZER_SERVICE_SERVICE_SERVICE_PORT')
    # Secret key with a hard-coded fallback when SECRET_KEY is unset.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'not-today'
    # Fully qualified endpoints derived from the host/port values above.
    STOCK_SYMBOL_SERVICE_URL = 'http://' + STOCK_SYMBOL_SERVICE_HOST + ':' + STOCK_SYMBOL_SERVICE_PORT + '/api/stocks'
    LOGO_RESIZER_SERVICE_URL = 'http://' + LOGO_RESIZER_SERVICE_HOST + ':' + LOGO_RESIZER_SERVICE_PORT + '/api/upload_image'
    # When set, presumably routes logging to stdout — confirm in the app factory.
    LOG_TO_STDOUT = os.environ.get('LOG_TO_STDOUT')
|
none
| 1
| 2.041439
| 2
|
|
examples/plot_observed_catalogs/plot_observed_multiple.py
|
hematthi/SysSim_Plotting
| 0
|
6629261
|
# To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
savefigures = False
savefigures_directory = '/Users/hematthi/Documents/GradSchool/Misc_Presentations/PhD_Thesis_Defense/Figures/'
save_name = 'Models_Compare_Kepler'
compute_ratios = compute_ratios_adjacent
AD_mod = True
weights_all = load_split_stars_weights_only()
dists_include = ['delta_f',
'mult_CRPD_r',
'periods_KS',
'period_ratios_KS',
#'durations_KS',
#'durations_norm_circ_KS',
'durations_norm_circ_singles_KS',
'durations_norm_circ_multis_KS',
'duration_ratios_nonmmr_KS',
'duration_ratios_mmr_KS',
'depths_KS',
'radius_ratios_KS',
'radii_partitioning_KS',
'radii_monotonicity_KS',
'gap_complexity_KS',
]
##### To load the files with the systems with observed planets:
# Model 1:
loadfiles_directory1 = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/Split_stars/Clustered_P_R_fswp_bprp/Params13_KS/durations_KS/GP_med/incl0_ecc0p02/' #incl0_ecc0p02/ #ecc0_incl1/
run_number1 = ''
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory1 + 'periods%s.out' % run_number1)
param_vals_all1 = read_sim_params(loadfiles_directory1 + 'periods%s.out' % run_number1)
sss_per_sys1, sss1 = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory1, run_number=run_number1, compute_ratios=compute_ratios)
# Model 2:
loadfiles_directory2 = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/Split_stars/Clustered_P_R_fswp_bprp/Params13_KS/durations_KS/GP_med/incl2_ecc0p02/' #incl2_ecc0p02/ #ecc0p1_incl1/
run_number2 = ''
param_vals_all2 = read_sim_params(loadfiles_directory2 + 'periods%s.out' % run_number2)
sss_per_sys2, sss2 = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory2, run_number=run_number2, compute_ratios=compute_ratios)
# Model 3:
#loadfiles_directory3 = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/SysSimExClusters/examples/New_algorithm_AMD/Same_params/Stable/New_sampler_P_Pc/No_final_mHill_check/' #'ACI/Simulated_Data/Julia_v0.7/Kepler_catalog_optimization/q1q17_dr25_gaia_fgk_stars79935/Non_Clustered/f_high_incl_low_incl_mmr/Fit_rate_mult_P_Pratios_D_Dratios_dur_durratios_mmr/Some8_params_CRPDr_KS/Fixed_Rbreak3_Ncrit8/lc_1_8_alphaP_-2_2_alphaR1_-4_2_alphaR2_-6_0_ecc_0_0p1_incl_inclmmr_0_90/targs79935_maxincl0_maxiters5000/sigma_i_greater_sigma_i_mmr/GP_med/'
#run_number3 = ''
#param_vals_all3 = read_sim_params(loadfiles_directory3 + 'periods%s.out' % run_number3)
#sss_per_sys3, sss3 = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory3, run_number=run_number3, compute_ratios=compute_ratios)
model_sss = [sss1, sss2]
model_sss_per_sys = [sss_per_sys1, sss_per_sys2]
model_names = [r'$\sigma_i = 0^\circ$', r'$\sigma_i = 2^\circ$'] #[r'$\sigma_e = 0$', r'$\sigma_e = 0.1$'] #[r'$\sigma_i = 0^\circ$', r'$\sigma_i = 2^\circ$'] #[r'$\omega = {\rm atan}(x,y)$', r'$\omega \sim {\rm Unif}(-\pi,\pi)$'] # Make sure this matches the models loaded!
model_linestyles = ['-', '-']
model_colors = ['b', 'r'] #['b', 'r']
# To load and process the observed Kepler catalog and compare with our simulated catalog:
ssk_per_sys, ssk = compute_summary_stats_from_Kepler_catalog(P_min, P_max, radii_min, radii_max)
logxi_Kep_2 = np.log10(ssk_per_sys['xi_obs'][ssk_per_sys['Mtot_obs'] == 2, 0])
logxi_Kep_3 = np.log10(ssk_per_sys['xi_obs'][ssk_per_sys['Mtot_obs'] == 3, :2].flatten())
logxi_Kep_4 = np.log10(ssk_per_sys['xi_obs'][ssk_per_sys['Mtot_obs'] == 4, :3].flatten())
xi_Kep_4p = ssk_per_sys['xi_obs'][ssk_per_sys['Mtot_obs'] >= 4]
logxi_Kep_4p = np.log10(xi_Kep_4p[xi_Kep_4p != -1])
xi_Kep_5p = ssk_per_sys['xi_obs'][ssk_per_sys['Mtot_obs'] >= 5]
logxi_Kep_5p = np.log10(xi_Kep_5p[xi_Kep_5p != -1])
##### To plot the simulated and Kepler catalogs as marginal distributions:
subdirectory = '' #'Paper_Figures/'; 'Talk_Figures/'
fig_size = (8,3) #size of each panel (figure)
fig_lbrt = [0.15, 0.3, 0.95, 0.925]
n_bins = 100
lw = 1 #linewidth
#alpha = 0.2 #transparency of histograms
afs = 20 #axes labels font size
tfs = 20 #text labels font size
lfs = 12 #legend labels font size
#'''
# Multiplicities:
plot_fig_counts_hist_simple(fig_size, [sss_per_sys['Mtot_obs'] for sss_per_sys in model_sss_per_sys], [ssk_per_sys['Mtot_obs']], x_min=0, y_min=1e-2, y_max=1e4, x_llim=0.5, N_sim_Kep_factor=float(N_sim)/N_Kep, log_y=True, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text='Observed planets per system', afs=afs, tfs=tfs, lfs=lfs, legend=True, show_counts_sim=True, show_counts_Kep=True, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_multiplicities_compare.pdf', save_fig=savefigures)
# Periods:
plot_fig_pdf_simple(fig_size, [sss['P_obs'] for sss in model_sss], [ssk['P_obs']], x_min=3., x_max=300., y_min=1e-3, y_max=0.1, n_bins=n_bins, log_x=True, c_sim=model_colors, log_y=True, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xticks_custom=[3,10,30,100,300], xlabel_text=r'$P$ (days)', afs=afs, tfs=tfs, lfs=lfs, legend=True, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_periods_compare.pdf', save_fig=savefigures)
# Period ratios (all, with some upper cut-off):
R_max_cut = 30. #upper cut-off for plotting period ratios; np.max(sss['Rm_obs'])
plot_fig_pdf_simple(fig_size, [sss['Rm_obs'][sss['Rm_obs'] < R_max_cut] for sss in model_sss], [ssk['Rm_obs'][ssk['Rm_obs'] < R_max_cut]], x_min=1., x_max=R_max_cut, n_bins=n_bins, log_x=True, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xticks_custom=[1,2,3,4,5,10,20], xlabel_text=r'$P_{i+1}/P_i$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_periodratios_compare.pdf', save_fig=savefigures)
# Period ratios (< 5):
plot_fig_pdf_simple(fig_size, [sss['Rm_obs'][sss['Rm_obs'] < 5.] for sss in model_sss], [ssk['Rm_obs'][ssk['Rm_obs'] < 5.]], x_min=1., x_max=5., n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$P_{i+1}/P_i$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_periodratios_less5_compare.pdf', save_fig=savefigures)
# Transit durations:
plot_fig_pdf_simple(fig_size, [sss['tdur_obs'] for sss in model_sss], [ssk['tdur_obs']], x_min=0., x_max=15., n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$t_{\rm dur}$ (hrs)', afs=afs, tfs=tfs, lfs=lfs, legend=True, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_durations_compare.pdf', save_fig=savefigures)
# Circular normalized transit durations:
plot_fig_pdf_simple(fig_size, [sss['tdur_tcirc_obs'] for sss in model_sss], [ssk['tdur_tcirc_obs']], x_min=0., x_max=1.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$t_{\rm dur}/t_{\rm circ}$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_tdur_tcirc_compare.pdf', save_fig=savefigures)
# Transit depths:
plot_fig_pdf_simple(fig_size, [sss['D_obs'] for sss in model_sss], [ssk['D_obs']], x_min=10.**(-5.), x_max=10.**(-1.5), n_bins=n_bins, log_x=True, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$\delta$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_depths_compare.pdf', save_fig=savefigures)
# Planet radii:
plot_fig_pdf_simple(fig_size, [sss['radii_obs'] for sss in model_sss], [ssk['radii_obs']], x_min=radii_min, x_max=radii_max, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$R_p (R_\oplus)$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_radii_compare.pdf', save_fig=savefigures)
# Stellar radii:
plot_fig_pdf_simple(fig_size, [sss['Rstar_obs'] for sss in model_sss], [ssk['Rstar_obs']], x_min=0.5, x_max=2.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$R_\star (R_\odot)$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_stellar_radii_compare.pdf', save_fig=savefigures)
# Transit depth ratios:
plot_fig_pdf_simple(fig_size, [sss['D_ratio_obs'] for sss in model_sss], [ssk['D_ratio_obs']], x_min=10.**(-1.5), x_max=10.**(1.5), n_bins=n_bins, log_x=True, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$\delta_{i+1}/\delta_i$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_depthratios_compare.pdf', save_fig=savefigures)
# Log(xi):
plot_fig_pdf_simple(fig_size, [np.log10(sss['xi_obs']) for sss in model_sss], [np.log10(ssk['xi_obs'])], x_min=-0.5, x_max=0.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$\log{\xi}$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_logxi_all_compare.pdf', save_fig=savefigures)
plt.show()
plt.close()
#'''
##### To plot the inner vs. outer period ratios of triplets (in 3+ systems) (similar to Fig 6 in Zhu et al. 2019 and Fig 7 in Weiss et al. 2018a):
'''
compute_pratio_in_out_and_plot_fig_pdf([sss_per_sys['P_obs'] for sss_per_sys in model_sss_per_sys], last_is_Kep=True, fig_size=(8,6), n_bins=50, x_min=0.1, x_max=10., colors=['b','r'], ls=['-',':'], lw=2, labels=['Clustered P+R', 'Non-clustered'], afs=afs, tfs=tfs, lfs=lfs, save_name=savefigures_directory + subdirectory + save_name + '_observed_pratio_out_in_ratio.pdf', save_fig=savefigures)
plt.show()
plt.close()
'''
##### To plot the circular normalized transit durations again (observed singles vs. multis):
plot_fig_pdf_simple(fig_size, [sss['tdur_tcirc_1_obs'] for sss in model_sss], [ssk['tdur_tcirc_1_obs']], x_min=0., x_max=1.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$t_{\rm dur}/t_{\rm circ}$', afs=afs, tfs=tfs, lfs=lfs, extra_text='Singles', fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_tdur_tcirc_singles_compare.pdf', save_fig=savefigures)
plot_fig_pdf_simple(fig_size, [sss['tdur_tcirc_2p_obs'] for sss in model_sss], [ssk['tdur_tcirc_2p_obs']], x_min=0., x_max=1.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$t_{\rm dur}/t_{\rm circ}$', afs=afs, tfs=tfs, lfs=lfs, extra_text='Multis', fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_tdur_tcirc_multis_compare.pdf', save_fig=savefigures)
##### To plot the xi distribution separated by observed multiplicities (m=2,3,4+):
logxi_2_model1 = np.log10(sss_per_sys1['xi_obs'][sss_per_sys1['Mtot_obs'] == 2, 0])
logxi_3_model1 = np.log10(sss_per_sys1['xi_obs'][sss_per_sys1['Mtot_obs'] == 3, :2].flatten())
logxi_4_model1 = np.log10(sss_per_sys1['xi_obs'][sss_per_sys1['Mtot_obs'] == 4, :3].flatten())
xi_4p_model1 = sss_per_sys1['xi_obs'][sss_per_sys1['Mtot_obs'] >= 4]
logxi_4p_model1 = np.log10(xi_4p_model1[xi_4p_model1 != -1])
xi_5p_model1 = sss_per_sys1['xi_obs'][sss_per_sys1['Mtot_obs'] >= 5]
logxi_5p_model1 = np.log10(xi_5p_model1[xi_5p_model1 != -1])
logxi_2_model2 = np.log10(sss_per_sys2['xi_obs'][sss_per_sys2['Mtot_obs'] == 2, 0])
logxi_3_model2 = np.log10(sss_per_sys2['xi_obs'][sss_per_sys2['Mtot_obs'] == 3, :2].flatten())
logxi_4_model2 = np.log10(sss_per_sys2['xi_obs'][sss_per_sys2['Mtot_obs'] == 4, :3].flatten())
xi_4p_model2 = sss_per_sys2['xi_obs'][sss_per_sys2['Mtot_obs'] >= 4]
logxi_4p_model2 = np.log10(xi_4p_model2[xi_4p_model2 != -1])
xi_5p_model2 = sss_per_sys2['xi_obs'][sss_per_sys2['Mtot_obs'] >= 5]
logxi_5p_model2 = np.log10(xi_5p_model2[xi_5p_model2 != -1])
c2, c3, c4p = 'r', 'b', 'g'
ymax = 0.14
xi_bins = np.linspace(-0.5, 0.5, n_bins+1)
fig = plt.figure(figsize=(8,14))
plot = GridSpec(7,1,left=0.2,bottom=0.07,right=0.95,top=0.98,wspace=0,hspace=0)
ax = plt.subplot(plot[0,0])
plot_panel_cdf_simple(ax, [logxi_2_model1, logxi_3_model1, logxi_4p_model1], [logxi_Kep_2, logxi_Kep_3, logxi_Kep_4p], x_min=np.min(xi_bins), x_max=np.max(xi_bins), c_sim=[c2,c3,c4p], c_Kep=[c2,c3,c4p], ls_sim=['-','-','-'], ls_Kep=[':',':',':'], lw=2, labels_sim=[r'$m=2$', r'$m=3$', r'$m=4+$'], labels_Kep=['Kepler data',None,None], xlabel_text='', legend=True, afs=afs, tfs=tfs, lfs=lfs, label_dist=False)
ax = plt.subplot(plot[1:3,0])
plot_panel_pdf_simple(ax, [logxi_2_model1, logxi_3_model1, logxi_4p_model1], [], x_min=np.min(xi_bins), x_max=np.max(xi_bins), y_max=ymax, n_bins=n_bins, c_sim=[c2,c3,c4p], ls_sim=['-','-','-'], lw=2, labels_sim=[r'$m=2$', r'$m=3$', r'$m=4+$'], xlabel_text='', legend=False, afs=afs, tfs=tfs, lfs=lfs)
plt.text(x=0.98, y=0.9, s=model_names[0], ha='right', fontsize=lfs, transform=ax.transAxes)
ax = plt.subplot(plot[3:5,0])
plot_panel_pdf_simple(ax, [logxi_2_model2, logxi_3_model2, logxi_4p_model2], [], x_min=np.min(xi_bins), x_max=np.max(xi_bins), y_max=ymax, n_bins=n_bins, c_sim=[c2,c3,c4p], ls_sim=['-','-','-'], lw=2, labels_sim=[r'$m=2$', r'$m=3$', r'$m=4+$'], xlabel_text='', legend=False, afs=afs, tfs=tfs, lfs=lfs)
plt.text(x=0.98, y=0.9, s=model_names[1], ha='right', fontsize=lfs, transform=ax.transAxes)
ax = plt.subplot(plot[5:,0])
plot_panel_pdf_simple(ax, [logxi_Kep_2, logxi_Kep_3, logxi_Kep_4p], [], x_min=np.min(xi_bins), x_max=np.max(xi_bins), y_max=ymax, n_bins=n_bins, c_sim=[c2,c3,c4p], ls_sim=['-','-','-'], lw=2, labels_sim=[r'$m=2$', r'$m=3$', r'$m=4+$'], labels_Kep=[None], xlabel_text=r'$\log{\xi}$', legend=False, afs=afs, tfs=tfs, lfs=lfs)
plt.text(x=0.98, y=0.9, s='Kepler data', ha='right', fontsize=lfs, transform=ax.transAxes)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + save_name + '_logxi_per_mult.pdf')
plt.close()
##### To remake the log(xi) plot for defense talk:
plot_fig_pdf_simple((8,4), [np.log10(sss['xi_obs']) for sss in model_sss], [], x_min=-0.5, x_max=0.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=3, labels_sim=model_names, xlabel_text=r'$\log{\xi}$', afs=afs, tfs=tfs, lfs=20, legend=True, fig_lbrt=[0.15, 0.2, 0.95, 0.925], save_name=savefigures_directory + subdirectory + save_name + '_logxi_incl.pdf', save_fig=savefigures)
plt.show()
|
# To import required modules:
import numpy as np
import time
import os
import sys
import matplotlib
import matplotlib.cm as cm #for color maps
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec #for specifying plot attributes
from matplotlib import ticker #for setting contour plots to log scale
import scipy.integrate #for numerical integration
import scipy.misc #for factorial function
from scipy.special import erf #error function, used in computing CDF of normal distribution
import scipy.interpolate #for interpolation functions
import corner #corner.py package for corner plots
#matplotlib.rc('text', usetex=True)
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from src.functions_general import *
from src.functions_compare_kepler import *
from src.functions_load_sims import *
from src.functions_plot_catalogs import *
from src.functions_plot_params import *
# --- Run configuration (paths are machine-specific; update before running elsewhere) ---
savefigures = False #whether to write the generated figures to disk
savefigures_directory = '/Users/hematthi/Documents/GradSchool/Misc_Presentations/PhD_Thesis_Defense/Figures/'
save_name = 'Models_Compare_Kepler' #prefix for all saved figure file names
compute_ratios = compute_ratios_adjacent #ratio-computing function passed to the summary-stats loaders (adjacent planet pairs)
AD_mod = True # NOTE(review): not referenced anywhere in this script; presumably selects a modified AD distance — confirm
weights_all = load_split_stars_weights_only() #distance weights; defined in one of the star-imported src modules (presumably functions_compare_kepler)
# Distance terms to include in the model-vs-Kepler comparison (commented-out entries are excluded):
dists_include = ['delta_f',
'mult_CRPD_r',
'periods_KS',
'period_ratios_KS',
#'durations_KS',
#'durations_norm_circ_KS',
'durations_norm_circ_singles_KS',
'durations_norm_circ_multis_KS',
'duration_ratios_nonmmr_KS',
'duration_ratios_mmr_KS',
'depths_KS',
'radius_ratios_KS',
'radii_partitioning_KS',
'radii_monotonicity_KS',
'gap_complexity_KS',
]
##### To load the files with the systems with observed planets:
# NOTE(review): loadfiles_directory* paths are machine-specific; update before running elsewhere.
# Model 1:
loadfiles_directory1 = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/Split_stars/Clustered_P_R_fswp_bprp/Params13_KS/durations_KS/GP_med/incl0_ecc0p02/' #incl0_ecc0p02/ #ecc0_incl1/
run_number1 = ''
# Target count and period/radius bounds are read once, from the first model's catalog only:
N_sim, cos_factor, P_min, P_max, radii_min, radii_max = read_targets_period_radius_bounds(loadfiles_directory1 + 'periods%s.out' % run_number1)
param_vals_all1 = read_sim_params(loadfiles_directory1 + 'periods%s.out' % run_number1)
# 'sss_per_sys*' = per-system summary statistics; 'sss*' = pooled (planet-level) summary statistics.
sss_per_sys1, sss1 = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory1, run_number=run_number1, compute_ratios=compute_ratios)
# Model 2:
loadfiles_directory2 = '/Users/hematthi/Documents/GradSchool/Research/ACI/Simulated_Data/Split_stars/Clustered_P_R_fswp_bprp/Params13_KS/durations_KS/GP_med/incl2_ecc0p02/' #incl2_ecc0p02/ #ecc0p1_incl1/
run_number2 = ''
param_vals_all2 = read_sim_params(loadfiles_directory2 + 'periods%s.out' % run_number2)
sss_per_sys2, sss2 = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory2, run_number=run_number2, compute_ratios=compute_ratios)
# Model 3: (disabled)
#loadfiles_directory3 = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/SysSimExClusters/examples/New_algorithm_AMD/Same_params/Stable/New_sampler_P_Pc/No_final_mHill_check/' #'ACI/Simulated_Data/Julia_v0.7/Kepler_catalog_optimization/q1q17_dr25_gaia_fgk_stars79935/Non_Clustered/f_high_incl_low_incl_mmr/Fit_rate_mult_P_Pratios_D_Dratios_dur_durratios_mmr/Some8_params_CRPDr_KS/Fixed_Rbreak3_Ncrit8/lc_1_8_alphaP_-2_2_alphaR1_-4_2_alphaR2_-6_0_ecc_0_0p1_incl_inclmmr_0_90/targs79935_maxincl0_maxiters5000/sigma_i_greater_sigma_i_mmr/GP_med/'
#run_number3 = ''
#param_vals_all3 = read_sim_params(loadfiles_directory3 + 'periods%s.out' % run_number3)
#sss_per_sys3, sss3 = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory3, run_number=run_number3, compute_ratios=compute_ratios)
# Parallel lists of the loaded models; order must match model_names/model_colors below:
model_sss = [sss1, sss2]
model_sss_per_sys = [sss_per_sys1, sss_per_sys2]
model_names = [r'$\sigma_i = 0^\circ$', r'$\sigma_i = 2^\circ$'] #[r'$\sigma_e = 0$', r'$\sigma_e = 0.1$'] #[r'$\sigma_i = 0^\circ$', r'$\sigma_i = 2^\circ$'] #[r'$\omega = {\rm atan}(x,y)$', r'$\omega \sim {\rm Unif}(-\pi,\pi)$'] # Make sure this matches the models loaded!
model_linestyles = ['-', '-']
model_colors = ['b', 'r'] #['b', 'r']
# To load and process the observed Kepler catalog and compare with our simulated catalog:
ssk_per_sys, ssk = compute_summary_stats_from_Kepler_catalog(P_min, P_max, radii_min, radii_max)
# log(xi) of the Kepler sample, split by observed multiplicity m ('Mtot_obs').
# Rows of 'xi_obs' are apparently padded with -1 for absent entries (inferred from the != -1 filtering below).
logxi_Kep_2 = np.log10(ssk_per_sys['xi_obs'][ssk_per_sys['Mtot_obs'] == 2, 0])
logxi_Kep_3 = np.log10(ssk_per_sys['xi_obs'][ssk_per_sys['Mtot_obs'] == 3, :2].flatten())
logxi_Kep_4 = np.log10(ssk_per_sys['xi_obs'][ssk_per_sys['Mtot_obs'] == 4, :3].flatten())
xi_Kep_4p = ssk_per_sys['xi_obs'][ssk_per_sys['Mtot_obs'] >= 4]
logxi_Kep_4p = np.log10(xi_Kep_4p[xi_Kep_4p != -1])
xi_Kep_5p = ssk_per_sys['xi_obs'][ssk_per_sys['Mtot_obs'] >= 5]
logxi_Kep_5p = np.log10(xi_Kep_5p[xi_Kep_5p != -1])
##### To plot the simulated and Kepler catalogs as marginal distributions:
subdirectory = '' #'Paper_Figures/'; 'Talk_Figures/'
fig_size = (8,3) #size of each panel (figure)
fig_lbrt = [0.15, 0.3, 0.95, 0.925] #figure margins; presumably (left, bottom, right, top) — confirm against plotting helpers
n_bins = 100 #number of histogram bins for all marginal distributions
lw = 1 #linewidth
#alpha = 0.2 #transparency of histograms
afs = 20 #axes labels font size
tfs = 20 #text labels font size
lfs = 12 #legend labels font size
#'''
# Multiplicities:
# NOTE(review): N_Kep is not defined in this script; presumably provided by one of the star-imports above — confirm.
plot_fig_counts_hist_simple(fig_size, [sss_per_sys['Mtot_obs'] for sss_per_sys in model_sss_per_sys], [ssk_per_sys['Mtot_obs']], x_min=0, y_min=1e-2, y_max=1e4, x_llim=0.5, N_sim_Kep_factor=float(N_sim)/N_Kep, log_y=True, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text='Observed planets per system', afs=afs, tfs=tfs, lfs=lfs, legend=True, show_counts_sim=True, show_counts_Kep=True, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_multiplicities_compare.pdf', save_fig=savefigures)
# Periods:
plot_fig_pdf_simple(fig_size, [sss['P_obs'] for sss in model_sss], [ssk['P_obs']], x_min=3., x_max=300., y_min=1e-3, y_max=0.1, n_bins=n_bins, log_x=True, c_sim=model_colors, log_y=True, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xticks_custom=[3,10,30,100,300], xlabel_text=r'$P$ (days)', afs=afs, tfs=tfs, lfs=lfs, legend=True, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_periods_compare.pdf', save_fig=savefigures)
# Period ratios (all, with some upper cut-off):
R_max_cut = 30. #upper cut-off for plotting period ratios; np.max(sss['Rm_obs'])
plot_fig_pdf_simple(fig_size, [sss['Rm_obs'][sss['Rm_obs'] < R_max_cut] for sss in model_sss], [ssk['Rm_obs'][ssk['Rm_obs'] < R_max_cut]], x_min=1., x_max=R_max_cut, n_bins=n_bins, log_x=True, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xticks_custom=[1,2,3,4,5,10,20], xlabel_text=r'$P_{i+1}/P_i$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_periodratios_compare.pdf', save_fig=savefigures)
# Period ratios (< 5):
plot_fig_pdf_simple(fig_size, [sss['Rm_obs'][sss['Rm_obs'] < 5.] for sss in model_sss], [ssk['Rm_obs'][ssk['Rm_obs'] < 5.]], x_min=1., x_max=5., n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$P_{i+1}/P_i$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_periodratios_less5_compare.pdf', save_fig=savefigures)
# Transit durations:
plot_fig_pdf_simple(fig_size, [sss['tdur_obs'] for sss in model_sss], [ssk['tdur_obs']], x_min=0., x_max=15., n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$t_{\rm dur}$ (hrs)', afs=afs, tfs=tfs, lfs=lfs, legend=True, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_durations_compare.pdf', save_fig=savefigures)
# Circular normalized transit durations:
plot_fig_pdf_simple(fig_size, [sss['tdur_tcirc_obs'] for sss in model_sss], [ssk['tdur_tcirc_obs']], x_min=0., x_max=1.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$t_{\rm dur}/t_{\rm circ}$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_tdur_tcirc_compare.pdf', save_fig=savefigures)
# Transit depths:
plot_fig_pdf_simple(fig_size, [sss['D_obs'] for sss in model_sss], [ssk['D_obs']], x_min=10.**(-5.), x_max=10.**(-1.5), n_bins=n_bins, log_x=True, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$\delta$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_depths_compare.pdf', save_fig=savefigures)
# Planet radii:
plot_fig_pdf_simple(fig_size, [sss['radii_obs'] for sss in model_sss], [ssk['radii_obs']], x_min=radii_min, x_max=radii_max, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$R_p (R_\oplus)$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_radii_compare.pdf', save_fig=savefigures)
# Stellar radii:
plot_fig_pdf_simple(fig_size, [sss['Rstar_obs'] for sss in model_sss], [ssk['Rstar_obs']], x_min=0.5, x_max=2.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$R_\star (R_\odot)$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_stellar_radii_compare.pdf', save_fig=savefigures)
# Transit depth ratios:
plot_fig_pdf_simple(fig_size, [sss['D_ratio_obs'] for sss in model_sss], [ssk['D_ratio_obs']], x_min=10.**(-1.5), x_max=10.**(1.5), n_bins=n_bins, log_x=True, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$\delta_{i+1}/\delta_i$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_depthratios_compare.pdf', save_fig=savefigures)
# Log(xi):
plot_fig_pdf_simple(fig_size, [np.log10(sss['xi_obs']) for sss in model_sss], [np.log10(ssk['xi_obs'])], x_min=-0.5, x_max=0.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$\log{\xi}$', afs=afs, tfs=tfs, lfs=lfs, fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_logxi_all_compare.pdf', save_fig=savefigures)
plt.show()
plt.close()
#'''
##### To plot the inner vs. outer period ratios of triplets (in 3+ systems) (similar to Fig 6 in Zhu et al. 2019 and Fig 7 in Weiss et al. 2018a):
'''
compute_pratio_in_out_and_plot_fig_pdf([sss_per_sys['P_obs'] for sss_per_sys in model_sss_per_sys], last_is_Kep=True, fig_size=(8,6), n_bins=50, x_min=0.1, x_max=10., colors=['b','r'], ls=['-',':'], lw=2, labels=['Clustered P+R', 'Non-clustered'], afs=afs, tfs=tfs, lfs=lfs, save_name=savefigures_directory + subdirectory + save_name + '_observed_pratio_out_in_ratio.pdf', save_fig=savefigures)
plt.show()
plt.close()
'''
##### To plot the circular normalized transit durations again (observed singles vs. multis):
plot_fig_pdf_simple(fig_size, [sss['tdur_tcirc_1_obs'] for sss in model_sss], [ssk['tdur_tcirc_1_obs']], x_min=0., x_max=1.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$t_{\rm dur}/t_{\rm circ}$', afs=afs, tfs=tfs, lfs=lfs, extra_text='Singles', fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_tdur_tcirc_singles_compare.pdf', save_fig=savefigures)
plot_fig_pdf_simple(fig_size, [sss['tdur_tcirc_2p_obs'] for sss in model_sss], [ssk['tdur_tcirc_2p_obs']], x_min=0., x_max=1.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=lw, labels_sim=model_names, xlabel_text=r'$t_{\rm dur}/t_{\rm circ}$', afs=afs, tfs=tfs, lfs=lfs, extra_text='Multis', fig_lbrt=fig_lbrt, save_name=savefigures_directory + subdirectory + save_name + '_tdur_tcirc_multis_compare.pdf', save_fig=savefigures)
##### To plot the xi distribution separated by observed multiplicities (m=2,3,4+):
# Same multiplicity-split log(xi) computation as for the Kepler sample above, per simulated model:
logxi_2_model1 = np.log10(sss_per_sys1['xi_obs'][sss_per_sys1['Mtot_obs'] == 2, 0])
logxi_3_model1 = np.log10(sss_per_sys1['xi_obs'][sss_per_sys1['Mtot_obs'] == 3, :2].flatten())
logxi_4_model1 = np.log10(sss_per_sys1['xi_obs'][sss_per_sys1['Mtot_obs'] == 4, :3].flatten())
xi_4p_model1 = sss_per_sys1['xi_obs'][sss_per_sys1['Mtot_obs'] >= 4]
logxi_4p_model1 = np.log10(xi_4p_model1[xi_4p_model1 != -1])
xi_5p_model1 = sss_per_sys1['xi_obs'][sss_per_sys1['Mtot_obs'] >= 5]
logxi_5p_model1 = np.log10(xi_5p_model1[xi_5p_model1 != -1])
logxi_2_model2 = np.log10(sss_per_sys2['xi_obs'][sss_per_sys2['Mtot_obs'] == 2, 0])
logxi_3_model2 = np.log10(sss_per_sys2['xi_obs'][sss_per_sys2['Mtot_obs'] == 3, :2].flatten())
logxi_4_model2 = np.log10(sss_per_sys2['xi_obs'][sss_per_sys2['Mtot_obs'] == 4, :3].flatten())
xi_4p_model2 = sss_per_sys2['xi_obs'][sss_per_sys2['Mtot_obs'] >= 4]
logxi_4p_model2 = np.log10(xi_4p_model2[xi_4p_model2 != -1])
xi_5p_model2 = sss_per_sys2['xi_obs'][sss_per_sys2['Mtot_obs'] >= 5]
logxi_5p_model2 = np.log10(xi_5p_model2[xi_5p_model2 != -1])
c2, c3, c4p = 'r', 'b', 'g' #colors for m=2, m=3, m=4+ curves
ymax = 0.14
xi_bins = np.linspace(-0.5, 0.5, n_bins+1)
# 7-row grid: top CDF panel (1 row), then three 2-row PDF panels (model 1, model 2, Kepler):
fig = plt.figure(figsize=(8,14))
plot = GridSpec(7,1,left=0.2,bottom=0.07,right=0.95,top=0.98,wspace=0,hspace=0)
ax = plt.subplot(plot[0,0])
plot_panel_cdf_simple(ax, [logxi_2_model1, logxi_3_model1, logxi_4p_model1], [logxi_Kep_2, logxi_Kep_3, logxi_Kep_4p], x_min=np.min(xi_bins), x_max=np.max(xi_bins), c_sim=[c2,c3,c4p], c_Kep=[c2,c3,c4p], ls_sim=['-','-','-'], ls_Kep=[':',':',':'], lw=2, labels_sim=[r'$m=2$', r'$m=3$', r'$m=4+$'], labels_Kep=['Kepler data',None,None], xlabel_text='', legend=True, afs=afs, tfs=tfs, lfs=lfs, label_dist=False)
ax = plt.subplot(plot[1:3,0])
plot_panel_pdf_simple(ax, [logxi_2_model1, logxi_3_model1, logxi_4p_model1], [], x_min=np.min(xi_bins), x_max=np.max(xi_bins), y_max=ymax, n_bins=n_bins, c_sim=[c2,c3,c4p], ls_sim=['-','-','-'], lw=2, labels_sim=[r'$m=2$', r'$m=3$', r'$m=4+$'], xlabel_text='', legend=False, afs=afs, tfs=tfs, lfs=lfs)
plt.text(x=0.98, y=0.9, s=model_names[0], ha='right', fontsize=lfs, transform=ax.transAxes)
ax = plt.subplot(plot[3:5,0])
plot_panel_pdf_simple(ax, [logxi_2_model2, logxi_3_model2, logxi_4p_model2], [], x_min=np.min(xi_bins), x_max=np.max(xi_bins), y_max=ymax, n_bins=n_bins, c_sim=[c2,c3,c4p], ls_sim=['-','-','-'], lw=2, labels_sim=[r'$m=2$', r'$m=3$', r'$m=4+$'], xlabel_text='', legend=False, afs=afs, tfs=tfs, lfs=lfs)
plt.text(x=0.98, y=0.9, s=model_names[1], ha='right', fontsize=lfs, transform=ax.transAxes)
ax = plt.subplot(plot[5:,0])
plot_panel_pdf_simple(ax, [logxi_Kep_2, logxi_Kep_3, logxi_Kep_4p], [], x_min=np.min(xi_bins), x_max=np.max(xi_bins), y_max=ymax, n_bins=n_bins, c_sim=[c2,c3,c4p], ls_sim=['-','-','-'], lw=2, labels_sim=[r'$m=2$', r'$m=3$', r'$m=4+$'], labels_Kep=[None], xlabel_text=r'$\log{\xi}$', legend=False, afs=afs, tfs=tfs, lfs=lfs)
plt.text(x=0.98, y=0.9, s='Kepler data', ha='right', fontsize=lfs, transform=ax.transAxes)
if savefigures:
plt.savefig(savefigures_directory + subdirectory + save_name + '_logxi_per_mult.pdf')
plt.close()
##### To remake the log(xi) plot for defense talk:
plot_fig_pdf_simple((8,4), [np.log10(sss['xi_obs']) for sss in model_sss], [], x_min=-0.5, x_max=0.5, n_bins=n_bins, c_sim=model_colors, ls_sim=model_linestyles, lw=3, labels_sim=model_names, xlabel_text=r'$\log{\xi}$', afs=afs, tfs=tfs, lfs=20, legend=True, fig_lbrt=[0.15, 0.2, 0.95, 0.925], save_name=savefigures_directory + subdirectory + save_name + '_logxi_incl.pdf', save_fig=savefigures)
plt.show()
|
en
| 0.493732
|
# To import required modules: #for color maps #for specifying plot attributes #for setting contour plots to log scale #for numerical integration #for factorial function #error function, used in computing CDF of normal distribution #for interpolation functions #corner.py package for corner plots #matplotlib.rc('text', usetex=True) #'durations_KS', #'durations_norm_circ_KS', ##### To load the files with the systems with observed planets: # Model 1: #incl0_ecc0p02/ #ecc0_incl1/ # Model 2: #incl2_ecc0p02/ #ecc0p1_incl1/ # Model 3: #loadfiles_directory3 = '/Users/hematthi/Documents/GradSchool/Research/ExoplanetsSysSim_Clusters/SysSimExClusters/examples/New_algorithm_AMD/Same_params/Stable/New_sampler_P_Pc/No_final_mHill_check/' #'ACI/Simulated_Data/Julia_v0.7/Kepler_catalog_optimization/q1q17_dr25_gaia_fgk_stars79935/Non_Clustered/f_high_incl_low_incl_mmr/Fit_rate_mult_P_Pratios_D_Dratios_dur_durratios_mmr/Some8_params_CRPDr_KS/Fixed_Rbreak3_Ncrit8/lc_1_8_alphaP_-2_2_alphaR1_-4_2_alphaR2_-6_0_ecc_0_0p1_incl_inclmmr_0_90/targs79935_maxincl0_maxiters5000/sigma_i_greater_sigma_i_mmr/GP_med/' #run_number3 = '' #param_vals_all3 = read_sim_params(loadfiles_directory3 + 'periods%s.out' % run_number3) #sss_per_sys3, sss3 = compute_summary_stats_from_cat_obs(file_name_path=loadfiles_directory3, run_number=run_number3, compute_ratios=compute_ratios) #[r'$\sigma_e = 0$', r'$\sigma_e = 0.1$'] #[r'$\sigma_i = 0^\circ$', r'$\sigma_i = 2^\circ$'] #[r'$\omega = {\rm atan}(x,y)$', r'$\omega \sim {\rm Unif}(-\pi,\pi)$'] # Make sure this matches the models loaded! 
#['b', 'r'] # To load and process the observed Kepler catalog and compare with our simulated catalog: ##### To plot the simulated and Kepler catalogs as marginal distributions: #'Paper_Figures/'; 'Talk_Figures/' #size of each panel (figure) #linewidth #alpha = 0.2 #transparency of histograms #axes labels font size #text labels font size #legend labels font size #''' # Multiplicities: # Periods: # Period ratios (all, with some upper cut-off): #upper cut-off for plotting period ratios; np.max(sss['Rm_obs']) # Period ratios (< 5): # Transit durations: # Circular normalized transit durations: # Transit depths: # Planet radii: # Stellar radii: # Transit depth ratios: # Log(xi): #''' ##### To plot the inner vs. outer period ratios of triplets (in 3+ systems) (similar to Fig 6 in Zhu et al. 2019 and Fig 7 in Weiss et al. 2018a): compute_pratio_in_out_and_plot_fig_pdf([sss_per_sys['P_obs'] for sss_per_sys in model_sss_per_sys], last_is_Kep=True, fig_size=(8,6), n_bins=50, x_min=0.1, x_max=10., colors=['b','r'], ls=['-',':'], lw=2, labels=['Clustered P+R', 'Non-clustered'], afs=afs, tfs=tfs, lfs=lfs, save_name=savefigures_directory + subdirectory + save_name + '_observed_pratio_out_in_ratio.pdf', save_fig=savefigures) plt.show() plt.close() ##### To plot the circular normalized transit durations again (observed singles vs. multis): ##### To plot the xi distribution separated by observed multiplicities (m=2,3,4+): ##### To remake the log(xi) plot for defense talk:
| 1.859798
| 2
|
azure-mgmt-monitor/azure/mgmt/monitor/models/log_metric_trigger_py3.py
|
NMijat1024/azure-sdk-for-python
| 1
|
6629262
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LogMetricTrigger(Model):
    """Trigger condition evaluated against a log-search metric.

    Compares the metric value to a threshold with the configured operator,
    optionally restricted to a particular result column.

    :param threshold_operator: Comparison operator for the metric; one of
     'GreaterThan', 'LessThan', 'Equal'.
    :type threshold_operator: str or
     ~azure.mgmt.monitor.models.ConditionalOperator
    :param threshold: Threshold value the metric is compared against.
    :type threshold: float
    :param metric_trigger_type: Metric trigger type; one of 'Consecutive',
     'Total'.
    :type metric_trigger_type: str or
     ~azure.mgmt.monitor.models.MetricTriggerType
    :param metric_column: Column of the log-search result that the metric is
     evaluated on.
    :type metric_column: str
    """

    # Maps each Python attribute to its wire (JSON) key and msrest type name.
    _attribute_map = {
        'threshold_operator': {'key': 'thresholdOperator', 'type': 'str'},
        'threshold': {'key': 'threshold', 'type': 'float'},
        'metric_trigger_type': {'key': 'metricTriggerType', 'type': 'str'},
        'metric_column': {'key': 'metricColumn', 'type': 'str'},
    }

    def __init__(self, *, threshold_operator=None, threshold: float=None, metric_trigger_type=None, metric_column: str=None, **kwargs) -> None:
        super(LogMetricTrigger, self).__init__(**kwargs)
        # Assign all trigger fields uniformly; None means "not specified".
        for attr_name, attr_value in (
                ('threshold_operator', threshold_operator),
                ('threshold', threshold),
                ('metric_trigger_type', metric_trigger_type),
                ('metric_column', metric_column)):
            setattr(self, attr_name, attr_value)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LogMetricTrigger(Model):
    """LogMetricTrigger.

    Describes how a log-search metric value is evaluated against a
    threshold.  (AutoRest-generated model — edits here are lost on
    regeneration; change the service spec instead.)

    :param threshold_operator: Evaluation operation for Metric -'GreaterThan'
     or 'LessThan' or 'Equal'. Possible values include: 'GreaterThan',
     'LessThan', 'Equal'
    :type threshold_operator: str or
     ~azure.mgmt.monitor.models.ConditionalOperator
    :param threshold: Numeric threshold the metric value is compared against.
    :type threshold: float
    :param metric_trigger_type: Metric Trigger Type - 'Consecutive' or
     'Total'. Possible values include: 'Consecutive', 'Total'
    :type metric_trigger_type: str or
     ~azure.mgmt.monitor.models.MetricTriggerType
    :param metric_column: Evaluation of metric on a particular column
    :type metric_column: str
    """
    # Maps Python attribute names to their wire (JSON) keys and types for
    # msrest serialization/deserialization.
    _attribute_map = {
        'threshold_operator': {'key': 'thresholdOperator', 'type': 'str'},
        'threshold': {'key': 'threshold', 'type': 'float'},
        'metric_trigger_type': {'key': 'metricTriggerType', 'type': 'str'},
        'metric_column': {'key': 'metricColumn', 'type': 'str'},
    }
    def __init__(self, *, threshold_operator=None, threshold: float=None, metric_trigger_type=None, metric_column: str=None, **kwargs) -> None:
        """Initialize the trigger; all parameters are optional keyword-only."""
        super(LogMetricTrigger, self).__init__(**kwargs)
        self.threshold_operator = threshold_operator
        self.threshold = threshold
        self.metric_trigger_type = metric_trigger_type
        self.metric_column = metric_column
|
en
| 0.552372
|
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- LogMetricTrigger. :param threshold_operator: Evaluation operation for Metric -'GreaterThan' or 'LessThan' or 'Equal'. Possible values include: 'GreaterThan', 'LessThan', 'Equal' :type threshold_operator: str or ~azure.mgmt.monitor.models.ConditionalOperator :param threshold: :type threshold: float :param metric_trigger_type: Metric Trigger Type - 'Consecutive' or 'Total'. Possible values include: 'Consecutive', 'Total' :type metric_trigger_type: str or ~azure.mgmt.monitor.models.MetricTriggerType :param metric_column: Evaluation of metric on a particular column :type metric_column: str
| 1.694742
| 2
|
ucscsdk/mometa/compute/ComputeResourceSetManager.py
|
parag-may4/ucscsdk
| 9
|
6629263
|
"""This module contains the general information for ComputeResourceSetManager ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class ComputeResourceSetManagerConsts:
    """String constants for the ``pollingState`` property of
    ComputeResourceSetManager."""
    POLLING_STATE_COMPLETE = "complete"
    POLLING_STATE_INIT_DONE = "init-done"
    POLLING_STATE_PENDING_REBALANCE = "pending-rebalance"
    POLLING_STATE_REBALANCING = "rebalancing"
    POLLING_STATE_STARTED = "started"
    POLLING_STATE_SYSTEM_INIT = "system-init"
class ComputeResourceSetManager(ManagedObject):
    """This is ComputeResourceSetManager class."""
    # NOTE: generated UCS Central SDK metadata — edits are lost when the SDK
    # is regenerated.
    consts = ComputeResourceSetManagerConsts()
    naming_props = set([])
    # Managed-object metadata: class id, XML tag, rn ("rsrc-set-mgr"),
    # version, IO class, mask, access roles, parent MO classes
    # ('computeResourceAggrEp') and child MO classes ('computeResourceSet').
    mo_meta = MoMeta("ComputeResourceSetManager", "computeResourceSetManager", "rsrc-set-mgr", VersionMeta.Version101a, "InputOutput", 0xf, [], ["admin"], [u'computeResourceAggrEp'], [u'computeResourceSet'], [None])
    # Per-property metadata keyed by Python attribute name: wire name, type,
    # version, access, mask, min/max length, validation regex, allowed enums.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "current_set_in_polling": MoPropertyMeta("current_set_in_polling", "currentSetInPolling", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
        "polling_state": MoPropertyMeta("polling_state", "pollingState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["complete", "init-done", "pending-rebalance", "rebalancing", "started", "system-init"], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }
    # Wire (XML attribute) name -> Python attribute name.
    prop_map = {
        "childAction": "child_action",
        "currentSetInPolling": "current_set_in_polling",
        "dn": "dn",
        "pollingState": "polling_state",
        "rn": "rn",
        "status": "status",
    }
    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the MO under *parent_mo_or_dn*; properties default to None
        until populated from UCS Central."""
        self._dirty_mask = 0
        self.child_action = None
        self.current_set_in_polling = None
        self.polling_state = None
        self.status = None
        ManagedObject.__init__(self, "ComputeResourceSetManager", parent_mo_or_dn, **kwargs)
|
"""This module contains the general information for ComputeResourceSetManager ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class ComputeResourceSetManagerConsts:
    """String constants for the ``pollingState`` property of
    ComputeResourceSetManager."""
    POLLING_STATE_COMPLETE = "complete"
    POLLING_STATE_INIT_DONE = "init-done"
    POLLING_STATE_PENDING_REBALANCE = "pending-rebalance"
    POLLING_STATE_REBALANCING = "rebalancing"
    POLLING_STATE_STARTED = "started"
    POLLING_STATE_SYSTEM_INIT = "system-init"
class ComputeResourceSetManager(ManagedObject):
    """This is ComputeResourceSetManager class."""
    # NOTE: generated UCS Central SDK metadata — edits are lost when the SDK
    # is regenerated.
    consts = ComputeResourceSetManagerConsts()
    naming_props = set([])
    # Managed-object metadata: class id, XML tag, rn ("rsrc-set-mgr"),
    # version, IO class, mask, access roles, parent MO classes
    # ('computeResourceAggrEp') and child MO classes ('computeResourceSet').
    mo_meta = MoMeta("ComputeResourceSetManager", "computeResourceSetManager", "rsrc-set-mgr", VersionMeta.Version101a, "InputOutput", 0xf, [], ["admin"], [u'computeResourceAggrEp'], [u'computeResourceSet'], [None])
    # Per-property metadata keyed by Python attribute name: wire name, type,
    # version, access, mask, min/max length, validation regex, allowed enums.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "current_set_in_polling": MoPropertyMeta("current_set_in_polling", "currentSetInPolling", "ushort", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
        "polling_state": MoPropertyMeta("polling_state", "pollingState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["complete", "init-done", "pending-rebalance", "rebalancing", "started", "system-init"], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
    }
    # Wire (XML attribute) name -> Python attribute name.
    prop_map = {
        "childAction": "child_action",
        "currentSetInPolling": "current_set_in_polling",
        "dn": "dn",
        "pollingState": "polling_state",
        "rn": "rn",
        "status": "status",
    }
    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the MO under *parent_mo_or_dn*; properties default to None
        until populated from UCS Central."""
        self._dirty_mask = 0
        self.child_action = None
        self.current_set_in_polling = None
        self.polling_state = None
        self.status = None
        ManagedObject.__init__(self, "ComputeResourceSetManager", parent_mo_or_dn, **kwargs)
|
en
| 0.621918
|
This module contains the general information for ComputeResourceSetManager ManagedObject. This is ComputeResourceSetManager class. ((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1} ((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}
| 2.029954
| 2
|
app/backend/wells/tests/test_stack.py
|
bcgov/gwells
| 37
|
6629264
|
<reponame>bcgov/gwells
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import date
import logging
from unittest.mock import patch
from django.test import TestCase
from rest_framework.exceptions import ValidationError, APIException
from rest_framework.status import HTTP_500_INTERNAL_SERVER_ERROR, HTTP_400_BAD_REQUEST
from gwells.models import ProvinceStateCode
from wells.models import Well, ActivitySubmission, Casing, Screen, LinerPerforation, LithologyDescription, FieldsProvided
from submissions.models import WellActivityCode
from wells.stack import StackWells, overlap, merge_series
from registries.models import Person
logger = logging.getLogger(__name__)
class OverlapTest(TestCase):
    """Unit tests for wells.stack.overlap.

    Bug fix: all four cases were originally defined as ``test_overlap``;
    each later definition rebound the class attribute, so only the final
    case was ever discovered and run.  Each case now has a unique name so
    the whole suite executes.
    """
    def test_adjacent_ranges_do_not_overlap(self):
        # 0-1 ; 1-2 ; does not overlap.
        self.assertFalse(overlap((0, 1), (1, 2)))
    def test_identical_ranges_overlap(self):
        # 0-1 ; 0-1 ; does overlap.
        self.assertTrue(overlap((0, 1), (0, 1)))
    def test_contained_range_overlaps(self):
        # 0-2 ; 1-2 ; does overlap.
        self.assertTrue(overlap((0, 2), (1, 2)))
    def test_partial_intersection_overlaps(self):
        # 0-2 ; 1-3 ; does overlap.
        self.assertTrue(overlap((0, 2), (1, 3)))
class SeriesMergeTest(TestCase):
    """Unit tests for wells.stack.merge_series."""
    def test_new_data(self):
        # Only incoming records, no previous ones: result is the incoming set.
        # NOTE(review): this fixture uses "from_"/"to" keys while the other
        # cases use "start"/"end" — confirm which schema merge_series expects.
        previous = []
        incoming = [
            {"id": 1, "from_": 0, "to": 10},
            {"id": 2, "from_": 10, "to": 20},
        ]
        self.assertEqual(merge_series(previous, incoming), incoming)
    def test_no_overlap(self):
        # Old and new records over disjoint ranges are both kept.
        previous = [
            {"id": 1, "start": 0, "end": 10},
        ]
        incoming = [
            {"id": 2, "start": 10, "end": 20},
        ]
        expected = [
            {"id": 1, "start": 0, "end": 10},
            {"id": 2, "start": 10, "end": 20},
        ]
        self.assertEqual(merge_series(previous, incoming), expected)
    def test_overlap(self):
        # An incoming record covering the same range replaces the old record.
        previous = [
            {"id": 1, "start": 0, "end": 10},
            {"id": 3, "start": 10, "end": 20},
        ]
        incoming = [
            {"id": 2, "start": 0, "end": 10},
        ]
        expected = [
            {"id": 2, "start": 0, "end": 10},
            {"id": 3, "start": 10, "end": 20},
        ]
        self.assertEqual(merge_series(previous, incoming), expected)
    def test_intersection(self):
        # An incoming record intersecting both old records displaces both.
        previous = [
            {"id": 1, "start": 0, "end": 10},
            {"id": 3, "start": 10, "end": 20},
        ]
        incoming = [
            {"id": 2, "start": 5, "end": 15},
        ]
        expected = [
            {"id": 2, "start": 5, "end": 15},
        ]
        self.assertEqual(merge_series(previous, incoming), expected)
def errors_side_effect(*args, **kwargs):
    """Mock side effect for a serializer's ``errors``: always no errors."""
    no_errors = []
    return no_errors
def is_valid_side_effect(*args, **kwargs):
    """Mock side effect for a serializer's ``is_valid``: always invalid."""
    return False
class StackTest(TestCase):
    """Integration-style tests for wells.stack.StackWells.process, covering
    construction/alteration/decommission submissions, legacy-well handling,
    work-date field mapping, staff edits, and error translation."""
    fixtures = ['wellsearch-codetables.json', ]
    def setUp(self):
        # Shared fixtures: a responsible driller and the BC province code
        # required by submissions/wells below.
        self.driller = Person.objects.create(
            first_name='Bobby',
            surname='Driller'
        )
        self.province = ProvinceStateCode.objects.get_or_create(
            province_state_code='BC',
            description='British Columbia',
            display_order=1
        )[0]
    def test_new_submission_gets_well_tag_number(self):
        # Test that when a construction submission is processed, it is assigned a well_tag_number
        submission = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name='Bob',
            work_start_date=date(2018, 1, 1),
            work_end_date=date(2018, 2, 1),
            person_responsible=self.driller,
            owner_province_state=self.province,
            well_activity_type=WellActivityCode.types.construction(),
        )
        stacker = StackWells()
        well = stacker.process(submission.filing_number)
        # Re-fetch the submission: process() is expected to have linked it
        # to the newly created well.
        submission = ActivitySubmission.objects.get(filing_number=submission.filing_number)
        self.assertEqual(well.well_tag_number, submission.well.well_tag_number)
    def test_construction_submission_no_current_well(self):
        # Creating a brand new well that we only have a construction submission for.
        owner_full_name = 'Bob'
        submission = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name=owner_full_name,
            work_start_date=date(2018, 1, 1),
            work_end_date=date(2018, 2, 1),
            person_responsible=self.driller,
            owner_province_state=self.province,
            well_activity_type=WellActivityCode.types.construction(),
        )
        stacker = StackWells()
        well = stacker.process(submission.filing_number)
        # Confirms the well record was persisted (raises DoesNotExist otherwise).
        Well.objects.get(well_tag_number=well.well_tag_number)
        self.assertEqual(owner_full_name, well.owner_full_name)
    def test_alteration_after_construction(self):
        # Create a brand new well with a construction
        owner_full_name = 'Bob'
        new_owner_full_name = 'Joe'
        construction = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name=owner_full_name,
            work_start_date=date(2018, 1, 1),
            work_end_date=date(2018, 2, 1),
            person_responsible=self.driller,
            owner_province_state=self.province,
            well_activity_type=WellActivityCode.types.construction(),
        )
        stacker = StackWells()
        well = stacker.process(construction.filing_number)
        # Update the well with an alteration
        alteration = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name=new_owner_full_name,
            work_start_date=date(2018, 2, 1),
            work_end_date=date(2018, 3, 1),
            person_responsible=self.driller,
            owner_province_state=self.province,
            well_activity_type=WellActivityCode.types.alteration(),
            well=well
        )
        # The alteration, being later, should win on owner_full_name.
        well = stacker.process(alteration.filing_number)
        self.assertEqual(new_owner_full_name, well.owner_full_name)
    def test_alteration_submission_to_legacy_well(self):
        # The well already exists, but has no construction submission.
        original_full_name = 'Bob'
        new_full_name = 'Jimbo'
        # This is the original well record
        well = Well.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name=original_full_name,
            owner_province_state=self.province,
            construction_start_date=date(2017, 1, 1),
            construction_end_date=date(2017, 1, 2))
        # Related records that the generated legacy submission must copy.
        Casing.objects.create(start=0, end=10, well=well)
        Casing.objects.create(start=10, end=20, well=well)
        Screen.objects.create(start=0, end=10, well=well)
        Screen.objects.create(start=10, end=20, well=well)
        LinerPerforation.objects.create(start=0, end=10, well=well)
        # NOTE(review): second perforation is 10-10 (zero length); the
        # surrounding pairs suggest end=20 was intended — confirm.
        LinerPerforation.objects.create(start=10, end=10, well=well)
        # Create a submission
        submission = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name=new_full_name,
            work_start_date=date(2018, 1, 1),
            work_end_date=date(2018, 2, 1),
            person_responsible=self.driller,
            owner_province_state=self.province,
            well_activity_type=WellActivityCode.types.alteration(),
            well=well
        )
        stacker = StackWells()
        stacker.process(submission.filing_number)
        well = Well.objects.get(well_tag_number=well.well_tag_number)
        submissions = ActivitySubmission.objects.filter(well=well).order_by('work_start_date')
        # There should be two submissions at this point.
        # Submission 1: A legacy well submission generated using the original well record.
        # Submission 2: The submission for an alteration.
        self.assertEqual(submissions.count(), 2, "It is expected that a legacy submission be created")
        self.assertEqual(new_full_name, well.owner_full_name)
        # Test that all foreign key sets have also been copied
        self.assertEqual(submissions[0].casing_set.count(), 2, "It is expected that the casings on the "
                                                               "original well make part of the legacy "
                                                               "submission")
        self.assertEqual(submissions[0].screen_set.count(), 2, "It is expected that the screens on the "
                                                               "original well make part of the legacy "
                                                               "submission")
        self.assertEqual(submissions[0].linerperforation_set.count(), 2, "It is expected that the liner "
                                                                         "perforations on the original well "
                                                                         "make part of the legacy submission")
        self.assertEqual(
            submissions[0].work_start_date,
            well.construction_start_date,
            "It is expected that the well date match the submission date")
        self.assertEqual(
            submissions[0].work_end_date,
            well.construction_end_date,
            "Is it expected that the well date match the submission date")
    def test_construction_submission_to_legacy_well(self):
        # The well already exists, and we're applying a construction submission to it.
        # We're expecting a legacy record to be created, since we don't want to lose any information
        # that may already be in the well. Furthermore, we expect the construction submission to be applied
        # AFTER the legacy submission when stacking.
        original_full_name = 'Bob'
        new_full_name = 'Jimbo'
        # This is the original well record.
        well = Well.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name=original_full_name,
            owner_province_state=self.province)
        # Create a submission.
        submission = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name=new_full_name,
            work_start_date=date(2018, 1, 1),
            work_end_date=date(2018, 2, 1),
            person_responsible=self.driller,
            owner_province_state=self.province,
            well_activity_type=WellActivityCode.types.construction(),
            well=well
        )
        stacker = StackWells()
        stacker.process(submission.filing_number)
        # Load all the submissions.
        submissions = ActivitySubmission.objects.filter(well=well)
        # Load the updated well record.
        well = Well.objects.get(well_tag_number=well.well_tag_number)
        self.assertEqual(submissions.count(), 2, "It is expected that a legacy submission be created")
        self.assertEqual(new_full_name, well.owner_full_name)
    # NOTE(review): method name misspells "decommission" (one 'm') — the
    # test still runs; renaming would only change the reported test id.
    def test_decomission_submission_to_legacy_well(self):
        # The well already exists, and we are applying a decommission submission to it.
        original_full_name = 'Bob'
        new_full_name = 'Jimbo'
        # This is the original well record.
        well = Well.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name=original_full_name,
            owner_province_state=self.province)
        # Create a submission.
        submission = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name=new_full_name,
            work_start_date=date(2018, 1, 1),
            work_end_date=date(2018, 2, 1),
            person_responsible=self.driller,
            owner_province_state=self.province,
            well_activity_type=WellActivityCode.types.decommission(),
            well=well
        )
        stacker = StackWells()
        stacker.process(submission.filing_number)
        # Load all the submissions.
        submissions = ActivitySubmission.objects.filter(well=well)
        # Load the updated well record.
        well = Well.objects.get(well_tag_number=well.well_tag_number)
        self.assertEqual(submissions.count(), 2, "It is expected that a legacy submission be created")
        self.assertEqual(new_full_name, well.owner_full_name)
    def test_construction_field_mapping(self):
        # Fields such as "work_start_date" on a construction report, need to map to "construction_start_date"
        # on a well.
        start_date = date(2018, 1, 1)
        end_date = date(2018, 1, 2)
        submission = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            work_start_date=start_date,
            work_end_date=end_date,
            well_activity_type=WellActivityCode.types.construction(),
        )
        stacker = StackWells()
        well = stacker.process(submission.filing_number)
        Well.objects.get(well_tag_number=well.well_tag_number)
        self.assertEqual(start_date, well.construction_start_date)
        self.assertEqual(end_date, well.construction_end_date)
    def test_alteration_field_mapping(self):
        # Fields such as "work_start_date" on an alteration report, need to map to "alteration_start_date"
        # on a well.
        start_date = date(2018, 1, 1)
        end_date = date(2018, 1, 2)
        submission = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            work_start_date=start_date,
            work_end_date=end_date,
            well_activity_type=WellActivityCode.types.alteration(),
        )
        stacker = StackWells()
        well = stacker.process(submission.filing_number)
        Well.objects.get(well_tag_number=well.well_tag_number)
        self.assertEqual(start_date, well.alteration_start_date)
        self.assertEqual(end_date, well.alteration_end_date)
    def test_decommission_field_mapping(self):
        # Fields such as "work_start_date" on a decommission report, need to map to "decommission_start_date"
        # on a well.
        start_date = date(2018, 1, 1)
        end_date = date(2018, 1, 2)
        submission = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            work_start_date=start_date,
            work_end_date=end_date,
            well_activity_type=WellActivityCode.types.decommission(),
        )
        stacker = StackWells()
        well = stacker.process(submission.filing_number)
        Well.objects.get(well_tag_number=well.well_tag_number)
        self.assertEqual(start_date, well.decommission_start_date)
        self.assertEqual(end_date, well.decommission_end_date)
    def test_edit_comes_after_construction(self):
        # Stacking only works when done in the correct order. It's important that construction/legacy
        # submissions get processed 1st, and that alterations and edits get stacked on top of that.
        # In this case, we're creating a construction, and then applying an edit. We need to check
        # that the edit is applied over the construction.
        original_full_name = 'Bob'
        new_full_name = 'Jimbo'
        # Create a construction submission.
        submission = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name=original_full_name,
            work_start_date=date(2018, 1, 1),
            work_end_date=date(2018, 2, 1),
            person_responsible=self.driller,
            owner_province_state=self.province,
            well_activity_type=WellActivityCode.types.construction()
        )
        stacker = StackWells()
        well = stacker.process(submission.filing_number)
        # Create an edit submission.
        submission = ActivitySubmission.objects.create(
            create_user='Something',
            update_user='Something',
            owner_full_name=new_full_name,
            well=well,
            well_activity_type=WellActivityCode.types.staff_edit()
        )
        # Staff edits track which fields were supplied via FieldsProvided.
        FieldsProvided.objects.create(activity_submission=submission)
        stacker = StackWells()
        well = stacker.process(submission.filing_number)
        self.assertEqual(new_full_name, well.owner_full_name)
    @patch('wells.stack.submissions.serializers.WellSubmissionLegacySerializer.is_valid',
           side_effect=is_valid_side_effect)
    @patch('wells.stack.submissions.serializers.WellSubmissionLegacySerializer.errors',
           side_effect=errors_side_effect)
    def test_failure_to_generate_legacy_results_in_server_error(self, errors, is_valid):
        # We don't want failures to generate a legacy well to bubble up to client 400 errors, so we need
        # to make sure it's caught, and re-thrown as 500.
        # 1) Create the legacy well:
        well = Well.objects.create(
            create_user='Something',
            update_user='Something')
        # 2) Create the alteration:
        submission = ActivitySubmission.objects.create(
            well=well,
            create_user='Something',
            update_user='Something',
            well_activity_type=WellActivityCode.types.alteration())
        # 3) Attempt to stack:
        stacker = StackWells()
        # Assert that an exception is throw
        with self.assertRaises(APIException):
            try:
                stacker.process(submission.filing_number)
            except APIException as e:
                # Assert that it's a 500 error.
                self.assertEqual(e.status_code, HTTP_500_INTERNAL_SERVER_ERROR)
                # Re-raise the exception, handing it to the assertRaises above.
                raise
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import date
import logging
from unittest.mock import patch
from django.test import TestCase
from rest_framework.exceptions import ValidationError, APIException
from rest_framework.status import HTTP_500_INTERNAL_SERVER_ERROR, HTTP_400_BAD_REQUEST
from gwells.models import ProvinceStateCode
from wells.models import Well, ActivitySubmission, Casing, Screen, LinerPerforation, LithologyDescription, FieldsProvided
from submissions.models import WellActivityCode
from wells.stack import StackWells, overlap, merge_series
from registries.models import Person
logger = logging.getLogger(__name__)
class OverlapTest(TestCase):
    """Unit tests for wells.stack.overlap.

    Bug fix: all four cases were originally defined as ``test_overlap``;
    each later definition rebound the class attribute, so only the final
    case was ever discovered and run.  Each case now has a unique name so
    the whole suite executes.
    """
    def test_adjacent_ranges_do_not_overlap(self):
        # 0-1 ; 1-2 ; does not overlap.
        self.assertFalse(overlap((0, 1), (1, 2)))
    def test_identical_ranges_overlap(self):
        # 0-1 ; 0-1 ; does overlap.
        self.assertTrue(overlap((0, 1), (0, 1)))
    def test_contained_range_overlaps(self):
        # 0-2 ; 1-2 ; does overlap.
        self.assertTrue(overlap((0, 2), (1, 2)))
    def test_partial_intersection_overlaps(self):
        # 0-2 ; 1-3 ; does overlap.
        self.assertTrue(overlap((0, 2), (1, 3)))
class SeriesMergeTest(TestCase):
    """Unit tests for wells.stack.merge_series."""
    def test_new_data(self):
        # Only incoming records, no previous ones: result is the incoming set.
        # NOTE(review): this fixture uses "from_"/"to" keys while the other
        # cases use "start"/"end" — confirm which schema merge_series expects.
        previous = []
        incoming = [
            {"id": 1, "from_": 0, "to": 10},
            {"id": 2, "from_": 10, "to": 20},
        ]
        self.assertEqual(merge_series(previous, incoming), incoming)
    def test_no_overlap(self):
        # Old and new records over disjoint ranges are both kept.
        previous = [
            {"id": 1, "start": 0, "end": 10},
        ]
        incoming = [
            {"id": 2, "start": 10, "end": 20},
        ]
        expected = [
            {"id": 1, "start": 0, "end": 10},
            {"id": 2, "start": 10, "end": 20},
        ]
        self.assertEqual(merge_series(previous, incoming), expected)
    def test_overlap(self):
        # An incoming record covering the same range replaces the old record.
        previous = [
            {"id": 1, "start": 0, "end": 10},
            {"id": 3, "start": 10, "end": 20},
        ]
        incoming = [
            {"id": 2, "start": 0, "end": 10},
        ]
        expected = [
            {"id": 2, "start": 0, "end": 10},
            {"id": 3, "start": 10, "end": 20},
        ]
        self.assertEqual(merge_series(previous, incoming), expected)
    def test_intersection(self):
        # An incoming record intersecting both old records displaces both.
        previous = [
            {"id": 1, "start": 0, "end": 10},
            {"id": 3, "start": 10, "end": 20},
        ]
        incoming = [
            {"id": 2, "start": 5, "end": 15},
        ]
        expected = [
            {"id": 2, "start": 5, "end": 15},
        ]
        self.assertEqual(merge_series(previous, incoming), expected)
def errors_side_effect(*args, **kwargs):
    """Mock side effect for a serializer's ``errors``: always no errors."""
    no_errors = []
    return no_errors
def is_valid_side_effect(*args, **kwargs):
    """Mock side effect for a serializer's ``is_valid``: always invalid."""
    return False
class StackTest(TestCase):
fixtures = ['wellsearch-codetables.json', ]
def setUp(self):
self.driller = Person.objects.create(
first_name='Bobby',
surname='Driller'
)
self.province = ProvinceStateCode.objects.get_or_create(
province_state_code='BC',
description='British Columbia',
display_order=1
)[0]
def test_new_submission_gets_well_tag_number(self):
# Test that when a constrction submission is processed, it is asigned a well_tag_number
submission = ActivitySubmission.objects.create(
create_user='Something',
update_user='Something',
owner_full_name='Bob',
work_start_date=date(2018, 1, 1),
work_end_date=date(2018, 2, 1),
person_responsible=self.driller,
owner_province_state=self.province,
well_activity_type=WellActivityCode.types.construction(),
)
stacker = StackWells()
well = stacker.process(submission.filing_number)
submission = ActivitySubmission.objects.get(filing_number=submission.filing_number)
self.assertEqual(well.well_tag_number, submission.well.well_tag_number)
def test_construction_submission_no_current_well(self):
# Creating a brand new well that we only have a construction submission for.
owner_full_name = 'Bob'
submission = ActivitySubmission.objects.create(
create_user='Something',
update_user='Something',
owner_full_name=owner_full_name,
work_start_date=date(2018, 1, 1),
work_end_date=date(2018, 2, 1),
person_responsible=self.driller,
owner_province_state=self.province,
well_activity_type=WellActivityCode.types.construction(),
)
stacker = StackWells()
well = stacker.process(submission.filing_number)
Well.objects.get(well_tag_number=well.well_tag_number)
self.assertEqual(owner_full_name, well.owner_full_name)
def test_alteration_after_construction(self):
# Create a brand new well with a construction
owner_full_name = 'Bob'
new_owner_full_name = 'Joe'
construction = ActivitySubmission.objects.create(
create_user='Something',
update_user='Something',
owner_full_name=owner_full_name,
work_start_date=date(2018, 1, 1),
work_end_date=date(2018, 2, 1),
person_responsible=self.driller,
owner_province_state=self.province,
well_activity_type=WellActivityCode.types.construction(),
)
stacker = StackWells()
well = stacker.process(construction.filing_number)
# Update the well with an alteration
alteration = ActivitySubmission.objects.create(
create_user='Something',
update_user='Something',
owner_full_name=new_owner_full_name,
work_start_date=date(2018, 2, 1),
work_end_date=date(2018, 3, 1),
person_responsible=self.driller,
owner_province_state=self.province,
well_activity_type=WellActivityCode.types.alteration(),
well=well
)
well = stacker.process(alteration.filing_number)
self.assertEqual(new_owner_full_name, well.owner_full_name)
def test_alteration_submission_to_legacy_well(self):
# The well already exists, but has no construction submission.
original_full_name = 'Bob'
new_full_name = 'Jimbo'
# This is the original well record
well = Well.objects.create(
create_user='Something',
update_user='Something',
owner_full_name=original_full_name,
owner_province_state=self.province,
construction_start_date=date(2017, 1, 1),
construction_end_date=date(2017, 1, 2))
Casing.objects.create(start=0, end=10, well=well)
Casing.objects.create(start=10, end=20, well=well)
Screen.objects.create(start=0, end=10, well=well)
Screen.objects.create(start=10, end=20, well=well)
LinerPerforation.objects.create(start=0, end=10, well=well)
LinerPerforation.objects.create(start=10, end=10, well=well)
# Create a submission
submission = ActivitySubmission.objects.create(
create_user='Something',
update_user='Something',
owner_full_name=new_full_name,
work_start_date=date(2018, 1, 1),
work_end_date=date(2018, 2, 1),
person_responsible=self.driller,
owner_province_state=self.province,
well_activity_type=WellActivityCode.types.alteration(),
well=well
)
stacker = StackWells()
stacker.process(submission.filing_number)
well = Well.objects.get(well_tag_number=well.well_tag_number)
submissions = ActivitySubmission.objects.filter(well=well).order_by('work_start_date')
# There should be two submissions at this point.
# Submission 1: A legacy well submission generated using the original well record.
# Submission 2: The submission for an alteration.
self.assertEqual(submissions.count(), 2, "It is expected that a legacy submission be created")
self.assertEqual(new_full_name, well.owner_full_name)
# Test that all foreign key sets have also been copied
self.assertEqual(submissions[0].casing_set.count(), 2, "It is expected that the casings on the "
"original well make part of the legacy "
"submission")
self.assertEqual(submissions[0].screen_set.count(), 2, "It is expected that the screens on the "
"original well make part of the legacy "
"submission")
self.assertEqual(submissions[0].linerperforation_set.count(), 2, "It is expected that the liner "
"perforations on the original well "
"make part of the legacy submission")
self.assertEqual(
submissions[0].work_start_date,
well.construction_start_date,
"It is expected that the well date match the submission date")
self.assertEqual(
submissions[0].work_end_date,
well.construction_end_date,
"Is it expected that the well date match the submission date")
def test_construction_submission_to_legacy_well(self):
    """Apply a construction submission to a pre-existing well record.

    A legacy submission must be generated first, so that no information
    already on the well is lost; the construction submission is then
    stacked on top of it (i.e. applied AFTER the legacy submission).
    """
    original_full_name = 'Bob'
    new_full_name = 'Jimbo'
    # This is the original well record (created directly, with no
    # submissions attached to it yet).
    well = Well.objects.create(
        create_user='Something',
        update_user='Something',
        owner_full_name=original_full_name,
        owner_province_state=self.province)
    # Create a construction submission against that existing well.
    submission = ActivitySubmission.objects.create(
        create_user='Something',
        update_user='Something',
        owner_full_name=new_full_name,
        work_start_date=date(2018, 1, 1),
        work_end_date=date(2018, 2, 1),
        person_responsible=self.driller,
        owner_province_state=self.province,
        well_activity_type=WellActivityCode.types.construction(),
        well=well
    )
    stacker = StackWells()
    stacker.process(submission.filing_number)
    # Load all the submissions attached to the well.
    submissions = ActivitySubmission.objects.filter(well=well)
    # Re-load the well so we assert against the persisted record.
    well = Well.objects.get(well_tag_number=well.well_tag_number)
    # Expected: the construction submission plus a generated legacy one.
    self.assertEqual(submissions.count(), 2, "It is expected that a legacy submission be created")
    # The construction submission's owner name wins over the legacy data.
    self.assertEqual(new_full_name, well.owner_full_name)
def test_decomission_submission_to_legacy_well(self):
    """Apply a decommission submission to a pre-existing well record.

    As with alterations/constructions, a legacy submission must be
    generated from the original well so no data is lost, and the
    decommission submission is stacked on top of it.

    NOTE(review): the method name misspells "decommission"; renaming
    would change the test id, so it is left as-is here.
    """
    original_full_name = 'Bob'
    new_full_name = 'Jimbo'
    # This is the original well record.
    well = Well.objects.create(
        create_user='Something',
        update_user='Something',
        owner_full_name=original_full_name,
        owner_province_state=self.province)
    # Create a decommission submission against the existing well.
    submission = ActivitySubmission.objects.create(
        create_user='Something',
        update_user='Something',
        owner_full_name=new_full_name,
        work_start_date=date(2018, 1, 1),
        work_end_date=date(2018, 2, 1),
        person_responsible=self.driller,
        owner_province_state=self.province,
        well_activity_type=WellActivityCode.types.decommission(),
        well=well
    )
    stacker = StackWells()
    stacker.process(submission.filing_number)
    # Load all the submissions attached to the well.
    submissions = ActivitySubmission.objects.filter(well=well)
    # Re-load the well so we assert against the persisted record.
    well = Well.objects.get(well_tag_number=well.well_tag_number)
    self.assertEqual(submissions.count(), 2, "It is expected that a legacy submission be created")
    self.assertEqual(new_full_name, well.owner_full_name)
def test_construction_field_mapping(self):
    """Work dates on a construction report must map onto the well's
    construction_start_date / construction_end_date fields.
    """
    start_date = date(2018, 1, 1)
    end_date = date(2018, 1, 2)
    submission = ActivitySubmission.objects.create(
        create_user='Something',
        update_user='Something',
        work_start_date=start_date,
        work_end_date=end_date,
        well_activity_type=WellActivityCode.types.construction(),
    )
    stacker = StackWells()
    well = stacker.process(submission.filing_number)
    # BUG FIX: the reloaded record was previously discarded, so the
    # assertions ran against the in-memory object returned by process()
    # rather than the persisted well (sibling tests assign the reload).
    well = Well.objects.get(well_tag_number=well.well_tag_number)
    self.assertEqual(start_date, well.construction_start_date)
    self.assertEqual(end_date, well.construction_end_date)
def test_alteration_field_mapping(self):
    """Work dates on an alteration report must map onto the well's
    alteration_start_date / alteration_end_date fields.
    """
    start_date = date(2018, 1, 1)
    end_date = date(2018, 1, 2)
    submission = ActivitySubmission.objects.create(
        create_user='Something',
        update_user='Something',
        work_start_date=start_date,
        work_end_date=end_date,
        well_activity_type=WellActivityCode.types.alteration(),
    )
    stacker = StackWells()
    well = stacker.process(submission.filing_number)
    # BUG FIX: the reloaded record was previously discarded, so the
    # assertions ran against the in-memory object returned by process()
    # rather than the persisted well (sibling tests assign the reload).
    well = Well.objects.get(well_tag_number=well.well_tag_number)
    self.assertEqual(start_date, well.alteration_start_date)
    self.assertEqual(end_date, well.alteration_end_date)
def test_decommission_field_mapping(self):
    """Work dates on a decommission report must map onto the well's
    decommission_start_date / decommission_end_date fields.
    """
    start_date = date(2018, 1, 1)
    end_date = date(2018, 1, 2)
    submission = ActivitySubmission.objects.create(
        create_user='Something',
        update_user='Something',
        work_start_date=start_date,
        work_end_date=end_date,
        well_activity_type=WellActivityCode.types.decommission(),
    )
    stacker = StackWells()
    well = stacker.process(submission.filing_number)
    # BUG FIX: the reloaded record was previously discarded, so the
    # assertions ran against the in-memory object returned by process()
    # rather than the persisted well (sibling tests assign the reload).
    well = Well.objects.get(well_tag_number=well.well_tag_number)
    self.assertEqual(start_date, well.decommission_start_date)
    self.assertEqual(end_date, well.decommission_end_date)
def test_edit_comes_after_construction(self):
    """A staff edit must be stacked on top of the construction submission.

    Stacking only works when done in the correct order: construction and
    legacy submissions are processed first, then alterations and edits
    are applied over them. Here we create a construction, then an edit,
    and check the edit's data wins.
    """
    original_full_name = 'Bob'
    new_full_name = 'Jimbo'
    # Create a construction submission (no pre-existing well).
    submission = ActivitySubmission.objects.create(
        create_user='Something',
        update_user='Something',
        owner_full_name=original_full_name,
        work_start_date=date(2018, 1, 1),
        work_end_date=date(2018, 2, 1),
        person_responsible=self.driller,
        owner_province_state=self.province,
        well_activity_type=WellActivityCode.types.construction()
    )
    stacker = StackWells()
    well = stacker.process(submission.filing_number)
    # Create a staff-edit submission against the well created above.
    submission = ActivitySubmission.objects.create(
        create_user='Something',
        update_user='Something',
        owner_full_name=new_full_name,
        well=well,
        well_activity_type=WellActivityCode.types.staff_edit()
    )
    # Record which fields the edit actually provides.
    FieldsProvided.objects.create(activity_submission=submission)
    stacker = StackWells()
    well = stacker.process(submission.filing_number)
    # The edit was applied last, so its owner name must be on the well.
    self.assertEqual(new_full_name, well.owner_full_name)
@patch('wells.stack.submissions.serializers.WellSubmissionLegacySerializer.is_valid',
       side_effect=is_valid_side_effect)
@patch('wells.stack.submissions.serializers.WellSubmissionLegacySerializer.errors',
       side_effect=errors_side_effect)
def test_failure_to_generate_legacy_results_in_server_error(self, errors, is_valid):
    """A failure while generating a legacy submission must surface as 500.

    Serializer validation of the generated legacy record is patched to
    fail; the resulting error must not bubble up to the client as a 400,
    so StackWells is expected to re-raise it as an APIException with
    HTTP 500.
    """
    # 1) Create the legacy well:
    well = Well.objects.create(
        create_user='Something',
        update_user='Something')
    # 2) Create the alteration:
    submission = ActivitySubmission.objects.create(
        well=well,
        create_user='Something',
        update_user='Something',
        well_activity_type=WellActivityCode.types.alteration())
    # 3) Attempt to stack:
    stacker = StackWells()
    # Assert that an exception is thrown.
    with self.assertRaises(APIException):
        try:
            stacker.process(submission.filing_number)
        except APIException as e:
            # Assert that it's a 500 error.
            self.assertEqual(e.status_code, HTTP_500_INTERNAL_SERVER_ERROR)
            # Re-raise the exception, handing it to the assertRaises above.
            raise
|
en
| 0.937873
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # 0-1 ; 1-2 ; does not overlap. # 0-1 ; 0-1 ; does overlap. # 0-2 ; 1-2 ; does overlap. # 0-2 ; 1-3 ; does overlap. # Test scenario where there is only new data. # Test scenario where there is new and old data, but no overlap. # Test scenario where there is overlap # Test scenario where there is intersection # Test that when a constrction submission is processed, it is asigned a well_tag_number # Creating a brand new well that we only have a construction submission for. # Create a brand new well with a construction # Update the well with an alteration # The well already exists, but has no construction submission. # This is the original well record # Create a submission # There should be two submissions at this point. # Submission 1: A legacy well submission generated using the original well record. # Submission 2: The submission for an alteration. # Test that all foreign key sets have also been copied # The well already exists, and we're applying a construction submission to it. # We're expecting a legacy record to be created, since we don't want to lose and information # that may already be in the well. Furthermore, we expect the construction submission to be applied # AFTER the legacy submission when stacking. # This is the original well record. # Create a submission. # Load all the submissions. # Load the updated well record. # The well already exists, and we are applying a decommission submission to it. # This is the original well record. 
# Create a submission. # Load all the submissions. # Load the updated well record. # Fields such as "work_start_date" on a construction report, need to map to "construction_start_date" # on a well. # Fields such as "work_start_date" on an alteration report, need to map to "alteration_start_date" # on a well. # Fields such as "work_start_date" on a decommission report, need to map to "decommission_start_date" # on a well. # Stacking only works when done in the correct order. It's important that construction/legacy # submissions get processed 1st, and that alterations and edits get stacked on top of that. # In this case, we're creating a construction, and then applying an edit. We need to check # that the edit is applied over the construction. # Create a construction submission. # Create an edit submission. # We don't want failures to generate a legacy well to bubble up to client 400 errors, so we need # to make sure it's caught, and re-thrown as 500. # 1) Create the legacy well: # 2) Create the alteration: # 3) Attempt to stack: # Assert that an exception is throw # Assert that it's a 500 error. # Re-raise the exception, handing it to the assertRaises above.
| 1.834569
| 2
|
pypbm.py
|
baloo1379/io-convolultion
| 0
|
6629265
|
import tokenize as t
import numpy as np
class PyPBM:
    """Reader/writer for plain (ASCII) netpbm images: P1 (bitmap),
    P2 (graymap) and P3 (pixmap).

    The file is scanned with the stdlib tokenizer; token types listed in
    ``white`` are skipped, which transparently handles newlines and '#'
    comments anywhere in the header or pixel data.
    """

    formats = ['P1', 'P2', 'P3']    # supported plain-netpbm magic numbers
    # Token types to skip while scanning.
    # NOTE(review): these numeric codes (presumably newline/comment
    # markers) vary between Python versions -- consider using the named
    # tokenize constants instead. Values kept as in the original.
    white = [0, 4, 60]
    NAME = 1        # tokenize.NAME
    NUMBER = 2      # tokenize.NUMBER
    COMMENT = 60

    def __init__(self, path, n):
        """Load the netpbm image at *path*; *n* is a caller-supplied id.

        Raises ValueError when the header or pixel data is malformed.
        """
        self.file_name = path
        self.id = n
        self.type = None
        self.width = 0
        self.height = 0
        self.max_value = 0
        self.pixels = np.zeros((self.height, self.width), int)
        with open(path, encoding='UTF-8') as f:
            tokens = t.generate_tokens(f.read)
            # Magic number / format.
            token = self.get_next_token(tokens)
            if token.type == self.NAME and token.string in self.formats:
                self.type = token.string
            else:
                raise ValueError("wrong header")
            # Width.
            token = self.get_next_token(tokens)
            if token.type == self.NUMBER:
                self.width = int(token.string)
            else:
                raise ValueError("wrong width syntax")
            # Height.
            token = self.get_next_token(tokens)
            if token.type == self.NUMBER:
                self.height = int(token.string)
            else:
                raise ValueError("wrong height syntax")
            # Maximum pixel value (absent for P1 bitmaps, implicitly 1).
            if self.type != 'P1':
                token = self.get_next_token(tokens)
                if token.type == self.NUMBER:
                    self.max_value = int(token.string)
                else:
                    # BUG FIX: this error previously read "wrong height
                    # syntax", copy-pasted from the branch above.
                    raise ValueError("wrong max value syntax")
            else:
                self.max_value = 1
            # Pixel array: scalar samples for P1/P2, RGB triples for P3.
            if self.type != 'P3':
                self.pixels = np.zeros((self.height, self.width), dtype=int)
            else:
                self.pixels = np.zeros((self.height, self.width), dtype=(int, 3))
            for i in range(self.height):
                row = self.pixels[i]
                for j in range(self.width):
                    if self.type != 'P3':
                        row[j] = self._read_number(tokens)
                    else:
                        r = self._read_number(tokens)
                        g = self._read_number(tokens)
                        b = self._read_number(tokens)
                        row[j] = (r, g, b)
                self.pixels[i] = row
            # (The explicit f.close() of the original was redundant
            # inside the 'with' block and has been removed.)

    def _read_number(self, tokens):
        """Return the next significant token as an int.

        Raises ValueError when the token is not a number.
        """
        token = self.get_next_token(tokens)
        if token.type != self.NUMBER:
            raise ValueError("wrong pixel format at ", token.start)
        return int(token.string)

    def __repr__(self):
        # BUG FIX: a newline was missing after the first line, producing
        # output such as "...id:0Format: P3".
        res = f"Image: {self.file_name} id:{self.id}\n"
        res += f"Format: {self.type}\nSize: {self.width} x {self.height}\n"
        res += f"Max value: {self.max_value}\n" if self.type != 'P1' else ""
        res += f"{self.pixels}"
        return res

    def update_max_value(self):
        """Recompute max_value from the current pixel data."""
        self.max_value = np.amax(self.pixels)

    def info(self):
        """Return a one-line human-readable summary of this image."""
        return f"Image: {self.file_name} id: {self.id}"

    def save(self, new_file):
        """Write the image to *new_file* in plain netpbm format."""
        with open(new_file, 'w') as f:
            f.write(self.type+"\n")
            f.write(str(self.width)+" ")
            f.write(str(self.height)+"\n")
            if self.type != 'P1':
                f.write(str(self.max_value)+"\n")
            for h in range(self.height):
                row = ""
                for w in range(self.width):
                    if self.type != 'P3':
                        row += str(self.pixels[h][w]) + " "
                    else:
                        # P3: emit the three colour samples of the pixel.
                        for color in self.pixels[h][w]:
                            row += str(color) + " "
                f.write(row+"\n")

    @staticmethod
    def get_next_token(gen):
        """Return the next token from *gen* whose type is not in ``white``.

        Rewritten iteratively: a long run of comment/newline tokens can
        no longer hit the recursion limit. Propagates StopIteration at
        end of input, like the original.
        """
        token = next(gen)
        while token.type in PyPBM.white:
            token = next(gen)
        return token

    @staticmethod
    def scale_number(number, current_max):
        """Rescale *number* from the range [0, current_max] to [0, 255].

        BUG FIX: the original truncated number/current_max to int
        *before* multiplying by 255, so it could only return 0 or 255.
        """
        return int(number / current_max * 255)
if __name__ == "__main__":
    # Smoke test: load a sample image from disk and write it back out.
    # Assumes 'pbmlib.ascii.ppm' and the 'output/' directory exist.
    p = PyPBM('pbmlib.ascii.ppm', 0)
    p.save("output/p.ppm")
|
import tokenize as t
import numpy as np
class PyPBM:
formats = ['P1', 'P2', 'P3']
white = [0, 4, 60]
NAME = 1
NUMBER = 2
COMMENT = 60
def __init__(self, path, n):
self.file_name = path
self.id = n
self.type = None
self.width = 0
self.height = 0
self.max_value = 0
self.pixels = np.zeros((self.height, self.width), int)
with open(path, encoding='UTF-8') as f:
tokens = t.generate_tokens(f.read)
# Format
token = self.get_next_token(tokens)
if token.type == self.NAME and token.string in self.formats:
self.type = token.string
else:
raise ValueError("wrong header")
# Width
token = self.get_next_token(tokens)
if token.type == self.NUMBER:
self.width = int(token.string)
else:
raise ValueError("wrong width syntax")
# Height
token = self.get_next_token(tokens)
if token.type == self.NUMBER:
self.height = int(token.string)
else:
raise ValueError("wrong height syntax")
# Max value of pixel (if present)
if self.type != 'P1':
token = self.get_next_token(tokens)
if token.type == self.NUMBER:
self.max_value = int(token.string)
else:
raise ValueError("wrong height syntax")
else:
self.max_value = 1
# Pixels array
if self.type != 'P3':
self.pixels = np.zeros((self.height, self.width), dtype=int)
else:
self.pixels = np.zeros((self.height, self.width), dtype=(int, 3))
for i in range(self.height):
row = self.pixels[i]
for j in range(self.width):
if self.type != 'P3':
token = self.get_next_token(tokens)
if token.type != self.NUMBER:
raise ValueError("wrong pixel format at ", token.start)
a = int(token.string)
row[j] = a
else:
token = self.get_next_token(tokens)
if token.type != self.NUMBER:
raise ValueError("wrong pixel format at ", token.start)
r = int(token.string)
token = self.get_next_token(tokens)
if token.type != self.NUMBER:
raise ValueError("wrong pixel format at ", token.start)
g = int(token.string)
token = self.get_next_token(tokens)
if token.type != self.NUMBER:
raise ValueError("wrong pixel format at ", token.start)
b = int(token.string)
row[j] = (r, g, b)
self.pixels[i] = row
f.close()
def __repr__(self):
res = f"Image: {self.file_name} id:{self.id}"
res += f"Format: {self.type}\nSize: {self.width} x {self.height}\n"
res += f"Max value: {self.max_value}\n" if self.type != 'P1' else ""
res += f"{self.pixels}"
return res
def update_max_value(self):
self.max_value = np.amax(self.pixels)
def info(self):
return f"Image: {self.file_name} id: {self.id}"
def save(self, new_file):
with open(new_file, 'w') as f:
f.write(self.type+"\n")
f.write(str(self.width)+" ")
f.write(str(self.height)+"\n")
if self.type != 'P1':
f.write(str(self.max_value)+"\n")
for h in range(self.height):
row = ""
for w in range(self.width):
if self.type != 'P3':
row += str(self.pixels[h][w]) + " "
else:
for color in self.pixels[h][w]:
row += str(color) + " "
# row += " "
f.write(row+"\n")
@staticmethod
def get_next_token(gen):
token = next(gen)
if token.type not in PyPBM.white:
return token
else:
return PyPBM.get_next_token(gen)
@staticmethod
def scale_number(number, current_max):
return int(number / current_max) * 255
if __name__ == "__main__":
p = PyPBM('pbmlib.ascii.ppm', 0)
p.save("output/p.ppm")
|
en
| 0.542759
|
# Format # Width # Height # Max value of pixel (if present) # Pixels array # row += " "
| 2.83736
| 3
|
techmeme/__init__.py
|
bmintz/technical-meme-helper
| 6
|
6629266
|
<filename>techmeme/__init__.py
#!/usr/bin/env python3
# encoding: utf-8
#
# © 2017 <NAME>
# https://bmintz.mit-license.org/@2017
#
"""
techmeme: class that turns videos into dank technical may-mays
"""
from .technicalmeme import TechnicalMeme
|
<filename>techmeme/__init__.py
#!/usr/bin/env python3
# encoding: utf-8
#
# © 2017 <NAME>
# https://bmintz.mit-license.org/@2017
#
"""
techmeme: class that turns videos into dank technical may-mays
"""
from .technicalmeme import TechnicalMeme
|
en
| 0.622092
|
#!/usr/bin/env python3 # encoding: utf-8 # # © 2017 <NAME> # https://bmintz.mit-license.org/@2017 # techmeme: class that turns videos into dank technical may-mays
| 1.512421
| 2
|
photofiler.py
|
aparkerlue/photo-filer
| 0
|
6629267
|
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import shutil
from collections import defaultdict
from datetime import datetime
from PIL import Image
from geopy.geocoders import Nominatim
from get_lat_lon_exif_pil import get_exif_data, get_lat_lon
def get_location(exif_data):
    """Reverse-geocode the GPS coordinates embedded in *exif_data*.

    Returns the geopy Location for the image's latitude/longitude.
    """
    coords = ', '.join(str(c) for c in get_lat_lon(exif_data))
    return Nominatim().reverse(coords)
def get_datetime(exif_data):
    """Parse the EXIF 'DateTimeOriginal' field into a datetime.

    Raises KeyError if the field is absent, ValueError if malformed.
    """
    return datetime.strptime(exif_data['DateTimeOriginal'],
                             '%Y:%m:%d %H:%M:%S')
def get_location_symbol(exif_data):
    """Return the most specific place name for the image's GPS position.

    Candidate address components are tried from most to least specific;
    the first one present in the reverse-geocoded address is returned.
    Returns 'NA' when geocoding fails or no known component is present.
    (Idiom: the original 10-branch if/elif chain is replaced by a single
    ordered lookup -- same precedence, same results.)
    """
    try:
        location = get_location(exif_data)
    except Exception:
        # No GPS data / network failure / geocoder error: no symbol.
        return 'NA'
    raw_address = location.raw['address']
    # Most specific first, matching the original branch order.
    for key in ('footway', 'path', 'neighbourhood', 'hamlet', 'village',
                'town', 'city', 'county', 'state', 'country'):
        if key in raw_address:
            return raw_address[key]
    return 'NA'
# ---- Command-line interface ------------------------------------------------
parser = argparse.ArgumentParser(
    description='Organize photos by date taken'
)
parser.add_argument('files', metavar='FILE', nargs='+',
                    help='''
Image file to organize. If directory, only children are read
'''.strip())
args = parser.parse_args()

# filename -> {'dt': datetime taken, 'loc': location symbol}
metainfo = {}
# NOTE(review): this geolocator is never used below -- get_location()
# constructs its own. Candidate for removal.
geolocator = Nominatim()

# Expand arguments into a flat file list: plain files are kept as-is,
# directories contribute only their direct children (see comment below).
files = []
for f in args.files:
    if os.path.isfile(f):
        files.append(f)
    elif os.path.isdir(f):
        # Include just the direct children of the directory. Including
        # all of the descendants could result in name collisions.
        #
        # Providing multiple directories could also lead to name
        # collisions, but we want to allow at least one directory in
        # order to use this script on a Windows command line.
        for g in os.listdir(f):
            g_path = os.path.join(f, g)
            if os.path.isfile(g_path):
                files.append(g_path)
    else:
        print('warning: skipping {}'.format(f), file=sys.stderr)

# Extract the EXIF timestamp and location symbol for each image; skip
# (with a warning) anything unreadable or missing the required data.
for f in files:
    try:
        img = Image.open(f)
    except OSError as err:
        print('warning: {}: {}'.format(f, err), file=sys.stderr)
        continue
    try:
        exif_data = get_exif_data(img)
    except AttributeError as err:
        print('warning: {}: {}'.format(f, err), file=sys.stderr)
        continue
    try:
        dt = get_datetime(exif_data)
    except KeyError as err:
        print('warning: {}: KeyError `{}\''.format(f, err), file=sys.stderr)
        continue
    locsym = get_location_symbol(exif_data)
    metainfo[f] = {
        'dt': dt,
        'loc': locsym,
    }

# Group photos into directories: walking in timestamp order, a photo
# joins the previous group when its location matches and less than 45
# minutes have elapsed; otherwise it starts a new "<timestamp> <loc>"
# group keyed by its own time.
last_k = None
last_dt = None
last_loc = None
imgdirs = defaultdict(list)
for f in sorted(
        metainfo,
        key=lambda x: '{} {}'.format(metainfo[x]['dt'], os.path.basename(x))
):
    dt = metainfo[f]['dt']
    loc = metainfo[f]['loc']
    if (last_k is not None and last_dt is not None and last_loc is not None
            and loc == last_loc
            and (dt - last_dt).total_seconds() / 60 < 45):
        k = last_k
    else:
        dtstr = dt.strftime('%Y-%m-%d %H.%M')
        k = '{} {}'.format(dtstr, loc)
    # NOTE(review): only the basename is stored, yet shutil.move below
    # receives this basename -- files supplied via a directory argument
    # (path "dir/name") would not be found. Confirm intended usage is
    # running the script from inside the photo directory.
    imgdirs[k].append(os.path.basename(f))
    last_loc = loc
    last_dt = dt
    last_k = k

# Show the plan and ask for confirmation before touching the filesystem.
print('Proposed directory structure:')
for d in sorted(imgdirs):
    print(d)
    for f in sorted(imgdirs[d]):
        print(' - {}'.format(f))
    print()
do_organize = input('Create directories and organize files? (y/[n])? ')
if do_organize != 'y':
    sys.exit()

# First pass: refuse to run at all if any target directory exists.
for d in sorted(imgdirs):
    if os.path.exists(d):
        print('error: {} already exists!'.format(d))
        sys.exit(1)
# Second pass: create each directory and move its files into it.
# (The existence check is repeated inside this loop; redundant after
# the pass above, but harmless and guards against races.)
for d in sorted(imgdirs):
    if os.path.exists(d):
        print('error: {} already exists!'.format(d))
        sys.exit(1)
    os.mkdir(d)
    for f in sorted(imgdirs[d]):
        try:
            shutil.move(f, d)
        except PermissionError as err:
            print('error: Can\'t move {} to {}: {}'.format(f, d, err))
            sys.exit()
|
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import shutil
from collections import defaultdict
from datetime import datetime
from PIL import Image
from geopy.geocoders import Nominatim
from get_lat_lon_exif_pil import get_exif_data, get_lat_lon
def get_location(exif_data):
lat_lon = get_lat_lon(exif_data)
geolocator = Nominatim()
location = geolocator.reverse(', '.join(str(d) for d in lat_lon))
return location
def get_datetime(exif_data):
dtstr = exif_data['DateTimeOriginal']
dt = datetime.strptime(dtstr, '%Y:%m:%d %H:%M:%S')
return dt
def get_location_symbol(exif_data):
try:
location = get_location(exif_data)
except Exception:
return 'NA'
raw_address = location.raw['address']
if 'footway' in raw_address:
locsym = raw_address['footway']
elif 'path' in raw_address:
locsym = raw_address['path']
elif 'neighbourhood' in raw_address:
locsym = raw_address['neighbourhood']
elif 'hamlet' in raw_address:
locsym = raw_address['hamlet']
elif 'village' in raw_address:
locsym = raw_address['village']
elif 'town' in raw_address:
locsym = raw_address['town']
elif 'city' in raw_address:
locsym = raw_address['city']
elif 'county' in raw_address:
locsym = raw_address['county']
elif 'state' in raw_address:
locsym = raw_address['state']
elif 'country' in raw_address:
locsym = raw_address['country']
else:
locsym = 'NA'
return locsym
parser = argparse.ArgumentParser(
description='Organize photos by date taken'
)
parser.add_argument('files', metavar='FILE', nargs='+',
help='''
Image file to organize. If directory, only children are read
'''.strip())
args = parser.parse_args()
metainfo = {}
geolocator = Nominatim()
files = []
for f in args.files:
if os.path.isfile(f):
files.append(f)
elif os.path.isdir(f):
# Include just the direct children of the directory. Including
# all of the descendants could result in name collisions.
#
# Providing multiple directories could also lead to name
# collisions, but we want to allow at least one directory in
# order to use this script on a Windows command line.
for g in os.listdir(f):
g_path = os.path.join(f, g)
if os.path.isfile(g_path):
files.append(g_path)
else:
print('warning: skipping {}'.format(f), file=sys.stderr)
for f in files:
try:
img = Image.open(f)
except OSError as err:
print('warning: {}: {}'.format(f, err), file=sys.stderr)
continue
try:
exif_data = get_exif_data(img)
except AttributeError as err:
print('warning: {}: {}'.format(f, err), file=sys.stderr)
continue
try:
dt = get_datetime(exif_data)
except KeyError as err:
print('warning: {}: KeyError `{}\''.format(f, err), file=sys.stderr)
continue
locsym = get_location_symbol(exif_data)
metainfo[f] = {
'dt': dt,
'loc': locsym,
}
last_k = None
last_dt = None
last_loc = None
imgdirs = defaultdict(list)
for f in sorted(
metainfo,
key=lambda x: '{} {}'.format(metainfo[x]['dt'], os.path.basename(x))
):
dt = metainfo[f]['dt']
loc = metainfo[f]['loc']
if (last_k is not None and last_dt is not None and last_loc is not None
and loc == last_loc
and (dt - last_dt).total_seconds() / 60 < 45):
k = last_k
else:
dtstr = dt.strftime('%Y-%m-%d %H.%M')
k = '{} {}'.format(dtstr, loc)
imgdirs[k].append(os.path.basename(f))
last_loc = loc
last_dt = dt
last_k = k
print('Proposed directory structure:')
for d in sorted(imgdirs):
print(d)
for f in sorted(imgdirs[d]):
print(' - {}'.format(f))
print()
do_organize = input('Create directories and organize files? (y/[n])? ')
if do_organize != 'y':
sys.exit()
for d in sorted(imgdirs):
if os.path.exists(d):
print('error: {} already exists!'.format(d))
sys.exit(1)
for d in sorted(imgdirs):
if os.path.exists(d):
print('error: {} already exists!'.format(d))
sys.exit(1)
os.mkdir(d)
for f in sorted(imgdirs[d]):
try:
shutil.move(f, d)
except PermissionError as err:
print('error: Can\'t move {} to {}: {}'.format(f, d, err))
sys.exit()
|
en
| 0.935692
|
# -*- coding: utf-8 -*- Image file to organize. If directory, only children are read # Include just the direct children of the directory. Including # all of the descendants could result in name collisions. # # Providing multiple directories could also lead to name # collisions, but we want to allow at least one directory in # order to use this script on a Windows command line.
| 2.700194
| 3
|
tile_generator/bosh.py
|
yfhsu/tile-generator
| 0
|
6629268
|
#!/usr/bin/env python
# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import os
import sys
import errno
import requests
import shutil
import subprocess
import tarfile
import tempfile
from distutils import spawn
from . import template
try:
# Python 3
from urllib.request import urlretrieve
except ImportError:
# Python 2
from urllib import urlretrieve
import zipfile
import yaml
import re
import datetime
from .util import *
class BoshRelease:
    """One BOSH release of a tile: downloaded pre-built, or assembled
    from jobs and packages with the ``bosh`` CLI.

    Constructed from a single entry of the tile's release configuration;
    all work happens under ``release/<name>/``.
    """

    def __init__(self, release, context):
        # release: one release dict from the tile config.
        # context: global build/template context (version, cache, ...).
        self.name = release['name']
        self.release_dir = os.path.join('release', self.name)
        # URL/path of a pre-built tarball, if the config provides one.
        self.path = release.get('path', None)
        self.jobs = release.get('jobs', [])
        self.packages = release.get('packages', [])
        self.context = context
        self.config = release
        # Cached path of the downloaded/built tarball (lazy, see get_tarball).
        self.tarball = None

    def get_metadata(self):
        """Return name/version/tarball info read from the release manifest."""
        tarball = self.get_tarball()
        manifest = self.get_manifest(tarball)
        return {
            'release_name': manifest['name'],
            'version': manifest['version'],
            'tarball': tarball,
            'file': os.path.basename(tarball),
        }

    def get_manifest(self, tarball):
        """Extract and parse release.MF from a release tarball."""
        with tarfile.open(tarball) as tar:
            # The manifest may be stored with or without a leading './'.
            if './release.MF' in tar.getnames():
                manifest_file = tar.extractfile('./release.MF')
            elif 'release.MF' in tar.getnames():
                manifest_file = tar.extractfile('release.MF')
            else:
                raise Exception('No release manifest found in ' + tarball)
            manifest = yaml.safe_load(manifest_file)
            manifest_file.close()
        return manifest

    def get_tarball(self):
        """Return the release tarball path, downloading or building on demand."""
        if self.tarball is not None and os.path.isfile(self.tarball):
            return self.tarball
        if self.path is not None:
            print('download bosh release', self.name)
            return self.download_tarball()
        return self.build_tarball()

    def download_tarball(self):
        """Download the pre-built tarball and rename it to name-version.tgz."""
        def semver_x_greater_or_equal_to_y(x, y):
            # split the semver into major minor patch
            x = [int(d) for d in x.split('.')]
            y = [int(d) for d in y.split('.')]
            return x >= y
        mkdir_p(self.release_dir)
        tarball = os.path.join(self.release_dir, self.name + '.tgz')
        download(self.path, tarball, self.context.get('cache'))
        manifest = self.get_manifest(tarball)
        if manifest['name'] == 'cf-cli':
            # Enforce at least version 1.15 as prior versions have a CVE
            # https://docs.google.com/document/d/177QPJHKXMld1AD-GNHeildVfTGCWrGP-GSSlDmJY9eI/edit?ts=5ccd96fa
            if not semver_x_greater_or_equal_to_y(manifest['version'], '1.15.0'):
                raise RuntimeError('The cf-cli bosh release should be version 1.15.0 or higher. Detected %s' % manifest['version'])
        # Rename to the canonical <name>-<version>.tgz taken from the manifest.
        self.tarball = os.path.join(self.release_dir, manifest['name'] + '-' + manifest['version'] + '.tgz')
        os.rename(tarball, self.tarball)
        return self.tarball

    def build_tarball(self):
        """Assemble the release (jobs, packages, blobs) and create the tarball."""
        mkdir_p(self.release_dir)
        self.__bosh('init-release')
        template.render(
            os.path.join(self.release_dir, 'config/final.yml'),
            'config/final.yml',
            self.context)
        for package in self.packages:
            self.add_package(package)
        for job in self.jobs:
            self.add_job(job)
        self.__bosh('upload-blobs')
        filename = self.name + '-' + self.context['version'] + '.tgz'
        args = ['create-release', '--force', '--final', '--tarball', filename, '--version', self.context['version']]
        if self.context.get('sha1'):
            # NOTE(review): the flag inserted is '--sha2' although the
            # context key is named 'sha1' -- confirm this is intentional.
            args.insert(3, '--sha2')
        self.tarball = self.__bosh(*args, capture='Release tarball')
        # The captured value above is immediately overwritten; the tarball
        # path is derived from the known filename instead.
        self.tarball = os.path.join(self.release_dir, filename)
        return self.tarball

    def add_job(self, job):
        """Generate a BOSH job from templates (or a stub for kibosh chart jobs)."""
        job_name = job['name']
        job_type = job.get('type', job_name)
        job_template = job.get('template', job_type)
        is_errand = job.get('lifecycle', None) == 'errand'
        package = job.get('package', None)
        packages = job.get('packages', [])
        self.__bosh('generate-job', job_type)
        job_context = {
            'job_name': job_name,
            'job_type': job_type,
            'context': self.context,
            'package': package,
            'packages': packages,
            'errand': is_errand,
        }
        if self.config.get('package-type') == 'kibosh' and job_type.startswith('charts_for_'):
            # Kibosh chart jobs get a minimal hand-written spec and an
            # empty monit file instead of the rendered templates.
            path = os.path.join(self.release_dir, 'jobs', job_type)
            shutil.rmtree(path, True)
            mkdir_p(os.path.join(path, 'templates'))
            with open(os.path.join(path, 'spec'), 'w') as f:
                f.write(("---\n"
                         "name: %s\n\n"
                         'packages:\n'
                         '- %s\n') % (job_type, job_type))
            # Create an empty monit file (equivalent of 'touch').
            path = os.path.join(path, 'monit')
            with open(path, 'w'):
                os.utime(path, None)
        else:
            # Render the standard job files: spec, run script, env, monit.
            template.render(
                os.path.join(self.release_dir, 'jobs', job_type, 'spec'),
                os.path.join('jobs', 'spec'),
                job_context
            )
            template.render(
                os.path.join(self.release_dir, 'jobs', job_type, 'templates', job_type + '.sh.erb'),
                os.path.join('jobs', job_template + '.sh.erb'),
                job_context
            )
            template.render(
                os.path.join(self.release_dir, 'jobs', job_type, 'templates', 'opsmgr.env.erb'),
                os.path.join('jobs', 'opsmgr.env.erb'),
                job_context
            )
            template.render(
                os.path.join(self.release_dir, 'jobs', job_type, 'monit'),
                os.path.join('jobs', 'monit'),
                job_context
            )

    def needs_zip(self, package):
        """Return True when the package's files must be bundled into one zip."""
        # Only zip package types that require single files
        if not package.get('is_cf', False) and not package.get('zip_if_needed', False):
            return False
        files = package['files']
        # ...if it has more than one file...
        if len(files) > 1:
            return True
        # ...or a single file is not already a zip.
        elif len(files) == 1:
            return not zipfile.is_zipfile(files[0]['path'])
        return False

    def add_blob(self, package):
        """Register each of the package's files as a BOSH blob."""
        for file in package['files']:
            self.__bosh('add-blob', os.path.realpath(file['path']), file['name'])

    def add_package(self, package):
        """Generate a BOSH package, downloading (and zipping if needed) its files."""
        name = package['name']
        dir = package.get('dir', 'blobs')
        self.__bosh('generate-package', name)
        target_dir = os.path.realpath(os.path.join(self.release_dir, dir, name))
        package_dir = os.path.realpath(os.path.join(self.release_dir, 'packages', name))
        mkdir_p(target_dir)
        template_dir = 'packages'
        # Download files for package
        if self.needs_zip(package):
            # Stage all files in a temp dir, zip them into a single blob,
            # and rewrite the package's file list to point at that zip.
            staging_dir = tempfile.mkdtemp()
            file_options = dict()
            for file in package.get('files', []):
                # Preserve any extra per-file options (all keys but name/path).
                for key in [k for k in file.keys() if k not in ['name', 'path']]:
                    file_options[key] = file[key]
                download(file['path'], os.path.join(staging_dir, file['name']), cache=self.context.get('cache', None))
            path = package.get('manifest', {}).get('path', '')
            dir_to_zip = os.path.join(staging_dir, path) if path else staging_dir
            zipfilename = os.path.realpath(os.path.join(target_dir, package['name'] + '.zip'))
            zip_dir(zipfilename, dir_to_zip)
            shutil.rmtree(staging_dir)
            newpath = os.path.basename(zipfilename)
            # Point any job app-manifests referencing this package at the zip.
            for job in self.jobs:
                if job.get('manifest', {}).get(package['name'], {}).get('app_manifest'):
                    job['manifest'][package['name']]['app_manifest']['path'] = newpath
            result = {'path': zipfilename, 'name': os.path.basename(zipfilename)}
            result.update(file_options)
            package['files'] = [result]
            self.__bosh('add-blob', zipfilename, os.path.join(name, os.path.basename(zipfilename)))
        else:
            for file in package.get('files', []):
                download(file['path'], os.path.join(target_dir, file['name']), cache=self.context.get('cache', None))
                self.__bosh('add-blob', os.path.join(target_dir, file['name']), os.path.join(name, file['name']))
        # Construct context for template rendering
        package_context = {
            'context': self.context,
            'package': package,
            'files': package.get('files', []),
        }
        template.render(
            os.path.join(package_dir, 'spec'),
            os.path.join(template_dir, 'spec'),
            package_context
        )
        template.render(
            os.path.join(package_dir, 'packaging'),
            os.path.join(template_dir, 'packaging'),
            package_context
        )

    def __bosh(self, *argv, **kw):
        # Thin wrapper: run the bosh CLI inside this release's directory.
        return run_bosh(self.release_dir, *argv, **kw)
def ensure_bosh():
bosh_exec = spawn.find_executable('bosh')
if not bosh_exec:
print("'bosh' command should be on the path. See https://bosh.io for installation instructions")
sys.exit(1)
if bosh_exec:
output = subprocess.check_output(["bosh", "--version"], stderr=subprocess.STDOUT, cwd=".")
if output.startswith(b"version 1."):
print("You are running an older version of bosh. Please upgrade to the latest version. See https://bosh.io/docs/cli-v2.html for installation instructions")
sys.exit(1)
def run_bosh(working_dir, *argv, **kw):
ensure_bosh()
# Ensure that the working_dir is a git repo, needed for bosh's create-release.
# This is used to avoid this bug https://www.pivotaltracker.com/story/show/159156765
if 'create-release' in argv:
print(working_dir)
cmd = 'if ! git rev-parse --git-dir 2> /dev/null; then git init; fi'
subprocess.call(cmd, shell=True, cwd=working_dir)
# Change the commands
argv = list(argv)
print('bosh', ' '.join(argv))
command = ['bosh', '--no-color', '--non-interactive'] + argv
capture = kw.get('capture', None)
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT, cwd=working_dir)
if not isinstance(output, str):
# check_output always returns bytes. in python3 we want str (=unicode)
output = output.decode()
if capture is not None:
for l in output.split('\n'):
if l.startswith(capture):
output = l.split(':', 1)[-1].strip()
break
return output
except subprocess.CalledProcessError as e:
if argv[0] == 'init' and argv[1] == 'release' and 'Release already initialized' in e.output:
return e.output
if argv[0] == 'generate' and 'already exists' in e.output:
return e.output
print(e.output)
sys.exit(e.returncode)
|
#!/usr/bin/env python
# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import os
import sys
import errno
import requests
import shutil
import subprocess
import tarfile
import tempfile
from distutils import spawn
from . import template
try:
# Python 3
from urllib.request import urlretrieve
except ImportError:
# Python 2
from urllib import urlretrieve
import zipfile
import yaml
import re
import datetime
from .util import *
class BoshRelease:
def __init__(self, release, context):
    """Wrap one BOSH release definition from the tile config.

    release: dict parsed from the tile config; must contain 'name' and may
        contain 'path' (pre-built tarball location), 'jobs', 'packages'.
    context: shared tile-generation context (version, cache, ...).
    """
    self.name = release['name']
    # All work for this release happens under release/<name>.
    self.release_dir = os.path.join('release', self.name)
    # When 'path' is set, the release is downloaded instead of built.
    self.path = release.get('path', None)
    self.jobs = release.get('jobs', [])
    self.packages = release.get('packages', [])
    self.context = context
    self.config = release
    # Cached tarball path, populated lazily by get_tarball().
    self.tarball = None
def get_metadata(self):
    """Obtain the release tarball and summarize its manifest.

    Returns a dict with the release name, version, tarball path and
    tarball file name.
    """
    tarball_path = self.get_tarball()
    release_manifest = self.get_manifest(tarball_path)
    metadata = {
        'release_name': release_manifest['name'],
        'version': release_manifest['version'],
        'tarball': tarball_path,
        'file': os.path.basename(tarball_path),
    }
    return metadata
def get_manifest(self, tarball):
    """Extract and parse release.MF from a BOSH release tarball."""
    with tarfile.open(tarball) as tar:
        member_names = tar.getnames()
        # The manifest may be stored with or without a leading './'.
        for candidate in ('./release.MF', 'release.MF'):
            if candidate in member_names:
                manifest_file = tar.extractfile(candidate)
                break
        else:
            raise Exception('No release manifest found in ' + tarball)
        try:
            return yaml.safe_load(manifest_file)
        finally:
            manifest_file.close()
def get_tarball(self):
    """Return the release tarball path: cached, downloaded, or built."""
    cached = self.tarball is not None and os.path.isfile(self.tarball)
    if cached:
        return self.tarball
    if self.path is None:
        # No pre-built tarball configured: build from jobs/packages.
        return self.build_tarball()
    print('download bosh release', self.name)
    return self.download_tarball()
def download_tarball(self):
    """Download a pre-built release tarball and rename it canonically.

    Downloads from self.path, reads the manifest, and renames the file
    to <name>-<version>.tgz. Caches and returns the path.
    Raises RuntimeError for cf-cli releases older than 1.15.0.
    """
    def semver_x_greater_or_equal_to_y(x, y):
        # split the semver into major minor patch and compare numerically
        x = [int(d) for d in x.split('.')]
        y = [int(d) for d in y.split('.')]
        return x >= y
    mkdir_p(self.release_dir)
    tarball = os.path.join(self.release_dir, self.name + '.tgz')
    download(self.path, tarball, self.context.get('cache'))
    manifest = self.get_manifest(tarball)
    if manifest['name'] == 'cf-cli':
        # Enforce at least version 1.15 as prior versions have a CVE
        # https://docs.google.com/document/d/177QPJHKXMld1AD-GNHeildVfTGCWrGP-GSSlDmJY9eI/edit?ts=5ccd96fa
        if not semver_x_greater_or_equal_to_y(manifest['version'], '1.15.0'):
            raise RuntimeError('The cf-cli bosh release should be version 1.15.0 or higher. Detected %s' % manifest['version'])
    # Rename to the canonical <name>-<version>.tgz from the manifest.
    self.tarball = os.path.join(self.release_dir, manifest['name'] + '-' + manifest['version'] + '.tgz')
    os.rename(tarball, self.tarball)
    return self.tarball
def build_tarball(self):
    """Build the BOSH release from its jobs/packages; return tarball path."""
    mkdir_p(self.release_dir)
    self.__bosh('init-release')
    # Render the final-release config before adding packages/jobs.
    template.render(
        os.path.join(self.release_dir, 'config/final.yml'),
        'config/final.yml',
        self.context)
    for package in self.packages:
        self.add_package(package)
    for job in self.jobs:
        self.add_job(job)
    self.__bosh('upload-blobs')
    filename = self.name + '-' + self.context['version'] + '.tgz'
    args = ['create-release', '--force', '--final', '--tarball', filename, '--version', self.context['version']]
    if self.context.get('sha1'):
        # NOTE(review): the flag is named 'sha1' but requests --sha2
        # digests from bosh — confirm this is intentional.
        args.insert(3, '--sha2')
    self.tarball = self.__bosh(*args, capture='Release tarball')
    # The captured value is immediately replaced with the known local
    # path; the capture result itself is discarded.
    self.tarball = os.path.join(self.release_dir, filename)
    return self.tarball
def add_job(self, job):
    """Generate one BOSH job's spec, templates, and monit file.

    job: dict from the tile config with 'name' and optional 'type',
    'template', 'lifecycle', 'package'/'packages' entries.
    """
    job_name = job['name']
    job_type = job.get('type', job_name)
    job_template = job.get('template', job_type)
    is_errand = job.get('lifecycle', None) == 'errand'
    package = job.get('package', None)
    packages = job.get('packages', [])
    self.__bosh('generate-job', job_type)
    job_context = {
        'job_name': job_name,
        'job_type': job_type,
        'context': self.context,
        'package': package,
        'packages': packages,
        'errand': is_errand,
    }
    if self.config.get('package-type') == 'kibosh' and job_type.startswith('charts_for_'):
        # Kibosh chart jobs get a hand-written minimal spec and an empty
        # monit file instead of the rendered templates below.
        path = os.path.join(self.release_dir, 'jobs', job_type)
        shutil.rmtree(path, True)
        mkdir_p(os.path.join(path, 'templates'))
        with open(os.path.join(path, 'spec'), 'w') as f:
            f.write(("---\n"
                     "name: %s\n\n"
                     'packages:\n'
                     '- %s\n') % (job_type, job_type))
        path = os.path.join(path, 'monit')
        # 'touch' an empty monit file.
        with open(path, 'w'):
            os.utime(path, None)
    else:
        template.render(
            os.path.join(self.release_dir, 'jobs', job_type, 'spec'),
            os.path.join('jobs', 'spec'),
            job_context
        )
        template.render(
            os.path.join(self.release_dir, 'jobs', job_type, 'templates', job_type + '.sh.erb'),
            os.path.join('jobs', job_template + '.sh.erb'),
            job_context
        )
        template.render(
            os.path.join(self.release_dir, 'jobs', job_type, 'templates', 'opsmgr.env.erb'),
            os.path.join('jobs', 'opsmgr.env.erb'),
            job_context
        )
        template.render(
            os.path.join(self.release_dir, 'jobs', job_type, 'monit'),
            os.path.join('jobs', 'monit'),
            job_context
        )
def needs_zip(self, package):
    """Return True when the package's files must be bundled into one zip.

    Only package types flagged 'is_cf' or 'zip_if_needed' require a
    single zip artifact: multiple files always need zipping, and a
    single file needs zipping unless it is already a zip.
    """
    # Only zip package types that require single files
    if not package.get('is_cf', False) and not package.get('zip_if_needed', False):
        return False
    # Use .get for consistency with add_package(); a package with no
    # 'files' entry never needs zipping (this previously raised KeyError).
    files = package.get('files', [])
    # ...if it has more than one file...
    if len(files) > 1:
        return True
    # ...or a single file is not already a zip.
    elif len(files) == 1:
        return not zipfile.is_zipfile(files[0]['path'])
    return False
def add_blob(self, package):
    """Register every file of *package* as a bosh blob ('add-blob')."""
    for blob in package['files']:
        blob_path = os.path.realpath(blob['path'])
        self.__bosh('add-blob', blob_path, blob['name'])
def add_package(self, package):
    """Download a package's files, register blobs, render package templates.

    When needs_zip() applies, all files are bundled into a single
    <name>.zip and package['files'] is rewritten in place to point at it;
    any job app_manifest path referencing this package is updated too.
    """
    name = package['name']
    dir = package.get('dir', 'blobs')
    self.__bosh('generate-package', name)
    target_dir = os.path.realpath(os.path.join(self.release_dir, dir, name))
    package_dir = os.path.realpath(os.path.join(self.release_dir, 'packages', name))
    mkdir_p(target_dir)
    template_dir = 'packages'
    # Download files for package
    if self.needs_zip(package):
        staging_dir = tempfile.mkdtemp()
        file_options = dict()
        for file in package.get('files', []):
            # Preserve any extra per-file options so they can be
            # re-attached to the single zip entry below.
            for key in [k for k in file.keys() if k not in ['name', 'path']]:
                file_options[key] = file[key]
            download(file['path'], os.path.join(staging_dir, file['name']), cache=self.context.get('cache', None))
        # An optional manifest path selects a subdirectory to zip.
        path = package.get('manifest', {}).get('path', '')
        dir_to_zip = os.path.join(staging_dir, path) if path else staging_dir
        zipfilename = os.path.realpath(os.path.join(target_dir, package['name'] + '.zip'))
        zip_dir(zipfilename, dir_to_zip)
        shutil.rmtree(staging_dir)
        newpath = os.path.basename(zipfilename)
        # Point any job app manifests at the new zip artifact.
        for job in self.jobs:
            if job.get('manifest', {}).get(package['name'], {}).get('app_manifest'):
                job['manifest'][package['name']]['app_manifest']['path'] = newpath
        result = {'path': zipfilename, 'name': os.path.basename(zipfilename)}
        result.update(file_options)
        package['files'] = [result]
        self.__bosh('add-blob', zipfilename, os.path.join(name, os.path.basename(zipfilename)))
    else:
        for file in package.get('files', []):
            download(file['path'], os.path.join(target_dir, file['name']), cache=self.context.get('cache', None))
            self.__bosh('add-blob', os.path.join(target_dir, file['name']), os.path.join(name, file['name']))
    # Construct context for template rendering
    package_context = {
        'context': self.context,
        'package': package,
        'files': package.get('files', []),
    }
    template.render(
        os.path.join(package_dir, 'spec'),
        os.path.join(template_dir, 'spec'),
        package_context
    )
    template.render(
        os.path.join(package_dir, 'packaging'),
        os.path.join(template_dir, 'packaging'),
        package_context
    )
def __bosh(self, *argv, **kw):
    """Run a bosh CLI command with this release's directory as cwd."""
    return run_bosh(self.release_dir, *argv, **kw)
def ensure_bosh():
    """Exit with an error unless a v2+ 'bosh' CLI is on the PATH.

    Side effects: prints guidance and calls sys.exit(1) when bosh is
    missing or when 'bosh --version' reports the legacy v1 CLI.
    """
    # shutil.which replaces the deprecated distutils.spawn.find_executable
    # (distutils was removed from the stdlib in Python 3.12); shutil is
    # already imported at the top of this file.
    bosh_exec = shutil.which('bosh')
    if not bosh_exec:
        print("'bosh' command should be on the path. See https://bosh.io for installation instructions")
        sys.exit(1)
    # No 'if bosh_exec' re-check needed: sys.exit above already
    # terminated the missing-executable case.
    output = subprocess.check_output(["bosh", "--version"], stderr=subprocess.STDOUT, cwd=".")
    if output.startswith(b"version 1."):
        print("You are running an older version of bosh. Please upgrade to the latest version. See https://bosh.io/docs/cli-v2.html for installation instructions")
        sys.exit(1)
def run_bosh(working_dir, *argv, **kw):
    """Run a bosh CLI command in working_dir and return its output as str.

    kw['capture']: optional line prefix; when given, return only the
    value after the ':' on the first matching output line.
    Exits the process with bosh's return code on unexpected failures.
    """
    ensure_bosh()
    # Ensure that the working_dir is a git repo, needed for bosh's create-release.
    # This is used to avoid this bug https://www.pivotaltracker.com/story/show/159156765
    if 'create-release' in argv:
        print(working_dir)
        cmd = 'if ! git rev-parse --git-dir 2> /dev/null; then git init; fi'
        subprocess.call(cmd, shell=True, cwd=working_dir)
    # Change the commands
    argv = list(argv)
    print('bosh', ' '.join(argv))
    command = ['bosh', '--no-color', '--non-interactive'] + argv
    capture = kw.get('capture', None)
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT, cwd=working_dir)
        if not isinstance(output, str):
            # check_output always returns bytes. in python3 we want str (=unicode)
            output = output.decode()
        if capture is not None:
            for l in output.split('\n'):
                if l.startswith(capture):
                    output = l.split(':', 1)[-1].strip()
                    break
        return output
    except subprocess.CalledProcessError as e:
        # BUG FIX: on Python 3, e.output is bytes, so substring tests like
        # 'already exists' in e.output raised TypeError instead of matching.
        # Decode to str first (no-op on Python 2).
        failure_output = e.output.decode() if isinstance(e.output, bytes) else e.output
        # NOTE(review): these prefixes look like v1 CLI command names
        # ('init release' / 'generate'); the v2 commands used above are
        # 'init-release' / 'generate-*' — confirm these branches can fire.
        if argv[0] == 'init' and argv[1] == 'release' and 'Release already initialized' in failure_output:
            return failure_output
        if argv[0] == 'generate' and 'already exists' in failure_output:
            return failure_output
        print(failure_output)
        sys.exit(e.returncode)
|
en
| 0.805249
|
#!/usr/bin/env python # tile-generator # # Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Python 3 # Python 2 # split the semver into major minor patch # Enforce at least version 1.15 as prior versions have a CVE # https://docs.google.com/document/d/177QPJHKXMld1AD-GNHeildVfTGCWrGP-GSSlDmJY9eI/edit?ts=5ccd96fa # Only zip package types that require single files # ...if it has more than one file... # ...or a single file is not already a zip. # Download files for package # Construct context for template rendering # Ensure that the working_dir is a git repo, needed for bosh's create-release. # This is used to avoid this bug https://www.pivotaltracker.com/story/show/159156765 # Change the commands # check_output always returns bytes. in python3 we want str (=unicode)
| 2.065131
| 2
|
webots_ros2_driver/webots_ros2_driver/webots_launcher.py
|
TaoYibo1866/webots_ros2
| 0
|
6629269
|
#!/usr/bin/env python
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This launcher simply starts Webots."""
import os
import sys
from launch.actions import ExecuteProcess
from launch.substitution import Substitution
from launch.substitutions import TextSubstitution
from webots_ros2_driver.utils import get_webots_home, handle_webots_installation
class _ConditionalSubstitution(Substitution):
    """Substitution that resolves to one of two values based on a condition.

    The condition is considered false when it resolves to 'false', '0',
    or '' (case-insensitive); any other value selects true_value.
    """

    def __init__(self, *, condition, false_value='', true_value=''):
        if isinstance(condition, Substitution):
            self.__condition = condition
        else:
            # Plain values are stringified so they resolve uniformly.
            self.__condition = TextSubstitution(text=str(condition))
        if isinstance(false_value, Substitution):
            self.__false_value = false_value
        else:
            self.__false_value = TextSubstitution(text=false_value)
        if isinstance(true_value, Substitution):
            self.__true_value = true_value
        else:
            self.__true_value = TextSubstitution(text=true_value)

    def perform(self, context):
        resolved = context.perform_substitution(self.__condition)
        is_falsy = resolved.lower() in ['false', '0', '']
        chosen = self.__false_value if is_falsy else self.__true_value
        return context.perform_substitution(chosen)
class WebotsLauncher(ExecuteProcess):
    """Launch action that starts the Webots simulator with a given world.

    Boolean-like options (gui, stream) may be plain values or launch
    Substitutions; they are resolved into CLI flags at launch time.
    """

    def __init__(self, output='screen', world=None, gui=True, mode='realtime', stream=False, **kwargs):
        # Find Webots executable
        webots_path = get_webots_home(show_warning=True)
        if webots_path is None:
            # Offer to install Webots, then look for it again.
            handle_webots_installation()
            webots_path = get_webots_home()
        if sys.platform == 'win32':
            webots_path = os.path.join(webots_path, 'msys64', 'mingw64', 'bin')
        webots_path = os.path.join(webots_path, 'webots')
        mode = mode if isinstance(mode, Substitution) else TextSubstitution(text=mode)
        world = world if isinstance(world, Substitution) else TextSubstitution(text=world)
        # When gui resolves to false, run headless: no rendering, console
        # output, no sandbox, minimized window.
        no_rendering = _ConditionalSubstitution(condition=gui, false_value='--no-rendering')
        stdout = _ConditionalSubstitution(condition=gui, false_value='--stdout')
        stderr = _ConditionalSubstitution(condition=gui, false_value='--stderr')
        no_sandbox = _ConditionalSubstitution(condition=gui, false_value='--no-sandbox')
        if sys.platform == 'win32':
            # Windows doesn't have the sandbox argument
            no_sandbox = ''
        minimize = _ConditionalSubstitution(condition=gui, false_value='--minimize')
        stream_argument = _ConditionalSubstitution(condition=stream, true_value='--stream')
        xvfb_run_prefix = []
        if 'WEBOTS_OFFSCREEN' in os.environ:
            # Render off-screen under a virtual X server (e.g. in CI).
            xvfb_run_prefix.append('xvfb-run')
            xvfb_run_prefix.append('--auto-servernum')
            no_rendering = '--no-rendering'
        # no_rendering, stdout, stderr, no_sandbox, minimize
        super().__init__(
            output=output,
            cmd=xvfb_run_prefix + [
                webots_path,
                stream_argument,
                no_rendering,
                stdout,
                stderr,
                no_sandbox,
                minimize,
                world,
                '--batch',
                ['--mode=', mode],
            ],
            **kwargs
        )
|
#!/usr/bin/env python
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This launcher simply starts Webots."""
import os
import sys
from launch.actions import ExecuteProcess
from launch.substitution import Substitution
from launch.substitutions import TextSubstitution
from webots_ros2_driver.utils import get_webots_home, handle_webots_installation
class _ConditionalSubstitution(Substitution):
def __init__(self, *, condition, false_value='', true_value=''):
self.__condition = condition if isinstance(condition, Substitution) else TextSubstitution(text=str(condition))
self.__false_value = false_value if isinstance(false_value, Substitution) else TextSubstitution(text=false_value)
self.__true_value = true_value if isinstance(true_value, Substitution) else TextSubstitution(text=true_value)
def perform(self, context):
if context.perform_substitution(self.__condition).lower() in ['false', '0', '']:
return context.perform_substitution(self.__false_value)
return context.perform_substitution(self.__true_value)
class WebotsLauncher(ExecuteProcess):
def __init__(self, output='screen', world=None, gui=True, mode='realtime', stream=False, **kwargs):
# Find Webots executable
webots_path = get_webots_home(show_warning=True)
if webots_path is None:
handle_webots_installation()
webots_path = get_webots_home()
if sys.platform == 'win32':
webots_path = os.path.join(webots_path, 'msys64', 'mingw64', 'bin')
webots_path = os.path.join(webots_path, 'webots')
mode = mode if isinstance(mode, Substitution) else TextSubstitution(text=mode)
world = world if isinstance(world, Substitution) else TextSubstitution(text=world)
no_rendering = _ConditionalSubstitution(condition=gui, false_value='--no-rendering')
stdout = _ConditionalSubstitution(condition=gui, false_value='--stdout')
stderr = _ConditionalSubstitution(condition=gui, false_value='--stderr')
no_sandbox = _ConditionalSubstitution(condition=gui, false_value='--no-sandbox')
if sys.platform == 'win32':
# Windows doesn't have the sandbox argument
no_sandbox = ''
minimize = _ConditionalSubstitution(condition=gui, false_value='--minimize')
stream_argument = _ConditionalSubstitution(condition=stream, true_value='--stream')
xvfb_run_prefix = []
if 'WEBOTS_OFFSCREEN' in os.environ:
xvfb_run_prefix.append('xvfb-run')
xvfb_run_prefix.append('--auto-servernum')
no_rendering = '--no-rendering'
# no_rendering, stdout, stderr, no_sandbox, minimize
super().__init__(
output=output,
cmd=xvfb_run_prefix + [
webots_path,
stream_argument,
no_rendering,
stdout,
stderr,
no_sandbox,
minimize,
world,
'--batch',
['--mode=', mode],
],
**kwargs
)
|
en
| 0.798369
|
#!/usr/bin/env python # Copyright 1996-2021 Cyberbotics Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This launcher simply starts Webots. # Find Webots executable # Windows doesn't have the sandbox argument # no_rendering, stdout, stderr, no_sandbox, minimize
| 2.499748
| 2
|
bin/extract-admin-password.py
|
DimensionDataResearch/ddcloud-docker-swarm
| 0
|
6629270
|
#!/usr/bin/env python2
from __future__ import unicode_literals, print_function
import os
import re
# Locate the terraform script relative to this file's parent directory.
root = os.path.normpath(
    os.path.dirname(os.path.abspath(__file__)) + "/.."
)
terraform_script_file = os.path.join(root, "terraform/ddcloud-docker-swarm.tf")


def main():
    """Print the admin_password default from the terraform script, or fail."""
    with open(terraform_script_file, mode='r') as terraform_script:
        script_lines = terraform_script.readlines()
    # NOTE(review): matching line-by-line only works when the whole
    # variable block sits on a single line; a multi-line HCL block would
    # never match — confirm the expected file format.
    variable_matcher = r'variable "admin_password"\s+\{\s+default\s+=\s+"(.*)"\s+\}'
    for script_line in script_lines:
        match = re.match(variable_matcher, script_line)
        if not match:
            continue
        print(match.group(1))
        return
    # BUG FIX: 'raise "<string>"' is illegal (string exceptions were
    # removed; it raises TypeError instead of the intended error).
    raise RuntimeError("Failed to find variable 'admin_password' in terraform script.")


main()
|
#!/usr/bin/env python2
from __future__ import unicode_literals, print_function
import os
import re
root = os.path.normpath(
os.path.dirname(os.path.abspath(__file__)) + "/.."
)
terraform_script_file = os.path.join(root, "terraform/ddcloud-docker-swarm.tf")
terraform_script = open(terraform_script_file, mode='r')
with terraform_script:
script_lines = terraform_script.readlines()
found = False
variable_matcher = r'variable "admin_password"\s+\{\s+default\s+=\s+"(.*)"\s+\}'
for script_line in script_lines:
match = re.match(variable_matcher, script_line)
if not match:
continue
print(match.group(1))
found = True
break
if not found:
raise "Failed to find variable 'admin_password' in terraform script."
|
ru
| 0.196695
|
#!/usr/bin/env python2
| 2.356435
| 2
|
src/articles/services/core/service.py
|
robzzy/articles-service
| 0
|
6629271
|
# -*- coding: utf-8 -*-
import json
from nameko_tracer import Tracer
from nameko.events import EventDispatcher
from nameko_sqlalchemy import DatabaseSession
from nameko.rpc import rpc
from nameko.web.handlers import http
from articles.models import DeclarativeBase
from articles.services.core.authors import AuthorMixin
from articles.services.core.reviews import ReviewMixin
from articles.services.core.articles import ArticleMixin
class ArticleService(AuthorMixin, ReviewMixin, ArticleMixin):
    """Nameko service exposing author/review/article endpoints.

    The domain endpoints come from the mixins; this class wires up the
    shared dependencies (tracer, DB session, event dispatcher) and the
    health-check endpoints.
    """
    name = "articles"  # nameko service name used for RPC routing
    tracer = Tracer()
    db_session = DatabaseSession(DeclarativeBase)
    event_dispatcher = EventDispatcher()

    @http("GET", "/healthcheck")
    def health_check_http(self, request):
        """HTTP wrapper around the RPC health check (JSON body)."""
        return json.dumps(self.health_check())

    @rpc
    def health_check(self):
        """Liveness probe; reports ok whenever the service is running."""
        return {"status": "ok"}
|
# -*- coding: utf-8 -*-
import json
from nameko_tracer import Tracer
from nameko.events import EventDispatcher
from nameko_sqlalchemy import DatabaseSession
from nameko.rpc import rpc
from nameko.web.handlers import http
from articles.models import DeclarativeBase
from articles.services.core.authors import AuthorMixin
from articles.services.core.reviews import ReviewMixin
from articles.services.core.articles import ArticleMixin
class ArticleService(AuthorMixin, ReviewMixin, ArticleMixin):
name = "articles"
tracer = Tracer()
db_session = DatabaseSession(DeclarativeBase)
event_dispatcher = EventDispatcher()
@http("GET", "/healthcheck")
def health_check_http(self, request):
return json.dumps(self.health_check())
@rpc
def health_check(self):
return {"status": "ok"}
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 2.069993
| 2
|
tuples-and-sets/1_count_same_values.py
|
Minkov/python-advanced-2020-01
| 5
|
6629272
|
<gh_stars>1-10
def solve(values):
    """Print each distinct value with its number of occurrences.

    Values are reported in first-seen order as '<value> - <count> times'.
    """
    counts = {}
    for item in values:
        counts[item] = counts.get(item, 0) + 1
    for item, times in counts.items():
        print(f'{item} - {times} times')
# Read space-separated numbers from stdin and report their frequencies.
values_strings = input().split(' ')
values = [float(x) for x in values_strings]
solve(values)
|
def solve(values):
occurances = {}
for value in values:
if value not in occurances:
occurances[value] = 0
occurances[value] += 1
for number, count in occurances.items():
print(f'{number} - {count} times')
values_strings = input().split(' ')
values = [float(x) for x in values_strings]
solve(values)
|
none
| 1
| 3.452361
| 3
|
|
utils.py
|
mateuszbuda/duke-dbt-detection
| 10
|
6629273
|
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from skimage.transform import rescale
from dataset import TomoDetectionDataset
def log_images(x, y_true, y_pred):
    """Render predicted and ground-truth boxes onto each image in a batch.

    x, y_true, y_pred: torch tensors; channel 0 of x is used as the
    image, y_true/y_pred are detection grids decoded by label2bboxes.
    Returns a list of HxWx3 numpy images with boxes and scores drawn.
    """
    images = []
    y_true_np = y_true.detach().cpu().numpy()
    y_pred_np = y_pred.detach().cpu().numpy()
    x_np = x.detach().cpu().numpy()
    for c in range(y_true_np.shape[0]):
        pred_bboxes = label2bboxes(y_pred_np[c])
        # For ground truth, decode exactly the cells marked 1.
        gt_bboxes = label2bboxes(y_true_np[c], n_boxes=np.sum(y_true_np[c] == 1))
        image = np.squeeze(x_np[c, 0])
        # Normalize to [0, 1] for drawing. NOTE(review): a constant image
        # would divide by zero here — confirm inputs are never constant.
        image -= np.min(image)
        image /= np.max(image)
        image_bboxes = draw_predictions(
            image, pred_bboxes, gt_bboxes
        )
        images.append(image_bboxes)
    return images
def label2bboxes(label, n_boxes=6, min_size=28):
    """Decode a detection grid into at most n_boxes bounding boxes.

    label: array whose channel 0 is per-cell objectness and channels 1-4
    are the location/size regression values.
    Returns a dict of parallel lists: X, Y, Width, Height, Score.
    """
    obj = label[0]
    loc = label[1:]
    # Threshold = (n_boxes+1)-th highest objectness; only cells strictly
    # above it are kept, so at most n_boxes boxes are produced.
    th = sorted(obj.flatten(), reverse=True)[n_boxes]
    bboxes = {"X": [], "Y": [], "Width": [], "Height": [], "Score": []}
    csz = TomoDetectionDataset.cell_size
    anchor = TomoDetectionDataset.anchor
    for i in range(obj.shape[0]):
        for j in range(obj.shape[1]):
            if obj[i, j] > th:
                # Cell center in image coordinates.
                y_cell = i * csz + csz / 2
                x_cell = j * csz + csz / 2
                # Offsets are in half-cell units relative to the center.
                y_center = y_cell + (csz / 2) * loc[0, i, j]
                x_center = x_cell + (csz / 2) * loc[1, i, j]
                # Box size scales the anchor by the squared regression value.
                h = anchor[0] * loc[2, i, j] ** 2
                w = anchor[1] * loc[3, i, j] ** 2
                if obj[i, j] == 1:
                    # Objectness exactly 1 marks ground truth; enforce a
                    # minimum drawable size for those boxes.
                    h = max(h, min_size)
                    w = max(w, min_size)
                bboxes["Y"].append(max(0, y_center - (h / 2)))
                bboxes["X"].append(max(0, x_center - (w / 2)))
                bboxes["Width"].append(w)
                bboxes["Height"].append(h)
                bboxes["Score"].append(obj[i, j])
    return bboxes
def draw_predictions(image, pred_boxes, gt_boxes):
    """Return an RGB copy of image with GT (green) and predicted (red) boxes.

    Predicted boxes are drawn lowest-score first so the strongest
    detections end up on top, each annotated with its score label.
    """
    # Expand grayscale to 3 channels for colored drawing.
    image = np.stack((image,) * 3, axis=-1)
    red = [np.max(image), 0, 0]
    green = [0, np.max(image), 0]
    for i in range(len(gt_boxes["X"])):
        x, y = int(gt_boxes["X"][i]), int(gt_boxes["Y"][i])
        w, h = int(gt_boxes["Width"][i]), int(gt_boxes["Height"][i])
        image = draw_bbox(image, x, y, w, h, c=green, lw=4)
    boxes = zip(pred_boxes["X"], pred_boxes["Y"], pred_boxes["Width"], pred_boxes["Height"], pred_boxes["Score"])
    for box in sorted(boxes, key=lambda a: a[-1]):
        x, y = int(box[0]), int(box[1])
        x, y = max(x, 0), max(y, 0)
        w, h = int(box[2]), int(box[3])
        image = draw_bbox(image, x, y, w, h, c=red, lw=3)
        image = draw_score(image, box[-1], x, y)
    return image
def draw_bbox(img, x, y, w, h, c=None, lw=4):
    """Draw a rectangle outline of line width lw onto img, in place.

    The top-left corner is clamped inside the image. c defaults to the
    image maximum (expanded to a first-channel color for multi-channel
    images). Returns the mutated image.
    """
    x = min(max(x, 0), img.shape[1] - 1)
    y = min(max(y, 0), img.shape[0] - 1)
    if c is None:
        c = np.max(img)
        if len(img.shape) > 2:
            c = [c] + [0] * (img.shape[-1] - 1)
    top, bottom = y, y + h
    left, right = x, x + w
    # Paint the four edges of the rectangle.
    img[top:top + lw, left:right] = c
    img[bottom - lw:bottom, left:right] = c
    img[top:bottom, left:left + lw] = c
    img[top:bottom, right - lw:right] = c
    return img
def draw_score(img, score, x, y):
    """Stamp a 'NN%' score label onto img at (x, y), in place.

    score is clamped to [0, 1] and shown as an integer percentage. The
    label is cropped at the image border; if the cropped shapes still
    mismatch, the label is skipped entirely. Returns img.
    """
    score = int(min(max(0, score * 100), 100))
    # Scale the [0, 1] text image up to the image's intensity range.
    txt_img = text_image(str(score) + "%") * np.max(img)
    txt_h, txt_w = txt_img.shape[0], txt_img.shape[1]
    if y + txt_h > img.shape[0]:
        max_h = img.shape[0] - y
        txt_img = txt_img[:max_h]
    if x + txt_w > img.shape[1]:
        max_w = img.shape[1] - x
        txt_img = txt_img[:, :max_w]
    if img[y : y + txt_h, x : x + txt_w].shape == txt_img.shape:
        img[y : y + txt_h, x : x + txt_w] = txt_img
    return img
def text_image(text, bg=(255, 0, 0), margin=4):
    """Render text on a colored background as a float RGB array in [0, 1].

    The image is drawn color-inverted and flipped back at the end, then
    upscaled 2x. bg is the desired background color (RGB, 0-255).
    """
    inverted_bg = tuple([255 - c for c in bg])
    half_margin = margin // 2
    font = ImageFont.load_default()
    # NOTE(review): ImageFont.getsize was removed in Pillow 10 — confirm
    # the pinned Pillow version still provides it.
    text_width, text_height = font.getsize(text)
    canvas = Image.new("RGB", [text_width + 2 * half_margin - 1, text_height], inverted_bg)
    draw = ImageDraw.Draw(canvas)
    # White here becomes black after the inversion below.
    inverted_text_color = "#FFFFFF"
    draw.text((half_margin, 0), text, font=font, fill=inverted_text_color)
    image = (255 - np.asarray(canvas)) / 255.0
    return rescale(
        image,
        2.0,
        anti_aliasing=False,
        preserve_range=True,
        multichannel=True,
        mode="edge",
    )
def iou_3d(A, B):
    """Intersection-over-union of two axis-aligned 3D boxes.

    Boxes are (x0, y0, z0, x1, y1, z1[, ...]); returns 0.0 when the
    boxes do not overlap along every axis.
    """
    extents = []
    for axis in range(3):
        lo = max(A[axis], B[axis])
        hi = min(A[axis + 3], B[axis + 3])
        extent = hi - lo
        if extent <= 0:
            return 0.0
        extents.append(extent)
    intersection = extents[0] * extents[1] * extents[2]
    volume_a = (A[3] - A[0]) * (A[4] - A[1]) * (A[5] - A[2])
    volume_b = (B[3] - B[0]) * (B[4] - B[1]) * (B[5] - B[2])
    union = volume_a + volume_b - intersection
    return float(intersection) / union
def box_union_3d(A, B):
    """Smallest box enclosing both A and B, keeping the higher score.

    A, B: (x0, y0, z0, x1, y1, z1, score). Returns a new list in the
    same layout.
    """
    lower_corner = [min(A[i], B[i]) for i in range(3)]
    upper_corner = [max(A[i], B[i]) for i in range(3, 6)]
    best_score = max(A[6], B[6])
    return lower_corner + upper_corner + [best_score]
|
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from skimage.transform import rescale
from dataset import TomoDetectionDataset
def log_images(x, y_true, y_pred):
images = []
y_true_np = y_true.detach().cpu().numpy()
y_pred_np = y_pred.detach().cpu().numpy()
x_np = x.detach().cpu().numpy()
for c in range(y_true_np.shape[0]):
pred_bboxes = label2bboxes(y_pred_np[c])
gt_bboxes = label2bboxes(y_true_np[c], n_boxes=np.sum(y_true_np[c] == 1))
image = np.squeeze(x_np[c, 0])
image -= np.min(image)
image /= np.max(image)
image_bboxes = draw_predictions(
image, pred_bboxes, gt_bboxes
)
images.append(image_bboxes)
return images
def label2bboxes(label, n_boxes=6, min_size=28):
obj = label[0]
loc = label[1:]
th = sorted(obj.flatten(), reverse=True)[n_boxes]
bboxes = {"X": [], "Y": [], "Width": [], "Height": [], "Score": []}
csz = TomoDetectionDataset.cell_size
anchor = TomoDetectionDataset.anchor
for i in range(obj.shape[0]):
for j in range(obj.shape[1]):
if obj[i, j] > th:
y_cell = i * csz + csz / 2
x_cell = j * csz + csz / 2
y_center = y_cell + (csz / 2) * loc[0, i, j]
x_center = x_cell + (csz / 2) * loc[1, i, j]
h = anchor[0] * loc[2, i, j] ** 2
w = anchor[1] * loc[3, i, j] ** 2
if obj[i, j] == 1:
h = max(h, min_size)
w = max(w, min_size)
bboxes["Y"].append(max(0, y_center - (h / 2)))
bboxes["X"].append(max(0, x_center - (w / 2)))
bboxes["Width"].append(w)
bboxes["Height"].append(h)
bboxes["Score"].append(obj[i, j])
return bboxes
def draw_predictions(image, pred_boxes, gt_boxes):
image = np.stack((image,) * 3, axis=-1)
red = [np.max(image), 0, 0]
green = [0, np.max(image), 0]
for i in range(len(gt_boxes["X"])):
x, y = int(gt_boxes["X"][i]), int(gt_boxes["Y"][i])
w, h = int(gt_boxes["Width"][i]), int(gt_boxes["Height"][i])
image = draw_bbox(image, x, y, w, h, c=green, lw=4)
boxes = zip(pred_boxes["X"], pred_boxes["Y"], pred_boxes["Width"], pred_boxes["Height"], pred_boxes["Score"])
for box in sorted(boxes, key=lambda a: a[-1]):
x, y = int(box[0]), int(box[1])
x, y = max(x, 0), max(y, 0)
w, h = int(box[2]), int(box[3])
image = draw_bbox(image, x, y, w, h, c=red, lw=3)
image = draw_score(image, box[-1], x, y)
return image
def draw_bbox(img, x, y, w, h, c=None, lw=4):
    """Paint a rectangular frame of line width *lw* onto *img* in place.

    The top-left corner is clamped into the image; slices running past
    the image edge are silently truncated by numpy. When *c* is None the
    brightest value in the image is used (first channel only for RGB).
    """
    x = min(max(x, 0), img.shape[1] - 1)
    y = min(max(y, 0), img.shape[0] - 1)
    if c is None:
        c = np.max(img)
        if len(img.shape) > 2:
            # Color image: put the intensity in the first channel only.
            c = [c] + [0] * (img.shape[-1] - 1)
    edges = (
        (slice(y, y + lw), slice(x, x + w)),        # top
        (slice(y + h - lw, y + h), slice(x, x + w)),  # bottom
        (slice(y, y + h), slice(x, x + lw)),        # left
        (slice(y, y + h), slice(x + w - lw, x + w)),  # right
    )
    for edge in edges:
        img[edge] = c
    return img
def draw_score(img, score, x, y):
    """Stamp *score* (rendered as a 0-100 percentage) into *img* at (x, y).

    The text patch is scaled to the image's peak intensity and cropped at
    the image border; if the shapes still disagree the stamp is skipped.
    """
    percent = int(min(max(0, score * 100), 100))
    patch = text_image(str(percent) + "%") * np.max(img)
    patch_h, patch_w = patch.shape[0], patch.shape[1]
    if y + patch_h > img.shape[0]:
        patch = patch[: img.shape[0] - y]
    if x + patch_w > img.shape[1]:
        patch = patch[:, : img.shape[1] - x]
    # The target slice is clipped by numpy at the border; only write when
    # it matches the cropped patch exactly.
    target = img[y : y + patch_h, x : x + patch_w]
    if target.shape == patch.shape:
        img[y : y + patch_h, x : x + patch_w] = patch
    return img
def text_image(text, bg=(255, 0, 0), margin=4):
    """Render *text* into a float image in [0, 1], upscaled 2x.

    The canvas is drawn inverted (inverted background, white fill) and
    flipped back by the final ``255 - ...`` so that the output reads as
    dark text on a *bg*-colored background.
    """
    inverted_bg = tuple(255 - channel for channel in bg)
    pad = margin // 2
    font = ImageFont.load_default()
    text_w, text_h = font.getsize(text)
    canvas = Image.new("RGB", [text_w + 2 * pad - 1, text_h], inverted_bg)
    # The fill is white here; the inversion below turns it black.
    ImageDraw.Draw(canvas).text((pad, 0), text, font=font, fill="#FFFFFF")
    image = (255 - np.asarray(canvas)) / 255.0
    return rescale(
        image,
        2.0,
        anti_aliasing=False,
        preserve_range=True,
        multichannel=True,
        mode="edge",
    )
def iou_3d(A, B):
    """Return the intersection-over-union of two axis-aligned 3-D boxes.

    Boxes are (x0, y0, z0, x1, y1, z1, ...); a non-positive overlap along
    any axis means the boxes are disjoint and the IoU is 0.
    """
    x0, x1 = max(A[0], B[0]), min(A[3], B[3])
    y0, y1 = max(A[1], B[1]), min(A[4], B[4])
    z0, z1 = max(A[2], B[2]), min(A[5], B[5])
    dx, dy, dz = x1 - x0, y1 - y0, z1 - z0
    if dx <= 0 or dy <= 0 or dz <= 0:
        return 0.0
    intersection = dx * dy * dz
    vol_a = (A[3] - A[0]) * (A[4] - A[1]) * (A[5] - A[2])
    vol_b = (B[3] - B[0]) * (B[4] - B[1]) * (B[5] - B[2])
    return float(intersection) / (vol_a + vol_b - intersection)
def box_union_3d(A, B):
    """Return the smallest box enclosing both A and B, keeping the max score.

    Boxes are (x0, y0, z0, x1, y1, z1, score).
    """
    lows = [min(a, b) for a, b in zip(A[:3], B[:3])]
    highs = [max(a, b) for a, b in zip(A[3:6], B[3:6])]
    return lows + highs + [max(A[6], B[6])]
|
none
| 1
| 2.563923
| 3
|
|
Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/python/autograph/utils/py_func.py
|
JustinACoder/H22-GR3-UnrealAI
| 6
|
6629274
|
<gh_stars>1-10
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pyfunc creation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import script_ops
class MatchDType(namedtuple('MatchDType', ('arg_number',))):
  """Marker requesting that a return dtype match an argument's dtype.

  Used in conjunction with function calls. For example, ``MatchDType(0)``
  resolves to the DType of the first positional argument.
  """
def wrap_py_func(f, return_dtypes, args, kwargs=None, use_dummy_return=False):
  """Helper that wraps a callable to py_func.

  The helper passes tensor arguments through the py_func interface. Non-tensor
  arguments are allowed, and will be passed to f directly. Note that non-tensor
  arguments captured by f will not update every time the wrapper is
  called (this is consistent with its argument list, which only includes
  the tensor arguments). In general, it's safest not to reuse this wrapper.

  Args:
    f: Callable
    return_dtypes: None, individual of tuple/list of DType or MatchDType, the
        data type for each of f's return value(s). Set to None if f has no
        return values or use_dummy_return is True. Use MatchDType to define a
        dtype identical to that of `i`th argument (argument 0 is the first);
        an argument must be of Tensor type if it is to be used with MatchDType.
    args: Positional arguments for f, as list or tuple.
    kwargs: Keyword arguments for f, as dict with string keys. May be None.
    use_dummy_return: If True, the function will return a dummy value of 1
        and discard its actual return value.
  Returns:
    The return values of f converted to tensor.
  Raises:
    ValueError: if any of the arguments are incorrect.
  """
  if return_dtypes and use_dummy_return:
    raise ValueError('if use_dummy_return is True, return_dtypes must be empty')

  tensor_args = []
  # Maps a tensor-valued positional index (int) or kwarg key (str) to its
  # slot in tensor_args, so the wrapper can reassemble the original call.
  tensor_args_idx = {}

  # Of the positional arguments, only grab the tensor ones to be passed through
  # the py_func.
  n_args = len(args)
  arg_is_tensor = tuple(map(tensor_util.is_tensor, args))
  for i in range(n_args):
    if arg_is_tensor[i]:
      tensor_args_idx[i] = len(tensor_args)
      tensor_args.append(args[i])

  # We essentially take the tensor kwargs, if any, and add them to the list of
  # positional arguments. The kwargs are then reconstructed inside the py_func.
  #
  # For example, if
  #
  #     args = [Tensor(1), 'foo']
  #     kwargs = {'a': Tensor(2), 'b': 'bar'}
  #
  # Then
  #
  #     tensor_args = (Tensor(1), Tensor(2))
  #     kwarg_keys = ('a', 'b')
  if kwargs:
    kwarg_keys = tuple(kwargs.keys())
    kwarg_is_tensor = {k: tensor_util.is_tensor(kwargs[k]) for k in kwarg_keys}
    for k in kwarg_keys:
      if kwarg_is_tensor[k]:
        tensor_args_idx[k] = len(tensor_args)
        tensor_args.append(kwargs[k])
  else:
    kwarg_keys = ()

  # Set up return dtypes.
  def match_arg_dtype(arg_number):
    # Resolve a MatchDType marker to the dtype of the referenced argument.
    arg = args[arg_number]
    if not arg_is_tensor[arg_number]:
      raise ValueError(
          'argument %d was used with MatchDType and must be a tf.Tensor, but '
          'was %s instead' % (arg_number, type(arg)))
    return arg.dtype

  if return_dtypes:
    if isinstance(return_dtypes, MatchDType):
      return_dtypes = match_arg_dtype(return_dtypes.arg_number)
    elif isinstance(return_dtypes, (list, tuple)):
      return_dtypes = tuple(
          match_arg_dtype(a.arg_number) if isinstance(a, MatchDType) else a
          for a in return_dtypes)
    else:
      assert isinstance(return_dtypes, dtypes.DType)

  def f_wrapper(*tensor_args):
    f_args = tuple(tensor_args[tensor_args_idx[i]] if arg_is_tensor[i] else a
                   for i, a in enumerate(args))
    # Plain iteration over kwarg_keys; the index previously bound via
    # enumerate() was unused.
    f_kwargs = {
        k: tensor_args[tensor_args_idx[k]] if kwarg_is_tensor[k] else kwargs[k]
        for k in kwarg_keys
    }
    retval = f(*f_args, **f_kwargs)
    return 1 if use_dummy_return else retval

  return script_ops.py_func(f_wrapper, tensor_args, dtypes.int64
                            if use_dummy_return else return_dtypes)
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pyfunc creation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import script_ops
class MatchDType(namedtuple('MatchDType', ('arg_number',))):
  """Marker requesting that a return dtype match an argument's dtype.

  Used in conjunction with function calls. For example, ``MatchDType(0)``
  resolves to the DType of the first positional argument.
  """
def wrap_py_func(f, return_dtypes, args, kwargs=None, use_dummy_return=False):
  """Helper that wraps a callable to py_func.

  The helper passes tensor arguments through the py_func interface. Non-tensor
  arguments are allowed, and will be passed to f directly. Note that non-tensor
  arguments captured by f will not update every time the wrapper is
  called (this is consistent with its argument list, which only includes
  the tensor arguments). In general, it's safest not to reuse this wrapper.

  Args:
    f: Callable
    return_dtypes: None, individual of tuple/list of DType or MatchDType, the
        data type for each of f's return value(s). Set to None if f has no
        return values or use_dummy_return is True. Use MatchDType to define a
        dtype identical to that of `i`th argument (argument 0 is the first);
        an argument must be of Tensor type if it is to be used with MatchDType.
    args: Positional arguments for f, as list or tuple.
    kwargs: Keyword arguments for f, as dict with string keys. May be None.
    use_dummy_return: If True, the function will return a dummy value of 1
        and discard its actual return value.
  Returns:
    The return values of f converted to tensor.
  Raises:
    ValueError: if any of the arguments are incorrect.
  """
  if return_dtypes and use_dummy_return:
    raise ValueError('if use_dummy_return is True, return_dtypes must be empty')

  tensor_args = []
  # Maps a tensor-valued positional index (int) or kwarg key (str) to its
  # slot in tensor_args, so the wrapper can reassemble the original call.
  tensor_args_idx = {}

  # Of the positional arguments, only grab the tensor ones to be passed through
  # the py_func.
  n_args = len(args)
  arg_is_tensor = tuple(map(tensor_util.is_tensor, args))
  for i in range(n_args):
    if arg_is_tensor[i]:
      tensor_args_idx[i] = len(tensor_args)
      tensor_args.append(args[i])

  # We essentially take the tensor kwargs, if any, and add them to the list of
  # positional arguments. The kwargs are then reconstructed inside the py_func.
  #
  # For example, if
  #
  #     args = [Tensor(1), 'foo']
  #     kwargs = {'a': Tensor(2), 'b': 'bar'}
  #
  # Then
  #
  #     tensor_args = (Tensor(1), Tensor(2))
  #     kwarg_keys = ('a', 'b')
  if kwargs:
    kwarg_keys = tuple(kwargs.keys())
    kwarg_is_tensor = {k: tensor_util.is_tensor(kwargs[k]) for k in kwarg_keys}
    for k in kwarg_keys:
      if kwarg_is_tensor[k]:
        tensor_args_idx[k] = len(tensor_args)
        tensor_args.append(kwargs[k])
  else:
    kwarg_keys = ()

  # Set up return dtypes.
  def match_arg_dtype(arg_number):
    # Resolve a MatchDType marker to the dtype of the referenced argument.
    arg = args[arg_number]
    if not arg_is_tensor[arg_number]:
      raise ValueError(
          'argument %d was used with MatchDType and must be a tf.Tensor, but '
          'was %s instead' % (arg_number, type(arg)))
    return arg.dtype

  if return_dtypes:
    if isinstance(return_dtypes, MatchDType):
      return_dtypes = match_arg_dtype(return_dtypes.arg_number)
    elif isinstance(return_dtypes, (list, tuple)):
      return_dtypes = tuple(
          match_arg_dtype(a.arg_number) if isinstance(a, MatchDType) else a
          for a in return_dtypes)
    else:
      assert isinstance(return_dtypes, dtypes.DType)

  def f_wrapper(*tensor_args):
    f_args = tuple(tensor_args[tensor_args_idx[i]] if arg_is_tensor[i] else a
                   for i, a in enumerate(args))
    # Plain iteration over kwarg_keys; the index previously bound via
    # enumerate() was unused.
    f_kwargs = {
        k: tensor_args[tensor_args_idx[k]] if kwarg_is_tensor[k] else kwargs[k]
        for k in kwarg_keys
    }
    retval = f(*f_args, **f_kwargs)
    return 1 if use_dummy_return else retval

  return script_ops.py_func(f_wrapper, tensor_args, dtypes.int64
                            if use_dummy_return else return_dtypes)
|
en
| 0.711019
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Pyfunc creation utilities. Allows matching the dtype of an argument.
Used in conjunction with function calls. For example, MatchDType(0) will
match the DType of the first argument. Helper that wraps a callable to py_func.
The helper passes tensor arguments through the py_func interface. Non-tensor
arguments are allowed, and will be passed to f directly. Note that non-tensor
arguments are captured by f will not update every time the wrapper is
called (this is consistent with its argument list, which only includes
the tensor arguments). In general, it's safest not to reuse this wrapper.
Args:
f: Callable
return_dtypes: None, individual of tuple/list of DType or MatchDType, the
data type for each of f's return value(s). Set to None if f has no
return values or use_dummy_return is True. Use MatchDType to define a
dtype identical to that of `i`th argument (argument 0 is the first);
an argument must of Tensor type if it is to be used with MatchDType.
args: Positional arguments for f, as list or tuple.
kwargs: Keyword arguments for f, as dict with string keys. May be None.
use_dummy_return: If True, the function will return a dummy value of 1
and discard its actual return value.
Returns:
The return values of f converted to tensor.
Raises:
ValueError: if any of the arguments are incorrect. # Of the positional arguments, only grab the tensor ones to be passed through # the py_func. # We essentially take the tensor kwargs, if any, and add them to the list of # positional arguments. The kwargs are then reconstructed inside the py_func. # # For example, if # # args = [Tensor(1), 'foo'] # kwargs = {'a': Tensor(2), 'b': 'bar'} # # Then # # tensor_args = (Tensor(1), Tensor(2)) # kwarg_keys = ('a', 'b') # Set up return dtypes.
| 2.434228
| 2
|
mmaction/models/tenons/spatial_temporal_modules/__init__.py
|
arpanmangal/coinaction
| 1
|
6629275
|
from .simple_spatial_module import SimpleSpatialModule
from .simple_spatial_temporal_module import SimpleSpatialTemporalModule
# Names re-exported as this package's public API.
__all__ = [
    'SimpleSpatialModule',
    'SimpleSpatialTemporalModule'
]
|
from .simple_spatial_module import SimpleSpatialModule
from .simple_spatial_temporal_module import SimpleSpatialTemporalModule
# Names re-exported as this package's public API.
__all__ = [
    'SimpleSpatialModule',
    'SimpleSpatialTemporalModule'
]
|
none
| 1
| 1.057032
| 1
|
|
graph_embedding/dmon/train_dgi_batched.py
|
pedersor/google-research
| 0
|
6629276
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO(tsitsulin): add headers, tests, and improve style."""
from absl import app
from absl import flags
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import normalized_mutual_info_score
import tensorflow.compat.v2 as tf
from graph_embedding.dmon.layers.gcn import GCN
from graph_embedding.dmon.models.dgi import deep_graph_infomax
from graph_embedding.dmon.synthetic_data.graph_util import construct_knn_graph
from graph_embedding.dmon.synthetic_data.overlapping_gaussians import line_gaussians
from graph_embedding.dmon.utilities.batching import make_batch
from graph_embedding.dmon.utilities.batching import random_batch
from graph_embedding.dmon.utilities.shuffling import shuffle_inbatch
tf.compat.v1.enable_v2_behavior()
FLAGS = flags.FLAGS
# Experiment configuration; all numeric values are bounded below by 0.
flags.DEFINE_integer(
    'n_nodes', 1000, 'Number of nodes for the synthetic graph.', lower_bound=0)
flags.DEFINE_integer(
    'n_clusters',
    2,
    'Number of clusters for the synthetic graph.',
    lower_bound=0)
flags.DEFINE_integer(
    'batch_size', 16, 'Batch size to use for training.', lower_bound=0)
flags.DEFINE_float(
    'train_size', 0.2, 'Training data proportion.', lower_bound=0)
flags.DEFINE_integer(
    'n_epochs', 200, 'Number of epochs to train.', lower_bound=0)
flags.DEFINE_float(
    'learning_rate', 0.01, 'Optimizer\'s learning rate.', lower_bound=0)
def main(argv):
  """Trains batched Deep Graph Infomax on synthetic line Gaussians.

  Builds a kNN graph over the clean data, trains a DGI encoder against
  corrupted (shuffled) feature batches, then evaluates the learned
  representations with a logistic-regression probe (NMI and accuracy on
  the held-out nodes).
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  print('Bröther may i have some self-lööps')
  n_nodes = FLAGS.n_nodes
  n_clusters = FLAGS.n_clusters
  train_size = FLAGS.train_size
  batch_size = FLAGS.batch_size
  data_clean, data_dirty, labels = line_gaussians(n_nodes, n_clusters)
  graph_clean = construct_knn_graph(data_clean)
  n_neighbors = [15, 10]  # TODO(tsitsulin): move to FLAGS.
  # One root node plus the padded 1-hop and 2-hop neighborhoods.
  total_matrix_size = 1 + np.cumprod(n_neighbors).sum()
  # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
  # bool is the supported spelling.
  train_mask = np.zeros(n_nodes, dtype=bool)
  train_mask[np.random.choice(
      np.arange(n_nodes), int(n_nodes * train_size), replace=False)] = True
  test_mask = ~train_mask
  print(
      f'Data shape: {data_clean.shape}, graph shape: {graph_clean.shape}'
  )
  print(f'Train size: {train_mask.sum()}, test size: {test_mask.sum()}')
  input_features = tf.keras.layers.Input(shape=(
      total_matrix_size,
      2,
  ))
  input_features_corrupted = tf.keras.layers.Input(
      shape=(
          total_matrix_size,
          2,
      ))
  input_graph = tf.keras.layers.Input((
      total_matrix_size,
      total_matrix_size,
  ))
  # Encoder: two GCN layers, then keep only the root node's embedding.
  encoder = [GCN(64), GCN(32), tf.keras.layers.Lambda(lambda x: x[0][:, 0, :])]
  model = deep_graph_infomax(
      [input_features, input_features_corrupted, input_graph], encoder)

  def loss(model, x, y, training):
    # Discriminator loss: real vs. corrupted summaries.
    _, y_ = model(x, training=training)
    return loss_object(y_true=y, y_pred=y_)

  def grad(model, inputs, targets):
    with tf.GradientTape() as tape:
      loss_value = loss(model, inputs, targets, training=True)
      # Include regularization losses collected by the model.
      for loss_internal in model.losses:
        loss_value += loss_internal
    return loss_value, tape.gradient(loss_value, model.trainable_variables)

  labels_dgi = tf.concat([tf.zeros([batch_size, 1]),
                          tf.ones([batch_size, 1])], 0)
  loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
  optimizer = tf.keras.optimizers.Adam(FLAGS.learning_rate)
  for epoch in range(FLAGS.n_epochs):
    subgraph_mat, features_mat, _, nonzero_indices = random_batch(
        graph_clean, data_dirty, batch_size, n_neighbors)
    perc_shuffle = 1  # np.linspace(1, 0.25, max_epoch)[epoch]
    features_corrupted = shuffle_inbatch(features_mat, nonzero_indices,
                                         perc_shuffle)
    loss_value, grads = grad(model,
                             [features_mat, features_corrupted, subgraph_mat],
                             labels_dgi)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    print(
        f'epoch {epoch}, loss: {loss_value.numpy():.4f}, shuffle %: {100*perc_shuffle:.2f}'
    )
  # Linear-probe evaluation on the full (clean) graph.
  subgraph_mat, features_mat, _ = make_batch(graph_clean, data_dirty,
                                             np.arange(n_nodes), n_neighbors)
  representations, _ = model([features_mat, features_mat, subgraph_mat],
                             training=False)
  representations = representations.numpy()
  clf = LogisticRegression(solver='lbfgs', multi_class='multinomial')
  clf.fit(representations[train_mask], labels[train_mask])
  clusters = clf.predict(representations[test_mask])
  print(
      'NMI:',
      normalized_mutual_info_score(
          labels[test_mask], clusters, average_method='arithmetic'))
  print('Accuracy:', 100 * accuracy_score(labels[test_mask], clusters))


if __name__ == '__main__':
  app.run(main)
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO(tsitsulin): add headers, tests, and improve style."""
from absl import app
from absl import flags
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import normalized_mutual_info_score
import tensorflow.compat.v2 as tf
from graph_embedding.dmon.layers.gcn import GCN
from graph_embedding.dmon.models.dgi import deep_graph_infomax
from graph_embedding.dmon.synthetic_data.graph_util import construct_knn_graph
from graph_embedding.dmon.synthetic_data.overlapping_gaussians import line_gaussians
from graph_embedding.dmon.utilities.batching import make_batch
from graph_embedding.dmon.utilities.batching import random_batch
from graph_embedding.dmon.utilities.shuffling import shuffle_inbatch
tf.compat.v1.enable_v2_behavior()
FLAGS = flags.FLAGS
# Experiment configuration; all numeric values are bounded below by 0.
flags.DEFINE_integer(
    'n_nodes', 1000, 'Number of nodes for the synthetic graph.', lower_bound=0)
flags.DEFINE_integer(
    'n_clusters',
    2,
    'Number of clusters for the synthetic graph.',
    lower_bound=0)
flags.DEFINE_integer(
    'batch_size', 16, 'Batch size to use for training.', lower_bound=0)
flags.DEFINE_float(
    'train_size', 0.2, 'Training data proportion.', lower_bound=0)
flags.DEFINE_integer(
    'n_epochs', 200, 'Number of epochs to train.', lower_bound=0)
flags.DEFINE_float(
    'learning_rate', 0.01, 'Optimizer\'s learning rate.', lower_bound=0)
def main(argv):
  """Trains batched Deep Graph Infomax on synthetic line Gaussians.

  Builds a kNN graph over the clean data, trains a DGI encoder against
  corrupted (shuffled) feature batches, then evaluates the learned
  representations with a logistic-regression probe (NMI and accuracy on
  the held-out nodes).
  """
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  print('Bröther may i have some self-lööps')
  n_nodes = FLAGS.n_nodes
  n_clusters = FLAGS.n_clusters
  train_size = FLAGS.train_size
  batch_size = FLAGS.batch_size
  data_clean, data_dirty, labels = line_gaussians(n_nodes, n_clusters)
  graph_clean = construct_knn_graph(data_clean)
  n_neighbors = [15, 10]  # TODO(tsitsulin): move to FLAGS.
  # One root node plus the padded 1-hop and 2-hop neighborhoods.
  total_matrix_size = 1 + np.cumprod(n_neighbors).sum()
  # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
  # bool is the supported spelling.
  train_mask = np.zeros(n_nodes, dtype=bool)
  train_mask[np.random.choice(
      np.arange(n_nodes), int(n_nodes * train_size), replace=False)] = True
  test_mask = ~train_mask
  print(
      f'Data shape: {data_clean.shape}, graph shape: {graph_clean.shape}'
  )
  print(f'Train size: {train_mask.sum()}, test size: {test_mask.sum()}')
  input_features = tf.keras.layers.Input(shape=(
      total_matrix_size,
      2,
  ))
  input_features_corrupted = tf.keras.layers.Input(
      shape=(
          total_matrix_size,
          2,
      ))
  input_graph = tf.keras.layers.Input((
      total_matrix_size,
      total_matrix_size,
  ))
  # Encoder: two GCN layers, then keep only the root node's embedding.
  encoder = [GCN(64), GCN(32), tf.keras.layers.Lambda(lambda x: x[0][:, 0, :])]
  model = deep_graph_infomax(
      [input_features, input_features_corrupted, input_graph], encoder)

  def loss(model, x, y, training):
    # Discriminator loss: real vs. corrupted summaries.
    _, y_ = model(x, training=training)
    return loss_object(y_true=y, y_pred=y_)

  def grad(model, inputs, targets):
    with tf.GradientTape() as tape:
      loss_value = loss(model, inputs, targets, training=True)
      # Include regularization losses collected by the model.
      for loss_internal in model.losses:
        loss_value += loss_internal
    return loss_value, tape.gradient(loss_value, model.trainable_variables)

  labels_dgi = tf.concat([tf.zeros([batch_size, 1]),
                          tf.ones([batch_size, 1])], 0)
  loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
  optimizer = tf.keras.optimizers.Adam(FLAGS.learning_rate)
  for epoch in range(FLAGS.n_epochs):
    subgraph_mat, features_mat, _, nonzero_indices = random_batch(
        graph_clean, data_dirty, batch_size, n_neighbors)
    perc_shuffle = 1  # np.linspace(1, 0.25, max_epoch)[epoch]
    features_corrupted = shuffle_inbatch(features_mat, nonzero_indices,
                                         perc_shuffle)
    loss_value, grads = grad(model,
                             [features_mat, features_corrupted, subgraph_mat],
                             labels_dgi)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    print(
        f'epoch {epoch}, loss: {loss_value.numpy():.4f}, shuffle %: {100*perc_shuffle:.2f}'
    )
  # Linear-probe evaluation on the full (clean) graph.
  subgraph_mat, features_mat, _ = make_batch(graph_clean, data_dirty,
                                             np.arange(n_nodes), n_neighbors)
  representations, _ = model([features_mat, features_mat, subgraph_mat],
                             training=False)
  representations = representations.numpy()
  clf = LogisticRegression(solver='lbfgs', multi_class='multinomial')
  clf.fit(representations[train_mask], labels[train_mask])
  clusters = clf.predict(representations[test_mask])
  print(
      'NMI:',
      normalized_mutual_info_score(
          labels[test_mask], clusters, average_method='arithmetic'))
  print('Accuracy:', 100 * accuracy_score(labels[test_mask], clusters))


if __name__ == '__main__':
  app.run(main)
|
en
| 0.790846
|
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. TODO(tsitsulin): add headers, tests, and improve style. # TODO(tsitsulin): move to FLAGS. # np.linspace(1, 0.25, max_epoch)[epoch]
| 1.741108
| 2
|
django_project/tests/test_forms.py
|
jsolly/blogthedata
| 1
|
6629277
|
from .base import SetUp
from blog.forms import PostForm
from users.forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
class TestForms(SetUp):
    """Validation tests for the blog PostForm and the users forms."""

    def test_post_form_valid_data(self):
        """A fully-populated PostForm validates."""
        form = PostForm(
            data={
                "title": "My Second Post",
                "slug": "second-post",
                "category": self.category1,
                "metadesc": "I can make you more productive!",
                "draft": False,
                # "metaimg" : ""
                # "metaimg"_mimetype : ""
                "snippet": "Do the things",
                "content": "Do the things. All the things",
                # date_posted : ""
                "author": self.super_user,
                "metaimg_alt_txt": "Meta Image Alt-Text",
                # "likes"
                # "views"
            }
        )
        self.assertTrue(form.is_valid())

    def test_post_form_no_data(self):
        """An empty PostForm fails; the error count is pinned at 3."""
        post_form = PostForm(data={})
        self.assertFalse(post_form.is_valid())
        self.assertEqual(len(post_form.errors), 3)

    # Users Forms
    def test_user_register_form_valid_data(self):
        """UserRegisterForm validates with all fields and a passing captcha."""
        user_form = UserRegisterForm(
            data={
                "username": "test",
                "email": "<EMAIL>",
                "first_name": "Tester",
                "last_name": "Smith",
                "password1": "<PASSWORD>!",
                "password2": "<PASSWORD>!",
                "secret_password": "<PASSWORD>",
                "captcha_0": "dummy-value",
                "captcha_1": "PASSED",
            }
        )
        self.assertTrue(user_form.is_valid())

    def test_user_update_form_valid_data(self):
        """UserUpdateForm validates with an email and username."""
        form = UserUpdateForm(data={"email": "<EMAIL>", "username": "test"})
        self.assertTrue(form.is_valid())

    # Might want to add image validation
    def test_profile_update_form_valid_data(self):
        """ProfileUpdateForm validates with an image value."""
        form = ProfileUpdateForm(data={"image": "image1"})
        self.assertTrue(form.is_valid())
|
from .base import SetUp
from blog.forms import PostForm
from users.forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
class TestForms(SetUp):
    """Validation tests for the blog PostForm and the users forms."""

    def test_post_form_valid_data(self):
        """A fully-populated PostForm validates."""
        form = PostForm(
            data={
                "title": "My Second Post",
                "slug": "second-post",
                "category": self.category1,
                "metadesc": "I can make you more productive!",
                "draft": False,
                # "metaimg" : ""
                # "metaimg"_mimetype : ""
                "snippet": "Do the things",
                "content": "Do the things. All the things",
                # date_posted : ""
                "author": self.super_user,
                "metaimg_alt_txt": "Meta Image Alt-Text",
                # "likes"
                # "views"
            }
        )
        self.assertTrue(form.is_valid())

    def test_post_form_no_data(self):
        """An empty PostForm fails; the error count is pinned at 3."""
        post_form = PostForm(data={})
        self.assertFalse(post_form.is_valid())
        self.assertEqual(len(post_form.errors), 3)

    # Users Forms
    def test_user_register_form_valid_data(self):
        """UserRegisterForm validates with all fields and a passing captcha."""
        user_form = UserRegisterForm(
            data={
                "username": "test",
                "email": "<EMAIL>",
                "first_name": "Tester",
                "last_name": "Smith",
                "password1": "<PASSWORD>!",
                "password2": "<PASSWORD>!",
                "secret_password": "<PASSWORD>",
                "captcha_0": "dummy-value",
                "captcha_1": "PASSED",
            }
        )
        self.assertTrue(user_form.is_valid())

    def test_user_update_form_valid_data(self):
        """UserUpdateForm validates with an email and username."""
        form = UserUpdateForm(data={"email": "<EMAIL>", "username": "test"})
        self.assertTrue(form.is_valid())

    # Might want to add image validation
    def test_profile_update_form_valid_data(self):
        """ProfileUpdateForm validates with an image value."""
        form = ProfileUpdateForm(data={"image": "image1"})
        self.assertTrue(form.is_valid())
|
en
| 0.727937
|
# "metaimg" : "" # "metaimg"_mimetype : "" # date_posted : "" # "likes" # "views" # Users Forms # Might want to add image validation
| 2.55995
| 3
|
examples/plotting/lat_lon_lines.py
|
mridullpandey/sunpy
| 0
|
6629278
|
"""
=================================================
Drawing heliographic longitude and latitude lines
=================================================
How to draw your own (Stonyhurst) longitude and latitude lines
"""
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
from astropy.coordinates import SkyCoord
import sunpy.map
from sunpy.coordinates import frames
from sunpy.data.sample import AIA_171_IMAGE
###############################################################################
# The purpose of this example is to demonstrate the coordinate transformations
# that occur under the hood to show the heliographic grid lines of longitude
# latitude. We first create the Map using the sample data.
aia = sunpy.map.Map(AIA_171_IMAGE)
###############################################################################
# Let's first transform a single heliographic point coordinate.
stonyhurst_center = SkyCoord(12 * u.deg, 12 * u.deg,
frame=frames.HeliographicStonyhurst)
###############################################################################
# Next we transform it into the coordinate frame of our map which is in
# helioprojective coordinates.
hpc_stonyhurst_center = stonyhurst_center.transform_to(aia.coordinate_frame)
print(hpc_stonyhurst_center)
###############################################################################
# Now let's transform two lines, one of longitude and one of
# of latitude. We define the coordinates as we did before and then
# transform them.
num_points = 100
lat_value = 12 * u.deg
lon_value = 35 * u.deg
lon0 = SkyCoord(np.linspace(-80, 80, num_points) * u.deg,
np.ones(num_points) * lon_value, frame=frames.HeliographicStonyhurst)
lat0 = SkyCoord(np.ones(num_points) * lat_value,
np.linspace(-90, 90, num_points) * u.deg,
frame=frames.HeliographicStonyhurst)
hpc_lon0 = lon0.transform_to(aia.coordinate_frame)
hpc_lat0 = lat0.transform_to(aia.coordinate_frame)
###############################################################################
# Now let's plot the results. We'll overlay the autogenerated lon/lat
# grid as well for comparison.
fig = plt.figure()
ax = plt.subplot(projection=aia)
aia.plot()
ax.plot_coord(hpc_lat0, color="C0")
ax.plot_coord(hpc_lon0, color="C0")
aia.draw_grid()
plt.show()
|
"""
=================================================
Drawing heliographic longitude and latitude lines
=================================================
How to draw your own (Stonyhurst) longitude and latitude lines
"""
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
from astropy.coordinates import SkyCoord
import sunpy.map
from sunpy.coordinates import frames
from sunpy.data.sample import AIA_171_IMAGE
###############################################################################
# The purpose of this example is to demonstrate the coordinate transformations
# that occur under the hood to show the heliographic grid lines of longitude
# and latitude. We first create the Map using the sample data.
aia = sunpy.map.Map(AIA_171_IMAGE)
###############################################################################
# Let's first transform a single heliographic point coordinate.
stonyhurst_center = SkyCoord(12 * u.deg, 12 * u.deg,
                             frame=frames.HeliographicStonyhurst)
###############################################################################
# Next we transform it into the coordinate frame of our map which is in
# helioprojective coordinates.
hpc_stonyhurst_center = stonyhurst_center.transform_to(aia.coordinate_frame)
print(hpc_stonyhurst_center)
###############################################################################
# Now let's transform two lines, one of longitude and one of latitude.
# We define the coordinates as we did before and then transform them.
num_points = 100
lat_value = 12 * u.deg
lon_value = 35 * u.deg
# NOTE(review): lon0 varies latitude at fixed longitude (and lat0 the
# reverse) despite the names — the second SkyCoord argument is latitude.
lon0 = SkyCoord(np.linspace(-80, 80, num_points) * u.deg,
                np.ones(num_points) * lon_value, frame=frames.HeliographicStonyhurst)
lat0 = SkyCoord(np.ones(num_points) * lat_value,
                np.linspace(-90, 90, num_points) * u.deg,
                frame=frames.HeliographicStonyhurst)
hpc_lon0 = lon0.transform_to(aia.coordinate_frame)
hpc_lat0 = lat0.transform_to(aia.coordinate_frame)
###############################################################################
# Now let's plot the results. We'll overlay the autogenerated lon/lat
# grid as well for comparison.
fig = plt.figure()
ax = plt.subplot(projection=aia)
aia.plot()
ax.plot_coord(hpc_lat0, color="C0")
ax.plot_coord(hpc_lon0, color="C0")
aia.draw_grid()
plt.show()
|
en
| 0.330892
|
================================================= Drawing heliographic longitude and latitude lines ================================================= How to draw your own (Stonyhurst) longitude and latitude lines ############################################################################### # The purpose of this example is to demonstrate the coordinate transformations # that occur under the hood to show the heliographic grid lines of longitude # latitude. We first create the Map using the sample data. ############################################################################### # Let's first transform a single heliographic point coordinate. ############################################################################### # Next we transform it into the coordinate frame of our map which is in # helioprojective coordinates. ############################################################################### # Now let's transform two lines, one of longitude and one of # of latitude. We define the coordinates as we did before and then # transform them. ############################################################################### # Now let's plot the results. We'll overlay the autogenerated lon/lat # grid as well for comparison.
| 3.377384
| 3
|
inheritance_exercise/zoo/project/reptile.py
|
Veselin-Stoilov/software-university-OOP
| 0
|
6629279
|
from inheritance_exercise.zoo.project.animal import Animal
class Reptile(Animal):
    """Reptile zoo animal; inherits all behaviour unchanged from Animal."""
    pass
|
from inheritance_exercise.zoo.project.animal import Animal
class Reptile(Animal):
pass
|
none
| 1
| 1.384794
| 1
|
|
src/utils/testfilemanager.py
|
webappetizers/webappetizers
| 0
|
6629280
|
import filemanager
# Sample elevation readings to persist.
elevations = [111,2,13]
# Save to file
# Append ('a') the readings to an 'elevations' .csv via the project
# Manager; failures are printed rather than allowed to propagate.
try:
    manager = filemanager.Manager('elevations', '.csv',elevations,'a')
    manager.write()
except Exception as e:
    print(e)
|
import filemanager
elevations = [111,2,13]
# Save to file
try:
manager = filemanager.Manager('elevations', '.csv',elevations,'a')
manager.write()
except Exception as e:
print(e)
|
en
| 0.945986
|
# Save to file
| 2.372135
| 2
|
Heap/1046. Last Stone Weight.py
|
xli1110/LC
| 2
|
6629281
|
import heapq
class Solution:
    def lastStoneWeight(self, stones: list) -> int:
        """Return the weight of the last remaining stone, or 0 if none.

        Repeatedly smash the two heaviest stones: equal stones both
        vanish, otherwise their difference is thrown back in.  A
        max-heap is simulated by pushing negated weights onto heapq's
        min-heap.

        Args:
            stones: non-empty list of positive stone weights.

        Raises:
            Exception: if `stones` is empty.
        """
        # NOTE: the original annotation used `typing.List`, which was
        # never imported and raised NameError at class-definition time;
        # the builtin `list` is equivalent here.
        if not stones:
            raise Exception("Empty Array")
        # Negate so the smallest negative corresponds to the heaviest stone.
        h = [-x for x in stones]
        heapq.heapify(h)
        while len(h) > 1:
            x = -heapq.heappop(h)  # heaviest
            y = -heapq.heappop(h)  # second heaviest
            if x != y:
                heapq.heappush(h, -abs(x - y))
        return 0 if not h else -h[0]
|
import heapq
class Solution:
def lastStoneWeight(self, stones: List[int]) -> int:
if not stones:
raise Exception("Empty Array")
h = [-x for x in stones]
heapq.heapify(h)
while len(h) > 1:
x = -heapq.heappop(h)
y = -heapq.heappop(h)
if x != y:
heapq.heappush(h, -abs(x - y))
return 0 if not h else -h[0]
|
none
| 1
| 3.129853
| 3
|
|
code/libamrfile/meshplot.py
|
cemacrr/bisicles_gia
| 0
|
6629282
|
from amrfile import io as amrio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as col
import matplotlib.patches as pat
plt.figure(figsize=(4,6))
plt.subplot(111,aspect='equal')
# Colour-scale limits for the plotted field (component 0 of each FAB).
hmin = 0.0
hmax = 3000.0
amrio.freeAll()
amrID = amrio.load("plot.amundsen.2d.hdf5")
time = amrio.queryTime(amrID)
n_lev = amrio.queryLevelNumber(amrID)
# One outline colour per refinement level.
lev_col = ['grey','blue','purple','red','orange']
for lev in range(0,n_lev):
    n_fab = amrio.queryFABNumber(amrID,lev)
    print(lev, n_fab)
    for fab in range(0,n_fab):
        x,y,h = amrio.readFAB(amrID,lev,fab,0)
        # x, y are cell centres; shift by half a cell to get box edges.
        dx = (x[1]-x[0])
        x0 = x[0] - 0.5*dx
        y0 = y[0] - 0.5*dx
        x1 = x[-1] + 0.5*dx
        y1 = y[-1] + 0.5*dx
        # Small tolerance so arange includes the final edge despite
        # floating-point rounding.
        eps = dx * 0.1
        xx = np.arange(x0,x1+eps,dx)
        yy = np.arange(y0,y1+eps,dx)
        plt.pcolormesh(xx,yy,h,vmin=hmin,vmax=hmax)
        # Draw the FAB bounding box, coloured by level.
        plt.plot( [x0,x0,x1,x1,x0],[y0,y1,y1,y0,y0], color=lev_col[lev],
                 lw=0.5, label = r'$\Delta x = ${} m'.format(dx))
plt.legend()
amrio.free(amrID)
plt.savefig("libamrfile_python_mesh.png")
|
from amrfile import io as amrio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as col
import matplotlib.patches as pat
plt.figure(figsize=(4,6))
plt.subplot(111,aspect='equal')
hmin = 0.0
hmax = 3000.0
amrio.freeAll()
amrID = amrio.load("plot.amundsen.2d.hdf5")
time = amrio.queryTime(amrID)
n_lev = amrio.queryLevelNumber(amrID)
lev_col = ['grey','blue','purple','red','orange']
for lev in range(0,n_lev):
n_fab = amrio.queryFABNumber(amrID,lev)
print(lev, n_fab)
for fab in range(0,n_fab):
x,y,h = amrio.readFAB(amrID,lev,fab,0)
dx = (x[1]-x[0])
x0 = x[0] - 0.5*dx
y0 = y[0] - 0.5*dx
x1 = x[-1] + 0.5*dx
y1 = y[-1] + 0.5*dx
eps = dx * 0.1
xx = np.arange(x0,x1+eps,dx)
yy = np.arange(y0,y1+eps,dx)
plt.pcolormesh(xx,yy,h,vmin=hmin,vmax=hmax)
plt.plot( [x0,x0,x1,x1,x0],[y0,y1,y1,y0,y0], color=lev_col[lev],
lw=0.5, label = r'$\Delta x = ${} m'.format(dx))
plt.legend()
amrio.free(amrID)
plt.savefig("libamrfile_python_mesh.png")
|
none
| 1
| 1.889389
| 2
|
|
fs_image/compiler/items/make_subvol.py
|
singhaditya28/fs_image
| 0
|
6629283
|
<gh_stars>0
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'''
Exactly one item must exist in this phase. If none is specified by the
`.bzl` code, then `dep_graph.py` injects a `FilesystemRootItem`.
'''
from dataclasses import dataclass
from typing import Iterable
from fs_image.fs_utils import open_for_read_decompress
from fs_image.subvol_utils import Subvol
from .common import ensure_meta_dir_exists, ImageItem, LayerOpts, PhaseOrder
from .mount_utils import clone_mounts
@dataclass(init=False, frozen=True)
class ParentLayerItem(ImageItem):
    """Builds the layer's subvolume as a btrfs snapshot of a parent layer."""
    # Already-built subvolume of the parent layer to snapshot from.
    subvol: Subvol
    def phase_order(self):
        # Runs in the phase that creates the layer's subvolume.
        return PhaseOrder.MAKE_SUBVOL
    @classmethod
    def get_phase_builder(
        cls, items: Iterable['ParentLayerItem'], layer_opts: LayerOpts,
    ):
        """Return a builder callable; exactly one item is expected."""
        parent, = items
        assert isinstance(parent, ParentLayerItem), parent
        def builder(subvol: Subvol):
            subvol.snapshot(parent.subvol)
            # This assumes that the parent has everything mounted already.
            clone_mounts(parent.subvol, subvol)
            ensure_meta_dir_exists(subvol, layer_opts)
        return builder
@dataclass(init=False, frozen=True)
class FilesystemRootItem(ImageItem):
    'A simple item to endow parent-less layers with a standard-permissions /'
    def phase_order(self):
        # Runs in the phase that creates the layer's subvolume.
        return PhaseOrder.MAKE_SUBVOL
    @classmethod
    def get_phase_builder(
        cls, items: Iterable['FilesystemRootItem'], layer_opts: LayerOpts,
    ):
        """Return a builder that creates a fresh root subvolume."""
        parent, = items
        assert isinstance(parent, FilesystemRootItem), parent
        def builder(subvol: Subvol):
            subvol.create()
            # Guarantee standard / permissions.  This could be a setting,
            # but in practice, probably any other choice would be wrong.
            subvol.run_as_root(['chmod', '0755', subvol.path()])
            subvol.run_as_root(['chown', 'root:root', subvol.path()])
            ensure_meta_dir_exists(subvol, layer_opts)
        return builder
@dataclass(init=False, frozen=True)
class ReceiveSendstreamItem(ImageItem):
    """Builds the layer's subvolume by receiving a btrfs sendstream file."""
    # Path to the (possibly compressed) sendstream to receive.
    source: str
    def phase_order(self):
        # Runs in the phase that creates the layer's subvolume.
        return PhaseOrder.MAKE_SUBVOL
    @classmethod
    def get_phase_builder(
        cls, items: Iterable['ReceiveSendstreamItem'], layer_opts: LayerOpts,
    ):
        """Return a builder callable; exactly one item is expected."""
        item, = items
        def builder(subvol: Subvol):
            # The receive context manager does all the work on entry/exit.
            with open_for_read_decompress(item.source) as sendstream, \
                    subvol.receive(sendstream):
                pass
        return builder
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'''
Exactly one item must exist in this phase. If none is specified by the
`.bzl` code, then `dep_graph.py` injects a `FilesystemRootItem`.
'''
from dataclasses import dataclass
from typing import Iterable
from fs_image.fs_utils import open_for_read_decompress
from fs_image.subvol_utils import Subvol
from .common import ensure_meta_dir_exists, ImageItem, LayerOpts, PhaseOrder
from .mount_utils import clone_mounts
@dataclass(init=False, frozen=True)
class ParentLayerItem(ImageItem):
subvol: Subvol
def phase_order(self):
return PhaseOrder.MAKE_SUBVOL
@classmethod
def get_phase_builder(
cls, items: Iterable['ParentLayerItem'], layer_opts: LayerOpts,
):
parent, = items
assert isinstance(parent, ParentLayerItem), parent
def builder(subvol: Subvol):
subvol.snapshot(parent.subvol)
# This assumes that the parent has everything mounted already.
clone_mounts(parent.subvol, subvol)
ensure_meta_dir_exists(subvol, layer_opts)
return builder
@dataclass(init=False, frozen=True)
class FilesystemRootItem(ImageItem):
'A simple item to endow parent-less layers with a standard-permissions /'
def phase_order(self):
return PhaseOrder.MAKE_SUBVOL
@classmethod
def get_phase_builder(
cls, items: Iterable['FilesystemRootItem'], layer_opts: LayerOpts,
):
parent, = items
assert isinstance(parent, FilesystemRootItem), parent
def builder(subvol: Subvol):
subvol.create()
# Guarantee standard / permissions. This could be a setting,
# but in practice, probably any other choice would be wrong.
subvol.run_as_root(['chmod', '0755', subvol.path()])
subvol.run_as_root(['chown', 'root:root', subvol.path()])
ensure_meta_dir_exists(subvol, layer_opts)
return builder
@dataclass(init=False, frozen=True)
class ReceiveSendstreamItem(ImageItem):
source: str
def phase_order(self):
return PhaseOrder.MAKE_SUBVOL
@classmethod
def get_phase_builder(
cls, items: Iterable['ReceiveSendstreamItem'], layer_opts: LayerOpts,
):
item, = items
def builder(subvol: Subvol):
with open_for_read_decompress(item.source) as sendstream, \
subvol.receive(sendstream):
pass
return builder
|
en
| 0.898363
|
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. Exactly one item must exist in this phase. If none is specified by the `.bzl` code, then `dep_graph.py` injects a `FilesystemRootItem`. # This assumes that the parent has everything mounted already. # Guarantee standard / permissions. This could be a setting, # but in practice, probably any other choice would be wrong.
| 2.139314
| 2
|
standard training/utils.py
|
wangaxe/DEAT
| 1
|
6629284
|
<filename>standard training/utils.py
# Adopted from https://github.com/P2333/Bag-of-Tricks-for-AT
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
# mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
# std = torch.tensor(cifar10_std).view(3,1,1).cuda()
upper_limit = 1.
lower_limit = 0.
def normalize(X, mu, std):
    """Standardise tensor `X`: subtract mean `mu`, divide by std `std`."""
    centered = X - mu
    return centered / std
def clamp(X, lower_limit, upper_limit):
    """Element-wise clamp of `X` into [lower_limit, upper_limit].

    Keeps the original min-then-max composition so tensor-valued
    (broadcastable) bounds behave identically.
    """
    capped = torch.min(X, upper_limit)
    return torch.max(capped, lower_limit)
def get_loaders(dir_, batch_size):
    """Return (train_loader, test_loader) for CIFAR-10 stored under `dir_`.

    Training data gets random crop + horizontal flip augmentation; the
    test set is only converted to tensors.  Both loaders use pinned
    memory; the datasets are downloaded if missing.
    """
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    test_transform = transforms.Compose([
        transforms.ToTensor(),
    ])
    num_workers = 2
    train_dataset = datasets.CIFAR10(
        dir_, train=True, transform=train_transform, download=True)
    test_dataset = datasets.CIFAR10(
        dir_, train=False, transform=test_transform, download=True)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=num_workers,
    )
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset,
        batch_size=batch_size,
        shuffle=False,
        pin_memory=True,
        num_workers=2,
    )
    return train_loader, test_loader
def CW_loss(x, y):
    """Carlini-Wagner margin loss: mean of -(logit_true - best_rival).

    `x` is a (batch, classes) logit tensor, `y` the true class indices.
    """
    sorted_logits, sorted_idx = x.sort(dim=1)
    # 1.0 where the true class already holds the highest logit, else 0.0.
    true_is_top = (sorted_idx[:, -1] == y).float()
    # Strongest competitor: second-best logit when the true class is on
    # top, otherwise the best logit.
    rival = sorted_logits[:, -2] * true_is_top + sorted_logits[:, -1] * (1. - true_is_top)
    true_logit = x[np.arange(x.shape[0]), y]
    margin = -(true_logit - rival)
    return margin.mean()
def attack_pgd(model, X, y, epsilon,
               alpha, attack_iters, restarts,
               mu, std, use_CWloss=False):
    """Run an L-inf PGD attack with random restarts; return the best delta.

    For each restart the perturbation is initialised uniformly in
    [-epsilon, epsilon], projected to keep X+delta in [0, 1], and
    refined with `attack_iters` sign-gradient steps of size `alpha`.
    Across restarts, the per-sample delta with the highest final
    cross-entropy loss is kept.  Requires CUDA (tensors are .cuda()'d).
    """
    max_loss = torch.zeros(y.shape[0]).cuda()
    max_delta = torch.zeros_like(X).cuda()
    for zz in range(restarts):
        delta = torch.zeros_like(X).cuda()
        delta.uniform_(-epsilon, epsilon)
        # Keep X + delta inside the valid pixel range [0, 1].
        delta.data = clamp(delta, lower_limit - X, upper_limit - X)
        delta.requires_grad = True
        for _ in range(attack_iters):
            output = model(normalize(X + delta, mu, std))
            # Only keep attacking samples that are still classified correctly.
            index = torch.where(output.max(1)[1] == y)
            if len(index[0]) == 0:
                break
            if use_CWloss:
                loss = CW_loss(output, y)
            else:
                loss = F.cross_entropy(output, y)
            loss.backward()
            grad = delta.grad.detach()
            d = delta[index[0], :, :, :]
            g = grad[index[0], :, :, :]
            # Signed ascent step, then project back into the epsilon ball
            # and the valid image range.
            d = torch.clamp(d + alpha * torch.sign(g), -epsilon, epsilon)
            d = clamp(d, lower_limit - X[index[0], :, :, :], upper_limit - X[index[0], :, :, :])
            delta.data[index[0], :, :, :] = d
            delta.grad.zero_()
        # Per-sample final loss decides which restart's delta survives.
        all_loss = F.cross_entropy(model(normalize(X+delta, mu, std)), y, reduction='none').detach()
        max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
        max_loss = torch.max(max_loss, all_loss)
    return max_delta
def evaluate_pgd(test_loader, model, mu, std, attack_iters, restarts=1, step=2,
                 val=None, use_CWloss=False):
    """Return (mean loss, accuracy) of `model` under a PGD attack.

    epsilon is fixed at 8/255; the step size is epsilon itself for a
    single-iteration (FGSM-like) attack, otherwise `step`/255.  If `val`
    is set, only the first `val` batches are evaluated.  Requires CUDA.
    """
    epsilon = (8 / 255.)
    if attack_iters == 1:
        alpha = epsilon
    else:
        alpha = (step / 255.)
    pgd_loss = 0
    pgd_acc = 0
    n = 0
    model.eval()
    for i, (X, y) in enumerate(test_loader):
        X, y = X.cuda(), y.cuda()
        pgd_delta = attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, mu, std, use_CWloss=use_CWloss)
        with torch.no_grad():
            output = model(normalize(X + pgd_delta, mu, std))
            loss = F.cross_entropy(output, y)
            # Accumulate sample-weighted loss and correct-prediction counts.
            pgd_loss += loss.item() * y.size(0)
            pgd_acc += (output.max(1)[1] == y).sum().item()
            n += y.size(0)
        if val and i == val - 1:
            break
    return pgd_loss/n, pgd_acc/n
def evaluate_standard(test_loader, model, mu, std, val=None):
    """Return (mean loss, accuracy) of `model` on clean test data.

    If `val` is set, only the first `val` batches are evaluated.
    Requires CUDA.
    """
    test_loss = 0
    test_acc = 0
    n = 0
    model.eval()
    with torch.no_grad():
        for i, (X, y) in enumerate(test_loader):
            X, y = X.cuda(), y.cuda()
            output = model(normalize(X, mu, std))
            loss = F.cross_entropy(output, y)
            # Accumulate sample-weighted loss and correct-prediction counts.
            test_loss += loss.item() * y.size(0)
            test_acc += (output.max(1)[1] == y).sum().item()
            n += y.size(0)
            if val and i == val - 1:
                break
    return test_loss/n, test_acc/n
|
<filename>standard training/utils.py
# Adopted from https://github.com/P2333/Bag-of-Tricks-for-AT
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
cifar10_mean = (0.4914, 0.4822, 0.4465)
cifar10_std = (0.2471, 0.2435, 0.2616)
# mu = torch.tensor(cifar10_mean).view(3,1,1).cuda()
# std = torch.tensor(cifar10_std).view(3,1,1).cuda()
upper_limit = 1.
lower_limit = 0.
def normalize(X, mu, std):
return (X - mu)/std
def clamp(X, lower_limit, upper_limit):
return torch.max(torch.min(X, upper_limit), lower_limit)
def get_loaders(dir_, batch_size):
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
test_transform = transforms.Compose([
transforms.ToTensor(),
])
num_workers = 2
train_dataset = datasets.CIFAR10(
dir_, train=True, transform=train_transform, download=True)
test_dataset = datasets.CIFAR10(
dir_, train=False, transform=test_transform, download=True)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
pin_memory=True,
num_workers=num_workers,
)
test_loader = torch.utils.data.DataLoader(
dataset=test_dataset,
batch_size=batch_size,
shuffle=False,
pin_memory=True,
num_workers=2,
)
return train_loader, test_loader
def CW_loss(x, y):
x_sorted, ind_sorted = x.sort(dim=1)
ind = (ind_sorted[:, -1] == y).float()
loss_value = -(x[np.arange(x.shape[0]), y] - x_sorted[:, -2] * ind - x_sorted[:, -1] * (1. - ind))
return loss_value.mean()
def attack_pgd(model, X, y, epsilon,
alpha, attack_iters, restarts,
mu, std, use_CWloss=False):
max_loss = torch.zeros(y.shape[0]).cuda()
max_delta = torch.zeros_like(X).cuda()
for zz in range(restarts):
delta = torch.zeros_like(X).cuda()
delta.uniform_(-epsilon, epsilon)
delta.data = clamp(delta, lower_limit - X, upper_limit - X)
delta.requires_grad = True
for _ in range(attack_iters):
output = model(normalize(X + delta, mu, std))
index = torch.where(output.max(1)[1] == y)
if len(index[0]) == 0:
break
if use_CWloss:
loss = CW_loss(output, y)
else:
loss = F.cross_entropy(output, y)
loss.backward()
grad = delta.grad.detach()
d = delta[index[0], :, :, :]
g = grad[index[0], :, :, :]
d = torch.clamp(d + alpha * torch.sign(g), -epsilon, epsilon)
d = clamp(d, lower_limit - X[index[0], :, :, :], upper_limit - X[index[0], :, :, :])
delta.data[index[0], :, :, :] = d
delta.grad.zero_()
all_loss = F.cross_entropy(model(normalize(X+delta, mu, std)), y, reduction='none').detach()
max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
max_loss = torch.max(max_loss, all_loss)
return max_delta
def evaluate_pgd(test_loader, model, mu, std, attack_iters, restarts=1, step=2,
val=None, use_CWloss=False):
epsilon = (8 / 255.)
if attack_iters == 1:
alpha = epsilon
else:
alpha = (step / 255.)
pgd_loss = 0
pgd_acc = 0
n = 0
model.eval()
for i, (X, y) in enumerate(test_loader):
X, y = X.cuda(), y.cuda()
pgd_delta = attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, mu, std, use_CWloss=use_CWloss)
with torch.no_grad():
output = model(normalize(X + pgd_delta, mu, std))
loss = F.cross_entropy(output, y)
pgd_loss += loss.item() * y.size(0)
pgd_acc += (output.max(1)[1] == y).sum().item()
n += y.size(0)
if val and i == val - 1:
break
return pgd_loss/n, pgd_acc/n
def evaluate_standard(test_loader, model, mu, std, val=None):
test_loss = 0
test_acc = 0
n = 0
model.eval()
with torch.no_grad():
for i, (X, y) in enumerate(test_loader):
X, y = X.cuda(), y.cuda()
output = model(normalize(X, mu, std))
loss = F.cross_entropy(output, y)
test_loss += loss.item() * y.size(0)
test_acc += (output.max(1)[1] == y).sum().item()
n += y.size(0)
if val and i == val - 1:
break
return test_loss/n, test_acc/n
|
en
| 0.436539
|
# Adopted from https://github.com/P2333/Bag-of-Tricks-for-AT # mu = torch.tensor(cifar10_mean).view(3,1,1).cuda() # std = torch.tensor(cifar10_std).view(3,1,1).cuda()
| 2.243087
| 2
|
sort_list_of_dict.py
|
zabuchan/sort_recipe
| 0
|
6629285
|
<filename>sort_list_of_dict.py
import json
from operator import itemgetter
# Load the room records from disk; the JSON top level holds an 'office'
# key whose value is a list of room dicts.
filename = "room_price.json"
with open(filename, 'r') as fin:
    offices = json.load(fin)
room_list = offices['office']
# Sort the same list two ways, keyed on each room dict's fields.
rooms_by_price = sorted(room_list, key=itemgetter('price'))
rooms_by_room_number = sorted(room_list, key=itemgetter('room-number'))
print("Sorted by price:\n{}".format(rooms_by_price))
print("Sorted by room-number:\n{}".format(rooms_by_room_number))
|
<filename>sort_list_of_dict.py
import json
from operator import itemgetter
filename = "room_price.json"
with open(filename, 'r') as fin:
offices = json.load(fin)
room_list = offices['office']
rooms_by_price = sorted(room_list, key=itemgetter('price'))
rooms_by_room_number = sorted(room_list, key=itemgetter('room-number'))
print("Sorted by price:\n{}".format(rooms_by_price))
print("Sorted by room-number:\n{}".format(rooms_by_room_number))
|
none
| 1
| 3.728563
| 4
|
|
Version 2/test_file.py
|
jesus-333/CSP
| 6
|
6629286
|
# -*- coding: utf-8 -*-
"""
Contain the implementation of the FBCSP algorithm. Developed for the train part of dataset IV-1-a of BCI competition.
@author: <NAME> (Jesus)
@organization: University of Padua (Italy)
"""
#%%
from CSP_support_function import cleanWorkspaec
# cleanWorkspaec()
#%%
from CSP_support_function import loadDataset100Hz, computeTrial
from CSP import CSP
import numpy as np
from sklearn.svm import SVC
# x1 = np.linspace(1, sx_0.shape[0], sx_0.shape[0])
# x2 = x1 + 0.35
# # Mean through trials of all the features
# y1 = sx_0
# y2 = dx_0
# fig, ax = plt.subplots(figsize = (15, 10))
# ax.bar(x1, y1, width = 0.3, color = 'b', align='center')
# ax.bar(x2, y2, width = 0.3, color = 'r', align='center')
# ax.set_xlim(0.5, 59.5)
#%%
# Each letter identifies one subject's recording in the dataset.
tmp_string = 'abcdefg'
for idx in tmp_string:
    #%% Data load
    path = 'Dataset/D1_100Hz/Train/BCICIV_calib_ds1'
    # path = 'Dataset/D1_100Hz/Test/BCICIV_eval_ds1'
    # idx = 'a'
    plot_var = False
    data, labels, cue_position, other_info = loadDataset100Hz(path, idx, type_dataset = 'train')
    #%% Extract trials from data (Works only on dataset IV-1-a of BCI competition)
    fs = other_info['sample_rate']
    trials_dict = computeTrial(data, cue_position, labels, fs,other_info['class_label'])
    #%%
    # Train the CSP pipeline (sklearn classifier + LDA) on this subject.
    CSP_clf = CSP(trials_dict, fs)
    # CSP_clf.plotFeatures()
    # CSP_clf.plotPSD(15, 12)
    CSP_clf.trainClassifier(print_var = True)
    CSP_clf.trainLDA()
    # CSP_clf.trainClassifier(classifier = SVC(kernel = 'linear'))
    CSP_clf.plotFeaturesScatter()
    # Spot-check one trial (index 79) from every class with both
    # evaluation modes (sklearn vs. handmade implementation).
    i = 79
    for key in trials_dict.keys():
        a = trials_dict[key][i, :, :]
        y = CSP_clf.evaluate(a)
        print(key, ' (sklearn): ', y)
        y = CSP_clf.evaluate(a, mode = 2)
        print(key, '(handmade): ', y)
    print("- - - - - - - - - - - - - - - - - - - - - - - -\n")
|
# -*- coding: utf-8 -*-
"""
Contain the implementation of the FBCSP algorithm. Developed for the train part of dataset IV-1-a of BCI competition.
@author: <NAME> (Jesus)
@organization: University of Padua (Italy)
"""
#%%
from CSP_support_function import cleanWorkspaec
# cleanWorkspaec()
#%%
from CSP_support_function import loadDataset100Hz, computeTrial
from CSP import CSP
import numpy as np
from sklearn.svm import SVC
# x1 = np.linspace(1, sx_0.shape[0], sx_0.shape[0])
# x2 = x1 + 0.35
# # Mean through trials of all the features
# y1 = sx_0
# y2 = dx_0
# fig, ax = plt.subplots(figsize = (15, 10))
# ax.bar(x1, y1, width = 0.3, color = 'b', align='center')
# ax.bar(x2, y2, width = 0.3, color = 'r', align='center')
# ax.set_xlim(0.5, 59.5)
#%%
tmp_string = 'abcdefg'
for idx in tmp_string:
#%% Data load
path = 'Dataset/D1_100Hz/Train/BCICIV_calib_ds1'
# path = 'Dataset/D1_100Hz/Test/BCICIV_eval_ds1'
# idx = 'a'
plot_var = False
data, labels, cue_position, other_info = loadDataset100Hz(path, idx, type_dataset = 'train')
#%% Extract trials from data (Works only on dataset IV-1-a of BCI competition)
fs = other_info['sample_rate']
trials_dict = computeTrial(data, cue_position, labels, fs,other_info['class_label'])
#%%
CSP_clf = CSP(trials_dict, fs)
# CSP_clf.plotFeatures()
# CSP_clf.plotPSD(15, 12)
CSP_clf.trainClassifier(print_var = True)
CSP_clf.trainLDA()
# CSP_clf.trainClassifier(classifier = SVC(kernel = 'linear'))
CSP_clf.plotFeaturesScatter()
i = 79
for key in trials_dict.keys():
a = trials_dict[key][i, :, :]
y = CSP_clf.evaluate(a)
print(key, ' (sklearn): ', y)
y = CSP_clf.evaluate(a, mode = 2)
print(key, '(handmade): ', y)
print("- - - - - - - - - - - - - - - - - - - - - - - -\n")
|
en
| 0.60606
|
# -*- coding: utf-8 -*- Contain the implementation of the FBCSP algorithm. Developed for the train part of dataset IV-1-a of BCI competition. @author: <NAME> (Jesus) @organization: University of Padua (Italy) #%% # cleanWorkspaec() #%% # x1 = np.linspace(1, sx_0.shape[0], sx_0.shape[0]) # x2 = x1 + 0.35 # # Mean through trials of all the features # y1 = sx_0 # y2 = dx_0 # fig, ax = plt.subplots(figsize = (15, 10)) # ax.bar(x1, y1, width = 0.3, color = 'b', align='center') # ax.bar(x2, y2, width = 0.3, color = 'r', align='center') # ax.set_xlim(0.5, 59.5) #%% #%% Data load # path = 'Dataset/D1_100Hz/Test/BCICIV_eval_ds1' # idx = 'a' #%% Extract trials from data (Works only on dataset IV-1-a of BCI competition) #%% # CSP_clf.plotFeatures() # CSP_clf.plotPSD(15, 12) # CSP_clf.trainClassifier(classifier = SVC(kernel = 'linear'))
| 2.534722
| 3
|
pynetdicom/tests/test_ae.py
|
Jesse-Back/pynetdicom
| 274
|
6629287
|
<gh_stars>100-1000
"""Tests for the ae module."""
import logging
import os
import signal
import threading
import time
import pytest
from pydicom import read_file
from pydicom.dataset import Dataset
from pydicom.uid import UID, ImplicitVRLittleEndian
from pynetdicom import (
AE, evt, debug_logger, build_context,
DEFAULT_TRANSFER_SYNTAXES,
StoragePresentationContexts,
VerificationPresentationContexts,
PYNETDICOM_IMPLEMENTATION_UID,
PYNETDICOM_IMPLEMENTATION_VERSION
)
from pynetdicom.presentation import build_context
from pynetdicom.sop_class import RTImageStorage, Verification
from pynetdicom.transport import AssociationServer, RequestHandler
# debug_logger()
TEST_DS_DIR = os.path.join(os.path.dirname(__file__), 'dicom_files')
DATASET = read_file(os.path.join(TEST_DS_DIR, 'RTImageStorage.dcm'))
COMP_DATASET = read_file(os.path.join(TEST_DS_DIR, 'MRImageStorage_JPG2000_Lossless.dcm'))
def test_blocking_handler():
    """Test binding events to the blocking AssociationServer."""
    ae = AE()
    ae.add_supported_context('1.2.840.10008.1.1')
    def handle_echo(event):
        # Minimal C-ECHO handler: 0x0000 is the DICOM 'Success' status.
        return 0x0000
    handlers = [(evt.EVT_C_ECHO, handle_echo)]
    # Run the blocking server in a daemon thread so the test can stop it.
    thread = threading.Thread(
        target=ae.start_server,
        args=(('', 11112), ),
        kwargs={'evt_handlers' : handlers}
    )
    thread.daemon = True
    thread.start()
    # Give the server a moment to start before shutting it down.
    time.sleep(0.1)
    ae.shutdown()
class TestMakeServer:
    """Tests for AE.make_server()"""
    def setup(self):
        """Run prior to each test"""
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        # Shut down any AE started by the test so the port is released.
        if self.ae:
            self.ae.shutdown()
    def test_default_arguments(self):
        """make_server() with defaults returns an AssociationServer."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        server = ae.make_server(('', 11112))
        assert isinstance(server, AssociationServer)
    def test_custom_request_handler(self):
        """A custom request_handler class is installed on the server."""
        class MyRequestHandler(RequestHandler):
            pass
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        server = ae.make_server(('', 11112), request_handler=MyRequestHandler)
        assert server.RequestHandlerClass is MyRequestHandler
    def test_aet_bytes_deprecation(self):
        """Test warning if using bytes to set an AE title."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        msg = (
            r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
            r"str instead"
        )
        # The bytes title should still be accepted (decoded) after warning.
        with pytest.warns(DeprecationWarning, match=msg):
            server = ae.start_server(('', 11112), block=False, ae_title=b'BADAE2')
        assert server.ae_title == 'BADAE2'
        server.shutdown()
class TestStartServer:
    """Tests for AE.start_server()"""
    def setup(self):
        """Run prior to each test"""
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        # Shut down any AE started by the test so the port is released.
        if self.ae:
            self.ae.shutdown()
    def test_ae_title(self):
        """Test the `ae_title` keyword parameter."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.ae_title = 'TESTAET'
        assert ae.ae_title == 'TESTAET'
        ae.add_supported_context(Verification)
        # With no keyword the server inherits the AE instance's title.
        server = ae.start_server(('', 11112), block=False)
        assert server.ae_title == ae.ae_title
        server.shutdown()
        # An explicit keyword overrides the AE instance's title.
        server = ae.start_server(('', 11112), block=False, ae_title='MYAE')
        assert server.ae_title == 'MYAE'
        ae.require_called_aet = True
        ae.add_requested_context(Verification)
        # The association must succeed when calling the overridden title.
        assoc = ae.associate('localhost', 11112, ae_title='MYAE')
        assert assoc.is_established
        assoc.release()
        assert assoc.is_released
        server.shutdown()
    def test_contexts(self):
        """Test the `contexts` keyword parameter."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.ae_title = 'TESTAET'
        assert ae.ae_title == 'TESTAET'
        # Supported contexts passed directly to start_server() instead of
        # being registered on the AE.
        cx = build_context(Verification)
        server = ae.start_server(('', 11112), block=False, contexts=[cx])
        ae.add_requested_context(Verification)
        assoc = ae.associate('localhost', 11112, ae_title='MYAE')
        assert assoc.is_established
        assert (
            assoc.accepted_contexts[0].abstract_syntax == Verification
        )
        assoc.release()
        assert assoc.is_released
        server.shutdown()
class TestAEVerificationSCP:
    """Check verification SCP"""
    def setup(self):
        """Run prior to each test"""
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        # Shut down any AE started by the test so the port is released.
        if self.ae:
            self.ae.shutdown()
    @pytest.mark.skipif(os.name == "nt", reason="Kills pytest on windows")
    def test_start_server_keyboard_interrupt(self):
        """Test stopping the SCP with keyboard"""
        pid = os.getpid()
        def trigger_signal():
            # Fire SIGINT at our own process shortly after the blocking
            # server starts, simulating Ctrl-C.
            time.sleep(0.1)
            os.kill(pid, signal.SIGINT)
        self.ae = ae = AE()
        ae.add_supported_context('1.2.3')
        thread = threading.Thread(target=trigger_signal)
        thread.daemon = True
        thread.start()
        # Blocks until the SIGINT arrives; must return cleanly.
        ae.start_server(('', 11112))
        ae.shutdown()
    def test_no_supported_contexts(self):
        """Test starting with no contexts raises"""
        ae = AE()
        with pytest.raises(ValueError, match=r"No supported Presentation"):
            ae.start_server(('', 11112))
    def test_new_scu_scp_warning(self):
        """Test that a warning is given if scu_role and scp_role bad."""
        ae = AE()
        # scp_role=False with no scu_role leaves the context unusable.
        ae.add_supported_context('1.2.3.4', scp_role=False)
        msg = r"The following presentation contexts have "
        with pytest.raises(ValueError, match=msg):
            ae.start_server(('', 11112))
    def test_str_empty(self):
        """Test str output for default AE"""
        # Only checks that __str__ does not raise on a bare AE.
        ae = AE()
        ae.__str__()
class TestAEPresentationSCU:
    """Tests for AE presentation contexts when running as an SCU"""
    def setup(self):
        """Run prior to each test"""
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        # Shut down any AE started by the test so the port is released.
        if self.ae:
            self.ae.shutdown()
    def test_associate_context(self):
        """Test that AE.associate doesn't modify the supplied contexts"""
        # Test AE.requested_contexts
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)
        ae.requested_contexts = VerificationPresentationContexts
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        # The shared template context must keep context_id None; only the
        # per-association copy gets an id assigned.
        assert ae.requested_contexts[0].context_id is None
        assert len(assoc.requestor.requested_contexts) == 1
        assert assoc.requestor.requested_contexts[0].abstract_syntax == (
            '1.2.840.10008.1.1'
        )
        assert assoc.requestor.requested_contexts[0].context_id == 1
        assoc.release()
        assert not assoc.is_established
        assert assoc.is_released
        # Test associate(contexts=...)
        ae.requested_contexts = []
        assoc = ae.associate('localhost', 11112,
                             contexts=VerificationPresentationContexts)
        assert assoc.is_established
        assert VerificationPresentationContexts[0].context_id is None
        assert len(assoc.requestor.requested_contexts) == 1
        assert assoc.requestor.requested_contexts[0].abstract_syntax == (
            '1.2.840.10008.1.1'
        )
        assert assoc.requestor.requested_contexts[0].context_id == 1
        assoc.release()
        assert not assoc.is_established
        assert assoc.is_released
        scp.shutdown()
    def test_associate_context_raises(self):
        """Test that AE.associate raises exception if no requested contexts"""
        self.ae = ae = AE()
        with pytest.raises(RuntimeError):
            assoc = ae.associate('localhost', 11112)
class TestAEGoodTimeoutSetters:
    """Tests for the AE timeout property setters.

    Invalid assignments to the timeout properties are expected to fall
    back to the property's default value rather than raise.
    """

    def setup(self):
        """Run prior to each test"""
        # Track the AE so teardown stops server/association threads even
        # when a test assertion fails part-way through
        self.ae = None

    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()

    def test_acse_timeout(self):
        """ Check AE ACSE timeout change produces good value """
        ae = AE()
        # Default is 30 s; invalid values (negative, non-numeric) fall
        # back to the default
        assert ae.acse_timeout == 30
        ae.acse_timeout = None
        assert ae.acse_timeout is None
        ae.acse_timeout = -100
        assert ae.acse_timeout == 30
        ae.acse_timeout = 'a'
        assert ae.acse_timeout == 30
        ae.acse_timeout = 0
        assert ae.acse_timeout == 0
        ae.acse_timeout = 30
        assert ae.acse_timeout == 30

    def test_dimse_timeout(self):
        """ Check AE DIMSE timeout change produces good value """
        ae = AE()
        # Default is 30 s; invalid values fall back to the default
        assert ae.dimse_timeout == 30
        ae.dimse_timeout = None
        assert ae.dimse_timeout is None
        ae.dimse_timeout = -100
        assert ae.dimse_timeout == 30
        ae.dimse_timeout = 'a'
        assert ae.dimse_timeout == 30
        ae.dimse_timeout = 0
        assert ae.dimse_timeout == 0
        ae.dimse_timeout = 30
        assert ae.dimse_timeout == 30

    def test_network_timeout(self):
        """ Check AE network timeout change produces good value """
        ae = AE()
        # Default is 60 s; invalid values fall back to the default
        assert ae.network_timeout == 60
        ae.network_timeout = None
        assert ae.network_timeout is None
        ae.network_timeout = -100
        assert ae.network_timeout == 60
        ae.network_timeout = 'a'
        assert ae.network_timeout == 60
        ae.network_timeout = 0
        assert ae.network_timeout == 0
        ae.network_timeout = 30
        assert ae.network_timeout == 30

    def test_connection_timeout(self):
        """ Check AE connection timeout change produces good value """
        ae = AE()
        # Default is None (no timeout); invalid values - including 0 -
        # fall back to None
        assert ae.connection_timeout is None
        ae.connection_timeout = None
        assert ae.connection_timeout is None
        ae.connection_timeout = -100
        assert ae.connection_timeout is None
        ae.connection_timeout = 'a'
        assert ae.connection_timeout is None
        ae.connection_timeout = 0
        assert ae.connection_timeout is None
        ae.connection_timeout = 30
        assert ae.connection_timeout == 30

    def test_active_acse(self):
        """Test changing acse_timeout with active associations."""
        self.ae = ae = AE()
        ae.add_supported_context('1.2.840.10008.1.1')
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context('1.2.840.10008.1.1')
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.acse_timeout == 30
        # Changing the AE value propagates to the active association
        ae.acse_timeout = 5
        assert assoc.acse_timeout == 5
        assoc.release()
        scp.shutdown()
        ae.shutdown()

    def test_active_dimse(self):
        """Test changing dimse_timeout with active associations."""
        self.ae = ae = AE()
        ae.add_supported_context('1.2.840.10008.1.1')
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context('1.2.840.10008.1.1')
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.dimse_timeout == 30
        # Changing the AE value propagates to the active association
        ae.dimse_timeout = 5
        assert assoc.dimse_timeout == 5
        assoc.release()
        scp.shutdown()
        ae.shutdown()

    def test_active_network(self):
        """Test changing network_timeout with active associations."""
        self.ae = ae = AE()
        ae.add_supported_context('1.2.840.10008.1.1')
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context('1.2.840.10008.1.1')
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.network_timeout == 60
        # Changing the AE value propagates to the active association
        ae.network_timeout = 5
        assert assoc.network_timeout == 5
        assoc.release()
        scp.shutdown()
        ae.shutdown()

    def test_active_connection(self):
        """Test changing connection_timeout with active associations."""
        self.ae = ae = AE()
        ae.add_supported_context('1.2.840.10008.1.1')
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context('1.2.840.10008.1.1')
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.connection_timeout is None
        # Changing the AE value propagates to the active association
        ae.connection_timeout = 5
        assert assoc.connection_timeout == 5
        assoc.release()
        scp.shutdown()
        ae.shutdown()
class TestAEGoodAssociation:
    """Tests for successful associations between an SCU and an SCP."""
    def setup(self):
        """Run prior to each test"""
        # Track the AE instance so teardown can stop any threads it started
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
    def test_associate_establish_release(self):
        """ Check SCU Association with SCP """
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(Verification)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        assert not assoc.is_established
        assert assoc.is_released
        scp.shutdown()
    def test_associate_max_pdu(self):
        """ Check Association has correct max PDUs on either end """
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.maximum_pdu_size = 54321
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)
        # Separate requestor AE so SCU and SCP max PDU sizes can differ
        scu_ae = AE()
        scu_ae.acse_timeout = 5
        scu_ae.dimse_timeout = 5
        scu_ae.network_timeout = 5
        scu_ae.add_requested_context(Verification)
        assoc = scu_ae.associate('localhost', 11112, max_pdu=12345)
        assert assoc.is_established
        # Both sides must see both negotiated maximum lengths
        assert scp.active_associations[0].acceptor.maximum_length == (
            54321
        )
        assert scp.active_associations[0].requestor.maximum_length == (
            12345
        )
        assert assoc.requestor.maximum_length == 12345
        assert assoc.acceptor.maximum_length == 54321
        assoc.release()
        # Give the server thread time to deregister the association
        time.sleep(0.1)
        assert scp.active_associations == []
        # Check 0 max pdu value - max PDU value maps to 0x10000 internally
        assoc = scu_ae.associate('localhost', 11112, max_pdu=0)
        assert assoc.requestor.maximum_length == 0
        assert scp.active_associations[0].requestor.maximum_length == 0
        assoc.release()
        scp.shutdown()
    def test_association_timeouts(self):
        """ Check that the Association timeouts are being set correctly and
        work """
        # The handlers below close over these names and read the value
        # current at call time, so reassigning them re-tunes the delays
        acse_delay = None
        dimse_delay = None
        def handle_echo(event):
            # Optionally stall the C-ECHO response to trip the DIMSE timeout
            if dimse_delay:
                time.sleep(dimse_delay)
            return 0x0000
        def handle_acse_recv(event):
            # Optionally stall ACSE handling to trip the ACSE timeout
            if acse_delay:
                time.sleep(acse_delay)
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 0.5
        ae.add_supported_context(Verification)
        scp = ae.start_server(
            ('', 11112), block=False, evt_handlers=[(evt.EVT_ACSE_RECV, handle_acse_recv), (evt.EVT_C_ECHO, handle_echo)]
        )
        scu_ae = AE()
        scu_ae.acse_timeout = 30
        scu_ae.dimse_timeout = 30
        scu_ae.network_timeout = 30
        scu_ae.add_requested_context(Verification)
        assoc = scu_ae.associate('localhost', 11112)
        assert assoc.is_established
        # Hit the network timeout
        time.sleep(1.0)
        assert assoc.is_aborted
        assert len(scp.active_associations) == 0
        # Disable the SCP-side timeouts; trip the SCU's DIMSE timeout instead
        ae.acse_timeout = None
        ae.dimse_timeout = None
        ae.network_timeout = None
        scu_ae.acse_timeout = 30
        scu_ae.dimse_timeout = 0
        dimse_delay = 1
        assoc = scu_ae.associate('localhost', 11112)
        assert assoc.is_established
        status = assoc.send_c_echo()
        time.sleep(1.5)
        assert assoc.is_aborted
        assert len(scp.active_associations) == 0
        # FIXME: If this is `0` we can process an ABORT primitive where
        # we expect an ASSOCIATION primitive.
        scu_ae.acse_timeout = 0.5
        scu_ae.dimse_timeout = 30
        acse_delay = 1
        assoc = scu_ae.associate('localhost', 11112)
        assert not assoc.is_established
        assert assoc.is_aborted
        time.sleep(1.5)
        assert len(scp.active_associations) == 0
        scu_ae.acse_timeout = 30
        # `0` is an invalid value
        scu_ae.connection_timeout = 0.5
        scu_ae.dimse_timeout = 30
        # The host exists and is routable, but there is a middlebox ignoring
        # the initial TCP SYN.
        assoc = scu_ae.associate('example.com', 11112)
        assert not assoc.is_established
        assert assoc.is_aborted
        assert len(scp.active_associations) == 0
        # Finally check that per-AE timeout values reach the association
        ae.acse_timeout = 21
        ae.dimse_timeout = 22
        scu_ae.acse_timeout = 31
        scu_ae.connection_timeout = None
        scu_ae.dimse_timeout = 32
        assoc = scu_ae.associate('localhost', 11112)
        assert assoc.is_established
        assert scp.active_associations[0].acse_timeout == 21
        assert scp.active_associations[0].dimse_timeout == 22
        assert assoc.acse_timeout == 31
        assert assoc.dimse_timeout == 32
        assoc.release()
        scp.shutdown()
    def test_connection_timeout(self, caplog):
        # * ACSE timeout does not start until connection timeout completes
        # * Logs indicate that we hit the timeout case
        scu_ae = AE()
        scu_ae.acse_timeout = 1
        scu_ae.connection_timeout = 2
        scu_ae.add_requested_context(Verification)
        with caplog.at_level(logging.ERROR, logger='pynetdicom'):
            assoc = scu_ae.associate('example.com', 11112)
            assert not assoc.is_established
            assert assoc.is_aborted
            # Which message is logged depends on local DNS behaviour, so
            # exactly one of the two is expected
            msgs = [
                "TCP Initialisation Error: timed out",
                "TCP Initialisation Error: [Errno -2] Name or service "
                "not known"
            ]
            assert len([m for m in msgs if m in caplog.text]) == 1
    def test_select_timeout_okay(self):
        """Test that using start works OK with timeout."""
        # Multiple release/association in a sort time causes an OSError as
        # the port is still in use due to the use of select.select() with
        # a timeout. Fixed by using socket.shutdown in stop()
        for ii in range(3):
            self.ae = ae = AE()
            ae.acse_timeout = 5
            ae.dimse_timeout = 5
            ae.network_timeout = 5
            ae.add_supported_context(Verification)
            scp = ae.start_server(('', 11112), block=False)
            ae.add_requested_context(Verification)
            assoc = ae.associate('localhost', 11112)
            assert assoc.is_established
            assoc.release()
            assert assoc.is_released
            assert not assoc.is_established
            scp.shutdown()
    def test_aet_bytes_deprecation(self):
        """Test warning if using bytes to set an AE title."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        server = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(Verification)
        msg = (
            r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
            r"str instead"
        )
        with pytest.warns(DeprecationWarning, match=msg):
            assoc = ae.associate('', 11112, ae_title=b'BADAE2')
        # The bytes title is still converted and used despite the warning
        assert assoc.acceptor.ae_title == 'BADAE2'
        assert assoc.requestor.ae_title == 'PYNETDICOM'
        server.shutdown()
class TestAEBadAssociation:
    """Tests for AE.associate() called with invalid arguments."""

    def test_raise(self):
        """Test bad associate call"""
        ae = AE()
        ae.add_requested_context(Verification)
        # Wrong types for the address and port arguments
        for addr, port in ((1112, 11112), ('localhost', '1.2.3.4')):
            with pytest.raises(TypeError):
                ae.associate(addr, port)

    def test_invalid_ae_title(self):
        """Test invalid AE.ae_title"""
        ae = AE()
        ae.add_requested_context(Verification)

        # (ae_title, expected exception, expected message) triples
        bad_titles = [
            (
                ' ',
                ValueError,
                r"Invalid 'ae_title' value - must not consist entirely of spaces",
            ),
            (
                b"\xe2\x80\x8b\x35".decode('utf8'),
                ValueError,
                r"Invalid 'ae_title' value '\u200b5' "
                r"- must only contain ASCII characters",
            ),
            (
                '1234567890ABCDEFG',
                ValueError,
                r"Invalid 'ae_title' value '1234567890ABCDEFG' "
                r"- must not exceed 16 characters",
            ),
            (
                '',
                ValueError,
                r"Invalid 'ae_title' value - must not be an empty str",
            ),
            (
                'TEST\\ME',
                ValueError,
                r"Invalid 'ae_title' value 'TEST\\ME' - must not contain control "
                r"characters or backslashes",
            ),
            (
                12345,
                TypeError,
                r"'ae_title' must be str, not 'int'",
            ),
        ]
        for aet, exc, msg in bad_titles:
            with pytest.raises(exc, match=msg):
                ae.associate('localhost', 11112, ae_title=aet)
class TestAEGoodMiscSetters:
    """Tests for the AE's miscellaneous property setters."""
    def setup(self):
        """Run prior to each test"""
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
    def test_ae_title_good(self):
        """ Check AE title change produces good value """
        ae = AE()
        # Leading/trailing spaces and embedded spaces are preserved as-is
        ae.ae_title = ' TEST '
        assert ae.ae_title == ' TEST '
        ae.ae_title = ' TEST'
        assert ae.ae_title == ' TEST'
        ae.ae_title = 'a TES'
        assert ae.ae_title == 'a TES'
        ae.ae_title = 'a TEST'
        assert ae.ae_title == 'a TEST'
    def test_aet_bytes_deprecation(self):
        """Test warning if using bytes to set an AE title."""
        msg = (
            r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
            r"str instead"
        )
        with pytest.warns(DeprecationWarning, match=msg):
            ae = AE(b'BADAE')
            assert ae.ae_title == 'BADAE'
    def test_implementation(self):
        """Check the implementation version name and class UID setters"""
        ae = AE()
        ae.implementation_version_name = None
        assert ae.implementation_version_name is None
        ae.implementation_class_uid = '1.2.3'
        assert ae.implementation_class_uid == '1.2.3'
    def test_max_assoc_good(self):
        """ Check AE maximum association change produces good value """
        ae = AE()
        # Invalid values (negative, non-int, zero) fall back to 1
        ae.maximum_associations = -10
        assert ae.maximum_associations == 1
        ae.maximum_associations = ['a']
        assert ae.maximum_associations == 1
        ae.maximum_associations = '10'
        assert ae.maximum_associations == 1
        ae.maximum_associations = 0
        assert ae.maximum_associations == 1
        ae.maximum_associations = 5
        assert ae.maximum_associations == 5
    def test_max_pdu_good(self):
        """ Check AE maximum pdu size change produces good value """
        ae = AE()
        # Invalid values fall back to the 16382 default; 0 is valid
        ae.maximum_pdu_size = -10
        assert ae.maximum_pdu_size == 16382
        ae.maximum_pdu_size = 0
        assert ae.maximum_pdu_size == 0
        ae.maximum_pdu_size = 5000
        assert ae.maximum_pdu_size == 5000
    def test_require_calling_aet(self):
        """Test AE.require_calling_aet"""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(Verification)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        assert assoc.is_released
        assert not assoc.is_established
        # A calling AET restriction that doesn't match the requestor's
        # title ('PYNETDICOM') causes rejection
        ae.require_calling_aet = ['MYAE']
        assert ae.require_calling_aet == ['MYAE']
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_rejected
        # A matching restriction allows the association
        ae.require_calling_aet = ['PYNETDICOM']
        assert ae.require_calling_aet == ['PYNETDICOM']
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        msg = r"Invalid 'require_calling_aet' value - must not be an empty str"
        with pytest.raises(ValueError, match=msg):
            ae.require_calling_aet = ['']
        # The failed assignment must not have changed the existing value
        assert ae.require_calling_aet == ['PYNETDICOM']
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        scp.shutdown()
    def test_aec_bytes_deprecation(self):
        """Test warning if using bytes to set an AE title."""
        ae = AE()
        msg = (
            r"The use of a list of bytes with 'require_calling_aet' is "
            r"deprecated, use a list of ASCII str instead"
        )
        with pytest.warns(DeprecationWarning, match=msg):
            ae.require_calling_aet = [b'BADAE', 'GOODAE']
        # Bytes entries are converted to str despite the warning
        assert ae.require_calling_aet == ['BADAE', 'GOODAE']
    def test_require_called_aet(self):
        """Test AE.require_called_aet"""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(Verification)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        assert assoc.is_released
        assert not assoc.is_established
        # With the restriction on, the default called AET no longer matches
        ae.require_called_aet = True
        assert ae.require_called_aet is True
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_rejected
        # Supplying the SCP's own title satisfies the restriction
        assoc = ae.associate('localhost', 11112, ae_title='PYNETDICOM')
        assert assoc.is_established
        assoc.release()
        scp.shutdown()
    def test_req_calling_aet(self):
        """ Check AE require calling aet change produces good value """
        ae = AE()
        ae.require_calling_aet = ['10', 'asdf']
        assert ae.require_calling_aet == ['10', 'asdf']
    def test_req_called_aet(self):
        """ Check AE require called aet change produces good value """
        ae = AE()
        assert ae.require_called_aet is False
        ae.require_called_aet = True
        assert ae.require_called_aet is True
        ae.require_called_aet = False
        assert ae.require_called_aet is False
    def test_string_output(self):
        """Test string output"""
        ae = AE()
        ae.add_requested_context(Verification)
        ae.require_calling_aet = ['something']
        ae.require_called_aet = True
        # str(AE) should summarise contexts, limits and AET restrictions
        assert 'Explicit VR' in ae.__str__()
        assert 'Verification' in ae.__str__()
        assert '0/10' in ae.__str__()
        assert 'something' in ae.__str__()
        assert 'Require called AE title: True' in ae.__str__()
        ae.supported_contexts = StoragePresentationContexts
        assert 'CT Image' in ae.__str__()
        ae = AE()
        ae.add_requested_context(Verification)
        assert 'None' in ae.__str__()
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(Verification)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.is_established
        # With an active association the output includes the peer details
        assert 'Explicit VR' in ae.__str__()
        assert 'Peer' in ae.__str__()
        assoc.release()
        assert assoc.is_released
        assert not assoc.is_established
        scp.shutdown()
    def test_init_implementation_class(self):
        """Test the default implementation class uid"""
        ae = AE()
        assert ae.implementation_class_uid == PYNETDICOM_IMPLEMENTATION_UID
    def test_init_implementation_version(self):
        """Test the default implementation version name"""
        ae = AE()
        assert ae.implementation_version_name == PYNETDICOM_IMPLEMENTATION_VERSION
    def test_implementation_version(self):
        """Test implementation_version_name"""
        ae = AE()
        ae.implementation_version_name = None
        assert ae.implementation_version_name is None
        # An all-spaces value is accepted; only the empty str is invalid
        ae.implementation_version_name = " "
        assert ae.implementation_version_name == " "
        msg = "'implementation_version_name' must be str or None, not 'int'"
        with pytest.raises(TypeError, match=msg):
            ae.implementation_version_name = 1234
        msg = (
            "Invalid 'implementation_version_name' value - must not be an "
            "empty str"
        )
        with pytest.raises(ValueError, match=msg):
            ae.implementation_version_name = ""
        # The failed assignment must not have changed the existing value
        assert ae.implementation_version_name == " "
    def test_implementation_class(self):
        """Test implementation_class_uid"""
        ae = AE()
        ae.implementation_class_uid = '12.3.4'
        # Assigned str values are converted to UID instances
        assert isinstance(ae.implementation_class_uid, UID)
        assert ae.implementation_class_uid == UID('12.3.4')
        msg = (
            r"'implementation_class_uid' must be str, bytes or UID, not "
            r"'NoneType'"
        )
        with pytest.raises(TypeError, match=msg):
            ae.implementation_class_uid = None
        assert ae.implementation_class_uid == UID('12.3.4')
        msg = (
            r"Invalid 'implementation_class_uid' value - must not be an "
            r"empty str"
        )
        with pytest.raises(ValueError, match=msg):
            ae.implementation_class_uid = ''
        # '1.2.04' has a leading zero in a component and is non-conformant
        msg = r"Invalid 'implementation_class_uid' value '1.2.04'"
        with pytest.raises(ValueError, match=msg):
            ae.implementation_class_uid = '1.2.04'
        # Failed assignments must not have changed the existing value
        assert ae.implementation_class_uid == UID('12.3.4')
class TestAEBadInitialisation:
    """Tests for AE() construction with invalid AE titles."""

    def test_invalid_ae_title(self):
        """Test invalid AE.ae_title"""
        # (ae_title, expected exception, expected message) triples
        bad_titles = [
            (
                ' ',
                ValueError,
                r"Invalid 'ae_title' value - must not consist entirely of spaces",
            ),
            (
                b"\xe2\x80\x8b\x35".decode('utf8'),
                ValueError,
                r"Invalid 'ae_title' value '\u200b5' "
                r"- must only contain ASCII characters",
            ),
            (
                '1234567890ABCDEFG',
                ValueError,
                r"Invalid 'ae_title' value '1234567890ABCDEFG' "
                r"- must not exceed 16 characters",
            ),
            (
                '',
                ValueError,
                r"Invalid 'ae_title' value - must not be an empty str",
            ),
            (
                'TEST\\ME',
                ValueError,
                r"Invalid 'ae_title' value 'TEST\\ME' - must not contain control "
                r"characters or backslashes",
            ),
            (
                None,
                TypeError,
                r"'ae_title' must be str, not 'NoneType'",
            ),
        ]
        for aet, exc, msg in bad_titles:
            with pytest.raises(exc, match=msg):
                AE(ae_title=aet)
class TestAE_GoodExit:
    """Tests that associations terminate cleanly via release and abort."""

    def setup(self):
        """Run prior to each test"""
        self.ae = None

    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()

    def _start_verification_scp(self):
        """Start a non-blocking Verification SCP; return (ae, server)."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        server = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(Verification)
        return ae, server

    def test_ae_release_assoc(self):
        """ Association releases OK """
        ae, server = self._start_verification_scp()
        # Run several consecutive associate/release cycles
        for _ in range(5):
            assoc = ae.associate('localhost', 11112)
            assert assoc.is_established
            assoc.release()
            assert assoc.is_released
            assert not assoc.is_established
            assert not assoc.is_aborted
            assert not assoc.is_rejected
        server.shutdown()

    def test_ae_aborts_assoc(self):
        """ Association aborts OK """
        ae, server = self._start_verification_scp()
        # Run several consecutive associate/abort cycles
        for _ in range(5):
            assoc = ae.associate('localhost', 11112)
            assert assoc.is_established
            assoc.abort()
            assert assoc.is_aborted
            assert not assoc.is_established
            assert not assoc.is_released
            assert not assoc.is_rejected
        server.shutdown()
class TestAESupportedPresentationContexts:
"""Tests for AE's presentation contexts when acting as an SCP"""
    def setup(self):
        """Create a fresh AE prior to each test."""
        self.ae = AE()
def test_add_supported_context_str(self):
"""Tests for AE.add_supported_context using str."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.context_id is None
def test_add_supported_context_sop_class(self):
"""Tests for AE.add_supported_context using SOPClass."""
self.ae.add_supported_context(RTImageStorage)
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_uid(self):
"""Tests for AE.add_supported_context using UID."""
self.ae.add_supported_context(UID('1.2.840.10008.1.1'))
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_duplicate(self):
"""Tests for AE.add_supported_context using a duplicate UID."""
self.ae.add_supported_context(UID('1.2.840.10008.1.1'))
self.ae.add_supported_context(UID('1.2.840.10008.1.1'))
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_transfer_single(self):
"""Test adding a single transfer syntax without a list"""
self.ae.add_supported_context('1.2', '1.3')
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2'
assert contexts[0].transfer_syntax == ['1.3']
self.ae.add_supported_context('1.2', UID('1.4'))
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2'
assert contexts[0].transfer_syntax == ['1.3', '1.4']
def test_add_supported_context_duplicate_transfer(self):
"""Test adding duplicate transfer syntaxes."""
self.ae.add_supported_context('1.2', ['1.3', '1.3'])
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2'
assert contexts[0].transfer_syntax == ['1.3']
self.ae.supported_contexts = []
self.ae.add_supported_context('1.2.840.10008.1.1')
self.ae.add_supported_context('1.2.840.10008.1.1')
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.supported_contexts = []
self.ae.add_supported_context('1.2.840.10008.1.1')
self.ae.add_supported_context('1.2.840.10008.1.1', [DEFAULT_TRANSFER_SYNTAXES[0]])
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_duplicate_multi(self):
"""Tests for AE.add_supported_context using a duplicate UID."""
self.ae.add_supported_context('1.2.840.10008.1.1',
[DEFAULT_TRANSFER_SYNTAXES[0]])
self.ae.add_supported_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES[1:])
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_private_abs(self):
"""Test AE.add_supported_context with a private abstract syntax"""
self.ae.add_supported_context('1.2.3.4')
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.3.4'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_supported_context_private_tran(self):
"""Test AE.add_supported_context with a private transfer syntax"""
self.ae.add_supported_context('1.2.3.4',
['1.2.3', '1.2.840.10008.1.1'])
contexts = self.ae.supported_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.3.4'
assert contexts[0].transfer_syntax == ['1.2.3', '1.2.840.10008.1.1']
def test_add_supported_context_more_128(self):
"""Test adding more than 128 presentation contexts"""
for ii in range(300):
self.ae.add_supported_context(str(ii))
contexts = self.ae.supported_contexts
assert len(contexts) == 300
    def test_supported_contexts_setter(self):
        """Test the AE.supported_contexts property setter."""
        context = build_context('1.2.840.10008.1.1')
        self.ae.supported_contexts = [context]
        contexts = self.ae.supported_contexts
        assert len(contexts) == 1
        # NOTE(review): the assertions below check the local input
        # `context` object rather than the stored `contexts[0]` -
        # presumably they were meant to verify the stored context; confirm
        # the setter stores the same object before changing this.
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert context.context_id is None
def test_supported_contexts_empty(self):
"""Test the setting supported_contexts to an empty list."""
context = build_context('1.2.840.10008.1.1')
self.ae.supported_contexts = [context]
assert len(self.ae.supported_contexts) == 1
self.ae.supported_contexts = []
assert len(self.ae.supported_contexts) == 0
def test_supported_contexts_setter_raises(self):
"""Test the AE.supported_contexts property raises if not context."""
with pytest.raises(ValueError):
self.ae.supported_contexts = ['1.2.3']
def test_supported_contexts_sorted(self):
"""Test that the supported_contexts returns contexts in order."""
self.ae.add_supported_context('1.2.3.4')
self.ae.add_supported_context('1.2.3.5')
asyntaxes = [
cntx.abstract_syntax for cntx in self.ae.supported_contexts
]
assert asyntaxes == ['1.2.3.4', '1.2.3.5']
self.ae.add_supported_context('0.1.2.3')
self.ae.add_supported_context('2.1.2.3')
asyntaxes = [
cntx.abstract_syntax for cntx in self.ae.supported_contexts
]
assert asyntaxes == ['0.1.2.3', '1.2.3.4', '1.2.3.5', '2.1.2.3']
def test_supported_contexts_more_128(self):
"""Test setting supported_contexts with more than 128 contexts."""
contexts = []
for ii in range(300):
contexts.append(build_context(str(ii)))
self.ae.supported_contexts = contexts
assert len(self.ae.supported_contexts) == 300
def test_remove_supported_context_str(self):
"""Tests for AE.remove_supported_context using str."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_supported_context('1.2.840.10008.1.1')
assert len(self.ae.supported_contexts) == 0
# Test multiple
self.ae.add_supported_context('1.2.840.10008.1.1')
self.ae.add_supported_context('1.2.840.10008.1.4', ['1.2.3.4'])
assert len(self.ae.supported_contexts) == 2
self.ae.remove_supported_context('1.2.840.10008.1.1')
assert len(self.ae.supported_contexts) == 1
for context in self.ae.supported_contexts:
assert context.abstract_syntax != '1.2.840.10008.1.1'
def test_remove_supported_context_uid(self):
"""Tests for AE.remove_supported_context using UID."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_supported_context(UID('1.2.840.10008.1.1'))
assert len(self.ae.supported_contexts) == 0
def test_remove_supported_context_sop_class(self):
"""Tests for AE.remove_supported_context using SOPClass."""
self.ae.add_supported_context(RTImageStorage)
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_supported_context(RTImageStorage)
assert len(self.ae.supported_contexts) == 0
def test_remove_supported_context_default(self):
"""Tests for AE.remove_supported_context with default transfers."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context('1.2.840.10008.1.1')
assert len(self.ae.supported_contexts) == 0
def test_remove_supported_context_single_transfer(self):
"""Tests for AE.remove_supported_context with single transfer."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context('1.2.840.10008.1.1', DEFAULT_TRANSFER_SYNTAXES[0])
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
def test_remove_supported_context_partial(self):
"""Tests for AE.remove_supported_context with partial transfers."""
# Test singular
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context('1.2.840.10008.1.1',
['1.2.840.10008.1.2'])
assert len(self.ae.supported_contexts) == 1
context = self.ae.supported_contexts[0]
assert len(context.transfer_syntax) == 3
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
assert context.abstract_syntax == '1.2.840.10008.1.1'
# Test multiple
self.ae.add_supported_context('1.2.840.10008.1.1')
self.ae.add_supported_context(RTImageStorage)
self.ae.remove_supported_context('1.2.840.10008.1.1',
['1.2.840.10008.1.2'])
assert len(self.ae.supported_contexts) == 2
context = self.ae.supported_contexts[0]
assert len(context.transfer_syntax) == 3
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert self.ae.supported_contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_remove_supported_context_all(self):
"""Tests for AE.remove_supported_context with all transfers."""
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
# Test singular
self.ae.remove_supported_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES)
assert len(self.ae.supported_contexts) == 0
# Test multiple
self.ae.add_supported_context('1.2.840.10008.1.1')
self.ae.add_supported_context(RTImageStorage)
self.ae.remove_supported_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES)
context = self.ae.supported_contexts[0]
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
def test_remove_supported_context_all_plus(self):
"""Test remove_supported_context with extra transfers"""
tsyntax = DEFAULT_TRANSFER_SYNTAXES[:]
tsyntax.append('1.2.3')
self.ae.add_supported_context('1.2.840.10008.1.1')
context = self.ae.supported_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_supported_context('1.2.840.10008.1.1', tsyntax)
assert len(self.ae.supported_contexts) == 0
def test_scu_role(self):
"""Test add_supported_context with scu_role parameter."""
self.ae.add_supported_context('1.2.3')
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_context = []
self.ae.add_supported_context('1.2.3', scu_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_context = []
self.ae.add_supported_context('1.2.3', scu_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is True
assert context.scp_role is None
self.ae.supported_context = []
self.ae.add_supported_context('1.2.3', scu_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is False
assert context.scp_role is None
def test_scu_role_update(self):
"""Test updating add_supported_context with scu_role parameter."""
self.ae.add_supported_context('1.2.3')
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context('1.2.3', scu_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context('1.2.3', scu_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is True
assert context.scp_role is None
self.ae.add_supported_context('1.2.3', scu_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is False
assert context.scp_role is None
def test_scu_role_raises(self):
"""Test add_supported_context raises if scu_role wrong type."""
with pytest.raises(TypeError, match=""):
self.ae.add_supported_context('1.2.3', scu_role='abc')
assert self.ae.supported_contexts == []
def test_scp_role(self):
"""Test add_supported_context with scu_role parameter."""
self.ae.add_supported_context('1.2.3')
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_context = []
self.ae.add_supported_context('1.2.3', scp_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.supported_context = []
self.ae.add_supported_context('1.2.3', scp_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is True
self.ae.supported_context = []
self.ae.add_supported_context('1.2.3', scp_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is False
def test_scp_role_update(self):
"""Test updating add_supported_context with scp_role parameter."""
self.ae.add_supported_context('1.2.3')
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context('1.2.3', scp_role=None)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is None
self.ae.add_supported_context('1.2.3', scp_role=True)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is True
self.ae.add_supported_context('1.2.3', scp_role=False)
context = self.ae.supported_contexts[0]
assert context.scu_role is None
assert context.scp_role is False
def test_scp_role_raises(self):
"""Test add_supported_context raises if scp_role wrong type."""
with pytest.raises(TypeError, match=""):
self.ae.add_supported_context('1.2.3', scp_role='abc')
assert self.ae.supported_contexts == []
class TestAERequestedPresentationContexts:
    """Tests for AE's presentation contexts when acting as an SCU"""
    def setup(self):
        """Create a fresh AE before each test."""
        self.ae = AE()
    def test_add_requested_context_str(self):
        """Tests for AE.add_requested_context using str."""
        self.ae.add_requested_context('1.2.840.10008.1.1')
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert context.context_id is None
    def test_add_requested_context_sop_class(self):
        """Tests for AE.add_requested_context using SOPClass."""
        self.ae.add_requested_context(RTImageStorage)
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
    def test_add_requested_context_uid(self):
        """Tests for AE.add_requested_context using UID."""
        self.ae.add_requested_context(UID('1.2.840.10008.1.1'))
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
    def test_add_requested_context_duplicate(self):
        """Test AE.add_requested_context using a duplicate UID."""
        # Unlike supported contexts, requested contexts may be duplicated
        self.ae.add_requested_context(UID('1.2.840.10008.1.1'))
        self.ae.add_requested_context(UID('1.2.840.10008.1.1'))
        contexts = self.ae.requested_contexts
        assert len(contexts) == 2
        assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
        assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert contexts[1].abstract_syntax == '1.2.840.10008.1.1'
        assert contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
    def test_add_requested_context_duplicate_multi(self):
        """Tests for AE.add_requested_context using a duplicate UID."""
        self.ae.add_requested_context('1.2.840.10008.1.1',
                                      [DEFAULT_TRANSFER_SYNTAXES[0]])
        self.ae.add_requested_context('1.2.840.10008.1.1',
                                      DEFAULT_TRANSFER_SYNTAXES[1:])
        contexts = self.ae.requested_contexts
        assert len(contexts) == 2
        assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
        assert contexts[0].transfer_syntax == [DEFAULT_TRANSFER_SYNTAXES[0]]
        assert contexts[1].abstract_syntax == '1.2.840.10008.1.1'
        assert contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
    def test_add_requested_context_transfer_single(self):
        """Test adding a single transfer syntax without a list"""
        # Renamed from test_add_supported_context_transfer_single: this
        # test exercises add_requested_context, not add_supported_context.
        self.ae.add_requested_context('1.2', '1.3')
        contexts = self.ae.requested_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2'
        assert contexts[0].transfer_syntax == ['1.3']
        self.ae.add_requested_context('1.2', UID('1.4'))
        contexts = self.ae.requested_contexts
        assert len(contexts) == 2
        assert contexts[1].abstract_syntax == '1.2'
        assert contexts[1].transfer_syntax == ['1.4']
    def test_add_requested_context_duplicate_transfer(self):
        """Test add_requested_context using duplicate transfer syntaxes"""
        self.ae.add_requested_context('1.2', ['1.3', '1.3'])
        contexts = self.ae.requested_contexts
        assert contexts[0].transfer_syntax == ['1.3']
    def test_add_requested_context_private_abs(self):
        """Test AE.add_requested_context with a private abstract syntax"""
        self.ae.add_requested_context('1.2.3.4')
        contexts = self.ae.requested_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2.3.4'
        assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
    def test_add_requested_context_private_tran(self):
        """Test AE.add_requested_context with a private transfer syntax"""
        self.ae.add_requested_context('1.2.3.4',
                                      ['1.2.3', '1.2.840.10008.1.1'])
        contexts = self.ae.requested_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2.3.4'
        assert contexts[0].transfer_syntax == ['1.2.3', '1.2.840.10008.1.1']
    def test_add_requested_context_more_128_raises(self):
        """Test adding more than 128 presentation contexts"""
        # 128 is the maximum number of requested contexts per association
        for ii in range(128):
            self.ae.add_requested_context(str(ii))
        assert len(self.ae.requested_contexts) == 128
        with pytest.raises(ValueError):
            self.ae.add_requested_context('129')
        assert len(self.ae.requested_contexts) == 128
    def test_requested_contexts_setter(self):
        """Test the AE.requested_contexts property setter."""
        context = build_context('1.2.840.10008.1.1')
        self.ae.requested_contexts = [context]
        contexts = self.ae.requested_contexts
        assert len(contexts) == 1
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert context.context_id is None
    def test_requested_contexts_empty(self):
        """Test the setting requested_contexts to an empty list."""
        context = build_context('1.2.840.10008.1.1')
        self.ae.requested_contexts = [context]
        assert len(self.ae.requested_contexts) == 1
        self.ae.requested_contexts = []
        assert len(self.ae.requested_contexts) == 0
    def test_requested_contexts_setter_raises(self):
        """Test the AE.requested_contexts property raises if not context."""
        with pytest.raises(ValueError):
            self.ae.requested_contexts = ['1.2.3']
    def test_requested_contexts_not_sorted(self):
        """Test that requested_contexts returns contexts in supplied order."""
        self.ae.add_requested_context('1.2.3.4')
        self.ae.add_requested_context('1.2.3.5')
        asyntaxes = [
            cntx.abstract_syntax for cntx in self.ae.requested_contexts
        ]
        assert asyntaxes == ['1.2.3.4', '1.2.3.5']
        self.ae.add_requested_context('0.1.2.3')
        self.ae.add_requested_context('2.1.2.3')
        asyntaxes = [
            cntx.abstract_syntax for cntx in self.ae.requested_contexts
        ]
        assert asyntaxes == ['1.2.3.4', '1.2.3.5', '0.1.2.3', '2.1.2.3']
    def test_requested_contexts_more_128(self):
        """Test setting requested_contexts with more than 128 contexts."""
        contexts = []
        for ii in range(128):
            contexts.append(build_context(str(ii)))
        self.ae.requested_contexts = contexts
        assert len(self.ae.requested_contexts) == 128
        contexts.append(build_context('129'))
        with pytest.raises(ValueError):
            self.ae.requested_contexts = contexts
    def test_remove_requested_context_str(self):
        """Tests for AE.remove_requested_context using str."""
        # Test singular
        self.ae.add_requested_context('1.2.840.10008.1.1')
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        self.ae.remove_requested_context('1.2.840.10008.1.1')
        assert len(self.ae.requested_contexts) == 0
        # Test multiple
        self.ae.add_requested_context('1.2.840.10008.1.1')
        self.ae.add_requested_context('1.2.840.10008.1.1', ['1.2.3.4'])
        self.ae.add_requested_context('1.2.840.10008.1.4', ['1.2.3.4'])
        assert len(self.ae.requested_contexts) == 3
        self.ae.remove_requested_context('1.2.840.10008.1.1')
        assert len(self.ae.requested_contexts) == 1
        for context in self.ae.requested_contexts:
            assert context.abstract_syntax != '1.2.840.10008.1.1'
    def test_remove_requested_context_uid(self):
        """Tests for AE.remove_requested_context using UID."""
        self.ae.add_requested_context('1.2.840.10008.1.1')
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        self.ae.remove_requested_context(UID('1.2.840.10008.1.1'))
        assert len(self.ae.requested_contexts) == 0
    def test_remove_requested_context_sop_class(self):
        """Tests for AE.remove_requested_context using SOPClass."""
        self.ae.add_requested_context(RTImageStorage)
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        self.ae.remove_requested_context(RTImageStorage)
        assert len(self.ae.requested_contexts) == 0
    def test_remove_requested_context_default(self):
        """Tests for AE.remove_requested_context with default transfers."""
        self.ae.add_requested_context('1.2.840.10008.1.1')
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert len(context.transfer_syntax) == 4
        self.ae.remove_requested_context('1.2.840.10008.1.1')
        assert len(self.ae.requested_contexts) == 0
    def test_remove_requested_context_single(self):
        """Tests for AE.remove_requested_context with single transfer."""
        self.ae.add_requested_context('1.2.840.10008.1.1')
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert len(context.transfer_syntax) == 4
        self.ae.remove_requested_context('1.2.840.10008.1.1', DEFAULT_TRANSFER_SYNTAXES[0])
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
    def test_remove_requested_context_partial(self):
        """Tests for AE.remove_requested_context with partial transfers."""
        # Test singular
        self.ae.add_requested_context('1.2.840.10008.1.1')
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert len(context.transfer_syntax) == 4
        self.ae.remove_requested_context('1.2.840.10008.1.1',
                                         ['1.2.840.10008.1.2'])
        assert len(self.ae.requested_contexts) == 1
        context = self.ae.requested_contexts[0]
        assert len(context.transfer_syntax) == 3
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        self.ae.remove_requested_context('1.2.840.10008.1.1')
        assert len(self.ae.requested_contexts) == 0
        # Test multiple
        self.ae.add_requested_context('1.2.840.10008.1.1')
        self.ae.add_requested_context(RTImageStorage)
        self.ae.add_requested_context('1.2.840.10008.1.1', ['1.2.3.4'])
        self.ae.remove_requested_context('1.2.840.10008.1.1',
                                         ['1.2.840.10008.1.2'])
        assert len(self.ae.requested_contexts) == 3
        context = self.ae.requested_contexts[0]
        assert len(context.transfer_syntax) == 3
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert self.ae.requested_contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert self.ae.requested_contexts[2].transfer_syntax == ['1.2.3.4']
        assert self.ae.requested_contexts[2].abstract_syntax == '1.2.840.10008.1.1'
        self.ae.remove_requested_context('1.2.840.10008.1.1')
        assert len(self.ae.requested_contexts) == 1
        assert self.ae.requested_contexts[0].abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
    def test_remove_requested_context_all(self):
        """Tests for AE.remove_requested_context with all transfers."""
        self.ae.add_requested_context('1.2.840.10008.1.1')
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert len(context.transfer_syntax) == 4
        # Test singular
        self.ae.remove_requested_context('1.2.840.10008.1.1',
                                         DEFAULT_TRANSFER_SYNTAXES)
        assert len(self.ae.requested_contexts) == 0
        # Test multiple
        self.ae.add_requested_context('1.2.840.10008.1.1',
                                      [DEFAULT_TRANSFER_SYNTAXES[0]])
        self.ae.add_requested_context('1.2.840.10008.1.1',
                                      DEFAULT_TRANSFER_SYNTAXES[1:])
        self.ae.add_requested_context(RTImageStorage)
        self.ae.remove_requested_context('1.2.840.10008.1.1',
                                         DEFAULT_TRANSFER_SYNTAXES)
        assert len(self.ae.requested_contexts) == 1
        context = self.ae.requested_contexts[0]
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
    def test_remove_requested_context_all_plus(self):
        """Test remove_requested_context with extra transfers"""
        tsyntax = DEFAULT_TRANSFER_SYNTAXES[:]
        tsyntax.append('1.2.3')
        # Test singular
        self.ae.add_requested_context('1.2.840.10008.1.1')
        context = self.ae.requested_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert len(context.transfer_syntax) == 4
        self.ae.remove_requested_context('1.2.840.10008.1.1', tsyntax)
        assert len(self.ae.requested_contexts) == 0
        # Test multiple
        self.ae.add_requested_context('1.2.840.10008.1.1',
                                      [DEFAULT_TRANSFER_SYNTAXES[0]])
        self.ae.add_requested_context('1.2.840.10008.1.1',
                                      DEFAULT_TRANSFER_SYNTAXES[1:])
        self.ae.add_requested_context(RTImageStorage)
        self.ae.remove_requested_context('1.2.840.10008.1.1', tsyntax)
        assert len(self.ae.requested_contexts) == 1
        context = self.ae.requested_contexts[0]
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
|
"""Tests for the ae module."""
import logging
import os
import signal
import threading
import time
import pytest
from pydicom import read_file
from pydicom.dataset import Dataset
from pydicom.uid import UID, ImplicitVRLittleEndian
from pynetdicom import (
AE, evt, debug_logger, build_context,
DEFAULT_TRANSFER_SYNTAXES,
StoragePresentationContexts,
VerificationPresentationContexts,
PYNETDICOM_IMPLEMENTATION_UID,
PYNETDICOM_IMPLEMENTATION_VERSION
)
from pynetdicom.presentation import build_context
from pynetdicom.sop_class import RTImageStorage, Verification
from pynetdicom.transport import AssociationServer, RequestHandler
# debug_logger()
# Directory containing the DICOM test fixtures, relative to this test module
TEST_DS_DIR = os.path.join(os.path.dirname(__file__), 'dicom_files')
# Uncompressed dataset fixture
DATASET = read_file(os.path.join(TEST_DS_DIR, 'RTImageStorage.dcm'))
# Compressed (JPEG 2000 Lossless) dataset fixture
COMP_DATASET = read_file(os.path.join(TEST_DS_DIR, 'MRImageStorage_JPG2000_Lossless.dcm'))
def test_blocking_handler():
    """Test binding events to the blocking AssociationServer."""
    ae = AE()
    ae.add_supported_context('1.2.840.10008.1.1')

    def on_echo(event):
        # Always report success for C-ECHO
        return 0x0000

    # start_server() blocks, so run it in a daemon thread
    server_thread = threading.Thread(
        target=ae.start_server,
        args=(('', 11112),),
        kwargs={'evt_handlers': [(evt.EVT_C_ECHO, on_echo)]},
    )
    server_thread.daemon = True
    server_thread.start()
    # Give the blocking server a moment to come up, then stop it
    time.sleep(0.1)
    ae.shutdown()
class TestMakeServer:
    """Tests for AE.make_server()"""
    def setup(self):
        """Run prior to each test"""
        # Created lazily by tests that need a running server
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
    def test_default_arguments(self):
        """make_server() with only an address returns an AssociationServer."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        server = ae.make_server(('', 11112))
        assert isinstance(server, AssociationServer)
    def test_custom_request_handler(self):
        """A custom request_handler class is passed through to the server."""
        class MyRequestHandler(RequestHandler):
            pass
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        server = ae.make_server(('', 11112), request_handler=MyRequestHandler)
        assert server.RequestHandlerClass is MyRequestHandler
    def test_aet_bytes_deprecation(self):
        """Test warning if using bytes to set an AE title."""
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        msg = (
            r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
            r"str instead"
        )
        # bytes titles still work but emit a DeprecationWarning
        with pytest.warns(DeprecationWarning, match=msg):
            server = ae.start_server(('', 11112), block=False, ae_title=b'BADAE2')
        assert server.ae_title == 'BADAE2'
        server.shutdown()
class TestStartServer:
    """Tests for AE.start_server()"""
    def setup(self):
        """Run prior to each test"""
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
    def test_ae_title(self):
        """Test the `ae_title` keyword parameter."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.ae_title = 'TESTAET'
        assert ae.ae_title == 'TESTAET'
        ae.add_supported_context(Verification)
        # Without the keyword the server inherits the AE's title
        server = ae.start_server(('', 11112), block=False)
        assert server.ae_title == ae.ae_title
        server.shutdown()
        # The keyword overrides the AE's title for this server only
        server = ae.start_server(('', 11112), block=False, ae_title='MYAE')
        assert server.ae_title == 'MYAE'
        ae.require_called_aet = True
        ae.add_requested_context(Verification)
        assoc = ae.associate('localhost', 11112, ae_title='MYAE')
        assert assoc.is_established
        assoc.release()
        assert assoc.is_released
        server.shutdown()
    def test_contexts(self):
        """Test the `contexts` keyword parameter."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.ae_title = 'TESTAET'
        assert ae.ae_title == 'TESTAET'
        cx = build_context(Verification)
        # Contexts passed to start_server() are used instead of
        # AE.supported_contexts
        server = ae.start_server(('', 11112), block=False, contexts=[cx])
        ae.add_requested_context(Verification)
        assoc = ae.associate('localhost', 11112, ae_title='MYAE')
        assert assoc.is_established
        assert (
            assoc.accepted_contexts[0].abstract_syntax == Verification
        )
        assoc.release()
        assert assoc.is_released
        server.shutdown()
class TestAEVerificationSCP:
    """Check verification SCP"""
    def setup(self):
        """Run prior to each test"""
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
    @pytest.mark.skipif(os.name == "nt", reason="Kills pytest on windows")
    def test_start_server_keyboard_interrupt(self):
        """Test stopping the SCP with keyboard"""
        pid = os.getpid()
        def trigger_signal():
            # Deliver SIGINT to our own process to simulate Ctrl+C against
            # the blocking server started below
            time.sleep(0.1)
            os.kill(pid, signal.SIGINT)
        self.ae = ae = AE()
        ae.add_supported_context('1.2.3')
        thread = threading.Thread(target=trigger_signal)
        thread.daemon = True
        thread.start()
        # Blocking call - returns once the SIGINT arrives
        ae.start_server(('', 11112))
        ae.shutdown()
    def test_no_supported_contexts(self):
        """Test starting with no contexts raises"""
        ae = AE()
        with pytest.raises(ValueError, match=r"No supported Presentation"):
            ae.start_server(('', 11112))
    def test_new_scu_scp_warning(self):
        """Test that a warning is given if scu_role and scp_role bad."""
        ae = AE()
        # scp_role=False with scu_role unset leaves no valid role
        ae.add_supported_context('1.2.3.4', scp_role=False)
        msg = r"The following presentation contexts have "
        with pytest.raises(ValueError, match=msg):
            ae.start_server(('', 11112))
    def test_str_empty(self):
        """Test str output for default AE"""
        # Only checks that __str__ doesn't raise
        ae = AE()
        ae.__str__()
class TestAEPresentationSCU:
    """Tests for AE presentation contexts when running as an SCU"""
    def setup(self):
        """Run prior to each test"""
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
    def test_associate_context(self):
        """Test that AE.associate doesn't modify the supplied contexts"""
        # Test AE.requested_contexts
        self.ae = ae = AE()
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)
        ae.requested_contexts = VerificationPresentationContexts
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        # The AE-level context must remain without a context_id; only the
        # association's copy gets one assigned
        assert ae.requested_contexts[0].context_id is None
        assert len(assoc.requestor.requested_contexts) == 1
        assert assoc.requestor.requested_contexts[0].abstract_syntax == (
            '1.2.840.10008.1.1'
        )
        assert assoc.requestor.requested_contexts[0].context_id == 1
        assoc.release()
        assert not assoc.is_established
        assert assoc.is_released
        # Test associate(contexts=...)
        ae.requested_contexts = []
        assoc = ae.associate('localhost', 11112,
                             contexts=VerificationPresentationContexts)
        assert assoc.is_established
        # The shared module-level context list must also stay unmodified
        assert VerificationPresentationContexts[0].context_id is None
        assert len(assoc.requestor.requested_contexts) == 1
        assert assoc.requestor.requested_contexts[0].abstract_syntax == (
            '1.2.840.10008.1.1'
        )
        assert assoc.requestor.requested_contexts[0].context_id == 1
        assoc.release()
        assert not assoc.is_established
        assert assoc.is_released
        scp.shutdown()
    def test_associate_context_raises(self):
        """Test that AE.associate raises exception if no requested contexts"""
        self.ae = ae = AE()
        with pytest.raises(RuntimeError):
            assoc = ae.associate('localhost', 11112)
class TestAEGoodTimeoutSetters:
    """Tests for the AE timeout property setters.

    Invalid values are silently replaced by the property's default rather
    than raising; active associations pick up changes immediately.
    """
    def test_acse_timeout(self):
        """ Check AE ACSE timeout change produces good value """
        ae = AE()
        assert ae.acse_timeout == 30
        ae.acse_timeout = None
        assert ae.acse_timeout is None
        # Invalid (negative / non-numeric) values revert to the default
        ae.acse_timeout = -100
        assert ae.acse_timeout == 30
        ae.acse_timeout = 'a'
        assert ae.acse_timeout == 30
        ae.acse_timeout = 0
        assert ae.acse_timeout == 0
        ae.acse_timeout = 30
        assert ae.acse_timeout == 30
    def test_dimse_timeout(self):
        """ Check AE DIMSE timeout change produces good value """
        ae = AE()
        assert ae.dimse_timeout == 30
        ae.dimse_timeout = None
        assert ae.dimse_timeout is None
        ae.dimse_timeout = -100
        assert ae.dimse_timeout == 30
        ae.dimse_timeout = 'a'
        assert ae.dimse_timeout == 30
        ae.dimse_timeout = 0
        assert ae.dimse_timeout == 0
        ae.dimse_timeout = 30
        assert ae.dimse_timeout == 30
    def test_network_timeout(self):
        """ Check AE network timeout change produces good value """
        ae = AE()
        assert ae.network_timeout == 60
        ae.network_timeout = None
        assert ae.network_timeout is None
        ae.network_timeout = -100
        assert ae.network_timeout == 60
        ae.network_timeout = 'a'
        assert ae.network_timeout == 60
        ae.network_timeout = 0
        assert ae.network_timeout == 0
        ae.network_timeout = 30
        assert ae.network_timeout == 30
    def test_connection_timeout(self):
        """ Check AE connection timeout change produces good value """
        ae = AE()
        assert ae.connection_timeout is None
        ae.connection_timeout = None
        assert ae.connection_timeout is None
        # Unlike the other timeouts, invalid values (including 0) revert
        # to None rather than a numeric default
        ae.connection_timeout = -100
        assert ae.connection_timeout is None
        ae.connection_timeout = 'a'
        assert ae.connection_timeout is None
        ae.connection_timeout = 0
        assert ae.connection_timeout is None
        ae.connection_timeout = 30
        assert ae.connection_timeout == 30
    def test_active_acse(self):
        """Test changing acse_timeout with active associations."""
        ae = AE()
        ae.add_supported_context('1.2.840.10008.1.1')
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context('1.2.840.10008.1.1')
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.acse_timeout == 30
        # Setting the AE timeout propagates to established associations
        ae.acse_timeout = 5
        assert assoc.acse_timeout == 5
        assoc.release()
        scp.shutdown()
        ae.shutdown()
    def test_active_dimse(self):
        """Test changing dimse_timeout with active associations."""
        ae = AE()
        ae.add_supported_context('1.2.840.10008.1.1')
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context('1.2.840.10008.1.1')
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.dimse_timeout == 30
        ae.dimse_timeout = 5
        assert assoc.dimse_timeout == 5
        assoc.release()
        scp.shutdown()
    def test_active_network(self):
        """Test changing network_timeout with active associations."""
        ae = AE()
        ae.add_supported_context('1.2.840.10008.1.1')
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context('1.2.840.10008.1.1')
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.network_timeout == 60
        ae.network_timeout = 5
        assert assoc.network_timeout == 5
        assoc.release()
        scp.shutdown()
    def test_active_connection(self):
        """Test changing connection_timeout with active associations."""
        ae = AE()
        ae.add_supported_context('1.2.840.10008.1.1')
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context('1.2.840.10008.1.1')
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.connection_timeout is None
        ae.connection_timeout = 5
        assert assoc.connection_timeout == 5
        assoc.release()
        scp.shutdown()
class TestAEGoodAssociation:
    """Tests for successful associations between a local SCU and SCP.

    These tests open real TCP connections on port 11112 and rely on the
    relative ordering of sleeps and timeout values - be careful when
    changing any of the timing constants.
    """
    def setup(self):
        """Run prior to each test"""
        self.ae = None
    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()
    def test_associate_establish_release(self):
        """ Check SCU Association with SCP """
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(Verification)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        assert not assoc.is_established
        assert assoc.is_released
        scp.shutdown()
    def test_associate_max_pdu(self):
        """ Check Association has correct max PDUs on either end """
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.maximum_pdu_size = 54321
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)
        # Use a separate AE for the SCU so the two ends can negotiate
        # different maximum PDU lengths
        scu_ae = AE()
        scu_ae.acse_timeout = 5
        scu_ae.dimse_timeout = 5
        scu_ae.network_timeout = 5
        scu_ae.add_requested_context(Verification)
        assoc = scu_ae.associate('localhost', 11112, max_pdu=12345)
        assert assoc.is_established
        assert scp.active_associations[0].acceptor.maximum_length == (
            54321
        )
        assert scp.active_associations[0].requestor.maximum_length == (
            12345
        )
        assert assoc.requestor.maximum_length == 12345
        assert assoc.acceptor.maximum_length == 54321
        assoc.release()
        time.sleep(0.1)
        assert scp.active_associations == []
        # Check 0 max pdu value - max PDU value maps to 0x10000 internally
        assoc = scu_ae.associate('localhost', 11112, max_pdu=0)
        assert assoc.requestor.maximum_length == 0
        assert scp.active_associations[0].requestor.maximum_length == 0
        assoc.release()
        scp.shutdown()
    def test_association_timeouts(self):
        """ Check that the Association timeouts are being set correctly and
        work """
        # Delays injected into the SCP's handlers via these closures
        acse_delay = None
        dimse_delay = None
        def handle_echo(event):
            if dimse_delay:
                time.sleep(dimse_delay)
            return 0x0000
        def handle_acse_recv(event):
            if acse_delay:
                time.sleep(acse_delay)
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 0.5
        ae.add_supported_context(Verification)
        scp = ae.start_server(
            ('', 11112), block=False, evt_handlers=[(evt.EVT_ACSE_RECV, handle_acse_recv), (evt.EVT_C_ECHO, handle_echo)]
        )
        scu_ae = AE()
        scu_ae.acse_timeout = 30
        scu_ae.dimse_timeout = 30
        scu_ae.network_timeout = 30
        scu_ae.add_requested_context(Verification)
        assoc = scu_ae.associate('localhost', 11112)
        assert assoc.is_established
        # Hit the network timeout
        time.sleep(1.0)
        assert assoc.is_aborted
        assert len(scp.active_associations) == 0
        ae.acse_timeout = None
        ae.dimse_timeout = None
        ae.network_timeout = None
        scu_ae.acse_timeout = 30
        scu_ae.dimse_timeout = 0
        # SCP's echo handler sleeps longer than the SCU's DIMSE timeout
        dimse_delay = 1
        assoc = scu_ae.associate('localhost', 11112)
        assert assoc.is_established
        status = assoc.send_c_echo()
        time.sleep(1.5)
        assert assoc.is_aborted
        assert len(scp.active_associations) == 0
        # FIXME: If this is `0` we can process an ABORT primitive where
        # we expect an ASSOCIATION primitive.
        scu_ae.acse_timeout = 0.5
        scu_ae.dimse_timeout = 30
        acse_delay = 1
        assoc = scu_ae.associate('localhost', 11112)
        assert not assoc.is_established
        assert assoc.is_aborted
        time.sleep(1.5)
        assert len(scp.active_associations) == 0
        scu_ae.acse_timeout = 30
        # `0` is an invalid value
        scu_ae.connection_timeout = 0.5
        scu_ae.dimse_timeout = 30
        # The host exists and is routable, but there is a middlebox ignoring
        # the initial TCP SYN.
        assoc = scu_ae.associate('example.com', 11112)
        assert not assoc.is_established
        assert assoc.is_aborted
        assert len(scp.active_associations) == 0
        ae.acse_timeout = 21
        ae.dimse_timeout = 22
        scu_ae.acse_timeout = 31
        scu_ae.connection_timeout = None
        scu_ae.dimse_timeout = 32
        assoc = scu_ae.associate('localhost', 11112)
        assert assoc.is_established
        # Each side keeps its own independent timeout values
        assert scp.active_associations[0].acse_timeout == 21
        assert scp.active_associations[0].dimse_timeout == 22
        assert assoc.acse_timeout == 31
        assert assoc.dimse_timeout == 32
        assoc.release()
        scp.shutdown()
    def test_connection_timeout(self, caplog):
        # * ACSE timeout does not start until connection timeout completes
        # * Logs indicate that we hit the timeout case
        scu_ae = AE()
        scu_ae.acse_timeout = 1
        scu_ae.connection_timeout = 2
        scu_ae.add_requested_context(Verification)
        with caplog.at_level(logging.ERROR, logger='pynetdicom'):
            assoc = scu_ae.associate('example.com', 11112)
            assert not assoc.is_established
            assert assoc.is_aborted
            # Depending on the environment the failure is either a timeout
            # or a DNS resolution error - exactly one must be logged
            msgs = [
                "TCP Initialisation Error: timed out",
                "TCP Initialisation Error: [Errno -2] Name or service "
                "not known"
            ]
            assert len([m for m in msgs if m in caplog.text]) == 1
    def test_select_timeout_okay(self):
        """Test that using start works OK with timeout."""
        # Multiple release/association in a sort time causes an OSError as
        # the port is still in use due to the use of select.select() with
        # a timeout. Fixed by using socket.shutdown in stop()
        for ii in range(3):
            self.ae = ae = AE()
            ae.acse_timeout = 5
            ae.dimse_timeout = 5
            ae.network_timeout = 5
            ae.add_supported_context(Verification)
            scp = ae.start_server(('', 11112), block=False)
            ae.add_requested_context(Verification)
            assoc = ae.associate('localhost', 11112)
            assert assoc.is_established
            assoc.release()
            assert assoc.is_released
            assert not assoc.is_established
            scp.shutdown()
    def test_aet_bytes_deprecation(self):
        """Test warning if using bytes to set an AE title."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        server = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(Verification)
        msg = (
            r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
            r"str instead"
        )
        with pytest.warns(DeprecationWarning, match=msg):
            assoc = ae.associate('', 11112, ae_title=b'BADAE2')
        # The bytes title applies to the peer (acceptor), not the requestor
        assert assoc.acceptor.ae_title == 'BADAE2'
        assert assoc.requestor.ae_title == 'PYNETDICOM'
        server.shutdown()
class TestAEBadAssociation:
    """Tests for AE.associate() calls that fail validation before any
    network traffic occurs."""
    def test_raise(self):
        """Test bad associate call"""
        ae = AE()
        ae.add_requested_context(Verification)
        # The address must be a str and the port an int
        with pytest.raises(TypeError):
            ae.associate(1112, 11112)
        with pytest.raises(TypeError):
            ae.associate('localhost', '1.2.3.4')
    def test_invalid_ae_title(self):
        """Test invalid AE.ae_title"""
        ae = AE()
        ae.add_requested_context(Verification)

        def _expect(title, msg, exc=ValueError):
            # Each bad title must be rejected before association starts
            with pytest.raises(exc, match=msg):
                ae.associate('localhost', 11112, ae_title=title)

        _expect(
            ' ' * 16,
            r"Invalid 'ae_title' value - must not consist entirely of spaces",
        )
        _expect(
            b"\xe2\x80\x8b\x35".decode('utf8'),
            r"Invalid 'ae_title' value '\u200b5' "
            r"- must only contain ASCII characters",
        )
        _expect(
            '1234567890ABCDEFG',
            r"Invalid 'ae_title' value '1234567890ABCDEFG' "
            r"- must not exceed 16 characters",
        )
        _expect('', r"Invalid 'ae_title' value - must not be an empty str")
        _expect(
            'TEST\\ME',
            r"Invalid 'ae_title' value 'TEST\\ME' - must not contain control "
            r"characters or backslashes",
        )
        _expect(12345, r"'ae_title' must be str, not 'int'", exc=TypeError)
class TestAEGoodMiscSetters:
    """Tests for the miscellaneous AE property setters with good values.

    Several tests start a live Verification SCP on port 11112; ``teardown``
    shuts down any AE stored on ``self.ae`` so server threads don't leak
    between tests.
    """

    def setup(self):
        # No AE by default; tests that start a server assign to self.ae
        self.ae = None

    def teardown(self):
        # Stop any server threads started during the test
        if self.ae:
            self.ae.shutdown()

    def test_ae_title_good(self):
        """ Check AE title change produces good value """
        ae = AE()
        # Leading/trailing/embedded spaces are preserved, not stripped
        ae.ae_title = ' TEST '
        assert ae.ae_title == ' TEST '
        ae.ae_title = ' TEST'
        assert ae.ae_title == ' TEST'
        ae.ae_title = 'a TES'
        assert ae.ae_title == 'a TES'
        ae.ae_title = 'a TEST'
        assert ae.ae_title == 'a TEST'

    def test_aet_bytes_deprecation(self):
        """Test warning if using bytes to set an AE title."""
        msg = (
            r"The use of bytes with 'ae_title' is deprecated, use an ASCII "
            r"str instead"
        )
        # bytes titles warn but are still converted to str
        with pytest.warns(DeprecationWarning, match=msg):
            ae = AE(b'BADAE')
            assert ae.ae_title == 'BADAE'

    def test_implementation(self):
        """Check the implementation version name and class UID setters"""
        ae = AE()
        # version name may be cleared with None
        ae.implementation_version_name = None
        assert ae.implementation_version_name is None
        ae.implementation_class_uid = '1.2.3'
        assert ae.implementation_class_uid == '1.2.3'

    def test_max_assoc_good(self):
        """ Check AE maximum association change produces good value """
        ae = AE()
        # Invalid values fall back to the default of 1 rather than raising
        ae.maximum_associations = -10
        assert ae.maximum_associations == 1
        ae.maximum_associations = ['a']
        assert ae.maximum_associations == 1
        ae.maximum_associations = '10'
        assert ae.maximum_associations == 1
        ae.maximum_associations = 0
        assert ae.maximum_associations == 1
        ae.maximum_associations = 5
        assert ae.maximum_associations == 5

    def test_max_pdu_good(self):
        """ Check AE maximum pdu size change produces good value """
        ae = AE()
        # Negative values fall back to the default; 0 means 'unlimited'
        ae.maximum_pdu_size = -10
        assert ae.maximum_pdu_size == 16382
        ae.maximum_pdu_size = 0
        assert ae.maximum_pdu_size == 0
        ae.maximum_pdu_size = 5000
        assert ae.maximum_pdu_size == 5000

    def test_require_calling_aet(self):
        """Test AE.require_calling_aet"""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)

        ae.add_requested_context(Verification)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        assert assoc.is_released
        assert not assoc.is_established

        # Requestor's default title 'PYNETDICOM' isn't in the allowed list
        ae.require_calling_aet = ['MYAE']
        assert ae.require_calling_aet == ['MYAE']
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_rejected

        # Allowed list matching the requestor's title -> accepted
        ae.require_calling_aet = ['PYNETDICOM']
        assert ae.require_calling_aet == ['PYNETDICOM']
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()

        # Empty str entries are rejected and the previous value is kept
        msg = r"Invalid 'require_calling_aet' value - must not be an empty str"
        with pytest.raises(ValueError, match=msg):
            ae.require_calling_aet = ['']
        assert ae.require_calling_aet == ['PYNETDICOM']
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        scp.shutdown()

    def test_aec_bytes_deprecation(self):
        """Test warning if using bytes to set an AE title."""
        ae = AE()
        msg = (
            r"The use of a list of bytes with 'require_calling_aet' is "
            r"deprecated, use a list of ASCII str instead"
        )
        # bytes entries warn but are converted to str alongside str entries
        with pytest.warns(DeprecationWarning, match=msg):
            ae.require_calling_aet = [b'BADAE', 'GOODAE']
        assert ae.require_calling_aet == ['BADAE', 'GOODAE']

    def test_require_called_aet(self):
        """Test AE.require_called_aet"""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)

        ae.add_requested_context(Verification)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assoc.release()
        assert assoc.is_released
        assert not assoc.is_established

        # When required, the called AE title must match the acceptor's
        ae.require_called_aet = True
        assert ae.require_called_aet is True
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_rejected
        assoc = ae.associate('localhost', 11112, ae_title='PYNETDICOM')
        assert assoc.is_established
        assoc.release()
        scp.shutdown()

    def test_req_calling_aet(self):
        """ Check AE require calling aet change produces good value """
        ae = AE()
        ae.require_calling_aet = ['10', 'asdf']
        assert ae.require_calling_aet == ['10', 'asdf']

    def test_req_called_aet(self):
        """ Check AE require called aet change produces good value """
        ae = AE()
        assert ae.require_called_aet is False
        ae.require_called_aet = True
        assert ae.require_called_aet is True
        ae.require_called_aet = False
        assert ae.require_called_aet is False

    def test_string_output(self):
        """Test string output"""
        ae = AE()
        ae.add_requested_context(Verification)
        ae.require_calling_aet = ['something']
        ae.require_called_aet = True
        assert 'Explicit VR' in ae.__str__()
        assert 'Verification' in ae.__str__()
        # '0/10' is current/maximum associations
        assert '0/10' in ae.__str__()
        assert 'something' in ae.__str__()
        assert 'Require called AE title: True' in ae.__str__()
        ae.supported_contexts = StoragePresentationContexts
        assert 'CT Image' in ae.__str__()

        ae = AE()
        ae.add_requested_context(Verification)
        assert 'None' in ae.__str__()

        # With an active association the peer is listed in the output
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        scp = ae.start_server(('', 11112), block=False)

        ae.add_requested_context(Verification)
        assoc = ae.associate('localhost', 11112)
        assert assoc.is_established
        assert assoc.is_established
        assert 'Explicit VR' in ae.__str__()
        assert 'Peer' in ae.__str__()

        assoc.release()
        assert assoc.is_released
        assert not assoc.is_established
        scp.shutdown()

    def test_init_implementation_class(self):
        """Test the default implementation class uid"""
        ae = AE()
        assert ae.implementation_class_uid == PYNETDICOM_IMPLEMENTATION_UID

    def test_init_implementation_version(self):
        """Test the default implementation version name"""
        ae = AE()
        assert ae.implementation_version_name == PYNETDICOM_IMPLEMENTATION_VERSION

    def test_implementation_version(self):
        """Test implementation_version_name"""
        ae = AE()
        ae.implementation_version_name = None
        assert ae.implementation_version_name is None
        # A whitespace-only str is allowed (unlike ae_title)
        ae.implementation_version_name = " "
        assert ae.implementation_version_name == " "

        msg = "'implementation_version_name' must be str or None, not 'int'"
        with pytest.raises(TypeError, match=msg):
            ae.implementation_version_name = 1234

        # An empty str is rejected and the previous value is kept
        msg = (
            "Invalid 'implementation_version_name' value - must not be an "
            "empty str"
        )
        with pytest.raises(ValueError, match=msg):
            ae.implementation_version_name = ""
        assert ae.implementation_version_name == " "

    def test_implementation_class(self):
        """Test implementation_class_uid"""
        ae = AE()
        ae.implementation_class_uid = '12.3.4'
        assert isinstance(ae.implementation_class_uid, UID)
        assert ae.implementation_class_uid == UID('12.3.4')
        msg = (
            r"'implementation_class_uid' must be str, bytes or UID, not "
            r"'NoneType'"
        )
        with pytest.raises(TypeError, match=msg):
            ae.implementation_class_uid = None
        assert ae.implementation_class_uid == UID('12.3.4')
        msg = (
            r"Invalid 'implementation_class_uid' value - must not be an "
            r"empty str"
        )
        with pytest.raises(ValueError, match=msg):
            ae.implementation_class_uid = ''
        # '1.2.04' has a leading zero component, so it isn't a valid UID
        msg = r"Invalid 'implementation_class_uid' value '1.2.04'"
        with pytest.raises(ValueError, match=msg):
            ae.implementation_class_uid = '1.2.04'
        assert ae.implementation_class_uid == UID('12.3.4')
class TestAEBadInitialisation:
    """Tests for creating an AE with invalid initial values."""

    def test_invalid_ae_title(self):
        """Test invalid AE.ae_title"""
        # (ae_title, expected ValueError message) pairs
        cases = (
            (
                ' ',
                r"Invalid 'ae_title' value - must not consist entirely of spaces",
            ),
            (
                b"\xe2\x80\x8b\x35".decode('utf8'),
                r"Invalid 'ae_title' value '\u200b5' "
                r"- must only contain ASCII characters",
            ),
            (
                '1234567890ABCDEFG',
                r"Invalid 'ae_title' value '1234567890ABCDEFG' "
                r"- must not exceed 16 characters",
            ),
            ('', r"Invalid 'ae_title' value - must not be an empty str"),
            (
                'TEST\\ME',
                r"Invalid 'ae_title' value 'TEST\\ME' - must not contain "
                r"control characters or backslashes",
            ),
        )
        for aet, msg in cases:
            with pytest.raises(ValueError, match=msg):
                AE(ae_title=aet)
        # None is rejected with a TypeError rather than a ValueError
        with pytest.raises(TypeError, match=r"'ae_title' must be str, not 'NoneType'"):
            AE(ae_title=None)
class TestAE_GoodExit:
    """Tests for cleanly ending established associations."""

    def setup(self):
        """Run prior to each test"""
        self.ae = None

    def teardown(self):
        """Clear any active threads"""
        if self.ae:
            self.ae.shutdown()

    def _start_echo_scp(self):
        """Start a Verification SCP on port 11112, return the server."""
        self.ae = ae = AE()
        ae.acse_timeout = 5
        ae.dimse_timeout = 5
        ae.network_timeout = 5
        ae.add_supported_context(Verification)
        server = ae.start_server(('', 11112), block=False)
        ae.add_requested_context(Verification)
        return server

    def test_ae_release_assoc(self):
        """ Association releases OK """
        server = self._start_echo_scp()
        # Test N associate/release cycles
        for _ in range(5):
            assoc = self.ae.associate('localhost', 11112)
            assert assoc.is_established
            assoc.release()
            assert assoc.is_released
            assert not assoc.is_established
            assert not assoc.is_aborted
            assert not assoc.is_rejected
        server.shutdown()

    def test_ae_aborts_assoc(self):
        """ Association aborts OK """
        server = self._start_echo_scp()
        # Test N associate/abort cycles
        for _ in range(5):
            assoc = self.ae.associate('localhost', 11112)
            assert assoc.is_established
            assoc.abort()
            assert assoc.is_aborted
            assert not assoc.is_established
            assert not assoc.is_released
            assert not assoc.is_rejected
        server.shutdown()
class TestAESupportedPresentationContexts:
    """Tests for AE's presentation contexts when acting as an SCP"""
    def setup(self):
        # Fresh AE per test so supported contexts don't leak between tests
        self.ae = AE()

    def test_add_supported_context_str(self):
        """Tests for AE.add_supported_context using str."""
        self.ae.add_supported_context('1.2.840.10008.1.1')

        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert context.context_id is None

    def test_add_supported_context_sop_class(self):
        """Tests for AE.add_supported_context using SOPClass."""
        self.ae.add_supported_context(RTImageStorage)

        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES

    def test_add_supported_context_uid(self):
        """Tests for AE.add_supported_context using UID."""
        self.ae.add_supported_context(UID('1.2.840.10008.1.1'))

        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES

    def test_add_supported_context_duplicate(self):
        """Tests for AE.add_supported_context using a duplicate UID."""
        # Duplicate supported contexts are merged into a single context
        self.ae.add_supported_context(UID('1.2.840.10008.1.1'))
        self.ae.add_supported_context(UID('1.2.840.10008.1.1'))

        contexts = self.ae.supported_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
        assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES

    def test_add_supported_context_transfer_single(self):
        """Test adding a single transfer syntax without a list"""
        self.ae.add_supported_context('1.2', '1.3')

        contexts = self.ae.supported_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2'
        assert contexts[0].transfer_syntax == ['1.3']

        # Adding again with a new transfer syntax extends the existing context
        self.ae.add_supported_context('1.2', UID('1.4'))

        contexts = self.ae.supported_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2'
        assert contexts[0].transfer_syntax == ['1.3', '1.4']

    def test_add_supported_context_duplicate_transfer(self):
        """Test adding duplicate transfer syntaxes."""
        self.ae.add_supported_context('1.2', ['1.3', '1.3'])

        contexts = self.ae.supported_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2'
        assert contexts[0].transfer_syntax == ['1.3']

        self.ae.supported_contexts = []
        self.ae.add_supported_context('1.2.840.10008.1.1')
        self.ae.add_supported_context('1.2.840.10008.1.1')

        contexts = self.ae.supported_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
        assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES

        self.ae.supported_contexts = []
        self.ae.add_supported_context('1.2.840.10008.1.1')
        self.ae.add_supported_context('1.2.840.10008.1.1', [DEFAULT_TRANSFER_SYNTAXES[0]])

        contexts = self.ae.supported_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
        assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES

    def test_add_supported_context_duplicate_multi(self):
        """Tests for AE.add_supported_context using a duplicate UID."""
        # Transfer syntaxes from both calls are merged into one context
        self.ae.add_supported_context('1.2.840.10008.1.1',
                                      [DEFAULT_TRANSFER_SYNTAXES[0]])
        self.ae.add_supported_context('1.2.840.10008.1.1',
                                      DEFAULT_TRANSFER_SYNTAXES[1:])

        contexts = self.ae.supported_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
        assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES

    def test_add_supported_context_private_abs(self):
        """Test AE.add_supported_context with a private abstract syntax"""
        self.ae.add_supported_context('1.2.3.4')

        contexts = self.ae.supported_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2.3.4'
        assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES

    def test_add_supported_context_private_tran(self):
        """Test AE.add_supported_context with a private transfer syntax"""
        self.ae.add_supported_context('1.2.3.4',
                                      ['1.2.3', '1.2.840.10008.1.1'])

        contexts = self.ae.supported_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2.3.4'
        assert contexts[0].transfer_syntax == ['1.2.3', '1.2.840.10008.1.1']

    def test_add_supported_context_more_128(self):
        """Test adding more than 128 presentation contexts"""
        # Unlike requested contexts, supported contexts have no 128 limit
        for ii in range(300):
            self.ae.add_supported_context(str(ii))

        contexts = self.ae.supported_contexts
        assert len(contexts) == 300

    def test_supported_contexts_setter(self):
        """Test the AE.supported_contexts property setter."""
        context = build_context('1.2.840.10008.1.1')
        self.ae.supported_contexts = [context]

        contexts = self.ae.supported_contexts
        assert len(contexts) == 1
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert context.context_id is None

    def test_supported_contexts_empty(self):
        """Test the setting supported_contexts to an empty list."""
        context = build_context('1.2.840.10008.1.1')
        self.ae.supported_contexts = [context]
        assert len(self.ae.supported_contexts) == 1

        self.ae.supported_contexts = []
        assert len(self.ae.supported_contexts) == 0

    def test_supported_contexts_setter_raises(self):
        """Test the AE.supported_contexts property raises if not context."""
        with pytest.raises(ValueError):
            self.ae.supported_contexts = ['1.2.3']

    def test_supported_contexts_sorted(self):
        """Test that the supported_contexts returns contexts in order."""
        # Supported contexts are returned sorted by abstract syntax
        self.ae.add_supported_context('1.2.3.4')
        self.ae.add_supported_context('1.2.3.5')

        asyntaxes = [
            cntx.abstract_syntax for cntx in self.ae.supported_contexts
        ]
        assert asyntaxes == ['1.2.3.4', '1.2.3.5']

        self.ae.add_supported_context('0.1.2.3')
        self.ae.add_supported_context('2.1.2.3')
        asyntaxes = [
            cntx.abstract_syntax for cntx in self.ae.supported_contexts
        ]
        assert asyntaxes == ['0.1.2.3', '1.2.3.4', '1.2.3.5', '2.1.2.3']

    def test_supported_contexts_more_128(self):
        """Test setting supported_contexts with more than 128 contexts."""
        contexts = []
        for ii in range(300):
            contexts.append(build_context(str(ii)))

        self.ae.supported_contexts = contexts
        assert len(self.ae.supported_contexts) == 300

    def test_remove_supported_context_str(self):
        """Tests for AE.remove_supported_context using str."""
        self.ae.add_supported_context('1.2.840.10008.1.1')

        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES

        self.ae.remove_supported_context('1.2.840.10008.1.1')
        assert len(self.ae.supported_contexts) == 0

        # Test multiple
        self.ae.add_supported_context('1.2.840.10008.1.1')
        self.ae.add_supported_context('1.2.840.10008.1.4', ['1.2.3.4'])

        assert len(self.ae.supported_contexts) == 2
        self.ae.remove_supported_context('1.2.840.10008.1.1')
        assert len(self.ae.supported_contexts) == 1
        for context in self.ae.supported_contexts:
            assert context.abstract_syntax != '1.2.840.10008.1.1'

    def test_remove_supported_context_uid(self):
        """Tests for AE.remove_supported_context using UID."""
        self.ae.add_supported_context('1.2.840.10008.1.1')

        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES

        self.ae.remove_supported_context(UID('1.2.840.10008.1.1'))
        assert len(self.ae.supported_contexts) == 0

    def test_remove_supported_context_sop_class(self):
        """Tests for AE.remove_supported_context using SOPClass."""
        self.ae.add_supported_context(RTImageStorage)

        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES

        self.ae.remove_supported_context(RTImageStorage)
        assert len(self.ae.supported_contexts) == 0

    def test_remove_supported_context_default(self):
        """Tests for AE.remove_supported_context with default transfers."""
        self.ae.add_supported_context('1.2.840.10008.1.1')

        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert len(context.transfer_syntax) == 4

        # Omitting the transfer syntax removes the whole context
        self.ae.remove_supported_context('1.2.840.10008.1.1')
        assert len(self.ae.supported_contexts) == 0

    def test_remove_supported_context_single_transfer(self):
        """Tests for AE.remove_supported_context with single transfer."""
        self.ae.add_supported_context('1.2.840.10008.1.1')

        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert len(context.transfer_syntax) == 4

        # A single transfer syntax may be passed without a list
        self.ae.remove_supported_context('1.2.840.10008.1.1', DEFAULT_TRANSFER_SYNTAXES[0])
        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]

    def test_remove_supported_context_partial(self):
        """Tests for AE.remove_supported_context with partial transfers."""
        # Test singular
        self.ae.add_supported_context('1.2.840.10008.1.1')

        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert len(context.transfer_syntax) == 4

        self.ae.remove_supported_context('1.2.840.10008.1.1',
                                         ['1.2.840.10008.1.2'])
        assert len(self.ae.supported_contexts) == 1
        context = self.ae.supported_contexts[0]
        assert len(context.transfer_syntax) == 3
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
        assert context.abstract_syntax == '1.2.840.10008.1.1'

        # Test multiple - only the matching context is affected
        self.ae.add_supported_context('1.2.840.10008.1.1')
        self.ae.add_supported_context(RTImageStorage)

        self.ae.remove_supported_context('1.2.840.10008.1.1',
                                         ['1.2.840.10008.1.2'])
        assert len(self.ae.supported_contexts) == 2
        context = self.ae.supported_contexts[0]
        assert len(context.transfer_syntax) == 3
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert self.ae.supported_contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES

    def test_remove_supported_context_all(self):
        """Tests for AE.remove_supported_context with all transfers."""
        self.ae.add_supported_context('1.2.840.10008.1.1')

        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert len(context.transfer_syntax) == 4

        # Test singular
        self.ae.remove_supported_context('1.2.840.10008.1.1',
                                         DEFAULT_TRANSFER_SYNTAXES)
        assert len(self.ae.supported_contexts) == 0

        # Test multiple
        self.ae.add_supported_context('1.2.840.10008.1.1')
        self.ae.add_supported_context(RTImageStorage)

        self.ae.remove_supported_context('1.2.840.10008.1.1',
                                         DEFAULT_TRANSFER_SYNTAXES)
        context = self.ae.supported_contexts[0]
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'

    def test_remove_supported_context_all_plus(self):
        """Test remove_supported_context with extra transfers"""
        # Removing a superset of the transfer syntaxes removes the context
        tsyntax = DEFAULT_TRANSFER_SYNTAXES[:]
        tsyntax.append('1.2.3')

        self.ae.add_supported_context('1.2.840.10008.1.1')

        context = self.ae.supported_contexts[0]
        assert context.abstract_syntax == '1.2.840.10008.1.1'
        assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
        assert len(context.transfer_syntax) == 4

        self.ae.remove_supported_context('1.2.840.10008.1.1', tsyntax)
        assert len(self.ae.supported_contexts) == 0

    def test_scu_role(self):
        """Test add_supported_context with scu_role parameter."""
        self.ae.add_supported_context('1.2.3')
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is None

        # Fixed: was 'self.ae.supported_context = []' (typo) which set a
        # useless attribute instead of clearing the supported contexts
        self.ae.supported_contexts = []
        self.ae.add_supported_context('1.2.3', scu_role=None)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is None

        self.ae.supported_contexts = []
        self.ae.add_supported_context('1.2.3', scu_role=True)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is True
        assert context.scp_role is None

        self.ae.supported_contexts = []
        self.ae.add_supported_context('1.2.3', scu_role=False)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is False
        assert context.scp_role is None

    def test_scu_role_update(self):
        """Test updating add_supported_context with scu_role parameter."""
        # Re-adding the same abstract syntax updates the context's role
        self.ae.add_supported_context('1.2.3')
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is None

        self.ae.add_supported_context('1.2.3', scu_role=None)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is None

        self.ae.add_supported_context('1.2.3', scu_role=True)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is True
        assert context.scp_role is None

        self.ae.add_supported_context('1.2.3', scu_role=False)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is False
        assert context.scp_role is None

    def test_scu_role_raises(self):
        """Test add_supported_context raises if scu_role wrong type."""
        with pytest.raises(TypeError, match=""):
            self.ae.add_supported_context('1.2.3', scu_role='abc')

        assert self.ae.supported_contexts == []

    def test_scp_role(self):
        """Test add_supported_context with scu_role parameter."""
        self.ae.add_supported_context('1.2.3')
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is None

        # Fixed: was 'self.ae.supported_context = []' (typo) which set a
        # useless attribute instead of clearing the supported contexts
        self.ae.supported_contexts = []
        self.ae.add_supported_context('1.2.3', scp_role=None)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is None

        self.ae.supported_contexts = []
        self.ae.add_supported_context('1.2.3', scp_role=True)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is True

        self.ae.supported_contexts = []
        self.ae.add_supported_context('1.2.3', scp_role=False)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is False

    def test_scp_role_update(self):
        """Test updating add_supported_context with scp_role parameter."""
        # Re-adding the same abstract syntax updates the context's role
        self.ae.add_supported_context('1.2.3')
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is None

        self.ae.add_supported_context('1.2.3', scp_role=None)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is None

        self.ae.add_supported_context('1.2.3', scp_role=True)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is True

        self.ae.add_supported_context('1.2.3', scp_role=False)
        context = self.ae.supported_contexts[0]
        assert context.scu_role is None
        assert context.scp_role is False

    def test_scp_role_raises(self):
        """Test add_supported_context raises if scp_role wrong type."""
        with pytest.raises(TypeError, match=""):
            self.ae.add_supported_context('1.2.3', scp_role='abc')

        assert self.ae.supported_contexts == []
class TestAERequestedPresentationContexts:
"""Tests for AE's presentation contexts when acting as an SCU"""
    def setup(self):
        # Fresh AE for every test so requested contexts don't leak between tests
        self.ae = AE()
def test_add_requested_context_str(self):
"""Tests for AE.add_requested_context using str."""
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.context_id is None
def test_add_requested_context_sop_class(self):
"""Tests for AE.add_requested_context using SOPClass."""
self.ae.add_requested_context(RTImageStorage)
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_uid(self):
"""Tests for AE.add_requested_context using UID."""
self.ae.add_requested_context(UID('1.2.840.10008.1.1'))
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_duplicate(self):
"""Test AE.add_requested_context using a duplicate UID."""
self.ae.add_requested_context(UID('1.2.840.10008.1.1'))
self.ae.add_requested_context(UID('1.2.840.10008.1.1'))
contexts = self.ae.requested_contexts
assert len(contexts) == 2
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert contexts[1].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_duplicate_multi(self):
"""Tests for AE.add_requested_context using a duplicate UID."""
self.ae.add_requested_context('1.2.840.10008.1.1',
[DEFAULT_TRANSFER_SYNTAXES[0]])
self.ae.add_requested_context('1.2.840.10008.1.1',
DEFAULT_TRANSFER_SYNTAXES[1:])
contexts = self.ae.requested_contexts
assert len(contexts) == 2
assert contexts[0].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[0].transfer_syntax == [DEFAULT_TRANSFER_SYNTAXES[0]]
assert contexts[1].abstract_syntax == '1.2.840.10008.1.1'
assert contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
    def test_add_supported_context_transfer_single(self):
        """Test adding a single transfer syntax without a list"""
        # NOTE(review): despite the name this tests *requested* contexts;
        # the name looks copy-pasted from the supported-context test class.
        self.ae.add_requested_context('1.2', '1.3')
        contexts = self.ae.requested_contexts
        assert len(contexts) == 1
        assert contexts[0].abstract_syntax == '1.2'
        assert contexts[0].transfer_syntax == ['1.3']

        # A second add creates a new context rather than merging
        self.ae.add_requested_context('1.2', UID('1.4'))
        contexts = self.ae.requested_contexts
        assert len(contexts) == 2
        assert contexts[1].abstract_syntax == '1.2'
        assert contexts[1].transfer_syntax == ['1.4']
def test_add_requested_context_duplicate_transfer(self):
"""Test add_requested_context using duplicate transfer syntaxes"""
self.ae.add_requested_context('1.2', ['1.3', '1.3'])
contexts = self.ae.requested_contexts
assert contexts[0].transfer_syntax == ['1.3']
def test_add_requested_context_private_abs(self):
"""Test AE.add_requested_context with a private abstract syntax"""
self.ae.add_requested_context('1.2.3.4')
contexts = self.ae.requested_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.3.4'
assert contexts[0].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
def test_add_requested_context_private_tran(self):
"""Test AE.add_requested_context with a private transfer syntax"""
self.ae.add_requested_context('1.2.3.4',
['1.2.3', '1.2.840.10008.1.1'])
contexts = self.ae.requested_contexts
assert len(contexts) == 1
assert contexts[0].abstract_syntax == '1.2.3.4'
assert contexts[0].transfer_syntax == ['1.2.3', '1.2.840.10008.1.1']
def test_add_requested_context_more_128_raises(self):
"""Test adding more than 128 presentation contexts"""
for ii in range(128):
self.ae.add_requested_context(str(ii))
assert len(self.ae.requested_contexts) == 128
with pytest.raises(ValueError):
self.ae.add_requested_context('129')
assert len(self.ae.requested_contexts) == 128
def test_requested_contexts_setter(self):
"""Test the AE.requested_contexts property setter."""
context = build_context('1.2.840.10008.1.1')
self.ae.requested_contexts = [context]
contexts = self.ae.requested_contexts
assert len(contexts) == 1
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert context.context_id is None
def test_requested_contexts_empty(self):
"""Test the setting requested_contexts to an empty list."""
context = build_context('1.2.840.10008.1.1')
self.ae.requested_contexts = [context]
assert len(self.ae.requested_contexts) == 1
self.ae.requested_contexts = []
assert len(self.ae.requested_contexts) == 0
def test_requested_contexts_setter_raises(self):
"""Test the AE.requested_contexts property raises if not context."""
with pytest.raises(ValueError):
self.ae.requested_contexts = ['1.2.3']
def test_requested_contexts_not_sorted(self):
"""Test that requested_contexts returns contexts in supplied order."""
self.ae.add_requested_context('1.2.3.4')
self.ae.add_requested_context('1.2.3.5')
asyntaxes = [
cntx.abstract_syntax for cntx in self.ae.requested_contexts
]
assert asyntaxes == ['1.2.3.4', '1.2.3.5']
self.ae.add_requested_context('0.1.2.3')
self.ae.add_requested_context('2.1.2.3')
asyntaxes = [
cntx.abstract_syntax for cntx in self.ae.requested_contexts
]
assert asyntaxes == ['1.2.3.4', '1.2.3.5', '0.1.2.3', '2.1.2.3']
def test_requested_contexts_more_128(self):
"""Test setting requested_contexts with more than 128 contexts."""
contexts = []
for ii in range(128):
contexts.append(build_context(str(ii)))
self.ae.requested_contexts = contexts
assert len(self.ae.requested_contexts) == 128
contexts.append(build_context('129'))
with pytest.raises(ValueError):
self.ae.requested_contexts = contexts
def test_remove_requested_context_str(self):
"""Tests for AE.remove_requested_context using str."""
# Test singular
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_requested_context('1.2.840.10008.1.1')
assert len(self.ae.requested_contexts) == 0
# Test multiple
self.ae.add_requested_context('1.2.840.10008.1.1')
self.ae.add_requested_context('1.2.840.10008.1.1', ['1.2.3.4'])
self.ae.add_requested_context('1.2.840.10008.1.4', ['1.2.3.4'])
assert len(self.ae.requested_contexts) == 3
self.ae.remove_requested_context('1.2.840.10008.1.1')
assert len(self.ae.requested_contexts) == 1
for context in self.ae.requested_contexts:
assert context.abstract_syntax != '1.2.840.10008.1.1'
def test_remove_requested_context_uid(self):
"""Tests for AE.remove_requested_context using UID."""
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_requested_context(UID('1.2.840.10008.1.1'))
assert len(self.ae.requested_contexts) == 0
def test_remove_requested_context_sop_class(self):
"""Tests for AE.remove_requested_context using SOPClass."""
self.ae.add_requested_context(RTImageStorage)
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
self.ae.remove_requested_context(RTImageStorage)
assert len(self.ae.requested_contexts) == 0
def test_remove_requested_context_default(self):
"""Tests for AE.remove_requested_context with default transfers."""
self.ae.add_requested_context('1.2.840.10008.1.1')
context = self.ae.requested_contexts[0]
assert context.abstract_syntax == '1.2.840.10008.1.1'
assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
assert len(context.transfer_syntax) == 4
self.ae.remove_requested_context('1.2.840.10008.1.1')
assert len(self.ae.requested_contexts) == 0
def test_remove_requested_context_single(self):
    """Tests for AE.remove_requested_context with single transfer."""
    self.ae.add_requested_context('1.2.840.10008.1.1')
    context = self.ae.requested_contexts[0]
    assert context.abstract_syntax == '1.2.840.10008.1.1'
    assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
    assert len(context.transfer_syntax) == 4
    # Passing a single transfer syntax (not a list) prunes only that syntax,
    # leaving the context in place with the remaining syntaxes.
    self.ae.remove_requested_context('1.2.840.10008.1.1', DEFAULT_TRANSFER_SYNTAXES[0])
    context = self.ae.requested_contexts[0]
    assert context.abstract_syntax == '1.2.840.10008.1.1'
    assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
def test_remove_requested_context_partial(self):
    """Tests for AE.remove_requested_context with partial transfers."""
    # Test singular
    self.ae.add_requested_context('1.2.840.10008.1.1')
    context = self.ae.requested_contexts[0]
    assert context.abstract_syntax == '1.2.840.10008.1.1'
    assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
    assert len(context.transfer_syntax) == 4
    self.ae.remove_requested_context('1.2.840.10008.1.1',
                                     ['1.2.840.10008.1.2'])
    assert len(self.ae.requested_contexts) == 1
    context = self.ae.requested_contexts[0]
    assert len(context.transfer_syntax) == 3
    assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
    assert context.abstract_syntax == '1.2.840.10008.1.1'
    self.ae.remove_requested_context('1.2.840.10008.1.1')
    assert len(self.ae.requested_contexts) == 0
    # Test multiple
    self.ae.add_requested_context('1.2.840.10008.1.1')
    self.ae.add_requested_context(RTImageStorage)
    self.ae.add_requested_context('1.2.840.10008.1.1', ['1.2.3.4'])
    # Removing a syntax leaves contexts that don't include it untouched
    # (the '1.2.3.4'-only context keeps its abstract syntax below).
    self.ae.remove_requested_context('1.2.840.10008.1.1',
                                     ['1.2.840.10008.1.2'])
    assert len(self.ae.requested_contexts) == 3
    context = self.ae.requested_contexts[0]
    assert len(context.transfer_syntax) == 3
    assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES[1:]
    assert context.abstract_syntax == '1.2.840.10008.1.1'
    assert self.ae.requested_contexts[1].transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
    assert self.ae.requested_contexts[2].transfer_syntax == ['1.2.3.4']
    assert self.ae.requested_contexts[2].abstract_syntax == '1.2.840.10008.1.1'
    self.ae.remove_requested_context('1.2.840.10008.1.1')
    assert len(self.ae.requested_contexts) == 1
    assert self.ae.requested_contexts[0].abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
def test_remove_requested_context_all(self):
    """Tests for AE.remove_requested_context with all transfers."""
    self.ae.add_requested_context('1.2.840.10008.1.1')
    context = self.ae.requested_contexts[0]
    assert context.abstract_syntax == '1.2.840.10008.1.1'
    assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
    assert len(context.transfer_syntax) == 4
    # Test singular
    self.ae.remove_requested_context('1.2.840.10008.1.1',
                                     DEFAULT_TRANSFER_SYNTAXES)
    assert len(self.ae.requested_contexts) == 0
    # Test multiple
    self.ae.add_requested_context('1.2.840.10008.1.1',
                                  [DEFAULT_TRANSFER_SYNTAXES[0]])
    self.ae.add_requested_context('1.2.840.10008.1.1',
                                  DEFAULT_TRANSFER_SYNTAXES[1:])
    self.ae.add_requested_context(RTImageStorage)
    # Removing every default syntax deletes both matching contexts,
    # leaving only the RTImageStorage one.
    self.ae.remove_requested_context('1.2.840.10008.1.1',
                                     DEFAULT_TRANSFER_SYNTAXES)
    assert len(self.ae.requested_contexts) == 1
    context = self.ae.requested_contexts[0]
    assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
    assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
def test_remove_requested_context_all_plus(self):
    """Test remove_requested_context with extra transfers."""
    # A strict superset of the default transfer syntaxes.
    tsyntax = DEFAULT_TRANSFER_SYNTAXES[:]
    tsyntax.append('1.2.3')
    # Test singular
    self.ae.add_requested_context('1.2.840.10008.1.1')
    context = self.ae.requested_contexts[0]
    assert context.abstract_syntax == '1.2.840.10008.1.1'
    assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
    assert len(context.transfer_syntax) == 4
    self.ae.remove_requested_context('1.2.840.10008.1.1', tsyntax)
    assert len(self.ae.requested_contexts) == 0
    # Test multiple
    self.ae.add_requested_context('1.2.840.10008.1.1',
                                  [DEFAULT_TRANSFER_SYNTAXES[0]])
    self.ae.add_requested_context('1.2.840.10008.1.1',
                                  DEFAULT_TRANSFER_SYNTAXES[1:])
    self.ae.add_requested_context(RTImageStorage)
    # Extra (unknown) syntaxes in the removal list are tolerated.
    self.ae.remove_requested_context('1.2.840.10008.1.1', tsyntax)
    assert len(self.ae.requested_contexts) == 1
    context = self.ae.requested_contexts[0]
    assert context.transfer_syntax == DEFAULT_TRANSFER_SYNTAXES
    assert context.abstract_syntax == '1.2.840.10008.5.1.4.1.1.481.1'
|
en
| 0.612743
|
Tests for the ae module. # debug_logger() Test binding events to the blocking AssociationServer. Tests for AE.make_server() Run prior to each test Clear any active threads Test warning if using bytes to set an AE title. Tests for AE.start_server() Run prior to each test Clear any active threads Test the `ae_title` keyword parameter. Test the `contexts` keyword parameter. Check verification SCP Run prior to each test Clear any active threads Test stopping the SCP with keyboard Test starting with no contexts raises Test that a warning is given if scu_role and scp_role bad. Test str output for default AE Tests for AE presentation contexts when running as an SCU Run prior to each test Clear any active threads Test that AE.associate doesn't modify the supplied contexts # Test AE.requested_contexts # Test associate(contexts=...) Test that AE.associate raises exception if no requested contexts Check AE ACSE timeout change produces good value Check AE DIMSE timeout change produces good value Check AE network timeout change produces good value Check AE connection timeout change produces good value Test changing acse_timeout with active associations. Test changing dimse_timeout with active associations. Test changing network_timeout with active associations. Test changing connection_timeout with active associations. Run prior to each test Clear any active threads Check SCU Association with SCP Check Association has correct max PDUs on either end # Check 0 max pdu value - max PDU value maps to 0x10000 internally Check that the Association timeouts are being set correctly and work # Hit the network timeout # FIXME: If this is `0` we can process an ABORT primitive where # we expect an ASSOCIATION primitive. # `0` is an invalid value # The host exists and is routable, but there is a middlebox ignoring # the initial TCP SYN. 
# * ACSE timeout does not start until connection timeout completes # * Logs indicate that we hit the timeout case Test that using start works OK with timeout. # Multiple release/association in a sort time causes an OSError as # the port is still in use due to the use of select.select() with # a timeout. Fixed by using socket.shutdown in stop() Test warning if using bytes to set an AE title. Test bad associate call Test invalid AE.ae_title Check AE title change produces good value Test warning if using bytes to set an AE title. Check the implementation version name and class UID setters Check AE maximum association change produces good value Check AE maximum pdu size change produces good value Test AE.require_calling_aet Test warning if using bytes to set an AE title. Test AE.require_called_aet Check AE require calling aet change produces good value Check AE require called aet change produces good value Test string output Test the default implementation class uid Test the default implementation version name Test implementation_version_name Test implementation_class_uid Test invalid AE.ae_title Run prior to each test Clear any active threads Association releases OK # Test N associate/release cycles Association aborts OK # Test N associate/abort cycles Tests for AE's presentation contexts when acting as an SCP Tests for AE.add_supported_context using str. Tests for AE.add_supported_context using SOPClass. Tests for AE.add_supported_context using UID. Tests for AE.add_supported_context using a duplicate UID. Test adding a single transfer syntax without a list Test adding duplicate transfer syntaxes. Tests for AE.add_supported_context using a duplicate UID. Test AE.add_supported_context with a private abstract syntax Test AE.add_supported_context with a private transfer syntax Test adding more than 128 presentation contexts Test the AE.supported_contexts property setter. Test the setting supported_contexts to an empty list. 
Test the AE.supported_contexts property raises if not context. Test that the supported_contexts returns contexts in order. Test setting supported_contexts with more than 128 contexts. Tests for AE.remove_supported_context using str. # Test multiple Tests for AE.remove_supported_context using UID. Tests for AE.remove_supported_context using SOPClass. Tests for AE.remove_supported_context with default transfers. Tests for AE.remove_supported_context with single transfer. Tests for AE.remove_supported_context with partial transfers. # Test singular # Test multiple Tests for AE.remove_supported_context with all transfers. # Test singular # Test multiple Test remove_supported_context with extra transfers Test add_supported_context with scu_role parameter. Test updating add_supported_context with scu_role parameter. Test add_supported_context raises if scu_role wrong type. Test add_supported_context with scu_role parameter. Test updating add_supported_context with scp_role parameter. Test add_supported_context raises if scp_role wrong type. Tests for AE's presentation contexts when acting as an SCU Tests for AE.add_requested_context using str. Tests for AE.add_requested_context using SOPClass. Tests for AE.add_requested_context using UID. Test AE.add_requested_context using a duplicate UID. Tests for AE.add_requested_context using a duplicate UID. Test adding a single transfer syntax without a list Test add_requested_context using duplicate transfer syntaxes Test AE.add_requested_context with a private abstract syntax Test AE.add_requested_context with a private transfer syntax Test adding more than 128 presentation contexts Test the AE.requested_contexts property setter. Test the setting requested_contexts to an empty list. Test the AE.requested_contexts property raises if not context. Test that requested_contexts returns contexts in supplied order. Test setting requested_contexts with more than 128 contexts. Tests for AE.remove_requested_context using str. 
# Test singular # Test multiple Tests for AE.remove_requested_context using UID. Tests for AE.remove_requested_context using SOPClass. Tests for AE.remove_requested_context with default transfers. Tests for AE.remove_requested_context with single transfer. Tests for AE.remove_supported_context with partial transfers. # Test singular # Test multiple Tests for AE.remove_requested_context with all transfers. # Test singular # Test multiple Test remove_requested_context with extra transfers # Test singular # Test multiple
| 2.109421
| 2
|
source/soca/cluster_web_ui/app.py
|
SystemFabricWorks/scale-out-computing-on-aws
| 0
|
6629288
|
import logging.config
from flask import Flask, redirect, jsonify
from flask_restful import Api
from flask_session import Session
from flask_sqlalchemy import SQLAlchemy
from api.v1.scheduler.pbspro.job import Job
from api.v1.scheduler.pbspro.jobs import Jobs
from api.v1.scheduler.pbspro.queue import Queue
from api.v1.scheduler.pbspro.queues import Queues
from api.v1.ldap.sudo import Sudo
from api.v1.ldap.ids import Ids
from api.v1.ldap.user import User
from api.v1.ldap.users import Users
from api.v1.user.reset_password import Reset
from api.v1.user.api_key import ApiKey
from api.v1.ldap.group import Group
from api.v1.ldap.groups import Groups
from api.v1.ldap.authenticate import Authenticate
from api.v1.system.files import Files
from api.v1.system.aws_price import AwsPrice
from views.index import index
from views.ssh import ssh
from views.sftp import sftp
from views.my_api_key import my_api_key
from views.admin.users import admin_users
from views.admin.queues import admin_queues
from views.admin.groups import admin_groups
from views.admin.applications import admin_applications
from views.my_jobs import my_jobs
from views.my_activity import my_activity
from views.dashboard import dashboard
from views.remote_desktop import remote_desktop
from views.my_account import my_account
from views.my_files import my_files
from views.submit_job import submit_job
from flask_wtf.csrf import CSRFProtect
from config import app_config
from models import db
from flask_swagger import swagger
from swagger_ui import api_doc
import config
from apscheduler.schedulers.background import BackgroundScheduler
import glob
import os
# Application and CSRF setup; REST resources are exempted from CSRF below.
app = Flask(__name__)
csrf = CSRFProtect(app)
csrf.exempt("api")
# Register routes
app.config.from_object(app_config)
# Add API (all flask-restful resources bypass CSRF protection)
api = Api(app, decorators=[csrf.exempt])
# LDAP
api.add_resource(Sudo, '/api/ldap/sudo')
api.add_resource(Authenticate, '/api/ldap/authenticate')
api.add_resource(Ids, '/api/ldap/ids')
api.add_resource(User, '/api/ldap/user')
api.add_resource(Users, '/api/ldap/users')
api.add_resource(Group, '/api/ldap/group')
api.add_resource(Groups, '/api/ldap/groups')
# Users
api.add_resource(ApiKey, '/api/user/api_key')
api.add_resource(Reset, '/api/user/reset_password')
# System
api.add_resource(Files, '/api/system/files')
api.add_resource(AwsPrice, '/api/system/aws_price')
# Scheduler
api.add_resource(Job, '/api/scheduler/job')
api.add_resource(Jobs, '/api/scheduler/jobs')
api.add_resource(Queue, '/api/scheduler/queue')
api.add_resource(Queues, '/api/scheduler/queues')
# Register views (one blueprint per UI page)
app.register_blueprint(index)
app.register_blueprint(my_api_key)
app.register_blueprint(my_account)
app.register_blueprint(admin_users)
app.register_blueprint(admin_queues)
app.register_blueprint(admin_groups)
app.register_blueprint(admin_applications)
app.register_blueprint(my_files)
app.register_blueprint(submit_job)
app.register_blueprint(ssh)
app.register_blueprint(sftp)
app.register_blueprint(my_jobs)
app.register_blueprint(remote_desktop)
app.register_blueprint(dashboard)
app.register_blueprint(my_activity)
@app.template_filter('folder_name_truncate')
def folder_name_truncate(folder_name):
    """Insert hyphen break points into long folder names.

    Jinja2 filter used by /my_files: names of 20+ characters get a '-'
    inserted near positions 20/40/60 (skipped when a hyphen is already
    adjacent) so the browser can wrap them.
    """
    if len(folder_name) < 20:
        return folder_name
    for pos in (20, 40, 60):
        try:
            # Skip the insert when a hyphen already sits at or next to pos.
            if "-" not in (folder_name[pos], folder_name[pos - 1], folder_name[pos + 1]):
                folder_name = folder_name[:pos] + '-' + folder_name[pos:]
        except IndexError:
            # Name shorter than this break point: nothing more to do.
            break
    return folder_name
# NOTE(review): the @app.template_filter('folder_name_truncate') decorator
# above already registers this filter; this manual registration is redundant
# (though harmless).
app.jinja_env.filters['folder_name_truncate'] = folder_name_truncate
@app.route("/api/swagger.json")
def spec():
    """Build and return the Swagger/OpenAPI description of the SOCA web API."""
    api_spec = swagger(app)
    info = api_spec['info']
    info['version'] = "1.0"
    info['title'] = "SOCA Web API"
    info['description'] = (
        "<h3>Documentation for your Scale-Out Computing on AWS (SOCA) API</h3><hr>"
        "<li>User and Admin Documentation: https://awslabs.github.io/scale-out-computing-on-aws/</li>"
        "<li>CodeBase: https://github.com/awslabs/scale-out-computing-on-aws</li>"
    )
    return jsonify(api_spec)
@app.errorhandler(404)
def page_not_found(e):
    """404 handler: redirect any unknown URL back to the home page."""
    return redirect('/')
# Manage logger: all API activity goes to a nightly-rotated file.
dict_config = {
    'version': 1,
    'formatters': {
        'default': {
            'format': '[%(asctime)s] [%(levelname)s] [%(module)s] [%(message)s]',
        }
    },
    'handlers': {
        'default': {
            'level': 'DEBUG',
            'formatter': 'default',
            # Rotate at midnight, keeping DAILY_BACKUP_COUNT old log files.
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'filename': "soca_api.log",
            'when': "midnight",
            'interval': 1,
            'backupCount': config.Config.DAILY_BACKUP_COUNT
        },
    },
    'loggers': {
        'api_log': {
            'handlers': ["default"],
            'level': 'DEBUG',
        },
    }
}
logger = logging.getLogger("api_log")
logging.config.dictConfig(dict_config)
# NOTE(review): Logger.addHandler expects a Handler instance, not a Logger —
# verify whether app.logger was meant to share api_log's handlers instead.
app.logger.addHandler(logger)
# Scheduled tasks
def clean_tmp_folders():
    """Hourly background job: purge generated files (zip downloads, ssh keys)
    from the tmp/ folders.

    Cleanup is best-effort: a file that vanished between the glob and the
    remove, or an unexpected directory entry, must not kill the scheduler job.
    """
    directories = ["tmp/zip_downloads/*", "tmp/ssh/*"]
    for directory in directories:
        logger.info("Remove files inside " + directory)
        for f in glob.glob(directory):
            try:
                os.remove(f)
            except OSError:
                # Entry disappeared or is a directory — log and keep going.
                logger.exception("Unable to remove " + f)
# Purge temp folders every hour in a background scheduler thread.
sched = BackgroundScheduler(daemon=False)
sched.add_job(clean_tmp_folders, 'interval', hours=1)
sched.start()
with app.app_context():
    # Create the application tables, then the server-side session store.
    db.init_app(app)
    db.create_all()
    app_session = Session(app)
    app_session.app.session_interface.db.create_all()
    app.config["SESSION_SQLALCHEMY"] = SQLAlchemy(app)
# Mount the Swagger UI at /api/doc, fed by the /api/swagger.json endpoint.
api_doc(app, config_url=config.Config.FLASK_ENDPOINT + "/api/swagger.json", url_prefix="/api/doc", title="SOCA API Documentation",)
if __name__ == '__main__':
    app.run()
|
import logging.config
from flask import Flask, redirect, jsonify
from flask_restful import Api
from flask_session import Session
from flask_sqlalchemy import SQLAlchemy
from api.v1.scheduler.pbspro.job import Job
from api.v1.scheduler.pbspro.jobs import Jobs
from api.v1.scheduler.pbspro.queue import Queue
from api.v1.scheduler.pbspro.queues import Queues
from api.v1.ldap.sudo import Sudo
from api.v1.ldap.ids import Ids
from api.v1.ldap.user import User
from api.v1.ldap.users import Users
from api.v1.user.reset_password import Reset
from api.v1.user.api_key import ApiKey
from api.v1.ldap.group import Group
from api.v1.ldap.groups import Groups
from api.v1.ldap.authenticate import Authenticate
from api.v1.system.files import Files
from api.v1.system.aws_price import AwsPrice
from views.index import index
from views.ssh import ssh
from views.sftp import sftp
from views.my_api_key import my_api_key
from views.admin.users import admin_users
from views.admin.queues import admin_queues
from views.admin.groups import admin_groups
from views.admin.applications import admin_applications
from views.my_jobs import my_jobs
from views.my_activity import my_activity
from views.dashboard import dashboard
from views.remote_desktop import remote_desktop
from views.my_account import my_account
from views.my_files import my_files
from views.submit_job import submit_job
from flask_wtf.csrf import CSRFProtect
from config import app_config
from models import db
from flask_swagger import swagger
from swagger_ui import api_doc
import config
from apscheduler.schedulers.background import BackgroundScheduler
import glob
import os
app = Flask(__name__)
csrf = CSRFProtect(app)
csrf.exempt("api")
# Register routes
app.config.from_object(app_config)
# Add API
api = Api(app, decorators=[csrf.exempt])
# LDAP
api.add_resource(Sudo, '/api/ldap/sudo')
api.add_resource(Authenticate, '/api/ldap/authenticate')
api.add_resource(Ids, '/api/ldap/ids')
api.add_resource(User, '/api/ldap/user')
api.add_resource(Users, '/api/ldap/users')
api.add_resource(Group, '/api/ldap/group')
api.add_resource(Groups, '/api/ldap/groups')
# Users
api.add_resource(ApiKey, '/api/user/api_key')
api.add_resource(Reset, '/api/user/reset_password')
# System
api.add_resource(Files, '/api/system/files')
api.add_resource(AwsPrice, '/api/system/aws_price')
# Scheduler
api.add_resource(Job, '/api/scheduler/job')
api.add_resource(Jobs, '/api/scheduler/jobs')
api.add_resource(Queue, '/api/scheduler/queue')
api.add_resource(Queues, '/api/scheduler/queues')
# Register views
app.register_blueprint(index)
app.register_blueprint(my_api_key)
app.register_blueprint(my_account)
app.register_blueprint(admin_users)
app.register_blueprint(admin_queues)
app.register_blueprint(admin_groups)
app.register_blueprint(admin_applications)
app.register_blueprint(my_files)
app.register_blueprint(submit_job)
app.register_blueprint(ssh)
app.register_blueprint(sftp)
app.register_blueprint(my_jobs)
app.register_blueprint(remote_desktop)
app.register_blueprint(dashboard)
app.register_blueprint(my_activity)
# Custom Jinja2 filters
@app.template_filter('folder_name_truncate')
def folder_name_truncate(folder_name):
# This make sure folders with long name on /my_files are displayed correctly
if folder_name.__len__() < 20:
return folder_name
else:
split_number = [20, 40, 60]
for number in split_number:
try:
if folder_name[number] != "-" and folder_name[number-1] != "-" and folder_name[number+1] != "-":
folder_name = folder_name[:number] + '-' + folder_name[number:]
except IndexError:
break
return folder_name
app.jinja_env.filters['folder_name_truncate'] = folder_name_truncate
@app.route("/api/swagger.json")
def spec():
swag = swagger(app)
swag['info']['version'] = "1.0"
swag['info']['title'] = "SOCA Web API"
swag['info']['description'] = "<h3>Documentation for your Scale-Out Computing on AWS (SOCA) API</h3><hr>" \
"<li>User and Admin Documentation: https://awslabs.github.io/scale-out-computing-on-aws/</li>" \
"<li>CodeBase: https://github.com/awslabs/scale-out-computing-on-aws</li>"
return jsonify(swag)
@app.errorhandler(404)
def page_not_found(e):
return redirect('/')
# Manage logger
dict_config = {
'version': 1,
'formatters': {
'default': {
'format': '[%(asctime)s] [%(levelname)s] [%(module)s] [%(message)s]',
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'formatter': 'default',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': "soca_api.log",
'when': "midnight",
'interval': 1,
'backupCount': config.Config.DAILY_BACKUP_COUNT
},
},
'loggers': {
'api_log': {
'handlers': ["default"],
'level': 'DEBUG',
},
}
}
logger = logging.getLogger("api_log")
logging.config.dictConfig(dict_config)
app.logger.addHandler(logger)
# Scheduled tasks
def clean_tmp_folders():
directories = ["tmp/zip_downloads/*", "tmp/ssh/*"]
for directory in directories:
logger.info("Remove files inside " + directory)
files = glob.glob(directory)
for f in files:
os.remove(f)
sched = BackgroundScheduler(daemon=False)
sched.add_job(clean_tmp_folders, 'interval', hours=1)
sched.start()
with app.app_context():
db.init_app(app)
db.create_all()
app_session = Session(app)
app_session.app.session_interface.db.create_all()
app.config["SESSION_SQLALCHEMY"] = SQLAlchemy(app)
api_doc(app, config_url=config.Config.FLASK_ENDPOINT + "/api/swagger.json", url_prefix="/api/doc", title="SOCA API Documentation",)
if __name__ == '__main__':
app.run()
|
en
| 0.773254
|
# Register routes # Add API # LDAP # Users # System # Scheduler # Register views # Custom Jinja2 filters # This make sure folders with long name on /my_files are displayed correctly # Manage logger # Scheduled tasks
| 1.509777
| 2
|
src/aceinna/framework/utils/resource.py
|
lihaiyong827/python-openimu
| 41
|
6629289
|
import os
import sys
import pkgutil
from ... import PACKAGE_NAME
def is_in_bundle():
    """Return truthy when running from a frozen (PyInstaller) bundle."""
    return getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS')
def is_dev_mode():
    """Return the dev-mode flag set on sys, or False when absent."""
    return getattr(sys, '__dev__', False)
def get_executor_path():
    """Resolve the application's working directory.

    Bundled build: the directory containing the executable.
    Dev mode (started from main.py): the current working directory.
    Otherwise: ~/<PACKAGE_NAME>, created on demand.
    """
    if is_in_bundle():
        return os.path.abspath(os.path.dirname(sys.executable))
    if is_dev_mode():
        return os.getcwd()
    path = os.path.join(os.path.expanduser('~'), PACKAGE_NAME)
    os.makedirs(path, exist_ok=True)
    return path
def get_content_from_bundle(package, path):
    """Load a data file via pkgutil, resolving it inside the bundle when
    frozen, otherwise relative to the 'aceinna' source package."""
    if is_in_bundle():
        return pkgutil.get_data(package, path)
    return pkgutil.get_data('aceinna', os.path.join(package, path))
|
import os
import sys
import pkgutil
from ... import PACKAGE_NAME
def is_in_bundle():
return hasattr(sys, 'frozen') and getattr(sys, 'frozen') and hasattr(sys, '_MEIPASS')
def is_dev_mode():
return hasattr(sys, '__dev__') and getattr(sys, '__dev__')
def get_executor_path():
if is_in_bundle():
path = os.path.abspath(os.path.dirname(sys.executable))
else:
if is_dev_mode(): # if start from main.py
path = os.getcwd()
else:
path = os.path.join(os.path.expanduser('~'), PACKAGE_NAME)
if not os.path.isdir(path):
os.makedirs(path,exist_ok=True)
return path
def get_content_from_bundle(package, path):
module_name = 'aceinna'
if is_in_bundle():
content = pkgutil.get_data(package, path)
else:
content = pkgutil.get_data(module_name, os.path.join(package, path))
return content
|
en
| 0.436953
|
# if start from main.py
| 2.103004
| 2
|
bot/cogs/defcon.py
|
crazygmr101/bot
| 1
|
6629290
|
<filename>bot/cogs/defcon.py
from __future__ import annotations
import logging
from collections import namedtuple
from datetime import datetime, timedelta
from enum import Enum
from discord import Colour, Embed, Member
from discord.ext.commands import Cog, Context, group
from bot.bot import Bot
from bot.cogs.moderation import ModLog
from bot.constants import Channels, Colours, Emojis, Event, Icons, Roles
from bot.decorators import with_role
log = logging.getLogger(__name__)
REJECTION_MESSAGE = """
Hi, {user} - Thanks for your interest in our server!
Due to a current (or detected) cyberattack on our community, we've limited access to the server for new accounts. Since
your account is relatively new, we're unable to provide access to the server at this time.
Even so, thanks for joining! We're very excited at the possibility of having you here, and we hope that this situation
will be resolved soon. In the meantime, please feel free to peruse the resources on our site at
<https://pythondiscord.com/>, and have a nice day!
"""
BASE_CHANNEL_TOPIC = "Python Discord Defense Mechanism"
class Action(Enum):
    """Defcon Action."""

    # Each member carries the mod-log icon, embed colour, and the extra
    # template text appended to the log message for that action.
    ActionInfo = namedtuple('LogInfoDetails', ['icon', 'color', 'template'])

    ENABLED = ActionInfo(Icons.defcon_enabled, Colours.soft_green, "**Days:** {days}\n\n")
    DISABLED = ActionInfo(Icons.defcon_disabled, Colours.soft_red, "")
    UPDATED = ActionInfo(Icons.defcon_updated, Colour.blurple(), "**Days:** {days}\n\n")
class Defcon(Cog):
    """Time-sensitive server defense mechanisms."""

    # Class-level defaults; overwritten per instance in __init__/sync_settings.
    days = None  # type: timedelta
    enabled = False  # type: bool

    def __init__(self, bot: Bot):
        self.bot = bot
        self.channel = None  # resolved to the defcon channel in sync_settings
        self.days = timedelta(days=0)
        # Pull persisted DEFCON state from the site API in the background.
        self.bot.loop.create_task(self.sync_settings())

    @property
    def mod_log(self) -> ModLog:
        """Get currently loaded ModLog cog instance."""
        return self.bot.get_cog("ModLog")

    async def sync_settings(self) -> None:
        """On cog load, try to synchronize DEFCON settings to the API."""
        await self.bot.wait_until_guild_available()
        self.channel = await self.bot.fetch_channel(Channels.defcon)
        try:
            response = await self.bot.api_client.get('bot/bot-settings/defcon')
            data = response['data']
        except Exception:  # Yikes!
            log.exception("Unable to get DEFCON settings!")
            await self.bot.get_channel(Channels.dev_log).send(
                f"<@&{Roles.admins}> **WARNING**: Unable to get DEFCON settings!"
            )
        else:
            if data["enabled"]:
                self.enabled = True
                self.days = timedelta(days=data["days"])
                log.info(f"DEFCON enabled: {self.days.days} days")
            else:
                self.enabled = False
                self.days = timedelta(days=0)
                log.info("DEFCON disabled")
            await self.update_channel_topic()

    @Cog.listener()
    async def on_member_join(self, member: Member) -> None:
        """If DEFCON is enabled, check newly joining users to see if they meet the account age threshold."""
        if self.enabled and self.days.days > 0:
            now = datetime.utcnow()
            if now - member.created_at < self.days:
                log.info(f"Rejecting user {member}: Account is too new and DEFCON is enabled")
                message_sent = False
                try:
                    await member.send(REJECTION_MESSAGE.format(user=member.mention))
                    message_sent = True
                except Exception:
                    # DM failure (e.g. closed DMs) must not prevent the kick.
                    log.exception(f"Unable to send rejection message to user: {member}")
                await member.kick(reason="DEFCON active, user is too new")
                self.bot.stats.incr("defcon.leaves")
                message = (
                    f"{member} (`{member.id}`) was denied entry because their account is too new."
                )
                if not message_sent:
                    message = f"{message}\n\nUnable to send rejection message via DM; they probably have DMs disabled."
                await self.mod_log.send_log_message(
                    Icons.defcon_denied, Colours.soft_red, "Entry denied",
                    message, member.avatar_url_as(static_format="png")
                )

    @group(name='defcon', aliases=('dc',), invoke_without_command=True)
    @with_role(Roles.admins, Roles.owners)
    async def defcon_group(self, ctx: Context) -> None:
        """Check the DEFCON status or run a subcommand."""
        await ctx.send_help(ctx.command)

    async def _defcon_action(self, ctx: Context, days: int, action: Action) -> None:
        """Providing a structured way to do an defcon action."""
        try:
            response = await self.bot.api_client.get('bot/bot-settings/defcon')
            data = response['data']
            if "enable_date" in data and action is Action.DISABLED:
                enabled = datetime.fromisoformat(data["enable_date"])
                delta = datetime.now() - enabled
                # NOTE(review): naive local time here vs. utcnow() in
                # on_member_join — confirm which the site stores.
                self.bot.stats.timing("defcon.enabled", delta)
        except Exception:
            # Stats emission is best-effort; failures are ignored on purpose.
            pass
        error = None
        try:
            await self.bot.api_client.put(
                'bot/bot-settings/defcon',
                json={
                    'name': 'defcon',
                    'data': {
                        # TODO: retrieve old days count
                        'days': days,
                        'enabled': action is not Action.DISABLED,
                        'enable_date': datetime.now().isoformat()
                    }
                }
            )
        except Exception as err:
            log.exception("Unable to update DEFCON settings.")
            error = err
        finally:
            # Always report the outcome (and any error) in-channel and in the mod log.
            await ctx.send(self.build_defcon_msg(action, error))
            await self.send_defcon_log(action, ctx.author, error)
            self.bot.stats.gauge("defcon.threshold", days)

    @defcon_group.command(name='enable', aliases=('on', 'e'))
    @with_role(Roles.admins, Roles.owners)
    async def enable_command(self, ctx: Context) -> None:
        """
        Enable DEFCON mode. Useful in a pinch, but be sure you know what you're doing!

        Currently, this just adds an account age requirement. Use !defcon days <int> to set how old an account must be,
        in days.
        """
        self.enabled = True
        await self._defcon_action(ctx, days=0, action=Action.ENABLED)
        await self.update_channel_topic()

    @defcon_group.command(name='disable', aliases=('off', 'd'))
    @with_role(Roles.admins, Roles.owners)
    async def disable_command(self, ctx: Context) -> None:
        """Disable DEFCON mode. Useful in a pinch, but be sure you know what you're doing!"""
        self.enabled = False
        await self._defcon_action(ctx, days=0, action=Action.DISABLED)
        await self.update_channel_topic()

    @defcon_group.command(name='status', aliases=('s',))
    @with_role(Roles.admins, Roles.owners)
    async def status_command(self, ctx: Context) -> None:
        """Check the current status of DEFCON mode."""
        embed = Embed(
            colour=Colour.blurple(), title="DEFCON Status",
            description=f"**Enabled:** {self.enabled}\n"
                        f"**Days:** {self.days.days}"
        )
        await ctx.send(embed=embed)

    @defcon_group.command(name='days')
    @with_role(Roles.admins, Roles.owners)
    async def days_command(self, ctx: Context, days: int) -> None:
        """Set how old an account must be to join the server, in days, with DEFCON mode enabled."""
        self.days = timedelta(days=days)
        self.enabled = True  # setting a threshold implicitly enables DEFCON
        await self._defcon_action(ctx, days=days, action=Action.UPDATED)
        await self.update_channel_topic()

    async def update_channel_topic(self) -> None:
        """Update the #defcon channel topic with the current DEFCON status."""
        if self.enabled:
            day_str = "days" if self.days.days > 1 else "day"
            new_topic = f"{BASE_CHANNEL_TOPIC}\n(Status: Enabled, Threshold: {self.days.days} {day_str})"
        else:
            new_topic = f"{BASE_CHANNEL_TOPIC}\n(Status: Disabled)"
        # Ignore our own channel-update event so the mod log doesn't report it.
        self.mod_log.ignore(Event.guild_channel_update, Channels.defcon)
        await self.channel.edit(topic=new_topic)

    def build_defcon_msg(self, action: Action, e: Exception = None) -> str:
        """Build in-channel response string for DEFCON action."""
        if action is Action.ENABLED:
            msg = f"{Emojis.defcon_enabled} DEFCON enabled.\n\n"
        elif action is Action.DISABLED:
            msg = f"{Emojis.defcon_disabled} DEFCON disabled.\n\n"
        elif action is Action.UPDATED:
            msg = (
                f"{Emojis.defcon_updated} DEFCON days updated; accounts must be {self.days.days} "
                f"day{'s' if self.days.days > 1 else ''} old to join the server.\n\n"
            )
        if e:
            msg += (
                "**There was a problem updating the site** - This setting may be reverted when the bot restarts.\n\n"
                f"```py\n{e}\n```"
            )
        return msg

    async def send_defcon_log(self, action: Action, actor: Member, e: Exception = None) -> None:
        """Send log message for DEFCON action."""
        info = action.value
        log_msg: str = (
            f"**Staffer:** {actor.mention} {actor} (`{actor.id}`)\n"
            f"{info.template.format(days=self.days.days)}"
        )
        status_msg = f"DEFCON {action.name.lower()}"
        if e:
            log_msg += (
                "**There was a problem updating the site** - This setting may be reverted when the bot restarts.\n\n"
                f"```py\n{e}\n```"
            )
        await self.mod_log.send_log_message(info.icon, info.color, status_msg, log_msg)
def setup(bot: Bot) -> None:
    """Load the Defcon cog (discord.py extension entry point)."""
    bot.add_cog(Defcon(bot))
|
<filename>bot/cogs/defcon.py
from __future__ import annotations
import logging
from collections import namedtuple
from datetime import datetime, timedelta
from enum import Enum
from discord import Colour, Embed, Member
from discord.ext.commands import Cog, Context, group
from bot.bot import Bot
from bot.cogs.moderation import ModLog
from bot.constants import Channels, Colours, Emojis, Event, Icons, Roles
from bot.decorators import with_role
log = logging.getLogger(__name__)
REJECTION_MESSAGE = """
Hi, {user} - Thanks for your interest in our server!
Due to a current (or detected) cyberattack on our community, we've limited access to the server for new accounts. Since
your account is relatively new, we're unable to provide access to the server at this time.
Even so, thanks for joining! We're very excited at the possibility of having you here, and we hope that this situation
will be resolved soon. In the meantime, please feel free to peruse the resources on our site at
<https://pythondiscord.com/>, and have a nice day!
"""
BASE_CHANNEL_TOPIC = "Python Discord Defense Mechanism"
class Action(Enum):
"""Defcon Action."""
ActionInfo = namedtuple('LogInfoDetails', ['icon', 'color', 'template'])
ENABLED = ActionInfo(Icons.defcon_enabled, Colours.soft_green, "**Days:** {days}\n\n")
DISABLED = ActionInfo(Icons.defcon_disabled, Colours.soft_red, "")
UPDATED = ActionInfo(Icons.defcon_updated, Colour.blurple(), "**Days:** {days}\n\n")
class Defcon(Cog):
"""Time-sensitive server defense mechanisms."""
days = None # type: timedelta
enabled = False # type: bool
def __init__(self, bot: Bot):
self.bot = bot
self.channel = None
self.days = timedelta(days=0)
self.bot.loop.create_task(self.sync_settings())
@property
def mod_log(self) -> ModLog:
"""Get currently loaded ModLog cog instance."""
return self.bot.get_cog("ModLog")
async def sync_settings(self) -> None:
"""On cog load, try to synchronize DEFCON settings to the API."""
await self.bot.wait_until_guild_available()
self.channel = await self.bot.fetch_channel(Channels.defcon)
try:
response = await self.bot.api_client.get('bot/bot-settings/defcon')
data = response['data']
except Exception: # Yikes!
log.exception("Unable to get DEFCON settings!")
await self.bot.get_channel(Channels.dev_log).send(
f"<@&{Roles.admins}> **WARNING**: Unable to get DEFCON settings!"
)
else:
if data["enabled"]:
self.enabled = True
self.days = timedelta(days=data["days"])
log.info(f"DEFCON enabled: {self.days.days} days")
else:
self.enabled = False
self.days = timedelta(days=0)
log.info("DEFCON disabled")
await self.update_channel_topic()
@Cog.listener()
async def on_member_join(self, member: Member) -> None:
"""If DEFCON is enabled, check newly joining users to see if they meet the account age threshold."""
if self.enabled and self.days.days > 0:
now = datetime.utcnow()
if now - member.created_at < self.days:
log.info(f"Rejecting user {member}: Account is too new and DEFCON is enabled")
message_sent = False
try:
await member.send(REJECTION_MESSAGE.format(user=member.mention))
message_sent = True
except Exception:
log.exception(f"Unable to send rejection message to user: {member}")
await member.kick(reason="DEFCON active, user is too new")
self.bot.stats.incr("defcon.leaves")
message = (
f"{member} (`{member.id}`) was denied entry because their account is too new."
)
if not message_sent:
message = f"{message}\n\nUnable to send rejection message via DM; they probably have DMs disabled."
await self.mod_log.send_log_message(
Icons.defcon_denied, Colours.soft_red, "Entry denied",
message, member.avatar_url_as(static_format="png")
)
@group(name='defcon', aliases=('dc',), invoke_without_command=True)
@with_role(Roles.admins, Roles.owners)
async def defcon_group(self, ctx: Context) -> None:
"""Check the DEFCON status or run a subcommand."""
await ctx.send_help(ctx.command)
async def _defcon_action(self, ctx: Context, days: int, action: Action) -> None:
"""Providing a structured way to do an defcon action."""
try:
response = await self.bot.api_client.get('bot/bot-settings/defcon')
data = response['data']
if "enable_date" in data and action is Action.DISABLED:
enabled = datetime.fromisoformat(data["enable_date"])
delta = datetime.now() - enabled
self.bot.stats.timing("defcon.enabled", delta)
except Exception:
pass
error = None
try:
await self.bot.api_client.put(
'bot/bot-settings/defcon',
json={
'name': 'defcon',
'data': {
# TODO: retrieve old days count
'days': days,
'enabled': action is not Action.DISABLED,
'enable_date': datetime.now().isoformat()
}
}
)
except Exception as err:
log.exception("Unable to update DEFCON settings.")
error = err
finally:
await ctx.send(self.build_defcon_msg(action, error))
await self.send_defcon_log(action, ctx.author, error)
self.bot.stats.gauge("defcon.threshold", days)
@defcon_group.command(name='enable', aliases=('on', 'e'))
@with_role(Roles.admins, Roles.owners)
async def enable_command(self, ctx: Context) -> None:
"""
Enable DEFCON mode. Useful in a pinch, but be sure you know what you're doing!
Currently, this just adds an account age requirement. Use !defcon days <int> to set how old an account must be,
in days.
"""
self.enabled = True
await self._defcon_action(ctx, days=0, action=Action.ENABLED)
await self.update_channel_topic()
@defcon_group.command(name='disable', aliases=('off', 'd'))
@with_role(Roles.admins, Roles.owners)
async def disable_command(self, ctx: Context) -> None:
"""Disable DEFCON mode. Useful in a pinch, but be sure you know what you're doing!"""
self.enabled = False
await self._defcon_action(ctx, days=0, action=Action.DISABLED)
await self.update_channel_topic()
@defcon_group.command(name='status', aliases=('s',))
@with_role(Roles.admins, Roles.owners)
async def status_command(self, ctx: Context) -> None:
"""Check the current status of DEFCON mode."""
embed = Embed(
colour=Colour.blurple(), title="DEFCON Status",
description=f"**Enabled:** {self.enabled}\n"
f"**Days:** {self.days.days}"
)
await ctx.send(embed=embed)
@defcon_group.command(name='days')
@with_role(Roles.admins, Roles.owners)
async def days_command(self, ctx: Context, days: int) -> None:
"""Set how old an account must be to join the server, in days, with DEFCON mode enabled."""
self.days = timedelta(days=days)
self.enabled = True
await self._defcon_action(ctx, days=days, action=Action.UPDATED)
await self.update_channel_topic()
async def update_channel_topic(self) -> None:
"""Update the #defcon channel topic with the current DEFCON status."""
if self.enabled:
day_str = "days" if self.days.days > 1 else "day"
new_topic = f"{BASE_CHANNEL_TOPIC}\n(Status: Enabled, Threshold: {self.days.days} {day_str})"
else:
new_topic = f"{BASE_CHANNEL_TOPIC}\n(Status: Disabled)"
self.mod_log.ignore(Event.guild_channel_update, Channels.defcon)
await self.channel.edit(topic=new_topic)
def build_defcon_msg(self, action: Action, e: Exception = None) -> str:
"""Build in-channel response string for DEFCON action."""
if action is Action.ENABLED:
msg = f"{Emojis.defcon_enabled} DEFCON enabled.\n\n"
elif action is Action.DISABLED:
msg = f"{Emojis.defcon_disabled} DEFCON disabled.\n\n"
elif action is Action.UPDATED:
msg = (
f"{Emojis.defcon_updated} DEFCON days updated; accounts must be {self.days.days} "
f"day{'s' if self.days.days > 1 else ''} old to join the server.\n\n"
)
if e:
msg += (
"**There was a problem updating the site** - This setting may be reverted when the bot restarts.\n\n"
f"```py\n{e}\n```"
)
return msg
async def send_defcon_log(self, action: Action, actor: Member, e: Exception = None) -> None:
"""Send log message for DEFCON action."""
info = action.value
log_msg: str = (
f"**Staffer:** {actor.mention} {actor} (`{actor.id}`)\n"
f"{info.template.format(days=self.days.days)}"
)
status_msg = f"DEFCON {action.name.lower()}"
if e:
log_msg += (
"**There was a problem updating the site** - This setting may be reverted when the bot restarts.\n\n"
f"```py\n{e}\n```"
)
await self.mod_log.send_log_message(info.icon, info.color, status_msg, log_msg)
def setup(bot: Bot) -> None:
"""Load the Defcon cog."""
bot.add_cog(Defcon(bot))
|
en
| 0.904042
|
Hi, {user} - Thanks for your interest in our server! Due to a current (or detected) cyberattack on our community, we've limited access to the server for new accounts. Since your account is relatively new, we're unable to provide access to the server at this time. Even so, thanks for joining! We're very excited at the possibility of having you here, and we hope that this situation will be resolved soon. In the meantime, please feel free to peruse the resources on our site at <https://pythondiscord.com/>, and have a nice day! Defcon Action. Time-sensitive server defense mechanisms. # type: timedelta # type: bool Get currently loaded ModLog cog instance. On cog load, try to synchronize DEFCON settings to the API. # Yikes! If DEFCON is enabled, check newly joining users to see if they meet the account age threshold. Check the DEFCON status or run a subcommand. Providing a structured way to do an defcon action. # TODO: retrieve old days count Enable DEFCON mode. Useful in a pinch, but be sure you know what you're doing! Currently, this just adds an account age requirement. Use !defcon days <int> to set how old an account must be, in days. Disable DEFCON mode. Useful in a pinch, but be sure you know what you're doing! Check the current status of DEFCON mode. Set how old an account must be to join the server, in days, with DEFCON mode enabled. Update the #defcon channel topic with the current DEFCON status. Build in-channel response string for DEFCON action. Send log message for DEFCON action. Load the Defcon cog.
| 2.334432
| 2
|
examples/phononic/coef_conf_elastic.py
|
olivierverdier/sfepy
| 1
|
6629291
|
from sfepy.fem.periodic import *
import sfepy.homogenization.coefs_base as cb
from sfepy.homogenization.utils import define_box_regions
def expand_regions(eqs, expand):
    """Return a copy of *eqs* with every equation template filled in.

    Each value of *eqs* is a ``%``-style format string; *expand* is the
    value (here, a tuple of region names) substituted into it.

    Note: the original used ``dict.iteritems()``, which exists only in
    Python 2; ``items()`` behaves identically for this use and also works
    on Python 3.
    """
    return {key: val % expand for key, val in eqs.items()}
# Bilinear elastic form evaluated with two parameter fields; the %s is
# filled in with the region name by define_input() below.
expr_elastic = """dw_lin_elastic.3.%s( matrix.D, Pi1, Pi2 )"""

# Correction-problem equation template; both %s placeholders receive the
# (same) region name via expand_regions().
eq_rs = {
    'eq' : """dw_lin_elastic.3.%s( matrix.D, v1, u1 )
+ dw_lin_elastic.3.%s( matrix.D, v1, Pi ) = 0""",
}
def set_elastic(variables, ir, ic, mode, pis, corrs_phono_rs):
    """Fill the parameter variable for *mode* ('row' -> Pi1, 'col' -> Pi2)
    with the (ir, ic) shape state plus its correction state."""
    target = {'row' : 'Pi1', 'col' : 'Pi2'}[mode]
    shape_state = pis.states[ir, ic]['u1']
    correction_state = corrs_phono_rs.states[ir, ic]['u1']
    variables[target].data_from_any(shape_state + correction_state)
def define_input(filename, region, bbox):
    """Uses materials, fe of master file, merges regions.

    Build and return (via ``locals()``) the sfepy problem description for
    the homogenized elastic coefficient / correction problem.

    Parameters
    ----------
    filename : str
        The mesh file name.
    region : str
        Name of the matrix region over which the elastic terms integrate.
    bbox : array, shape (2, dim)
        Domain bounding box; rows are the min and max corners.
    """
    filename_mesh = filename
    dim = bbox.shape[1]

    options = {
        'coefs' : 'coefs',
        'requirements' : 'requirements',
    }

    # Mesh-periodicity matching functions; the names come from the star
    # import of sfepy.fem.periodic at the top of the file.
    functions = {
        'match_x_plane' : (match_x_plane,),
        'match_y_plane' : (match_y_plane,),
        'match_z_plane' : (match_z_plane,),
        'match_x_line' : (match_x_line,),
        'match_y_line' : (match_y_line,),
    }

    # Homogenized elastic coefficient: symmetric-by-symmetric tensor
    # assembled from the shape functions ("pis") and their corrections.
    coefs = {
        'elastic' : {
            'requires' : ['pis', 'corrs_phono_rs'],
            'expression' : expr_elastic % region,
            'set_variables' : set_elastic,
            'class' : cb.CoefSymSym,
        },
    }

    # Only the periodicity conditions relevant for this dimension.
    all_periodic = ['periodic_%s' % ii for ii in ['x', 'y', 'z'][:dim] ]

    requirements = {
        'pis' : {
            'variables' : ['u1'],
            'class' : cb.ShapeDimDim,
        },
        # Correction functions solved for each (row, column) index pair.
        'corrs_phono_rs' : {
            'requires' : ['pis'],
            'ebcs' : ['fixed_u'],
            'epbcs' : all_periodic,
            'equations' : expand_regions( eq_rs, (region, region) ),
            'set_variables' : [('Pi', 'pis', 'u1')],
            'class' : cb.CorrDimDim,
            'save_name' : 'corrs_phono',
            'dump_variables' : ['u1'],
            'save_variables' : ['u1'],
            'is_linear' : True,
        },
    }

    # Displacement field on the matrix region.
    # NOTE(review): `nm` (numpy) is not imported in this file; presumably
    # it arrives via the `sfepy.fem.periodic` star import -- confirm.
    field_10 = {
        'name' : 'displacement_matrix',
        'dtype' : nm.float64,
        'shape' : dim,
        'region' : region,
        'approx_order' : 1,
    }

    variables = {
        'u1' : ('unknown field', 'displacement_matrix', 0),
        'v1' : ('test field', 'displacement_matrix', 'u1'),
        'Pi' : ('parameter field', 'displacement_matrix', 'u1'),
        'Pi1' : ('parameter field', 'displacement_matrix', '(set-to-None)'),
        'Pi2' : ('parameter field', 'displacement_matrix', '(set-to-None)'),
    }

    regions = define_box_regions(dim, bbox[0], bbox[1])

    # Pin displacements in the box corners (removes rigid body motions).
    ebcs = {
        'fixed_u' : ('Corners', {'u1.all' : 0.0}),
    }

    ##
    # Periodic boundary conditions.
    if dim == 3:
        epbc_10 = {
            'name' : 'periodic_x',
            'region' : ['Left', 'Right'],
            'dofs' : {'u1.all' : 'u1.all'},
            'match' : 'match_x_plane',
        }
        epbc_11 = {
            'name' : 'periodic_y',
            'region' : ['Near', 'Far'],
            'dofs' : {'u1.all' : 'u1.all'},
            'match' : 'match_y_plane',
        }
        epbc_12 = {
            'name' : 'periodic_z',
            'region' : ['Top', 'Bottom'],
            'dofs' : {'u1.all' : 'u1.all'},
            'match' : 'match_z_plane',
        }
    else:
        # 2D case: the matching planes degenerate to lines.
        epbc_10 = {
            'name' : 'periodic_x',
            'region' : ['Left', 'Right'],
            'dofs' : {'u1.all' : 'u1.all'},
            'match' : 'match_y_line',
        }
        epbc_11 = {
            'name' : 'periodic_y',
            'region' : ['Top', 'Bottom'],
            'dofs' : {'u1.all' : 'u1.all'},
            'match' : 'match_x_line',
        }

    solver_0 = {
        'name' : 'ls',
        'kind' : 'ls.scipy_direct', # Direct solver.
    }

    solver_1 = {
        'name' : 'newton',
        'kind' : 'nls.newton',
        'i_max' : 2,
    }

    # sfepy problem description files expose their configuration as a
    # namespace; returning locals() makes every definition above visible.
    return locals()
|
from sfepy.fem.periodic import *
import sfepy.homogenization.coefs_base as cb
from sfepy.homogenization.utils import define_box_regions
def expand_regions( eqs, expand ):
out = {}
for key, val in eqs.iteritems():
out[key] = val % expand
return out
expr_elastic = """dw_lin_elastic.3.%s( matrix.D, Pi1, Pi2 )"""
eq_rs = {
'eq' : """dw_lin_elastic.3.%s( matrix.D, v1, u1 )
+ dw_lin_elastic.3.%s( matrix.D, v1, Pi ) = 0""",
}
def set_elastic(variables, ir, ic, mode, pis, corrs_phono_rs):
mode2var = {'row' : 'Pi1', 'col' : 'Pi2'}
val = pis.states[ir, ic]['u1'] + corrs_phono_rs.states[ir, ic]['u1']
variables[mode2var[mode]].data_from_any(val)
def define_input(filename, region, bbox):
"""Uses materials, fe of master file, merges regions."""
filename_mesh = filename
dim = bbox.shape[1]
options = {
'coefs' : 'coefs',
'requirements' : 'requirements',
}
functions = {
'match_x_plane' : (match_x_plane,),
'match_y_plane' : (match_y_plane,),
'match_z_plane' : (match_z_plane,),
'match_x_line' : (match_x_line,),
'match_y_line' : (match_y_line,),
}
coefs = {
'elastic' : {
'requires' : ['pis', 'corrs_phono_rs'],
'expression' : expr_elastic % region,
'set_variables' : set_elastic,
'class' : cb.CoefSymSym,
},
}
all_periodic = ['periodic_%s' % ii for ii in ['x', 'y', 'z'][:dim] ]
requirements = {
'pis' : {
'variables' : ['u1'],
'class' : cb.ShapeDimDim,
},
'corrs_phono_rs' : {
'requires' : ['pis'],
'ebcs' : ['fixed_u'],
'epbcs' : all_periodic,
'equations' : expand_regions( eq_rs, (region, region) ),
'set_variables' : [('Pi', 'pis', 'u1')],
'class' : cb.CorrDimDim,
'save_name' : 'corrs_phono',
'dump_variables' : ['u1'],
'save_variables' : ['u1'],
'is_linear' : True,
},
}
field_10 = {
'name' : 'displacement_matrix',
'dtype' : nm.float64,
'shape' : dim,
'region' : region,
'approx_order' : 1,
}
variables = {
'u1' : ('unknown field', 'displacement_matrix', 0),
'v1' : ('test field', 'displacement_matrix', 'u1'),
'Pi' : ('parameter field', 'displacement_matrix', 'u1'),
'Pi1' : ('parameter field', 'displacement_matrix', '(set-to-None)'),
'Pi2' : ('parameter field', 'displacement_matrix', '(set-to-None)'),
}
regions = define_box_regions(dim, bbox[0], bbox[1])
ebcs = {
'fixed_u' : ('Corners', {'u1.all' : 0.0}),
}
##
# Periodic boundary conditions.
if dim == 3:
epbc_10 = {
'name' : 'periodic_x',
'region' : ['Left', 'Right'],
'dofs' : {'u1.all' : 'u1.all'},
'match' : 'match_x_plane',
}
epbc_11 = {
'name' : 'periodic_y',
'region' : ['Near', 'Far'],
'dofs' : {'u1.all' : 'u1.all'},
'match' : 'match_y_plane',
}
epbc_12 = {
'name' : 'periodic_z',
'region' : ['Top', 'Bottom'],
'dofs' : {'u1.all' : 'u1.all'},
'match' : 'match_z_plane',
}
else:
epbc_10 = {
'name' : 'periodic_x',
'region' : ['Left', 'Right'],
'dofs' : {'u1.all' : 'u1.all'},
'match' : 'match_y_line',
}
epbc_11 = {
'name' : 'periodic_y',
'region' : ['Top', 'Bottom'],
'dofs' : {'u1.all' : 'u1.all'},
'match' : 'match_x_line',
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct', # Direct solver.
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 2,
}
return locals()
|
en
| 0.547982
|
dw_lin_elastic.3.%s( matrix.D, Pi1, Pi2 ) dw_lin_elastic.3.%s( matrix.D, v1, u1 ) + dw_lin_elastic.3.%s( matrix.D, v1, Pi ) = 0 Uses materials, fe of master file, merges regions. ## # Periodic boundary conditions. # Direct solver.
| 1.99717
| 2
|
herders/migrations/0010_auto_20191214_1751.py
|
Itori/swarfarm
| 66
|
6629292
|
# Generated by Django 2.2.9.dev20191214052109 on 2019-12-15 01:51
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add enchant-gem / grindstone tracking to RuneInstance and alter the
    per-substat bookkeeping array fields to default to empty lists."""

    dependencies = [
        ('herders', '0009_auto_20191114_1159'),
    ]

    operations = [
        # New flag: whether an enchant gem has been applied to this rune.
        migrations.AddField(
            model_name='runeinstance',
            name='has_gem',
            field=models.BooleanField(default=False, help_text='Has had an enchant gem applied'),
        ),
        # New counter: how many grindstones have been applied.
        migrations.AddField(
            model_name='runeinstance',
            name='has_grind',
            field=models.IntegerField(default=0, help_text='Number of grindstones applied'),
        ),
        # Per-substat arrays (at most 4 substats); default is now an empty list.
        migrations.AlterField(
            model_name='runeinstance',
            name='substats_enchanted',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.BooleanField(blank=True, default=False), default=list, size=4),
        ),
        migrations.AlterField(
            model_name='runeinstance',
            name='substats_grind_value',
            field=django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(blank=True, default=0), default=list, size=4),
        ),
    ]
|
# Generated by Django 2.2.9.dev20191214052109 on 2019-12-15 01:51
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('herders', '0009_auto_20191114_1159'),
]
operations = [
migrations.AddField(
model_name='runeinstance',
name='has_gem',
field=models.BooleanField(default=False, help_text='Has had an enchant gem applied'),
),
migrations.AddField(
model_name='runeinstance',
name='has_grind',
field=models.IntegerField(default=0, help_text='Number of grindstones applied'),
),
migrations.AlterField(
model_name='runeinstance',
name='substats_enchanted',
field=django.contrib.postgres.fields.ArrayField(base_field=models.BooleanField(blank=True, default=False), default=list, size=4),
),
migrations.AlterField(
model_name='runeinstance',
name='substats_grind_value',
field=django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(blank=True, default=0), default=list, size=4),
),
]
|
en
| 0.811128
|
# Generated by Django 2.2.9.dev20191214052109 on 2019-12-15 01:51
| 1.679203
| 2
|
tests/test_lists.py
|
eladrich/pyrallis
| 22
|
6629293
|
from dataclasses import dataclass, field
from typing import *
import pytest
from .testutils import *
def test_list_one_element(simple_attribute):
    """An empty CLI yields [], and one bracketed value parses to a one-item list."""
    item_type, raw_value, parsed_value = simple_attribute

    @dataclass
    class Container(TestSetup):
        a: List[item_type] = field(default_factory=list)

    assert Container.setup("").a == []
    parsed = Container.setup(f"--a [{raw_value}]")
    assert parsed.a == [parsed_value], Container.get_help_text()
@pytest.fixture
def ContainerClass():
    """Fixture: a TestSetup dataclass with required (a, b) and defaulted (c, d)
    collection fields, used by the tests below."""
    @dataclass
    class ContainerClass(TestSetup):
        a: Tuple[int]                                # required
        b: List[int]                                 # required
        c: Tuple[str] = tuple()                      # optional, defaults to ()
        d: List[int] = field(default_factory=list)   # optional, defaults to []
    return ContainerClass
def test_single_element_list(ContainerClass):
    """Single-element bracket syntax fills every collection field with one item."""
    cfg = ContainerClass.setup("--a [1] --b [4] --c [7] --d [10]")
    assert cfg.a == (1,)
    assert cfg.b == [4]
    assert cfg.c == ("7",)
    assert cfg.d == [10]
def test_required_attributes_works(ContainerClass):
    """Omitting either required field raises ParsingError; supplying both parses."""
    with raises(ParsingError):
        ContainerClass.setup("--b [4]")  # missing required `a`
    with raises(ParsingError):
        ContainerClass.setup("--a [4]")  # missing required `b`
    container = ContainerClass.setup("--a [4] --b [5]")
    assert container == ContainerClass(a=(4,), b=[5])
def test_default_value(ContainerClass):
    """Fields not passed on the command line keep their declared defaults."""
    container = ContainerClass.setup("--a [1] --b '[4, 5, 6]'")
    assert container.a == (1,)
    assert container.b == [4, 5, 6]
    assert container.c == tuple()  # default: empty tuple
    assert container.d == list()   # default: empty list (default_factory)
@parametrize(
    "item_type, passed_values",
    [
        (int, [[1, 2], [4, 5], [7, 8]]),
        (float, [[1.1, 2.1], [4.2, 5.2], [7.2, 8.2]]),
        (str, [["a", "b"], ["c", "d"], ["e", "f"]]),
        (bool, [[True, True], [True, False], [False, True]]),
    ],
)
def test_parse_multiple_with_list_attributes(
    item_type: Type,
    passed_values: List[List[Any]],
):
    """Bracketed multi-element lists of each item type parse to the expected list."""

    @dataclass
    class SomeClass(TestSetup):
        a: List[item_type] = field(default_factory=list)  # type: ignore
        """some docstring for attribute 'a'"""

    # Each sub-list is parsed independently and must round-trip exactly.
    for value in passed_values:
        arguments = "--a " + format_list_using_brackets(value)
        result = SomeClass.setup(arguments)
        assert result == SomeClass(a=value)
@parametrize(
    "item_type, type_hint, value, arg",
    [
        (list, List, [1, 2, 3], '[1, 2, 3]'),
        (set, Set, {1, 2, 3}, '[1, 2, 3]'),
        (tuple, Tuple, (1, 2, 3), '[1, 2, 3]'),
        (dict, Dict, {1: 2}, '{1: 2}')
    ],
)
def test_collection_no_type(item_type, type_hint, value, arg):
    """Unparameterized collection annotations work both as typing hints and builtins."""

    # Bare typing hint, e.g. `a: List`.
    @dataclass
    class ContainerHint(TestSetup):
        a: type_hint

    c = ContainerHint.setup(f"--a '{arg}'")
    assert c.a == value

    # Bare builtin type, e.g. `a: list`.
    @dataclass
    class ContainerType(TestSetup):
        a: item_type

    c = ContainerType.setup(f"--a '{arg}'")
    assert c.a == value
|
from dataclasses import dataclass, field
from typing import *
import pytest
from .testutils import *
def test_list_one_element(simple_attribute):
some_type, passed_value, expected_value = simple_attribute
@dataclass
class Container(TestSetup):
a: List[some_type] = field(default_factory=list)
c = Container.setup("")
assert c.a == []
c = Container.setup(f"--a [{passed_value}]")
assert c.a == [expected_value], Container.get_help_text()
@pytest.fixture
def ContainerClass():
@dataclass
class ContainerClass(TestSetup):
a: Tuple[int]
b: List[int]
c: Tuple[str] = tuple()
d: List[int] = field(default_factory=list)
return ContainerClass
def test_single_element_list(ContainerClass):
container = ContainerClass.setup("--a [1] --b [4] --c [7] --d [10]")
assert container.a == (1,)
assert container.b == [4]
assert container.c == ("7",)
assert container.d == [10]
def test_required_attributes_works(ContainerClass):
with raises(ParsingError):
ContainerClass.setup("--b [4]")
with raises(ParsingError):
ContainerClass.setup("--a [4]")
container = ContainerClass.setup("--a [4] --b [5]")
assert container == ContainerClass(a=(4,), b=[5])
def test_default_value(ContainerClass):
container = ContainerClass.setup("--a [1] --b '[4, 5, 6]'")
assert container.a == (1,)
assert container.b == [4, 5, 6]
assert container.c == tuple()
assert container.d == list()
@parametrize(
"item_type, passed_values",
[
(int, [[1, 2], [4, 5], [7, 8]]),
(float, [[1.1, 2.1], [4.2, 5.2], [7.2, 8.2]]),
(str, [["a", "b"], ["c", "d"], ["e", "f"]]),
(bool, [[True, True], [True, False], [False, True]]),
],
)
def test_parse_multiple_with_list_attributes(
item_type: Type,
passed_values: List[List[Any]],
):
@dataclass
class SomeClass(TestSetup):
a: List[item_type] = field(default_factory=list) # type: ignore
"""some docstring for attribute 'a'"""
for value in passed_values:
arguments = "--a " + format_list_using_brackets(value)
result = SomeClass.setup(arguments)
assert result == SomeClass(a=value)
@parametrize(
"item_type, type_hint, value, arg",
[
(list, List, [1, 2, 3], '[1, 2, 3]'),
(set, Set, {1, 2, 3}, '[1, 2, 3]'),
(tuple, Tuple, (1, 2, 3), '[1, 2, 3]'),
(dict, Dict, {1: 2}, '{1: 2}')
],
)
def test_collection_no_type(item_type, type_hint, value, arg):
@dataclass
class ContainerHint(TestSetup):
a: type_hint
c = ContainerHint.setup(f"--a '{arg}'")
assert c.a == value
@dataclass
class ContainerType(TestSetup):
a: item_type
c = ContainerType.setup(f"--a '{arg}'")
assert c.a == value
|
en
| 0.52286
|
# type: ignore some docstring for attribute 'a'
| 2.72615
| 3
|
samples/modules/tensorflow/magic_wand/train/data_prepare_test.py
|
lviala-zaack/zephyr
| 6,224
|
6629294
|
<filename>samples/modules/tensorflow/magic_wand/train/data_prepare_test.py<gh_stars>1000+
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for data_prepare.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import json
import os
import unittest
from data_prepare import generate_negative_data
from data_prepare import prepare_original_data
from data_prepare import write_data
class TestPrepare(unittest.TestCase):
    """Tests for data_prepare.py.

    NOTE(review): `folders` and `names` are globals that are not defined in
    this file (hence the pylint `undefined-variable` disables); presumably
    they are provided by data_prepare's star-exported namespace or the test
    harness -- confirm before running this module standalone.
    """

    def setUp(self):  # pylint: disable=g-missing-super-call
        # Output file for the first person's data in the first gesture folder.
        self.file = "./%s/output_%s_%s.txt" % (folders[0], folders[0], names[0])  # pylint: disable=undefined-variable
        self.data = []
        prepare_original_data(folders[0], names[0], self.data, self.file)  # pylint: disable=undefined-variable

    def test_prepare_data(self):
        """Each '-'-delimited record in the file should yield one dict in self.data."""
        num = 0
        with open(self.file, "r") as f:
            lines = csv.reader(f)
            for idx, line in enumerate(lines):  # pylint: disable=unused-variable
                # A record-separator row looks like [x, y, "-"].
                if len(line) == 3 and line[2] == "-":
                    num += 1
        self.assertEqual(len(self.data), num)
        self.assertIsInstance(self.data, list)
        self.assertIsInstance(self.data[0], dict)
        # Every record dict carries exactly these keys, in this order.
        self.assertEqual(list(self.data[-1]), ["gesture", "accel_ms2_xyz", "name"])
        self.assertEqual(self.data[0]["name"], names[0])  # pylint: disable=undefined-variable

    def test_generate_negative(self):
        """generate_negative_data should append exactly 300 synthetic records."""
        original_len = len(self.data)
        generate_negative_data(self.data)
        self.assertEqual(original_len + 300, len(self.data))
        generated_num = 0
        for idx, data in enumerate(self.data):  # pylint: disable=unused-variable
            # Synthetic records are tagged negative6 / negative7 / negative8.
            if data["name"] == "negative6" or data["name"] == "negative7" or data[
                    "name"] == "negative8":
                generated_num += 1
        self.assertEqual(generated_num, 300)

    def test_write_data(self):
        """write_data should emit one JSON line per record, round-trippable."""
        data_path_test = "./data/data0"
        write_data(self.data, data_path_test)
        with open(data_path_test, "r") as f:
            lines = f.readlines()
            self.assertEqual(len(lines), len(self.data))
            self.assertEqual(json.loads(lines[0]), self.data[0])
            self.assertEqual(json.loads(lines[-1]), self.data[-1])
        os.remove(data_path_test)
# Allow running this test module directly: `python data_prepare_test.py`.
if __name__ == "__main__":
    unittest.main()
|
<filename>samples/modules/tensorflow/magic_wand/train/data_prepare_test.py<gh_stars>1000+
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for data_prepare.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import json
import os
import unittest
from data_prepare import generate_negative_data
from data_prepare import prepare_original_data
from data_prepare import write_data
class TestPrepare(unittest.TestCase):
def setUp(self): # pylint: disable=g-missing-super-call
self.file = "./%s/output_%s_%s.txt" % (folders[0], folders[0], names[0]) # pylint: disable=undefined-variable
self.data = []
prepare_original_data(folders[0], names[0], self.data, self.file) # pylint: disable=undefined-variable
def test_prepare_data(self):
num = 0
with open(self.file, "r") as f:
lines = csv.reader(f)
for idx, line in enumerate(lines): # pylint: disable=unused-variable
if len(line) == 3 and line[2] == "-":
num += 1
self.assertEqual(len(self.data), num)
self.assertIsInstance(self.data, list)
self.assertIsInstance(self.data[0], dict)
self.assertEqual(list(self.data[-1]), ["gesture", "accel_ms2_xyz", "name"])
self.assertEqual(self.data[0]["name"], names[0]) # pylint: disable=undefined-variable
def test_generate_negative(self):
original_len = len(self.data)
generate_negative_data(self.data)
self.assertEqual(original_len + 300, len(self.data))
generated_num = 0
for idx, data in enumerate(self.data): # pylint: disable=unused-variable
if data["name"] == "negative6" or data["name"] == "negative7" or data[
"name"] == "negative8":
generated_num += 1
self.assertEqual(generated_num, 300)
def test_write_data(self):
data_path_test = "./data/data0"
write_data(self.data, data_path_test)
with open(data_path_test, "r") as f:
lines = f.readlines()
self.assertEqual(len(lines), len(self.data))
self.assertEqual(json.loads(lines[0]), self.data[0])
self.assertEqual(json.loads(lines[-1]), self.data[-1])
os.remove(data_path_test)
if __name__ == "__main__":
unittest.main()
|
en
| 0.735468
|
# Lint as: python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Test for data_prepare.py. # pylint: disable=g-missing-super-call # pylint: disable=undefined-variable # pylint: disable=undefined-variable # pylint: disable=unused-variable # pylint: disable=undefined-variable # pylint: disable=unused-variable
| 2.239187
| 2
|
examples/rules/PropertiesTagsIncluded.py
|
amabowilli/cfn-python-lint
| 1
|
6629295
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
import cfnlint.helpers
class PropertiesTagsIncluded(CloudFormationLintRule):
    """Check if Tags are included on supported resources."""
    id = 'E9001'
    shortdesc = 'Tags are included on resources that support it'
    description = 'Check Tags for resources'
    tags = ['resources', 'tags']

    def get_resources_with_tags(self, region):
        """Return resource type names whose spec declares a Tags property.

        Args:
            region: AWS region name used to select the resource specification.
        """
        resourcespecs = cfnlint.helpers.RESOURCE_SPECS[region]
        resourcetypes = resourcespecs['ResourceTypes']

        matches = []
        for resourcetype, resourceobj in resourcetypes.items():
            # `Properties` may be absent (None); both checks fold into one condition.
            propertiesobj = resourceobj.get('Properties')
            if propertiesobj and 'Tags' in propertiesobj:
                matches.append(resourcetype)
        return matches

    def match(self, cfn):
        """Flag every taggable resource in the template lacking a Tags property."""
        matches = []

        # NOTE(review): the original also computed `all_tags` via
        # cfn.search_deep_keys('Tags') here, but the result was never used;
        # the dead query has been removed.
        resources_tags = self.get_resources_with_tags(cfn.regions[0])
        resources = cfn.get_resources()
        for resource_name, resource_obj in resources.items():
            resource_type = resource_obj.get('Type', "")
            resource_properties = resource_obj.get('Properties', {})
            if resource_type in resources_tags and 'Tags' not in resource_properties:
                path = ['Resources', resource_name, 'Properties']
                message = "Missing Tags Properties for {0}"
                matches.append(
                    RuleMatch(path, message.format('/'.join(map(str, path)))))
        return matches
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
import cfnlint.helpers
class PropertiesTagsIncluded(CloudFormationLintRule):
    """Check if Tags are included on supported resources"""
    id = 'E9001'
    shortdesc = 'Tags are included on resources that support it'
    description = 'Check Tags for resources'
    tags = ['resources', 'tags']

    def get_resources_with_tags(self, region):
        """Return the resource type names whose spec declares a 'Tags' property."""
        resourcespecs = cfnlint.helpers.RESOURCE_SPECS[region]
        resourcetypes = resourcespecs['ResourceTypes']
        matches = []
        for resourcetype, resourceobj in resourcetypes.items():
            propertiesobj = resourceobj.get('Properties')
            # Only types that both define Properties and list Tags qualify.
            if propertiesobj and 'Tags' in propertiesobj:
                matches.append(resourcetype)
        return matches

    def match(self, cfn):
        """Flag every taggable resource in *cfn* that does not define Tags.

        Fix: dropped the unused `all_tags` computation -- the original ran
        cfn.search_deep_keys('Tags') over the whole template and discarded
        the result.
        """
        matches = []
        # Only the first configured region is consulted for the spec lookup.
        resources_tags = self.get_resources_with_tags(cfn.regions[0])
        for resource_name, resource_obj in cfn.get_resources().items():
            resource_type = resource_obj.get('Type', "")
            resource_properties = resource_obj.get('Properties', {})
            if resource_type in resources_tags and 'Tags' not in resource_properties:
                path = ['Resources', resource_name, 'Properties']
                message = "Missing Tags Properties for {0}"
                matches.append(
                    RuleMatch(path, message.format('/'.join(map(str, path)))))
        return matches
|
en
| 0.777292
|
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Check if Tags are included on supported resources Get resource types that support tags Check Tags for required keys
| 1.935151
| 2
|
modules/core/dataloader.py
|
CGL-Deeplearning/FRIDAY
| 6
|
6629296
|
<reponame>CGL-Deeplearning/FRIDAY
import os
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
import h5py
import torch
import pickle
class SequenceDataset(Dataset):
    """Serve (image, label) pairs described by a CSV manifest.

    Each manifest row (no header) provides:
      column 0: "<hdf5_file_path> <allele_dict_path>" (space separated)
      column 1: index of the image inside the HDF5 'images' dataset
      column 3: label string, one integer class per character
    """

    def __init__(self, csv_path, transform=None):
        # dtype=str keeps paths, indices and label strings as raw text;
        # they are parsed lazily in __getitem__.
        data_frame = pd.read_csv(csv_path, header=None, dtype=str)
        self.transform = transform
        self.file_info = list(data_frame[0])
        self.index_info = list(data_frame[1])
        self.label = list(data_frame[3])

    @staticmethod
    def load_dictionary(dictionary_location):
        """Return the pickled object stored at *dictionary_location*."""
        # Fix: context manager closes the handle even if unpickling raises,
        # and the local no longer shadows the builtin name `dict`.
        with open(dictionary_location, 'rb') as handle:
            return pickle.load(handle)

    def __getitem__(self, index):
        """Return the (image, label) pair at *index*.

        The image comes from the HDF5 'images' dataset; the label is one
        integer per character of the label string.
        """
        hdf5_file_path, allele_dict_path = self.file_info[index].split(' ')
        hdf5_index = int(self.index_info[index])
        # Fix: context manager guarantees the HDF5 handle is closed even
        # when the dataset read raises.
        with h5py.File(hdf5_file_path, 'r') as hdf5_file:
            img = np.array(hdf5_file['images'][hdf5_index], dtype=np.uint8)
        # Each character of the label string is an independent integer class.
        label = np.array([int(x) for x in self.label[index]])
        if self.transform is not None:
            img = self.transform(img)
            img = img.transpose(1, 2)
        label = torch.from_numpy(label)
        return img, label

    def __len__(self):
        return len(self.file_info)
|
import os
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
import h5py
import torch
import pickle
class SequenceDataset(Dataset):
    """Dataset backed by a CSV manifest of HDF5 images and label strings.

    Manifest columns: 0 = "<hdf5_path> <allele_dict_path>", 1 = image index,
    3 = label string (one digit per position).
    """

    def __init__(self, csv_path, transform=None):
        # Every column is read as text; parsing happens on item access.
        manifest = pd.read_csv(csv_path, header=None, dtype=str)
        self.transform = transform
        self.file_info = list(manifest[0])
        self.index_info = list(manifest[1])
        self.label = list(manifest[3])

    @staticmethod
    def load_dictionary(dictionary_location):
        """Unpickle and return the object stored at *dictionary_location*."""
        with open(dictionary_location, 'rb') as source:
            loaded = pickle.load(source)
        return loaded

    def __getitem__(self, index):
        """Fetch the image and per-character label tensors at *index*."""
        image_path, _allele_dict_path = self.file_info[index].split(' ')
        row = int(self.index_info[index])
        archive = h5py.File(image_path, 'r')
        img = np.array(archive['images'][row], dtype=np.uint8)
        archive.close()
        label = np.array([int(ch) for ch in self.label[index]])
        if self.transform is not None:
            img = self.transform(img)
            img = img.transpose(1, 2)
        return img, torch.from_numpy(label)

    def __len__(self):
        return len(self.file_info)
|
en
| 0.645642
|
Arguments: A CSV file path # assert data_frame[0].apply(lambda x: os.path.isfile(x.split(' ')[0])).all(), \ # "Some images referenced in the CSV file were not found" # load the image # load the labels # type fix and convert to tensor
| 2.733417
| 3
|
hashsha1.py
|
fbl4kd43m0n/Python
| 2
|
6629297
|
<filename>hashsha1.py
import hashlib

BLOCKSIZE = 65536

# Incrementally SHA-1 the file in fixed-size chunks; iter() with a b''
# sentinel stops as soon as read() returns empty bytes at EOF, so memory
# stays constant regardless of file size.
hasher = hashlib.sha1()
with open('anotherfile.txt', 'rb') as afile:
    for chunk in iter(lambda: afile.read(BLOCKSIZE), b''):
        hasher.update(chunk)
print(hasher.hexdigest())
|
<filename>hashsha1.py
import hashlib
# Read the file in 64 KiB chunks so hashing uses constant memory
# regardless of the input size.
BLOCKSIZE = 65536
# NOTE(review): SHA-1 is collision-broken; fine as a checksum, not for security.
hasher = hashlib.sha1()
with open('anotherfile.txt', 'rb') as afile:
    buf = afile.read(BLOCKSIZE)
    # read() returns b'' at EOF, which ends the loop.
    while len(buf) > 0:
        hasher.update(buf)
        buf = afile.read(BLOCKSIZE)
# Emit the digest as a hex string.
print(hasher.hexdigest())
|
none
| 1
| 3.045454
| 3
|
|
container-with-most-water/container-with-most-water.py
|
QQuinn03/LeetHub
| 0
|
6629298
|
class Solution:
    def maxArea(self, height: List[int]) -> int:
        """Largest water area between two of the given line heights.

        Two-pointer sweep from both ends: the area is limited by the shorter
        line, so we always advance the shorter side.  O(n) time, O(1) space.
        """
        best = 0
        lo, hi = 0, len(height) - 1
        while lo < hi:
            width = hi - lo
            if height[lo] < height[hi]:
                best = max(best, width * height[lo])
                lo += 1
            else:
                best = max(best, width * height[hi])
                hi -= 1
        return best
|
class Solution:
    def maxArea(self, height: List[int]) -> int:
        """Return the max water area between two lines (container problem).

        Two-pointer scan: the shorter line bounds the area, so the pointer at
        the shorter side advances; moving the taller side can never help.
        O(n) time, O(1) extra space.
        """
        res = 0
        left = 0
        right = len(height)-1
        while left<right:
            left_contain = height[left]
            right_contain = height[right]
            if left_contain<right_contain:
                # Left line limits the height; record its area, then advance.
                res = max(res,(right-left)*left_contain)
                left+=1
            else:
                # Right line limits (or ties); record its area, then retreat.
                res = max(res,(right-left)*right_contain)
                right-=1
        return res
|
none
| 1
| 3.081709
| 3
|
|
twitter_scrooge/twitter_scrooge.bzl
|
adam-singer/rules_scala
| 0
|
6629299
|
load("@bazel_skylib//lib:dicts.bzl", "dicts")
load(
"//scala:scala_cross_version.bzl",
_default_maven_server_urls = "default_maven_server_urls",
)
load(
"//scala/private:common.bzl",
"write_manifest_file",
)
load(
"//scala/private:dependency.bzl",
"legacy_unclear_dependency_info_for_protobuf_scrooge",
)
load(
"//scala/private:rule_impls.bzl",
"compile_java",
"compile_scala",
)
load("@io_bazel_rules_scala//thrift:thrift_info.bzl", "ThriftInfo")
load(
"@io_bazel_rules_scala//thrift:thrift.bzl",
"merge_thrift_infos",
)
load("//third_party/repositories:repositories.bzl", "repositories")
# NOTE(review): jar-extension constant; appears unused in this file -- confirm before removing.
_jar_extension = ".jar"
def _declare_and_bind(
        label,
        artifact_id,
        external_artifact_id,
        overriden_artifacts,
        maven_servers):
    """Bind *external_artifact_id* to *label*, declaring a default repo when label is falsy."""

    # When the caller supplied no override, fetch the canonical artifact from
    # the configured Maven servers and point the label at that repository.
    if not label:
        repositories(
            for_artifact_ids = [
                artifact_id,
            ],
            maven_servers = maven_servers,
            fetch_sources = False,
            overriden_artifacts = overriden_artifacts,
        )
        label = "@" + artifact_id

    # Expose whichever label won under the stable //external name.
    native.bind(
        name = external_artifact_id,
        actual = label,
    )
def twitter_scrooge(
        maven_servers = _default_maven_server_urls(),
        overriden_artifacts = {},
        # These target labels need maven_servers to compute sensible defaults.
        # Therefore we leave them None here.
        libthrift = None,
        scrooge_core = None,
        scrooge_generator = None,
        util_core = None,
        util_logging = None):
    """Set up every repository, binding and toolchain Scrooge needs.

    Each dependency parameter may be a label that overrides the default
    artifact; when left as None, the canonical artifact is fetched from
    *maven_servers*.  Finally registers the Scrooge toolchain.
    """
    _declare_and_bind(
        libthrift,
        "libthrift",
        "io_bazel_rules_scala/dependency/thrift/libthrift",
        overriden_artifacts,
        maven_servers,
    )
    _declare_and_bind(
        scrooge_core,
        "io_bazel_rules_scala_scrooge_core",
        "io_bazel_rules_scala/dependency/thrift/scrooge_core",
        overriden_artifacts,
        maven_servers,
    )
    _declare_and_bind(
        scrooge_generator,
        "io_bazel_rules_scala_scrooge_generator",
        "io_bazel_rules_scala/dependency/thrift/scrooge_generator",
        overriden_artifacts,
        maven_servers,
    )
    _declare_and_bind(
        util_core,
        "io_bazel_rules_scala_util_core",
        "io_bazel_rules_scala/dependency/thrift/util_core",
        overriden_artifacts,
        maven_servers,
    )
    _declare_and_bind(
        util_logging,
        "io_bazel_rules_scala_util_logging",
        "io_bazel_rules_scala/dependency/thrift/util_logging",
        overriden_artifacts,
        maven_servers,
    )
    repositories(
        for_artifact_ids = [
            "io_bazel_rules_scala_mustache",  # Mustache is needed to generate java from thrift, and is passed further down.
            "io_bazel_rules_scala_guava",
            "io_bazel_rules_scala_javax_annotation_api",
        ],
        maven_servers = maven_servers,
        fetch_sources = False,
        overriden_artifacts = overriden_artifacts,
    )
    native.bind(
        name = "io_bazel_rules_scala/dependency/thrift/mustache",
        actual = "@io_bazel_rules_scala_mustache",
    )

    # scrooge-generator needs these runtime_deps to generate java from thrift.
    if not native.existing_rule("io_bazel_rules_scala/dependency/scala/guava"):
        native.bind(
            name = "io_bazel_rules_scala/dependency/scala/guava",
            actual = "@io_bazel_rules_scala_guava",
        )

    # This is a shim needed to import `@javax.annotation.Generated` when compiled with jdk11.
    if not native.existing_rule("io_bazel_rules_scala/dependency/thrift/javax_annotation_api"):
        native.bind(
            name = "io_bazel_rules_scala/dependency/thrift/javax_annotation_api",
            actual = "@io_bazel_rules_scala_javax_annotation_api",
        )
    native.register_toolchains("@io_bazel_rules_scala//twitter_scrooge:scrooge_toolchain")
def _colon_paths(data):
    """Return the paths of the files in *data*, sorted, joined with ':'."""
    sorted_paths = [entry.path for entry in sorted(data)]
    return ":".join(sorted_paths)
# Everything a visited thrift_library node contributes after code generation:
# thrift sources, generated srcjars, compiled jars, and the merged JavaInfo.
ScroogeAspectInfo = provider(fields = [
    "thrift_info",
    "src_jars",
    "output_files",
    "java_info",
])
# Exposed by scrooge_*_library rules; wraps the merged aspect info.
ScroogeInfo = provider(fields = [
    "aspect_info",
])
# Marks a prebuilt import: compiled jars plus the thrifts they came from.
ScroogeImport = provider(fields = [
    "java_info",
    "thrift_info",
])
def merge_scrooge_aspect_info(scrooges):
    """Fold a list of ScroogeAspectInfo providers into a single merged one."""
    return ScroogeAspectInfo(
        src_jars = depset(transitive = [s.src_jars for s in scrooges]),
        output_files = depset(transitive = [s.output_files for s in scrooges]),
        thrift_info = merge_thrift_infos([s.thrift_info for s in scrooges]),
        java_info = java_common.merge([s.java_info for s in scrooges]),
    )
def _generate_jvm_code(ctx, label, compile_thrifts, include_thrifts, jar_output, language):
    """Run the Scrooge worker to generate *language* sources into *jar_output*.

    *compile_thrifts* are compiled; *include_thrifts* are only on the include
    path.  The worker request is a single file: output path, four padded
    colon-joined path lists, then the compiler flags.
    """

    # bazel worker arguments cannot be empty so we pad to ensure non-empty
    # and drop it off on the other side
    # https://github.com/bazelbuild/bazel/issues/3329
    worker_arg_pad = "_"
    path_content = "\n".join([
        worker_arg_pad + _colon_paths(ps)
        for ps in [compile_thrifts, include_thrifts, [], []]
    ])
    compiler_args = getattr(ctx.rule.attr, "compiler_args", [])

    lang_flag = ["--language", language]
    flags = compiler_args + lang_flag

    worker_content = "{output}\n{paths}\n{flags}".format(
        output = jar_output.path,
        paths = path_content,
        # we could put "include_services" on thrift_info, if needed
        flags = worker_arg_pad + ":".join(flags),
    )

    # Since we may want to generate several languages from this thrift target,
    # we need to mix the language into the worker input file.
    argfile = ctx.actions.declare_file(
        "{}_{}_worker_input".format(label.name, language),
        sibling = jar_output,
    )
    ctx.actions.write(output = argfile, content = worker_content)
    ctx.actions.run(
        executable = ctx.executable._pluck_scrooge_scala,
        inputs = compile_thrifts + include_thrifts + [argfile],
        outputs = [jar_output],
        mnemonic = "ScroogeRule",
        progress_message = "creating scrooge files %s" % ctx.label,
        execution_requirements = {"supports-workers": "1"},
        # when we run with a worker, the `@argfile.path` is removed and passed
        # line by line as arguments in the protobuf. In that case,
        # the rest of the arguments are passed to the process that
        # starts up and stays resident.
        # In either case (worker or not), they will be jvm flags which will
        # be correctly handled since the executable is a jvm app that will
        # consume the flags on startup.
        #arguments = ["--jvm_flag=%s" % flag for flag in ctx.attr.jvm_flags] +
        arguments = ["@" + argfile.path],
    )
def _compiled_jar_file(actions, scrooge_jar):
    """Declare the .jar output next to *scrooge_jar*, derived from its name."""

    # "<name>_scrooge_<lang>.srcjar" -> "<name>_scrooge_<lang>.jar":
    # strip the 6-character "srcjar" suffix and append "jar".
    srcjar_name = scrooge_jar.basename
    jar_name = srcjar_name[:len(srcjar_name) - 6] + "jar"
    return actions.declare_file(jar_name, sibling = scrooge_jar)
def _create_java_info_provider(scrooge_jar, all_deps, output):
    """Build the JavaInfo for a compiled scrooge jar.

    Deps are also re-exported so consumers see the generated API; the same
    jar serves as output and compile jar (no ijar is produced).
    """
    return JavaInfo(
        source_jar = scrooge_jar,
        deps = all_deps,
        runtime_deps = all_deps,
        exports = all_deps,
        output_jar = output,
        compile_jar = output,
    )
def _compile_generated_scala(
        ctx,
        label,
        output,
        scrooge_jar,
        deps_java_info,
        implicit_deps):
    """Compile the generated Scala srcjar *scrooge_jar* into *output*.

    Declares the manifest/stats/diagnostics side outputs next to the srcjar,
    then invokes compile_scala with the merged dependency jars.
    """
    manifest = ctx.actions.declare_file(
        label.name + "_MANIFEST.MF",
        sibling = scrooge_jar,
    )
    write_manifest_file(ctx.actions, manifest, None)

    statsfile = ctx.actions.declare_file(
        label.name + "_scalac.statsfile",
        sibling = scrooge_jar,
    )

    diagnosticsfile = ctx.actions.declare_file(
        label.name + "_scalac.diagnosticsproto",
        sibling = scrooge_jar,
    )

    all_deps = _concat_lists(deps_java_info, implicit_deps)
    merged_deps = java_common.merge(all_deps)

    # this only compiles scala, not the ijar, but we don't
    # want the ijar for generated code anyway: any change
    # in the thrift generally will change the interface and
    # method bodies
    compile_scala(
        ctx,
        label,
        output,
        manifest,
        statsfile,
        diagnosticsfile,
        sources = [],
        cjars = merged_deps.transitive_compile_time_jars,
        all_srcjars = depset([scrooge_jar]),
        transitive_compile_jars = merged_deps.transitive_compile_time_jars,
        plugins = [],
        resource_strip_prefix = "",
        resources = [],
        resource_jars = [],
        labels = {},
        in_scalacopts = [],
        print_compile_time = False,
        expect_java_output = False,
        scalac_jvm_flags = [],
        scalac = ctx.executable._scalac,
        dependency_info = legacy_unclear_dependency_info_for_protobuf_scrooge(ctx),
        unused_dependency_checker_ignored_targets = [],
    )

    return _create_java_info_provider(scrooge_jar, all_deps, output)
def _compile_generated_java(
        ctx,
        label,
        output,
        scrooge_jar,
        deps_java_info,
        implicit_deps):
    """Compile the generated Java srcjar *scrooge_jar* into *output*."""
    all_deps = _concat_lists(deps_java_info, implicit_deps)
    merged_deps = java_common.merge(all_deps)
    compile_java(
        ctx,
        source_jars = [scrooge_jar],
        source_files = [],
        output = output,
        extra_javac_opts = [],
        providers_of_dependencies = [merged_deps],
    )
    return _create_java_info_provider(scrooge_jar, all_deps, output)
def _concat_lists(list1, list2):
    """Return a new list holding the items of *list1* followed by *list2*."""
    return list1 + list2
def _gather_thriftinfo_from_deps(target, ctx):
    """Collect the thrift and Java inputs for *target*.

    Returns (direct ThriftInfo, transitive ThriftInfo, dep JavaInfos,
    implicit compile-classpath JavaInfos).  A ScroogeImport target supplies
    its prebuilt jars instead of aspect-generated ones.
    """
    if ScroogeImport in target:
        # Prebuilt import: no direct sources to compile, jars come as-is.
        target_import = target[ScroogeImport]
        target_ti = target_import.thrift_info
        deps = [target_import.java_info]
        transitive_ti = target_ti
    else:
        target_ti = target[ThriftInfo]
        deps = [d[ScroogeAspectInfo].java_info for d in ctx.rule.attr.deps]
        transitive_ti = merge_thrift_infos(
            [
                d[ScroogeAspectInfo].thrift_info
                for d in ctx.rule.attr.deps
            ] + [target_ti],
        )
    imps = [j[JavaInfo] for j in ctx.attr._implicit_compile_deps]
    return (
        target_ti,
        transitive_ti,
        deps,
        imps,
    )
def _compile_thrift_to_language(target_ti, transitive_ti, language, target, ctx):
    """Calls scrooge to compile thrift to the language specified in `language`.
    Returns the name of the compiled jar."""
    scrooge_file = ctx.actions.declare_file(
        target.label.name + "_scrooge_{}.srcjar".format(language),
    )

    # we sort so the inputs are always the same for caching
    compile_thrifts = sorted(target_ti.srcs.to_list())
    compile_thrift_map = {}
    for ct in compile_thrifts:
        compile_thrift_map[ct] = True
    # Include path = transitive thrifts minus the ones compiled here.
    include_thrifts = sorted([
        trans
        for trans in transitive_ti.transitive_srcs.to_list()
        if trans not in compile_thrift_map
    ])
    _generate_jvm_code(
        ctx,
        target.label,
        compile_thrifts,
        include_thrifts,
        scrooge_file,
        language,
    )
    return scrooge_file
def _common_scrooge_aspect_implementation(target, ctx, language, compiler_function):
    """Aspect implementation to generate code from thrift files in a language of choice, and then compile it.
    Takes in a `language` (either "java" or "scala") and a function to compile the generated sources.
    This aspect is applied to the DAG of thrift_librarys reachable from a deps or a scrooge_scala_library.
    Each thrift_library will be one scrooge invocation, assuming it has some sources.
    """
    (
        target_ti,
        transitive_ti,
        deps,
        imps,
    ) = _gather_thriftinfo_from_deps(target, ctx)
    if target_ti.srcs:
        # Generate sources, then compile them with the supplied function.
        scrooge_file = _compile_thrift_to_language(target_ti, transitive_ti, language, target, ctx)
        output = _compiled_jar_file(ctx.actions, scrooge_file)
        java_info = compiler_function(
            ctx,
            target.label,
            output,
            scrooge_file,
            deps,
            imps,
        )

        return [ScroogeAspectInfo(
            src_jars = depset([scrooge_file]),
            output_files = depset([output]),
            thrift_info = transitive_ti,
            java_info = java_info,
        )]
    else:
        # This target is an aggregation target. Aggregate the java_infos and return.
        return [
            ScroogeAspectInfo(
                src_jars = depset(),
                output_files = depset(),
                thrift_info = transitive_ti,
                java_info = java_common.merge(_concat_lists(deps, imps)),
            ),
        ]
def _scrooge_scala_aspect_impl(target, ctx):
    """Generate and compile Scala from the thrift sources of *target*."""
    return _common_scrooge_aspect_implementation(target, ctx, "scala", _compile_generated_scala)
def _scrooge_java_aspect_impl(target, ctx):
    """Generate and compile Java from the thrift sources of *target*."""
    return _common_scrooge_aspect_implementation(target, ctx, "java", _compile_generated_java)
# Common attributes for both java and scala aspects, needed to generate JVM code from Thrift
common_attrs = {
    # The Scrooge code-generation worker binary.
    "_pluck_scrooge_scala": attr.label(
        executable = True,
        cfg = "exec",
        default = Label("//src/scala/scripts:scrooge_worker"),
        allow_files = True,
    ),
    # Jars every generated target compiles against.
    "_implicit_compile_deps": attr.label_list(
        providers = [JavaInfo],
        default = [
            Label(
                "@io_bazel_rules_scala//twitter_scrooge:aspect_compile_classpath",
            ),
        ],
    ),
}
# Each visited dependency must expose thrift sources or a prebuilt import.
common_aspect_providers = [
    [ThriftInfo],
    [ScroogeImport],
]
# Walks `deps` of thrift_library targets, generating + compiling Scala per node.
scrooge_scala_aspect = aspect(
    implementation = _scrooge_scala_aspect_impl,
    attr_aspects = ["deps"],
    attrs = dicts.add(
        common_attrs,
        {
            # Scala needs its own compiler binary on top of the shared attrs.
            "_scalac": attr.label(
                executable = True,
                cfg = "exec",
                default = Label("@io_bazel_rules_scala//src/java/io/bazel/rulesscala/scalac"),
                allow_files = True,
            ),
        },
    ),
    required_aspect_providers = common_aspect_providers,
    toolchains = [
        "@io_bazel_rules_scala//scala:toolchain_type",
        "@io_bazel_rules_scala//twitter_scrooge/toolchain:scrooge_toolchain_type",
    ],
    incompatible_use_toolchain_transition = True,
)
# Walks `deps` of thrift_library targets, generating + compiling Java per node.
scrooge_java_aspect = aspect(
    implementation = _scrooge_java_aspect_impl,
    attr_aspects = ["deps"],
    attrs = dicts.add(
        common_attrs,
        {
            # Java compilation needs the JDK toolchain and runtime.
            "_java_toolchain": attr.label(default = Label("@bazel_tools//tools/jdk:current_java_toolchain")),
            "_host_javabase": attr.label(
                default = Label("@bazel_tools//tools/jdk:current_java_runtime"),
                cfg = "exec",
            ),
        },
    ),
    required_aspect_providers = common_aspect_providers,
    toolchains = [
        "@io_bazel_rules_scala//scala:toolchain_type",
        "@io_bazel_rules_scala//twitter_scrooge/toolchain:scrooge_toolchain_type",
    ],
    incompatible_use_toolchain_transition = True,
    fragments = ["java"],
)
def _scrooge_jvm_library_impl(ctx):
    """Aggregate the aspect-generated outputs of `deps` plus optional `exports`."""
    aspect_info = merge_scrooge_aspect_info(
        [dep[ScroogeAspectInfo] for dep in ctx.attr.deps],
    )
    if ctx.attr.exports:
        # Re-export the requested targets alongside the generated code.
        exports = [exp[JavaInfo] for exp in ctx.attr.exports]
        exports.append(aspect_info.java_info)
        all_java = java_common.merge(exports)
    else:
        all_java = aspect_info.java_info
    return [
        all_java,
        ScroogeInfo(aspect_info = aspect_info),
        DefaultInfo(files = aspect_info.output_files),
    ]
# Compiles the thrift_librarys in `deps` to Scala via scrooge_scala_aspect.
scrooge_scala_library = rule(
    implementation = _scrooge_jvm_library_impl,
    attrs = {
        "deps": attr.label_list(aspects = [scrooge_scala_aspect]),
        "exports": attr.label_list(providers = [JavaInfo]),
    },
    provides = [DefaultInfo, ScroogeInfo, JavaInfo],
)
scrooge_java_library = rule(
    # They can use the same implementation, since it's just an aggregator for the aspect info.
    implementation = _scrooge_jvm_library_impl,
    attrs = {
        "deps": attr.label_list(aspects = [scrooge_java_aspect]),
        "exports": attr.label_list(providers = [JavaInfo]),
    },
    provides = [DefaultInfo, ScroogeInfo, JavaInfo],
)
def _scrooge_scala_import_impl(ctx):
    """Wrap prebuilt scala jars + their thrift jars in the Scrooge providers."""
    jars_jis = [
        JavaInfo(
            output_jar = scala_jar,
            compile_jar = scala_jar,
        )
        for scala_jar in ctx.files.scala_jars
    ]
    java_info = java_common.merge(
        [imp[JavaInfo] for imp in ctx.attr._implicit_compile_deps] + jars_jis,
    )

    # to make the thrift_info, we only put this in the
    # transitive part
    ti = ThriftInfo(
        srcs = depset(),
        transitive_srcs = depset(ctx.files.thrift_jars),
    )
    return [java_info, ti, ScroogeImport(java_info = java_info, thrift_info = ti)]
# Allows you to consume thrifts and compiled jars from external repos
scrooge_scala_import = rule(
    implementation = _scrooge_scala_import_impl,
    attrs = {
        "thrift_jars": attr.label_list(allow_files = [".jar"]),
        "scala_jars": attr.label_list(allow_files = [".jar"]),
        # Same implicit classpath the aspects use, so imports interoperate.
        "_implicit_compile_deps": attr.label_list(
            providers = [JavaInfo],
            default = [
                Label(
                    "@io_bazel_rules_scala//twitter_scrooge:compile_classpath",
                ),
            ],
        ),
    },
    provides = [ThriftInfo, JavaInfo, ScroogeImport],
    toolchains = ["@io_bazel_rules_scala//twitter_scrooge/toolchain:scrooge_toolchain_type"],
    incompatible_use_toolchain_transition = True,
)
|
load("@bazel_skylib//lib:dicts.bzl", "dicts")
load(
"//scala:scala_cross_version.bzl",
_default_maven_server_urls = "default_maven_server_urls",
)
load(
"//scala/private:common.bzl",
"write_manifest_file",
)
load(
"//scala/private:dependency.bzl",
"legacy_unclear_dependency_info_for_protobuf_scrooge",
)
load(
"//scala/private:rule_impls.bzl",
"compile_java",
"compile_scala",
)
load("@io_bazel_rules_scala//thrift:thrift_info.bzl", "ThriftInfo")
load(
"@io_bazel_rules_scala//thrift:thrift.bzl",
"merge_thrift_infos",
)
load("//third_party/repositories:repositories.bzl", "repositories")
# NOTE(review): ".jar" constant that no visible code reads -- verify usage before deleting.
_jar_extension = ".jar"
def _declare_and_bind(
        label,
        artifact_id,
        external_artifact_id,
        overriden_artifacts,
        maven_servers):
    """Create the //external binding for one Scrooge dependency artifact."""

    # No caller override: declare the default repository for the artifact.
    if not label:
        repositories(
            for_artifact_ids = [
                artifact_id,
            ],
            maven_servers = maven_servers,
            fetch_sources = False,
            overriden_artifacts = overriden_artifacts,
        )
        label = "@" + artifact_id

    # Bind the final label under the stable external name.
    native.bind(
        name = external_artifact_id,
        actual = label,
    )
def twitter_scrooge(
        maven_servers = _default_maven_server_urls(),
        overriden_artifacts = {},
        # These target labels need maven_servers to compute sensible defaults.
        # Therefore we leave them None here.
        libthrift = None,
        scrooge_core = None,
        scrooge_generator = None,
        util_core = None,
        util_logging = None):
    """WORKSPACE macro: declare Scrooge's repositories, bindings and toolchain.

    Any dependency parameter left as None is resolved to the canonical
    artifact fetched from *maven_servers*.
    """
    _declare_and_bind(
        libthrift,
        "libthrift",
        "io_bazel_rules_scala/dependency/thrift/libthrift",
        overriden_artifacts,
        maven_servers,
    )
    _declare_and_bind(
        scrooge_core,
        "io_bazel_rules_scala_scrooge_core",
        "io_bazel_rules_scala/dependency/thrift/scrooge_core",
        overriden_artifacts,
        maven_servers,
    )
    _declare_and_bind(
        scrooge_generator,
        "io_bazel_rules_scala_scrooge_generator",
        "io_bazel_rules_scala/dependency/thrift/scrooge_generator",
        overriden_artifacts,
        maven_servers,
    )
    _declare_and_bind(
        util_core,
        "io_bazel_rules_scala_util_core",
        "io_bazel_rules_scala/dependency/thrift/util_core",
        overriden_artifacts,
        maven_servers,
    )
    _declare_and_bind(
        util_logging,
        "io_bazel_rules_scala_util_logging",
        "io_bazel_rules_scala/dependency/thrift/util_logging",
        overriden_artifacts,
        maven_servers,
    )
    repositories(
        for_artifact_ids = [
            "io_bazel_rules_scala_mustache",  # Mustache is needed to generate java from thrift, and is passed further down.
            "io_bazel_rules_scala_guava",
            "io_bazel_rules_scala_javax_annotation_api",
        ],
        maven_servers = maven_servers,
        fetch_sources = False,
        overriden_artifacts = overriden_artifacts,
    )
    native.bind(
        name = "io_bazel_rules_scala/dependency/thrift/mustache",
        actual = "@io_bazel_rules_scala_mustache",
    )

    # scrooge-generator needs these runtime_deps to generate java from thrift.
    if not native.existing_rule("io_bazel_rules_scala/dependency/scala/guava"):
        native.bind(
            name = "io_bazel_rules_scala/dependency/scala/guava",
            actual = "@io_bazel_rules_scala_guava",
        )

    # This is a shim needed to import `@javax.annotation.Generated` when compiled with jdk11.
    if not native.existing_rule("io_bazel_rules_scala/dependency/thrift/javax_annotation_api"):
        native.bind(
            name = "io_bazel_rules_scala/dependency/thrift/javax_annotation_api",
            actual = "@io_bazel_rules_scala_javax_annotation_api",
        )
    native.register_toolchains("@io_bazel_rules_scala//twitter_scrooge:scrooge_toolchain")
def _colon_paths(data):
    """Colon-join the sorted paths of the files in *data*."""
    ordered = sorted(data)
    return ":".join([item.path for item in ordered])
# Per-node aspect result: thrift sources, srcjars, compiled jars, JavaInfo.
ScroogeAspectInfo = provider(fields = [
    "thrift_info",
    "src_jars",
    "output_files",
    "java_info",
])
# Library-rule provider wrapping the merged aspect info of its deps.
ScroogeInfo = provider(fields = [
    "aspect_info",
])
# Prebuilt import: compiled jars together with their source thrifts.
ScroogeImport = provider(fields = [
    "java_info",
    "thrift_info",
])
def merge_scrooge_aspect_info(scrooges):
    """Merge many ScroogeAspectInfo providers into one."""
    return ScroogeAspectInfo(
        src_jars = depset(transitive = [s.src_jars for s in scrooges]),
        output_files = depset(transitive = [s.output_files for s in scrooges]),
        thrift_info = merge_thrift_infos([s.thrift_info for s in scrooges]),
        java_info = java_common.merge([s.java_info for s in scrooges]),
    )
def _generate_jvm_code(ctx, label, compile_thrifts, include_thrifts, jar_output, language):
    """Write a Scrooge worker request file and run the worker to fill *jar_output*."""

    # bazel worker arguments cannot be empty so we pad to ensure non-empty
    # and drop it off on the other side
    # https://github.com/bazelbuild/bazel/issues/3329
    worker_arg_pad = "_"
    path_content = "\n".join([
        worker_arg_pad + _colon_paths(ps)
        for ps in [compile_thrifts, include_thrifts, [], []]
    ])
    compiler_args = getattr(ctx.rule.attr, "compiler_args", [])

    lang_flag = ["--language", language]
    flags = compiler_args + lang_flag

    worker_content = "{output}\n{paths}\n{flags}".format(
        output = jar_output.path,
        paths = path_content,
        # we could put "include_services" on thrift_info, if needed
        flags = worker_arg_pad + ":".join(flags),
    )

    # Since we may want to generate several languages from this thrift target,
    # we need to mix the language into the worker input file.
    argfile = ctx.actions.declare_file(
        "{}_{}_worker_input".format(label.name, language),
        sibling = jar_output,
    )
    ctx.actions.write(output = argfile, content = worker_content)
    ctx.actions.run(
        executable = ctx.executable._pluck_scrooge_scala,
        inputs = compile_thrifts + include_thrifts + [argfile],
        outputs = [jar_output],
        mnemonic = "ScroogeRule",
        progress_message = "creating scrooge files %s" % ctx.label,
        execution_requirements = {"supports-workers": "1"},
        # when we run with a worker, the `@argfile.path` is removed and passed
        # line by line as arguments in the protobuf. In that case,
        # the rest of the arguments are passed to the process that
        # starts up and stays resident.
        # In either case (worker or not), they will be jvm flags which will
        # be correctly handled since the executable is a jvm app that will
        # consume the flags on startup.
        #arguments = ["--jvm_flag=%s" % flag for flag in ctx.attr.jvm_flags] +
        arguments = ["@" + argfile.path],
    )
def _compiled_jar_file(actions, scrooge_jar):
    """Declare the output .jar sibling to *scrooge_jar* ("...srcjar" -> "...jar")."""
    scrooge_jar_name = scrooge_jar.basename

    # ends with .srcjar, so remove last 6 characters
    without_suffix = scrooge_jar_name[0:len(scrooge_jar_name) - 6]

    # this already ends with _scrooge because that is how scrooge_jar is named
    compiled_jar = without_suffix + "jar"
    return actions.declare_file(compiled_jar, sibling = scrooge_jar)
def _create_java_info_provider(scrooge_jar, all_deps, output):
    """JavaInfo for one compiled scrooge jar; deps are re-exported, no ijar."""
    return JavaInfo(
        source_jar = scrooge_jar,
        deps = all_deps,
        runtime_deps = all_deps,
        exports = all_deps,
        output_jar = output,
        compile_jar = output,
    )
def _compile_generated_scala(
        ctx,
        label,
        output,
        scrooge_jar,
        deps_java_info,
        implicit_deps):
    """Compile the Scrooge-generated Scala srcjar into *output* via compile_scala."""
    manifest = ctx.actions.declare_file(
        label.name + "_MANIFEST.MF",
        sibling = scrooge_jar,
    )
    write_manifest_file(ctx.actions, manifest, None)

    statsfile = ctx.actions.declare_file(
        label.name + "_scalac.statsfile",
        sibling = scrooge_jar,
    )

    diagnosticsfile = ctx.actions.declare_file(
        label.name + "_scalac.diagnosticsproto",
        sibling = scrooge_jar,
    )

    all_deps = _concat_lists(deps_java_info, implicit_deps)
    merged_deps = java_common.merge(all_deps)

    # this only compiles scala, not the ijar, but we don't
    # want the ijar for generated code anyway: any change
    # in the thrift generally will change the interface and
    # method bodies
    compile_scala(
        ctx,
        label,
        output,
        manifest,
        statsfile,
        diagnosticsfile,
        sources = [],
        cjars = merged_deps.transitive_compile_time_jars,
        all_srcjars = depset([scrooge_jar]),
        transitive_compile_jars = merged_deps.transitive_compile_time_jars,
        plugins = [],
        resource_strip_prefix = "",
        resources = [],
        resource_jars = [],
        labels = {},
        in_scalacopts = [],
        print_compile_time = False,
        expect_java_output = False,
        scalac_jvm_flags = [],
        scalac = ctx.executable._scalac,
        dependency_info = legacy_unclear_dependency_info_for_protobuf_scrooge(ctx),
        unused_dependency_checker_ignored_targets = [],
    )

    return _create_java_info_provider(scrooge_jar, all_deps, output)
def _compile_generated_java(
        ctx,
        label,
        output,
        scrooge_jar,
        deps_java_info,
        implicit_deps):
    """Compile the Scrooge-generated Java srcjar into *output* via compile_java."""
    all_deps = _concat_lists(deps_java_info, implicit_deps)
    merged_deps = java_common.merge(all_deps)
    compile_java(
        ctx,
        source_jars = [scrooge_jar],
        source_files = [],
        output = output,
        extra_javac_opts = [],
        providers_of_dependencies = [merged_deps],
    )
    return _create_java_info_provider(scrooge_jar, all_deps, output)
def _concat_lists(list1, list2):
    """Concatenate two lists into a fresh list."""
    combined = list(list1)
    combined.extend(list2)
    return combined
def _gather_thriftinfo_from_deps(target, ctx):
    """Return (direct ThriftInfo, transitive ThriftInfo, dep JavaInfos, implicit JavaInfos)."""
    if ScroogeImport in target:
        # Prebuilt import: its jars stand in for aspect-generated ones.
        target_import = target[ScroogeImport]
        target_ti = target_import.thrift_info
        deps = [target_import.java_info]
        transitive_ti = target_ti
    else:
        target_ti = target[ThriftInfo]
        deps = [d[ScroogeAspectInfo].java_info for d in ctx.rule.attr.deps]
        transitive_ti = merge_thrift_infos(
            [
                d[ScroogeAspectInfo].thrift_info
                for d in ctx.rule.attr.deps
            ] + [target_ti],
        )
    imps = [j[JavaInfo] for j in ctx.attr._implicit_compile_deps]
    return (
        target_ti,
        transitive_ti,
        deps,
        imps,
    )
def _compile_thrift_to_language(target_ti, transitive_ti, language, target, ctx):
    """Calls scrooge to compile thrift to the language specified in `language`.
    Returns the name of the compiled jar."""
    scrooge_file = ctx.actions.declare_file(
        target.label.name + "_scrooge_{}.srcjar".format(language),
    )

    # we sort so the inputs are always the same for caching
    compile_thrifts = sorted(target_ti.srcs.to_list())
    compile_thrift_map = {}
    for ct in compile_thrifts:
        compile_thrift_map[ct] = True
    # The include path is every transitive thrift not compiled directly here.
    include_thrifts = sorted([
        trans
        for trans in transitive_ti.transitive_srcs.to_list()
        if trans not in compile_thrift_map
    ])
    _generate_jvm_code(
        ctx,
        target.label,
        compile_thrifts,
        include_thrifts,
        scrooge_file,
        language,
    )
    return scrooge_file
def _common_scrooge_aspect_implementation(target, ctx, language, compiler_function):
    """Aspect implementation to generate code from thrift files in a language of choice, and then compile it.
    Takes in a `language` (either "java" or "scala") and a function to compile the generated sources.
    This aspect is applied to the DAG of thrift_librarys reachable from a deps or a scrooge_scala_library.
    Each thrift_library will be one scrooge invocation, assuming it has some sources.
    """
    (
        target_ti,
        transitive_ti,
        deps,
        imps,
    ) = _gather_thriftinfo_from_deps(target, ctx)
    if target_ti.srcs:
        # Has direct sources: generate, compile, and report the outputs.
        scrooge_file = _compile_thrift_to_language(target_ti, transitive_ti, language, target, ctx)
        output = _compiled_jar_file(ctx.actions, scrooge_file)
        java_info = compiler_function(
            ctx,
            target.label,
            output,
            scrooge_file,
            deps,
            imps,
        )

        return [ScroogeAspectInfo(
            src_jars = depset([scrooge_file]),
            output_files = depset([output]),
            thrift_info = transitive_ti,
            java_info = java_info,
        )]
    else:
        # This target is an aggregation target. Aggregate the java_infos and return.
        return [
            ScroogeAspectInfo(
                src_jars = depset(),
                output_files = depset(),
                thrift_info = transitive_ti,
                java_info = java_common.merge(_concat_lists(deps, imps)),
            ),
        ]
def _scrooge_scala_aspect_impl(target, ctx):
    """Aspect entry point: generate Scala from thrift and compile it with scalac."""
    return _common_scrooge_aspect_implementation(target, ctx, "scala", _compile_generated_scala)
def _scrooge_java_aspect_impl(target, ctx):
    """Aspect entry point: generate Java from thrift and compile it with javac."""
    return _common_scrooge_aspect_implementation(target, ctx, "java", _compile_generated_java)
# Common attributes for both java and scala aspects, needed to generate JVM code from Thrift
common_attrs = {
    # Worker binary that invokes scrooge to turn .thrift files into a srcjar.
    "_pluck_scrooge_scala": attr.label(
        executable = True,
        cfg = "exec",
        default = Label("//src/scala/scripts:scrooge_worker"),
        allow_files = True,
    ),
    # Compile-time deps implicitly added when compiling the generated code.
    "_implicit_compile_deps": attr.label_list(
        providers = [JavaInfo],
        default = [
            Label(
                "@io_bazel_rules_scala//twitter_scrooge:aspect_compile_classpath",
            ),
        ],
    ),
}

# Providers the aspect accepts from the targets it traverses: plain thrift
# libraries (ThriftInfo) or pre-built imports (ScroogeImport).
common_aspect_providers = [
    [ThriftInfo],
    [ScroogeImport],
]
# Aspect that walks `deps` edges of thrift targets and produces compiled Scala.
scrooge_scala_aspect = aspect(
    implementation = _scrooge_scala_aspect_impl,
    attr_aspects = ["deps"],
    attrs = dicts.add(
        common_attrs,
        {
            # Scala compiler used to compile the scrooge-generated sources.
            "_scalac": attr.label(
                executable = True,
                cfg = "exec",
                default = Label("@io_bazel_rules_scala//src/java/io/bazel/rulesscala/scalac"),
                allow_files = True,
            ),
        },
    ),
    required_aspect_providers = common_aspect_providers,
    toolchains = [
        "@io_bazel_rules_scala//scala:toolchain_type",
        "@io_bazel_rules_scala//twitter_scrooge/toolchain:scrooge_toolchain_type",
    ],
    incompatible_use_toolchain_transition = True,
)

# Java counterpart of the aspect above; additionally needs the java toolchain
# and runtime to compile the generated java sources.
scrooge_java_aspect = aspect(
    implementation = _scrooge_java_aspect_impl,
    attr_aspects = ["deps"],
    attrs = dicts.add(
        common_attrs,
        {
            "_java_toolchain": attr.label(default = Label("@bazel_tools//tools/jdk:current_java_toolchain")),
            "_host_javabase": attr.label(
                default = Label("@bazel_tools//tools/jdk:current_java_runtime"),
                cfg = "exec",
            ),
        },
    ),
    required_aspect_providers = common_aspect_providers,
    toolchains = [
        "@io_bazel_rules_scala//scala:toolchain_type",
        "@io_bazel_rules_scala//twitter_scrooge/toolchain:scrooge_toolchain_type",
    ],
    incompatible_use_toolchain_transition = True,
    fragments = ["java"],
)
def _scrooge_jvm_library_impl(ctx):
    """Merges the ScroogeAspectInfo gathered from `deps` and folds in `exports`.

    Returns JavaInfo (merged with exports, if any), ScroogeInfo wrapping the
    aggregated aspect info, and a DefaultInfo exposing the compiled jars.
    """
    merged = merge_scrooge_aspect_info(
        [d[ScroogeAspectInfo] for d in ctx.attr.deps],
    )

    # By default expose only the aspect's own JavaInfo; when there are exports,
    # merge them together with it into a single provider.
    combined_java = merged.java_info
    if ctx.attr.exports:
        export_infos = [e[JavaInfo] for e in ctx.attr.exports] + [merged.java_info]
        combined_java = java_common.merge(export_infos)

    return [
        combined_java,
        ScroogeInfo(aspect_info = merged),
        DefaultInfo(files = merged.output_files),
    ]
# Thin aggregator rule: the real work happens in scrooge_scala_aspect applied
# over `deps`; the rule implementation just merges and re-exports the results.
scrooge_scala_library = rule(
    implementation = _scrooge_jvm_library_impl,
    attrs = {
        "deps": attr.label_list(aspects = [scrooge_scala_aspect]),
        "exports": attr.label_list(providers = [JavaInfo]),
    },
    provides = [DefaultInfo, ScroogeInfo, JavaInfo],
)

scrooge_java_library = rule(
    # They can use the same implementation, since it's just an aggregator for the aspect info.
    implementation = _scrooge_jvm_library_impl,
    attrs = {
        "deps": attr.label_list(aspects = [scrooge_java_aspect]),
        "exports": attr.label_list(providers = [JavaInfo]),
    },
    provides = [DefaultInfo, ScroogeInfo, JavaInfo],
)
def _scrooge_scala_import_impl(ctx):
    """Wraps pre-built scala jars plus their thrift jars as a ScroogeImport."""
    # Each provided scala jar is exposed directly as its own compile jar
    # (no ijar) since these are prebuilt artifacts.
    jars_jis = [
        JavaInfo(
            output_jar = scala_jar,
            compile_jar = scala_jar,
        )
        for scala_jar in ctx.files.scala_jars
    ]
    java_info = java_common.merge(
        [imp[JavaInfo] for imp in ctx.attr._implicit_compile_deps] + jars_jis,
    )

    # to make the thrift_info, we only put this in the
    # transitive part
    ti = ThriftInfo(
        srcs = depset(),
        transitive_srcs = depset(ctx.files.thrift_jars),
    )
    return [java_info, ti, ScroogeImport(java_info = java_info, thrift_info = ti)]
# Allows you to consume thrifts and compiled jars from external repos
scrooge_scala_import = rule(
    implementation = _scrooge_scala_import_impl,
    attrs = {
        # Jars containing the .thrift IDL files; exposed as transitive srcs.
        "thrift_jars": attr.label_list(allow_files = [".jar"]),
        # Pre-compiled scala jars generated from those thrifts.
        "scala_jars": attr.label_list(allow_files = [".jar"]),
        "_implicit_compile_deps": attr.label_list(
            providers = [JavaInfo],
            default = [
                Label(
                    "@io_bazel_rules_scala//twitter_scrooge:compile_classpath",
                ),
            ],
        ),
    },
    provides = [ThriftInfo, JavaInfo, ScroogeImport],
    toolchains = ["@io_bazel_rules_scala//twitter_scrooge/toolchain:scrooge_toolchain_type"],
    incompatible_use_toolchain_transition = True,
)
|
en
| 0.883026
|
# These target labels need maven_servers to compute sensible defaults. # Therefore we leave them None here. # Mustache is needed to generate java from thrift, and is passed further down. # scrooge-generator needs these runtime_deps to generate java from thrift. # This is a shim needed to import `@javax.annotation.Generated` when compiled with jdk11. # bazel worker arguments cannot be empty so we pad to ensure non-empty # and drop it off on the other side # https://github.com/bazelbuild/bazel/issues/3329 # we could put "include_services" on thrift_info, if needed # Since we may want to generate several languages from this thrift target, # we need to mix the language into the worker input file. # when we run with a worker, the `@argfile.path` is removed and passed # line by line as arguments in the protobuf. In that case, # the rest of the arguments are passed to the process that # starts up and stays resident. # In either case (worker or not), they will be jvm flags which will # be correctly handled since the executable is a jvm app that will # consume the flags on startup. #arguments = ["--jvm_flag=%s" % flag for flag in ctx.attr.jvm_flags] + # ends with .srcjar, so remove last 6 characters # this already ends with _scrooge because that is how scrooge_jar is named # this only compiles scala, not the ijar, but we don't # want the ijar for generated code anyway: any change # in the thrift generally will change the interface and # method bodies Calls scrooge to compile thrift to the language specified in `language`. Returns the name of the compiled jar. # we sort so the inputs are always the same for caching Aspect implementation to generate code from thrift files in a language of choice, and then compile it. Takes in a `language` (either "java" or "scala") and a function to compile the generated sources. This aspect is applied to the DAG of thrift_librarys reachable from a deps or a scrooge_scala_library. 
Each thrift_library will be one scrooge invocation, assuming it has some sources. # This target is an aggregation target. Aggregate the java_infos and return. # Common attributes for both java and scala aspects, needed to generate JVM code from Thrift # They can use the same implementation, since it's just an aggregator for the aspect info. # to make the thrift_info, we only put this in the # transitive part # Allows you to consume thrifts and compiled jars from external repos
| 1.677275
| 2
|
contrib/agdc_workshop_exercises/pqa-finished.py
|
jeremyh/agdc
| 34
|
6629300
|
<filename>contrib/agdc_workshop_exercises/pqa-finished.py<gh_stars>10-100
'''
Created on 21/02/2013
@author: u76345
'''
import os
import sys
import logging
import re
import numpy
from datetime import datetime, time
from osgeo import gdal
from agdc.stacker import Stacker
from EOtools.utils import log_multiline
# Set top level standard output
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)

# Module logger; only set the level when it has not been configured elsewhere.
logger = logging.getLogger(__name__)
if not logger.level:
    logger.setLevel(logging.DEBUG)  # Default logging level for all modules
logger.addHandler(console_handler)
# Creates derived datasets by getting masking all NBAR datasets in a stack with PQA data
class PQAStacker(Stacker):
    """ Subclass of Stacker
    Used to implement specific functionality to create stacks of derived datasets.
    """

    def derive_datasets(self, input_dataset_dict, stack_output_info, tile_type_info):
        """Writes one PQA-masked, single-band GeoTIFF per band of the NBAR tile.

        Args:
            input_dataset_dict: dict keyed by processing level (at least 'NBAR'
                and 'PQA'); each value is a tile metadata dict that must
                contain 'tile_pathname'.
            stack_output_info: stack-level info passed by the Stacker
                framework (not used directly in this method).
            tile_type_info: dict describing the tile format; keys used here
                are 'file_format', 'file_extension' and 'format_options'.

        Returns:
            dict mapping each per-band VRT stack pathname to the metadata
            dict of the masked tile written for that band.
        """
        assert type(input_dataset_dict) == dict, 'input_dataset_dict must be a dict'

        log_multiline(logger.debug, input_dataset_dict, 'input_dataset_dict', '\t')

        # Figure out our input/output files
        nbar_dataset_path = input_dataset_dict['NBAR']['tile_pathname']
        nbar_dataset = gdal.Open(nbar_dataset_path)
        assert nbar_dataset, 'Unable to open dataset %s' % nbar_dataset
        total_bands = nbar_dataset.RasterCount
        logger.debug('Opened NBAR dataset %s', nbar_dataset_path)

        # Get the pixel mask as a single numpy array
        # Be mindful of memory usage, should be fine in this instance
        pqa_mask = self.get_pqa_mask(input_dataset_dict['PQA']['tile_pathname'])

        # Instead of receiving one entry, this will have a number of entries = to the number of bands
        output_dataset_dict = {}

        # Instead of creating 1 file with many bands
        # Let's create many files with a single band
        for index in range(1, total_bands + 1):
            # Derive the per-band output tile name from the input tile name.
            output_tile_path = os.path.join(self.output_dir, re.sub('\.\w+$',
                                                                    '_pqa_masked_band_%s%s' % (index, tile_type_info['file_extension']),
                                                                    os.path.basename(nbar_dataset_path)))
            output_stack_path = os.path.join(self.output_dir, 'pqa_masked_band_%s.vrt' % (index))

            # Copy metadata for eventual inclusion in stack file output
            # This could also be written to the output tile if required
            output_dataset_info = dict(input_dataset_dict['NBAR'])
            output_dataset_info['tile_pathname'] = output_tile_path  # This is the most important modification - used to find
            output_dataset_info['band_name'] = 'NBAR band %s with PQA mask applied' % (index)
            output_dataset_info['band_tag'] = 'NBAR-PQA-%s' % (index)
            output_dataset_info['tile_layer'] = 1

            # Create a new geotiff for the masked output
            gdal_driver = gdal.GetDriverByName(tile_type_info['file_format'])
            output_dataset = gdal_driver.Create(output_tile_path,
                                                nbar_dataset.RasterXSize, nbar_dataset.RasterYSize,
                                                1, nbar_dataset.GetRasterBand(index).DataType,
                                                tile_type_info['format_options'].split(','))
            assert output_dataset, 'Unable to open output dataset %s' % output_dataset
            output_dataset.SetGeoTransform(nbar_dataset.GetGeoTransform())
            output_dataset.SetProjection(nbar_dataset.GetProjection())

            # Mask our band (each band is a numpy array of values)
            input_band = nbar_dataset.GetRasterBand(index)
            input_band_data = input_band.ReadAsArray()

            # Apply the mask in place on input_band_data
            no_data_value = -32767
            self.apply_pqa_mask(input_band_data, pqa_mask, no_data_value)

            # Write the data as a new band
            output_band = output_dataset.GetRasterBand(1)
            output_band.WriteArray(input_band_data)
            output_band.SetNoDataValue(no_data_value)
            output_band.FlushCache()

            # This is not strictly necessary - copy metadata to output dataset
            output_dataset_metadata = nbar_dataset.GetMetadata()
            if output_dataset_metadata:
                output_dataset.SetMetadata(output_dataset_metadata)
                output_dataset.FlushCache()

            logger.info('Finished writing %s', output_tile_path)

            output_dataset_dict[output_stack_path] = output_dataset_info

        log_multiline(logger.debug, output_dataset_dict, 'output_dataset_dict', '\t')
        # NOTE(review): the returned dict is keyed by the VRT stack path the
        # Stacker framework presumably uses to build temporal stacks — confirm
        # against Stacker.stack_derived.
        return output_dataset_dict
# This is the main function when this script is directly executed - You can mostly
# ignore it's contents. The bulk of the "interesting work" is in the above class
if __name__ == '__main__':

    def date2datetime(input_date, time_offset=time.min):
        # Combines a date with a time-of-day offset; passes falsy dates through
        # as None so unset start/end dates remain open-ended.
        if not input_date:
            return None
        return datetime.combine(input_date, time_offset)

    # Stacker class takes care of command line parameters
    stacker = PQAStacker()

    if stacker.debug:
        console_handler.setLevel(logging.DEBUG)

    # Check for required command line parameters
    assert (stacker.x_index and stacker.y_index), 'You must specify Tile X/Y-index (-x/-y or --x_index/--y_index)'
    assert stacker.output_dir, 'Output directory not specified (-o or --output)'

    # Build the temporal stacks of derived (PQA-masked) datasets for the tile;
    # start dates snap to midnight, end dates to end-of-day.
    stack_info_dict = stacker.stack_derived(x_index=stacker.x_index,
                                            y_index=stacker.y_index,
                                            stack_output_dir=stacker.output_dir,
                                            start_datetime=date2datetime(stacker.start_date, time.min),
                                            end_datetime=date2datetime(stacker.end_date, time.max),
                                            satellite=stacker.satellite,
                                            sensor=stacker.sensor)

    log_multiline(logger.debug, stack_info_dict, 'stack_info_dict', '\t')

    logger.info('Finished creating %d temporal stack files in %s.', len(stack_info_dict), stacker.output_dir)
|
<filename>contrib/agdc_workshop_exercises/pqa-finished.py<gh_stars>10-100
'''
Created on 21/02/2013
@author: u76345
'''
import os
import sys
import logging
import re
import numpy
from datetime import datetime, time
from osgeo import gdal
from agdc.stacker import Stacker
from EOtools.utils import log_multiline
# Set top level standard output
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
logger = logging.getLogger(__name__)
if not logger.level:
logger.setLevel(logging.DEBUG) # Default logging level for all modules
logger.addHandler(console_handler)
# Creates derived datasets by getting masking all NBAR datasets in a stack with PQA data
class PQAStacker(Stacker):
""" Subclass of Stacker
Used to implement specific functionality to create stacks of derived datasets.
"""
def derive_datasets(self, input_dataset_dict, stack_output_info, tile_type_info):
assert type(input_dataset_dict) == dict, 'input_dataset_dict must be a dict'
log_multiline(logger.debug, input_dataset_dict, 'input_dataset_dict', '\t')
# Figure out our input/output files
nbar_dataset_path = input_dataset_dict['NBAR']['tile_pathname']
nbar_dataset = gdal.Open(nbar_dataset_path)
assert nbar_dataset, 'Unable to open dataset %s' % nbar_dataset
total_bands = nbar_dataset.RasterCount
logger.debug('Opened NBAR dataset %s', nbar_dataset_path)
# Get the pixel mask as a single numpy array
# Be mindful of memory usage, should be fine in this instance
pqa_mask = self.get_pqa_mask(input_dataset_dict['PQA']['tile_pathname'])
# Instead of receiving one entry, this will have a number of entries = to the number of bands
output_dataset_dict = {}
# Instead of creating 1 file with many bands
# Let's create many files with a single band
for index in range(1, total_bands + 1):
output_tile_path = os.path.join(self.output_dir, re.sub('\.\w+$',
'_pqa_masked_band_%s%s' % (index, tile_type_info['file_extension']),
os.path.basename(nbar_dataset_path)))
output_stack_path = os.path.join(self.output_dir, 'pqa_masked_band_%s.vrt' % (index))
# Copy metadata for eventual inclusion in stack file output
# This could also be written to the output tile if required
output_dataset_info = dict(input_dataset_dict['NBAR'])
output_dataset_info['tile_pathname'] = output_tile_path # This is the most important modification - used to find
output_dataset_info['band_name'] = 'NBAR band %s with PQA mask applied' % (index)
output_dataset_info['band_tag'] = 'NBAR-PQA-%s' % (index)
output_dataset_info['tile_layer'] = 1
# Create a new geotiff for the masked output
gdal_driver = gdal.GetDriverByName(tile_type_info['file_format'])
output_dataset = gdal_driver.Create(output_tile_path,
nbar_dataset.RasterXSize, nbar_dataset.RasterYSize,
1, nbar_dataset.GetRasterBand(index).DataType,
tile_type_info['format_options'].split(','))
assert output_dataset, 'Unable to open output dataset %s' % output_dataset
output_dataset.SetGeoTransform(nbar_dataset.GetGeoTransform())
output_dataset.SetProjection(nbar_dataset.GetProjection())
# Mask our band (each band is a numpy array of values)
input_band = nbar_dataset.GetRasterBand(index)
input_band_data = input_band.ReadAsArray()
# Apply the mask in place on input_band_data
no_data_value = -32767
self.apply_pqa_mask(input_band_data, pqa_mask, no_data_value)
# Write the data as a new band
output_band = output_dataset.GetRasterBand(1)
output_band.WriteArray(input_band_data)
output_band.SetNoDataValue(no_data_value)
output_band.FlushCache()
# This is not strictly necessary - copy metadata to output dataset
output_dataset_metadata = nbar_dataset.GetMetadata()
if output_dataset_metadata:
output_dataset.SetMetadata(output_dataset_metadata)
output_dataset.FlushCache()
logger.info('Finished writing %s', output_tile_path)
output_dataset_dict[output_stack_path] = output_dataset_info
log_multiline(logger.debug, output_dataset_dict, 'output_dataset_dict', '\t')
return output_dataset_dict
# This is the main function when this script is directly executed - You can mostly
# ignore it's contents. The bulk of the "interesting work" is in the above class
if __name__ == '__main__':
def date2datetime(input_date, time_offset=time.min):
if not input_date:
return None
return datetime.combine(input_date, time_offset)
# Stacker class takes care of command line parameters
stacker = PQAStacker()
if stacker.debug:
console_handler.setLevel(logging.DEBUG)
# Check for required command line parameters
assert (stacker.x_index and stacker.y_index), 'You must specify Tile X/Y-index (-x/-y or --x_index/--y_index)'
assert stacker.output_dir, 'Output directory not specified (-o or --output)'
stack_info_dict = stacker.stack_derived(x_index=stacker.x_index,
y_index=stacker.y_index,
stack_output_dir=stacker.output_dir,
start_datetime=date2datetime(stacker.start_date, time.min),
end_datetime=date2datetime(stacker.end_date, time.max),
satellite=stacker.satellite,
sensor=stacker.sensor)
log_multiline(logger.debug, stack_info_dict, 'stack_info_dict', '\t')
logger.info('Finished creating %d temporal stack files in %s.', len(stack_info_dict), stacker.output_dir)
|
en
| 0.849941
|
Created on 21/02/2013 @author: u76345 # Set top level standard output # Default logging level for all modules # Creates derived datasets by getting masking all NBAR datasets in a stack with PQA data Subclass of Stacker Used to implement specific functionality to create stacks of derived datasets. # Figure out our input/output files # Get the pixel mask as a single numpy array # Be mindful of memory usage, should be fine in this instance # Instead of receiving one entry, this will have a number of entries = to the number of bands # Instead of creating 1 file with many bands # Let's create many files with a single band # Copy metadata for eventual inclusion in stack file output # This could also be written to the output tile if required # This is the most important modification - used to find # Create a new geotiff for the masked output # Mask our band (each band is a numpy array of values) # Apply the mask in place on input_band_data # Write the data as a new band # This is not strictly necessary - copy metadata to output dataset # This is the main function when this script is directly executed - You can mostly # ignore it's contents. The bulk of the "interesting work" is in the above class # Stacker class takes care of command line parameters # Check for required command line parameters
| 2.100486
| 2
|
tensorflow_data_validation/utils/display_util.py
|
tensorflow/data-validation
| 621
|
6629301
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for displaying TFDV outputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import sys
from typing import Dict, List, Optional, Text, Tuple, Union
import pandas as pd
from tensorflow_data_validation import types
from tensorflow_data_validation.utils import stats_util
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
try:
  # pylint: disable=g-import-not-at-top
  from IPython.display import display
  from IPython.display import HTML
except ImportError as e:

  # Fallback stand-ins so this module still imports without IPython installed;
  # visualization calls then print a notice instead of rendering.
  def display(unused_input):
    print('IPython is not installed. Unable to display.')

  def HTML(s):  # pylint: disable=invalid-name
    return s

  sys.stderr.write('Unable to import IPython: {}. \n'
                   'TFDV visualization APIs will not function. To use '
                   'visualization features, make sure IPython is installed, or '
                   'install TFDV using '
                   '"pip install tensorflow-data-validation[visualization]"\n'
                   .format(e))
# Name of the custom statistic carrying natural-language stats, and the keys
# of the values nested inside it (presumably consumed by the NL statistics
# dataframe helpers further down this module — confirm against their usage).
_NL_CUSTOM_STATS_NAME = 'nl_statistics'
_TOKEN_NAME_KEY = 'token_name'
_FREQUENCY_KEY = 'frequency'
_FRACTION_OF_SEQ_KEY = 'fraction_of_sequences'
_PER_SEQ_MIN_FREQ_KEY = 'per_sequence_min_frequency'
_PER_SEQ_MAX_FREQ_KEY = 'per_sequence_max_frequency'
_PER_SEQ_AVG_FREQ_KEY = 'per_sequence_avg_frequency'
_POSITIONS_KEY = 'positions'
def _add_quotes(input_str: types.FeatureName) -> types.FeatureName:
return "'" + input_str.replace("'", "\\'") + "'"
def get_schema_dataframe(
    schema: schema_pb2.Schema) -> Tuple[pd.DataFrame, pd.DataFrame]:
  """Returns a tuple of DataFrames containing the input schema information.

  Args:
    schema: A Schema protocol buffer.

  Returns:
    A tuple of DataFrames containing the features and domains of the schema.

  Raises:
    TypeError: If schema is not a Schema proto.
  """
  if not isinstance(schema, schema_pb2.Schema):
    raise TypeError('schema is of type %s, should be a Schema proto.' %
                    type(schema).__name__)

  # Extract all the string domains at the schema level.
  domain_rows = []
  for domain in schema.string_domain:
    domain_rows.append(
        [_add_quotes(domain.name),
         ', '.join(_add_quotes(v) for v in domain.value)])

  feature_rows = []
  # Iterate over the features in the schema and extract the properties of each
  # feature.
  for feature in schema.feature:
    # Extract the presence information of the feature.
    if feature.HasField('presence'):
      # min_fraction == 1.0 means the feature must appear in every example.
      if feature.presence.min_fraction == 1.0:
        feature_presence = 'required'
      else:
        feature_presence = 'optional'
    else:
      feature_presence = ''
    # Extract the valency information of the feature.
    valency = ''
    if feature.HasField('value_count'):
      if (feature.value_count.min == feature.value_count.max and
          feature.value_count.min == 1):
        valency = 'single'
      else:
        # Render as an interval, e.g. '[1,5]' or '[0,inf)'.
        min_value_count = ('[%d' % feature.value_count.min
                           if feature.value_count.HasField('min') else '[0')
        max_value_count = ('%d]' % feature.value_count.max
                           if feature.value_count.HasField('max') else 'inf)')
        valency = min_value_count + ',' + max_value_count
    # Extract the feature type.
    feature_type = schema_pb2.FeatureType.Name(feature.type)
    # If the feature has a string domain, treat it as a string feature.
    if feature_type == 'BYTES' and (feature.HasField('domain') or
                                    feature.HasField('string_domain')):
      feature_type = 'STRING'

    # Extract the domain (if any) of the feature.
    def combine_min_max_strings(min_string, max_string):
      # Joins the optional min/max bound descriptions; '-' means no bounds.
      if min_string is not None and max_string is not None:
        domain_string = min_string + '; ' + max_string
      elif min_string is not None:
        domain_string = min_string
      elif max_string is not None:
        domain_string = max_string
      else:
        domain_string = '-'
      return domain_string

    domain = '-'
    if feature.HasField('domain'):
      domain = _add_quotes(feature.domain)
    elif feature.HasField('int_domain'):
      min_string = ('min: %d' % feature.int_domain.min
                    if feature.int_domain.HasField('min') else None)
      max_string = ('max: %d' % feature.int_domain.max
                    if feature.int_domain.HasField('max') else None)
      domain = combine_min_max_strings(min_string, max_string)
    elif feature.HasField('float_domain'):
      # Float bounds default to +/-inf unless infinities are disallowed.
      if feature.float_domain.HasField('min'):
        min_string = 'min: %f' % feature.float_domain.min
      elif feature.float_domain.disallow_inf:
        min_string = None
      else:
        min_string = 'min: -inf'
      if feature.float_domain.HasField('max'):
        max_string = 'max: %f' % feature.float_domain.max
      elif feature.float_domain.disallow_inf:
        max_string = None
      else:
        max_string = 'max: inf'
      domain = combine_min_max_strings(min_string, max_string)
    elif feature.HasField('string_domain'):
      # Inline (per-feature) string domains also get a row in the domains
      # table, named after the feature when the domain itself is unnamed.
      domain = _add_quotes(feature.string_domain.name if
                           feature.string_domain.name else
                           feature.name + '_domain')
      domain_rows.append([domain,
                          ', '.join(_add_quotes(v) for v in
                                    feature.string_domain.value)])
    feature_rows.append(
        [_add_quotes(feature.name), feature_type, feature_presence, valency,
         domain])

  features = pd.DataFrame(
      feature_rows,
      columns=['Feature name', 'Type', 'Presence', 'Valency',
               'Domain']).set_index('Feature name')
  domains = pd.DataFrame(
      domain_rows, columns=['Domain', 'Values']).set_index('Domain')
  return features, domains
def display_schema(schema: schema_pb2.Schema) -> None:
  """Displays the input schema (for use in a Jupyter notebook).

  Args:
    schema: A Schema protocol buffer.
  """
  features_df, domains_df = get_schema_dataframe(schema)
  display(features_df)
  # Do not truncate columns. Use the fully qualified option name: partial
  # matching of the name ('max_colwidth') is fragile and raises once pandas
  # grows another option containing the same substring.
  if not domains_df.empty:
    pd.set_option('display.max_colwidth', None)
    display(domains_df)
def get_anomalies_dataframe(anomalies: anomalies_pb2.Anomalies) -> pd.DataFrame:
  """Returns a DataFrame containing the input anomalies.

  Args:
    anomalies: An Anomalies protocol buffer.

  Returns:
    A DataFrame containing the input anomalies, or an empty DataFrame if there
    are no anomalies.

  Raises:
    TypeError: If anomalies is not an Anomalies proto.
  """
  if not isinstance(anomalies, anomalies_pb2.Anomalies):
    raise TypeError('anomalies is of type %s, should be an Anomalies proto.' %
                    type(anomalies).__name__)

  anomaly_rows = []
  for feature_name, anomaly_info in anomalies.anomaly_info.items():
    anomaly_rows.append([
        _add_quotes(feature_name), anomaly_info.short_description,
        anomaly_info.description
    ])
  if anomalies.HasField('dataset_anomaly_info'):
    # Dataset-level anomalies get a synthetic feature name so they can share
    # the same table as per-feature anomalies.
    anomaly_rows.append([
        '[dataset anomaly]', anomalies.dataset_anomaly_info.short_description,
        anomalies.dataset_anomaly_info.description
    ])

  # Construct a DataFrame consisting of the anomalies and display it.
  anomalies_df = pd.DataFrame(
      anomaly_rows,
      columns=[
          'Feature name', 'Anomaly short description',
          'Anomaly long description'
      ]).set_index('Feature name')
  # Do not truncate columns. Use the fully qualified option name: partial
  # matching of the name ('max_colwidth') is fragile and raises once pandas
  # grows another option containing the same substring.
  pd.set_option('display.max_colwidth', None)
  return anomalies_df
def display_anomalies(anomalies: anomalies_pb2.Anomalies) -> None:
  """Displays the input anomalies (for use in a Jupyter notebook).

  Shows the anomalies table when there is at least one anomaly; otherwise
  renders a green "no anomalies" banner.

  Args:
    anomalies: An Anomalies protocol buffer.
  """
  df = get_anomalies_dataframe(anomalies)
  if not df.empty:
    display(df)
  else:
    display(HTML('<h4 style="color:green;">No anomalies found.</h4>'))
def _project_statistics(
    statistics: statistics_pb2.DatasetFeatureStatisticsList,
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> statistics_pb2.DatasetFeatureStatisticsList:
  """Project statistics proto based on allowlist and denylist features.

  Args:
    statistics: A DatasetFeatureStatisticsList protocol buffer.
    allowlist_features: If set, only these feature paths are kept.
    denylist_features: If allowlist is unset and this is set, these feature
      paths are dropped and everything else is kept.

  Returns:
    The input proto itself when no filtering is requested, otherwise a new
    proto with the feature statistics filtered accordingly.
  """
  # No filtering requested: return the input unchanged (not a copy).
  if allowlist_features is None and denylist_features is None:
    return statistics
  result = statistics_pb2.DatasetFeatureStatisticsList()
  for dataset_stats in statistics.datasets:
    # Copy all dataset-level fields, then rebuild the feature list filtered.
    result_dataset_stats = result.datasets.add()
    result_dataset_stats.MergeFrom(dataset_stats)
    del result_dataset_stats.features[:]
    if allowlist_features is not None:
      # Keep only the explicitly allowlisted feature paths.
      allowlist_features = set(allowlist_features)
      for feature in dataset_stats.features:
        if types.FeaturePath.from_proto(feature.path) in allowlist_features:
          result_dataset_stats.features.add().MergeFrom(feature)
    else:
      # Keep everything except the denylisted feature paths.
      denylist_features = set(denylist_features)
      for feature in dataset_stats.features:
        if types.FeaturePath.from_proto(feature.path) in denylist_features:
          continue
        result_dataset_stats.features.add().MergeFrom(feature)
  return result
def _get_combined_statistics(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> statistics_pb2.DatasetFeatureStatisticsList:
  """Get combined dataset statistics list proto.

  Merges the (optionally filtered) lhs and rhs statistics into a single
  DatasetFeatureStatisticsList with one named dataset per side, for use by
  the Facets visualization.

  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList to compare with.
    lhs_name: Name for the lhs dataset (overridden by its own name if set).
    rhs_name: Name for the rhs dataset (overridden by its own name if set).
    allowlist_features: Features to keep; see _project_statistics.
    denylist_features: Features to drop; see _project_statistics.

  Raises:
    TypeError: If either input is not a DatasetFeatureStatisticsList proto.
    ValueError: If either input does not contain exactly one dataset.
  """
  if not isinstance(lhs_statistics,
                    statistics_pb2.DatasetFeatureStatisticsList):
    raise TypeError(
        'lhs_statistics is of type %s, should be '
        'a DatasetFeatureStatisticsList proto.' % type(lhs_statistics).__name__)

  if not lhs_statistics.datasets:
    raise ValueError('lhs_statistics proto contains no dataset.')

  if len(lhs_statistics.datasets) != 1:
    raise ValueError('lhs_statistics proto contains multiple datasets. Only '
                     'one dataset is currently supported.')

  # A name embedded in the proto wins over the caller-supplied name.
  if lhs_statistics.datasets[0].name:
    lhs_name = lhs_statistics.datasets[0].name

  # Add lhs stats.
  lhs_statistics = _project_statistics(
      lhs_statistics, allowlist_features, denylist_features)
  combined_statistics = statistics_pb2.DatasetFeatureStatisticsList()
  lhs_stats_copy = combined_statistics.datasets.add()
  lhs_stats_copy.MergeFrom(lhs_statistics.datasets[0])

  if rhs_statistics is not None:
    if not isinstance(rhs_statistics,
                      statistics_pb2.DatasetFeatureStatisticsList):
      raise TypeError('rhs_statistics is of type %s, should be a '
                      'DatasetFeatureStatisticsList proto.'
                      % type(rhs_statistics).__name__)
    if len(rhs_statistics.datasets) != 1:
      raise ValueError('rhs_statistics proto contains multiple datasets. Only '
                       'one dataset is currently supported.')

    if rhs_statistics.datasets[0].name:
      rhs_name = rhs_statistics.datasets[0].name

    # If we have same name, revert to default names.
    if lhs_name == rhs_name:
      lhs_name, rhs_name = 'lhs_statistics', 'rhs_statistics'

    # Add rhs stats.
    rhs_statistics = _project_statistics(
        rhs_statistics, allowlist_features, denylist_features)
    rhs_stats_copy = combined_statistics.datasets.add()
    rhs_stats_copy.MergeFrom(rhs_statistics.datasets[0])
    rhs_stats_copy.name = rhs_name

  # Update lhs name.
  lhs_stats_copy.name = lhs_name
  return combined_statistics
def get_statistics_html(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> Text:
  """Build the HTML for visualizing the input statistics using Facets.

  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to
      compare with lhs_statistics.
    lhs_name: Name of the lhs_statistics dataset.
    rhs_name: Name of the rhs_statistics dataset.
    allowlist_features: Set of features to be visualized.
    denylist_features: Set of features to ignore for visualization.

  Returns:
    HTML to be embedded for visualization.

  Raises:
    TypeError: If the input argument is not of the expected type.
    ValueError: If the input statistics protos does not have only one dataset.
  """
  combined_statistics = _get_combined_statistics(
      lhs_statistics, rhs_statistics, lhs_name, rhs_name, allowlist_features,
      denylist_features)
  # Serialize the combined proto and base64-encode it so it can be embedded
  # directly into the HTML as the facets-overview input.
  protostr = base64.b64encode(
      combined_statistics.SerializeToString()).decode('utf-8')

  # pylint: disable=line-too-long,anomalous-backslash-in-string
  # Note that in the html template we currently assign a temporary id to the
  # facets element and then remove it once we have appended the serialized proto
  # string to the element. We do this to avoid any collision of ids when
  # displaying multiple facets output in the notebook.
  #
  # Note that a string literal including '</script>' in a <script> tag needs to
  # escape it as <\/script> to avoid early closing the wrapping <script> tag.
  html_template = """<iframe id='facets-iframe' width="100%" height="500px"></iframe>
        <script>
        facets_iframe = document.getElementById('facets-iframe');
        facets_html = '<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"><\/script><link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/master/facets-dist/facets-jupyter.html"><facets-overview proto-input="protostr"></facets-overview>';
        facets_iframe.srcdoc = facets_html;
        facets_iframe.id = "";
        setTimeout(() => {
          facets_iframe.setAttribute('height', facets_iframe.contentWindow.document.body.offsetHeight + 'px')
        }, 1500)
        </script>"""
  # pylint: enable=line-too-long
  # The literal placeholder 'protostr' in the template is substituted with the
  # actual base64 payload here.
  html = html_template.replace('protostr', protostr)
  return html
def visualize_statistics(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None) -> None:
  """Visualize the input statistics using Facets.

  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to
      compare with lhs_statistics.
    lhs_name: Name of the lhs_statistics dataset.
    rhs_name: Name of the rhs_statistics dataset.
    allowlist_features: Set of features to be visualized.
    denylist_features: Set of features to ignore for visualization.

  Raises:
    TypeError: If the input argument is not of the expected type.
    ValueError: If the input statistics protos does not have only one dataset,
      or if both allowlist_features and denylist_features are specified.
  """
  # Validate with an explicit ValueError instead of `assert`: asserts are
  # stripped under `python -O`, and ValueError matches the documented
  # exception contract of this function.
  if allowlist_features and denylist_features:
    raise ValueError(
        'Only specify one of allowlist_features and denylist_features.')
  html = get_statistics_html(lhs_statistics, rhs_statistics, lhs_name, rhs_name,
                             allowlist_features, denylist_features)
  display(HTML(html))
def compare_slices(statistics: statistics_pb2.DatasetFeatureStatisticsList,
                   lhs_slice_key: Text, rhs_slice_key: Text):
  """Compare statistics of two slices using Facets.

  Args:
    statistics: A DatasetFeatureStatisticsList protocol buffer.
    lhs_slice_key: Slice key of the first slice.
    rhs_slice_key: Slice key of the second slice.

  Raises:
    ValueError: If the input statistics proto does not have the specified slice
      statistics.
  """
  # Pull each slice's statistics out of the sliced proto and reuse the
  # two-dataset visualization, labelling each side with its slice key.
  visualize_statistics(
      stats_util.get_slice_stats(statistics, lhs_slice_key),
      stats_util.get_slice_stats(statistics, rhs_slice_key),
      lhs_name=lhs_slice_key,
      rhs_name=rhs_slice_key)
def get_natural_language_statistics_dataframes(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> Optional[Dict[str, Dict[Union[int, str], Union[Dict[str, pd.DataFrame],
                                                    pd.DataFrame]]]]:
  """Gets the `NaturalLanguageStatistics` as a dict of pandas.DataFrame.

  Each pd.DataFrame can be fed into a plot with little to no manipulation.
  For example, to plot the `token_length_histogram` in plot.ly:

  ```
  import pandas as pd
  import plotly
  import tensorflow_data_validation as tfdv
  from tensorflow_data_validation.utils import display_util as tfdv_display_util

  data = pd.DataFrame.from_dict({"col": [1, 2, 3]})
  statistics = tfdv.generate_statistics_from_dataframe(data)
  df = tfdv_display_util.get_natural_language_statistics_dataframes(statistics)
  hist, bin_edges = np.histogram(df[ds_name][feature_name][
      'token_length_histogram']['high_values'])
  fig = plotly.graph_objs.Figure(data=[
      plotly.graph_objs.Bar(x=bin_edges, y=hist, name='Histogram'),
  ])
  ```

  The resulting dict contains `token_length_histogram` and each token name as
  its keys. For each token, the data frame represents a list of stats as well
  as the token's positions histogram.

  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to
      compare with lhs_statistics.
    lhs_name: Name of the lhs_statistics dataset.
    rhs_name: Name of the rhs_statistics dataset.
    allowlist_features: Set of features to be visualized.
    denylist_features: Set of features to ignore for visualization.

  Returns:
    A dict of pandas data frames. Returns None if natural language statistics
    does not exist in the statistics proto.
  """
  # Validate, project and merge the lhs/rhs protos into a single list proto.
  combined_statistics = _get_combined_statistics(lhs_statistics, rhs_statistics,
                                                 lhs_name, rhs_name,
                                                 allowlist_features,
                                                 denylist_features)
  # Extract the per-dataset, per-feature NaturalLanguageStatistics payloads.
  nlp_stats = _get_natural_language_statistics(combined_statistics)
  if not nlp_stats:
    return None
  # Convert each feature's NL stats into two DataFrames: one for the token
  # length histogram and one with a row of statistics per token.
  result = {}
  for ds_name, features_dict in nlp_stats.items():
    result[ds_name] = {}
    for feature_name, nlp_stat in features_dict.items():
      result[ds_name][feature_name] = {
          'token_length_histogram':
              _get_histogram_dataframe(nlp_stat.token_length_histogram),
          'token_statistics':
              _get_token_statistics(list(nlp_stat.token_statistics))
      }
  return result
def _get_natural_language_statistics(
    statistics: statistics_pb2.DatasetFeatureStatisticsList
) -> Dict[str, Dict[str, statistics_pb2.NaturalLanguageStatistics]]:
  """Gets the Natural Language stat out of the custom statistic.

  Args:
    statistics: A DatasetFeatureStatisticsList protocol buffer.

  Returns:
    A mapping of dataset name -> feature name -> unpacked
    NaturalLanguageStatistics. Unnamed datasets and datasets/features without
    an 'nl_statistics' custom stat are omitted.
  """
  result = {}
  for dataset in statistics.datasets:
    # Unnamed datasets cannot be keyed in the result dict, so skip them.
    if not dataset.name:
      continue
    features_dict = {}
    for feature in dataset.features:
      for custom_stats in feature.custom_stats:
        if custom_stats.name == _NL_CUSTOM_STATS_NAME:
          # The NL stats are packed in a google.protobuf.Any; unpack in place.
          nlp_stat = statistics_pb2.NaturalLanguageStatistics()
          custom_stats.any.Unpack(nlp_stat)
          # Prefer the flat feature name; fall back to the structured path.
          if feature.name:
            feature_name = feature.name
          else:
            feature_name = str(types.FeaturePath.from_proto(feature.path))
          features_dict[feature_name] = nlp_stat
    if features_dict:
      result[dataset.name] = features_dict
  return result
def _get_token_statistics(
    token_statistic: List[
        statistics_pb2.NaturalLanguageStatistics.TokenStatistics]
) -> pd.DataFrame:
  """Returns a dict of each token's stats.

  Args:
    token_statistic: A list of TokenStatistics protos.

  Returns:
    A DataFrame with one row per token: its name, frequency, fraction of
    sequences, per-sequence min/max/avg frequency, and a nested DataFrame of
    its positions histogram.
  """
  # Column-major accumulation: one list per output column.
  nlp_stats_dict = {
      _TOKEN_NAME_KEY: [],
      _FREQUENCY_KEY: [],
      _FRACTION_OF_SEQ_KEY: [],
      _PER_SEQ_MIN_FREQ_KEY: [],
      _PER_SEQ_MAX_FREQ_KEY: [],
      _PER_SEQ_AVG_FREQ_KEY: [],
      _POSITIONS_KEY: [],
  }
  for token in token_statistic:
    # A token is identified by either a string or an integer (proto oneof).
    if token.WhichOneof('token') == 'string_token':
      token_name = token.string_token
    else:
      token_name = token.int_token
    nlp_stats_dict[_TOKEN_NAME_KEY].append(token_name)
    nlp_stats_dict[_FREQUENCY_KEY].append(token.frequency)
    nlp_stats_dict[_FRACTION_OF_SEQ_KEY].append(token.fraction_of_sequences)
    nlp_stats_dict[_PER_SEQ_MIN_FREQ_KEY].append(
        token.per_sequence_min_frequency)
    nlp_stats_dict[_PER_SEQ_MAX_FREQ_KEY].append(
        token.per_sequence_max_frequency)
    nlp_stats_dict[_PER_SEQ_AVG_FREQ_KEY].append(
        token.per_sequence_avg_frequency)
    # The positions histogram is stored as a nested DataFrame cell.
    nlp_stats_dict[_POSITIONS_KEY].append(
        _get_histogram_dataframe(token.positions))
  return pd.DataFrame.from_dict(nlp_stats_dict)
def _get_histogram_dataframe(
    histogram: statistics_pb2.Histogram) -> pd.DataFrame:
  """Converts a `Histogram` proto into a three-column pandas.DataFrame."""
  buckets = list(histogram.buckets)
  columns = {
      'high_values': [bucket.high_value for bucket in buckets],
      'low_values': [bucket.low_value for bucket in buckets],
      'sample_counts': [bucket.sample_count for bucket in buckets],
  }
  return pd.DataFrame.from_dict(columns)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for displaying TFDV outputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import sys
from typing import Dict, List, Optional, Text, Tuple, Union
import pandas as pd
from tensorflow_data_validation import types
from tensorflow_data_validation.utils import stats_util
from tensorflow_metadata.proto.v0 import anomalies_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
try:
  # pylint: disable=g-import-not-at-top
  from IPython.display import display
  from IPython.display import HTML
except ImportError as e:
  # IPython is an optional dependency: install no-op stand-ins so that this
  # module still imports without it, and warn once on stderr.

  def display(unused_input):
    print('IPython is not installed. Unable to display.')

  def HTML(s):  # pylint: disable=invalid-name
    return s

  sys.stderr.write('Unable to import IPython: {}. \n'
                   'TFDV visualization APIs will not function. To use '
                   'visualization features, make sure IPython is installed, or '
                   'install TFDV using '
                   '"pip install tensorflow-data-validation[visualization]"\n'
                   .format(e))

# Name of the custom statistic under which NaturalLanguageStatistics protos
# are packed in DatasetFeatureStatistics.
_NL_CUSTOM_STATS_NAME = 'nl_statistics'
# Column keys for the per-token statistics DataFrame built by
# _get_token_statistics.
_TOKEN_NAME_KEY = 'token_name'
_FREQUENCY_KEY = 'frequency'
_FRACTION_OF_SEQ_KEY = 'fraction_of_sequences'
_PER_SEQ_MIN_FREQ_KEY = 'per_sequence_min_frequency'
_PER_SEQ_MAX_FREQ_KEY = 'per_sequence_max_frequency'
_PER_SEQ_AVG_FREQ_KEY = 'per_sequence_avg_frequency'
_POSITIONS_KEY = 'positions'
def _add_quotes(input_str: types.FeatureName) -> types.FeatureName:
  """Wraps the input in single quotes, escaping embedded single quotes."""
  escaped = input_str.replace("'", "\\'")
  return "'" + escaped + "'"
def get_schema_dataframe(
    schema: schema_pb2.Schema) -> Tuple[pd.DataFrame, pd.DataFrame]:
  """Returns a tuple of DataFrames containing the input schema information.

  Args:
    schema: A Schema protocol buffer.

  Returns:
    A tuple of DataFrames containing the features and domains of the schema.

  Raises:
    TypeError: If the input is not a Schema proto.
  """
  if not isinstance(schema, schema_pb2.Schema):
    raise TypeError('schema is of type %s, should be a Schema proto.' %
                    type(schema).__name__)
  # Extract all the string domains at the schema level.
  domain_rows = []
  for domain in schema.string_domain:
    domain_rows.append(
        [_add_quotes(domain.name),
         ', '.join(_add_quotes(v) for v in domain.value)])
  feature_rows = []
  # Iterate over the features in the schema and extract the properties of each
  # feature.
  for feature in schema.feature:
    # Extract the presence information of the feature.
    # A feature is shown as 'required' only when presence.min_fraction == 1.0.
    if feature.HasField('presence'):
      if feature.presence.min_fraction == 1.0:
        feature_presence = 'required'
      else:
        feature_presence = 'optional'
    else:
      feature_presence = ''
    # Extract the valency information of the feature.
    # 'single' when min == max == 1; otherwise an interval string like
    # '[1,inf)' with missing bounds defaulting to 0 / infinity.
    valency = ''
    if feature.HasField('value_count'):
      if (feature.value_count.min == feature.value_count.max and
          feature.value_count.min == 1):
        valency = 'single'
      else:
        min_value_count = ('[%d' % feature.value_count.min
                           if feature.value_count.HasField('min') else '[0')
        max_value_count = ('%d]' % feature.value_count.max
                           if feature.value_count.HasField('max') else 'inf)')
        valency = min_value_count + ',' + max_value_count
    # Extract the feature type.
    feature_type = schema_pb2.FeatureType.Name(feature.type)
    # If the feature has a string domain, treat it as a string feature.
    if feature_type == 'BYTES' and (feature.HasField('domain') or
                                    feature.HasField('string_domain')):
      feature_type = 'STRING'
    # Extract the domain (if any) of the feature.
    # Helper that joins optional min/max descriptions into one display cell,
    # or '-' when neither bound is present.
    def combine_min_max_strings(min_string, max_string):
      if min_string is not None and max_string is not None:
        domain_string = min_string + '; ' + max_string
      elif min_string is not None:
        domain_string = min_string
      elif max_string is not None:
        domain_string = max_string
      else:
        domain_string = '-'
      return domain_string
    domain = '-'
    if feature.HasField('domain'):
      domain = _add_quotes(feature.domain)
    elif feature.HasField('int_domain'):
      min_string = ('min: %d' % feature.int_domain.min
                    if feature.int_domain.HasField('min') else None)
      max_string = ('max: %d' % feature.int_domain.max
                    if feature.int_domain.HasField('max') else None)
      domain = combine_min_max_strings(min_string, max_string)
    elif feature.HasField('float_domain'):
      # Float bounds default to +/-inf unless infinities are disallowed, in
      # which case an absent bound is simply omitted from the display.
      if feature.float_domain.HasField('min'):
        min_string = 'min: %f' % feature.float_domain.min
      elif feature.float_domain.disallow_inf:
        min_string = None
      else:
        min_string = 'min: -inf'
      if feature.float_domain.HasField('max'):
        max_string = 'max: %f' % feature.float_domain.max
      elif feature.float_domain.disallow_inf:
        max_string = None
      else:
        max_string = 'max: inf'
      domain = combine_min_max_strings(min_string, max_string)
    elif feature.HasField('string_domain'):
      # Inline string domains also get appended to the domains table, using
      # '<feature name>_domain' when the domain itself is unnamed.
      domain = _add_quotes(feature.string_domain.name if
                           feature.string_domain.name else
                           feature.name + '_domain')
      domain_rows.append([domain,
                          ', '.join(_add_quotes(v) for v in
                                    feature.string_domain.value)])
    feature_rows.append(
        [_add_quotes(feature.name), feature_type, feature_presence, valency,
         domain])
  features = pd.DataFrame(
      feature_rows,
      columns=['Feature name', 'Type', 'Presence', 'Valency',
               'Domain']).set_index('Feature name')
  domains = pd.DataFrame(
      domain_rows, columns=['Domain', 'Values']).set_index('Domain')
  return features, domains
def display_schema(schema: schema_pb2.Schema) -> None:
  """Displays the input schema (for use in a Jupyter notebook).

  Args:
    schema: A Schema protocol buffer.
  """
  feature_frame, domain_frame = get_schema_dataframe(schema)
  display(feature_frame)
  if domain_frame.empty:
    return
  # Widen the column display so long domain value lists are not truncated.
  pd.set_option('max_colwidth', None)
  display(domain_frame)
def get_anomalies_dataframe(anomalies: anomalies_pb2.Anomalies) -> pd.DataFrame:
  """Returns a DataFrame containing the input anomalies.

  Args:
    anomalies: An Anomalies protocol buffer.

  Returns:
    A DataFrame containing the input anomalies, or an empty DataFrame if there
    are no anomalies.

  Raises:
    TypeError: If the input is not an Anomalies proto.
  """
  if not isinstance(anomalies, anomalies_pb2.Anomalies):
    raise TypeError('anomalies is of type %s, should be an Anomalies proto.' %
                    type(anomalies).__name__)
  # One row per anomalous feature: quoted name plus both descriptions.
  rows = [[_add_quotes(name), info.short_description, info.description]
          for name, info in anomalies.anomaly_info.items()]
  # Dataset-level anomalies get a synthetic row of their own.
  if anomalies.HasField('dataset_anomaly_info'):
    dataset_info = anomalies.dataset_anomaly_info
    rows.append(['[dataset anomaly]', dataset_info.short_description,
                 dataset_info.description])
  frame = pd.DataFrame(
      rows,
      columns=[
          'Feature name', 'Anomaly short description',
          'Anomaly long description'
      ]).set_index('Feature name')
  # Widen the column display so long descriptions are not truncated.
  pd.set_option('max_colwidth', None)
  return frame
def display_anomalies(anomalies: anomalies_pb2.Anomalies) -> None:
  """Displays the input anomalies (for use in a Jupyter notebook).

  Args:
    anomalies: An Anomalies protocol buffer.
  """
  frame = get_anomalies_dataframe(anomalies)
  if frame.empty:
    # Nothing to report: show a green all-clear banner instead of a table.
    display(HTML('<h4 style="color:green;">No anomalies found.</h4>'))
    return
  display(frame)
def _project_statistics(
    statistics: statistics_pb2.DatasetFeatureStatisticsList,
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> statistics_pb2.DatasetFeatureStatisticsList:
  """Project statistics proto based on allowlist and denylist features."""
  # With no filters there is nothing to project; return the input unchanged.
  if allowlist_features is None and denylist_features is None:
    return statistics
  # Decide once which feature paths to keep: membership in the allowlist,
  # or absence from the denylist (allowlist takes precedence when both given).
  if allowlist_features is not None:
    kept_paths = set(allowlist_features)
    should_keep = lambda path: path in kept_paths
  else:
    dropped_paths = set(denylist_features)
    should_keep = lambda path: path not in dropped_paths
  result = statistics_pb2.DatasetFeatureStatisticsList()
  for dataset_stats in statistics.datasets:
    projected = result.datasets.add()
    projected.MergeFrom(dataset_stats)
    del projected.features[:]
    for feature in dataset_stats.features:
      if should_keep(types.FeaturePath.from_proto(feature.path)):
        projected.features.add().MergeFrom(feature)
  return result
def _get_combined_statistics(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> statistics_pb2.DatasetFeatureStatisticsList:
  """Get combined dataset statistics list proto.

  Validates the lhs (and optional rhs) single-dataset protos, projects their
  features through the allow/deny lists, and merges them into one
  DatasetFeatureStatisticsList with uniquely named datasets.

  Args:
    lhs_statistics: A DatasetFeatureStatisticsList proto with one dataset.
    rhs_statistics: An optional DatasetFeatureStatisticsList proto with one
      dataset to combine with lhs_statistics.
    lhs_name: Fallback name for the lhs dataset (proto name takes precedence).
    rhs_name: Fallback name for the rhs dataset (proto name takes precedence).
    allowlist_features: Features to keep, or None.
    denylist_features: Features to drop, or None.

  Returns:
    A DatasetFeatureStatisticsList containing the (projected) lhs dataset and,
    if given, the rhs dataset.

  Raises:
    TypeError: If either input is not a DatasetFeatureStatisticsList proto.
    ValueError: If either input does not contain exactly one dataset.
  """
  if not isinstance(lhs_statistics,
                    statistics_pb2.DatasetFeatureStatisticsList):
    raise TypeError(
        'lhs_statistics is of type %s, should be '
        'a DatasetFeatureStatisticsList proto.' % type(lhs_statistics).__name__)
  if not lhs_statistics.datasets:
    raise ValueError('lhs_statistics proto contains no dataset.')
  if len(lhs_statistics.datasets) != 1:
    raise ValueError('lhs_statistics proto contains multiple datasets. Only '
                     'one dataset is currently supported.')
  # A name embedded in the proto overrides the caller-supplied lhs_name.
  if lhs_statistics.datasets[0].name:
    lhs_name = lhs_statistics.datasets[0].name
  # Add lhs stats.
  lhs_statistics = _project_statistics(
      lhs_statistics, allowlist_features, denylist_features)
  combined_statistics = statistics_pb2.DatasetFeatureStatisticsList()
  lhs_stats_copy = combined_statistics.datasets.add()
  lhs_stats_copy.MergeFrom(lhs_statistics.datasets[0])
  if rhs_statistics is not None:
    if not isinstance(rhs_statistics,
                      statistics_pb2.DatasetFeatureStatisticsList):
      raise TypeError('rhs_statistics is of type %s, should be a '
                      'DatasetFeatureStatisticsList proto.'
                      % type(rhs_statistics).__name__)
    if len(rhs_statistics.datasets) != 1:
      raise ValueError('rhs_statistics proto contains multiple datasets. Only '
                       'one dataset is currently supported.')
    if rhs_statistics.datasets[0].name:
      rhs_name = rhs_statistics.datasets[0].name
    # If we have same name, revert to default names.
    if lhs_name == rhs_name:
      lhs_name, rhs_name = 'lhs_statistics', 'rhs_statistics'
    # Add rhs stats.
    rhs_statistics = _project_statistics(
        rhs_statistics, allowlist_features, denylist_features)
    rhs_stats_copy = combined_statistics.datasets.add()
    rhs_stats_copy.MergeFrom(rhs_statistics.datasets[0])
    rhs_stats_copy.name = rhs_name
  # Update lhs name.
  lhs_stats_copy.name = lhs_name
  return combined_statistics
def get_statistics_html(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> Text:
  """Build the HTML for visualizing the input statistics using Facets.

  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to
      compare with lhs_statistics.
    lhs_name: Name of the lhs_statistics dataset.
    rhs_name: Name of the rhs_statistics dataset.
    allowlist_features: Set of features to be visualized.
    denylist_features: Set of features to ignore for visualization.

  Returns:
    HTML to be embedded for visualization.

  Raises:
    TypeError: If the input argument is not of the expected type.
    ValueError: If the input statistics protos does not have only one dataset.
  """
  combined_statistics = _get_combined_statistics(
      lhs_statistics, rhs_statistics, lhs_name, rhs_name, allowlist_features,
      denylist_features)
  # Serialize the combined proto and base64-encode it so it can be embedded
  # directly in the HTML as the facets-overview input.
  protostr = base64.b64encode(
      combined_statistics.SerializeToString()).decode('utf-8')
  # pylint: disable=line-too-long,anomalous-backslash-in-string
  # Note that in the html template we currently assign a temporary id to the
  # facets element and then remove it once we have appended the serialized proto
  # string to the element. We do this to avoid any collision of ids when
  # displaying multiple facets output in the notebook.
  #
  # Note that a string literal including '</script>' in a <script> tag needs to
  # escape it as <\/script> to avoid early closing the wrapping <script> tag.
  html_template = """<iframe id='facets-iframe' width="100%" height="500px"></iframe>
        <script>
        facets_iframe = document.getElementById('facets-iframe');
        facets_html = '<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"><\/script><link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/master/facets-dist/facets-jupyter.html"><facets-overview proto-input="protostr"></facets-overview>';
        facets_iframe.srcdoc = facets_html;
        facets_iframe.id = "";
        setTimeout(() => {
          facets_iframe.setAttribute('height', facets_iframe.contentWindow.document.body.offsetHeight + 'px')
        }, 1500)
        </script>"""
  # pylint: enable=line-too-long
  # Splice the encoded proto into the template's 'protostr' placeholder.
  html = html_template.replace('protostr', protostr)
  return html
def visualize_statistics(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None) -> None:
  """Visualize the input statistics using Facets.

  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to
      compare with lhs_statistics.
    lhs_name: Name of the lhs_statistics dataset.
    rhs_name: Name of the rhs_statistics dataset.
    allowlist_features: Set of features to be visualized.
    denylist_features: Set of features to ignore for visualization.

  Raises:
    TypeError: If the input argument is not of the expected type.
    ValueError: If the input statistics protos does not have only one dataset,
      or if both allowlist_features and denylist_features are specified.
  """
  # Validate with an explicit ValueError instead of `assert`: asserts are
  # stripped under `python -O`, and ValueError matches the documented
  # exception contract of this function.
  if allowlist_features and denylist_features:
    raise ValueError(
        'Only specify one of allowlist_features and denylist_features.')
  html = get_statistics_html(lhs_statistics, rhs_statistics, lhs_name, rhs_name,
                             allowlist_features, denylist_features)
  display(HTML(html))
def compare_slices(statistics: statistics_pb2.DatasetFeatureStatisticsList,
                   lhs_slice_key: Text, rhs_slice_key: Text):
  """Compare statistics of two slices using Facets.

  Args:
    statistics: A DatasetFeatureStatisticsList protocol buffer.
    lhs_slice_key: Slice key of the first slice.
    rhs_slice_key: Slice key of the second slice.

  Raises:
    ValueError: If the input statistics proto does not have the specified slice
      statistics.
  """
  # Extract the per-slice statistics from the sliced proto.
  lhs_stats = stats_util.get_slice_stats(statistics, lhs_slice_key)
  rhs_stats = stats_util.get_slice_stats(statistics, rhs_slice_key)
  # Reuse the two-dataset visualization, labelling each side by slice key.
  visualize_statistics(lhs_stats, rhs_stats,
                       lhs_name=lhs_slice_key, rhs_name=rhs_slice_key)
def get_natural_language_statistics_dataframes(
    lhs_statistics: statistics_pb2.DatasetFeatureStatisticsList,
    rhs_statistics: Optional[
        statistics_pb2.DatasetFeatureStatisticsList] = None,
    lhs_name: Text = 'lhs_statistics',
    rhs_name: Text = 'rhs_statistics',
    allowlist_features: Optional[List[types.FeaturePath]] = None,
    denylist_features: Optional[List[types.FeaturePath]] = None
) -> Optional[Dict[str, Dict[Union[int, str], Union[Dict[str, pd.DataFrame],
                                                    pd.DataFrame]]]]:
  """Gets the `NaturalLanguageStatistics` as a dict of pandas.DataFrame.

  Each pd.DataFrame can be fed into a plot with little to no manipulation.
  For example, to plot the `token_length_histogram` in plot.ly:

  ```
  import pandas as pd
  import plotly
  import tensorflow_data_validation as tfdv
  from tensorflow_data_validation.utils import display_util as tfdv_display_util

  data = pd.DataFrame.from_dict({"col": [1, 2, 3]})
  statistics = tfdv.generate_statistics_from_dataframe(data)
  df = tfdv_display_util.get_natural_language_statistics_dataframes(statistics)
  hist, bin_edges = np.histogram(df[ds_name][feature_name][
      'token_length_histogram']['high_values'])
  fig = plotly.graph_objs.Figure(data=[
      plotly.graph_objs.Bar(x=bin_edges, y=hist, name='Histogram'),
  ])
  ```

  The resulting dict contains `token_length_histogram` and each token name as
  its keys. For each token, the data frame represents a list of stats as well
  as the token's positions histogram.

  Args:
    lhs_statistics: A DatasetFeatureStatisticsList protocol buffer.
    rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to
      compare with lhs_statistics.
    lhs_name: Name of the lhs_statistics dataset.
    rhs_name: Name of the rhs_statistics dataset.
    allowlist_features: Set of features to be visualized.
    denylist_features: Set of features to ignore for visualization.

  Returns:
    A dict of pandas data frames. Returns None if natural language statistics
    does not exist in the statistics proto.
  """
  # Validate, project and merge the lhs/rhs protos, then pull out the
  # per-dataset, per-feature NaturalLanguageStatistics payloads.
  combined = _get_combined_statistics(lhs_statistics, rhs_statistics, lhs_name,
                                      rhs_name, allowlist_features,
                                      denylist_features)
  nlp_stats = _get_natural_language_statistics(combined)
  if not nlp_stats:
    return None
  # Convert each feature's NL stats into two DataFrames: the token length
  # histogram and a per-token statistics table.
  return {
      ds_name: {
          feature_name: {
              'token_length_histogram':
                  _get_histogram_dataframe(nl_stat.token_length_histogram),
              'token_statistics':
                  _get_token_statistics(list(nl_stat.token_statistics)),
          } for feature_name, nl_stat in feature_stats.items()
      } for ds_name, feature_stats in nlp_stats.items()
  }
def _get_natural_language_statistics(
    statistics: statistics_pb2.DatasetFeatureStatisticsList
) -> Dict[str, Dict[str, statistics_pb2.NaturalLanguageStatistics]]:
  """Gets the Natural Language stat out of the custom statistic."""
  result = {}
  for dataset in statistics.datasets:
    # Unnamed datasets cannot be keyed in the result dict; skip them.
    if not dataset.name:
      continue
    per_feature = {}
    for feature in dataset.features:
      for custom_stat in feature.custom_stats:
        if custom_stat.name != _NL_CUSTOM_STATS_NAME:
          continue
        # The NL stats arrive packed in a google.protobuf.Any.
        nl_stats = statistics_pb2.NaturalLanguageStatistics()
        custom_stat.any.Unpack(nl_stats)
        # Prefer the flat feature name; fall back to the structured path.
        feature_key = (feature.name if feature.name
                       else str(types.FeaturePath.from_proto(feature.path)))
        per_feature[feature_key] = nl_stats
    if per_feature:
      result[dataset.name] = per_feature
  return result
def _get_token_statistics(
    token_statistic: List[
        statistics_pb2.NaturalLanguageStatistics.TokenStatistics]
) -> pd.DataFrame:
  """Builds a DataFrame with one row of statistics per token."""
  columns = (_TOKEN_NAME_KEY, _FREQUENCY_KEY, _FRACTION_OF_SEQ_KEY,
             _PER_SEQ_MIN_FREQ_KEY, _PER_SEQ_MAX_FREQ_KEY,
             _PER_SEQ_AVG_FREQ_KEY, _POSITIONS_KEY)
  data = {column: [] for column in columns}
  for token in token_statistic:
    # A token is identified by either a string or an integer (proto oneof).
    if token.WhichOneof('token') == 'string_token':
      name = token.string_token
    else:
      name = token.int_token
    row = (name, token.frequency, token.fraction_of_sequences,
           token.per_sequence_min_frequency,
           token.per_sequence_max_frequency,
           token.per_sequence_avg_frequency,
           # The positions histogram is stored as a nested DataFrame cell.
           _get_histogram_dataframe(token.positions))
    for column, value in zip(columns, row):
      data[column].append(value)
  return pd.DataFrame.from_dict(data)
def _get_histogram_dataframe(
    histogram: statistics_pb2.Histogram) -> pd.DataFrame:
  """Gets the `Histogram` as a pandas.DataFrame.

  Args:
    histogram: A Histogram proto.

  Returns:
    A DataFrame with one row per bucket and columns 'high_values',
    'low_values' and 'sample_counts'.
  """
  return pd.DataFrame.from_dict({
      'high_values': [b.high_value for b in histogram.buckets],
      'low_values': [b.low_value for b in histogram.buckets],
      'sample_counts': [b.sample_count for b in histogram.buckets],
  })
|
en
| 0.673873
|
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Utils for displaying TFDV outputs. # pylint: disable=g-import-not-at-top # pylint: disable=invalid-name Returns a tuple of DataFrames containing the input schema information. Args: schema: A Schema protocol buffer. Returns: A tuple of DataFrames containing the features and domains of the schema. # Extract all the string domains at the schema level. # Iterate over the features in the schema and extract the properties of each # feature. # Extract the presence information of the feature. # Extract the valency information of the feature. # Extract the feature type. # If the feature has a string domain, treat it as a string feature. # Extract the domain (if any) of the feature. Displays the input schema (for use in a Jupyter notebook). Args: schema: A Schema protocol buffer. # Do not truncate columns. Returns a DataFrame containing the input anomalies. Args: anomalies: An Anomalies protocol buffer. Returns: A DataFrame containing the input anomalies, or an empty DataFrame if there are no anomalies. # Construct a DataFrame consisting of the anomalies and display it. # Do not truncate columns. Displays the input anomalies (for use in a Jupyter notebook). Args: anomalies: An Anomalies protocol buffer. Project statistics proto based on allowlist and denylist features. Get combined datatset statistics list proto. # Add lhs stats. 
# If we have same name, revert to default names. # Add rhs stats. # Update lhs name. Build the HTML for visualizing the input statistics using Facets. Args: lhs_statistics: A DatasetFeatureStatisticsList protocol buffer. rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to compare with lhs_statistics. lhs_name: Name of the lhs_statistics dataset. rhs_name: Name of the rhs_statistics dataset. allowlist_features: Set of features to be visualized. denylist_features: Set of features to ignore for visualization. Returns: HTML to be embedded for visualization. Raises: TypeError: If the input argument is not of the expected type. ValueError: If the input statistics protos does not have only one dataset. # pylint: disable=line-too-long,anomalous-backslash-in-string # Note that in the html template we currently assign a temporary id to the # facets element and then remove it once we have appended the serialized proto # string to the element. We do this to avoid any collision of ids when # displaying multiple facets output in the notebook. # # Note that a string literal including '</script>' in a <script> tag needs to # escape it as <\/script> to avoid early closing the wrapping <script> tag. <iframe id='facets-iframe' width="100%" height="500px"></iframe> <script> facets_iframe = document.getElementById('facets-iframe'); facets_html = '<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"><\/script><link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/master/facets-dist/facets-jupyter.html"><facets-overview proto-input="protostr"></facets-overview>'; facets_iframe.srcdoc = facets_html; facets_iframe.id = ""; setTimeout(() => { facets_iframe.setAttribute('height', facets_iframe.contentWindow.document.body.offsetHeight + 'px') }, 1500) </script> # pylint: enable=line-too-long Visualize the input statistics using Facets. Args: lhs_statistics: A DatasetFeatureStatisticsList protocol buffer. 
rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to compare with lhs_statistics. lhs_name: Name of the lhs_statistics dataset. rhs_name: Name of the rhs_statistics dataset. allowlist_features: Set of features to be visualized. denylist_features: Set of features to ignore for visualization. Raises: TypeError: If the input argument is not of the expected type. ValueError: If the input statistics protos does not have only one dataset. Compare statistics of two slices using Facets. Args: statistics: A DatasetFeatureStatisticsList protocol buffer. lhs_slice_key: Slice key of the first slice. rhs_slice_key: Slice key of the second slice. Raises: ValueError: If the input statistics proto does not have the specified slice statistics. Gets the `NaturalLanguageStatistics` as a dict of pandas.DataFrame. Each pd.DataFrame can be fed into a plot with little to no manipulation. For example, to plot the `token_length_histogram` in plot.ly: ``` import pandas a pd import plotly import tensorflow_data_validation as tfdv from tensorflow_data_validation.utils import display_util as tfdv_display_util data = pd.DataFrame.from_dict({"col": [1, 2, 3]}) statistics = tfdv.generate_statistics_from_dataframe(data) df = tfdv_display_util.get_natural_language_statistics_dataframes(statistics) hist, bin_edges = np.histogram(df[ds_name][feature_name][ 'token_length_histogram']['high_values']) fig = plotly.graph_objs.Figure(data=[ plotly.graph_objs.Bar(x=bin_edges, y=hist, name='Histogram'), ]) ``` The resulting dict contains `token_length_histogram` and each token name as its keys. For each token, the data frame represents a list of stats as well as the token's positions histogram. Args: lhs_statistics: A DatasetFeatureStatisticsList protocol buffer. rhs_statistics: An optional DatasetFeatureStatisticsList protocol buffer to compare with lhs_statistics. lhs_name: Name of the lhs_statistics dataset. rhs_name: Name of the rhs_statistics dataset. 
allowlist_features: Set of features to be visualized. denylist_features: Set of features to ignore for visualization. Returns: A dict of pandas data frames. Returns None if natural language statistics does not exist in the statistics proto. Gets the Natural Language stat out of the custom statistic. Returns a dict of each token's stats. Gets the `Histogram` as a pandas.DataFrame.
| 1.973415
| 2
|
setup.py
|
gitdachong/lasttester
| 0
|
6629302
|
#coding:utf-8
from setuptools import setup, find_packages
from lasttester import __author__,__version__,__contact__
import sys
import os
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
sys.exit()
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name = "lasttester",
version = __version__,
keywords = ["lasttester", "automatic test","online monitoring", "test data preparation", "continuous integration"],
description = "LastTester is a multiple protocols,powerful and easy to expand test lib,it is applicable to back-end automatic test, test data preparation, online monitoring, continuous integration and other scenarios",
long_description = long_description,
long_description_content_type='text/markdown',
license = "MIT Licence",
url = "https://github.com/gitdachong/lasttester",
project_urls={
"Bug Tracker": "https://github.com/gitdachong/lasttester",
"Documentation": "https://github.com/gitdachong/lasttester",
"Source Code": "https://github.com/gitdachong/lasttester",
},
author = __author__,
author_email = __contact__,
packages = find_packages(),
package_data={'': ['LICENSE'], 'lasttester': ['*.mo','*.po']},
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires = [
'requests'
]
)
|
#coding:utf-8
from setuptools import setup, find_packages
from lasttester import __author__,__version__,__contact__
import sys
import os
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
sys.exit()
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name = "lasttester",
version = __version__,
keywords = ["lasttester", "automatic test","online monitoring", "test data preparation", "continuous integration"],
description = "LastTester is a multiple protocols,powerful and easy to expand test lib,it is applicable to back-end automatic test, test data preparation, online monitoring, continuous integration and other scenarios",
long_description = long_description,
long_description_content_type='text/markdown',
license = "MIT Licence",
url = "https://github.com/gitdachong/lasttester",
project_urls={
"Bug Tracker": "https://github.com/gitdachong/lasttester",
"Documentation": "https://github.com/gitdachong/lasttester",
"Source Code": "https://github.com/gitdachong/lasttester",
},
author = __author__,
author_email = __contact__,
packages = find_packages(),
package_data={'': ['LICENSE'], 'lasttester': ['*.mo','*.po']},
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires = [
'requests'
]
)
|
en
| 0.795494
|
#coding:utf-8
| 1.403904
| 1
|
apis/_track.py
|
JohnRipper/tinybot-rtc
| 1
|
6629303
|
import time
class Track:
def __init__(self, video_id='', video_time=0, video_title='', image='', owner=None, video_type='youTube'):
self.id = video_id
self.time = video_time
self.title = video_title
self.image = image
self.owner = owner
self.type = video_type
self.rq_time = time.time()
self.start = 0
self.pause = 0
|
import time
class Track:
def __init__(self, video_id='', video_time=0, video_title='', image='', owner=None, video_type='youTube'):
self.id = video_id
self.time = video_time
self.title = video_title
self.image = image
self.owner = owner
self.type = video_type
self.rq_time = time.time()
self.start = 0
self.pause = 0
|
none
| 1
| 2.660649
| 3
|
|
openclsim/plot/__init__.py
|
thijsreedijk/OpenCLSim
| 0
|
6629304
|
<filename>openclsim/plot/__init__.py<gh_stars>0
"""Directory for the simulation plots."""
from .log_dataframe import get_log_dataframe
from .step_chart import get_step_chart
from .vessel_planning import vessel_planning
__all__ = ["vessel_planning", "get_log_dataframe", "get_step_chart"]
|
<filename>openclsim/plot/__init__.py<gh_stars>0
"""Directory for the simulation plots."""
from .log_dataframe import get_log_dataframe
from .step_chart import get_step_chart
from .vessel_planning import vessel_planning
__all__ = ["vessel_planning", "get_log_dataframe", "get_step_chart"]
|
en
| 0.818233
|
Directory for the simulation plots.
| 1.276103
| 1
|
dlutils/models/gans/wasserstein_div/models.py
|
justusschock/dl-utils
| 13
|
6629305
|
from functools import reduce
from operator import mul
import torch
class Generator(torch.nn.Module):
"""
A simple generative network
"""
def __init__(self, img_shape, latent_dim):
"""
Parameters
----------
img_shape : tuple
the shape of the images to generate (including channels,
excluding batch dimension)
latent_dim : int
size of the latent noise dimension
"""
super().__init__()
def block(in_feat, out_feat, normalize=True):
layers = [torch.nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(torch.nn.BatchNorm1d(out_feat, 0.8))
layers.append(torch.nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = torch.nn.Sequential(
*block(latent_dim, 128, normalize=False),
*block(128, 256),
*block(256, 512),
*block(512, 1024),
torch.nn.Linear(1024, reduce(mul, img_shape)),
torch.nn.Tanh()
)
self._img_shape = img_shape
def forward(self, z):
"""
Feeds a batch of noise vectors through the network to generate images
Parameters
----------
z : :class:`torch.Tensor`
the noise batch
Returns
-------
:class:`torch.Tensor`
the generated image batch
"""
img = self.model(z)
img = img.view(img.shape[0], *self._img_shape)
return img
class Discriminator(torch.nn.Module):
"""
A very simple discriminator network
"""
def __init__(self, img_shape):
"""
Parameters
----------
img_shape : tuple
the shape of the input images (including channels, excluding
batch dimension)
"""
super().__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(reduce(mul, img_shape), 512),
torch.nn.LeakyReLU(0.2, inplace=True),
torch.nn.Linear(512, 256),
torch.nn.LeakyReLU(0.2, inplace=True),
torch.nn.Linear(256, 1),
)
def forward(self, img):
"""
Determines the validity of an image batch by feeding it through the
network
Parameters
----------
img : :class:`torch.Tensor`
the given image batch
Returns
-------
:class:`torch.Tensor`
the batches validity
"""
img_flat = img.view(img.shape[0], -1)
validity = self.model(img_flat)
return validity
|
from functools import reduce
from operator import mul
import torch
class Generator(torch.nn.Module):
"""
A simple generative network
"""
def __init__(self, img_shape, latent_dim):
"""
Parameters
----------
img_shape : tuple
the shape of the images to generate (including channels,
excluding batch dimension)
latent_dim : int
size of the latent noise dimension
"""
super().__init__()
def block(in_feat, out_feat, normalize=True):
layers = [torch.nn.Linear(in_feat, out_feat)]
if normalize:
layers.append(torch.nn.BatchNorm1d(out_feat, 0.8))
layers.append(torch.nn.LeakyReLU(0.2, inplace=True))
return layers
self.model = torch.nn.Sequential(
*block(latent_dim, 128, normalize=False),
*block(128, 256),
*block(256, 512),
*block(512, 1024),
torch.nn.Linear(1024, reduce(mul, img_shape)),
torch.nn.Tanh()
)
self._img_shape = img_shape
def forward(self, z):
"""
Feeds a batch of noise vectors through the network to generate images
Parameters
----------
z : :class:`torch.Tensor`
the noise batch
Returns
-------
:class:`torch.Tensor`
the generated image batch
"""
img = self.model(z)
img = img.view(img.shape[0], *self._img_shape)
return img
class Discriminator(torch.nn.Module):
"""
A very simple discriminator network
"""
def __init__(self, img_shape):
"""
Parameters
----------
img_shape : tuple
the shape of the input images (including channels, excluding
batch dimension)
"""
super().__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(reduce(mul, img_shape), 512),
torch.nn.LeakyReLU(0.2, inplace=True),
torch.nn.Linear(512, 256),
torch.nn.LeakyReLU(0.2, inplace=True),
torch.nn.Linear(256, 1),
)
def forward(self, img):
"""
Determines the validity of an image batch by feeding it through the
network
Parameters
----------
img : :class:`torch.Tensor`
the given image batch
Returns
-------
:class:`torch.Tensor`
the batches validity
"""
img_flat = img.view(img.shape[0], -1)
validity = self.model(img_flat)
return validity
|
en
| 0.64112
|
A simple generative network Parameters ---------- img_shape : tuple the shape of the images to generate (including channels, excluding batch dimension) latent_dim : int size of the latent noise dimension Feeds a batch of noise vectors through the network to generate images Parameters ---------- z : :class:`torch.Tensor` the noise batch Returns ------- :class:`torch.Tensor` the generated image batch A very simple discriminator network Parameters ---------- img_shape : tuple the shape of the input images (including channels, excluding batch dimension) Determines the validity of an image batch by feeding it through the network Parameters ---------- img : :class:`torch.Tensor` the given image batch Returns ------- :class:`torch.Tensor` the batches validity
| 2.925046
| 3
|
data/studio21_generated/introductory/2466/starter_code.py
|
vijaykumawat256/Prompt-Summarization
| 0
|
6629306
|
class Solution:
def diagonalSum(self, mat: List[List[int]]) -> int:
|
class Solution:
def diagonalSum(self, mat: List[List[int]]) -> int:
|
none
| 1
| 2.049908
| 2
|
|
qitensor/arrayformatter.py
|
dstahlke/qitensor
| 6
|
6629307
|
<filename>qitensor/arrayformatter.py
"""
This module handles formatting of arrays. Everything in here is for internal use only,
except for the :func:`set_qitensor_printoptions` and :func:`get_qitensor_printoptions`
functions.
"""
import numpy as np
from qitensor import have_sage
from qitensor.exceptions import HilbertError
__all__ = ['set_qitensor_printoptions', 'get_qitensor_printoptions', 'setup_qitensor_for_qtconsole', 'HilbertArrayFormatter']
class HilbertArrayFormatter(object):
def __init__(self):
"""
This module handles formatting of arrays.
Methods of this class are called by methods of HilbertArray, and
shouldn't need to be dealt with directly.
sage: import qitensor.arrayformatter
sage: TestSuite(qitensor.arrayformatter.FORMATTER).run()
"""
self.str_use_sage = False
# FIXME - make this undocumented option public (requires publishing np_colorizer)
self.str_use_colorize = False
self.zero_color_latex = 'Silver'
self.zero_color_html = '#cccccc'
self.use_latex_label_in_html = True
self.ipy_table_format_mode = 'html'
self.ipy_space_format_mode = 'latex'
def _get_suppress(self):
"""
Gets the current suppression settings (from numpy).
"""
suppress = np.get_printoptions()['suppress']
suppress_thresh = 0.1 ** (np.get_printoptions()['precision'] + 0.5)
return (suppress, suppress_thresh)
def py_scalar_latex_formatter(self, data, dollar_if_tex):
"""
Formats python scalar for latex.
"""
if data.dtype == complex:
(suppress, suppress_thresh) = self._get_suppress()
precision = np.get_printoptions()['precision']
return np.core.arrayprint.ComplexFormat(
data, precision=precision, suppress_small=suppress)
else:
return str
def sage_scalar_latex_formatter(self, data, dollar_if_tex):
"""
Formats Sage scalar for latex.
"""
if not have_sage:
raise HilbertError('This is only available under Sage')
import sage.all
if dollar_if_tex:
return lambda x: '$'+sage.all.latex(x)+'$'
else:
return lambda x: sage.all.latex(x)
def sympy_scalar_latex_formatter(self, data, dollar_if_tex):
"""
Formats Sympy scalar for latex.
"""
import sympy
if dollar_if_tex:
return lambda x: '$'+sympy.latex(x)+'$'
else:
return lambda x: sympy.latex(x)
def _get_arr_obj(self, arr):
if self.str_use_sage:
return arr.sage_block_matrix()
elif self.str_use_colorize:
import np_colorizer
return np_colorizer.colorize(arr.nparray)
else:
return arr.nparray
def array_str(self, arr):
"""
Creates string for HilbertArray.
"""
return str(arr.space)+'\n'+str(self._get_arr_obj(arr))
def array_repr(self, arr):
"""
Creates repr for HilbertArray.
"""
return 'HilbertArray('+repr(arr.space)+',\n'+repr(self._get_arr_obj(arr))+')'
def array_latex_block_table(self, arr, use_hline=False):
"""
Formats array in Latex. Used by both Sage and IPython.
"""
# Alternative way to do it:
# if not have_sage:
# raise HilbertError('This is only available under Sage')
#
# import sage.all
#
# return '\\begin{array}{l}\n'+ \
# sage.all.latex(self.space)+' \\\\\n'+ \
# sage.all.latex(self.sage_block_matrix())+ \
# '\\end{array}'
(suppress, suppress_thresh) = self._get_suppress()
spc = arr.space
if len(spc.ket_set):
ket_indices = list(spc.ket_space().index_iter())
else:
ket_indices = [None]
if len(spc.bra_set):
bra_indices = list(spc.bra_space().index_iter())
else:
bra_indices = [None]
fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=False)
ht = r'\scriptsize{'
ht += r'\begin{array}{|'
if spc.ket_set:
ht += 'l|'
if spc.bra_set:
bra_shape = spc.bra_space().shape
colgrp_size = np.product(bra_shape[1:])
ht += ('c'*colgrp_size + '|')*bra_shape[0]
else:
ht += 'c|'
ht += "}\n"
if spc.bra_set:
if use_hline: ht += r'\hline' + "\n"
if spc.ket_set:
ht += '&'
for (b_idx_n, b_idx) in enumerate(bra_indices):
if b_idx_n:
ht += ' & '
if b_idx is not None:
ht += r'\left< '
for (x, y) in zip(b_idx, spc.sorted_bras):
ht += str(x) + '_{' + y.latex_label + '}'
ht += r' \right|'
ht += r' \\' + "\n"
last_k = None
for k_idx in ket_indices:
if k_idx is None or k_idx[0] != last_k:
if use_hline: ht += r'\hline' + "\n"
if k_idx is not None:
last_k = k_idx[0]
if k_idx is not None:
ht += r'\left| '
for (x, y) in zip(k_idx, spc.sorted_kets):
ht += str(x) + '_{' + y.latex_label + '}'
ht += r' \right>'
ht += ' & '
for (b_idx_n, b_idx) in enumerate(bra_indices):
if k_idx is None and b_idx is None:
assert 0
elif k_idx is None:
idx = b_idx
elif b_idx is None:
idx = k_idx
else:
idx = k_idx + b_idx
v = arr[idx]
if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):
if self.zero_color_latex != '':
vs = r'\color{'+self.zero_color_latex+'}{0}'
else:
vs = '0'
else:
vs = fmt(v)
if b_idx_n:
ht += ' & '
ht += vs
ht += r' \\' + "\n"
if use_hline: ht += r'\hline' + "\n"
ht += r"\end{array}" + "\n"
ht += '}' # small
return ht
def array_html_block_table(self, arr):
r"""
Format array in HTML. Used for IPython.
>>> from qitensor import qudit
>>> ha = qudit('a', 3)
>>> hb = qudit('b', 2)
>>> X = ha.eye() * hb.ket(1)
>>> f = HilbertArrayFormatter()
>>> f.set_printoptions()
>>> print(f.array_html_block_table(X))
$\left| a,b \right\rangle\left\langle a \right|$<table style='margin: 0px 0px;'>
<colgroup style='border: 2px solid black;'></colgroup>
<colgroup span=3 style='border: 2px solid black;'></colgroup>
<tbody style='border: 2px solid black;'>
<tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'> </td><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>⟨<tt>0</tt>|</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>⟨<tt>1</tt>|</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>⟨<tt>2</tt>|</nobr></td></tr>
</tbody>
<tbody style='border: 2px solid black;'>
<tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>0</tt>,<tt>0</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr>
<tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>0</tt>,<tt>1</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><nobr><tt> 1.+0.j</tt></nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr>
</tbody>
<tbody style='border: 2px solid black;'>
<tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>1</tt>,<tt>0</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr>
<tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>1</tt>,<tt>1</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><nobr><tt> 1.+0.j</tt></nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr>
</tbody>
<tbody style='border: 2px solid black;'>
<tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>2</tt>,<tt>0</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr>
<tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>2</tt>,<tt>1</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><nobr><tt> 1.+0.j</tt></nobr></td></tr>
</tbody>
</table>
<BLANKLINE>
"""
(suppress, suppress_thresh) = self._get_suppress()
st_tab = "style='border: 2px solid black;'"
st_tr = "style='border: 1px dotted; padding: 2px;'"
st_th = "style='border: 1px dotted; padding: 2px; text-align: center;'"
st_tdval = "style='border: 1px dotted; padding: 2px; text-align: right;'"
spc = arr.space
if len(spc.ket_set):
ket_indices = list(spc.ket_space().index_iter())
else:
ket_indices = [None]
if len(spc.bra_set):
bra_indices = list(spc.bra_space().index_iter())
else:
bra_indices = [None]
fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=True)
ht = ''
if self.use_latex_label_in_html:
ht += '$'+spc._latex_()+'$'
else:
# FIXME - here, and elsewhere, use unicode symbols '⟨' and '⟩'
# for html.
ht += spc._html_()+'<br>'
ht += "<table style='margin: 0px 0px;'>\n"
if spc.ket_set:
ht += "<colgroup "+st_tab+"></colgroup>\n"
if len(spc.bra_set):
colgrp_size = spc.bra_space().shape[-1]
for i in range(spc.bra_space().dim() // colgrp_size):
ht += ("<colgroup span=%d "+st_tab+"></colgroup>\n") % colgrp_size
else:
ht += "<colgroup "+st_tab+"></colgroup>\n"
if spc.bra_set:
ht += "<tbody "+st_tab+">\n"
ht += '<tr '+st_tr+'>'
if spc.ket_set:
ht += '<td '+st_th+'> </td>'
for b_idx in bra_indices:
ht += '<td '+st_th+'><nobr>'
#if self.use_latex_label_in_html:
# ht += r'$\scriptsize{\left< '
# ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices?
# ht += r' \right|}$'
#else:
ht += '⟨'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|'
ht += '</nobr></td>'
ht += '</tr>\n'
ht += '</tbody>\n'
last_k = None
for k_idx in ket_indices:
if k_idx is not None and len(k_idx) > 1 and k_idx[-2] != last_k:
if last_k is not None:
ht += '</tbody>\n'
ht += "<tbody "+st_tab+">\n"
last_k = k_idx[-2]
ht += '<tr '+st_tr+'>'
if spc.ket_set:
ht += '<td '+st_th+'><nobr>'
#if self.use_latex_label_in_html:
# ht += r'$\scriptsize{\left| '
# ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices?
# ht += r' \right>}$'
#else:
ht += '|'+(','.join(['<tt>'+str(x)+'</tt>' for x in k_idx]))+'⟩'
ht += '</nobr></td>'
for b_idx in bra_indices:
if k_idx is None and b_idx is None:
assert 0
elif k_idx is None:
idx = b_idx
elif b_idx is None:
idx = k_idx
else:
idx = k_idx + b_idx
v = arr[idx]
if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):
if self.zero_color_html != '':
vs = "<font color='"+self.zero_color_html+"'>0</font>"
else:
vs = "0"
else:
vs = "<nobr><tt>"+fmt(v)+"</tt></nobr>"
ht += '<td '+st_tdval+'>'+vs+'</td>'
ht += '</tr>\n'
ht += '</tbody>\n'
ht += '</table>\n'
return ht
# NOTE: this is normally accessed via set_qitensor_printoptions
def set_printoptions(
self,
str_use_sage=None,
zero_color_latex=None,
zero_color_html=None,
use_latex_label_in_html=None,
ipy_table_format_mode=None,
ipy_space_format_mode=None
):
"""
Sets print options for qitensor.
Any options passed the ``None`` value won't be changed.
:param str_use_sage: If true, use Sage's matrix formatting functions
when available (this is prettier).
:type str_use_sage: bool
:param zero_color_latex: Color to use for drawing the number zero in latex.
:type zero_color_latex: string
:param zero_color_html: Color to use for drawing the number zero in HTML.
:type zero_color_html: string
:param use_latex_label_in_html: If true, HilbertSpace labels will be
shown in latex form when rendering an array in HTML. Works good with
the IPython notebook, but not with qtconsole.
:type use_latex_label_in_html: bool
:param ipy_table_format_mode: Which mode to use for formatting arrays in
the IPython notebook.
:type ipy_table_format_mode: string ('html', 'latex', 'png', 'plain')
:param ipy_space_format_mode: Which mode to use for formatting HilbertSpace
labels in the IPython notebook.
:type ipy_space_format_mode: string ('latex', 'png', 'plain')
qitensor also makes use of the ``suppress`` and ``precision`` options from
numpy.set_printoptions.
See also: :func:`get_qitensor_printoptions`
"""
if str_use_sage is not None:
self.str_use_sage = bool(str_use_sage)
if zero_color_latex is not None:
self.zero_color_latex = str(zero_color_latex)
if zero_color_html is not None:
self.zero_color_html = str(zero_color_html)
if use_latex_label_in_html is not None:
self.use_latex_label_in_html = bool(use_latex_label_in_html)
if ipy_table_format_mode is not None:
assert ipy_table_format_mode in ['html', 'latex', 'png', 'plain']
self.ipy_table_format_mode = ipy_table_format_mode
if ipy_space_format_mode is not None:
assert ipy_space_format_mode in ['latex', 'png', 'plain']
self.ipy_space_format_mode = ipy_space_format_mode
# NOTE: this is normally accessed via get_qitensor_printoptions
def get_printoptions(self):
"""
Gets the current qitensor formatting options.
See also: :func:`set_qitensor_printoptions`
"""
return {
"str_use_sage" : self.str_use_sage,
"zero_color_latex" : self.zero_color_latex,
"zero_color_html" : self.zero_color_html,
"use_latex_label_in_html" : self.use_latex_label_in_html,
"ipy_table_format_mode" : self.ipy_table_format_mode,
"ipy_space_format_mode" : self.ipy_space_format_mode,
}
def setup_for_qtconsole(self):
"""
Sets good printing options for IPython QTconsole.
"""
self.set_printoptions(ipy_table_format_mode='png', ipy_space_format_mode='png')
# FIXME - latex_to_png is limited in its allowed colors
self.set_printoptions(zero_color_latex='yellow')
FORMATTER = HilbertArrayFormatter()
set_qitensor_printoptions = FORMATTER.set_printoptions
get_qitensor_printoptions = FORMATTER.get_printoptions
setup_qitensor_for_qtconsole = FORMATTER.setup_for_qtconsole
|
<filename>qitensor/arrayformatter.py
"""
This module handles formatting of arrays. Everything in here is for internal use only,
except for the :func:`set_qitensor_printoptions` and :func:`get_qitensor_printoptions`
functions.
"""
import numpy as np
from qitensor import have_sage
from qitensor.exceptions import HilbertError
__all__ = ['set_qitensor_printoptions', 'get_qitensor_printoptions', 'setup_qitensor_for_qtconsole', 'HilbertArrayFormatter']
class HilbertArrayFormatter(object):
def __init__(self):
"""
This module handles formatting of arrays.
Methods of this class are called by methods of HilbertArray, and
shouldn't need to be dealt with directly.
sage: import qitensor.arrayformatter
sage: TestSuite(qitensor.arrayformatter.FORMATTER).run()
"""
self.str_use_sage = False
# FIXME - make this undocumented option public (requires publishing np_colorizer)
self.str_use_colorize = False
self.zero_color_latex = 'Silver'
self.zero_color_html = '#cccccc'
self.use_latex_label_in_html = True
self.ipy_table_format_mode = 'html'
self.ipy_space_format_mode = 'latex'
def _get_suppress(self):
"""
Gets the current suppression settings (from numpy).
"""
suppress = np.get_printoptions()['suppress']
suppress_thresh = 0.1 ** (np.get_printoptions()['precision'] + 0.5)
return (suppress, suppress_thresh)
def py_scalar_latex_formatter(self, data, dollar_if_tex):
"""
Formats python scalar for latex.
"""
if data.dtype == complex:
(suppress, suppress_thresh) = self._get_suppress()
precision = np.get_printoptions()['precision']
return np.core.arrayprint.ComplexFormat(
data, precision=precision, suppress_small=suppress)
else:
return str
def sage_scalar_latex_formatter(self, data, dollar_if_tex):
"""
Formats Sage scalar for latex.
"""
if not have_sage:
raise HilbertError('This is only available under Sage')
import sage.all
if dollar_if_tex:
return lambda x: '$'+sage.all.latex(x)+'$'
else:
return lambda x: sage.all.latex(x)
def sympy_scalar_latex_formatter(self, data, dollar_if_tex):
"""
Formats Sympy scalar for latex.
"""
import sympy
if dollar_if_tex:
return lambda x: '$'+sympy.latex(x)+'$'
else:
return lambda x: sympy.latex(x)
def _get_arr_obj(self, arr):
if self.str_use_sage:
return arr.sage_block_matrix()
elif self.str_use_colorize:
import np_colorizer
return np_colorizer.colorize(arr.nparray)
else:
return arr.nparray
def array_str(self, arr):
"""
Creates string for HilbertArray.
"""
return str(arr.space)+'\n'+str(self._get_arr_obj(arr))
def array_repr(self, arr):
"""
Creates repr for HilbertArray.
"""
return 'HilbertArray('+repr(arr.space)+',\n'+repr(self._get_arr_obj(arr))+')'
def array_latex_block_table(self, arr, use_hline=False):
"""
Formats array in Latex. Used by both Sage and IPython.
"""
# Alternative way to do it:
# if not have_sage:
# raise HilbertError('This is only available under Sage')
#
# import sage.all
#
# return '\\begin{array}{l}\n'+ \
# sage.all.latex(self.space)+' \\\\\n'+ \
# sage.all.latex(self.sage_block_matrix())+ \
# '\\end{array}'
(suppress, suppress_thresh) = self._get_suppress()
spc = arr.space
if len(spc.ket_set):
ket_indices = list(spc.ket_space().index_iter())
else:
ket_indices = [None]
if len(spc.bra_set):
bra_indices = list(spc.bra_space().index_iter())
else:
bra_indices = [None]
fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=False)
ht = r'\scriptsize{'
ht += r'\begin{array}{|'
if spc.ket_set:
ht += 'l|'
if spc.bra_set:
bra_shape = spc.bra_space().shape
colgrp_size = np.product(bra_shape[1:])
ht += ('c'*colgrp_size + '|')*bra_shape[0]
else:
ht += 'c|'
ht += "}\n"
if spc.bra_set:
if use_hline: ht += r'\hline' + "\n"
if spc.ket_set:
ht += '&'
for (b_idx_n, b_idx) in enumerate(bra_indices):
if b_idx_n:
ht += ' & '
if b_idx is not None:
ht += r'\left< '
for (x, y) in zip(b_idx, spc.sorted_bras):
ht += str(x) + '_{' + y.latex_label + '}'
ht += r' \right|'
ht += r' \\' + "\n"
last_k = None
for k_idx in ket_indices:
if k_idx is None or k_idx[0] != last_k:
if use_hline: ht += r'\hline' + "\n"
if k_idx is not None:
last_k = k_idx[0]
if k_idx is not None:
ht += r'\left| '
for (x, y) in zip(k_idx, spc.sorted_kets):
ht += str(x) + '_{' + y.latex_label + '}'
ht += r' \right>'
ht += ' & '
for (b_idx_n, b_idx) in enumerate(bra_indices):
if k_idx is None and b_idx is None:
assert 0
elif k_idx is None:
idx = b_idx
elif b_idx is None:
idx = k_idx
else:
idx = k_idx + b_idx
v = arr[idx]
if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):
if self.zero_color_latex != '':
vs = r'\color{'+self.zero_color_latex+'}{0}'
else:
vs = '0'
else:
vs = fmt(v)
if b_idx_n:
ht += ' & '
ht += vs
ht += r' \\' + "\n"
if use_hline: ht += r'\hline' + "\n"
ht += r"\end{array}" + "\n"
ht += '}' # small
return ht
    def array_html_block_table(self, arr):
        r"""
        Format array in HTML. Used for IPython.
        >>> from qitensor import qudit
        >>> ha = qudit('a', 3)
        >>> hb = qudit('b', 2)
        >>> X = ha.eye() * hb.ket(1)
        >>> f = HilbertArrayFormatter()
        >>> f.set_printoptions()
        >>> print(f.array_html_block_table(X))
        $\left| a,b \right\rangle\left\langle a \right|$<table style='margin: 0px 0px;'>
        <colgroup style='border: 2px solid black;'></colgroup>
        <colgroup span=3 style='border: 2px solid black;'></colgroup>
        <tbody style='border: 2px solid black;'>
        <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'> </td><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>⟨<tt>0</tt>|</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>⟨<tt>1</tt>|</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>⟨<tt>2</tt>|</nobr></td></tr>
        </tbody>
        <tbody style='border: 2px solid black;'>
        <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>0</tt>,<tt>0</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr>
        <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>0</tt>,<tt>1</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><nobr><tt> 1.+0.j</tt></nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr>
        </tbody>
        <tbody style='border: 2px solid black;'>
        <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>1</tt>,<tt>0</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr>
        <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>1</tt>,<tt>1</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><nobr><tt> 1.+0.j</tt></nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr>
        </tbody>
        <tbody style='border: 2px solid black;'>
        <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>2</tt>,<tt>0</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr>
        <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>2</tt>,<tt>1</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><nobr><tt> 1.+0.j</tt></nobr></td></tr>
        </tbody>
        </table>
        <BLANKLINE>
        """
        # Zero-suppression settings mirror numpy's printoptions (see _get_suppress).
        (suppress, suppress_thresh) = self._get_suppress()
        # Reusable inline-CSS snippets: table/tbody border, row, header cell, value cell.
        st_tab = "style='border: 2px solid black;'"
        st_tr = "style='border: 1px dotted; padding: 2px;'"
        st_th = "style='border: 1px dotted; padding: 2px; text-align: center;'"
        st_tdval = "style='border: 1px dotted; padding: 2px; text-align: right;'"
        spc = arr.space
        # A [None] placeholder stands in for a missing ket (or bra) side so the
        # row/column loops below still execute exactly once for that axis.
        if len(spc.ket_set):
            ket_indices = list(spc.ket_space().index_iter())
        else:
            ket_indices = [None]
        if len(spc.bra_set):
            bra_indices = list(spc.bra_space().index_iter())
        else:
            bra_indices = [None]
        # Per-value formatter; scaling/precision derived from the whole array.
        fmt = spc.base_field.latex_formatter(arr.nparray.flatten(), dollar_if_tex=True)
        ht = ''
        # Space label above the table: latex (for the notebook) or plain HTML.
        if self.use_latex_label_in_html:
            ht += '$'+spc._latex_()+'$'
        else:
            # FIXME - here, and elsewhere, use unicode symbols '⟨' and '⟩'
            # for html.
            ht += spc._html_()+'<br>'
        ht += "<table style='margin: 0px 0px;'>\n"
        # One colgroup for the ket-label column, then one per group of bra columns
        # (grouped by the last bra factor's dimension) so borders separate blocks.
        if spc.ket_set:
            ht += "<colgroup "+st_tab+"></colgroup>\n"
        if len(spc.bra_set):
            colgrp_size = spc.bra_space().shape[-1]
            for i in range(spc.bra_space().dim() // colgrp_size):
                ht += ("<colgroup span=%d "+st_tab+"></colgroup>\n") % colgrp_size
        else:
            ht += "<colgroup "+st_tab+"></colgroup>\n"
        # Header row of bra labels (only when there is a bra side).
        if spc.bra_set:
            ht += "<tbody "+st_tab+">\n"
            ht += '<tr '+st_tr+'>'
            if spc.ket_set:
                ht += '<td '+st_th+'> </td>'
            for b_idx in bra_indices:
                ht += '<td '+st_th+'><nobr>'
                #if self.use_latex_label_in_html:
                #    ht += r'$\scriptsize{\left< '
                #    ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices?
                #    ht += r' \right|}$'
                #else:
                ht += '⟨'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|'
                ht += '</nobr></td>'
            ht += '</tr>\n'
            ht += '</tbody>\n'
        # Body rows: a new <tbody> starts whenever the second-to-last ket index
        # changes, visually blocking rows the same way columns are blocked.
        last_k = None
        for k_idx in ket_indices:
            if k_idx is not None and len(k_idx) > 1 and k_idx[-2] != last_k:
                if last_k is not None:
                    ht += '</tbody>\n'
                ht += "<tbody "+st_tab+">\n"
                last_k = k_idx[-2]
            ht += '<tr '+st_tr+'>'
            if spc.ket_set:
                ht += '<td '+st_th+'><nobr>'
                #if self.use_latex_label_in_html:
                #    ht += r'$\scriptsize{\left| '
                #    ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices?
                #    ht += r' \right>}$'
                #else:
                ht += '|'+(','.join(['<tt>'+str(x)+'</tt>' for x in k_idx]))+'⟩'
                ht += '</nobr></td>'
            for b_idx in bra_indices:
                # Combine ket and bra indices into the full array index
                # (one side may be absent, but never both).
                if k_idx is None and b_idx is None:
                    assert 0
                elif k_idx is None:
                    idx = b_idx
                elif b_idx is None:
                    idx = k_idx
                else:
                    idx = k_idx + b_idx
                v = arr[idx]
                # Near-zero entries render as a dimmed '0' when suppression is on.
                if suppress and spc.base_field.eval_suppress_small(v, suppress_thresh):
                    if self.zero_color_html != '':
                        vs = "<font color='"+self.zero_color_html+"'>0</font>"
                    else:
                        vs = "0"
                else:
                    vs = "<nobr><tt>"+fmt(v)+"</tt></nobr>"
                ht += '<td '+st_tdval+'>'+vs+'</td>'
            ht += '</tr>\n'
        ht += '</tbody>\n'
        ht += '</table>\n'
        return ht
# NOTE: this is normally accessed via set_qitensor_printoptions
def set_printoptions(
self,
str_use_sage=None,
zero_color_latex=None,
zero_color_html=None,
use_latex_label_in_html=None,
ipy_table_format_mode=None,
ipy_space_format_mode=None
):
"""
Sets print options for qitensor.
Any options passed the ``None`` value won't be changed.
:param str_use_sage: If true, use Sage's matrix formatting functions
when available (this is prettier).
:type str_use_sage: bool
:param zero_color_latex: Color to use for drawing the number zero in latex.
:type zero_color_latex: string
:param zero_color_html: Color to use for drawing the number zero in HTML.
:type zero_color_html: string
:param use_latex_label_in_html: If true, HilbertSpace labels will be
shown in latex form when rendering an array in HTML. Works good with
the IPython notebook, but not with qtconsole.
:type use_latex_label_in_html: bool
:param ipy_table_format_mode: Which mode to use for formatting arrays in
the IPython notebook.
:type ipy_table_format_mode: string ('html', 'latex', 'png', 'plain')
:param ipy_space_format_mode: Which mode to use for formatting HilbertSpace
labels in the IPython notebook.
:type ipy_space_format_mode: string ('latex', 'png', 'plain')
qitensor also makes use of the ``suppress`` and ``precision`` options from
numpy.set_printoptions.
See also: :func:`get_qitensor_printoptions`
"""
if str_use_sage is not None:
self.str_use_sage = bool(str_use_sage)
if zero_color_latex is not None:
self.zero_color_latex = str(zero_color_latex)
if zero_color_html is not None:
self.zero_color_html = str(zero_color_html)
if use_latex_label_in_html is not None:
self.use_latex_label_in_html = bool(use_latex_label_in_html)
if ipy_table_format_mode is not None:
assert ipy_table_format_mode in ['html', 'latex', 'png', 'plain']
self.ipy_table_format_mode = ipy_table_format_mode
if ipy_space_format_mode is not None:
assert ipy_space_format_mode in ['latex', 'png', 'plain']
self.ipy_space_format_mode = ipy_space_format_mode
# NOTE: this is normally accessed via get_qitensor_printoptions
def get_printoptions(self):
"""
Gets the current qitensor formatting options.
See also: :func:`set_qitensor_printoptions`
"""
return {
"str_use_sage" : self.str_use_sage,
"zero_color_latex" : self.zero_color_latex,
"zero_color_html" : self.zero_color_html,
"use_latex_label_in_html" : self.use_latex_label_in_html,
"ipy_table_format_mode" : self.ipy_table_format_mode,
"ipy_space_format_mode" : self.ipy_space_format_mode,
}
def setup_for_qtconsole(self):
"""
Sets good printing options for IPython QTconsole.
"""
self.set_printoptions(ipy_table_format_mode='png', ipy_space_format_mode='png')
# FIXME - latex_to_png is limited in its allowed colors
self.set_printoptions(zero_color_latex='yellow')
# Module-level singleton formatter shared by the rest of qitensor.
FORMATTER = HilbertArrayFormatter()
# Public aliases: expose the singleton's configuration methods as module functions.
set_qitensor_printoptions = FORMATTER.set_printoptions
get_qitensor_printoptions = FORMATTER.get_printoptions
setup_qitensor_for_qtconsole = FORMATTER.setup_for_qtconsole
|
en
| 0.182097
|
This module handles formatting of arrays. Everything in here is for internal use only, except for the :func:`set_qitensor_printoptions` and :func:`get_qitensor_printoptions` functions. This module handles formatting of arrays. Methods of this class are called by methods of HilbertArray, and shouldn't need to be dealt with directly. sage: import qitensor.arrayformatter sage: TestSuite(qitensor.arrayformatter.FORMATTER).run() # FIXME - make this undocumented option public (requires publishing np_colorizer) Gets the current suppression settings (from numpy). Formats python scalar for latex. Formats Sage scalar for latex. Formats Sympy scalar for latex. Creates string for HilbertArray. Creates repr for HilbertArray. Formats array in Latex. Used by both Sage and IPython. # Alternative way to do it: # if not have_sage: # raise HilbertError('This is only available under Sage') # # import sage.all # # return '\\begin{array}{l}\n'+ \ # sage.all.latex(self.space)+' \\\\\n'+ \ # sage.all.latex(self.sage_block_matrix())+ \ # '\\end{array}' # small Format array in HTML. Used for IPython. 
>>> from qitensor import qudit >>> ha = qudit('a', 3) >>> hb = qudit('b', 2) >>> X = ha.eye() * hb.ket(1) >>> f = HilbertArrayFormatter() >>> f.set_printoptions() >>> print(f.array_html_block_table(X)) $\left| a,b \right\rangle\left\langle a \right|$<table style='margin: 0px 0px;'> <colgroup style='border: 2px solid black;'></colgroup> <colgroup span=3 style='border: 2px solid black;'></colgroup> <tbody style='border: 2px solid black;'> <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'> </td><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>⟨<tt>0</tt>|</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>⟨<tt>1</tt>|</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>⟨<tt>2</tt>|</nobr></td></tr> </tbody> <tbody style='border: 2px solid black;'> <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>0</tt>,<tt>0</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr> <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>0</tt>,<tt>1</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><nobr><tt> 1.+0.j</tt></nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr> </tbody> <tbody style='border: 2px solid black;'> <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: 
center;'><nobr>|<tt>1</tt>,<tt>0</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr> <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>1</tt>,<tt>1</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><nobr><tt> 1.+0.j</tt></nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr> </tbody> <tbody style='border: 2px solid black;'> <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>2</tt>,<tt>0</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td></tr> <tr style='border: 1px dotted; padding: 2px;'><td style='border: 1px dotted; padding: 2px; text-align: center;'><nobr>|<tt>2</tt>,<tt>1</tt>⟩</nobr></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><font color='#cccccc'>0</font></td><td style='border: 1px dotted; padding: 2px; text-align: right;'><nobr><tt> 1.+0.j</tt></nobr></td></tr> </tbody> </table> <BLANKLINE> # FIXME - here, and elsewhere, use unicode symbols '⟨' and '⟩' # for html. 
#if self.use_latex_label_in_html: # ht += r'$\scriptsize{\left< ' # ht += ','.join([str(x) for x in b_idx]) # FIXME - latex label for indices? # ht += r' \right|}$' #else: #x27e8;'+(','.join(['<tt>'+str(x)+'</tt>' for x in b_idx]))+'|' #if self.use_latex_label_in_html: # ht += r'$\scriptsize{\left| ' # ht += ','.join([str(x) for x in k_idx]) # FIXME - latex label for indices? # ht += r' \right>}$' #else: #x27e9;' # NOTE: this is normally accessed via set_qitensor_printoptions Sets print options for qitensor. Any options passed the ``None`` value won't be changed. :param str_use_sage: If true, use Sage's matrix formatting functions when available (this is prettier). :type str_use_sage: bool :param zero_color_latex: Color to use for drawing the number zero in latex. :type zero_color_latex: string :param zero_color_html: Color to use for drawing the number zero in HTML. :type zero_color_html: string :param use_latex_label_in_html: If true, HilbertSpace labels will be shown in latex form when rendering an array in HTML. Works good with the IPython notebook, but not with qtconsole. :type use_latex_label_in_html: bool :param ipy_table_format_mode: Which mode to use for formatting arrays in the IPython notebook. :type ipy_table_format_mode: string ('html', 'latex', 'png', 'plain') :param ipy_space_format_mode: Which mode to use for formatting HilbertSpace labels in the IPython notebook. :type ipy_space_format_mode: string ('latex', 'png', 'plain') qitensor also makes use of the ``suppress`` and ``precision`` options from numpy.set_printoptions. See also: :func:`get_qitensor_printoptions` # NOTE: this is normally accessed via get_qitensor_printoptions Gets the current qitensor formatting options. See also: :func:`set_qitensor_printoptions` Sets good printing options for IPython QTconsole. # FIXME - latex_to_png is limited in its allowed colors
| 2.390714
| 2
|
tests/tune/concepts/space/test_parameters.py
|
fugue-project/tune
| 14
|
6629308
|
<reponame>fugue-project/tune
import json
import numpy as np
import pandas as pd
from pytest import raises
from scipy import stats
from triad import to_uuid
from tune._utils import assert_close
from tune.concepts.space import (
Choice,
Grid,
NormalRand,
NormalRandInt,
Rand,
RandInt,
TransitionChoice,
TuningParametersTemplate,
to_template,
FuncParam,
)
def test_grid():
    """Grid iterates its values in declaration order; uuid depends on that order."""
    grid_ab = Grid("a", "b")
    assert list(grid_ab) == ["a", "b"]
    grid_ba = Grid("b", "a")
    # Equal to itself, distinct from a reordered grid.
    assert grid_ab == grid_ab and grid_ab != grid_ba
    assert to_uuid(grid_ab) != to_uuid(grid_ba)
    # At least one value is required.
    raises(ValueError, lambda: Grid())
def test_choice():
    """Choice: seeded sampling is deterministic, covers all values, JSON-serializes, and uuids track values."""
    raises(ValueError, lambda: Choice())
    v = Choice("a", "b", "c")
    # Same seed -> same draw(s); different seed -> different draw(s).
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    assert v.generate_many(20, 0) == v.generate_many(20, 0)
    assert v.generate_many(20, 0) != v.generate_many(20, 1)
    actual = set(v.generate_many(20, 0))
    assert set(["a", "b", "c"]) == actual
    # A Choice is distinct from a Grid even with identical values.
    assert to_uuid(v) != to_uuid(Grid("a", "b", "c"))
    assert v != Grid("a", "b", "c")
    v = Choice(1, 2, 3)
    # Generated values must be JSON round-trippable native types.
    assert json.loads(json.dumps({"x": v.generate(0)}))["x"] <= 3
    v = Choice("a", "b", "c")
    assert isinstance(json.loads(json.dumps({"x": v.generate(0)}))["x"], str)
    v2 = Choice("a", "b", "c")
    v3 = Choice("a", "b", "d")
    assert to_uuid(v) == to_uuid(v2)
    assert to_uuid(v2) != to_uuid(v3)
def test_transition_choice():
    """TransitionChoice: same sampling behavior as Choice, but hashes differently from Choice and Grid."""
    raises(ValueError, lambda: TransitionChoice())
    v = TransitionChoice("a", "b", "c")
    # Same seed -> same draw(s); different seed -> different draw(s).
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    assert v.generate_many(20, 0) == v.generate_many(20, 0)
    assert v.generate_many(20, 0) != v.generate_many(20, 1)
    actual = set(v.generate_many(20, 0))
    assert set(["a", "b", "c"]) == actual
    assert to_uuid(v) != to_uuid(Grid("a", "b", "c"))
    assert v != Grid("a", "b", "c")
    v = TransitionChoice(1, 2, 3)
    # Generated values must be JSON round-trippable native types.
    assert json.loads(json.dumps({"x": v.generate(0)}))["x"] <= 3
    v = TransitionChoice("a", "b", "c")
    assert isinstance(json.loads(json.dumps({"x": v.generate(0)}))["x"], str)
    v2 = TransitionChoice("a", "b", "c")
    # Same values as a Choice, but the expression type participates in the uuid.
    v3 = Choice("a", "b", "c")
    assert to_uuid(v) == to_uuid(v2)
    assert to_uuid(v2) != to_uuid(v3)
def test_rand():
    """Rand: argument validation, degenerate ranges, q-quantized sampling, and log-uniform distribution."""
    with raises(ValueError):
        Rand(1.0, 0.9)
    with raises(ValueError):
        Rand(1.0, 10, q=-0.1)
    with raises(ValueError):
        Rand(1.0, 1.0, include_high=False)
    with raises(ValueError):
        Rand(0.0, 1.0, log=True)  # for log, low>=1.0
    # A degenerate range always yields the single endpoint, for any seed.
    v = Rand(0.1, 0.1, q=0.1, log=False)
    assert 0.1 == v.generate()
    assert 0.1 == v.generate(10)
    v = Rand(1.0, 1.0, q=0.1, log=True)
    assert 1.0 == v.generate()
    assert 1.0 == v.generate(10)
    # q=0.1 quantizes samples onto a 0.1 grid; include_high controls the top value.
    v = Rand(1.0, 2.0, q=0.1, log=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    assert_close([x / 10 for x in range(10, 21)], v.generate_many(100, 0))
    v = Rand(1.0, 2.09999, q=0.1, log=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    assert_close([x / 10 for x in range(10, 21)], v.generate_many(100, 0))
    v = Rand(1.0, 2.0, q=0.1, log=False, include_high=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    assert_close([x / 10 for x in range(10, 20)], v.generate_many(100, 0))
    v = Rand(1.0, 2.09999, q=0.1, log=False, include_high=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    assert_close([x / 10 for x in range(10, 21)], v.generate_many(100, 0))
    # Log-mode samples should be uniform in log space (KS test against uniform).
    v = Rand(0.1, 2.0, log=True, include_high=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    res = v.generate_many(10000, 0)
    for x in res:
        assert x >= 0.1 and x <= 2.0
    t = stats.kstest(
        np.log(res), "uniform", args=(np.log(0.1), np.log(2) - np.log(0.1))
    )
    assert t.pvalue > 0.4
    # uuid depends on parameter values, not on keyword order or spelling.
    v1 = Rand(1.0, 2.0, q=0.1, log=False)
    v2 = Rand(1.0, 2.0, log=False, q=0.1)
    v3 = Rand(1.0, 2.0, log=False)
    assert to_uuid(v1) == to_uuid(v2)
    assert to_uuid(v1) != to_uuid(v3)
def test_randint():
    """RandInt: log-mode validation, inclusive/exclusive upper bound, q spacing, and type-sensitive uuid."""
    with raises(ValueError):
        RandInt(0, 10, log=True)  # for log, low>=1.0
    v = RandInt(10, 20, log=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    actual = set(v.generate_many(100, 0))
    assert set(range(10, 21)) == actual
    v = RandInt(10, 20, include_high=False)
    actual = set(v.generate_many(100, 0))
    assert set(range(10, 20)) == actual
    # q=5 restricts samples to multiples of 5 counted from the low end.
    v = RandInt(10, 20, q=5, include_high=False)
    actual = set(v.generate_many(100, 0))
    assert set([10, 15]) == actual
    v = RandInt(10, 20, q=5, include_high=True)
    actual = set(v.generate_many(100, 0))
    assert set([10, 15, 20]) == actual
    # Log mode still covers the whole integer range given enough samples.
    v = RandInt(3, 20, log=True)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    actual = set(v.generate_many(1000, 0))
    assert set(range(3, 21)) == actual
    # RandInt and Rand with identical parameters must hash differently.
    v1 = RandInt(1, 20, q=2)
    v2 = RandInt(1, 20, q=2)
    v3 = Rand(1, 20, q=2)
    assert to_uuid(v1) == to_uuid(v2)
    assert to_uuid(v1) != to_uuid(v3)
def test_normal_rand():
    """NormalRand: sigma must be positive; samples follow N(mu, sigma); q quantizes around mu."""
    with raises(ValueError):
        NormalRand(1.0, 0.0)
    with raises(ValueError):
        NormalRand(1.0, -1.0)
    v = NormalRand(0.05, 0.2)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    # KS test against the normal distribution with the requested mu/sigma.
    res = v.generate_many(100000, 0)
    t = stats.kstest(res, "norm", args=(0.05, 0.2))
    assert t.pvalue > 0.4
    v = NormalRand(0.05, 0.2, q=0.1)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    # With q=0.1 samples land on the grid 0.05 + k*0.1 (check a window around mu).
    actual = [x for x in v.generate_many(1000, 0) if x >= -0.155 and x <= 0.255]
    assert_close([-0.15, -0.05, 0.05, 0.15, 0.25], actual)
    # uuid distinguishes NormalRand from Rand with identical parameters.
    v2 = NormalRand(0.05, 0.2, q=0.1)
    v3 = Rand(0.05, 0.2, q=0.1)
    assert to_uuid(v) == to_uuid(v2)
    assert to_uuid(v) != to_uuid(v3)
def test_normal_randint():
    """NormalRandInt: integer samples centered on mu; q spaces samples at mu +- k*q."""
    v = NormalRandInt(5, 2)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    actual = set(v.generate_many(50, 0))
    for x in [3, 4, 5, 6, 7]:
        assert x in actual
    v = NormalRandInt(5, 2, q=3)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(2)
    actual = set(v.generate_many(50, 0))
    # With q=3 only values of the form 5 + 3k appear (..., -1, 2, 5, 8, 11, ...).
    for x in [-1, 2, 5, 8, 11]:
        assert x in actual
    assert 6 not in actual
    # uuid distinguishes NormalRandInt from NormalRand with identical parameters.
    v2 = NormalRandInt(5, 2, q=3)
    v3 = NormalRand(5, 2, q=3)
    assert to_uuid(v) == to_uuid(v2)
    assert to_uuid(v) != to_uuid(v3)
def test_func_param():
    """FuncParam binds a callable with args/kwargs and supports equality, uuid, and item mutation."""
    def add_all(*args, x, y):
        return sum(args) + x + y
    fp_a = FuncParam(add_all, 4, x=1, y=2)
    assert fp_a() == 7
    fp_b = FuncParam(add_all, 4, x=1, y=2)
    fp_c = FuncParam(add_all, 5, x=1, y=2)
    # Equality and uuid follow the bound function and arguments.
    assert fp_a == fp_b
    assert fp_a != fp_c
    assert to_uuid(fp_a) == to_uuid(fp_b)
    assert to_uuid(fp_a) != to_uuid(fp_c)
    # Positional args are indexed by int, keyword args by name; both are mutable.
    fp_a[0] = 5
    fp_a["y"] = 3
    assert fp_a[0] == 5
    assert fp_a["y"] == 3
    assert fp_a() == 9
def test_tuning_parameters_template():
    """End-to-end template behavior: empty/grid/stochastic flags, param extraction, and fill/fill_dict."""
    data = dict(a=1)
    e = make_template(data)
    assert e.empty
    assert not e.has_grid
    assert not e.has_stochastic
    data = dict(a=Rand(0, 1))
    e = make_template(data)
    assert not e.empty
    assert not e.has_grid
    assert e.has_stochastic
    data = dict(a=Grid(0, 1))
    e = make_template(data)
    assert not e.empty
    assert e.has_grid
    assert not e.has_stochastic
    # Expressions are extracted in order from arbitrarily nested dicts/lists.
    data = dict(
        a=Rand(0, 1),
        b=Grid(2, 3),
        c=dict(
            a=Rand(10, 20), b=[dict(x=Rand(100, 200))], c=[1, Rand(1000, 2000)], d=None
        ),
        d=None,
    )
    e = make_template(data)
    assert not e.empty
    assert e.has_grid
    assert e.has_stochastic
    assert [
        Rand(0, 1),
        Grid(2, 3),
        Rand(10, 20),
        Rand(100, 200),
        Rand(1000, 2000),
    ] == e.params
    # fill() substitutes values positionally, in extraction order.
    res = e.fill([0.5, 2, 10.5, 100.5, 1000.5])
    res2 = e.fill([0.55, 2, 10.55, 100.5, 1000.5])
    assert (
        dict(
            a=0.5,
            b=2,
            c=dict(a=10.5, b=[dict(x=100.5)], c=[1, 1000.5], d=None),
            d=None,
        )
        == res
    )
    # Each fill() returns a fresh structure.
    assert res2 is not res
    assert (
        dict(
            a=0.55,
            b=2,
            c=dict(a=10.55, b=[dict(x=100.5)], c=[1, 1000.5], d=None),
            d=None,
        )
        == res2
    )
    # extract and fill by dicts
    data = dict(
        a=Rand(0, 1),
        b=dict(x=[Grid(2, 3)]),
    )
    e = make_template(data)
    assert dict(p0=Rand(0, 1), p1=Grid(2, 3)) == e.params_dict
    assert dict(a=0.5, b=dict(x=[2])) == e.fill_dict(dict(p1=2, p0=0.5))
    # same express in template
    expr = Rand(0, 1)
    data = dict(a=expr, b=dict(x=expr), c=Rand(2, 4))
    e = make_template(data)
    assert dict(p0=Rand(0, 1), p1=Rand(2, 4)) == e.params_dict
    # A shared expression instance is extracted once and filled everywhere.
    assert dict(a=0.5, b=dict(x=0.5), c=2) == e.fill_dict(dict(p1=2, p0=0.5))
    # special objects
    e = make_template(dict(a=Rand(0, 1), b=pd.DataFrame([[0]])))
    # func
    def tf(*args, x):
        return sum(args) + x[0]
    u = Grid(0, 1)
    e = make_template(dict(a=1, b=[FuncParam(tf, Rand(0, 1), u, x=[u])]))
    assert e.has_grid
    assert e.has_stochastic
    # Expressions inside a FuncParam are filled before the function is invoked.
    assert dict(a=1, b=[2.5]) == e.fill([0.5, 1])
def test_template_eq():
    """Template equality is by content; shared vs. distinct expression instances are distinguished."""
    data1 = make_template(dict())
    data2 = make_template(dict())
    assert data1 == data2
    data1 = make_template(dict(a=1, b=2))
    data2 = make_template(dict(a=1, b=2))
    data3 = make_template(dict(a=1, b=3))
    assert data1 == data2
    assert data1 != data3
    data1 = make_template(dict(a=1, b=Grid(0, 1)))
    data2 = make_template(dict(a=1, b=Grid(0, 1)))
    data3 = make_template(dict(a=1, b=Grid(0, 2)))
    assert data1 == data2
    assert data1 != data3
    # b and c sharing one Grid instance equals another template whose b and c
    # share one instance, but not one where b and c hold distinct instances.
    u = Grid(0, 1)
    v = Grid(0, 1)
    data1 = make_template(dict(a=1, b=u, c=u))
    data2 = dict(a=1, b=v, c=v)
    data3 = dict(a=1, b=u, c=v)
    assert data1 == data2
    assert data1 != data3
    # Comparison also works template-vs-dict in either direction.
    assert data2 == data1
    assert data3 != data1
def test_template_product():
    """product_grid expands Grid expressions; shared instances vary together; stochastic values stay put."""
    data = make_template(dict())
    assert [dict()] == list(data.product_grid())
    data = make_template(dict(a=1, b=2))
    assert [dict(a=1, b=2)] == list(data.product_grid())
    data = make_template(dict(a=1, b=Grid(0, 1)))
    assert [dict(a=1, b=0), dict(a=1, b=1)] == list(data.product_grid())
    # a and c share one Grid instance, so they take the same value in each combo.
    u = Grid(0, 1)
    data = make_template(dict(a=u, b=1, c=[u], d=Grid(0, 1)))
    assert [
        dict(a=0, b=1, c=[0], d=0),
        dict(a=0, b=1, c=[0], d=1),
        dict(a=1, b=1, c=[1], d=0),
        dict(a=1, b=1, c=[1], d=1),
    ] == list(data.product_grid())
    # Stochastic expressions are not expanded by product_grid.
    data = make_template(dict(a=1, b=Grid(0, 1), c=Rand(0, 1)))
    assert [dict(a=1, b=0, c=Rand(0, 1)), dict(a=1, b=1, c=Rand(0, 1))] == list(
        data.product_grid()
    )
def test_template_sample():
    """sample() draws seeded, deterministic fills of the stochastic expressions only."""
    data = make_template(dict())
    # The sample count must be positive.
    raises(ValueError, lambda: list(data.sample(0, 0)))
    raises(ValueError, lambda: list(data.sample(-1, 0)))
    # With nothing stochastic, sampling yields a single (constant) template.
    assert [dict()] == list(data.sample(100, 0))
    data = make_template(dict(a=1, b=2))
    assert [dict(a=1, b=2)] == list(data.sample(100, 0))
    data = make_template(dict(a=1, b=Rand(0, 1)))
    # Same seed -> same samples; different seed -> different samples.
    assert list(data.sample(10, 0)) == list(data.sample(10, 0))
    assert list(data.sample(10, 0)) != list(data.sample(10, 1))
    a = list(data.sample(10, 0))
    assert 10 == len(a)
    assert all(x.template["b"] >= 0 and x.template["b"] <= 1 for x in a)
    assert all(x.empty for x in a)
    assert all(not x.has_grid for x in a)
    assert all(not x.has_stochastic for x in a)
    # Grid expressions survive sampling; a shared Rand instance gets one value.
    u = Rand(0, 1)
    data = make_template(dict(a=1, b=u, c=Grid(0, 1), d=[u]))
    a = list(data.sample(10, 0))
    assert 10 == len(a)
    assert all(x.template["b"] >= 0 and x.template["b"] <= 1 for x in a)
    assert all(x.template["d"][0] == x.template["b"] for x in a)
    assert all(not x.empty for x in a)
    assert all(x.has_grid for x in a)
    assert all(not x.has_stochastic for x in a)
def test_template_concat():
    """concat joins two templates into one whose fill covers both halves in order."""
    u = Grid(0, 1)
    t1 = TuningParametersTemplate(dict(a=1, b=u, c=Grid(2, 3)))
    t2 = TuningParametersTemplate(dict(d=2, e=u, f=Grid(2, 3)))
    t = t1.concat(t2)
    assert dict(a=1, b=u, c=Grid(2, 3), d=2, e=u, f=Grid(2, 3)) == t
    # The originals remain independently fillable after concatenation.
    assert dict(a=1, b=0, c=2) == t1.fill([0, 2])
    assert dict(d=2, e=1, f=3) == t2.fill([1, 3])
    assert dict(a=1, b=1, c=2, d=2, e=1, f=3) == t.fill([1, 2, 3])
    # Concatenating t with t1 again raises (presumably due to overlapping
    # keys/expressions — see TuningParametersTemplate.concat for the exact rule).
    raises(ValueError, lambda: t.concat(t1))
def test_template_misc():
    """Misc template behaviors: to_template conversion, uuid identity, and simple_value."""
    # to_template accepts a dict, a template (returned as-is), or an encoded template.
    t = to_template(dict(a=1, b=Grid(0, 1)))
    assert isinstance(t, TuningParametersTemplate)
    t2 = to_template(t)
    assert t is t2
    t3 = to_template(t.encode())
    assert t == t3
    raises(ValueError, lambda: to_template(123))
    # uuid: shared vs. distinct expression instances produce different uuids.
    u = Grid(0, 1)
    t1 = make_template(dict(a=1, b=u, c=Grid(0, 1)))
    t2 = make_template(dict(a=1, b=u, c=Grid(0, 1)))
    t3 = make_template(dict(a=1, b=u, c=u))
    t4 = make_template(dict(a=1, b=u, c=u))
    assert to_uuid(t1) == to_uuid(t2)
    assert to_uuid(t2) != to_uuid(t3)
    assert to_uuid(t3) == to_uuid(t4)
    # simple value: only available when no tunable expression remains.
    u = Grid(0, 1)
    t1 = make_template(dict(a=1, b=u, c=Grid(0, 1), d=FuncParam(lambda x: x + 1, u)))
    raises(ValueError, lambda: t1.simple_value)
    assert [
        dict(a=1, b=0, c=0, d=1),
        dict(a=1, b=0, c=1, d=1),
        dict(a=1, b=1, c=0, d=2),
        dict(a=1, b=1, c=1, d=2),
    ] == list(t1.product_grid())
    t2 = make_template(dict(a=1, b=2))
    # BUG FIX: this comparison was missing its `assert`, so it checked nothing.
    assert dict(a=1, b=2) == t2.simple_value
    t2 = make_template(dict(a=1, b=FuncParam(lambda x: x + 1, x=2)))
    # FuncParams with no tunable inputs are evaluated when computing simple_value.
    assert dict(a=1, b=3) == t2.simple_value
def test_repr():
    """repr/str of every expression type and of templates mirrors the constructor form."""
    assert "Grid('a', 'b')" == repr(Grid("a", "b"))
    assert "Choice('a', 'b')" == repr(Choice("a", "b"))
    assert "TransitionChoice('a', 'b')" == repr(TransitionChoice("a", "b"))
    # Positional arguments are rendered back with their keyword names.
    assert "Rand(low=0.2, high=1, q=0.1, log=True, include_high=False)" == repr(
        Rand(0.2, 1, 0.1, True, False)
    )
    assert "RandInt(low=2, high=10, q=2, log=True, include_high=False)" == repr(
        RandInt(2, 10, 2, True, False)
    )
    assert "NormalRand(mu=0.1, sigma=0.2, q=0.3)" == repr(NormalRand(0.1, 0.2, 0.3))
    assert "NormalRandInt(mu=2, sigma=0.2, q=3)" == repr(NormalRandInt(2, 0.2, 3))
    # FuncParam shows the wrapped callable's __name__ ('<lambda>' for lambdas).
    assert "FuncParam(make_template, a=Grid('a', 'b'))" == repr(
        FuncParam(make_template, a=Grid("a", "b"))
    )
    assert "FuncParam(<lambda>, Grid('a', 'b'))" == repr(
        FuncParam(lambda x: x + 1, Grid("a", "b"))
    )
    # Templates repr/str as their underlying dict.
    assert "{'a': 1, 'b': Grid(1, 2)}" == repr(
        TuningParametersTemplate(dict(a=1, b=Grid(1, 2)))
    )
    assert "{'a': 1, 'b': Grid(1, 2)}" == str(
        TuningParametersTemplate(dict(a=1, b=Grid(1, 2)))
    )
def make_template(d):
    """Round-trip *d* through encode/decode so every test also exercises serialization."""
    encoded = TuningParametersTemplate(d).encode()
    return TuningParametersTemplate.decode(encoded)
|
import json
import numpy as np
import pandas as pd
from pytest import raises
from scipy import stats
from triad import to_uuid
from tune._utils import assert_close
from tune.concepts.space import (
Choice,
Grid,
NormalRand,
NormalRandInt,
Rand,
RandInt,
TransitionChoice,
TuningParametersTemplate,
to_template,
FuncParam,
)
def test_grid():
v = Grid("a", "b")
assert ["a", "b"] == list(v)
v2 = Grid("b", "a")
assert v == v and v != v2
assert to_uuid(v) != to_uuid(v2)
raises(ValueError, lambda: Grid())
def test_choice():
raises(ValueError, lambda: Choice())
v = Choice("a", "b", "c")
assert v.generate(0) == v.generate(0)
assert v.generate(0) != v.generate(1)
assert v.generate_many(20, 0) == v.generate_many(20, 0)
assert v.generate_many(20, 0) != v.generate_many(20, 1)
actual = set(v.generate_many(20, 0))
assert set(["a", "b", "c"]) == actual
assert to_uuid(v) != to_uuid(Grid("a", "b", "c"))
assert v != Grid("a", "b", "c")
v = Choice(1, 2, 3)
assert json.loads(json.dumps({"x": v.generate(0)}))["x"] <= 3
v = Choice("a", "b", "c")
assert isinstance(json.loads(json.dumps({"x": v.generate(0)}))["x"], str)
v2 = Choice("a", "b", "c")
v3 = Choice("a", "b", "d")
assert to_uuid(v) == to_uuid(v2)
assert to_uuid(v2) != to_uuid(v3)
def test_transition_choice():
raises(ValueError, lambda: TransitionChoice())
v = TransitionChoice("a", "b", "c")
assert v.generate(0) == v.generate(0)
assert v.generate(0) != v.generate(1)
assert v.generate_many(20, 0) == v.generate_many(20, 0)
assert v.generate_many(20, 0) != v.generate_many(20, 1)
actual = set(v.generate_many(20, 0))
assert set(["a", "b", "c"]) == actual
assert to_uuid(v) != to_uuid(Grid("a", "b", "c"))
assert v != Grid("a", "b", "c")
v = TransitionChoice(1, 2, 3)
assert json.loads(json.dumps({"x": v.generate(0)}))["x"] <= 3
v = TransitionChoice("a", "b", "c")
assert isinstance(json.loads(json.dumps({"x": v.generate(0)}))["x"], str)
v2 = TransitionChoice("a", "b", "c")
v3 = Choice("a", "b", "c")
assert to_uuid(v) == to_uuid(v2)
assert to_uuid(v2) != to_uuid(v3)
def test_rand():
    """Rand: validation, degenerate ranges, quantization, bounds, log sampling."""
    # low must not exceed high.
    with raises(ValueError):
        Rand(1.0, 0.9)
    # step q must be positive.
    with raises(ValueError):
        Rand(1.0, 10, q=-0.1)
    # an empty open interval is invalid.
    with raises(ValueError):
        Rand(1.0, 1.0, include_high=False)
    with raises(ValueError):
        Rand(0.0, 1.0, log=True)  # for log, low>=1.0
    # a degenerate range always yields the single value, regardless of seed.
    v = Rand(0.1, 0.1, q=0.1, log=False)
    assert 0.1 == v.generate()
    assert 0.1 == v.generate(10)
    v = Rand(1.0, 1.0, q=0.1, log=True)
    assert 1.0 == v.generate()
    assert 1.0 == v.generate(10)
    # quantized sampling is deterministic per seed and covers every q step.
    v = Rand(1.0, 2.0, q=0.1, log=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    assert_close([x / 10 for x in range(10, 21)], v.generate_many(100, 0))
    # a high that is not an exact multiple of q still rounds to the same grid.
    v = Rand(1.0, 2.09999, q=0.1, log=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    assert_close([x / 10 for x in range(10, 21)], v.generate_many(100, 0))
    # include_high=False drops the top grid point when high is on the grid ...
    v = Rand(1.0, 2.0, q=0.1, log=False, include_high=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    assert_close([x / 10 for x in range(10, 20)], v.generate_many(100, 0))
    # ... but keeps it when high already lies strictly beyond the last step.
    v = Rand(1.0, 2.09999, q=0.1, log=False, include_high=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    assert_close([x / 10 for x in range(10, 21)], v.generate_many(100, 0))
    # log sampling: values stay in range and are log-uniform (KS test).
    v = Rand(0.1, 2.0, log=True, include_high=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    res = v.generate_many(10000, 0)
    for x in res:
        assert x >= 0.1 and x <= 2.0
    t = stats.kstest(
        np.log(res), "uniform", args=(np.log(0.1), np.log(2) - np.log(0.1))
    )
    assert t.pvalue > 0.4
    # uuid identity: keyword order is irrelevant; different params differ.
    v1 = Rand(1.0, 2.0, q=0.1, log=False)
    v2 = Rand(1.0, 2.0, log=False, q=0.1)
    v3 = Rand(1.0, 2.0, log=False)
    assert to_uuid(v1) == to_uuid(v2)
    assert to_uuid(v1) != to_uuid(v3)
def test_randint():
    """RandInt: log validation, inclusive/exclusive bounds and quantization."""
    with raises(ValueError):
        RandInt(0, 10, log=True)  # for log, low>=1.0
    # uniform integer sampling is deterministic per seed and covers the range.
    v = RandInt(10, 20, log=False)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    actual = set(v.generate_many(100, 0))
    assert set(range(10, 21)) == actual
    # include_high=False removes the upper bound from the support.
    v = RandInt(10, 20, include_high=False)
    actual = set(v.generate_many(100, 0))
    assert set(range(10, 20)) == actual
    # quantization keeps only multiples of q offset from low.
    v = RandInt(10, 20, q=5, include_high=False)
    actual = set(v.generate_many(100, 0))
    assert set([10, 15]) == actual
    v = RandInt(10, 20, q=5, include_high=True)
    actual = set(v.generate_many(100, 0))
    assert set([10, 15, 20]) == actual
    # log-scaled sampling still reaches every integer in the range.
    v = RandInt(3, 20, log=True)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    actual = set(v.generate_many(1000, 0))
    assert set(range(3, 21)) == actual
    # uuid identity: equal RandInts match; a Rand never matches a RandInt.
    v1 = RandInt(1, 20, q=2)
    v2 = RandInt(1, 20, q=2)
    v3 = Rand(1, 20, q=2)
    assert to_uuid(v1) == to_uuid(v2)
    assert to_uuid(v1) != to_uuid(v3)
def test_normal_rand():
    """NormalRand: sigma validation, normal-distribution fit and quantization."""
    # sigma must be strictly positive.
    with raises(ValueError):
        NormalRand(1.0, 0.0)
    with raises(ValueError):
        NormalRand(1.0, -1.0)
    # samples are deterministic per seed and fit N(mu, sigma) (KS test).
    v = NormalRand(0.05, 0.2)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    res = v.generate_many(100000, 0)
    t = stats.kstest(res, "norm", args=(0.05, 0.2))
    assert t.pvalue > 0.4
    # with q, samples land on the mu-centered grid of step q.
    v = NormalRand(0.05, 0.2, q=0.1)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    actual = [x for x in v.generate_many(1000, 0) if x >= -0.155 and x <= 0.255]
    assert_close([-0.15, -0.05, 0.05, 0.15, 0.25], actual)
    # uuid identity: equal NormalRands match; a Rand never matches.
    v2 = NormalRand(0.05, 0.2, q=0.1)
    v3 = Rand(0.05, 0.2, q=0.1)
    assert to_uuid(v) == to_uuid(v2)
    assert to_uuid(v) != to_uuid(v3)
def test_normal_randint():
    """NormalRandInt: integer sampling around mu, step q, and uuid identity."""
    v = NormalRandInt(5, 2)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(1)
    # integers near the mean must all be reachable.
    actual = set(v.generate_many(50, 0))
    for x in [3, 4, 5, 6, 7]:
        assert x in actual
    # with q=3 only mu plus integer multiples of 3 are generated.
    v = NormalRandInt(5, 2, q=3)
    assert v.generate(0) == v.generate(0)
    assert v.generate(0) != v.generate(2)
    actual = set(v.generate_many(50, 0))
    for x in [-1, 2, 5, 8, 11]:
        assert x in actual
    assert 6 not in actual
    # uuid identity: equal NormalRandInts match; a NormalRand never matches.
    v2 = NormalRandInt(5, 2, q=3)
    v3 = NormalRand(5, 2, q=3)
    assert to_uuid(v) == to_uuid(v2)
    assert to_uuid(v) != to_uuid(v3)
def test_func_param():
    """FuncParam: lazy invocation, equality/uuid, and item-style arg mutation."""
    def tf(*args, x, y):
        return sum(args) + x + y
    # calling the FuncParam invokes tf with the stored args: 4 + 1 + 2 == 7.
    f1 = FuncParam(tf, 4, x=1, y=2)
    assert 7 == f1()
    # equality and uuid are based on the function plus all of its arguments.
    f2 = FuncParam(tf, 4, x=1, y=2)
    f3 = FuncParam(tf, 5, x=1, y=2)
    assert f1 == f2
    assert f1 != f3
    assert to_uuid(f1) == to_uuid(f2)
    assert to_uuid(f1) != to_uuid(f3)
    # int keys index positional args, str keys index keyword args.
    f1[0] = 5
    f1["y"] = 3
    assert 5 == f1[0]
    assert 3 == f1["y"]
    assert 9 == f1()
def test_tuning_parameters_template():
    """Template extraction and filling of tuning expressions in nested dicts."""
    # a plain dict has no tuning expressions at all.
    data = dict(a=1)
    e = make_template(data)
    assert e.empty
    assert not e.has_grid
    assert not e.has_stochastic
    # a Rand makes the template stochastic, a Grid makes it grid-searchable.
    data = dict(a=Rand(0, 1))
    e = make_template(data)
    assert not e.empty
    assert not e.has_grid
    assert e.has_stochastic
    data = dict(a=Grid(0, 1))
    e = make_template(data)
    assert not e.empty
    assert e.has_grid
    assert not e.has_stochastic
    # expressions are collected depth-first from nested dicts and lists.
    data = dict(
        a=Rand(0, 1),
        b=Grid(2, 3),
        c=dict(
            a=Rand(10, 20), b=[dict(x=Rand(100, 200))], c=[1, Rand(1000, 2000)], d=None
        ),
        d=None,
    )
    e = make_template(data)
    assert not e.empty
    assert e.has_grid
    assert e.has_stochastic
    assert [
        Rand(0, 1),
        Grid(2, 3),
        Rand(10, 20),
        Rand(100, 200),
        Rand(1000, 2000),
    ] == e.params
    # fill replaces the expressions positionally; each call builds a new dict.
    res = e.fill([0.5, 2, 10.5, 100.5, 1000.5])
    res2 = e.fill([0.55, 2, 10.55, 100.5, 1000.5])
    assert (
        dict(
            a=0.5,
            b=2,
            c=dict(a=10.5, b=[dict(x=100.5)], c=[1, 1000.5], d=None),
            d=None,
        )
        == res
    )
    assert res2 is not res
    assert (
        dict(
            a=0.55,
            b=2,
            c=dict(a=10.55, b=[dict(x=100.5)], c=[1, 1000.5], d=None),
            d=None,
        )
        == res2
    )
    # extract and fill by dicts
    data = dict(
        a=Rand(0, 1),
        b=dict(x=[Grid(2, 3)]),
    )
    e = make_template(data)
    assert dict(p0=Rand(0, 1), p1=Grid(2, 3)) == e.params_dict
    assert dict(a=0.5, b=dict(x=[2])) == e.fill_dict(dict(p1=2, p0=0.5))
    # same express in template: one shared object yields one parameter slot.
    expr = Rand(0, 1)
    data = dict(a=expr, b=dict(x=expr), c=Rand(2, 4))
    e = make_template(data)
    assert dict(p0=Rand(0, 1), p1=Rand(2, 4)) == e.params_dict
    assert dict(a=0.5, b=dict(x=0.5), c=2) == e.fill_dict(dict(p1=2, p0=0.5))
    # special objects: non-serializable values like DataFrames must not break.
    e = make_template(dict(a=Rand(0, 1), b=pd.DataFrame([[0]])))
    # func: expressions inside a FuncParam are extracted and applied on fill.
    def tf(*args, x):
        return sum(args) + x[0]
    u = Grid(0, 1)
    e = make_template(dict(a=1, b=[FuncParam(tf, Rand(0, 1), u, x=[u])]))
    assert e.has_grid
    assert e.has_stochastic
    assert dict(a=1, b=[2.5]) == e.fill([0.5, 1])
def test_template_eq():
    """Template equality: by content, against raw dicts, expression identity."""
    data1 = make_template(dict())
    data2 = make_template(dict())
    assert data1 == data2
    data1 = make_template(dict(a=1, b=2))
    data2 = make_template(dict(a=1, b=2))
    data3 = make_template(dict(a=1, b=3))
    assert data1 == data2
    assert data1 != data3
    data1 = make_template(dict(a=1, b=Grid(0, 1)))
    data2 = make_template(dict(a=1, b=Grid(0, 1)))
    data3 = make_template(dict(a=1, b=Grid(0, 2)))
    assert data1 == data2
    assert data1 != data3
    # sharing one expression object vs two equal objects is significant.
    u = Grid(0, 1)
    v = Grid(0, 1)
    data1 = make_template(dict(a=1, b=u, c=u))
    data2 = dict(a=1, b=v, c=v)
    data3 = dict(a=1, b=u, c=v)
    assert data1 == data2
    assert data1 != data3
    # comparison also works with the raw dict on the left-hand side.
    assert data2 == data1
    assert data3 != data1
def test_template_product():
    """product_grid expands grid expressions into the cross product of configs."""
    data = make_template(dict())
    assert [dict()] == list(data.product_grid())
    data = make_template(dict(a=1, b=2))
    assert [dict(a=1, b=2)] == list(data.product_grid())
    data = make_template(dict(a=1, b=Grid(0, 1)))
    assert [dict(a=1, b=0), dict(a=1, b=1)] == list(data.product_grid())
    # a shared Grid object counts once and fills every occurrence together.
    u = Grid(0, 1)
    data = make_template(dict(a=u, b=1, c=[u], d=Grid(0, 1)))
    assert [
        dict(a=0, b=1, c=[0], d=0),
        dict(a=0, b=1, c=[0], d=1),
        dict(a=1, b=1, c=[1], d=0),
        dict(a=1, b=1, c=[1], d=1),
    ] == list(data.product_grid())
    # stochastic expressions are left untouched by the grid product.
    data = make_template(dict(a=1, b=Grid(0, 1), c=Rand(0, 1)))
    assert [dict(a=1, b=0, c=Rand(0, 1)), dict(a=1, b=1, c=Rand(0, 1))] == list(
        data.product_grid()
    )
def test_template_sample():
    """sample fills stochastic expressions; grids survive into the samples."""
    data = make_template(dict())
    # the sample count must be positive.
    raises(ValueError, lambda: list(data.sample(0, 0)))
    raises(ValueError, lambda: list(data.sample(-1, 0)))
    # an expression-free template yields exactly one config.
    assert [dict()] == list(data.sample(100, 0))
    data = make_template(dict(a=1, b=2))
    assert [dict(a=1, b=2)] == list(data.sample(100, 0))
    # sampling is deterministic per seed.
    data = make_template(dict(a=1, b=Rand(0, 1)))
    assert list(data.sample(10, 0)) == list(data.sample(10, 0))
    assert list(data.sample(10, 0)) != list(data.sample(10, 1))
    a = list(data.sample(10, 0))
    assert 10 == len(a)
    assert all(x.template["b"] >= 0 and x.template["b"] <= 1 for x in a)
    # after sampling, no expressions remain in the resulting templates.
    assert all(x.empty for x in a)
    assert all(not x.has_grid for x in a)
    assert all(not x.has_stochastic for x in a)
    # a shared Rand fills identically everywhere; grids remain unsampled.
    u = Rand(0, 1)
    data = make_template(dict(a=1, b=u, c=Grid(0, 1), d=[u]))
    a = list(data.sample(10, 0))
    assert 10 == len(a)
    assert all(x.template["b"] >= 0 and x.template["b"] <= 1 for x in a)
    assert all(x.template["d"][0] == x.template["b"] for x in a)
    assert all(not x.empty for x in a)
    assert all(x.has_grid for x in a)
    assert all(not x.has_stochastic for x in a)
def test_template_concat():
    """concat joins two templates; originals stay usable; key clashes error."""
    u = Grid(0, 1)
    t1 = TuningParametersTemplate(dict(a=1, b=u, c=Grid(2, 3)))
    t2 = TuningParametersTemplate(dict(d=2, e=u, f=Grid(2, 3)))
    t = t1.concat(t2)
    assert dict(a=1, b=u, c=Grid(2, 3), d=2, e=u, f=Grid(2, 3)) == t
    # the originals keep their own independent parameter lists after concat.
    assert dict(a=1, b=0, c=2) == t1.fill([0, 2])
    assert dict(d=2, e=1, f=3) == t2.fill([1, 3])
    # the shared expression u fills once across the concatenated template.
    assert dict(a=1, b=1, c=2, d=2, e=1, f=3) == t.fill([1, 2, 3])
    # concatenating templates with overlapping keys is rejected.
    raises(ValueError, lambda: t.concat(t1))
def test_template_misc():
    """to_template conversion, template uuid semantics and simple_value."""
    # to_template accepts a dict, a template (returned as-is), and an
    # encoded string; anything else raises.
    t = to_template(dict(a=1, b=Grid(0, 1)))
    assert isinstance(t, TuningParametersTemplate)
    t2 = to_template(t)
    assert t is t2
    t3 = to_template(t.encode())
    assert t == t3
    raises(ValueError, lambda: to_template(123))
    # uuid: reusing one expression object vs two equal ones changes identity.
    u = Grid(0, 1)
    t1 = make_template(dict(a=1, b=u, c=Grid(0, 1)))
    t2 = make_template(dict(a=1, b=u, c=Grid(0, 1)))
    t3 = make_template(dict(a=1, b=u, c=u))
    t4 = make_template(dict(a=1, b=u, c=u))
    assert to_uuid(t1) == to_uuid(t2)
    assert to_uuid(t2) != to_uuid(t3)
    assert to_uuid(t3) == to_uuid(t4)
    # simple value: templates containing tuning expressions have none.
    u = Grid(0, 1)
    t1 = make_template(dict(a=1, b=u, c=Grid(0, 1), d=FuncParam(lambda x: x + 1, u)))
    raises(ValueError, lambda: t1.simple_value)
    assert [
        dict(a=1, b=0, c=0, d=1),
        dict(a=1, b=0, c=1, d=1),
        dict(a=1, b=1, c=0, d=2),
        dict(a=1, b=1, c=1, d=2),
    ] == list(t1.product_grid())
    t2 = make_template(dict(a=1, b=2))
    # BUG FIX: this comparison was missing its `assert`, so the expression
    # was evaluated and silently discarded -- the check never ran.
    assert dict(a=1, b=2) == t2.simple_value
    # a FuncParam with only constant args resolves inside simple_value.
    t2 = make_template(dict(a=1, b=FuncParam(lambda x: x + 1, x=2)))
    assert dict(a=1, b=3) == t2.simple_value
def test_repr():
    """repr/str of every expression type and of templates is stable and readable."""
    assert "Grid('a', 'b')" == repr(Grid("a", "b"))
    assert "Choice('a', 'b')" == repr(Choice("a", "b"))
    assert "TransitionChoice('a', 'b')" == repr(TransitionChoice("a", "b"))
    assert "Rand(low=0.2, high=1, q=0.1, log=True, include_high=False)" == repr(
        Rand(0.2, 1, 0.1, True, False)
    )
    assert "RandInt(low=2, high=10, q=2, log=True, include_high=False)" == repr(
        RandInt(2, 10, 2, True, False)
    )
    assert "NormalRand(mu=0.1, sigma=0.2, q=0.3)" == repr(NormalRand(0.1, 0.2, 0.3))
    assert "NormalRandInt(mu=2, sigma=0.2, q=3)" == repr(NormalRandInt(2, 0.2, 3))
    # named functions render by name, lambdas as '<lambda>'.
    assert "FuncParam(make_template, a=Grid('a', 'b'))" == repr(
        FuncParam(make_template, a=Grid("a", "b"))
    )
    assert "FuncParam(<lambda>, Grid('a', 'b'))" == repr(
        FuncParam(lambda x: x + 1, Grid("a", "b"))
    )
    # templates render (and stringify) as their underlying dict.
    assert "{'a': 1, 'b': Grid(1, 2)}" == repr(
        TuningParametersTemplate(dict(a=1, b=Grid(1, 2)))
    )
    assert "{'a': 1, 'b': Grid(1, 2)}" == str(
        TuningParametersTemplate(dict(a=1, b=Grid(1, 2)))
    )
def make_template(d):
    """Build a TuningParametersTemplate from *d* via an encode/decode round trip."""
    encoded = TuningParametersTemplate(d).encode()
    return TuningParametersTemplate.decode(encoded)
|
en
| 0.654357
|
# for log, low>=1.0 # for log, low>=1.0 # extract and fill by dicts # same express in template # special objects # func # to_template # uuid # simple value
| 2.186809
| 2
|
thelma/repositories/rdb/mappers/tag.py
|
fogathmann/TheLMA
| 1
|
6629309
|
<reponame>fogathmann/TheLMA<gh_stars>1-10
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Tag mapper.
"""
from sqlalchemy.orm import column_property
from sqlalchemy.orm import relationship
from sqlalchemy.orm.deprecated_interfaces import MapperExtension
from sqlalchemy.sql import select
from sqlalchemy.sql.expression import func
from sqlalchemy.sql.expression import insert
from everest.repositories.rdb.utils import as_slug_expression
from everest.repositories.rdb.utils import mapper
from thelma.entities.tagging import Tag
from thelma.entities.tagging import Tagged
__docformat__ = "reStructuredText en"
__all__ = ['create_mapper']
class TagMapperExtension(MapperExtension):
    # FIXME: the mapper extension mechanism is deprecated.
    """
    Mapper extension to take care of inserting/updating the non-mapped
    `tag_domain`, `tag_predicate`, and `tag_value` records when a `tag`
    record is created/updated.
    """
    def __init__(self, tag_domain_tbl, tag_predicate_tbl, tag_value_tbl):
        """Store the three lookup tables used to resolve tag components."""
        MapperExtension.__init__(self)
        self.__tag_domain_tbl = tag_domain_tbl
        self.__tag_predicate_tbl = tag_predicate_tbl
        self.__tag_value_tbl = tag_value_tbl
    def before_insert(self, tag_mapper, connection, instance): # pylint:disable=W0613
        """Resolve (or create) the domain/predicate/value rows and set their
        foreign key IDs on *instance* before the tag row itself is inserted.
        """
        tn_id = self.__fetch_or_insert(connection,
                                       self.__tag_domain_tbl,
                                       'tag_domain_id',
                                       'domain',
                                       instance.domain)
        instance.tag_domain_id = tn_id
        p_id = self.__fetch_or_insert(connection,
                                      self.__tag_predicate_tbl,
                                      'tag_predicate_id',
                                      'predicate',
                                      instance.predicate)
        instance.tag_predicate_id = p_id
        v_id = self.__fetch_or_insert(connection,
                                      self.__tag_value_tbl,
                                      'tag_value_id',
                                      'value',
                                      instance.value)
        instance.tag_value_id = v_id
    def __fetch_or_insert(self, conn, ref_tbl, id_col_name, val_col_name, val):
        """Return the ID of the row in *ref_tbl* whose *val_col_name* equals
        *val*, inserting a new row first if none exists.
        """
        whereclause = getattr(ref_tbl.c, val_col_name) == val
        sel_proxy = conn.execute(select([getattr(ref_tbl.c, id_col_name)],
                                        whereclause))
        result = sel_proxy.fetchall()
        if len(result) == 1:
            # Found related entry - return found ID.
            ref_id = result[0][id_col_name]
        else:
            # Not found - insert new and return new ID.
            ins_proxy = conn.execute(insert(ref_tbl,
                                            values={val_col_name:val}))
            ref_id = ins_proxy.inserted_primary_key[0]
        return ref_id
def create_mapper(tag_tbl, tag_domain_tbl, tag_predicate_tbl, tag_value_tbl,
                  tagging_tbl):
    "Mapper factory."
    # The slug renders as "<domain>:<predicate>=<value>" via the DB-side
    # concatenate function, normalized by as_slug_expression.
    # domain/predicate/value are read-only column_property subqueries against
    # the lookup tables; their IDs are written by TagMapperExtension.
    m = mapper(Tag,
               tag_tbl,
               id_attribute='tag_id',
               slug_expression=lambda cls: as_slug_expression(
                                        func.concatenate(cls.domain, ':',
                                                         cls.predicate, '=',
                                                         cls.value)),
               extension=TagMapperExtension(tag_domain_tbl,
                                            tag_predicate_tbl, tag_value_tbl),
               properties=
                dict(tagged=relationship(Tagged,
                                         secondary=tagging_tbl,
                                         back_populates='tags'),
                     domain=column_property(
                        select([tag_domain_tbl.c.domain]) \
                        .where(tag_tbl.c.tag_domain_id ==
                                        tag_domain_tbl.c.tag_domain_id)
                        ),
                     predicate=column_property(
                        select([tag_predicate_tbl.c.predicate]) \
                        .where(tag_tbl.c.tag_predicate_id ==
                                        tag_predicate_tbl.c.tag_predicate_id)
                        ),
                     value=column_property(
                        select([tag_value_tbl.c.value]) \
                        .where(tag_tbl.c.tag_value_id ==
                                        tag_value_tbl.c.tag_value_id)
                        ),
                     )
               )
    return m
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Tag mapper.
"""
from sqlalchemy.orm import column_property
from sqlalchemy.orm import relationship
from sqlalchemy.orm.deprecated_interfaces import MapperExtension
from sqlalchemy.sql import select
from sqlalchemy.sql.expression import func
from sqlalchemy.sql.expression import insert
from everest.repositories.rdb.utils import as_slug_expression
from everest.repositories.rdb.utils import mapper
from thelma.entities.tagging import Tag
from thelma.entities.tagging import Tagged
__docformat__ = "reStructuredText en"
__all__ = ['create_mapper']
class TagMapperExtension(MapperExtension):
    # FIXME: the mapper extension mechanism is deprecated.
    """
    Mapper extension to take care of inserting/updating the non-mapped
    `tag_domain`, `tag_predicate`, and `tag_value` records when a `tag`
    record is created/updated.
    """
    def __init__(self, tag_domain_tbl, tag_predicate_tbl, tag_value_tbl):
        """Store the three lookup tables used to resolve tag components."""
        MapperExtension.__init__(self)
        self.__tag_domain_tbl = tag_domain_tbl
        self.__tag_predicate_tbl = tag_predicate_tbl
        self.__tag_value_tbl = tag_value_tbl
    def before_insert(self, tag_mapper, connection, instance): # pylint:disable=W0613
        """Resolve (or create) the domain/predicate/value rows and set their
        foreign key IDs on *instance* before the tag row itself is inserted.
        """
        tn_id = self.__fetch_or_insert(connection,
                                       self.__tag_domain_tbl,
                                       'tag_domain_id',
                                       'domain',
                                       instance.domain)
        instance.tag_domain_id = tn_id
        p_id = self.__fetch_or_insert(connection,
                                      self.__tag_predicate_tbl,
                                      'tag_predicate_id',
                                      'predicate',
                                      instance.predicate)
        instance.tag_predicate_id = p_id
        v_id = self.__fetch_or_insert(connection,
                                      self.__tag_value_tbl,
                                      'tag_value_id',
                                      'value',
                                      instance.value)
        instance.tag_value_id = v_id
    def __fetch_or_insert(self, conn, ref_tbl, id_col_name, val_col_name, val):
        """Return the ID of the row in *ref_tbl* whose *val_col_name* equals
        *val*, inserting a new row first if none exists.
        """
        whereclause = getattr(ref_tbl.c, val_col_name) == val
        sel_proxy = conn.execute(select([getattr(ref_tbl.c, id_col_name)],
                                        whereclause))
        result = sel_proxy.fetchall()
        if len(result) == 1:
            # Found related entry - return found ID.
            ref_id = result[0][id_col_name]
        else:
            # Not found - insert new and return new ID.
            ins_proxy = conn.execute(insert(ref_tbl,
                                            values={val_col_name:val}))
            ref_id = ins_proxy.inserted_primary_key[0]
        return ref_id
def create_mapper(tag_tbl, tag_domain_tbl, tag_predicate_tbl, tag_value_tbl,
                  tagging_tbl):
    "Mapper factory."
    # The slug renders as "<domain>:<predicate>=<value>" via the DB-side
    # concatenate function, normalized by as_slug_expression.
    # domain/predicate/value are read-only column_property subqueries against
    # the lookup tables; their IDs are written by TagMapperExtension.
    m = mapper(Tag,
               tag_tbl,
               id_attribute='tag_id',
               slug_expression=lambda cls: as_slug_expression(
                                        func.concatenate(cls.domain, ':',
                                                         cls.predicate, '=',
                                                         cls.value)),
               extension=TagMapperExtension(tag_domain_tbl,
                                            tag_predicate_tbl, tag_value_tbl),
               properties=
                dict(tagged=relationship(Tagged,
                                         secondary=tagging_tbl,
                                         back_populates='tags'),
                     domain=column_property(
                        select([tag_domain_tbl.c.domain]) \
                        .where(tag_tbl.c.tag_domain_id ==
                                        tag_domain_tbl.c.tag_domain_id)
                        ),
                     predicate=column_property(
                        select([tag_predicate_tbl.c.predicate]) \
                        .where(tag_tbl.c.tag_predicate_id ==
                                        tag_predicate_tbl.c.tag_predicate_id)
                        ),
                     value=column_property(
                        select([tag_value_tbl.c.value]) \
                        .where(tag_tbl.c.tag_value_id ==
                                        tag_value_tbl.c.tag_value_id)
                        ),
                     )
               )
    return m
|
en
| 0.73786
|
This file is part of the TheLMA (THe Laboratory Management Application) project. See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information. Tag mapper. # FIXME: the mapper extension mechanism is deprecated. Mapper extension to take care of inserting/updating the non-mapped `tag_domain`, `tag_predicate`, and `tag_value` records when a `tag` record is created/updated. # pylint:disable=W0613 # Found related entry - return found ID. # Not found - insert new and return new ID.
| 1.936533
| 2
|
tests/ddo/ddo_event_sample_v4.py
|
oceanprotocol/provider-py
| 1
|
6629310
|
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
# Sample V4 DDO (DID document) fixture used by event-related tests.
# NOTE(review): chainId 8996 presumably refers to a local/dev chain -- confirm.
ddo_event_sample_v4 = {
    "@context": ["https://w3id.org/did/v1"],
    "id": "did:op:ffa5037987b74fbab600d7515605146bb7babcb929c94c60ba93ac5ceda56775",
    "created": "2000-10-31T01:30:00.000-05:00",
    "updated": "2000-10-31T01:30:00.000-05:00",
    "version": "4.0.0",
    "chainId": 8996,
    "metadata": {
        "type": "dataset",
        "name": "Event DDO sample",
        "description": "Event DDO sample",
        "author": "Met Office",
        "license": "CC-BY",
        "contentLanguage": "en-US",
        "tags": ["samples"],
    },
    "services": [
        # Access service; timeout 0.
        {
            "id": "test_id",
            "type": "access",
            "datatokenAddress": "0x20e91598bb797eEd2C7D4431a274c2997D080f53",
            "name": "dataAssetAccess",
            "description": "dataAssetAccess",
            "serviceEndpoint": "http://localhost:8030/",
            "timeout": 0,
            "files": "encrypted files",
        },
        # Compute service with empty trusted-algorithm allow lists.
        {
            "id": "test_id2",
            "type": "compute",
            "name": "dataAssetComputingService",
            "description": "dataAssetComputingService",
            "datatokenAddress": "0x20e91598bb797eEd2C7D4431a274c2997D080f53",
            "serviceEndpoint": "http://localhost:8030/",
            "timeout": 3600,
            "files": "encrypted files",
            "compute": {
                "namespace": "dataAssetComputingService",
                "allowRawAlgorithm": False,
                "allowNetworkAccess": False,
                "publisherTrustedAlgorithms": [],
                "publisherTrustedAlgorithmPublishers": [],
            },
        },
    ],
}
|
#
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
# Sample V4 DDO (DID document) fixture used by event-related tests.
# NOTE(review): chainId 8996 presumably refers to a local/dev chain -- confirm.
ddo_event_sample_v4 = {
    "@context": ["https://w3id.org/did/v1"],
    "id": "did:op:ffa5037987b74fbab600d7515605146bb7babcb929c94c60ba93ac5ceda56775",
    "created": "2000-10-31T01:30:00.000-05:00",
    "updated": "2000-10-31T01:30:00.000-05:00",
    "version": "4.0.0",
    "chainId": 8996,
    "metadata": {
        "type": "dataset",
        "name": "Event DDO sample",
        "description": "Event DDO sample",
        "author": "Met Office",
        "license": "CC-BY",
        "contentLanguage": "en-US",
        "tags": ["samples"],
    },
    "services": [
        # Access service; timeout 0.
        {
            "id": "test_id",
            "type": "access",
            "datatokenAddress": "0x20e91598bb797eEd2C7D4431a274c2997D080f53",
            "name": "dataAssetAccess",
            "description": "dataAssetAccess",
            "serviceEndpoint": "http://localhost:8030/",
            "timeout": 0,
            "files": "encrypted files",
        },
        # Compute service with empty trusted-algorithm allow lists.
        {
            "id": "test_id2",
            "type": "compute",
            "name": "dataAssetComputingService",
            "description": "dataAssetComputingService",
            "datatokenAddress": "0x20e91598bb797eEd2C7D4431a274c2997D080f53",
            "serviceEndpoint": "http://localhost:8030/",
            "timeout": 3600,
            "files": "encrypted files",
            "compute": {
                "namespace": "dataAssetComputingService",
                "allowRawAlgorithm": False,
                "allowNetworkAccess": False,
                "publisherTrustedAlgorithms": [],
                "publisherTrustedAlgorithmPublishers": [],
            },
        },
    ],
}
|
en
| 0.268318
|
# # Copyright 2021 Ocean Protocol Foundation # SPDX-License-Identifier: Apache-2.0 #
| 1.322538
| 1
|
src/models/predict_model.py
|
data-modeler/rnn-surv
| 7
|
6629311
|
"""
Predict RNN-SURV model
"""
import os
import numpy as np
import pandas as pd
import json
from os.path import dirname as up
from tensorflow.keras.models import model_from_json
from src.models.rnnsurv import get_data, DataGenerator, create_model
def predict_rnnsurv(modelname, modelpath, datapath, outpath=None):
    """ Predicts and outputs risks and probabilities over time for each new
    observation.

    Loads a serialized Keras model (<modelname>.json architecture plus
    <modelname>.h5 weights) and its data-generator parameters from
    *modelpath*, predicts over the test CSV found in *datapath*, and writes
    two CSVs to *outpath* (defaults to *modelpath*):
    <modelname>_output_risks.csv and <modelname>_output_probs.csv.
    """
    if outpath is None:
        outpath = modelpath
    print('Getting Data...')
    # NOTE(review): 'rain_X_test.csv' looks like a truncated
    # 'train_X_test.csv' -- confirm the artifact file name.
    xt = get_data(path_to_file=datapath, filename='rain_X_test.csv', nrows=1000)
    # one row per unique observation id; keyed to both output frames below.
    risk_oids = xt['oid'].drop_duplicates().astype(int)
    print("Loading Model...")
    with open(os.path.join(modelpath, f"{modelname}.json"), "r") as json_file:
        model_json = json_file.read()
    with open(os.path.join(modelpath, f"{modelname}_data_params.json"), "r") as json_file:
        params_str = json_file.read()
    params = json.loads(params_str)
    model = model_from_json(model_json)
    model.load_weights(os.path.join(modelpath, f"{modelname}.h5"))
    test_generator = DataGenerator(xt, prediction=True, **params)
    # pred[0]: per-observation risk scores; pred[1]: per-observation
    # probability matrix (one column per time step) -- see frames below.
    pred = model.predict(test_generator)
    risks = pd.DataFrame({
        'oid': risk_oids,
        'risk': np.transpose(pred[0])[0]
    })
    risk_out_loc = os.path.join(outpath, f"{modelname}_output_risks.csv")
    risks.to_csv(risk_out_loc, index=False)
    probs = pd.DataFrame(pred[1], index=risk_oids).reset_index(drop=False)
    probs_out_loc = os.path.join(outpath, f"{modelname}_output_probs.csv")
    probs.to_csv(probs_out_loc, index=False)
if __name__ == '__main__':
    # Resolve paths relative to the repository root (three levels up from
    # this file) and run a prediction for the fixed model id.
    MODELNAME = 'model-002'
    BASEPATH = up(up(up(__file__)))
    DATAPATH = os.path.join(BASEPATH, 'data', 'processed')
    MODELPATH = os.path.join(BASEPATH, 'models')
    predict_rnnsurv(MODELNAME, MODELPATH, DATAPATH)
|
"""
Predict RNN-SURV model
"""
import os
import numpy as np
import pandas as pd
import json
from os.path import dirname as up
from tensorflow.keras.models import model_from_json
from src.models.rnnsurv import get_data, DataGenerator, create_model
def predict_rnnsurv(modelname, modelpath, datapath, outpath=None):
    """ Predicts and outputs risks and probabilities over time for each new
    observation.

    Loads a serialized Keras model (<modelname>.json architecture plus
    <modelname>.h5 weights) and its data-generator parameters from
    *modelpath*, predicts over the test CSV found in *datapath*, and writes
    two CSVs to *outpath* (defaults to *modelpath*):
    <modelname>_output_risks.csv and <modelname>_output_probs.csv.
    """
    if outpath is None:
        outpath = modelpath
    print('Getting Data...')
    # NOTE(review): 'rain_X_test.csv' looks like a truncated
    # 'train_X_test.csv' -- confirm the artifact file name.
    xt = get_data(path_to_file=datapath, filename='rain_X_test.csv', nrows=1000)
    # one row per unique observation id; keyed to both output frames below.
    risk_oids = xt['oid'].drop_duplicates().astype(int)
    print("Loading Model...")
    with open(os.path.join(modelpath, f"{modelname}.json"), "r") as json_file:
        model_json = json_file.read()
    with open(os.path.join(modelpath, f"{modelname}_data_params.json"), "r") as json_file:
        params_str = json_file.read()
    params = json.loads(params_str)
    model = model_from_json(model_json)
    model.load_weights(os.path.join(modelpath, f"{modelname}.h5"))
    test_generator = DataGenerator(xt, prediction=True, **params)
    # pred[0]: per-observation risk scores; pred[1]: per-observation
    # probability matrix (one column per time step) -- see frames below.
    pred = model.predict(test_generator)
    risks = pd.DataFrame({
        'oid': risk_oids,
        'risk': np.transpose(pred[0])[0]
    })
    risk_out_loc = os.path.join(outpath, f"{modelname}_output_risks.csv")
    risks.to_csv(risk_out_loc, index=False)
    probs = pd.DataFrame(pred[1], index=risk_oids).reset_index(drop=False)
    probs_out_loc = os.path.join(outpath, f"{modelname}_output_probs.csv")
    probs.to_csv(probs_out_loc, index=False)
if __name__ == '__main__':
    # Resolve paths relative to the repository root (three levels up from
    # this file) and run a prediction for the fixed model id.
    MODELNAME = 'model-002'
    BASEPATH = up(up(up(__file__)))
    DATAPATH = os.path.join(BASEPATH, 'data', 'processed')
    MODELPATH = os.path.join(BASEPATH, 'models')
    predict_rnnsurv(MODELNAME, MODELPATH, DATAPATH)
|
en
| 0.825279
|
Predict RNN-SURV model Predicts and outputs risks and probabilities over time for each new observation.
| 2.740906
| 3
|
engine/core/org_singleton.py
|
torrotitans/torro_community
| 1
|
6629312
|
#!/usr/bin/python
# -*- coding: UTF-8 -*
from db.org.db_org_mgr import org_mgr
__all__ = {"orgSingleton"}
class orgSingleton():
    """Thin facade over the shared ``org_mgr`` organization persistence layer.

    Every method delegates directly to :data:`db.org.db_org_mgr.org_mgr`.
    """
    def add_new_org_setting(self, org):
        """Persist a new organization setting via org_mgr.
        :return:
        """
        return org_mgr.add_new_org_setting(org)
    def get_org_info(self):
        """Return the organization info record from org_mgr."""
        return org_mgr.get_org_info()
    def update_org(self, org):
        """Update the organization info record via org_mgr."""
        return org_mgr.update_org_info(org)
    def get_roles_info(self):
        """Return the roles info from org_mgr."""
        return org_mgr.get_roles_info()
    def insert_notification(self, emails, input_form_id, history_id, notify_msg):
        """Record a notification for *emails* about a form/history entry."""
        return org_mgr.insert_notification(emails, input_form_id, history_id, notify_msg)
# Module-level shared instance; import this rather than instantiating.
orgSingleton_singleton = orgSingleton()
|
#!/usr/bin/python
# -*- coding: UTF-8 -*
from db.org.db_org_mgr import org_mgr
__all__ = {"orgSingleton"}
class orgSingleton():
    """Thin facade over the shared ``org_mgr`` organization persistence layer.

    Every method delegates directly to :data:`db.org.db_org_mgr.org_mgr`.
    """
    def add_new_org_setting(self, org):
        """Persist a new organization setting via org_mgr.
        :return:
        """
        return org_mgr.add_new_org_setting(org)
    def get_org_info(self):
        """Return the organization info record from org_mgr."""
        return org_mgr.get_org_info()
    def update_org(self, org):
        """Update the organization info record via org_mgr."""
        return org_mgr.update_org_info(org)
    def get_roles_info(self):
        """Return the roles info from org_mgr."""
        return org_mgr.get_roles_info()
    def insert_notification(self, emails, input_form_id, history_id, notify_msg):
        """Record a notification for *emails* about a form/history entry."""
        return org_mgr.insert_notification(emails, input_form_id, history_id, notify_msg)
# Module-level shared instance; import this rather than instantiating.
orgSingleton_singleton = orgSingleton()
|
en
| 0.168529
|
#!/usr/bin/python # -*- coding: UTF-8 -* :return:
| 2.38471
| 2
|
test/test_vfxt_template_deploy.py
|
anhowe/Avere
| 0
|
6629313
|
<filename>test/test_vfxt_template_deploy.py
#!/usr/bin/python3
# Copyright (C) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE-CODE in the project root for license information.
"""
Driver for testing Azure ARM template-based deployment of the Avere vFXT.
"""
# standard imports
import json
import logging
import os
import sys
import time
from uuid import uuid4
# from requirements.txt
import pytest
# local libraries
from lib.helpers import get_vm_ips, split_ip_range, wait_for_op
class TestVfxtTemplateDeploy:
    """Template-based vFXT deployment scenarios.

    Each test renders src/vfxt/azuredeploy-auto.json with scenario-specific
    parameters, deploys it, and records cluster/controller addresses in
    test_vars for downstream teardown/validation.
    """
    # TODO: modularize common code

    def test_deploy_template(self, resource_group, test_vars):  # noqa: F811
        """
        Deploy a vFXT cluster.
        - create a new VNET
        - use an Avere-backed storage account
        """
        log = logging.getLogger("test_deploy_template")
        atd = test_vars["atd_obj"]
        with open("{}/src/vfxt/azuredeploy-auto.json".format(
                  test_vars["build_root"])) as tfile:
            atd.template = json.load(tfile)
        with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
            ssh_pub_key = ssh_pub_f.read()
        atd.deploy_params = {
            "adminPassword": os.environ["AVERE_ADMIN_PW"],
            "avereBackedStorageAccountName": atd.deploy_id + "sa",
            "avereClusterName": atd.deploy_id + "-cluster",
            "avereInstanceType": "Standard_E32s_v3",
            "avereNodeCount": 3,
            "controllerAdminUsername": "azureuser",
            "controllerAuthenticationType": "sshPublicKey",
            "controllerName": atd.deploy_id + "-con",
            "controllerPassword": os.environ["AVERE_CONTROLLER_PW"],
            "controllerSSHKeyData": ssh_pub_key,
            "enableCloudTraceDebugging": True,
            "rbacRoleAssignmentUniqueId": str(uuid4()),
            "createVirtualNetwork": True,
            "virtualNetworkName": atd.deploy_id + "-vnet",
            "virtualNetworkResourceGroup": atd.resource_group,
            "virtualNetworkSubnetName": atd.deploy_id + "-subnet",
        }
        if "VFXT_CONTROLLER_IMG_REF_ID" in os.environ:
            atd.deploy_params["controllerImageReferenceId"] = os.environ["VFXT_CONTROLLER_IMG_REF_ID"]
        test_vars["storage_account"] = atd.deploy_params["avereBackedStorageAccountName"]
        test_vars["controller_name"] = atd.deploy_params["controllerName"]
        test_vars["controller_user"] = atd.deploy_params["controllerAdminUsername"]
        log.debug("Generated deploy parameters: \n{}".format(
            json.dumps(atd.deploy_params, indent=4)))
        atd.deploy_name = "test_deploy_template"
        try:
            deploy_outputs = wait_for_op(atd.deploy()).properties.outputs
            test_vars["cluster_mgmt_ip"] = deploy_outputs["mgmt_ip"]["value"]
            test_vars["cluster_vs_ips"] = split_ip_range(deploy_outputs["vserver_ips"]["value"])
        finally:
            # Record the controller address even on failure so teardown works.
            test_vars["public_ip"] = atd.nm_client.public_ip_addresses.get(
                atd.resource_group, "publicip-" + test_vars["controller_name"]
            ).ip_address
            test_vars["controller_ip"] = test_vars["public_ip"]

    def test_no_storage_account_deploy(self, resource_group, test_vars):  # noqa: E501, F811
        """
        Deploy a vFXT cluster.
        - create a new VNET
        - do NOT use an Avere-backed storage account
        """
        log = logging.getLogger("test_no_storage_account_deploy")
        atd = test_vars["atd_obj"]
        with open("{}/src/vfxt/azuredeploy-auto.json".format(
                  test_vars["build_root"])) as tfile:
            atd.template = json.load(tfile)
        with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
            ssh_pub_key = ssh_pub_f.read()
        atd.deploy_params = {
            # Fixed: these two env var lookups contained "<PASSWORD>"
            # placeholders (a redaction artifact), which would KeyError at
            # runtime; restored to match the other deploy scenarios.
            "adminPassword": os.environ["AVERE_ADMIN_PW"],
            "avereClusterName": atd.deploy_id + "-cluster",
            "avereInstanceType": "Standard_E32s_v3",
            "avereNodeCount": 3,
            "controllerAdminUsername": "azureuser",
            "controllerAuthenticationType": "sshPublicKey",
            "controllerName": atd.deploy_id + "-con",
            "controllerPassword": os.environ["AVERE_CONTROLLER_PW"],
            "controllerSSHKeyData": ssh_pub_key,
            "enableCloudTraceDebugging": True,
            "rbacRoleAssignmentUniqueId": str(uuid4()),
            "createVirtualNetwork": True,
            "virtualNetworkName": atd.deploy_id + "-vnet",
            "virtualNetworkResourceGroup": atd.resource_group,
            "virtualNetworkSubnetName": atd.deploy_id + "-subnet",
            "useAvereBackedStorageAccount": False,
            # BUG: a storage account name is still passed although
            # useAvereBackedStorageAccount is False -- confirm the template
            # ignores it in that case.
            "avereBackedStorageAccountName": atd.deploy_id + "sa",
        }
        if "VFXT_CONTROLLER_IMG_REF_ID" in os.environ:
            atd.deploy_params["controllerImageReferenceId"] = os.environ["VFXT_CONTROLLER_IMG_REF_ID"]
        test_vars["controller_name"] = atd.deploy_params["controllerName"]
        test_vars["controller_user"] = atd.deploy_params["controllerAdminUsername"]
        log.debug("Generated deploy parameters: \n{}".format(
            json.dumps(atd.deploy_params, indent=4)))
        atd.deploy_name = "test_no_storage_account_deploy"
        try:
            deploy_outputs = wait_for_op(atd.deploy()).properties.outputs
            test_vars["cluster_mgmt_ip"] = deploy_outputs["mgmt_ip"]["value"]
            test_vars["cluster_vs_ips"] = split_ip_range(deploy_outputs["vserver_ips"]["value"])
            time.sleep(60)
        finally:
            # Record the controller address even on failure so teardown works.
            test_vars["public_ip"] = atd.nm_client.public_ip_addresses.get(
                atd.resource_group, "publicip-" + test_vars["controller_name"]
            ).ip_address
            test_vars["controller_ip"] = test_vars["public_ip"]

    def test_byovnet_deploy(self, ext_vnet, resource_group, test_vars):  # noqa: E501, F811
        """
        Deploy a vFXT cluster.
        - do NOT create a new VNET
        - use an Avere-backed storage account
        """
        log = logging.getLogger("test_byovnet_deploy")
        atd = test_vars["atd_obj"]
        with open("{}/src/vfxt/azuredeploy-auto.json".format(
                  test_vars["build_root"])) as tfile:
            atd.template = json.load(tfile)
        with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
            ssh_pub_key = ssh_pub_f.read()
        atd.deploy_params = {
            "adminPassword": os.environ["AVERE_ADMIN_PW"],
            "avereBackedStorageAccountName": atd.deploy_id + "sa",
            "avereClusterName": atd.deploy_id + "-cluster",
            "avereInstanceType": "Standard_E32s_v3",
            "avereNodeCount": 3,
            "controllerAdminUsername": "azureuser",
            "controllerAuthenticationType": "sshPublicKey",
            "controllerName": atd.deploy_id + "-con",
            "controllerPassword": os.environ["AVERE_CONTROLLER_PW"],
            "controllerSSHKeyData": ssh_pub_key,
            "enableCloudTraceDebugging": True,
            "rbacRoleAssignmentUniqueId": str(uuid4()),
            # Reuse the externally provisioned VNET from the ext_vnet fixture.
            "createVirtualNetwork": False,
            "virtualNetworkResourceGroup": ext_vnet["resource_group"]["value"],
            "virtualNetworkName": ext_vnet["virtual_network_name"]["value"],
            "virtualNetworkSubnetName": ext_vnet["subnet_name"]["value"],
        }
        if "VFXT_CONTROLLER_IMG_REF_ID" in os.environ:
            atd.deploy_params["controllerImageReferenceId"] = os.environ["VFXT_CONTROLLER_IMG_REF_ID"]
        test_vars["storage_account"] = atd.deploy_params["avereBackedStorageAccountName"]
        test_vars["controller_name"] = atd.deploy_params["controllerName"]
        test_vars["controller_user"] = atd.deploy_params["controllerAdminUsername"]
        log.debug("Generated deploy parameters: \n{}".format(
            json.dumps(atd.deploy_params, indent=4)))
        atd.deploy_name = "test_byovnet_deploy"
        try:
            deploy_outputs = wait_for_op(atd.deploy()).properties.outputs
            test_vars["cluster_mgmt_ip"] = deploy_outputs["mgmt_ip"]["value"]
            test_vars["cluster_vs_ips"] = split_ip_range(deploy_outputs["vserver_ips"]["value"])
        finally:
            test_vars["controller_ip"] = get_vm_ips(
                atd.nm_client, atd.resource_group, test_vars["controller_name"]
            )[0]
            test_vars["public_ip"] = ext_vnet["public_ip_address"]["value"]
if __name__ == "__main__":
pytest.main(sys.argv)
|
<filename>test/test_vfxt_template_deploy.py
#!/usr/bin/python3
# Copyright (C) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE-CODE in the project root for license information.
"""
Driver for testing Azure ARM template-based deployment of the Avere vFXT.
"""
# standard imports
import json
import logging
import os
import sys
import time
from uuid import uuid4
# from requirements.txt
import pytest
# local libraries
from lib.helpers import get_vm_ips, split_ip_range, wait_for_op
class TestVfxtTemplateDeploy:
    """ARM template deployment variants for the Avere vFXT cluster.

    Each test loads src/vfxt/azuredeploy-auto.json, assembles a set of
    deploy parameters, runs the deployment, and records cluster/controller
    addresses in ``test_vars`` for use by later tests. The ``finally``
    blocks record the controller address even when the deploy fails, so
    diagnostics/cleanup can still reach the VM.

    Required environment: AVERE_ADMIN_PW and AVERE_CONTROLLER_PW must be
    set; VFXT_CONTROLLER_IMG_REF_ID optionally selects a controller image.
    """
    # TODO: modularize common code

    def test_deploy_template(self, resource_group, test_vars):  # noqa: F811
        """
        Deploy a vFXT cluster.
          - create a new VNET
          - use an Avere-backed storage account
        """
        log = logging.getLogger("test_deploy_template")
        atd = test_vars["atd_obj"]
        with open("{}/src/vfxt/azuredeploy-auto.json".format(
                test_vars["build_root"])) as tfile:
            atd.template = json.load(tfile)
        with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
            ssh_pub_key = ssh_pub_f.read()
        atd.deploy_params = {
            "adminPassword": os.environ["AVERE_ADMIN_PW"],
            "avereBackedStorageAccountName": atd.deploy_id + "sa",
            "avereClusterName": atd.deploy_id + "-cluster",
            "avereInstanceType": "Standard_E32s_v3",
            "avereNodeCount": 3,
            "controllerAdminUsername": "azureuser",
            "controllerAuthenticationType": "sshPublicKey",
            "controllerName": atd.deploy_id + "-con",
            "controllerPassword": os.environ["AVERE_CONTROLLER_PW"],
            "controllerSSHKeyData": ssh_pub_key,
            "enableCloudTraceDebugging": True,
            "rbacRoleAssignmentUniqueId": str(uuid4()),
            "createVirtualNetwork": True,
            "virtualNetworkName": atd.deploy_id + "-vnet",
            "virtualNetworkResourceGroup": atd.resource_group,
            "virtualNetworkSubnetName": atd.deploy_id + "-subnet",
        }
        if "VFXT_CONTROLLER_IMG_REF_ID" in os.environ:
            atd.deploy_params["controllerImageReferenceId"] = os.environ["VFXT_CONTROLLER_IMG_REF_ID"]
        test_vars["storage_account"] = atd.deploy_params["avereBackedStorageAccountName"]
        test_vars["controller_name"] = atd.deploy_params["controllerName"]
        test_vars["controller_user"] = atd.deploy_params["controllerAdminUsername"]
        log.debug("Generated deploy parameters: \n{}".format(
            json.dumps(atd.deploy_params, indent=4)))
        atd.deploy_name = "test_deploy_template"
        try:
            deploy_outputs = wait_for_op(atd.deploy()).properties.outputs
            test_vars["cluster_mgmt_ip"] = deploy_outputs["mgmt_ip"]["value"]
            test_vars["cluster_vs_ips"] = split_ip_range(deploy_outputs["vserver_ips"]["value"])
        finally:
            # Record the controller's public address even if the deploy
            # failed so later stages can reach the VM.
            test_vars["public_ip"] = atd.nm_client.public_ip_addresses.get(
                atd.resource_group, "publicip-" + test_vars["controller_name"]
            ).ip_address
            test_vars["controller_ip"] = test_vars["public_ip"]

    def test_no_storage_account_deploy(self, resource_group, test_vars):  # noqa: E501, F811
        """
        Deploy a vFXT cluster.
          - create a new VNET
          - do NOT use an Avere-backed storage account
        """
        log = logging.getLogger("test_no_storage_account_deploy")
        atd = test_vars["atd_obj"]
        with open("{}/src/vfxt/azuredeploy-auto.json".format(
                test_vars["build_root"])) as tfile:
            atd.template = json.load(tfile)
        with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
            ssh_pub_key = ssh_pub_f.read()
        atd.deploy_params = {
            # Same credential env vars as the other deployment variants.
            "adminPassword": os.environ["AVERE_ADMIN_PW"],
            "avereClusterName": atd.deploy_id + "-cluster",
            "avereInstanceType": "Standard_E32s_v3",
            "avereNodeCount": 3,
            "controllerAdminUsername": "azureuser",
            "controllerAuthenticationType": "sshPublicKey",
            "controllerName": atd.deploy_id + "-con",
            "controllerPassword": os.environ["AVERE_CONTROLLER_PW"],
            "controllerSSHKeyData": ssh_pub_key,
            "enableCloudTraceDebugging": True,
            "rbacRoleAssignmentUniqueId": str(uuid4()),
            "createVirtualNetwork": True,
            "virtualNetworkName": atd.deploy_id + "-vnet",
            "virtualNetworkResourceGroup": atd.resource_group,
            "virtualNetworkSubnetName": atd.deploy_id + "-subnet",
            "useAvereBackedStorageAccount": False,
            # NOTE(review): a storage account name is still supplied even
            # though useAvereBackedStorageAccount is False (was marked
            # "# BUG"); presumably the template requires the parameter —
            # TODO confirm and drop if unnecessary.
            "avereBackedStorageAccountName": atd.deploy_id + "sa",
        }
        if "VFXT_CONTROLLER_IMG_REF_ID" in os.environ:
            atd.deploy_params["controllerImageReferenceId"] = os.environ["VFXT_CONTROLLER_IMG_REF_ID"]
        test_vars["controller_name"] = atd.deploy_params["controllerName"]
        test_vars["controller_user"] = atd.deploy_params["controllerAdminUsername"]
        log.debug("Generated deploy parameters: \n{}".format(
            json.dumps(atd.deploy_params, indent=4)))
        atd.deploy_name = "test_no_storage_account_deploy"
        try:
            deploy_outputs = wait_for_op(atd.deploy()).properties.outputs
            test_vars["cluster_mgmt_ip"] = deploy_outputs["mgmt_ip"]["value"]
            test_vars["cluster_vs_ips"] = split_ip_range(deploy_outputs["vserver_ips"]["value"])
            # Pause after deployment; presumably gives the cluster time to
            # settle before the next stage — TODO confirm why this variant
            # needs it.
            time.sleep(60)
        finally:
            # Record the controller's public address even if the deploy
            # failed so later stages can reach the VM.
            test_vars["public_ip"] = atd.nm_client.public_ip_addresses.get(
                atd.resource_group, "publicip-" + test_vars["controller_name"]
            ).ip_address
            test_vars["controller_ip"] = test_vars["public_ip"]

    def test_byovnet_deploy(self, ext_vnet, resource_group, test_vars):  # noqa: E501, F811
        """
        Deploy a vFXT cluster.
          - do NOT create a new VNET
          - use an Avere-backed storage account
        """
        log = logging.getLogger("test_byovnet_deploy")
        atd = test_vars["atd_obj"]
        with open("{}/src/vfxt/azuredeploy-auto.json".format(
                test_vars["build_root"])) as tfile:
            atd.template = json.load(tfile)
        with open(test_vars["ssh_pub_key"], "r") as ssh_pub_f:
            ssh_pub_key = ssh_pub_f.read()
        atd.deploy_params = {
            "adminPassword": os.environ["AVERE_ADMIN_PW"],
            "avereBackedStorageAccountName": atd.deploy_id + "sa",
            "avereClusterName": atd.deploy_id + "-cluster",
            "avereInstanceType": "Standard_E32s_v3",
            "avereNodeCount": 3,
            "controllerAdminUsername": "azureuser",
            "controllerAuthenticationType": "sshPublicKey",
            "controllerName": atd.deploy_id + "-con",
            "controllerPassword": os.environ["AVERE_CONTROLLER_PW"],
            "controllerSSHKeyData": ssh_pub_key,
            "enableCloudTraceDebugging": True,
            "rbacRoleAssignmentUniqueId": str(uuid4()),
            # Reuse the externally created VNET from the ext_vnet fixture.
            "createVirtualNetwork": False,
            "virtualNetworkResourceGroup": ext_vnet["resource_group"]["value"],
            "virtualNetworkName": ext_vnet["virtual_network_name"]["value"],
            "virtualNetworkSubnetName": ext_vnet["subnet_name"]["value"],
        }
        if "VFXT_CONTROLLER_IMG_REF_ID" in os.environ:
            atd.deploy_params["controllerImageReferenceId"] = os.environ["VFXT_CONTROLLER_IMG_REF_ID"]
        test_vars["storage_account"] = atd.deploy_params["avereBackedStorageAccountName"]
        test_vars["controller_name"] = atd.deploy_params["controllerName"]
        test_vars["controller_user"] = atd.deploy_params["controllerAdminUsername"]
        log.debug("Generated deploy parameters: \n{}".format(
            json.dumps(atd.deploy_params, indent=4)))
        atd.deploy_name = "test_byovnet_deploy"
        try:
            deploy_outputs = wait_for_op(atd.deploy()).properties.outputs
            test_vars["cluster_mgmt_ip"] = deploy_outputs["mgmt_ip"]["value"]
            test_vars["cluster_vs_ips"] = split_ip_range(deploy_outputs["vserver_ips"]["value"])
        finally:
            # In the BYO-VNET case the controller has no dedicated public IP
            # of its own; use its first VM address, and expose the external
            # VNET's public IP separately.
            test_vars["controller_ip"] = get_vm_ips(
                atd.nm_client, atd.resource_group, test_vars["controller_name"]
            )[0]
            test_vars["public_ip"] = ext_vnet["public_ip_address"]["value"]
# Allow running this test module directly; command-line arguments are
# forwarded to the pytest runner.
if __name__ == "__main__":
    pytest.main(sys.argv)
|
en
| 0.550379
|
#!/usr/bin/python3 # Copyright (C) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See LICENSE-CODE in the project root for license information. Driver for testing Azure ARM template-based deployment of the Avere vFXT. # standard imports # from requirements.txt # local libraries # TODO: modularize common code # noqa: F811 Deploy a vFXT cluster. - create a new VNET - use an Avere-backed storage account # (c_priv_ip, c_pub_ip) = get_vm_ips( # atd.nm_client, atd.resource_group, test_vars["controller_name"]) # test_vars["controller_ip"] = c_pub_ip or c_priv_ip # noqa: E501, F811 Deploy a vFXT cluster. - create a new VNET - do NOT use an Avere-backed storage account # BUG # (c_priv_ip, c_pub_ip) = get_vm_ips( # atd.nm_client, atd.resource_group, test_vars["controller_name"]) # test_vars["controller_ip"] = c_pub_ip or c_priv_ip # noqa: E501, F811 Deploy a vFXT cluster. - do NOT create a new VNET - use an Avere-backed storage account
| 2.023232
| 2
|
quandl_fund_xlsx/fundamentals.py
|
robren/quandl_fund_xlsx
| 1
|
6629314
|
"""This module provides functions to calculate fundamental ratios
for a stock portfolio.
The results are saved in an excel workbook with one sheet per stock
as well as a summary sheet
:copyright: (c) 2021 by <NAME>
:license: Apache 2, see LICENCE for more details
"""
import collections
import logging
import numpy as np
import os
import sys
import pandas as pd
import quandl
from quandl.errors.quandl_error import NotFoundError
from xlsxwriter.utility import xl_range
from xlsxwriter.utility import xl_rowcol_to_cell
# Uncomment the basicConfig call below to also see logging from the
# requests module while debugging; keep it commented out otherwise.
# logging.basicConfig(level=logging.DEBUG)

# Module-level logger emitting timestamped, level-tagged records to stderr.
logger = logging.getLogger(__name__)
formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# logger.setLevel(logging.DEBUG)
class Fundamentals_ng(object):
def __init__(
self,
database,
i_ind,
cf_ind,
bal_ind,
metrics_and_ratios_ind,
calc_ratios,
summarize_ind,
):
if database == "SF0":
if "QUANDL_API_SF0_KEY" in os.environ:
quandl.ApiConfig.api_key = os.environ["QUANDL_API_SF0_KEY"]
else:
print(
"Exiting: Please set the QUANDL_API_SF0_KEY environment variable."
)
sys.exit()
elif database == "SF1":
if "QUANDL_API_SF1_KEY" in os.environ:
quandl.ApiConfig.api_key = os.environ["QUANDL_API_SF1_KEY"]
else:
print("Exiting Please set the QUANDL_API_SF1_KEY environment variable.")
sys.exit()
# self.database = 'SHARADAR/' + database
self.database = database
self.all_inds_df = None
self.i_stmnt_ind_dict = collections.OrderedDict(i_ind)
self.i_stmnt_df = None
self.cf_stmnt_ind_dict = collections.OrderedDict(cf_ind)
self.cf_stmnt_df = None
self.bal_stmnt_ind_dict = collections.OrderedDict(bal_ind)
self.bal_stmnt_df = None
self.metrics_and_ratios_ind_dict = collections.OrderedDict(
metrics_and_ratios_ind
)
self.metrics_and_ratios_df = None
self.calc_ratios_dict = collections.OrderedDict(calc_ratios)
self.calc_ratios_df = None
self.dimension = None
self.periods = None
self.summarize_ind_dict = collections.OrderedDict(summarize_ind)
    def get_indicators(self, ticker, dimension, periods):
        """Obtains fundamental company indicators from the Quandl API.

        Fetches all available datapoints ("indicators" in Quandl parlance)
        for the provided ticker from the SHARADAR/SF1 table, then slices
        them into the income, cash flow, balance sheet, and metrics
        dataframes held on this instance.

        The formats accepted for the indicators and dimensions are described
        in: https://www.quandl.com/data/SF0-Free-US-Fundamentals-Data/documentation/about
        and
        https://www.quandl.com/data/SF1-Core-US-Fundamentals-Data/documentation/about

        Args:
            ticker: A string representing the stock.
            dimension: A string representing the timeframe for which data is
                required. For the SF0 database only 'MRY' (most recent
                yearly) is supported. For the SF1 database available options
                are: MRY, MRQ, MRT, ARY, ARQ, ART.
            periods: An integer; only the most recent `periods` rows are kept.

        Returns:
            A copy of the dataframe containing all of the indicators for
            this ticker (indicators as columns, time periods as rows).

        Raises:
            NotFoundError: if the ticker has no data in the table.

        Side effects:
            Populates self.all_inds_df, self.i_stmnt_df, self.cf_stmnt_df,
            self.bal_stmnt_df, self.metrics_and_ratios_df, self.dimension,
            and self.periods.
        """
        # At some point the SF0 table was removed; even with SF0-only access
        # we request the SHARADAR/SF1 table — the API restricts access to
        # the SF0 limited dataset itself.
        try:
            self.all_inds_df = quandl.get_table(
                "SHARADAR/SF1", ticker=ticker, dimension=dimension
            )
            # quandl returns an empty frame (not an error) for an unknown
            # ticker; normalize that to the same NotFoundError path.
            if self.all_inds_df.empty:
                raise NotFoundError
            # Sort so that the earliest dates are at the top, then keep only
            # the most recent `periods` rows.
            self.all_inds_df.sort_values("datekey", inplace=True)
            self.all_inds_df = self.all_inds_df.tail(periods)
            loc_df = self.all_inds_df.copy()
            logger.debug(
                "get_indicators: df columns = %s" % (self.all_inds_df.columns.tolist())
            )
            logger.debug("get_indicators: all_inds_df = %s" % (self.all_inds_df.head()))
        except NotFoundError:
            logger.warning("get_indicators: The ticker %s " "is not supported", ticker)
            raise
        # Create separate income, cash flow, balance, and metrics dataframes
        # by filtering columns out of the all-indicators dataframe. The
        # OrderedDict keys are the Sharadar column codes.
        self.i_stmnt_df = self.all_inds_df[self.i_stmnt_ind_dict.keys()].copy()
        self.cf_stmnt_df = self.all_inds_df[self.cf_stmnt_ind_dict.keys()].copy()
        self.bal_stmnt_df = self.all_inds_df[self.bal_stmnt_ind_dict.keys()].copy()
        self.metrics_and_ratios_df = self.all_inds_df[
            self.metrics_and_ratios_ind_dict.keys()
        ].copy()
        self.dimension = dimension
        self.periods = periods
        logger.debug("get_indicators: income dataframe = %s" % (self.i_stmnt_df.head()))
        return loc_df
def get_transposed_and_formatted_i_stmnt(self):
""" Returns a transposed and formatted partial income statement dataframe with
description added ready for printing to an excel sheet, or possible via html
in the future.
The original dataframe is in a format where the column headers are the indicators
and the rows are the per year or per quarter samples. This is the desired format
for performing operations on the data, it's so-called clean-data.
For visualing in a spreadsheet we want the columns to be the dates and the rows
to be the indicators. Hence the need to transpose.
Returns:
A dataframe
"""
stmnt_df = self.i_stmnt_df.copy()
desc_dict = self.i_stmnt_ind_dict
description = "Sharadar Income"
return self._transpose_and_format_stmnt(stmnt_df, desc_dict, description)
def get_transposed_and_formatted_cf_stmnt(self):
""" Returns a transposed and formatted subset of the cash flow statement
dataframe with description added ready for printing to an excel sheet, or
possible via html in the future.
Returns:
A dataframe
"""
stmnt_df = self.cf_stmnt_df.copy()
desc_dict = self.cf_stmnt_ind_dict
description = "Sharadar Cash Flow"
return self._transpose_and_format_stmnt(stmnt_df, desc_dict, description)
def get_transposed_and_formatted_bal_stmnt(self):
""" Returns a transposed and formatted subset of the balance sheet statement dataframe
with description addedready for printing to an excel sheet, or possible via html in the future.
Returns:
A dataframe
"""
stmnt_df = self.bal_stmnt_df.copy()
desc_dict = self.bal_stmnt_ind_dict
description = "Sharadar Balance"
return self._transpose_and_format_stmnt(stmnt_df, desc_dict, description)
def get_transposed_and_formatted_metrics_and_ratios(self):
""" Returns a transposed and formatted subset of sharadar metrics and
ratios statement dataframe with description added ready for printing to
an excel sheet, or possible via html in the future.
Returns:
A dataframe
"""
stmnt_df = self.metrics_and_ratios_df.copy()
desc_dict = self.metrics_and_ratios_ind_dict
description = "Sharadar Metrics and Ratios"
return self._transpose_and_format_stmnt(stmnt_df, desc_dict, description)
def get_transposed_and_formatted_calculated_ratios(self):
""" Returns a transposed and formatted calculated ratios dataframe with
description added ready for printing to an excel sheet, or possible
via html in the future.
Returns:
A dataframe
"""
stmnt_df = self.calc_ratios_df.copy()
desc_dict = self.calc_ratios_dict
description = "Calculated Metrics and Ratios"
return self._transpose_and_format_stmnt(stmnt_df, desc_dict, description)
def _transpose_and_format_stmnt(
self, stmnt_df, description_dict, description_of_indicators
):
""" Transpose the df so that we have the indicators as rows and datefields as columns
Side effects. Modifies the passed in dataframe.
"""
# As a precursor to making the datefields as columns we set the datefield as the index.
# We then transpose the dataframe such that the index becomes the columns and the columns become rows
stmnt_df.set_index("datekey", inplace=True)
# Transpose to get this dataframe ready for printing
# Convert the df so that we have the indicators as the index and datefields as columns
ret_df = stmnt_df.transpose()
# The columns are of a dateTime type, we need them to be text in order for the dataframe
# to excel module to work.
ret_df.columns = ret_df.columns.map(lambda t: t.strftime("%Y-%m-%d"))
# Now we want two additional descriptive columns in the dataframe.
# We want the description of the indicator in one column and the Sharadar code
# in another.
# Note that dictionary keys, in this case the Sharadar Indicator code
# becomes the index of the newly created Pandas series. The values become the data associated
# with these keys.
description_s = pd.Series(description_dict)
# The insert method is what enables us to place the column exactly where we want it.
ret_df.insert(0, "Description", description_s)
# For the second column, the sharadar codes, we can get the manes of these from the index of our
# dataframe. So a variation on the previous case where we inserted a column from a PD series. Here
# we point to an array like item which the insert method accepts, that of the dataframe index. After
# the transpose this contains what were the column i.e the Sharadar indicators.
#
# Create a new column using the values from the index, similar to doing a .reset_index
# but uses an explicit column instead of column 0 which reset-index does.
ret_df.insert(1, description_of_indicators + " " + self.dimension, ret_df.index)
return ret_df
def calc_ratios(self):
"""Obtain some financial ratios and metrics skewed towards credit analysis.
- Some suggested as useful in the book by <NAME> Alvarez:
'Financial Statement Analysis'.
- Others are credit sanity checking or rough approximations to REIT
specific ratios.
Returns:
A dataframe containing financial ratios.
"""
# Note updated to work on our data in the form where the rows as the dates and the columns are the metricss.
# we build up each metric as a new column in the calc_ratios df.
# initialize an empty calc_ratios_df but using the same indexing as our existing dataframes which we've pulled
# in from sharadar
self.calc_ratios_df = pd.DataFrame(index=self.i_stmnt_df.index)
for ratio in self.calc_ratios_dict:
logger.debug("get_calc_ratios: ratio = %s" % (ratio))
self._calc_ratios(ratio)
# This datekey column will be needed later when we transpose the dataframe
# The sharadar returned dataframes included a datekey column as part of the results.
# self.calc_ratios_df["datekey"] = self.i_stmnt_df["datekey"]
# A nicer way is to insert the datekey column as the first column of
# our synthetically created calc_ratios_df. This way it's easier to
# see for debug and is in the same position in col 1 as the dfs
# returned by sharadar
self.calc_ratios_df.insert(0, "datekey", self.i_stmnt_df["datekey"])
# Change nan to None and inf to a big recognizable number.
self.calc_ratios_df = self.calc_ratios_df.replace({np.nan: None})
self.calc_ratios_df = self.calc_ratios_df.replace({np.inf: 999999999})
logger.debug("get_calc_ratios: dataframe = %s" % (self.calc_ratios_df))
return self.calc_ratios_df.copy()
def _calc_ratios(self, ratio):
# Debt to Cash Flow From Operations
def _debt_cfo_ratio():
logger.debug(
"_calc_ratios._debt_cfo_ratio: debt = %s" % (self.bal_stmnt_df["debt"])
)
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] / self.cf_stmnt_df["ncfo"]
)
return
# Debt to Equity
def _debt_equity_ratio():
logger.debug(
"_calc_ratios._debt_equity_ratio: debt = %s"
% (self.bal_stmnt_df["debt"])
)
logger.debug(
"_calc_ratios._debt_equity_ratio: equity = %s"
% (self.bal_stmnt_df["equity"])
)
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] / self.bal_stmnt_df["equity"]
)
return
def _liabilities_equity_ratio():
logger.debug(
"_calc_ratios._liabilities_equity:_ratio liabilities = %s"
% (self.bal_stmnt_df["liabilities"])
)
logger.debug(
"_calc_ratios._liabilities_equity_ratio: equity = %s"
% (self.bal_stmnt_df["equity"])
)
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["liabilities"] / self.bal_stmnt_df["equity"]
)
return
# Debt to ebitda
def _debt_ebitda_ratio():
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] / self.metrics_and_ratios_df["ebitda"]
)
return
# Debt to ebitda minus CapEx
def _debt_ebitda_minus_capex_ratio():
# capex is returned from Sharadar as a -ve number, hence we need to add this to
# subtract capex
self.calc_ratios_df[ratio] = self.bal_stmnt_df["debt"] / (
self.metrics_and_ratios_df["ebitda"] + self.cf_stmnt_df["capex"]
)
return
# Net Debt to ebitda
def _net_debt_ebitda_ratio():
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] - self.bal_stmnt_df["cashnequsd"]
) / self.metrics_and_ratios_df["ebitda"]
return
# Net Debt to ebitda minus CapEx
def _net_debt_ebitda_minus_capex_ratio():
# capex is returned from Sharadar as a -ve number, hence we need to add this to
# subtract capex
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] - self.bal_stmnt_df["cashnequsd"]
) / (self.metrics_and_ratios_df["ebitda"] + self.cf_stmnt_df["capex"])
return
# Depreciation to Cash Flow From Operations Pg 278.
def _depreciation_cfo_ratio():
self.calc_ratios_df[ratio] = (
self.cf_stmnt_df["depamor"] / self.cf_stmnt_df["ncfo"]
)
return
def _depreciation_revenue_ratio():
self.calc_ratios_df[ratio] = (
self.cf_stmnt_df["depamor"] / self.i_stmnt_df["revenue"]
)
return
def _debt_to_total_capital():
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] / self.metrics_and_ratios_df["invcapavg"]
)
return
def _roic():
self.calc_ratios_df[ratio] = (
self.i_stmnt_df["ebit"] / self.metrics_and_ratios_df["invcapavg"]
)
# self.database = database
# Times Interest coverage aka fixed charge coverage Pg 278.
# (Net Income + Income taxes + Interest Expense)/(Interest expense + Capitalized Interest)
# Cannot see how to get capitalized interest from the API so that term is excluded.
# This is the same as ebit to Interest Expense
def _ebit_interest_coverage():
self.calc_ratios_df[ratio] = (
self.i_stmnt_df["ebit"] / self.i_stmnt_df["intexp"]
)
return
def _ebitda_interest_coverage():
self.calc_ratios_df[ratio] = (
self.metrics_and_ratios_df["ebitda"] / self.i_stmnt_df["intexp"]
)
return
def _ebitda_minus_capex_interest_coverage():
# Recall that capex is returned from Sharadar as a -ve number.
self.calc_ratios_df[ratio] = (
self.metrics_and_ratios_df["ebitda"] + self.cf_stmnt_df["capex"]
) / self.i_stmnt_df["intexp"]
return
def _rough_ffo():
self.calc_ratios_df[ratio] = (
self.i_stmnt_df["netinc"] + self.cf_stmnt_df["depamor"]
)
return
def _rough_affo():
# capex is returned from Quandl as a -ve number, hence we add this to
# subtract capex
self.calc_ratios_df[ratio] = (
self.i_stmnt_df["netinc"]
+ self.cf_stmnt_df["depamor"]
+ self.cf_stmnt_df["capex"]
)
return
def _rough_ffo_dividend_payout_ratio():
self.calc_ratios_df[ratio] = self.cf_stmnt_df["ncfdiv"] / (
self.i_stmnt_df["netinc"] + self.cf_stmnt_df["depamor"]
)
return
def _rough_affo_dividend_payout_ratio():
self.calc_ratios_df[ratio] = self.cf_stmnt_df["ncfdiv"] / (
self.i_stmnt_df["netinc"]
+ self.cf_stmnt_df["depamor"]
+ self.cf_stmnt_df["capex"]
)
return
def _income_dividend_payout_ratio():
# negating since ncfdiv is returned as a negative number
self.calc_ratios_df[ratio] = (
-self.cf_stmnt_df["ncfdiv"] / self.i_stmnt_df["netinc"]
)
return
# TODO add some conditional logig to use the fullydiluted shares value when it
# is provided
def _price_rough_ffo_ps_ratio():
self.calc_ratios_df[ratio] = self.i_stmnt_df["price"] / (
self.calc_ratios_df["rough_ffo"] / self.bal_stmnt_df["shareswa"]
)
return
def _rough_ffo_ps():
self.calc_ratios_df[ratio] = (
self.calc_ratios_df["rough_ffo"] / self.bal_stmnt_df["shareswa"]
)
return
def _cfo_ps():
self.calc_ratios_df[ratio] = (
self.cf_stmnt_df["ncfo"] / self.bal_stmnt_df["shareswa"]
)
return
def _opinc_ps():
self.calc_ratios_df[ratio] = (
self.i_stmnt_df["opinc"] / self.bal_stmnt_df["shareswa"]
)
return
def _fcf_ps():
self.calc_ratios_df[ratio] = (
self.metrics_and_ratios_df["fcf"] / self.bal_stmnt_df["shareswa"]
)
return
def _ev_opinc_ratio():
self.calc_ratios_df[ratio] = (
self.metrics_and_ratios_df["ev"] / self.i_stmnt_df["opinc"]
)
return
# <NAME>, author of Good Stocks Cheap, definition
# of capital employed. He has two defnitions, one where cash is
# subtracted and one where it's not. Accrued expenses should be
# substracted but Is not available in the Sharadar API, probably a
# scour the footnotes thing if really wanted to include this.
def _kjm_capital_employed_sub_cash():
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["assets"]
- self.bal_stmnt_df["cashnequsd"]
- self.bal_stmnt_df["payables"]
- self.bal_stmnt_df["deferredrev"]
)
return
def _kjm_capital_employed_with_cash():
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["assets"]
- self.bal_stmnt_df["payables"]
- self.bal_stmnt_df["deferredrev"]
)
return
def _kjm_roce_sub_cash():
self.calc_ratios_df[ratio] = (
self.i_stmnt_df["opinc"]
/ self.calc_ratios_df["kjm_capital_employed_sub_cash"]
)
return
def _kjm_roce_with_cash():
self.calc_ratios_df[ratio] = (
self.i_stmnt_df["opinc"]
/ self.calc_ratios_df["kjm_capital_employed_with_cash"]
)
return
def _kjm_fcf_return_on_capital_employed_sub_cash():
self.calc_ratios_df[ratio] = (
self.metrics_and_ratios_df["fcf"]
/ self.calc_ratios_df["kjm_capital_employed_sub_cash"]
)
return
def _kjm_fcf_return_on_capital_employed_with_cash():
self.calc_ratios_df[ratio] = (
self.metrics_and_ratios_df["fcf"]
/ self.calc_ratios_df["kjm_capital_employed_with_cash"]
)
return
def _kjm_delta_oi_fds():
self.calc_ratios_df[ratio] = self.calc_ratios_df["opinc_ps"].pct_change()
return
def _kjm_delta_fcf_fds():
self.calc_ratios_df[ratio] = self.calc_ratios_df["fcf_ps"].pct_change()
return
def _kjm_delta_bv_fds():
self.calc_ratios_df[ratio] = self.bal_stmnt_df["equity"].pct_change()
return
def _kjm_delta_tbv_fds():
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["equity"] - self.bal_stmnt_df["intangibles"]
).pct_change()
return
def _dividends_free_cash_flow_ratio():
self.calc_ratios_df[ratio] = (
-self.cf_stmnt_df["ncfdiv"] / self.metrics_and_ratios_df["fcf"]
)
return
def _preferred_free_cash_flow_ratio():
self.calc_ratios_df[ratio] = (
self.i_stmnt_df["prefdivis"] / self.metrics_and_ratios_df["fcf"]
)
return
def _operating_margin():
self.calc_ratios_df[ratio] = (
self.i_stmnt_df["opinc"] / self.i_stmnt_df["revenue"]
)
return
def _sg_and_a_gross_profit_ratio():
self.calc_ratios_df[ratio] = self.i_stmnt_df["sgna"] / self.i_stmnt_df["gp"]
return
def _ltdebt_cfo_ratio():
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debtnc"] / self.cf_stmnt_df["ncfo"]
)
return
def _ltdebt_earnings_ratio():
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debtnc"] / self.i_stmnt_df["netinc"]
)
return
def _free_cash_flow_conversion_ratio():
self.calc_ratios_df[ratio] = (
self.metrics_and_ratios_df["fcf"] / self.metrics_and_ratios_df["ebitda"]
)
return
# Pg 290 of Creative Cash Flow Reporting, Mumford et al.
def _excess_cash_margin_ratio():
self.calc_ratios_df[ratio] = (
(self.cf_stmnt_df["ncfo"] - self.i_stmnt_df["opinc"])
* 100
/ self.i_stmnt_df["revenue"]
)
return
def _interest_to_cfo_plus_interest_coverage():
self.calc_ratios_df[ratio] = self.i_stmnt_df["intexp"] / (
self.cf_stmnt_df["ncfo"] + self.i_stmnt_df["intexp"]
)
return
def _dividends_cfo_ratio():
# negating since ncfdiv is returned as a negative number
self.calc_ratios_df[ratio] = (
-self.cf_stmnt_df["ncfdiv"] / self.cf_stmnt_df["ncfo"]
)
return
def _preferred_cfo_ratio():
self.calc_ratios_df[ratio] = (
self.i_stmnt_df["prefdivis"] / self.cf_stmnt_df["ncfo"]
)
return
switcher = {
"debt_equity_ratio": _debt_equity_ratio,
"liabilities_equity_ratio": _liabilities_equity_ratio,
"debt_ebitda_ratio": _debt_ebitda_ratio,
"debt_ebitda_minus_capex_ratio": _debt_ebitda_minus_capex_ratio,
"net_debt_ebitda_ratio": _net_debt_ebitda_ratio,
"net_debt_ebitda_minus_capex_ratio": _net_debt_ebitda_minus_capex_ratio,
"debt_to_total_capital": _debt_to_total_capital,
"return_on_invested_capital": _roic,
"ebit_interest_coverage": _ebit_interest_coverage,
"ebitda_interest_coverage": _ebitda_interest_coverage,
"ebitda_minus_capex_interest_coverage": _ebitda_minus_capex_interest_coverage,
"debt_cfo_ratio": _debt_cfo_ratio,
"depreciation_cfo_ratio": _depreciation_cfo_ratio,
"depreciation_revenue_ratio": _depreciation_revenue_ratio,
"rough_ffo": _rough_ffo,
"rough_affo": _rough_affo,
"rough_ffo_dividend_payout_ratio": _rough_ffo_dividend_payout_ratio,
"rough_affo_dividend_payout_ratio": _rough_affo_dividend_payout_ratio,
"income_dividend_payout_ratio": _income_dividend_payout_ratio,
"price_rough_ffo_ps_ratio": _price_rough_ffo_ps_ratio,
"rough_ffo_ps": _rough_ffo_ps,
"opinc_ps": _opinc_ps,
"cfo_ps": _cfo_ps,
"fcf_ps": _fcf_ps,
"ev_opinc_ratio": _ev_opinc_ratio,
"dividends_free_cash_flow_ratio": _dividends_free_cash_flow_ratio,
"preferred_free_cash_flow_ratio": _preferred_free_cash_flow_ratio,
"operating_margin": _operating_margin,
"sg_and_a_gross_profit_ratio": _sg_and_a_gross_profit_ratio,
"ltdebt_cfo_ratio": _ltdebt_cfo_ratio,
"ltdebt_earnings_ratio": _ltdebt_earnings_ratio,
"free_cash_flow_conversion_ratio": _free_cash_flow_conversion_ratio,
"excess_cash_margin_ratio": _excess_cash_margin_ratio,
"interest_to_cfo_plus_interest_coverage": _interest_to_cfo_plus_interest_coverage,
"dividends_cfo_ratio": _dividends_cfo_ratio,
"preferred_cfo_ratio": _preferred_cfo_ratio,
"kjm_capital_employed_sub_cash": _kjm_capital_employed_sub_cash,
"kjm_capital_employed_with_cash": _kjm_capital_employed_with_cash,
"kjm_roce_sub_cash": _kjm_roce_sub_cash,
"kjm_roce_with_cash": _kjm_roce_with_cash,
"kjm_fcf_return_on_capital_employed_sub_cash": _kjm_fcf_return_on_capital_employed_sub_cash,
"kjm_fcf_return_on_capital_employed_with_cash": _kjm_fcf_return_on_capital_employed_with_cash,
"kjm_delta_oi_fds": _kjm_delta_oi_fds,
"kjm_delta_fcf_fds": _kjm_delta_fcf_fds,
"kjm_delta_bv_fds": _kjm_delta_bv_fds,
"kjm_delta_tbv_fds": _kjm_delta_tbv_fds,
}
# Get the function from switcher dictionary
func = switcher.get(ratio, lambda: NotImplementedError)
# Execute the function
return func()
class SharadarFundamentals(Fundamentals_ng):
    """Fundamentals_ng specialised for the Quandl/Sharadar SF0/SF1 datasets.

    The class-level tables below select which Sharadar indicator codes are
    fetched for each financial statement, which additional ratios this
    package calculates locally from those indicators, and which indicators
    appear on the colour-coded summary sheet.

    The first item in each tuple is the Sharadar code, the second is
    a human-readable description.
    """

    # Income Statement Indicator Quandl/Sharadar Codes
    I_STMNT_IND = [
        ("datekey", "SEC filing date"),
        ("revenue", "Revenues"),
        ("cor", "Cost of Revenue"),
        ("gp", "Gross Profit"),
        ("sgna", "Sales General and Admin"),
        ("rnd", "Research and Development Expense"),
        ("opex", "Operating Expenses"),
        ("intexp", "Interest Expense"),
        ("taxexp", "Tax Expense"),
        ("netincdis", "Net Loss Income from Discontinued Operations "),
        ("netincnci", "Net Income to Non-Controlling Interests"),
        ("opinc", "Operating Income"),
        ("ebit", "Earnings Before Interest and Taxes"),
        ("netinc", "Net Income"),
        ("prefdivis", "Preferred Dividends"),
        ("netinccmn", "Net Income to Common (after prefs paid)"),
        ("epsdil", "Earnings Per Share Diluted"),
        ("price", "Price per Share"),
        ("shareswadil", "Weighted Average Shares Diluted"),
        ("dps", "Dividends per Basic Common Share"),
    ]
    # Cash Flow Statement Indicator Quandl/Sharadar Codes
    CF_STMNT_IND = [
        ("datekey", "SEC filing date"),
        ("depamor", "Depreciation and Amortization"),
        ("ncfo", "Net Cash Flow From Operations"),
        ("ncfi", "Net Cash Flow From Investing"),
        ("capex", "Capital Expenditure"),
        ("ncff", "Net Cash Flow From Financing"),
        ("ncfdiv", "Payment of Dividends and Other Cash Distributions"),
    ]
    # Balance Statement Indicator Quandl/Sharadar Codes
    BAL_STMNT_IND = [
        ("datekey", "SEC filing date"),
        ("cashnequsd", "Cash and Equivalents (USD)"),
        ("receivables", "Receivables"),
        ("inventory", "Inventory"),
        ("investmentsc", "Investments Current"),
        ("assetsc", "Current Assets"),
        ("intangibles", "Intangibles"),
        ("ppnenet", "Property Plant and Equipment Net"),
        ("investmentsnc", "Investments Non-Current"),
        ("assetsnc", "Non Current Assets"),
        ("assets", "Total Assets"),
        ("deferredrev", "Deferred Revenue"),
        ("payables", "Payables"),
        ("liabilitiesc", "Current Liabilities"),
        ("debtc", "Current Debt"),
        ("taxliabilities", "Tax Liabilities"),
        ("debtnc", "Non Current Debt"),
        ("liabilitiesnc", "Non Current Liabilities"),
        ("liabilities", "Total Liabilities"),
        ("retearn", "Retained Earnings"),
        ("equity", "Shareholders Equity"),
        ("debt", "Total Debt"),
        ("shareswa", "Weighted Average Shares"),
        ("workingcapital", "Working Capital"),
    ]
    # Metrics and Ratio Indicator Quandl/Sharadar Codes
    # (ratios pre-computed by Sharadar, fetched as-is)
    METRICS_AND_RATIOS_IND = [
        ("datekey", "SEC filing date"),
        # ('DE', 'Debt to Equity Ratio'), Needs to be locally calculated when
        # using TTM figures
        ("ev", "Enterprise Value"),
        # evebitda only returned for the MRT period, the default for SF1
        ("evebitda", "Enterprise Value divided by ebitda"),
        ("pe", "Price Earnings Damodaran: Market Cap / Net Income"),
        ("ps", "Price Sales Damodaran: Market Cap / Revenue"),
        ("assetturnover", "Revenue / Assets average"),
        ("roa", "Return on Assets: Net Income / Average Assets"),
        ("roe", "Return on Equity: Net Income / Average Equity"),
        ("ros", "Return on Sales: ebit / Revenue"),
        ("ebitda", "Earnings Before Interest Taxes & Depreciation & Amortization"),
        ("fcf", "Free Cash Flow: CFO - CapEx"),
        ("invcapavg", "Invested Capital"),
        ("roic", "Return On Invested Capital"),
        ("grossmargin", "Gross Margin: Gross Profit/ Revenue"),
        ("netmargin", "Net Margin: Net Income/ Revenue"),
    ]
    # Locally calculated by this package. For each ratio or metric in this
    # table, there's a routine (in Fundamentals_ng._calc_ratios) to calculate
    # the value from the quandl API provided statement indicator values.
    CALCULATED_RATIOS = [
        (
            "kjm_capital_employed_sub_cash",
            "<NAME> Marshal Capital Employed Subtract Cash",
        ),
        (
            "kjm_capital_employed_with_cash",
            "<NAME> Marshal Capital Employed With Cash",
        ),
        (
            "kjm_roce_sub_cash",
            "KJM Return on Capital Employed subtract Cash",
        ),
        (
            "kjm_roce_with_cash",
            "KJM Return on Capital Employed With Cash",
        ),
        (
            "kjm_fcf_return_on_capital_employed_with_cash",
            "KJM Free Cash Flow ROCE With Cash",
        ),
        (
            "kjm_fcf_return_on_capital_employed_sub_cash",
            "KJM Free Cash FLow Subtract Cash",
        ),
        ("opinc_ps", "Operating Income Per Share"),
        ("cfo_ps", "Cash Flow from Operations Per Share"),
        ("fcf_ps", "Free Cash Flow per Share"),
        ("kjm_delta_oi_fds", "YoY change in Operating Income per Fully Diluted Share"),
        ("kjm_delta_fcf_fds", "YoY change in Free Cash Flow per Fully Diluted Share"),
        ("kjm_delta_bv_fds", "YoY change in Book Value per Fully Diluted Share"),
        (
            "kjm_delta_tbv_fds",
            "YoY change in Tangible Book Value per Fully Diluted Share",
        ),
        ("liabilities_equity_ratio", "Total Liabilities / Shareholders Equity"),
        ("debt_ebitda_ratio", "Total Debt / ebitda"),
        ("debt_ebitda_minus_capex_ratio", "Total Debt / (ebitda - CapEx)"),
        ("net_debt_ebitda_ratio", "Net Debt / ebitda"),
        ("net_debt_ebitda_minus_capex_ratio", "Net Debt / (ebitda - CapEx)"),
        ("debt_equity_ratio", "Total Debt / Shareholders Equity"),
        ("ebit_interest_coverage", "ebit / Interest Expense"),
        ("ebitda_interest_coverage", "ebitda / Interest Expense"),
        ("ebitda_minus_capex_interest_coverage", "ebitda - CapEx / Interest Expense"),
        ("interest_to_cfo_plus_interest_coverage", "Interest / (CFO + Interest"),
        ("debt_to_total_capital", "Total Debt / Invested Capital"),
        ("debt_cfo_ratio", "Total Debt / Cash Flow From Operations"),
        ("ltdebt_cfo_ratio", "Long Term Debt / Cash Flow From Operations"),
        ("ltdebt_earnings_ratio", "Long Term Debt / Income"),
        ("income_dividend_payout_ratio", "Dividends / Net Income"),
        ("dividends_cfo_ratio", "Dividends/CFO"),
        ("preferred_cfo_ratio", "Preferred Payments/CFO"),
        ("dividends_free_cash_flow_ratio", "Dividends/fcf"),
        ("preferred_free_cash_flow_ratio", "Preferred Payments/fcf"),
        ("operating_margin", "Operating Margin: (Gross Profit - Opex)/ Revenue"),
        ("sg_and_a_gross_profit_ratio", "SG&A to Gross Profit Ratio"),
        ("ev_opinc_ratio", "Acquirers Multiple: Enterprise Value / Operating Income"),
        (
            "return_on_invested_capital",
            "Return on Invested Capital: ebit / Invested Capital",
        ),
        ("free_cash_flow_conversion_ratio", "Free Cash Flow Conversion Ratio"),
        ("excess_cash_margin_ratio", "Excess Cash Margin Ratio"),
        ("depreciation_revenue_ratio", "Depreciation / Revenue"),
        ("depreciation_cfo_ratio", "Depreciation / Cash Flow From Operations"),
        # fcf is already levered since CFO already includes the effect of interest
        # payments.
        # ("free_cash_flow_levered", 'fcf-Levered: fcf - Interest Expenses'),
        (
            "rough_ffo",
            "Rough FFO: Net Income plus Depreciation (missing cap gain from RE sales adjust)",
        ),
        ("rough_ffo_ps", "Rough FFO per Share"),
        ("price_rough_ffo_ps_ratio", "Price divided by rough_ffo_ps"),
        ("rough_ffo_dividend_payout_ratio", "Dividends / rough_ffo"),
    ]
    # The indicators which we'd like to show on a separate summary page.
    # Edit this to customize what we show.
    # We control the excel conditional formatting by means of a formatting control:
    # asc (ascending) means "Higher is better"; desc (descending) "Lower is better".
    SUMMARIZE_IND = [
        ("ebitda_interest_coverage", "asc"),
        ("net_debt_ebitda_ratio", "desc"),
        ("workingcapital", "asc"),
        ("operating_margin", "asc"),
        ("grossmargin", "asc"),
        ("roic", "asc"),
        ("kjm_roce_sub_cash", "asc"),
        ("dividends_cfo_ratio", "desc"),
        ("dividends_free_cash_flow_ratio", "desc"),
        ("kjm_delta_oi_fds", "asc"),
        ("kjm_delta_fcf_fds", "asc"),
        ("preferred_cfo_ratio", "desc"),
    ]

    def __init__(self, database):
        """Wire the Sharadar indicator tables into the generic base class.

        Args:
            database: "SF0" or "SF1"; forwarded to Fundamentals_ng, which
                selects the matching quandl API key.
        """
        Fundamentals_ng.__init__(
            self,
            database,
            self.I_STMNT_IND,
            self.CF_STMNT_IND,
            self.BAL_STMNT_IND,
            self.METRICS_AND_RATIOS_IND,
            self.CALCULATED_RATIOS,
            self.SUMMARIZE_IND,
        )
class Excel:
    """Excel workbook builder for the fundamentals report.

    Wraps a pandas ExcelWriter (xlsxwriter engine) and knows how to lay out
    one formatted sheet per stock (via write_df) plus a colour-coded
    "Summary" sheet built from rows accumulated via add_summary_row().
    """

    def __init__(self, outfile):
        """Create the workbook, the Summary sheet, and the shared cell formats.

        Args:
            outfile: Path of the .xlsx file that will be written on save().
        """
        writer = pd.ExcelWriter(outfile, engine="xlsxwriter", date_format="d mmmm yyyy")
        self.writer = writer
        self.workbook = writer.book
        self.summary_sht = self.workbook.add_worksheet("Summary")
        self.summary_sht.set_first_sheet()
        # Accumulated (ticker, [(indicator, value), ...]) tuples, one per stock.
        self.summary_rows = []
        self.format_bold = self.workbook.add_format()
        self.format_bold.set_bold()
        self.format_commas_2dec = self.workbook.add_format()
        # Fix: was "0.#?", which is not a two-decimal thousands format.
        # "#,##0.00" matches the one-decimal and no-decimal formats below.
        self.format_commas_2dec.set_num_format("#,##0.00")
        self.format_commas_1dec = self.workbook.add_format()
        self.format_commas_1dec.set_num_format("#,##0.0")
        self.format_commas = self.workbook.add_format()
        self.format_commas.set_num_format("#,##0")
        self.format_justify = self.workbook.add_format()
        self.format_justify.set_align("justify")

    def save(self):
        """Flush and close the workbook file."""
        self.writer.save()

    def add_summary_row(self, ticker, fund):
        """Accumulate summary values for a given ticker.

        Args:
            ticker: The ticker for the stock we are given data for.
            fund: The Fundamentals_ng instance holding this ticker's data;
                its latest indicator values are captured for the summary sheet.
        """
        sum_ind_l = self._summarized_indicators(fund, ticker)
        self.summary_rows.append((ticker, sum_ind_l))

    def write_summary_sheet(self, summarized_ind_dict):
        """Write the accumulated summary rows to the Summary sheet.

        Args:
            summarized_ind_dict: Mapping of indicator -> "asc"/"desc" that
                drives the per-column conditional colour formatting.
        """
        # Robustness: nothing to write if no ticker was successfully processed
        # (the original indexed summary_rows[0] unconditionally).
        if not self.summary_rows:
            return
        # Calculate the size of the table we will need (row, column indexing).
        top_left = (0, 0)
        y0, x0 = top_left
        rows = len(self.summary_rows)
        _ticker, indicator_list = self.summary_rows[0]
        cols = len(indicator_list)
        bottom_right = (y0 + rows, x0 + cols)
        self._create_empty_table(top_left, bottom_right, indicator_list)
        self._data_to_summary_table(top_left, bottom_right, self.format_commas_1dec)
        self._format_table(top_left, bottom_right, summarized_ind_dict)

    def _format_table(self, top_left, bottom_right, summarized_ind_dict):
        """Conditionally format each data column of the summary table.

        Hard coded with xlsxwriter's simple 3_color_scale.

        Args:
            top_left: (y, x) coordinates of the top left of the table.
            bottom_right: (y, x) coordinates of the bottom right of the table.
            summarized_ind_dict: Mapping of indicator -> "asc"/"desc".

        Raises:
            ValueError: If a format control value is neither "asc" nor "desc".
        """
        crimson = "#DC143C"
        greenish = "#00CC66"
        # "Larger numbers are better" formatting
        ascend_fmt = {
            "type": "3_color_scale",
            "min_color": crimson,
            "max_color": greenish,
        }
        # "Smaller numbers are better" formatting
        descend_fmt = {
            "type": "3_color_scale",
            "min_color": greenish,
            "max_color": crimson,
        }
        # Adjust the top-left coordinates to skip the table header row and the
        # ticker column. y_tc/x_tc: top of the current column; y_bc/x_bc: bottom.
        y_tc, x_tc = top_left
        y_tc += 1
        x_tc += 1
        y_br, x_br = bottom_right
        y_bc = y_br
        x_bc = x_tc
        # Walk through each of the data columns, one per summarized indicator.
        for ind, fmt in summarized_ind_dict.items():
            if fmt == "asc":
                self.summary_sht.conditional_format(y_tc, x_tc, y_bc, x_bc, ascend_fmt)
            elif fmt == "desc":
                self.summary_sht.conditional_format(y_tc, x_tc, y_bc, x_bc, descend_fmt)
            else:
                raise ValueError("Format parameter must be asc or desc")
            x_tc += 1
            x_bc += 1
        # Sanity check: we must have formatted exactly up to the last column.
        assert x_bc - 1 == x_br

    def _data_to_summary_table(self, top_left, bottom_right, cell_format):
        """Write the accumulated (ticker, values) rows under the table header.

        Note: infs and NaNs must have been replaced before this point.
        """
        y0, x0 = top_left
        for i, (ticker, ind_vals) in enumerate(self.summary_rows):
            # One row per ticker: the ticker label followed by the values.
            row_values = [ticker] + [val for _ind, val in ind_vals]
            self.summary_sht.write_row(y0 + 1 + i, x0, row_values, cell_format)

    def _create_empty_table(self, top_left, bottom_right, indicator_list):
        """Create the empty summary table complete with column headers.

        xlsxwriter's add_table wants a list of {'header': name} dicts,
        one per column; the first column holds the ticker symbols.
        """
        dict_list = [{"header": "Ticker"}]
        for ind in indicator_list:
            dict_list.append({"header": ind[0]})
        self.summary_sht.add_table(*top_left, *bottom_right, {"columns": dict_list})

    def _latest_indicator_values(
        self, ticker, indicators, calc_ratios_df, all_sharadar_inds_df
    ):
        """Obtains the latest values for a given list of indicators.

        Uses the provided dataframes to look up the latest-in-time values for
        each of the indicators in the provided indicators list.

        Args:
            ticker: The ticker the data belongs to (not used for lookup).
            indicators: A list of indicator codes.
            calc_ratios_df: The locally calculated ratios dataframe.
            all_sharadar_inds_df: The dataframe containing the full table of
                results for a given dimension and ticker from Sharadar.

        Returns:
            A list of (indicator, value) tuples.

        Raises:
            KeyError: If an indicator appears in neither dataframe.
        """
        ind_val_l = []
        for indicator in indicators:
            if indicator in calc_ratios_df.columns:
                recent_ind_val = calc_ratios_df[indicator].tail(1).iloc[0]
            elif indicator in all_sharadar_inds_df.columns:
                recent_ind_val = all_sharadar_inds_df[indicator].tail(1).iloc[0]
            else:
                # Fix: typo in the original message ("Couln't").
                raise KeyError("Couldn't find indicator %s" % (indicator))
            ind_val_l.append((indicator, recent_ind_val))
        return ind_val_l

    def _summarized_indicators(self, fund, stock):
        """Return the latest (indicator, value) pairs for the summary sheet."""
        # Unpack the indicator codes from the summarize dict (keys only).
        indicators = [*fund.summarize_ind_dict]
        summarized = self._latest_indicator_values(
            stock, indicators, fund.calc_ratios_df, fund.all_inds_df
        )
        # TODO: also pass the asc/desc fmt downstream with each value.
        return summarized

    def write_df(
        self, dframe, row, col, sheetname, dimension, use_header=True, num_text_cols=2
    ):
        """Writes a dataframe to an excel worksheet.

        Adds conditional number formats, a trailing CAGR formula column and a
        sparkline column to the written range.

        Args:
            dframe: A Pandas dataframe. The index must have been promoted to
                a column prior to calling.
            row: An int, the row to start writing at, zero based.
            col: An int, the col to start writing at, zero based.
            sheetname: A string, the desired name for the sheet.
            dimension: A string representing the timeframe for which data is
                required. For the SF0 sample database only 'MRY' (most recent
                yearly) is supported. For the SF1 database available options
                are: MRY, MRQ, MRT, ARY, ARQ, ART.
            use_header: Whether to print the header of the dataframe.
            num_text_cols: The number of columns which contain text. The
                remainder of the columns are assumed to contain numeric values.

        Returns:
            rows_written: The number of rows written (including the header
            row when use_header is True).
        """
        # Write the data first using to_excel to obtain a worksheet object
        # we can format. The header is written explicitly later so it can be
        # styled with xlsxwriter.
        if use_header is True:
            start_row = row + 1
        else:
            start_row = row
        dframe.to_excel(
            self.writer,
            sheet_name=sheetname,
            startcol=col,
            startrow=start_row,
            index=False,
            header=False,
        )
        worksheet = self.writer.sheets[sheetname]
        rows_written = len(dframe.index)
        num_cols = len(dframe.columns.values)
        # Wide justified cells for the text columns, narrower for numerics.
        worksheet.set_column(0, num_text_cols - 1, 40, self.format_justify)
        worksheet.set_column(num_text_cols, num_cols, 16, self.format_justify)
        numeric_data_range = xl_range(
            start_row, col + num_text_cols, start_row + rows_written, col + num_cols
        )
        # Small magnitudes (ratios) show two decimals; large magnitudes
        # (dollar figures) show thousands separators only.
        worksheet.conditional_format(
            numeric_data_range,
            {
                "type": "cell",
                "criteria": "between",
                "minimum": -100,
                "maximum": 100,
                "format": self.format_commas_2dec,
            },
        )
        worksheet.conditional_format(
            numeric_data_range,
            {
                "type": "cell",
                "criteria": "not between",
                "minimum": -100,
                "maximum": 100,
                "format": self.format_commas,
            },
        )
        # Append a CAGR formula column computed between the first and last
        # numeric column of each written row.
        cagr_col = col + num_cols
        begin_cagr_calc_col = num_text_cols
        end_cagr_calc_col = cagr_col - 1
        for cagr_row in range(start_row, start_row + rows_written):
            beg_val = xl_rowcol_to_cell(cagr_row, begin_cagr_calc_col)
            end_val = xl_rowcol_to_cell(cagr_row, end_cagr_calc_col)
            if dimension == "MRY" or dimension == "ARY":
                # Yearly data: the number of periods between the years.
                years = end_cagr_calc_col - begin_cagr_calc_col
            else:
                # There's a quarter between each reporting period.
                years = (end_cagr_calc_col - begin_cagr_calc_col) / 4
            formula = '=IFERROR(({end_val}/{beg_val})^(1/{years}) - 1,"")'.format(
                beg_val=beg_val, end_val=end_val, years=years
            )
            worksheet.write_formula(cagr_row, cagr_col, formula, self.format_commas_2dec)
        # Sparklines make data trends easily visible.
        spark_col = cagr_col + 1
        worksheet.set_column(spark_col, spark_col, 20)
        for spark_row in range(start_row, start_row + rows_written):
            # NOTE(review): the end column `col + cagr_col - 1` double-counts
            # `col` when col != 0; every current caller passes col=0 — confirm
            # before reusing with a column offset.
            numeric_data_row_range = xl_range(
                spark_row, col + num_text_cols, spark_row, col + cagr_col - 1
            )
            worksheet.add_sparkline(
                spark_row,
                spark_col,
                {"range": numeric_data_row_range, "markers": "True"},
            )
        if use_header is True:
            for column, hdr in zip(
                range(col, num_cols + col), dframe.columns.values.tolist()
            ):
                worksheet.write_string(row, column, hdr, self.format_bold)
            rows_written += 1
        return rows_written
def stock_xlsx(outfile, stocks, database, dimension, periods):
    """Build an xlsx workbook of fundamentals: one sheet per ticker plus a summary.

    For each ticker, fetches the statement dataframes and Sharadar ratios,
    derives the locally calculated ratios, and writes each section into a
    per-ticker worksheet. A colour-coded Summary sheet is written last.

    Args:
        outfile: Path of the workbook to write.
        stocks: Iterable of ticker symbols.
        database: "SF0" or "SF1" (selects the quandl API key, see Fundamentals_ng).
        dimension: Sharadar reporting dimension, e.g. "MRY", "MRQ", "MRT".
        periods: Number of reporting periods to include per ticker.
    """
    excel = Excel(outfile)
    # Track the most recently processed fundamentals object; the summary sheet
    # needs its indicator dict. Robustness fix: the original referenced the
    # loop variable `fund` after the loop, raising NameError for an empty
    # stock list and IndexError when every ticker failed with NotFoundError.
    last_fund = None
    for stock in stocks:
        fund = SharadarFundamentals(database)
        logger.info("Processing the stock %s", stock)
        shtname = "{}".format(stock)
        try:
            fund.get_indicators(stock, dimension, periods)
        except NotFoundError:
            logger.warning(
                "NotFoundError when getting indicators for the stock %s", stock
            )
            continue
        # Now calculate some of the additional ratios for credit analysis.
        fund.calc_ratios()
        # Write each statement section below the previous one, separated by
        # the stated number of blank rows.
        sections = [
            (fund.get_transposed_and_formatted_i_stmnt, 1),
            (fund.get_transposed_and_formatted_cf_stmnt, 1),
            (fund.get_transposed_and_formatted_bal_stmnt, 1),
            (fund.get_transposed_and_formatted_metrics_and_ratios, 2),
            (fund.get_transposed_and_formatted_calculated_ratios, 0),
        ]
        row, col = 0, 0
        for getter, gap in sections:
            rows_written = excel.write_df(
                getter(), row, col, shtname, dimension, use_header=True
            )
            row = row + rows_written + gap
        excel.add_summary_row(stock, fund)
        last_fund = fund
        logger.info("Processed the stock %s", stock)
    if last_fund is not None:
        excel.write_summary_sheet(last_fund.summarize_ind_dict)
    excel.save()
def main():
    """Demo entry point: write ratios for a small ticker list to an xlsx file."""
    # Example REIT portfolio:
    # stocks = ['SPG', 'WPC', 'KIM', 'SKT', 'NNN', 'STOR']
    tickers = ["AAPL"]
    outfile = "quandl_ratios.xlsx"
    stock_xlsx(outfile, tickers, "SF0", "MRY", 5)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
"""This module provides functions to calculate fundamental ratios
for a stock portfolio.
The results are saved in an excel workbook with one sheet per stock
as well as a summary sheet
:copyright: (c) 2021 by <NAME>
:license: Apache 2, see LICENCE for more details
"""
import collections
import logging
import numpy as np
import os
import sys
import pandas as pd
import quandl
from quandl.errors.quandl_error import NotFoundError
from xlsxwriter.utility import xl_range
from xlsxwriter.utility import xl_rowcol_to_cell
# Added this one line below to get logging from the requests module,
# comment me out when done
# logging.basicConfig(level=logging.DEBUG)
# Module-level logger: INFO and above go to stderr with a timestamped format.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# Switch to DEBUG for verbose dataframe tracing:
# logger.setLevel(logging.DEBUG)
class Fundamentals_ng(object):
def __init__(
self,
database,
i_ind,
cf_ind,
bal_ind,
metrics_and_ratios_ind,
calc_ratios,
summarize_ind,
):
if database == "SF0":
if "QUANDL_API_SF0_KEY" in os.environ:
quandl.ApiConfig.api_key = os.environ["QUANDL_API_SF0_KEY"]
else:
print(
"Exiting: Please set the QUANDL_API_SF0_KEY environment variable."
)
sys.exit()
elif database == "SF1":
if "QUANDL_API_SF1_KEY" in os.environ:
quandl.ApiConfig.api_key = os.environ["QUANDL_API_SF1_KEY"]
else:
print("Exiting Please set the QUANDL_API_SF1_KEY environment variable.")
sys.exit()
# self.database = 'SHARADAR/' + database
self.database = database
self.all_inds_df = None
self.i_stmnt_ind_dict = collections.OrderedDict(i_ind)
self.i_stmnt_df = None
self.cf_stmnt_ind_dict = collections.OrderedDict(cf_ind)
self.cf_stmnt_df = None
self.bal_stmnt_ind_dict = collections.OrderedDict(bal_ind)
self.bal_stmnt_df = None
self.metrics_and_ratios_ind_dict = collections.OrderedDict(
metrics_and_ratios_ind
)
self.metrics_and_ratios_df = None
self.calc_ratios_dict = collections.OrderedDict(calc_ratios)
self.calc_ratios_df = None
self.dimension = None
self.periods = None
self.summarize_ind_dict = collections.OrderedDict(summarize_ind)
def get_indicators(self, ticker, dimension, periods):
    """Obtains fundamental company indicators from the Quandl API.

    Uses the Quandl SHARADAR/SF1 table to obtain the full set of fundamental
    datapoints (or indicators in Quandl parlance) for the provided ticker,
    then filters the result into the per-statement dataframes
    (i_stmnt_df, cf_stmnt_df, bal_stmnt_df, metrics_and_ratios_df).

    The formats accepted for the indicators and dimensions are described in:
    https://www.quandl.com/data/SF0-Free-US-Fundamentals-Data/documentation/about
    and
    https://www.quandl.com/data/SF1-Core-US-Fundamentals-Data/documentation/about

    Args:
        ticker: A string representing the stock.
        dimension: A string representing the timeframe for which data is
            required. For the SF0 database only 'MRY' (most recent yearly) is
            supported. For the SF1 database available options are:
            MRY, MRQ, MRT, ARY, ARQ, ART.
        periods: An integer representing the number of reporting periods
            (most recent) to keep.

    Returns:
        A copy of the dataframe containing all of the indicators for this
        ticker. The indicators are the columns and the time periods the rows.

    Raises:
        NotFoundError: If the ticker is unknown or returned no rows.
    """
    # We get all of the data for a given ticker, then filter what we give
    # back. At some point the SF0 table was removed; even with "SF0"-only
    # access we still request the SHARADAR/SF1 table — the API takes care of
    # restricting access to the SF0 limited dataset.
    try:
        self.all_inds_df = quandl.get_table(
            "SHARADAR/SF1", ticker=ticker, dimension=dimension
        )
        # An empty frame means the ticker isn't covered; surface it the same
        # way quandl reports unknown tickers.
        if self.all_inds_df.empty:
            raise NotFoundError
        # Sort so that earliest dates will now be at the top, then keep only
        # the most recent `periods` rows.
        self.all_inds_df.sort_values("datekey", inplace=True)
        self.all_inds_df = self.all_inds_df.tail(periods)
        loc_df = self.all_inds_df.copy()
        logger.debug(
            "get_indicators: df columns = %s" % (self.all_inds_df.columns.tolist())
        )
        logger.debug("get_indicators: all_inds_df = %s" % (self.all_inds_df.head()))
    except NotFoundError:
        logger.warning("get_indicators: The ticker %s " "is not supported", ticker)
        raise
    # Create separate income, cash flow, balance and metrics dataframes by
    # column-filtering the full result (copies, so later edits don't alias).
    self.i_stmnt_df = self.all_inds_df[self.i_stmnt_ind_dict.keys()].copy()
    self.cf_stmnt_df = self.all_inds_df[self.cf_stmnt_ind_dict.keys()].copy()
    self.bal_stmnt_df = self.all_inds_df[self.bal_stmnt_ind_dict.keys()].copy()
    self.metrics_and_ratios_df = self.all_inds_df[
        self.metrics_and_ratios_ind_dict.keys()
    ].copy()
    self.dimension = dimension
    self.periods = periods
    logger.debug("get_indicators: income dataframe = %s" % (self.i_stmnt_df.head()))
    return loc_df
def get_transposed_and_formatted_i_stmnt(self):
""" Returns a transposed and formatted partial income statement dataframe with
description added ready for printing to an excel sheet, or possible via html
in the future.
The original dataframe is in a format where the column headers are the indicators
and the rows are the per year or per quarter samples. This is the desired format
for performing operations on the data, it's so-called clean-data.
For visualing in a spreadsheet we want the columns to be the dates and the rows
to be the indicators. Hence the need to transpose.
Returns:
A dataframe
"""
stmnt_df = self.i_stmnt_df.copy()
desc_dict = self.i_stmnt_ind_dict
description = "Sharadar Income"
return self._transpose_and_format_stmnt(stmnt_df, desc_dict, description)
def get_transposed_and_formatted_cf_stmnt(self):
""" Returns a transposed and formatted subset of the cash flow statement
dataframe with description added ready for printing to an excel sheet, or
possible via html in the future.
Returns:
A dataframe
"""
stmnt_df = self.cf_stmnt_df.copy()
desc_dict = self.cf_stmnt_ind_dict
description = "Sharadar Cash Flow"
return self._transpose_and_format_stmnt(stmnt_df, desc_dict, description)
def get_transposed_and_formatted_bal_stmnt(self):
""" Returns a transposed and formatted subset of the balance sheet statement dataframe
with description addedready for printing to an excel sheet, or possible via html in the future.
Returns:
A dataframe
"""
stmnt_df = self.bal_stmnt_df.copy()
desc_dict = self.bal_stmnt_ind_dict
description = "Sharadar Balance"
return self._transpose_and_format_stmnt(stmnt_df, desc_dict, description)
def get_transposed_and_formatted_metrics_and_ratios(self):
""" Returns a transposed and formatted subset of sharadar metrics and
ratios statement dataframe with description added ready for printing to
an excel sheet, or possible via html in the future.
Returns:
A dataframe
"""
stmnt_df = self.metrics_and_ratios_df.copy()
desc_dict = self.metrics_and_ratios_ind_dict
description = "Sharadar Metrics and Ratios"
return self._transpose_and_format_stmnt(stmnt_df, desc_dict, description)
def get_transposed_and_formatted_calculated_ratios(self):
""" Returns a transposed and formatted calculated ratios dataframe with
description added ready for printing to an excel sheet, or possible
via html in the future.
Returns:
A dataframe
"""
stmnt_df = self.calc_ratios_df.copy()
desc_dict = self.calc_ratios_dict
description = "Calculated Metrics and Ratios"
return self._transpose_and_format_stmnt(stmnt_df, desc_dict, description)
def _transpose_and_format_stmnt(
self, stmnt_df, description_dict, description_of_indicators
):
""" Transpose the df so that we have the indicators as rows and datefields as columns
Side effects. Modifies the passed in dataframe.
"""
# As a precursor to making the datefields as columns we set the datefield as the index.
# We then transpose the dataframe such that the index becomes the columns and the columns become rows
stmnt_df.set_index("datekey", inplace=True)
# Transpose to get this dataframe ready for printing
# Convert the df so that we have the indicators as the index and datefields as columns
ret_df = stmnt_df.transpose()
# The columns are of a dateTime type, we need them to be text in order for the dataframe
# to excel module to work.
ret_df.columns = ret_df.columns.map(lambda t: t.strftime("%Y-%m-%d"))
# Now we want two additional descriptive columns in the dataframe.
# We want the description of the indicator in one column and the Sharadar code
# in another.
# Note that dictionary keys, in this case the Sharadar Indicator code
# becomes the index of the newly created Pandas series. The values become the data associated
# with these keys.
description_s = pd.Series(description_dict)
# The insert method is what enables us to place the column exactly where we want it.
ret_df.insert(0, "Description", description_s)
# For the second column, the sharadar codes, we can get the manes of these from the index of our
# dataframe. So a variation on the previous case where we inserted a column from a PD series. Here
# we point to an array like item which the insert method accepts, that of the dataframe index. After
# the transpose this contains what were the column i.e the Sharadar indicators.
#
# Create a new column using the values from the index, similar to doing a .reset_index
# but uses an explicit column instead of column 0 which reset-index does.
ret_df.insert(1, description_of_indicators + " " + self.dimension, ret_df.index)
return ret_df
def calc_ratios(self):
    """Compute the locally derived, credit-analysis-leaning ratios.

    Builds ``self.calc_ratios_df`` column by column — one column per entry
    in ``self.calc_ratios_dict`` via ``self._calc_ratios`` — sharing the
    index of the income-statement dataframe pulled from Sharadar. Some of
    the ratios come from Fridson & Alvarez, 'Financial Statement Analysis';
    others are credit sanity checks or rough REIT approximations.

    Returns:
        A copy of the calculated-ratios dataframe.
    """
    # Start empty but aligned with the Sharadar-sourced frames (rows are
    # reporting dates, columns will be the ratio names).
    self.calc_ratios_df = pd.DataFrame(index=self.i_stmnt_df.index)
    for name in self.calc_ratios_dict:
        logger.debug("get_calc_ratios: ratio = %s" % (name))
        self._calc_ratios(name)
    # Insert datekey as the first column so this synthetic frame matches the
    # layout of the Sharadar frames and survives the later transpose.
    self.calc_ratios_df.insert(0, "datekey", self.i_stmnt_df["datekey"])
    # Normalise awkward values: NaN -> None and inf -> a big sentinel.
    self.calc_ratios_df = self.calc_ratios_df.replace({np.nan: None}).replace(
        {np.inf: 999999999}
    )
    logger.debug("get_calc_ratios: dataframe = %s" % (self.calc_ratios_df))
    return self.calc_ratios_df.copy()
def _calc_ratios(self, ratio):
# Debt to Cash Flow From Operations
def _debt_cfo_ratio():
logger.debug(
"_calc_ratios._debt_cfo_ratio: debt = %s" % (self.bal_stmnt_df["debt"])
)
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] / self.cf_stmnt_df["ncfo"]
)
return
# Debt to Equity
def _debt_equity_ratio():
logger.debug(
"_calc_ratios._debt_equity_ratio: debt = %s"
% (self.bal_stmnt_df["debt"])
)
logger.debug(
"_calc_ratios._debt_equity_ratio: equity = %s"
% (self.bal_stmnt_df["equity"])
)
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] / self.bal_stmnt_df["equity"]
)
return
def _liabilities_equity_ratio():
logger.debug(
"_calc_ratios._liabilities_equity:_ratio liabilities = %s"
% (self.bal_stmnt_df["liabilities"])
)
logger.debug(
"_calc_ratios._liabilities_equity_ratio: equity = %s"
% (self.bal_stmnt_df["equity"])
)
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["liabilities"] / self.bal_stmnt_df["equity"]
)
return
# Debt to ebitda
def _debt_ebitda_ratio():
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] / self.metrics_and_ratios_df["ebitda"]
)
return
# Debt to ebitda minus CapEx
def _debt_ebitda_minus_capex_ratio():
# capex is returned from Sharadar as a -ve number, hence we need to add this to
# subtract capex
self.calc_ratios_df[ratio] = self.bal_stmnt_df["debt"] / (
self.metrics_and_ratios_df["ebitda"] + self.cf_stmnt_df["capex"]
)
return
# Net Debt to ebitda
def _net_debt_ebitda_ratio():
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] - self.bal_stmnt_df["cashnequsd"]
) / self.metrics_and_ratios_df["ebitda"]
return
# Net Debt to ebitda minus CapEx
def _net_debt_ebitda_minus_capex_ratio():
# capex is returned from Sharadar as a -ve number, hence we need to add this to
# subtract capex
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] - self.bal_stmnt_df["cashnequsd"]
) / (self.metrics_and_ratios_df["ebitda"] + self.cf_stmnt_df["capex"])
return
# Depreciation to Cash Flow From Operations Pg 278.
def _depreciation_cfo_ratio():
self.calc_ratios_df[ratio] = (
self.cf_stmnt_df["depamor"] / self.cf_stmnt_df["ncfo"]
)
return
def _depreciation_revenue_ratio():
self.calc_ratios_df[ratio] = (
self.cf_stmnt_df["depamor"] / self.i_stmnt_df["revenue"]
)
return
def _debt_to_total_capital():
self.calc_ratios_df[ratio] = (
self.bal_stmnt_df["debt"] / self.metrics_and_ratios_df["invcapavg"]
)
return
def _roic():
self.calc_ratios_df[ratio] = (
self.i_stmnt_df["ebit"] / self.metrics_and_ratios_df["invcapavg"]
)
# self.database = database
# Times Interest coverage aka fixed charge coverage Pg 278.
# (Net Income + Income taxes + Interest Expense)/(Interest expense + Capitalized Interest)
# Cannot see how to get capitalized interest from the API so that term is excluded.
# This is the same as ebit to Interest Expense
def _ebit_interest_coverage():
    self.calc_ratios_df[ratio] = (
        self.i_stmnt_df["ebit"] / self.i_stmnt_df["intexp"]
    )
    return

def _ebitda_interest_coverage():
    # EBITDA / interest expense: coverage before depreciation/amortization.
    self.calc_ratios_df[ratio] = (
        self.metrics_and_ratios_df["ebitda"] / self.i_stmnt_df["intexp"]
    )
    return

def _ebitda_minus_capex_interest_coverage():
    # Recall that capex is returned from Sharadar as a -ve number.
    self.calc_ratios_df[ratio] = (
        self.metrics_and_ratios_df["ebitda"] + self.cf_stmnt_df["capex"]
    ) / self.i_stmnt_df["intexp"]
    return

def _rough_ffo():
    # Rough Funds From Operations: net income + D&A. No adjustment for
    # gains on real-estate sales (not exposed by the API).
    self.calc_ratios_df[ratio] = (
        self.i_stmnt_df["netinc"] + self.cf_stmnt_df["depamor"]
    )
    return

def _rough_affo():
    # capex is returned from Quandl as a -ve number, hence we add it in
    # order to subtract capex.
    self.calc_ratios_df[ratio] = (
        self.i_stmnt_df["netinc"]
        + self.cf_stmnt_df["depamor"]
        + self.cf_stmnt_df["capex"]
    )
    return
def _rough_ffo_dividend_payout_ratio():
    # Dividends paid as a fraction of rough FFO (net income + D&A).
    # ncfdiv is reported by Sharadar as a negative cash outflow, so negate
    # it here for consistency with _income_dividend_payout_ratio and
    # _dividends_cfo_ratio; previously the negation was missing, yielding a
    # negative payout ratio.
    self.calc_ratios_df[ratio] = -self.cf_stmnt_df["ncfdiv"] / (
        self.i_stmnt_df["netinc"] + self.cf_stmnt_df["depamor"]
    )
    return

def _rough_affo_dividend_payout_ratio():
    # Dividends paid as a fraction of rough AFFO (net income + D&A - capex;
    # capex arrives negative, so it is added). ncfdiv negated for the same
    # reason as in _rough_ffo_dividend_payout_ratio above.
    self.calc_ratios_df[ratio] = -self.cf_stmnt_df["ncfdiv"] / (
        self.i_stmnt_df["netinc"]
        + self.cf_stmnt_df["depamor"]
        + self.cf_stmnt_df["capex"]
    )
    return
def _income_dividend_payout_ratio():
    # negating since ncfdiv is returned as a negative number
    self.calc_ratios_df[ratio] = (
        -self.cf_stmnt_df["ncfdiv"] / self.i_stmnt_df["netinc"]
    )
    return

# TODO add some conditional logic to use the fully diluted shares value
# when it is provided.
def _price_rough_ffo_ps_ratio():
    # Price / rough FFO per share. Reads the "rough_ffo" column from
    # calc_ratios_df, so that ratio must have been computed first —
    # evaluation order matters (TODO confirm caller guarantees this).
    self.calc_ratios_df[ratio] = self.i_stmnt_df["price"] / (
        self.calc_ratios_df["rough_ffo"] / self.bal_stmnt_df["shareswa"]
    )
    return

def _rough_ffo_ps():
    # Rough FFO per weighted-average share (depends on "rough_ffo" column).
    self.calc_ratios_df[ratio] = (
        self.calc_ratios_df["rough_ffo"] / self.bal_stmnt_df["shareswa"]
    )
    return

def _cfo_ps():
    # Cash flow from operations per weighted-average share.
    self.calc_ratios_df[ratio] = (
        self.cf_stmnt_df["ncfo"] / self.bal_stmnt_df["shareswa"]
    )
    return

def _opinc_ps():
    # Operating income per weighted-average share.
    self.calc_ratios_df[ratio] = (
        self.i_stmnt_df["opinc"] / self.bal_stmnt_df["shareswa"]
    )
    return

def _fcf_ps():
    # Free cash flow per weighted-average share.
    self.calc_ratios_df[ratio] = (
        self.metrics_and_ratios_df["fcf"] / self.bal_stmnt_df["shareswa"]
    )
    return

def _ev_opinc_ratio():
    # "Acquirer's multiple": enterprise value / operating income.
    self.calc_ratios_df[ratio] = (
        self.metrics_and_ratios_df["ev"] / self.i_stmnt_df["opinc"]
    )
    return
# <NAME>, author of Good Stocks Cheap, definition of capital employed.
# He has two definitions, one where cash is subtracted and one where it's
# not. Accrued expenses should also be subtracted but are not available in
# the Sharadar API — probably a scour-the-footnotes exercise if we really
# wanted to include them.
def _kjm_capital_employed_sub_cash():
    # Capital employed = assets - cash - payables - deferred revenue.
    self.calc_ratios_df[ratio] = (
        self.bal_stmnt_df["assets"]
        - self.bal_stmnt_df["cashnequsd"]
        - self.bal_stmnt_df["payables"]
        - self.bal_stmnt_df["deferredrev"]
    )
    return

def _kjm_capital_employed_with_cash():
    # Same as above but cash is left in.
    self.calc_ratios_df[ratio] = (
        self.bal_stmnt_df["assets"]
        - self.bal_stmnt_df["payables"]
        - self.bal_stmnt_df["deferredrev"]
    )
    return

def _kjm_roce_sub_cash():
    # Operating income / capital employed (ex-cash). Reads the
    # "kjm_capital_employed_sub_cash" column, so that ratio must have been
    # computed first — evaluation order matters.
    self.calc_ratios_df[ratio] = (
        self.i_stmnt_df["opinc"]
        / self.calc_ratios_df["kjm_capital_employed_sub_cash"]
    )
    return

def _kjm_roce_with_cash():
    # Operating income / capital employed (with cash); depends on the
    # "kjm_capital_employed_with_cash" column being computed first.
    self.calc_ratios_df[ratio] = (
        self.i_stmnt_df["opinc"]
        / self.calc_ratios_df["kjm_capital_employed_with_cash"]
    )
    return

def _kjm_fcf_return_on_capital_employed_sub_cash():
    # Free cash flow / capital employed (ex-cash).
    self.calc_ratios_df[ratio] = (
        self.metrics_and_ratios_df["fcf"]
        / self.calc_ratios_df["kjm_capital_employed_sub_cash"]
    )
    return

def _kjm_fcf_return_on_capital_employed_with_cash():
    # Free cash flow / capital employed (with cash).
    self.calc_ratios_df[ratio] = (
        self.metrics_and_ratios_df["fcf"]
        / self.calc_ratios_df["kjm_capital_employed_with_cash"]
    )
    return

def _kjm_delta_oi_fds():
    # Period-over-period % change in operating income per share; depends
    # on the "opinc_ps" column being computed first.
    self.calc_ratios_df[ratio] = self.calc_ratios_df["opinc_ps"].pct_change()
    return

def _kjm_delta_fcf_fds():
    # Period-over-period % change in free cash flow per share; depends on
    # the "fcf_ps" column being computed first.
    self.calc_ratios_df[ratio] = self.calc_ratios_df["fcf_ps"].pct_change()
    return

def _kjm_delta_bv_fds():
    # Period-over-period % change in book value (shareholders equity).
    self.calc_ratios_df[ratio] = self.bal_stmnt_df["equity"].pct_change()
    return

def _kjm_delta_tbv_fds():
    # Period-over-period % change in tangible book value
    # (equity - intangibles).
    self.calc_ratios_df[ratio] = (
        self.bal_stmnt_df["equity"] - self.bal_stmnt_df["intangibles"]
    ).pct_change()
    return
def _dividends_free_cash_flow_ratio():
    # Dividends / free cash flow; ncfdiv negated since it is reported as
    # a negative cash outflow.
    self.calc_ratios_df[ratio] = (
        -self.cf_stmnt_df["ncfdiv"] / self.metrics_and_ratios_df["fcf"]
    )
    return

def _preferred_free_cash_flow_ratio():
    # Preferred dividend payments / free cash flow.
    self.calc_ratios_df[ratio] = (
        self.i_stmnt_df["prefdivis"] / self.metrics_and_ratios_df["fcf"]
    )
    return

def _operating_margin():
    # Operating income / revenue.
    self.calc_ratios_df[ratio] = (
        self.i_stmnt_df["opinc"] / self.i_stmnt_df["revenue"]
    )
    return

def _sg_and_a_gross_profit_ratio():
    # SG&A as a fraction of gross profit.
    self.calc_ratios_df[ratio] = self.i_stmnt_df["sgna"] / self.i_stmnt_df["gp"]
    return

def _ltdebt_cfo_ratio():
    # Long-term (non-current) debt / cash flow from operations.
    self.calc_ratios_df[ratio] = (
        self.bal_stmnt_df["debtnc"] / self.cf_stmnt_df["ncfo"]
    )
    return

def _ltdebt_earnings_ratio():
    # Long-term (non-current) debt / net income.
    self.calc_ratios_df[ratio] = (
        self.bal_stmnt_df["debtnc"] / self.i_stmnt_df["netinc"]
    )
    return

def _free_cash_flow_conversion_ratio():
    # Free cash flow / EBITDA.
    self.calc_ratios_df[ratio] = (
        self.metrics_and_ratios_df["fcf"] / self.metrics_and_ratios_df["ebitda"]
    )
    return

# Pg 290 of Creative Cash Flow Reporting, Mumford et al.
def _excess_cash_margin_ratio():
    # (CFO - operating income) * 100 / revenue.
    self.calc_ratios_df[ratio] = (
        (self.cf_stmnt_df["ncfo"] - self.i_stmnt_df["opinc"])
        * 100
        / self.i_stmnt_df["revenue"]
    )
    return

def _interest_to_cfo_plus_interest_coverage():
    # Interest expense / (CFO + interest expense).
    self.calc_ratios_df[ratio] = self.i_stmnt_df["intexp"] / (
        self.cf_stmnt_df["ncfo"] + self.i_stmnt_df["intexp"]
    )
    return

def _dividends_cfo_ratio():
    # negating since ncfdiv is returned as a negative number
    self.calc_ratios_df[ratio] = (
        -self.cf_stmnt_df["ncfdiv"] / self.cf_stmnt_df["ncfo"]
    )
    return

def _preferred_cfo_ratio():
    # Preferred dividend payments / cash flow from operations.
    self.calc_ratios_df[ratio] = (
        self.i_stmnt_df["prefdivis"] / self.cf_stmnt_df["ncfo"]
    )
    return
switcher = {
"debt_equity_ratio": _debt_equity_ratio,
"liabilities_equity_ratio": _liabilities_equity_ratio,
"debt_ebitda_ratio": _debt_ebitda_ratio,
"debt_ebitda_minus_capex_ratio": _debt_ebitda_minus_capex_ratio,
"net_debt_ebitda_ratio": _net_debt_ebitda_ratio,
"net_debt_ebitda_minus_capex_ratio": _net_debt_ebitda_minus_capex_ratio,
"debt_to_total_capital": _debt_to_total_capital,
"return_on_invested_capital": _roic,
"ebit_interest_coverage": _ebit_interest_coverage,
"ebitda_interest_coverage": _ebitda_interest_coverage,
"ebitda_minus_capex_interest_coverage": _ebitda_minus_capex_interest_coverage,
"debt_cfo_ratio": _debt_cfo_ratio,
"depreciation_cfo_ratio": _depreciation_cfo_ratio,
"depreciation_revenue_ratio": _depreciation_revenue_ratio,
"rough_ffo": _rough_ffo,
"rough_affo": _rough_affo,
"rough_ffo_dividend_payout_ratio": _rough_ffo_dividend_payout_ratio,
"rough_affo_dividend_payout_ratio": _rough_affo_dividend_payout_ratio,
"income_dividend_payout_ratio": _income_dividend_payout_ratio,
"price_rough_ffo_ps_ratio": _price_rough_ffo_ps_ratio,
"rough_ffo_ps": _rough_ffo_ps,
"opinc_ps": _opinc_ps,
"cfo_ps": _cfo_ps,
"fcf_ps": _fcf_ps,
"ev_opinc_ratio": _ev_opinc_ratio,
"dividends_free_cash_flow_ratio": _dividends_free_cash_flow_ratio,
"preferred_free_cash_flow_ratio": _preferred_free_cash_flow_ratio,
"operating_margin": _operating_margin,
"sg_and_a_gross_profit_ratio": _sg_and_a_gross_profit_ratio,
"ltdebt_cfo_ratio": _ltdebt_cfo_ratio,
"ltdebt_earnings_ratio": _ltdebt_earnings_ratio,
"free_cash_flow_conversion_ratio": _free_cash_flow_conversion_ratio,
"excess_cash_margin_ratio": _excess_cash_margin_ratio,
"interest_to_cfo_plus_interest_coverage": _interest_to_cfo_plus_interest_coverage,
"dividends_cfo_ratio": _dividends_cfo_ratio,
"preferred_cfo_ratio": _preferred_cfo_ratio,
"kjm_capital_employed_sub_cash": _kjm_capital_employed_sub_cash,
"kjm_capital_employed_with_cash": _kjm_capital_employed_with_cash,
"kjm_roce_sub_cash": _kjm_roce_sub_cash,
"kjm_roce_with_cash": _kjm_roce_with_cash,
"kjm_fcf_return_on_capital_employed_sub_cash": _kjm_fcf_return_on_capital_employed_sub_cash,
"kjm_fcf_return_on_capital_employed_with_cash": _kjm_fcf_return_on_capital_employed_with_cash,
"kjm_delta_oi_fds": _kjm_delta_oi_fds,
"kjm_delta_fcf_fds": _kjm_delta_fcf_fds,
"kjm_delta_bv_fds": _kjm_delta_bv_fds,
"kjm_delta_tbv_fds": _kjm_delta_tbv_fds,
}
# Get the function from switcher dictionary
func = switcher.get(ratio, lambda: NotImplementedError)
# Execute the function
return func()
class SharadarFundamentals(Fundamentals_ng):
    """Fundamentals_ng configured with the Sharadar/Quandl indicator tables.

    Each *_IND list pairs a Sharadar indicator code with a human-readable
    description. CALCULATED_RATIOS names the ratios derived locally by this
    package, and SUMMARIZE_IND selects the indicators shown on the summary
    sheet together with an asc/desc flag that drives Excel conditional
    formatting ("asc" = higher is better, "desc" = lower is better).
    """

    # Locally calculated by this package. For each ratio or metric in this
    # table, there's a routine to calculate the value from the quandl API provided
    # statement indicator value.
    # The first item in each tuple is the Sharadar Code, the second is
    # a description.
    # Income Statement Indicator Quandl/Sharadar Codes
    I_STMNT_IND = [
        ("datekey", "SEC filing date"),
        ("revenue", "Revenues"),
        ("cor", "Cost of Revenue"),
        ("gp", "Gross Profit"),
        ("sgna", "Sales General and Admin"),
        ("rnd", "Research and Development Expense"),
        ("opex", "Operating Expenses"),
        ("intexp", "Interest Expense"),
        ("taxexp", "Tax Expense"),
        ("netincdis", "Net Loss Income from Discontinued Operations "),
        ("netincnci", "Net Income to Non-Controlling Interests"),
        ("opinc", "Operating Income"),
        ("ebit", "Earnings Before Interest and Taxes"),
        ("netinc", "Net Income"),
        ("prefdivis", "Preferred Dividends"),
        ("netinccmn", "Net Income to Common (after prefs paid)"),
        ("epsdil", "Earnings Per Share Diluted"),
        ("price", "Price per Share"),
        ("shareswadil", "Weighted Average Shares Diluted"),
        ("dps", "Dividends per Basic Common Share"),
    ]

    # Cash Flow Statement Indicator Quandl/Sharadar Codes
    CF_STMNT_IND = [
        ("datekey", "SEC filing date"),
        ("depamor", "Depreciation and Amortization"),
        ("ncfo", "Net Cash Flow From Operations"),
        ("ncfi", "Net Cash Flow From Investing"),
        ("capex", "Capital Expenditure"),
        ("ncff", "Net Cash Flow From Financing"),
        ("ncfdiv", "Payment of Dividends and Other Cash Distributions"),
    ]

    # Balance Statement Indicator Quandl/Sharadar Codes
    BAL_STMNT_IND = [
        ("datekey", "SEC filing date"),
        ("cashnequsd", "Cash and Equivalents (USD)"),
        ("receivables", "Receivables"),
        ("inventory", "Inventory"),
        ("investmentsc", "Investments Current"),
        ("assetsc", "Current Assets"),
        ("intangibles", "Intangibles"),
        ("ppnenet", "Property Plant and Equipment Net"),
        ("investmentsnc", "Investments Non-Current"),
        ("assetsnc", "Non Current Assets"),
        ("assets", "Total Assets"),
        ("deferredrev", "Deferred Revenue"),
        ("payables", "Payables"),
        ("liabilitiesc", "Current Liabilities"),
        ("debtc", "Current Debt"),
        ("taxliabilities", "Tax Liabilities"),
        ("debtnc", "Non Current Debt"),
        ("liabilitiesnc", "Non Current Liabilities"),
        ("liabilities", "Total Liabilities"),
        ("retearn", "Retained Earnings"),
        ("equity", "Shareholders Equity"),
        ("debt", "Total Debt"),
        ("shareswa", "Weighted Average Shares"),
        ("workingcapital", "Working Capital"),
    ]

    # Metrics and Ratio Indicator Quandl/Sharadar Codes
    METRICS_AND_RATIOS_IND = [
        ("datekey", "SEC filing date"),
        # ('DE', 'Debt to Equity Ratio'), Needs to be locally calculated when
        # using TTM figures
        ("ev", "Enterprise Value"),
        # evebitda only returned for the MRT period, the default for SF1
        ("evebitda", "Enterprise Value divided by ebitda"),
        ("pe", "Price Earnings Damodaran: Market Cap / Net Income"),
        ("ps", "Price Sales Damodaran: Market Cap / Revenue"),
        ("assetturnover", "Revenue / Assets average"),
        ("roa", "Return on Assets: Net Income / Average Assets"),
        ("roe", "Return on Equity: Net Income / Average Equity"),
        ("ros", "Return on Sales: ebit / Revenue"),
        ("ebitda", "Earnings Before Interest Taxes & Depreciation & Amortization"),
        ("fcf", "Free Cash Flow: CFO - CapEx"),
        ("invcapavg", "Invested Capital"),
        ("roic", "Return On Invested Capital"),
        ("grossmargin", "Gross Margin: Gross Profit/ Revenue"),
        ("netmargin", "Net Margin: Net Income/ Revenue"),
    ]

    # Ratios computed locally by calc_ratios(); each name here must have a
    # matching entry in the calc_ratios dispatch table.
    CALCULATED_RATIOS = [
        (
            "kjm_capital_employed_sub_cash",
            "<NAME> Marshal Capital Employed Subtract Cash",
        ),
        (
            "kjm_capital_employed_with_cash",
            "<NAME> Marshal Capital Employed With Cash",
        ),
        (
            "kjm_roce_sub_cash",
            "KJM Return on Capital Employed subtract Cash",
        ),
        (
            "kjm_roce_with_cash",
            "KJM Return on Capital Employed With Cash",
        ),
        (
            "kjm_fcf_return_on_capital_employed_with_cash",
            "KJM Free Cash Flow ROCE With Cash",
        ),
        (
            "kjm_fcf_return_on_capital_employed_sub_cash",
            "KJM Free Cash FLow Subtract Cash",
        ),
        ("opinc_ps", "Operating Income Per Share"),
        ("cfo_ps", "Cash Flow from Operations Per Share"),
        ("fcf_ps", "Free Cash Flow per Share"),
        ("kjm_delta_oi_fds", "YoY change in Operating Income per Fully Diluted Share"),
        ("kjm_delta_fcf_fds", "YoY change in Free Cash Flow per Fully Diluted Share"),
        ("kjm_delta_bv_fds", "YoY change in Book Value per Fully Diluted Share"),
        (
            "kjm_delta_tbv_fds",
            "YoY change in Tangible Book Value per Fully Diluted Share",
        ),
        ("liabilities_equity_ratio", "Total Liabilities / Shareholders Equity"),
        ("debt_ebitda_ratio", "Total Debt / ebitda"),
        ("debt_ebitda_minus_capex_ratio", "Total Debt / (ebitda - CapEx)"),
        ("net_debt_ebitda_ratio", "Net Debt / ebitda"),
        ("net_debt_ebitda_minus_capex_ratio", "Net Debt / (ebitda - CapEx)"),
        ("debt_equity_ratio", "Total Debt / Shareholders Equity"),
        ("ebit_interest_coverage", "ebit / Interest Expense"),
        ("ebitda_interest_coverage", "ebitda / Interest Expense"),
        ("ebitda_minus_capex_interest_coverage", "ebitda - CapEx / Interest Expense"),
        ("interest_to_cfo_plus_interest_coverage", "Interest / (CFO + Interest"),
        ("debt_to_total_capital", "Total Debt / Invested Capital"),
        ("debt_cfo_ratio", "Total Debt / Cash Flow From Operations"),
        ("ltdebt_cfo_ratio", "Long Term Debt / Cash Flow From Operations"),
        ("ltdebt_earnings_ratio", "Long Term Debt / Income"),
        ("income_dividend_payout_ratio", "Dividends / Net Income"),
        ("dividends_cfo_ratio", "Dividends/CFO"),
        ("preferred_cfo_ratio", "Preferred Payments/CFO"),
        ("dividends_free_cash_flow_ratio", "Dividends/fcf"),
        ("preferred_free_cash_flow_ratio", "Preferred Payments/fcf"),
        ("operating_margin", "Operating Margin: (Gross Profit - Opex)/ Revenue"),
        ("sg_and_a_gross_profit_ratio", "SG&A to Gross Profit Ratio"),
        ("ev_opinc_ratio", "Acquirers Multiple: Enterprise Value / Operating Income"),
        (
            "return_on_invested_capital",
            "Return on Invested Capital: ebit / Invested Capital",
        ),
        ("free_cash_flow_conversion_ratio", "Free Cash Flow Conversion Ratio"),
        ("excess_cash_margin_ratio", "Excess Cash Margin Ratio"),
        ("depreciation_revenue_ratio", "Depreciation / Revenue"),
        ("depreciation_cfo_ratio", "Depreciation / Cash Flow From Operations"),
        # fcf is already levered since CFO already includes the effect of interest
        # payments.
        # ("free_cash_flow_levered", 'fcf-Levered: fcf - Interest Expenses'),
        (
            "rough_ffo",
            "Rough FFO: Net Income plus Depreciation (missing cap gain from RE sales adjust)",
        ),
        ("rough_ffo_ps", "Rough FFO per Share"),
        ("price_rough_ffo_ps_ratio", "Price divided by rough_ffo_ps"),
        ("rough_ffo_dividend_payout_ratio", "Dividends / rough_ffo"),
    ]

    # The indicators which we'd like to show on a separate summary page
    # Edit this to customize what we show.
    # We control the excel conditional formatting by means of a formatting control
    # asc (ascending) means "Higher is better" desc (descending) "Lower is better"
    SUMMARIZE_IND = [
        ("ebitda_interest_coverage", "asc"),
        ("net_debt_ebitda_ratio", "desc"),
        ("workingcapital", "asc"),
        ("operating_margin", "asc"),
        ("grossmargin", "asc"),
        ("roic", "asc"),
        ("kjm_roce_sub_cash", "asc"),
        ("dividends_cfo_ratio", "desc"),
        ("dividends_free_cash_flow_ratio", "desc"),
        ("kjm_delta_oi_fds", "asc"),
        ("kjm_delta_fcf_fds", "asc"),
        ("preferred_cfo_ratio", "desc"),
    ]

    def __init__(self, database):
        """Initialize the base class with the Sharadar indicator tables.

        Args:
            database: The Quandl/Sharadar database name to pull from,
                e.g. "SF0" (free sample) or "SF1".
        """
        Fundamentals_ng.__init__(
            self,
            database,
            self.I_STMNT_IND,
            self.CF_STMNT_IND,
            self.BAL_STMNT_IND,
            self.METRICS_AND_RATIOS_IND,
            self.CALCULATED_RATIOS,
            self.SUMMARIZE_IND,
        )
class Excel:
    """Wrapper around an xlsxwriter-backed pandas ExcelWriter.

    Holds the workbook, a dedicated "Summary" worksheet, a set of shared
    cell formats, and the per-ticker summary rows accumulated while the
    individual stock sheets are written.
    """

    def __init__(self, outfile):
        # date_format controls how datetime cells render in every sheet.
        writer = pd.ExcelWriter(outfile, engine="xlsxwriter", date_format="d mmmm yyyy")
        self.writer = writer
        self.workbook = writer.book
        self.summary_sht = self.workbook.add_worksheet("Summary")
        self.summary_sht.set_first_sheet()
        # Accumulated (ticker, [(indicator, value), ...]) tuples, filled by
        # add_summary_row() and flushed by write_summary_sheet().
        self.summary_rows = []
        self.format_bold = self.workbook.add_format()
        self.format_bold.set_bold()
        self.format_commas_2dec = self.workbook.add_format()
        # NOTE(review): "0.#?" differs in style from the sibling formats
        # below ("#,##0.0", "#,##0"); confirm this is the intended
        # two-decimal number format string.
        self.format_commas_2dec.set_num_format("0.#?")
        self.format_commas_1dec = self.workbook.add_format()
        self.format_commas_1dec.set_num_format("#,##0.0")
        self.format_commas = self.workbook.add_format()
        self.format_commas.set_num_format("#,##0")
        self.format_justify = self.workbook.add_format()
        self.format_justify.set_align("justify")

    def save(self):
        """Write the workbook out to disk.

        NOTE(review): ExcelWriter.save() is deprecated in newer pandas in
        favour of close() — confirm against the pinned pandas version.
        """
        self.writer.save()

    def add_summary_row(self, ticker, fund):
        """Accumulate summary values for a given ticker.

        Args:
            ticker: The ticker for the stock we are given data for.
            fund: The fundamentals object holding this ticker's dataframes;
                the latest value of each summary indicator is extracted
                from it.
        """
        sum_ind_l = self._summarized_indicators(fund, ticker)
        self.summary_rows.append((ticker, sum_ind_l))

    def write_summary_sheet(self, summarized_ind_dict):
        """Write the accumulated summary rows to the Summary sheet.

        Args:
            summarized_ind_dict: Mapping of indicator name -> "asc"/"desc"
                formatting flag used for the conditional colour scales.
        """
        # calculate the size of the table we will need
        # this is using row,column indexing
        top_left = (0,0)
        y0, x0 = top_left
        rows = len(self.summary_rows)
        # All rows carry the same indicator list; use the first row to size
        # the table's columns.
        a_row = self.summary_rows[0]
        ticker, indicator_list = a_row
        cols = len(indicator_list)
        bottom_right = (y0 + rows, x0 + cols)
        self._create_empty_table(top_left, bottom_right, indicator_list)
        self._data_to_summary_table(top_left, bottom_right, self.format_commas_1dec)
        self._format_table(top_left, bottom_right, summarized_ind_dict)

    def _format_table(self, top_left, bottom_right, summarized_ind_dict):
        """ Will conditionally format each column of data.
        Hard coded with the simple 3_color_scale

        args:
            top_left: y,x coordinates of the top left of the table
            bottom_right: y,x coordinates of the bottom right of the table
            summarized_ind_dict: indicator -> "asc"/"desc" flag; "asc"
                colours larger values green, "desc" colours smaller
                values green.
        """
        crimson = "#DC143C"
        greenish = "#00CC66"
        # "Larger numbers are better" formatting
        ascend_fmt = {
            "type": "3_color_scale",
            "min_color": crimson,
            "max_color": greenish,
        }
        # "Smaller numbers are better" formatting
        descend_fmt = {
            "type": "3_color_scale",
            "min_color": greenish,
            "max_color": crimson,
        }
        # adjust the top_left coordinates to exclude the table header and the
        # first column
        # y_tc stands for y top column, so y coordinate of top of column
        # x_tc stands for x top column, so x coordinate of top of column
        y_tc, x_tc = top_left
        y_tc += 1
        x_tc += 1
        y_br, x_br = bottom_right
        y_bc = y_br
        x_bc = x_tc
        # Walk through each of the columns
        for ind, fmt in summarized_ind_dict.items():
            if fmt == "asc":
                self.summary_sht.conditional_format(y_tc, x_tc, y_bc, x_bc, ascend_fmt)
            elif fmt == "desc":
                self.summary_sht.conditional_format(y_tc, x_tc, y_bc, x_bc, descend_fmt)
            else:
                raise ValueError("Format parameter must be asc or desc")
            x_tc += 1
            x_bc += 1
        # breakpoint()
        # Sanity check: we formatted exactly the columns of the table.
        assert x_bc - 1 == x_br

    def _data_to_summary_table(self, top_left, bottom_right, cell_format):
        """Write the accumulated rows into the summary table body."""
        i = 0
        y0, x0 = top_left
        for row in self.summary_rows:
            val_list = []
            ticker = row[0]
            val_list.append(ticker)
            for ind, val in row[1]: # unpack the tuples of indicator value
                val_list.append(val)
            row_y = y0 + 1 + i
            row_x = x0
            # Note we had to replace the infs and Nans prior to this
            self.summary_sht.write_row(row_y, row_x, val_list, cell_format)
            i += 1

    def _create_empty_table(self, top_left, bottom_right, indicator_list):
        """Create the xlsxwriter table with a Ticker column plus one column
        per indicator."""
        # Create the empty table complete with column headers
        # We need to create a list of dicts.
        # Each entry of the form {'header':'Column name'}
        dict_list = []
        dict_list.append({"header": "Ticker"})
        for ind in indicator_list:
            hdr = {"header": ind[0]}
            dict_list.append(hdr)
        # breakpoint()
        self.summary_sht.add_table(*top_left, *bottom_right, {"columns": dict_list})

    def _latest_indicator_values(
        self, ticker, indicators, calc_ratios_df, all_sharadar_inds_df
    ):
        """Obtains the latest values for a given list of indicators

        Uses the provided dataframes to lookup the latest in time values for
        each of the indicators in the provided indicators list

        Args:
            ticker:
            indicators: A list of indicators
            calc_ratios_df: The calculated ratios dataframe.
            all_sharadar_inds_df: The dataframe containing the full table of
                results for a given dimension and ticker from Sharadar

        Returns:
            A list of Tuples of indicator, values pairs.
        """
        ind_val_l = []
        for indicator in indicators:
            # Locally calculated ratios take precedence over the raw
            # Sharadar columns.
            if indicator in calc_ratios_df.columns:
                recent_ind_val = calc_ratios_df[indicator].tail(1).iloc[0]
            elif indicator in all_sharadar_inds_df.columns:
                recent_ind_val = all_sharadar_inds_df[indicator].tail(1).iloc[0]
            else:
                raise KeyError("Couln't find indicator %s" % (indicator))
            ind_val_l.append((indicator, recent_ind_val))
        return ind_val_l

    def _summarized_indicators(self, fund, stock):
        """Return the latest (indicator, value) pairs for the summary row."""
        # unpack the indicators from the inds_to_summarize
        indicators = [*fund.summarize_ind_dict]
        summarized = self._latest_indicator_values(
            stock, indicators, fund.calc_ratios_df, fund.all_inds_df
        )
        # need to add fmt to the thing we pass return and deal wit it all the way downstream
        return summarized

    def write_df(
        self, dframe, row, col, sheetname, dimension, use_header=True, num_text_cols=2
    ):
        """Writes a dataframe to an excel worksheet.

        Args:
            dframe: A Pandas dataframe. The index must have been promoted to
              a column (using df.) prior to calling.
            row: An int, the row to start writing at, zero based.
            col: An int, the col to start writing at, zero based.
            sheetname: A string, the desired name for the sheet.
            dimension: A string representing the timeframe for which data is required.
              For the SF0 sample database only 'MRY' or most recent yearly is supported.
              For the SF1 database available options are: MRY, MRQ, MRT,ARY,ARQ,ART
            use_header: Whether to print the header of the dataframe
            num_text_cols: The number of columns which contain text. The remainder
              of the columns are assumed to create numeric values.

        Returns:
            rows_written: The number of rows written.
        """
        # logging.debug("write_df_to_excel_sheet: dataframe = %s" % ( dframe.info()))
        # We need to write out the df first using to_excel to obtain a
        # worksheet object which we'll then operate on for formatting.
        # We do not write the header using to_excel but explicitly write
        # later with Xlsxwriter.
        if use_header is True:
            start_row = row + 1
        else:
            start_row = row
        dframe.to_excel(
            self.writer,
            sheet_name=sheetname,
            startcol=col,
            startrow=start_row,
            index=False,
            header=False,
        )
        worksheet = self.writer.sheets[sheetname]
        rows_written = len(dframe.index)
        num_cols = len(dframe.columns.values)
        # Format the text columns and the numeric ones following these.
        # NOTE(review): these set_column calls start at absolute column 0
        # rather than `col`; correct only while callers pass col == 0.
        worksheet.set_column(0, num_text_cols - 1, 40, self.format_justify)
        worksheet.set_column(num_text_cols, num_cols, 16, self.format_justify)
        numeric_data_range = xl_range(
            start_row, col + num_text_cols, start_row + rows_written, col + num_cols
        )
        # Small magnitudes get the 2-decimal format, everything else gets
        # plain comma grouping.
        worksheet.conditional_format(
            numeric_data_range,
            {
                "type": "cell",
                "criteria": "between",
                "minimum": -100,
                "maximum": 100,
                "format": self.format_commas_2dec
            },
        )
        worksheet.conditional_format(
            numeric_data_range,
            {
                "type": "cell",
                "criteria": "not between",
                "minimum": -100,
                "maximum": 100,
                "format": self.format_commas
            },
        )
        # Lets figure out CAGR for a given row item
        cagr_col = col + num_cols
        begin_cagr_calc_col = num_text_cols
        end_cagr_calc_col = cagr_col - 1
        for cagr_row in range(start_row, start_row + rows_written):
            # looks like I'll need to use xl_rowcol_to_cell()
            beg_val = xl_rowcol_to_cell(cagr_row, begin_cagr_calc_col)
            end_val = xl_rowcol_to_cell(cagr_row, end_cagr_calc_col)
            if dimension == "MRY" or dimension == "ARY":
                # We want the number of periods between the years.
                years = end_cagr_calc_col - begin_cagr_calc_col
            else:
                # Theres a quarter between each reporting period
                years = (end_cagr_calc_col - begin_cagr_calc_col) / 4
            # IFERROR blanks the cell for rows where CAGR is undefined
            # (zero/negative start values etc.).
            formula = '=IFERROR(({end_val}/{beg_val})^(1/{years}) - 1,"")'.format(
                beg_val=beg_val, end_val=end_val, years=years
            )
            # worksheet.write(cagr_row, cagr_col, formula)
            worksheet.write_formula(cagr_row, cagr_col, formula, self.format_commas_2dec)
        # Sparklines make data trends easily visible
        spark_col = cagr_col + 1
        worksheet.set_column(spark_col, spark_col, 20)
        for spark_row in range(start_row, start_row + rows_written):
            # NOTE(review): cagr_col already includes `col`, so
            # `col + cagr_col - 1` double-counts col; harmless only while
            # callers pass col == 0 — confirm before reusing with col > 0.
            numeric_data_row_range = xl_range(
                spark_row, col + num_text_cols, spark_row, col + cagr_col - 1
            )
            worksheet.add_sparkline(
                spark_row,
                spark_col,
                {"range": numeric_data_row_range, "markers": "True"},
            )
        if use_header is True:
            for column, hdr in zip(
                range(col, num_cols + col), dframe.columns.values.tolist()
            ):
                worksheet.write_string(row, column, hdr, self.format_bold)
            # Account for the header row we just wrote.
            rows_written += 1
        return rows_written
def stock_xlsx(outfile, stocks, database, dimension, periods):
    """Build an Excel workbook of fundamentals, one sheet per ticker.

    For each ticker, the income statement, cash-flow statement, balance
    sheet, Sharadar-provided metrics and locally calculated ratios are
    written one below the other on a dedicated worksheet; a conditionally
    formatted summary sheet is then produced from the accumulated rows.

    Args:
        outfile: Path of the .xlsx file to create.
        stocks: Iterable of ticker symbols.
        database: Sharadar database name, e.g. "SF0" or "SF1".
        dimension: Reporting timeframe (e.g. "MRY", "MRQ", ...).
        periods: Number of reporting periods to fetch.
    """
    excel = Excel(outfile)
    for stock in stocks:
        fund = SharadarFundamentals(database)
        logger.info("Processing the stock %s", stock)
        shtname = "{}".format(stock)
        try:
            fund.get_indicators(stock, dimension, periods)
        except NotFoundError:
            logger.warning(
                "NotFoundError when getting indicators for the stock %s", stock
            )
            continue
        # Derive the additional credit-analysis ratios before writing.
        fund.calc_ratios()
        # Each section is stacked under the previous one, separated by
        # `gap` blank rows (an extra row before the calculated ratios).
        sections = [
            (fund.get_transposed_and_formatted_i_stmnt, 1),
            (fund.get_transposed_and_formatted_cf_stmnt, 1),
            (fund.get_transposed_and_formatted_bal_stmnt, 1),
            (fund.get_transposed_and_formatted_metrics_and_ratios, 2),
            (fund.get_transposed_and_formatted_calculated_ratios, 0),
        ]
        row, col = 0, 0
        for get_section, gap in sections:
            section_df = get_section()
            written = excel.write_df(
                section_df, row, col, shtname, dimension, use_header=True
            )
            row += written + gap
        excel.add_summary_row(stock, fund)
        logger.info("Processed the stock %s", stock)
    excel.write_summary_sheet(fund.summarize_ind_dict)
    excel.save()
def main():
    """Generate the ratios workbook for a hard-coded list of tickers."""
    # Example REIT watch list kept for reference:
    # tickers = ['SPG', 'WPC', 'KIM', 'SKT', 'NNN', 'STOR']
    tickers = ["AAPL"]
    n_periods = 5
    workbook_path = "quandl_ratios.xlsx"
    stock_xlsx(workbook_path, tickers, "SF0", "MRY", n_periods)
# Script entry point: only run the report when executed directly.
if __name__ == "__main__":
    main()
|
en
| 0.836913
|
This module provides functions to calculate fundamental ratios for a stock potfolio. The results are saved in an excel workbook with one sheet per stock as well as a summary sheet :copyright: (c) 2021 by <NAME> :license: Apache 2, see LICENCE for more details # Added this one line below to get logging from the requests module, # comment me out when done # logging.basicConfig(level=logging.DEBUG) #logger.setLevel(logging.DEBUG) # self.database = 'SHARADAR/' + database Obtains fundamental company indicators from the Quandl API. Uses the specified Quandl database to obtain a set of fundamental datapoints (or indicators in Quandl parlance) for the provided ticker. The formats accepted for the indicators and dimensions are described in: https://www.quandl.com/data/SF0-Free-US-Fundamentals-Data/documentation/about and https://www.quandl.com/data/SF1-Core-US-Fundamentals-Data/documentation/about This is vastly simpler than earlier versions where I got a subset of the indicators one by one. Args: ticker: A string representing the stock. dimension: A string representing the timeframe for which data is required. For the SF0 database only 'MRY' or most recent yearly is supported. For the SF1 database available options are: MRY, MRQ, MRT,ARY,ARQ,ART periods: An integer representing the number of years of data. Returns: A dataframe containing all of the indicators for this Ticker. The indicators are the columns and the time periods are the rows. This is after all the next gen refactored version # self.stmnt_df = quandl.get_table('SHARADAR/SF1', ticker=['AAPL','INTC'],dimension="MRY") # We'll get all of the data for a given ticker, then filter what we give back # At some point the SF0 table was removed and if we just have an "SF0" database access # we still need to request access to SHARADAR/SF1 table. 
Their API takes care of # restricting access to the SF0 limited dataset # Sort so that earliest dates will now be at the top # Let's create separate income statement dataframe, cf, balance and metrics dataframes # by filtering out from the all_inds datafarame. Returns a transposed and formatted partial income statement dataframe with description added ready for printing to an excel sheet, or possible via html in the future. The original dataframe is in a format where the column headers are the indicators and the rows are the per year or per quarter samples. This is the desired format for performing operations on the data, it's so-called clean-data. For visualing in a spreadsheet we want the columns to be the dates and the rows to be the indicators. Hence the need to transpose. Returns: A dataframe Returns a transposed and formatted subset of the cash flow statement dataframe with description added ready for printing to an excel sheet, or possible via html in the future. Returns: A dataframe Returns a transposed and formatted subset of the balance sheet statement dataframe with description addedready for printing to an excel sheet, or possible via html in the future. Returns: A dataframe Returns a transposed and formatted subset of sharadar metrics and ratios statement dataframe with description added ready for printing to an excel sheet, or possible via html in the future. Returns: A dataframe Returns a transposed and formatted calculated ratios dataframe with description added ready for printing to an excel sheet, or possible via html in the future. Returns: A dataframe Transpose the df so that we have the indicators as rows and datefields as columns Side effects. Modifies the passed in dataframe. # As a precursor to making the datefields as columns we set the datefield as the index. 
# We then transpose the dataframe such that the index becomes the columns and the columns become rows # Transpose to get this dataframe ready for printing # Convert the df so that we have the indicators as the index and datefields as columns # The columns are of a dateTime type, we need them to be text in order for the dataframe # to excel module to work. # Now we want two additional descriptive columns in the dataframe. # We want the description of the indicator in one column and the Sharadar code # in another. # Note that dictionary keys, in this case the Sharadar Indicator code # becomes the index of the newly created Pandas series. The values become the data associated # with these keys. # The insert method is what enables us to place the column exactly where we want it. # For the second column, the sharadar codes, we can get the manes of these from the index of our # dataframe. So a variation on the previous case where we inserted a column from a PD series. Here # we point to an array like item which the insert method accepts, that of the dataframe index. After # the transpose this contains what were the column i.e the Sharadar indicators. # # Create a new column using the values from the index, similar to doing a .reset_index # but uses an explicit column instead of column 0 which reset-index does. Obtain some financial ratios and metrics skewed towards credit analysis. - Some suggested as useful in the book by <NAME> Alvarez: 'Financial Statement Analysis'. - Others are credit sanity checking or rough approximations to REIT specific ratios. Returns: A dataframe containing financial ratios. # Note updated to work on our data in the form where the rows as the dates and the columns are the metricss. # we build up each metric as a new column in the calc_ratios df. 
# initialize an empty calc_ratios_df but using the same indexing as our existing dataframes which we've pulled # in from sharadar # This datekey column will be needed later when we transpose the dataframe # The sharadar returned dataframes included a datekey column as part of the results. # self.calc_ratios_df["datekey"] = self.i_stmnt_df["datekey"] # A nicer way is to insert the datekey column as the first column of # our synthetically created calc_ratios_df. This way it's easier to # see for debug and is in the same position in col 1 as the dfs # returned by sharadar # Change nan to None and inf to a big recognizable number. # Debt to Cash Flow From Operations # Debt to Equity # Debt to ebitda # Debt to ebitda minus CapEx # capex is returned from Sharadar as a -ve number, hence we need to add this to # subtract capex # Net Debt to ebitda # Net Debt to ebitda minus CapEx # capex is returned from Sharadar as a -ve number, hence we need to add this to # subtract capex # Depreciation to Cash Flow From Operations Pg 278. # self.database = database # Times Interest coverage aka fixed charge coverage Pg 278. # (Net Income + Income taxes + Interest Expense)/(Interest expense + Capitalized Interest) # Cannot see how to get capitalized interest from the API so that term is excluded. # This is the same as ebit to Interest Expense # Recall that capex is returned from Sharadar as a -ve number. # capex is returned from Quandl as a -ve number, hence we add this to # subtract capex # negating since ncfdiv is returned as a negative number # TODO add some conditional logig to use the fullydiluted shares value when it # is provided # <NAME>, author of Good Stocks Cheap, definition # of capital employed. He has two defnitions, one where cash is # subtracted and one where it's not. Accrued expenses should be # substracted but Is not available in the Sharadar API, probably a # scour the footnotes thing if really wanted to include this. 
# Pg 290 of Creative Cash Flow Reporting, Mumford et al. # negating since ncfdiv is returned as a negative number # Get the function from switcher dictionary # Execute the function # Locally calculated by this package. For each ratio or metric in this # table, there's a routine to calculate the value from the quandl API provided # statement indicator value. # The first item in each tuple is the Sharadar Code, the second is # a description. # Income Statement Indicator Quandl/Sharadar Codes # Cash Flow Statement Indicator Quandl/Sharadar Codes # Balance Statement Indicator Quandl/Sharadar Codes # Metrics and Ratio Indicator Quandl/Sharadar Codes # ('DE', 'Debt to Equity Ratio'), Needs to be locally calculated when # using TTM figures # evebitda only returned for the MRT period, the default for SF1 # fcf is already levered since CFO already includes the effect of interest # payments. # ("free_cash_flow_levered", 'fcf-Levered: fcf - Interest Expenses'), # The indicators which we'd like to show on a separate summary page # Edit this to customize what we show. # We control the excel conditional formatting by means of a formatting control # asc (ascending) means "Higher is better" desc (descending) "Lower is better" #?") ##0.0") ##0") Accumulate summary values for a given ticker. Args: ticker: The ticker for the stock we are given data for. sum_ind_l: A list of (indicator,value) tuples for a given ticker Writes the accumulated summary_values to the Summary sheet # calculate the size of the table we will need # this is using row,column indexing Will conditionally format each column of data. 
Hard coded with the simple 3_color_scale args: top_left: y,x coordinates of the top left of the table bottom_right: y,x coordinates of the bottom right of the table # "Larger numbers are better" formatting # "Smaller numbers are better" formatting # adjust the top_left coordinates to exclude the table header and the # first column # y_tc stands for y top column, so y coordinate of top of column # x_tc stands for x top column, so x coordinate of top of column # Walk through each of the columns # breakpoint() # unpack the tuples of indicator value # Note we had to replace the infs and Nans prior to this # Create the empty table complete with column headers # We need to create a list of dicts. # Each entry of the form {'header':'Column name'} # breakpoint() Obtains the latest values for a given list of indicators Uses the provided dataframes to lookup the latest in time values for each of the indicators in the provided indicators list Args: ticker: indicators: A list of indicators calc_ratios_df: The calculated ratios dataframe. all_sharadar_inds_df: The dataframe containing the full table of results for a given dimension and ticker from Sharadar Returns: A list of Tuples of indicator, values pairs. # unpack the indicators from the inds_to_summarize # need to add fmt to the thing we pass return and deal wit it all the way downstream Writes a dataframe to an excel worksheet. Args: dframe: A Pandas dataframe. The index must have been promoted to a column (using df.) prior to calling. row: An int, the row to start writing at, zero based. col: An int, the col to start writing at, zero based. sheetname: A string, the desired name for the sheet. dimension: A string representing the timeframe for which data is required. For the SF0 sample database only 'MRY' or most recent yearly is supported. For the SF1 database available options are: MRY, MRQ, MRT,ARY,ARQ,ART use_header: Whether to print the header of the dataframe num_text_cols: The number of columns which contain text. 
The remainder of the columns are assumed to create numeric values. Returns: rows_written: The number of rows written. # logging.debug("write_df_to_excel_sheet: dataframe = %s" % ( dframe.info())) # We need to write out the df first using to_excel to obtain a # worksheet object which we'll then operate on for formatting. # We do not write the header using to_excel but explicitly write # later with Xlsxwriter. # Format the text columns and the numeric ones following these. # Lets figure out CAGR for a given row item # looks like I'll need to use xl_rowcol_to_cell() # We want the number of periods between the years. # Theres a quarter between each reporting period # worksheet.write(cagr_row, cagr_col, formula) # Sparklines make data trends easily visible # Get a stmnt dataframe, a quandl ratios dataframe and our calculated ratios dataframe # for each of these frames write into a separate worksheet per stock # Now calculate some of the additional ratios for credit analysis # Now for the metrics and ratios from the quandl API # stocks = ['SPG', 'WPC', 'KIM', 'SKT', 'NNN', 'STOR'] # stock_xlsx(outfile, stocks, "SF0", 'MRY', periods)
| 2.740186
| 3
|
code/evaluate_annotation.py
|
JonaBenja/lad-assignment1
| 0
|
6629315
|
<reponame>JonaBenja/lad-assignment1<gh_stars>0
import pandas as pd
import glob
import os.path
from itertools import combinations
from sklearn.metrics import cohen_kappa_score, confusion_matrix

nl_terms = ["activist", "politici", "klimaat"]
it_terms = ["attivist", "politici", "climatico"]
categories = ["pos", "neg", "neu"]


def evaluate_annotations(terms, language):
    """Print inter-annotator agreement statistics for each term's sheets.

    For every term, reads all annotation sheets matching
    ``../data/annotations/<language>/annotationsheet_<term>_<annotator>.tsv``
    and prints, for each pair of annotators: percentage agreement,
    Cohen's kappa, and the confusion matrix (as a pandas table and as
    Markdown).

    :param terms: list of search terms whose annotation sheets to evaluate
    :param language: language subdirectory of the annotations folder
        (e.g. "nl" or "it")
    """
    for term in terms:
        print(term)
        annotations = {}
        # Read in one "Annotation" column per annotator.
        pattern = "../data/annotations/" + language + "/annotationsheet_" + term + "*.tsv"
        for sheet in glob.glob(pattern):
            filename, extension = os.path.basename(sheet).split(".")
            prefix, term, annotator = filename.split("_")
            # keep_default_na=False so empty cells stay strings instead of NaN.
            annotation_data = pd.read_csv(sheet, sep="\t", header=0, keep_default_na=False)
            annotations[annotator] = annotation_data["Annotation"]
        # Compare every pair of annotators.
        for annotator_a, annotator_b in combinations(annotations.keys(), 2):
            agreement = [anno1 == anno2 for anno1, anno2 in
                         zip(annotations[annotator_a], annotations[annotator_b])]
            percentage = sum(agreement)/len(agreement)
            print(annotator_a, annotator_b)
            print("Percentage Agreement: %.2f" %percentage)
            kappa = cohen_kappa_score(annotations[annotator_a], annotations[annotator_b],
                                      labels=categories)
            print("Cohen's Kappa: %.2f" %kappa)
            confusions = confusion_matrix(annotations[annotator_a], annotations[annotator_b],
                                          labels=categories)
            pandas_table = pd.DataFrame(confusions, index=["positive", "negative", "neutral"])
            print('Pandas')
            print(pandas_table)
            print('Markdown')
            print(pandas_table.to_markdown())


# BUGFIX: the original script globbed the "nl" directory for the Italian
# terms as well (copy-paste error); the Italian sheets live under .../it.
evaluate_annotations(nl_terms, "nl")
evaluate_annotations(it_terms, "it")
|
import pandas as pd
import glob
import os.path
from itertools import combinations
from sklearn.metrics import cohen_kappa_score, confusion_matrix

nl_terms = ["activist", "politici", "klimaat"]
it_terms = ["attivist", "politici", "climatico"]
categories = ["pos", "neg", "neu"]


def evaluate_annotations(terms, language):
    """Print inter-annotator agreement statistics for each term's sheets.

    For every term, reads all annotation sheets matching
    ``../data/annotations/<language>/annotationsheet_<term>_<annotator>.tsv``
    and prints, for each pair of annotators: percentage agreement,
    Cohen's kappa, and the confusion matrix (as a pandas table and as
    Markdown).

    :param terms: list of search terms whose annotation sheets to evaluate
    :param language: language subdirectory of the annotations folder
        (e.g. "nl" or "it")
    """
    for term in terms:
        print(term)
        annotations = {}
        # Read in one "Annotation" column per annotator.
        pattern = "../data/annotations/" + language + "/annotationsheet_" + term + "*.tsv"
        for sheet in glob.glob(pattern):
            filename, extension = os.path.basename(sheet).split(".")
            prefix, term, annotator = filename.split("_")
            # keep_default_na=False so empty cells stay strings instead of NaN.
            annotation_data = pd.read_csv(sheet, sep="\t", header=0, keep_default_na=False)
            annotations[annotator] = annotation_data["Annotation"]
        # Compare every pair of annotators.
        for annotator_a, annotator_b in combinations(annotations.keys(), 2):
            agreement = [anno1 == anno2 for anno1, anno2 in
                         zip(annotations[annotator_a], annotations[annotator_b])]
            percentage = sum(agreement)/len(agreement)
            print(annotator_a, annotator_b)
            print("Percentage Agreement: %.2f" %percentage)
            kappa = cohen_kappa_score(annotations[annotator_a], annotations[annotator_b],
                                      labels=categories)
            print("Cohen's Kappa: %.2f" %kappa)
            confusions = confusion_matrix(annotations[annotator_a], annotations[annotator_b],
                                          labels=categories)
            pandas_table = pd.DataFrame(confusions, index=["positive", "negative", "neutral"])
            print('Pandas')
            print(pandas_table)
            print('Markdown')
            print(pandas_table.to_markdown())


# BUGFIX: the original script globbed the "nl" directory for the Italian
# terms as well (copy-paste error); the Italian sheets live under .../it.
evaluate_annotations(nl_terms, "nl")
evaluate_annotations(it_terms, "it")
|
en
| 0.734695
|
# Read in the data # Read in annotations #print(confusions) # Read in the data # Read in annotations
| 2.63855
| 3
|
getLessons.py
|
Hammania689/ttmik_webscrapper
| 0
|
6629316
|
<reponame>Hammania689/ttmik_webscrapper<gh_stars>0
import sys
import requests
import urllib3
from bs4 import BeautifulSoup
from pathlib import Path
def download_all_lessons(download_root_dir="/home/hameed/Downloads/TTMIK_Unit_Lessons"):
    """Prepare the download directory for all 9 levels of TTMIK lessons.

    Currently this only creates the target directory: the actual scraping
    of the curriculum page and the per-lesson mp3/pdf downloads are still
    TODO (the original per-lesson download links on talktomeinkorean.com
    no longer resolve).

    :param download_root_dir: directory the lesson files will be saved
        under; created (with parents) if it does not exist. Defaults to
        the original hard-coded location.
    :return: the pathlib.Path of the prepared download directory.
    """
    download_path = Path(download_root_dir)
    download_path.mkdir(parents=True, exist_ok=True)

    # TODO: scrape http://talktomeinkorean.com/curriculum/ for the real
    # per-lesson links, then for each of the 9 levels download the 30
    # lesson mp3/pdf pairs into <download_path>/<level>/ with requests,
    # e.g.:
    #   mp3 = requests.get(mp3_site)
    #   with open(path / (lesson + '.mp3'), 'wb') as m:
    #       m.write(mp3.content)
    #
    # BUGFIX: the original draft executed `print(lessons)` while every
    # line that defined `lessons` was commented out, so the function
    # always crashed with a NameError; that dead code has been removed.
    return download_path


if __name__ == '__main__':
    # Guarded so that importing this module does not touch the filesystem.
    download_all_lessons()
|
import sys
import requests
import urllib3
from bs4 import BeautifulSoup
from pathlib import Path
def download_all_lessons(download_root_dir="/home/hameed/Downloads/TTMIK_Unit_Lessons"):
    """Prepare the download directory for all 9 levels of TTMIK lessons.

    Currently this only creates the target directory: the actual scraping
    of the curriculum page and the per-lesson mp3/pdf downloads are still
    TODO (the original per-lesson download links on talktomeinkorean.com
    no longer resolve).

    :param download_root_dir: directory the lesson files will be saved
        under; created (with parents) if it does not exist. Defaults to
        the original hard-coded location.
    :return: the pathlib.Path of the prepared download directory.
    """
    download_path = Path(download_root_dir)
    download_path.mkdir(parents=True, exist_ok=True)

    # TODO: scrape http://talktomeinkorean.com/curriculum/ for the real
    # per-lesson links, then for each of the 9 levels download the 30
    # lesson mp3/pdf pairs into <download_path>/<level>/ with requests,
    # e.g.:
    #   mp3 = requests.get(mp3_site)
    #   with open(path / (lesson + '.mp3'), 'wb') as m:
    #       m.write(mp3.content)
    #
    # BUGFIX: the original draft executed `print(lessons)` while every
    # line that defined `lessons` was commented out, so the function
    # always crashed with a NameError; that dead code has been removed.
    return download_path


if __name__ == '__main__':
    # Guarded so that importing this module does not touch the filesystem.
    download_all_lessons()
|
en
| 0.505842
|
:return: All lesson for all 9 levels of TTMIK :) # Start level index # Set path to where files will be downloaded to # Set Download url parent link # TODO : Change download root link back to curriculm page # download_root = "http://talktomeinkorean.com/curriculum/" # # downloaded_root_page = requests.get(download_root) # root_page_text = downloaded_root_page.text # # root_page = BeautifulSoup(root_page_text, "html.parser") # links = root_page.find('div',{'class':'entry-content'}) # # lessons = links.find() # # For the 9 levels available # for i in range(9): # # Set the level's path and set lesson counter to 1 # path = download_path / str(level) # path.mkdir(parents=True, exist_ok=True) # lesson_num = 1 # # # 30 Lessons in each level # while lesson_num <= 30: # # # Name of the current lesson # lesson = 'l'+ str(level) + 'l' + str(lesson_num) # # # ############################################################# # # TODO : Change the mp3_site and pdf_site variables to match the actual download link from the page # # The curriculm page will # # # # Append the download link root to point to current lesson # # HTTP Get Method with requests # # Write content to specified file # mp3_site = download_root + lesson + '.mp3' # mp3 = requests.get(mp3_site) # with open((path / str(lesson + '.mp3')), 'wb') as m: # m.write(mp3.content) # # pdf_site = download_root + lesson + '.pdf' # pdf = requests.get(pdf_site) # with open((path / str(lesson + '.pdf')), 'wb') as p: # p.write(pdf.content) # ############################################################# # # # Move on to the next lesson # lesson_num += 1 # # # Move on to the next unit # level += 1 # root_url = "http://talktomeinkorean.com/l1l2.pdf/" # source_code = requests.get(root_url) # plain_text = source_code.text # root_page = BeautifulSoup(plain_text, "html.parser") # lessons = root_page.find('div',{'class':'entry-content'}) # lessons = lessons.findAll('a') # while lesson_num <= 30: # source_code = requests.get(root_url) # 
plain_text = source_code.text # print(len(plain_text)) # # root_page = BeautifulSoup(plain_text, "html.parser") # # print(root_page.prettify()) # for link in root_page.findAll('div',): # # print(link.get('href')) # print(link.p) # # lesson_num += 1 # print(lesson_num)
| 3.387711
| 3
|
scripts/equals.py
|
anto2318/ramdapy
| 0
|
6629317
|
<gh_stars>0
def equals(a, b=0):
    """Return True when ``a`` compares equal to ``b`` (defaults to 0)."""
    are_equal = (a == b)
    return are_equal


if __name__ == '__main__':
    # Demo: comparing two equal integers prints True.
    print(equals(3,3))
|
def equals(a, b=0):
    """Return True when ``a`` compares equal to ``b`` (defaults to 0)."""
    are_equal = (a == b)
    return are_equal


if __name__ == '__main__':
    # Demo: comparing two equal integers prints True.
    print(equals(3,3))
|
none
| 1
| 3.004136
| 3
|
|
deltasherlock/common/fingerprinting.py
|
deltasherlock/utility-package
| 0
|
6629318
|
<reponame>deltasherlock/utility-package<filename>deltasherlock/common/fingerprinting.py
# DeltaSherlock. See README.md for usage. See LICENSE for MIT/X11 license info.
# pylint: disable=W0201,W1401,R0903
"""
DeltaSherlock fingerprinting module. Contains methods for generating a
filesystem fingerprint based on a changeset
"""
from math import sqrt
from enum import Enum, unique
from gensim.models import Word2Vec
import numpy as np
from deltasherlock.common.changesets import Changeset
@unique
class FingerprintingMethod(Enum):
    """
    An enumerated type containing representations of each fingerprinting method.
    Notice how adding the integer values of two or more "basic" methods results
    in the appropriate "combination" method. Also, all odd values incorporate a
    histogram fingerprint, while the evens do not. This numbering scheme should
    remain as backward compatible as possible, since these values are used in
    the server's database (see source for FingerprintWrapper)
    """
    undefined = 0
    histogram = 1
    filetree = 2
    histofiletree = 3
    neighbor = 4
    histoneighbor = 5
    filetreeneighbor = 6
    combined = 7

    def requires_filetree_dict(self):
        """True when this method incorporates a filetree fingerprint."""
        cls = type(self)
        return self.value in (cls.filetree.value, cls.histofiletree.value, cls.combined.value)

    def requires_neighbor_dict(self):
        """True when this method incorporates a neighbor fingerprint."""
        cls = type(self)
        return self.value in (cls.neighbor.value, cls.histoneighbor.value, cls.combined.value)
class Fingerprint(np.ndarray):
    """
    A wrapper around a numpy array designed to handle numerical vector
    representations of changesets. The best way to instantiate a Fingerprint
    is by providing a raw numpy array and FingerprintingMethod as parameters
    (ie. fp = Fingerprint(arr, method=neighbor)), and then manually setting the
    remaining attributes
    :attribute method: the FingerprintingMethod used to create this fingerprint
    :attribute labels: a list of labels contained within this fingerprint
    :attribute predicted_quantity: the quantity of events (ie an application
    installation) that probably occurred during the recording interval.
    Determined by the original Changeset
    :attribute db_id: an optional identifier populated when a fingerprint is
    "unwrapped" from the database
    :attribute cs_db_id: an optional identifier populated when a fingerprint is
    "unwrapped" from the database. This points to the fingerprint's origin
    changeset
    Adapted from https://docs.scipy.org/doc/numpy/user/basics.subclassing.html
    """
    def __new__(cls, input_array, method=FingerprintingMethod.undefined):
        """
        Our "constructor." Required for subclassing of numpy array. See link in
        class docstring
        :param input_array: an array-like object holding the fingerprint vector
        :param method: the FingerprintingMethod that produced input_array
        """
        # Input array is an already formed ndarray instance
        # We first cast to be our class type
        obj = np.asarray(input_array).view(cls)
        # add the new attribute to the created instance
        obj.method = method
        obj.labels = []
        obj.predicted_quantity = -1
        obj.db_id = None
        obj.cs_db_id = None
        # Finally, we must return the newly created object:
        return obj
    def __array_finalize__(self, obj):
        """
        Required for subclassing of ndarray. See link in class docstring.
        numpy invokes this on every view cast or slice, so the custom
        attributes are copied from the source object (or defaulted) here.
        """
        if obj is None:
            # Explicit construction via __new__, which sets the attributes.
            return
        # Redeclare all member attributes here, for subclassing purposes
        self.method = getattr(obj, 'method', FingerprintingMethod.undefined)
        self.labels = getattr(obj, 'labels', [])
        self.predicted_quantity = getattr(obj, 'predicted_quantity', -1)
        self.db_id = getattr(obj, 'db_id', None)
        self.cs_db_id = getattr(obj, 'cs_db_id', None)
    def __reduce__(self):
        """
        Reduction method, for pickling. We have to override this because ndarray's
        __reduce__ does not handle our custom attributes. Adapted from:
        http://stackoverflow.com/a/26599346
        """
        # Call parent __reduce__
        pickled_state = super().__reduce__()
        # Create our own tuple to pass to __setstate__
        # NOTE: the order of the 5 appended attributes must stay in sync
        # with __setstate__'s negative indexing below.
        new_state = pickled_state[
            2] + (self.method, self.labels, self.predicted_quantity, self.db_id, self.cs_db_id)
        # Return a tuple that replaces the parent's __setstate__ tuple with our
        # own
        return (pickled_state[0], pickled_state[1], new_state)
    def __setstate__(self, state):
        """
        Set-state method, for pickling. We have to override this because ndarray's
        __setstate__ does not handle our custom attributes. Adapted from:
        http://stackoverflow.com/a/26599346
        """
        # Recall our own member attributes from state
        self.method = state[-5]
        self.labels = state[-4]
        self.predicted_quantity = state[-3]
        self.db_id = state[-2]
        self.cs_db_id = state[-1]
        # Call the parent's __setstate__ with the other tuple elements.
        super().__setstate__(state[0:-5])
    def __add__(self, other):
        """
        Allows for "adding" (read: concatenating) of fingerprints.
        Concatenates the underlying vectors, combines the methods and
        labels, and keeps self's predicted_quantity. Adding None returns
        self unchanged, which lets accumulation loops start from None.
        :raises ArithmeticError: if the two methods cannot be combined
        into a valid FingerprintingMethod
        """
        if other is None:
            return self
        # First, concatenate the underlying numpy arrays together
        sum_fp = Fingerprint(np.concatenate([self, other]))
        if self.method.value != other.method.value:
            # Then, try to combine the FingerprintingMethod types
            try:
                sum_fp.method = FingerprintingMethod(
                    self.method.value + other.method.value)
            except ValueError:
                """
                The numbers don't add up! Probably happened because we tried to
                add a basic type fingerprint with an incompatible combination
                type
                """
                raise ArithmeticError("Cannot add incompatible fingerprints")
        else:
            # Identical methods: keep the method as-is rather than summing
            # the enum values (which would yield a wrong combination type).
            sum_fp.method = self.method
        # Then, add the labels together
        sum_fp.labels = self.labels + other.labels
        # Then, adopt the original quantities
        sum_fp.predicted_quantity = self.predicted_quantity
        # All done
        return sum_fp
    def __radd__(self, other):
        """
        "Reverse-add" helper. Required in order to use the += operator and
        so that `None + fingerprint` resolves via __add__.
        """
        return self.__add__(other)
def changeset_to_fingerprint(changeset: Changeset, method: FingerprintingMethod,
                             filetree_dictionary: Word2Vec=None,
                             neighbor_dictionary: Word2Vec=None) -> Fingerprint:
    """
    Primary method of this module. Creates a numerical fingerprint vector
    representation of a Changeset using the specified method. This should always
    be used instead of the __private fingerprint generation functions.
    :param changeset: a closed Changeset object
    :param method: one of the FingerprintingMethod enumerated types
    :param filetree_dictionary: a gensim Word2Vec numerical dictionary;
    required when method.requires_filetree_dict() is True
    :param neighbor_dictionary: a gensim Word2Vec numerical dictionary;
    required when method.requires_neighbor_dict() is True
    :returns: the resulting Fingerprint object
    :raises ValueError: if the changeset is open, the method is undefined,
    or a required dictionary is missing
    """
    # First, a few sanity checks
    if changeset.open:
        raise ValueError("Cannot convert an open changeset to a fingerprint")
    if method == FingerprintingMethod.undefined:
        raise ValueError(
            "Cannot create a fingerprint with an undefined creation method")
    # Start by fetching the file basenames from the changeset
    basenames = changeset.get_basenames()
    # None + Fingerprint resolves via Fingerprint.__radd__, which returns
    # the fingerprint unchanged, so we can accumulate starting from None.
    result_fingerprint = None
    # Then generate a histogram fingerprint
    if method.value % 2 == 1:
        # All odd methods contain a histogram
        result_fingerprint = result_fingerprint + __histogram_fingerprint(basenames)
    # Then generate a filetree fingerprint
    if method.requires_filetree_dict():
        if filetree_dictionary is None:
            raise ValueError("Missing filetree w2v dictionary")
        result_fingerprint = result_fingerprint + __filetree_fingerprint(
            basenames, filetree_dictionary)
    # Then the neighbor fingerprint
    if method.requires_neighbor_dict():
        if neighbor_dictionary is None:
            raise ValueError("Missing neighbor w2v dictionary")
        result_fingerprint = result_fingerprint + __neighbor_fingerprint(
            basenames, neighbor_dictionary)
    # Then add the labels
    result_fingerprint.labels = changeset.labels
    # Then use quantity prediction
    result_fingerprint.predicted_quantity = changeset.predicted_quantity
    # Then add the origin changeset's database ID
    # (older Changeset objects may predate the db_id attribute)
    try:
        result_fingerprint.cs_db_id = changeset.db_id
    except AttributeError:
        result_fingerprint.cs_db_id = None
    # All done!
    return result_fingerprint
def __histogram_fingerprint(basenames: list, num_bins: int=200) -> Fingerprint:
    """
    Creates an ASCII histogram fingerprint of the characters from a list of
    basenames.
    :param basenames: the list of basenames
    :param num_bins: the number of bins to use in the histogram
    :returns: a normalized histogram wrapped in a Fingerprint
    """
    # Per basename: the sum of the ASCII codes of its alphabetic characters.
    ascii_sums = [sum(ord(ch) for ch in name if ch.isalpha()) for name in basenames]
    # Bin edges: 0, then evenly spaced edges from 200 up to (2000 - step),
    # then a single 10000 catch-all edge for unusually long names.
    lower_edge = 200
    upper_edge = 2000
    step = int((upper_edge - lower_edge) / (int(num_bins) - 1))
    edges = [0] + list(range(lower_edge, upper_edge - step, step)) + [10000]
    # Guard against dividing by zero when there were no basenames at all.
    divisor = len(ascii_sums) or 1
    counts = np.histogram(ascii_sums, bins=edges)[0]
    return Fingerprint(counts * 1.0 / divisor, method=FingerprintingMethod.histogram)
def __w2v_fingerprint_array(basenames: list, w2v_dictionary: Word2Vec) -> np.ndarray:
    """
    Creates an array that could be used to create a Fingerprint using a provided
    word2vec dictionary from a list of basenames. Not meant to be called
    directly; use the __filetree_fingerprint or __neighbor_fingerprint
    wrapper functions instead.
    :param basenames: the list of basenames
    :param w2v_dictionary: a gensim.models.Word2Vec object representing the
    pre-made dictionary
    :returns: a NumPy array that could be used to create a Fingerprint
    """
    accumulator = np.array([0] * 200)
    for basename in basenames:
        # Scrub stray JSON/CSV punctuation from the name, just in case
        basename = basename.rstrip('\",\n').strip('[').strip(' ').strip('\"')
        basename = basename.strip('\,').rstrip(',\"').strip('\t').strip(',')
        # Sum the vectors of every basename the dictionary knows about
        if basename in w2v_dictionary:
            accumulator = w2v_dictionary[basename] + accumulator
    # Normalize to unit length
    magnitude = sqrt((accumulator * accumulator).sum())
    # Guard against dividing the all-zero vector by 0
    if magnitude == 0:
        magnitude = 1
    return accumulator / magnitude
def __filetree_fingerprint(basenames: list, w2v_dictionary: Word2Vec) -> Fingerprint:
    """
    Thin wrapper that builds a FingerprintingMethod.filetree Fingerprint.
    See __w2v_fingerprint_array for details of the computation.
    """
    raw_vector = __w2v_fingerprint_array(basenames, w2v_dictionary)
    return Fingerprint(raw_vector, method=FingerprintingMethod.filetree)
def __neighbor_fingerprint(basenames: list, w2v_dictionary: Word2Vec) -> Fingerprint:
    """
    Thin wrapper that builds a FingerprintingMethod.neighbor Fingerprint.
    See __w2v_fingerprint_array for details of the computation.
    """
    raw_vector = __w2v_fingerprint_array(basenames, w2v_dictionary)
    return Fingerprint(raw_vector, method=FingerprintingMethod.neighbor)
|
# DeltaSherlock. See README.md for usage. See LICENSE for MIT/X11 license info.
# pylint: disable=W0201,W1401,R0903
"""
DeltaSherlock fingerprinting module. Contains methods for generating a
filesystem fingerprint based on a changeset
"""
from math import sqrt
from enum import Enum, unique
from gensim.models import Word2Vec
import numpy as np
from deltasherlock.common.changesets import Changeset
@unique
class FingerprintingMethod(Enum):
    """
    An enumerated type containing representations of each fingerprinting method.
    Notice how adding the integer values of two or more "basic" methods results
    in the appropriate "combination" method. Also, all odd values incorporate a
    histogram fingerprint, while the evens do not. This numbering scheme should
    remain as backward compatible as possible, since these values are used in
    the server's database (see source for FingerprintWrapper)
    """
    undefined = 0
    histogram = 1
    filetree = 2
    histofiletree = 3
    neighbor = 4
    histoneighbor = 5
    filetreeneighbor = 6
    combined = 7

    def requires_filetree_dict(self):
        """True when this method incorporates a filetree fingerprint."""
        cls = type(self)
        return self.value in (cls.filetree.value, cls.histofiletree.value, cls.combined.value)

    def requires_neighbor_dict(self):
        """True when this method incorporates a neighbor fingerprint."""
        cls = type(self)
        return self.value in (cls.neighbor.value, cls.histoneighbor.value, cls.combined.value)
class Fingerprint(np.ndarray):
"""
A wrapper around a numpy array designed to handle numerical vector
representations of changesets. The best way to instantiate a Fingerprint
is by providing a raw numpy array and FingerprintingMethod as parameters
(ie. fp = Fingerprint(arr, method=neighbor)), and then manually setting the
remaining attributes
:attribute method: the FingerprintingMethod used to create this fingerprint
:attribute labels: a list of labels contained within this fingerprint
:attribute predicted_quantity: the quantity of events (ie an application
installation) that probably occurred during the recording interval.
Determined by the original Changeset
:attribute db_id: an optional identifier populated when a fingerprint is
"unwrapped" from the database
:attribute cs_db_id: an optional identifier populated when a fingerprint is
"unwrapped" from the database. This points to the fingerprint's origin
changeset
Adapted from https://docs.scipy.org/doc/numpy/user/basics.subclassing.html
"""
def __new__(cls, input_array, method=FingerprintingMethod.undefined):
"""
Our "constructor." Required for subclassing of numpy array. See link in
class docstring
"""
# Input array is an already formed ndarray instance
# We first cast to be our class type
obj = np.asarray(input_array).view(cls)
# add the new attribute to the created instance
obj.method = method
obj.labels = []
obj.predicted_quantity = -1
obj.db_id = None
obj.cs_db_id = None
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
"""
Required for subclassing of ndarray. See link in class docstring
"""
if obj is None:
return
# Redeclare all member attributes here, for subclassing purposes
self.method = getattr(obj, 'method', FingerprintingMethod.undefined)
self.labels = getattr(obj, 'labels', [])
self.predicted_quantity = getattr(obj, 'predicted_quantity', -1)
self.db_id = getattr(obj, 'db_id', None)
self.cs_db_id = getattr(obj, 'cs_db_id', None)
def __reduce__(self):
"""
Reduction method, for pickling. We have to override this because ndarray's
__reduce__ does not handle our custom attributes. Adapted from:
http://stackoverflow.com/a/26599346
"""
# Call parent __reduce__
pickled_state = super().__reduce__()
# Create our own tuple to pass to __setstate__
new_state = pickled_state[
2] + (self.method, self.labels, self.predicted_quantity, self.db_id, self.cs_db_id)
# Return a tuple that replaces the parent's __setstate__ tuple with our
# own
return (pickled_state[0], pickled_state[1], new_state)
def __setstate__(self, state):
    """
    Set-state method, for pickling. We have to override this because ndarray's
    __setstate__ does not handle our custom attributes. Adapted from:
    http://stackoverflow.com/a/26599346

    :param state: the state tuple built by __reduce__; the last five items
        are our custom attributes, in the order they were appended there
    """
    # Recall our own member attributes from state
    self.method = state[-5]
    self.labels = state[-4]
    self.predicted_quantity = state[-3]
    self.db_id = state[-2]
    self.cs_db_id = state[-1]
    # Call the parent's __setstate__ with the other tuple elements.
    super().__setstate__(state[0:-5])
def __add__(self, other):
    """
    Allows for "adding" (read: concatenating) of fingerprints.

    Concatenates the underlying arrays and the label lists, and combines
    the two FingerprintingMethod values by integer addition (the enum's
    numbering scheme makes two basic methods sum to their combination).

    :param other: another Fingerprint, or None (treated as identity)
    :returns: the combined Fingerprint
    :raises ArithmeticError: if the two methods cannot be combined
    """
    if other is None:
        return self
    # First, concatenate the underlying numpy arrays together
    sum_fp = Fingerprint(np.concatenate([self, other]))
    if self.method.value != other.method.value:
        # Then, try to combine the FingerprintingMethod types
        try:
            sum_fp.method = FingerprintingMethod(
                self.method.value + other.method.value)
        except ValueError:
            """
            The numbers don't add up! Probably happened because we tried to
            add a basic type fingerprint with an incompatible combination
            type
            """
            raise ArithmeticError("Cannot add incompatible fingerprints")
    else:
        sum_fp.method = self.method
    # Then, add the labels together
    sum_fp.labels = self.labels + other.labels
    # Then, adopt the original quantities
    # NOTE(review): only self's predicted_quantity is kept; other's is
    # discarded — confirm this asymmetry is intended
    sum_fp.predicted_quantity = self.predicted_quantity
    # All done
    return sum_fp
def __radd__(self, other):
    """
    "Reverse-add" helper. Required in order to use the += operator, and so
    that `None + fingerprint` works (e.g. when a running total starts as
    None, as in changeset_to_fingerprint); __add__ treats None as identity.
    """
    return self.__add__(other)
def changeset_to_fingerprint(changeset: Changeset, method: FingerprintingMethod,
                             filetree_dictionary: Word2Vec=None,
                             neighbor_dictionary: Word2Vec=None) -> Fingerprint:
    """
    Primary method of this module. Creates a numerical fingerprint vector
    representation of a Changeset using the specified method. This should always
    be used instead of the __private fingerprint generation functions.

    :param changeset: a closed Changeset object
    :param method: one of the FingerprintingMethod enumerated types
    :param filetree_dictionary: a gensim Word2Vec numerical dictionary;
        required when the method incorporates a filetree fingerprint
    :param neighbor_dictionary: a gensim Word2Vec numerical dictionary;
        required when the method incorporates a neighbor fingerprint
    :returns: the resulting Fingerprint object
    :raises ValueError: if the changeset is open, the method is undefined,
        or a required dictionary is missing
    """
    # First, a few sanity checks
    if changeset.open:
        raise ValueError("Cannot convert an open changeset to a fingerprint")
    if method == FingerprintingMethod.undefined:
        raise ValueError(
            "Cannot create a fingerprint with an undefined creation method")
    # Start by fetching the file basenames from the changeset
    basenames = changeset.get_basenames()
    # None is a valid starting accumulator: Fingerprint.__add__/__radd__
    # treat None as the identity element.
    result_fingerprint = None
    # Then generate a histogram fingerprint
    if method.value % 2 == 1:
        # All odd methods contain a histogram (see the enum's numbering
        # scheme)
        result_fingerprint = result_fingerprint + __histogram_fingerprint(basenames)
    # Then generate a filetree fingerprint
    if method.requires_filetree_dict():
        if filetree_dictionary is None:
            raise ValueError("Missing filetree w2v dictionary")
        result_fingerprint = result_fingerprint + __filetree_fingerprint(
            basenames, filetree_dictionary)
    # Then the neighbor fingerprint
    if method.requires_neighbor_dict():
        if neighbor_dictionary is None:
            raise ValueError("Missing neighbor w2v dictionary")
        result_fingerprint = result_fingerprint + __neighbor_fingerprint(
            basenames, neighbor_dictionary)
    # Then add the labels
    result_fingerprint.labels = changeset.labels
    # Then use quantity prediction
    result_fingerprint.predicted_quantity = changeset.predicted_quantity
    # Then add the origin changeset's database ID (older Changesets may not
    # carry one)
    try:
        result_fingerprint.cs_db_id = changeset.db_id
    except AttributeError:
        result_fingerprint.cs_db_id = None
    # All done!
    return result_fingerprint
def __histogram_fingerprint(basenames: list, num_bins: int=200) -> Fingerprint:
    """
    Creates an ASCII histogram fingerprint of the characters from a list of
    basenames.

    :param basenames: the list of basenames
    :param num_bins: the number of bins to use in the histogram
    :returns: a normalized NumPy histogram wrapped in a Fingerprint
    """
    # Per basename: sum of the ASCII codes of its alphabetic characters
    ascii_sums = [sum(ord(c) for c in name if c.isalpha())
                  for name in basenames]
    # Bin edges: a catch-all low bin [0, 200), evenly spaced interior
    # edges, and a catch-all high bin ending at 10000
    min_bin, max_bin = 200, 2000
    bin_size = int((max_bin - min_bin) / (int(num_bins) - 1))
    edges = [0] + list(range(min_bin, max_bin - bin_size, bin_size)) + [10000]
    # Normalize by the number of basenames (guard against division by zero
    # for an empty input)
    count = len(ascii_sums) or 1
    normalized = np.histogram(ascii_sums, bins=edges)[0] * 1.0 / count
    return Fingerprint(normalized, method=FingerprintingMethod.histogram)
def __w2v_fingerprint_array(basenames: list, w2v_dictionary: Word2Vec) -> np.ndarray:
    """
    Creates an array that could be used to create a Fingerprint using a provided
    word2vec dictionary from a list of basenames. This function should not be
    used directly; instead, use either of the __filetree_fingerprint or
    __neighbor_fingerprint wrapper functions.

    :param basenames: the list of basenames
    :param w2v_dictionary: a gensim.models.Word2Vec object representing the
        pre-made dictionary
    :returns: a NumPy array (unit-normalized sum of word vectors)
    """
    accumulated = np.array([0] * 200)
    for name in basenames:
        # Clean up the basenames, just in case (stray quotes, brackets,
        # commas and whitespace from upstream serialization)
        cleaned = name.rstrip('\",\n').strip('[').strip(' ').strip('\"')
        cleaned = cleaned.strip('\,').rstrip(',\"').strip('\t').strip(',')
        # Now look up each basename in the dictionary; unknown names are
        # simply skipped
        if cleaned in w2v_dictionary:
            accumulated = accumulated + w2v_dictionary[cleaned]
    # Normalize to unit length, guarding against division by zero when no
    # basename matched the dictionary
    magnitude = sqrt((accumulated * accumulated).sum())
    if magnitude == 0:
        magnitude = 1
    return accumulated / magnitude
def __filetree_fingerprint(basenames: list, w2v_dictionary: Word2Vec) -> Fingerprint:
    """
    Wrapper that builds a Fingerprint tagged with
    FingerprintingMethod.filetree. See __w2v_fingerprint_array for details.
    """
    vector = __w2v_fingerprint_array(basenames, w2v_dictionary)
    return Fingerprint(vector, method=FingerprintingMethod.filetree)
def __neighbor_fingerprint(basenames: list, w2v_dictionary: Word2Vec) -> Fingerprint:
    """
    Wrapper that builds a Fingerprint tagged with
    FingerprintingMethod.neighbor. See __w2v_fingerprint_array for details.
    """
    vector = __w2v_fingerprint_array(basenames, w2v_dictionary)
    return Fingerprint(vector, method=FingerprintingMethod.neighbor)
|
en
| 0.752497
|
# DeltaSherlock. See README.md for usage. See LICENSE for MIT/X11 license info. # pylint: disable=W0201,W1401,R0903 DeltaSherlock fingerprinting module. Contains methods for generating a filesystem fingerprint based on a changeset An enumerated type containing representations of each fingerprinting method. Notice how adding the integer values of two or more "basic" methods results in the appropriate "combination" method. Also, all odd values incorporate a histogram fingerprint, while the evens do not. This numbering scheme should remain as backward compatible as possible, since these values are used in the server's database (see source for FingerprintWrapper) A wrapper around a numpy array designed to handle numerical vector representations of changesets. The best way to instantiate a Fingerprint is by providing a raw numpy array and FingerprintingMethod as parameters (ie. fp = Fingerprint(arr, method=neighbor)), and then manually setting the remaining attributes :attribute method: the FingerprintingMethod used to create this fingerprint :attribute labels: a list of labels contained within this fingerprint :attribute predicted_quantity: the quantity of events (ie an application installation) that probably occurred during the recording interval. Determined by the original Changeset :attribute db_id: an optional identifier populated when a fingerprint is "unwrapped" from the database :attribute cs_db_id: an optional identifier populated when a fingerprint is "unwrapped" from the database. This points to the fingerprint's origin changeset Adapted from https://docs.scipy.org/doc/numpy/user/basics.subclassing.html Our "constructor." Required for subclassing of numpy array. See link in class docstring # Input array is an already formed ndarray instance # We first cast to be our class type # add the new attribute to the created instance # Finally, we must return the newly created object: Required for subclassing of ndarray. 
See link in class docstring # Redeclare all member attributes here, for subclassing purposes Reduction method, for pickling. We have to override this because ndarray's __reduce__ does not handle our custom attributes. Adapted from: http://stackoverflow.com/a/26599346 # Call parent __reduce__ # Create our own tuple to pass to __setstate__ # Return a tuple that replaces the parent's __setstate__ tuple with our # own Set-state method, for pickling. Wee have to override this because ndarray's __setstate__ does not handle our custom attributes. Adapted from: http://stackoverflow.com/a/26599346 # Recall our own member attributes from state # Call the parent's __setstate__ with the other tuple elements. Allows for "adding" (read: concatenating) of fingerprints # First, concatenate the underlying numpy arrays together # Then, try to combine the FingerprintingMethod types The numbers don't add up! Probably happened because we tried to add a basic type fingerprint with an incompatible combination type # Then, add the labels together # Then, adopt the original quantities # All done "Reverse-add" helper. Required in order to use the += operator Primary method of this module. Creates a numerical fingerprint vector representation of a Changeset using the specified method. This should always be used instead of the __private fingerprint generation functions. :param changeset: a closed Changeset object :param method: one of the FingerprintingMethod enumerated types :param w2v_dictionary: a gensim Word2Vec object containing a numerical dictionary. 
This required if using anything other than the histogram method :returns: the resulting Fingerprint object # First, a few sanity checks # Start by fetching the file basenames from the changeset # Then generate a histogram fingerprint # All odd methods contain a histogram # Then generate a filetree fingerprint # Then the neighbor fingerprint # Then add the labels # Then use quantity prediction # Then add the origin changeset's database ID # All done! Creates an ASCII histogram fingerprint of the characters from a list of basenames. :param basenames: the list of basenames :param numBins: The number of bins to use in the histogram :returns: a normalized NumPy histogram # list of ASCII sum # Normalized Hist # length of ASCII sum list # Normalized hist Creates an array that could be used to create a Fingerprint using a provided word2vec dictionary from a list of basenames. This function should not be used directly; instead, use either of the the __filetree_fingerprint or __neighbor_fingerprint wrapper functions. :param basenames: the list of basenames :param w2v_dictionary: a gensim.models.Word2Vec object representing the pre-made dictionary :returns: a NumPy array that could be used to create a Fingerprint # Clean up the basenames, just in case # Now look up each basename in the dictionary # Normalization Math # Hack to make sure fingerprint doesn't get divided by 0 A wrapper function that creates a Fingerprint using FingerprintingMethod.filetree. See __w2v_fingerprint_array for more details A wrapper function that creates a Fingerprint using FingerprintingMethod.neighbor. See __w2v_fingerprint_array for more details
| 2.515005
| 3
|
manga_py/providers/shakai_ru.py
|
tgaugry/manga-py
| 0
|
6629319
|
<reponame>tgaugry/manga-py<filename>manga_py/providers/shakai_ru.py
from manga_py.provider import Provider
from .helpers.std import Std
class ShakaiRu(Provider, Std):
    """Manga provider for shakai.ru, driven by the site's JSON API."""
    _api_url = 'http://shakai.ru/take/api-manga/request/shakai'

    def get_chapter_index(self) -> str:
        # Normalize the underscore-separated 'data-first' index to dashes
        return self.chapter.get('data-first').replace('_', '-')

    def get_content(self):
        # The numeric manga id is extracted from the page URL path
        manga_id = self._get_name(r'/manga[^/]*/(\d+)')
        payload = {
            'dataRun': 'api-manga',
            'dataRequest': manga_id
        }
        response = str(self.http_post(self._api_url, data=payload))
        return self.json.loads(response)

    def get_manga_name(self) -> str:
        # Index 3 of the API's 'post' list is used as the name (presumably
        # the title — confirm against a live API response); fall back to
        # the numeric id when the list is too short.
        post = self.content.get('post', [])
        manga_id = self._get_name(r'/manga[^/]*/(\d+)')
        name = post[3] if len(post) > 3 else manga_id
        return name.split('/')[0].strip()

    def get_chapters(self):
        # Reverse the API's chapter order
        return self.content.get('data', [])[::-1]

    def get_files(self):
        chapter = self.chapter
        return chapter.get('data-second', []) if isinstance(chapter, dict) else []

    def get_cover(self):
        pass  # FIXME HOME

    def book_meta(self) -> dict:
        # todo meta
        pass


main = ShakaiRu
|
from manga_py.provider import Provider
from .helpers.std import Std
class ShakaiRu(Provider, Std):
    """Manga provider for shakai.ru, driven by the site's JSON API."""
    _api_url = 'http://shakai.ru/take/api-manga/request/shakai'

    def get_chapter_index(self) -> str:
        # Normalize the underscore-separated 'data-first' index to dashes
        return self.chapter.get('data-first').replace('_', '-')

    def get_content(self):
        # The numeric manga id is extracted from the page URL path
        manga_id = self._get_name(r'/manga[^/]*/(\d+)')
        payload = {
            'dataRun': 'api-manga',
            'dataRequest': manga_id
        }
        response = str(self.http_post(self._api_url, data=payload))
        return self.json.loads(response)

    def get_manga_name(self) -> str:
        # Index 3 of the API's 'post' list is used as the name (presumably
        # the title — confirm against a live API response); fall back to
        # the numeric id when the list is too short.
        post = self.content.get('post', [])
        manga_id = self._get_name(r'/manga[^/]*/(\d+)')
        name = post[3] if len(post) > 3 else manga_id
        return name.split('/')[0].strip()

    def get_chapters(self):
        # Reverse the API's chapter order
        return self.content.get('data', [])[::-1]

    def get_files(self):
        chapter = self.chapter
        return chapter.get('data-second', []) if isinstance(chapter, dict) else []

    def get_cover(self):
        pass  # FIXME HOME

    def book_meta(self) -> dict:
        # todo meta
        pass


main = ShakaiRu
|
es
| 0.290124
|
# FIXME HOME # todo meta
| 2.686996
| 3
|
setup.py
|
coetaur0/LEAN
| 2
|
6629320
|
<reponame>coetaur0/LEAN
from setuptools import setup

# Package metadata for the LEAN NLI model.
setup(name='LEAN',
      version='0.0.1',
      # NOTE(review): URL points at the ESIM repository — confirm intended
      url='https://github.com/coetaur0/ESIM',
      license='Apache 2',
      author='<NAME>',
      author_email='<EMAIL>',
      description='Implementation of the LEAN model for NLI with PyTorch',
      packages=[
          'lean'
      ],
      install_requires=[
          'wget',
          'numpy',
          'nltk',
          'matplotlib',
          'tqdm',
          'torch'
      ])
|
from setuptools import setup

# Package metadata for the LEAN NLI model.
setup(name='LEAN',
      version='0.0.1',
      # NOTE(review): URL points at the ESIM repository — confirm intended
      url='https://github.com/coetaur0/ESIM',
      license='Apache 2',
      author='<NAME>',
      author_email='<EMAIL>',
      description='Implementation of the LEAN model for NLI with PyTorch',
      packages=[
          'lean'
      ],
      install_requires=[
          'wget',
          'numpy',
          'nltk',
          'matplotlib',
          'tqdm',
          'torch'
      ])
|
none
| 1
| 1.175131
| 1
|
|
Python3/636.py
|
rakhi2001/ecom7
| 854
|
6629321
|
__________________________________________________________________________________________________
sample 72 ms submission
class Solution:
    def exclusiveTime(self, n: 'int', logs: 'List[str]') -> 'List[int]':
        """Return the exclusive run time of each of n functions, given
        single-threaded call logs of the form 'id:start|end:timestamp'."""
        exclusive = [0] * n
        call_stack = []
        for entry in logs:
            fid, action, ts = entry.split(':')
            fid, ts = int(fid), int(ts)
            if action == 'start':
                # Charge the caller for the slice elapsed before this call
                if call_stack:
                    exclusive[call_stack[-1]] += ts - prev
                call_stack.append(fid)
                prev = ts
            else:
                # 'end' timestamps are inclusive, hence the +1
                call_stack.pop()
                exclusive[fid] += ts - prev + 1
                prev = ts + 1
        return exclusive
__________________________________________________________________________________________________
sample 12952 kb submission
from collections import deque
class Solution:
    def exclusiveTime(self, n: int, logs: List[str]) -> List[int]:
        """Exclusive-time variant that tracks, per stack frame, the time
        accumulated so far and whether the frame was "cut off" by a nested
        call, so the inclusive 'end' tick is only counted once."""
        # Each frame: [task id, time accumulated so far, cut-off flag]
        stack = deque()  # task and total time accumulated so far and whether it is cut off
        times = [0] * n
        last_time = 0
        for log in logs:
            task, state, time = log.split(':')
            task = int(task)
            time = int(time)
            is_end = state == 'end'
            if not stack:
                stack.append([task, 0, False])
            elif is_end:
                # Close the frame: accumulated time plus the final slice
                times[task] += stack[-1][1] + time - last_time
                # +1 for the inclusive 'end' tick, unless a cut-off already
                # accounted for it
                if not stack[-1][2]:
                    times[task] += 1
                del stack[-1]
            else:  # start
                stack[-1][1] += time - last_time
                if stack[-1][2]:  # if cut off, last time is closing, which takes up 1, so subtract it
                    stack[-1][1] -= 1
                stack[-1][2] = True
                stack.append([task, 0, False])
            last_time = time
        return times
__________________________________________________________________________________________________
|
__________________________________________________________________________________________________
sample 72 ms submission
class Solution:
    def exclusiveTime(self, n: 'int', logs: 'List[str]') -> 'List[int]':
        """Return the exclusive run time of each of n functions, given
        single-threaded call logs of the form 'id:start|end:timestamp'."""
        exclusive = [0] * n
        call_stack = []
        for entry in logs:
            fid, action, ts = entry.split(':')
            fid, ts = int(fid), int(ts)
            if action == 'start':
                # Charge the caller for the slice elapsed before this call
                if call_stack:
                    exclusive[call_stack[-1]] += ts - prev
                call_stack.append(fid)
                prev = ts
            else:
                # 'end' timestamps are inclusive, hence the +1
                call_stack.pop()
                exclusive[fid] += ts - prev + 1
                prev = ts + 1
        return exclusive
__________________________________________________________________________________________________
sample 12952 kb submission
from collections import deque
class Solution:
    def exclusiveTime(self, n: int, logs: List[str]) -> List[int]:
        """Exclusive-time variant that tracks, per stack frame, the time
        accumulated so far and whether the frame was "cut off" by a nested
        call, so the inclusive 'end' tick is only counted once."""
        # Each frame: [task id, time accumulated so far, cut-off flag]
        stack = deque()  # task and total time accumulated so far and whether it is cut off
        times = [0] * n
        last_time = 0
        for log in logs:
            task, state, time = log.split(':')
            task = int(task)
            time = int(time)
            is_end = state == 'end'
            if not stack:
                stack.append([task, 0, False])
            elif is_end:
                # Close the frame: accumulated time plus the final slice
                times[task] += stack[-1][1] + time - last_time
                # +1 for the inclusive 'end' tick, unless a cut-off already
                # accounted for it
                if not stack[-1][2]:
                    times[task] += 1
                del stack[-1]
            else:  # start
                stack[-1][1] += time - last_time
                if stack[-1][2]:  # if cut off, last time is closing, which takes up 1, so subtract it
                    stack[-1][1] -= 1
                stack[-1][2] = True
                stack.append([task, 0, False])
            last_time = time
        return times
__________________________________________________________________________________________________
|
en
| 0.904456
|
# task and total time accumulated so far and whether it is cut off # start # if cut off, last time is closing, which takes up 1, so subtract it
| 2.921459
| 3
|
grizli_aws/check_prep_results.py
|
grizli-project/grizli-aws
| 0
|
6629322
|
<gh_stars>0
"""
Check drizzled images from prep and log checks to a file
"""
def run_check(check_root='GrizliPrep', query='./j*/Prep/*phot.fits'):
    """
    Interactively review drizzled images matched by `query` in DS9 and log
    each field root to <check_root>Passed.log or <check_root>Failed.log.

    :param check_root: filename prefix for the Passed/Failed log files
    :param query: glob pattern used to locate the *phot.fits catalogs
    """
    import glob
    import os
    import time
    qfiles = glob.glob(query)
    # Load previous results so already-reviewed roots are skipped.
    # Bare except: a missing/unreadable log simply means "nothing yet".
    try:
        checked = [line.strip() for line in open(check_root+'Passed.log').readlines()]
    except:
        checked = []
    try:
        failed = [line.strip() for line in open(check_root+'Failed.log').readlines()]
    except:
        failed = []
    for file in qfiles:
        #root=os.path.basename(file)[:14]
        root=os.path.basename(file).split('_phot')[0]#[:14]
        if root in checked:
            print('Already checked: {0}'.format(root))
            continue
        path = os.path.dirname(file)
        # Open the science + grism images side by side in DS9
        os.system('ds9 {0}/{1}*sci.fits {0}/../Extractions/{1}*grism*.fits & '.format(path, root))
        time.sleep(2)  # give DS9 a moment to start before sending commands
        os.system('scale_threedhst; ds9_match wcs')
        os.system('open {0}/*fine.png'.format(path))
        x = input('{0}, OK? [y/n] '.format(root))
        # Anything other than an explicit 'n' counts as a pass
        if x == 'n':
            if root not in failed:
                failed.append(root)
            # Rewrite the whole log each iteration so progress survives an
            # interrupted session
            fp = open(check_root+'Failed.log','w')
            fp.writelines([item+'\n' for item in failed])
            fp.close()
        else:
            checked.append(root)
            fp = open(check_root+'Passed.log','w')
            fp.writelines([item+'\n' for item in checked])
            fp.close()
def sync_new(check_root='GrizliPrep'):
    """
    Placeholder for syncing newly passed fields.

    Fixes a latent NameError: the original body read a module-global
    `check_root` that is never defined.  It is now a keyword parameter
    defaulting to the same prefix `run_check` uses, which keeps the
    zero-argument call signature working.

    :param check_root: filename prefix for the Passed log file
    :returns: the list of roots recorded as passed
    """
    # Use a context manager so the log file handle is always closed
    with open(check_root + 'Passed.log') as log:
        checked = [line.strip() for line in log.readlines()]
    # TODO: actually sync the checked roots (original body was a stub)
    return checked
|
"""
Check drizzled images from prep and log checks to a file
"""
def run_check(check_root='GrizliPrep', query='./j*/Prep/*phot.fits'):
    """
    Interactively review drizzled images matched by `query` in DS9 and log
    each field root to <check_root>Passed.log or <check_root>Failed.log.

    :param check_root: filename prefix for the Passed/Failed log files
    :param query: glob pattern used to locate the *phot.fits catalogs
    """
    import glob
    import os
    import time
    qfiles = glob.glob(query)
    # Load previous results so already-reviewed roots are skipped.
    # Bare except: a missing/unreadable log simply means "nothing yet".
    try:
        checked = [line.strip() for line in open(check_root+'Passed.log').readlines()]
    except:
        checked = []
    try:
        failed = [line.strip() for line in open(check_root+'Failed.log').readlines()]
    except:
        failed = []
    for file in qfiles:
        #root=os.path.basename(file)[:14]
        root=os.path.basename(file).split('_phot')[0]#[:14]
        if root in checked:
            print('Already checked: {0}'.format(root))
            continue
        path = os.path.dirname(file)
        # Open the science + grism images side by side in DS9
        os.system('ds9 {0}/{1}*sci.fits {0}/../Extractions/{1}*grism*.fits & '.format(path, root))
        time.sleep(2)  # give DS9 a moment to start before sending commands
        os.system('scale_threedhst; ds9_match wcs')
        os.system('open {0}/*fine.png'.format(path))
        x = input('{0}, OK? [y/n] '.format(root))
        # Anything other than an explicit 'n' counts as a pass
        if x == 'n':
            if root not in failed:
                failed.append(root)
            # Rewrite the whole log each iteration so progress survives an
            # interrupted session
            fp = open(check_root+'Failed.log','w')
            fp.writelines([item+'\n' for item in failed])
            fp.close()
        else:
            checked.append(root)
            fp = open(check_root+'Passed.log','w')
            fp.writelines([item+'\n' for item in checked])
            fp.close()
def sync_new(check_root='GrizliPrep'):
    """
    Placeholder for syncing newly passed fields.

    Fixes a latent NameError: the original body read a module-global
    `check_root` that is never defined.  It is now a keyword parameter
    defaulting to the same prefix `run_check` uses, which keeps the
    zero-argument call signature working.

    :param check_root: filename prefix for the Passed log file
    :returns: the list of roots recorded as passed
    """
    # Use a context manager so the log file handle is always closed
    with open(check_root + 'Passed.log') as log:
        checked = [line.strip() for line in log.readlines()]
    # TODO: actually sync the checked roots (original body was a stub)
    return checked
|
en
| 0.596861
|
Check drizzled images from prep and log checks to a file #root=os.path.basename(file)[:14] #[:14]
| 2.373952
| 2
|
testbench/plot_benchmark.py
|
clemenshage/grslra
| 0
|
6629323
|
<reponame>clemenshage/grslra<filename>testbench/plot_benchmark.py
import numpy as np
from matplotlib import pyplot as plt

# Load the saved lp-norm benchmark results.
data = np.load('lpnorm_benchmark.npz')
algos = data["algos"]    # benchmarked callables; names are used as labels
msizes = data["msizes"]  # matrix edge sizes; x-axis is msizes**2
res = data["res"]        # res[i, j]: measurement for algorithm i at size j
                         # (presumably runtime or error — confirm upstream)

# One log-log curve per algorithm, labelled by its prettified name
for i in range(len(algos)):
    plt.loglog(np.square(msizes), res[i, :], linewidth=2.5, linestyle="-", label=algos[i].__name__.replace('_', ' '))
plt.legend(loc="lower right")
plt.show()
|
import numpy as np
from matplotlib import pyplot as plt

# Load the saved lp-norm benchmark results.
data = np.load('lpnorm_benchmark.npz')
algos = data["algos"]    # benchmarked callables; names are used as labels
msizes = data["msizes"]  # matrix edge sizes; x-axis is msizes**2
res = data["res"]        # res[i, j]: measurement for algorithm i at size j
                         # (presumably runtime or error — confirm upstream)

# One log-log curve per algorithm, labelled by its prettified name
for i in range(len(algos)):
    plt.loglog(np.square(msizes), res[i, :], linewidth=2.5, linestyle="-", label=algos[i].__name__.replace('_', ' '))
plt.legend(loc="lower right")
plt.show()
|
none
| 1
| 3.13175
| 3
|
|
dcgan/utils.py
|
euirim/clone-wars-gan
| 0
|
6629324
|
import torch
import torchvision.transforms as transforms
import torchvision.datasets as dset

# Directory containing the data.
root = "../data/full"


def get_dataloader(params):
    """
    Loads the dataset and applies preprocessing steps to it.
    Returns a PyTorch DataLoader.

    :param params: dict with at least "imsize" (target square image size)
        and "bsize" (batch size)
    """
    # Data preprocessing: resize + center-crop to imsize, then map pixel
    # values from [0, 1] to [-1, 1] via the (0.5, 0.5, 0.5) normalization.
    transform = transforms.Compose(
        [
            transforms.Resize(params["imsize"]),
            transforms.CenterCrop(params["imsize"]),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]
    )
    # Create the dataset (ImageFolder expects class subfolders under root).
    dataset = dset.ImageFolder(root=root, transform=transform)
    # Create the dataloader.
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=params["bsize"], shuffle=True
    )
    return dataloader
|
import torch
import torchvision.transforms as transforms
import torchvision.datasets as dset

# Directory containing the data.
root = "../data/full"


def get_dataloader(params):
    """
    Loads the dataset and applies preprocessing steps to it.
    Returns a PyTorch DataLoader.

    :param params: dict with at least "imsize" (target square image size)
        and "bsize" (batch size)
    """
    # Data preprocessing: resize + center-crop to imsize, then map pixel
    # values from [0, 1] to [-1, 1] via the (0.5, 0.5, 0.5) normalization.
    transform = transforms.Compose(
        [
            transforms.Resize(params["imsize"]),
            transforms.CenterCrop(params["imsize"]),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]
    )
    # Create the dataset (ImageFolder expects class subfolders under root).
    dataset = dset.ImageFolder(root=root, transform=transform)
    # Create the dataloader.
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=params["bsize"], shuffle=True
    )
    return dataloader
|
en
| 0.545218
|
# Directory containing the data. Loads the dataset and applies proproccesing steps to it. Returns a PyTorch DataLoader. # Data proprecessing. # Create the dataset. # Create the dataloader.
| 2.861716
| 3
|
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/pacman/model/placements/placements.py
|
Roboy/LSM_SpiNNaker_MyoArm
| 2
|
6629325
|
<filename>src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/pacman/model/placements/placements.py
from pacman.exceptions import (PacmanSubvertexAlreadyPlacedError,
PacmanSubvertexNotPlacedError,
PacmanProcessorAlreadyOccupiedError,
PacmanProcessorNotOccupiedError)
class Placements(object):
    """ Represents a list of placements, indexed both by processor
    coordinates (x, y, p) and by subvertex for O(1) lookup in either
    direction.

    NOTE: iterator accessors use Python 2 dict methods (iterkeys /
    itervalues), matching the Python 2.7 environment this package targets.
    """

    def __init__(self, placements=None):
        """
        :param placements: The initial list of placements
        :type placements: iterable of\
                    :py:class:`pacman.model.placements.placement.Placement`
        :raise PacmanSubvertexAlreadyPlacedError:
                    If there is any subvertex with more than one placement.
        :raise PacmanProcessorAlreadyOccupiedError:
                    If two placements are made to the same processor.
        """
        # (x, y, p) -> Placement
        self._placements = dict()
        # subvertex -> Placement (reverse index)
        self._subvertices = dict()
        if placements is not None:
            self.add_placements(placements)

    @property
    def n_placements(self):
        """ The number of placements
        """
        return len(self._placements)

    def add_placements(self, placements):
        """
        :param placements: The list of placements
        :type placements: iterable of\
                    :py:class:`pacman.model.placements.placement.Placement`
        :return: None
        :rtype: None
        """
        for placement in placements:
            self.add_placement(placement)

    def add_placement(self, placement):
        """ Add a placement

        :param placement: The placement to add
        :type placement:
                    :py:class:`pacman.model.placements.placement.Placement`
        :return: None
        :rtype: None
        :raise PacmanSubvertexAlreadyPlacedError:
                    If there is any subvertex with more than one placement.
        :raise PacmanProcessorAlreadyOccupiedError:
                    If two placements are made to the same processor.
        """
        placement_id = (placement.x, placement.y, placement.p)
        if placement_id in self._placements:
            raise PacmanProcessorAlreadyOccupiedError(placement_id)
        if placement.subvertex in self._subvertices:
            raise PacmanSubvertexAlreadyPlacedError(placement.subvertex)
        self._placements[placement_id] = placement
        self._subvertices[placement.subvertex] = placement

    def get_subvertex_on_processor(self, x, y, p):
        """ Return the subvertex on a specific processor

        :param x: the x coordinate of the chip
        :type x: int
        :param y: the y coordinate of the chip
        :type y: int
        :param p: the processor on the chip
        :type p: int
        :return: the subvertex placed on the given processor
        :rtype: :py:class:`pacman.model.subgraph.subvertex.PartitionedVertex`
        :raise PacmanProcessorNotOccupiedError: if no placement has been\
                    made on the given processor
        """
        placement_id = (x, y, p)
        try:
            return self._placements[placement_id].subvertex
        except KeyError:
            raise PacmanProcessorNotOccupiedError(placement_id)

    def get_placement_of_subvertex(self, subvertex):
        """ Return the placement information for a subvertex

        :param subvertex: The subvertex to find the placement of
        :type subvertex:
                    :py:class:`pacman.model.subgraph.subvertex.PartitionedVertex`
        :return: The placement
        :rtype: :py:class:`pacman.model.placements.placement.Placement`
        :raise PacmanSubvertexNotPlacedError: If the subvertex has not been\
                    placed.
        """
        try:
            return self._subvertices[subvertex]
        except KeyError:
            raise PacmanSubvertexNotPlacedError(subvertex)

    def get_placed_processors(self):
        """Returns an iterable of processors with assigned subvertices.

        :return: Iterable of (x, y, p) tuples indicating processors with
            assigned subvertices.
        :rtype: iterable
        """
        return self._placements.iterkeys()

    def is_subvertex_on_processor(self, x, y, p):
        """ Determine if a subvertex is assigned to a processor.

        :param int x: x coordinate of processor.
        :param int y: y coordinate of processor.
        :param int p: Index of processor.
        :return bool: Whether the processor has an assigned subvertex.
        """
        return (x, y, p) in self._placements

    @property
    def placements(self):
        """ All of the placements

        :return: iterable of placements
        :rtype: iterable of\
                    :py:class:`pacman.model.placements.placement.Placement`
        :raise None: does not raise any known exceptions
        """
        return self._placements.itervalues()

    def __repr__(self):
        # Bug fix: the original iterated `self._placements` directly,
        # i.e. the (x, y, p) KEY tuples, so it printed coordinate tuples
        # instead of the Placement objects; it also used quadratic `+=`
        # concatenation.  Iterate the stored placements and join instead.
        return "".join(repr(placement)
                       for placement in self._placements.itervalues())

    def __iter__(self):
        """ An iterator over the placements object within

        :return: an iterator of Placement objects
        """
        return iter(self.placements)

    def __len__(self):
        return len(self._placements)
|
<filename>src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/pacman/model/placements/placements.py
from pacman.exceptions import (PacmanSubvertexAlreadyPlacedError,
PacmanSubvertexNotPlacedError,
PacmanProcessorAlreadyOccupiedError,
PacmanProcessorNotOccupiedError)
class Placements(object):
""" Represents a list of placements
"""
def __init__(self, placements=None):
"""
:param placements: The initial list of placements
:type placements: iterable of\
:py:class:`pacman.model.placements.placement.Placement`
:raise PacmanSubvertexAlreadyPlacedError:
If there is any subvertex with more than one placement.
:raise PacmanProcessorAlreadyOccupiedError:
If two placements are made to the same processor.
"""
self._placements = dict()
self._subvertices = dict()
if placements is not None:
self.add_placements(placements)
@property
def n_placements(self):
""" The number of placements
"""
return len(self._placements)
def add_placements(self, placements):
"""
:param placements: The list of placements
:type placements: iterable of\
:py:class:`pacman.model.placements.placement.Placement`
:return: None
:rtype: None
"""
for placement in placements:
self.add_placement(placement)
def add_placement(self, placement):
""" Add a placement
:param placement: The placement to add
:type placement:
:py:class:`pacman.model.placements.placement.Placement`
:return: None
:rtype: None
:raise PacmanSubvertexAlreadyPlacedError:
If there is any subvertex with more than one placement.
:raise PacmanProcessorAlreadyOccupiedError:
If two placements are made to the same processor.
"""
placement_id = (placement.x, placement.y, placement.p)
if placement_id in self._placements:
raise PacmanProcessorAlreadyOccupiedError(placement_id)
if placement.subvertex in self._subvertices:
raise PacmanSubvertexAlreadyPlacedError(placement.subvertex)
self._placements[placement_id] = placement
self._subvertices[placement.subvertex] = placement
def get_subvertex_on_processor(self, x, y, p):
""" Return the subvertex on a specific processor or None if the\
processor has not been allocated
:param x: the x coordinate of the chip
:type x: int
:param y: the y coordinate of the chip
:type y: int
:param p: the processor on the chip
:type p: int
:return: the subvertex placed on the given processor or None if no\
such placement has been made
:rtype: :py:class:`pacman.model.subgraph.subvertex.PartitionedVertex`
:raise None: does not raise any known exceptions
"""
placement_id = (x, y, p)
try:
return self._placements[placement_id].subvertex
except KeyError:
raise PacmanProcessorNotOccupiedError(placement_id)
def get_placement_of_subvertex(self, subvertex):
""" Return the placement information for a subvertex
:param subvertex: The subvertex to find the placement of
:type subvertex:
:py:class:`pacman.model.subgraph.subvertex.PartitionedVertex`
:return: The placement
:rtype: :py:class:`pacman.model.placements.placement.Placement`
:raise PacmanSubvertexNotPlacedError: If the subvertex has not been\
placed.
"""
try:
return self._subvertices[subvertex]
except KeyError:
raise PacmanSubvertexNotPlacedError(subvertex)
def get_placed_processors(self):
"""Returns an iterable of processors with assigned subvertices.
:return: Iterable of (x, y, p) tuples indicating processors with
assigned subvertices.
:rtype: iterable
"""
return self._placements.iterkeys()
def is_subvertex_on_processor(self, x, y, p):
""" Determine if a subvertex is assigned to a processor.
:param int x: x coordinate of processor.
:param int y: y coordinate of processor.
:param int p: Index of processor.
:return bool: Whether the processor has an assigned subvertex.
"""
return (x, y, p) in self._placements
@property
def placements(self):
""" All of the placements
:return: iterable of placements
:rtype: iterable of\
:py:class:`pacman.model.placements.placement.Placement`
:raise None: does not raise any known exceptions
"""
return self._placements.itervalues()
def __repr__(self):
output = ""
for placement in self._placements:
output += placement.__repr__()
return output
def __iter__(self):
""" An iterator for the placements object within
:return:
"""
return iter(self.placements)
    def __len__(self):
        """ The number of placements that have been made. """
        return len(self._placements)
|
en
| 0.702503
|
Represents a list of placements :param placements: The initial list of placements :type placements: iterable of\ :py:class:`pacman.model.placements.placement.Placement` :raise PacmanSubvertexAlreadyPlacedError: If there is any subvertex with more than one placement. :raise PacmanProcessorAlreadyOccupiedError: If two placements are made to the same processor. The number of placements :param placements: The list of placements :type placements: iterable of\ :py:class:`pacman.model.placements.placement.Placement` :return: None :rtype: None Add a placement :param placement: The placement to add :type placement: :py:class:`pacman.model.placements.placement.Placement` :return: None :rtype: None :raise PacmanSubvertexAlreadyPlacedError: If there is any subvertex with more than one placement. :raise PacmanProcessorAlreadyOccupiedError: If two placements are made to the same processor. Return the subvertex on a specific processor or None if the\ processor has not been allocated :param x: the x coordinate of the chip :type x: int :param y: the y coordinate of the chip :type y: int :param p: the processor on the chip :type p: int :return: the subvertex placed on the given processor or None if no\ such placement has been made :rtype: :py:class:`pacman.model.subgraph.subvertex.PartitionedVertex` :raise None: does not raise any known exceptions Return the placement information for a subvertex :param subvertex: The subvertex to find the placement of :type subvertex: :py:class:`pacman.model.subgraph.subvertex.PartitionedVertex` :return: The placement :rtype: :py:class:`pacman.model.placements.placement.Placement` :raise PacmanSubvertexNotPlacedError: If the subvertex has not been\ placed. Returns an iterable of processors with assigned subvertices. :return: Iterable of (x, y, p) tuples indicating processors with assigned subvertices. :rtype: iterable Determine if a subvertex is assigned to a processor. :param int x: x coordinate of processor. 
:param int y: y coordinate of processor. :param int p: Index of processor. :return bool: Whether the processor has an assigned subvertex. All of the placements :return: iterable of placements :rtype: iterable of\ :py:class:`pacman.model.placements.placement.Placement` :raise None: does not raise any known exceptions An iterator for the placements object within :return:
| 2.796817
| 3
|
userhandling/migrations/0017_auto_20191229_2002.py
|
cknaut/cinemaple
| 1
|
6629326
|
<reponame>cknaut/cinemaple
# Generated by Django 2.2.8 on 2019-12-30 01:02
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``Movie.tmdbID`` to ``Movie.tmdbid`` (auto-generated by Django)."""
    dependencies = [
        ('userhandling', '0016_auto_20191126_2101'),
    ]
    operations = [
        # Lower-case the TMDB identifier field for naming consistency.
        migrations.RenameField(
            model_name='movie',
            old_name='tmdbID',
            new_name='tmdbid',
        ),
    ]
|
# Generated by Django 2.2.8 on 2019-12-30 01:02
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``Movie.tmdbID`` to ``Movie.tmdbid`` (auto-generated by Django)."""
    dependencies = [
        ('userhandling', '0016_auto_20191126_2101'),
    ]
    operations = [
        # Lower-case the TMDB identifier field for naming consistency.
        migrations.RenameField(
            model_name='movie',
            old_name='tmdbID',
            new_name='tmdbid',
        ),
    ]
|
en
| 0.867003
|
# Generated by Django 2.2.8 on 2019-12-30 01:02
| 1.770158
| 2
|
tests/__init__.py
|
billionai/RFI
| 2
|
6629327
|
"""Core functionality testing module."""
|
"""Core functionality testing module."""
|
en
| 0.2966
|
Core functionality testing module.
| 0.880316
| 1
|
tp/tp02/ex5.py
|
UPB-FILS/sde
| 5
|
6629328
|
# Build the list of names (contains a duplicate 'Ana').
girlsList = []
girlsList.append('Ana')
girlsList.append('Diana')
girlsList.append('Alexandra')
girlsList.append('Ana')
# a) sort alphabetically
girlsList.sort()
print (girlsList)
# b) print each unique name with its number of occurrences.
# Count each name once instead of calling list.count() repeatedly
# (the original recomputed O(n) counts in every later section).
auxList = []
nameCounts = {}
for name in girlsList:
    if name not in auxList:
        auxList.append(name)
        nameCounts[name] = girlsList.count(name)
for name in auxList:
    print (name + " " + str(nameCounts[name]))
# c) most frequent name(s)
occurencesList = [nameCounts[name] for name in auxList]
maxCount = max(occurencesList)
print (maxCount)
for name in auxList:
    if nameCounts[name] == maxCount:
        print (name)
# d) least frequent name(s)
occurencesList = [nameCounts[name] for name in auxList]
minCount = min(occurencesList)
print (minCount)
for name in auxList:
    if nameCounts[name] == minCount:
        print (name)
# e) print the list in reverse order (reverses in place)
girlsList.reverse()
print (girlsList)
|
# Build the list of names (contains a duplicate 'Ana').
girlsList = []
for girl in ('Ana', 'Diana', 'Alexandra', 'Ana'):
    girlsList.append(girl)
# a) sort alphabetically
girlsList.sort()
print (girlsList)
# b) print each unique name with its occurrence count
auxList = []
for name in girlsList:
    if name not in auxList:
        auxList.append(name)
for name in auxList:
    print (" ".join([name, str(girlsList.count(name))]))
# c) most frequent name(s)
occurencesList = [girlsList.count(name) for name in auxList]
maxCount = max(occurencesList)
print (maxCount)
for name in auxList:
    if girlsList.count(name) == maxCount:
        print (name)
# d) least frequent name(s)
occurencesList = [girlsList.count(name) for name in auxList]
minCount = min(occurencesList)
print (minCount)
for name in auxList:
    if girlsList.count(name) == minCount:
        print (name)
# e) print the list in reverse order (reverses in place)
girlsList.reverse()
print (girlsList)
|
it
| 0.401875
|
# a # b # c # d # e
| 3.833473
| 4
|
cos.py
|
dydx-git/Calcy
| 0
|
6629329
|
import math
def Cos(param1,param2, param3):
    """Return ``(cos(angle), '')``.

    The angle is taken from ``param2`` when it is non-empty (or when
    ``param1`` is None), otherwise from ``param1``.

    :param param1: current value / fallback angle source; on failure it is
        returned unchanged
    :param param2: pending input string; preferred angle source when non-empty
    :param param3: True when the angle is in degrees, False for radians
    :return: tuple of (cosine result, or unchanged ``param1`` on error; '')
    """
    try:
        # Prefer the pending input (param2) when present.
        if param1 == None or param2 != '':
            source = param2
        else:
            source = param1
        angle = float(source)
        if param3 == False:
            param1 = math.cos(angle)
        else:
            # Degrees mode: convert to radians first.
            param1 = math.cos(math.radians(angle))
    except (TypeError, ValueError):
        # Non-numeric input: leave param1 unchanged (was a silent bare
        # except that swallowed every error).
        pass
    return param1, ''
|
import math
def Cos(param1,param2, param3):
    """Compute the cosine of the pending input (``param2``) or, failing
    that, of ``param1``; returns a ``(result, '')`` pair.

    On any conversion failure ``param1`` is returned unchanged.
    """
    try:
        # param2 wins whenever it is non-empty (or param1 is unset).
        use_second = param1 == None or param2 != ''
        raw = param2 if use_second else param1
        if param3 == False:
            param1 = math.cos(float(raw))
        else:
            # Degrees mode: convert before taking the cosine.
            param1 = math.cos(math.radians(float(raw)))
    except:
        pass
    return param1, ''
|
none
| 1
| 3.496783
| 3
|
|
src/intersection/intersection.py
|
0xF4D3C0D3/ray-tracer-challenge-with-python
| 0
|
6629330
|
<reponame>0xF4D3C0D3/ray-tracer-challenge-with-python<gh_stars>0
import numpy as np
class Intersection(np.ndarray):
    """Ray/object intersection parameters stored as an ndarray.

    Each row holds the t-values for one ray; ``mask`` flags valid hits
    and ``obj`` is the intersected object.
    """

    def __new__(cls, ts, mask, obj):
        # View the transposed t-values as an Intersection, then attach
        # the metadata attributes.
        self = ts.T.view(cls)
        self.mask = mask.squeeze()
        self.obj = obj
        return self

    def __array_finalize__(self, obj):
        # Required for ndarray subclasses: without this, views, slices and
        # reduction results (e.g. ``hit``) silently lose ``mask``/``obj``
        # and raise AttributeError on access.
        if obj is None:
            return
        self.mask = getattr(obj, 'mask', None)
        self.obj = getattr(obj, 'obj', None)

    def __eq__(self, other):
        # NOTE: returns a single bool (approximate, all-close equality),
        # not the elementwise array that ndarray.__eq__ would return.
        return np.allclose(self, other)

    @property
    def count(self):
        """Number of intersection records (rows)."""
        return self.shape[0]

    @property
    def hit(self):
        """The smallest t-value of each row (the nearest intersection)."""
        return self.min(axis=1)
|
import numpy as np
class Intersection(np.ndarray):
    """Ray/object intersection parameters stored as an ndarray; ``mask``
    flags valid hits and ``obj`` is the intersected object."""
    def __new__(cls, ts, mask, obj):
        # View the transposed t-values as an Intersection and attach metadata.
        # NOTE(review): no __array_finalize__ is defined, so views/slices and
        # reductions of this array lose ``mask``/``obj`` — confirm intended.
        self = ts.T.view(cls)
        self.mask = mask.squeeze()
        self.obj = obj
        return self
    def __eq__(self, other):
        # Returns one bool (all-close), not an elementwise comparison array.
        return np.allclose(self, other)
    @property
    def count(self):
        # Number of intersection records (rows).
        return self.shape[0]
    @property
    def hit(self):
        # Smallest t-value per row (the nearest intersection).
        return self.min(axis=1)
|
none
| 1
| 2.824681
| 3
|
|
src/query/APIQuery.py
|
nekumelon/SonOfAnton
| 0
|
6629331
|
<gh_stars>0
"""
APIQuery should start by checking if a cached completion exists. If it doesn't-
prompt the API for a completion, cache it and use it.
"""
import openai, os, json, re, IO.io as io
from encoder.encoder import get_encoder
MAX_TOKENS = 2048;  # model window: hard cap on prompt + completion tokens
# Load the OpenAI API key from the local config file at import time.
with open('config.json') as configFile:
    config = json.loads(configFile.read());
openai.api_key = config['OPENAI_API_KEY'];
encoder = get_encoder();  # BPE tokenizer used to count prompt tokens
def clamp(num, min_value, max_value): # https://www.tutorialspoint.com/How-to-clamp-floating-numbers-in-Pythons
    """Clamp ``num`` into the closed range [min_value, max_value]."""
    bounded_above = min(num, max_value)
    return max(bounded_above, min_value)
def tokenizePrompt(prompt):
    """Return the BPE token ids of ``prompt`` (module-level ``encoder``)."""
    return encoder.encode(prompt)
def handleCompletion(completion): # Get the text of a completion and prepare it
    """Extract and clean the text of the best (first) completion choice."""
    assert completion and completion != '', 'Unable to handle no/blank completion'
    best_choice = completion['choices'][0]
    text = best_choice['text']
    # Strip the leading whitespace/newlines the API tends to prepend.
    return re.sub(r'^\s+', '', text)
def getCachedCompletions():
    """Load the on-disk completion cache; {} when missing or corrupt."""
    if not os.path.exists('completionsCache.json'):
        return {}
    with open('completionsCache.json', 'r') as cacheFile:
        try:
            return json.loads(cacheFile.read())
        except json.JSONDecodeError:
            # Corrupt cache file: behave as if empty.
            return {}
def getCachedCompletion(prompt):
    """Return the cached raw completion for ``prompt``, or None."""
    return getCachedCompletions().get(prompt)
def cacheCompletion(prompt, completion):
    """Persist ``completion`` for ``prompt`` in the on-disk cache."""
    completions = getCachedCompletions()
    completions[prompt] = completion
    serialized = json.dumps(completions)
    with open('completionsCache.json', 'w') as cacheFile:
        cacheFile.write(serialized)
def promptGPT3(prompt, APIEngine, maxTokens):
    """Answer ``prompt`` with GPT-3 (served from cache when possible) and
    print the result via io.out."""
    # Serve from the on-disk cache first to avoid a paid API call.
    cachedCompletion = getCachedCompletion(prompt);
    if (cachedCompletion):
        io.out(handleCompletion(cachedCompletion));
        return;
    tokens = tokenizePrompt(prompt);
    completion = openai.Completion.create(
        engine = APIEngine,
        prompt = prompt,
        temperature = 0.65,
        # Never request more than the model window minus the prompt length.
        max_tokens = clamp(MAX_TOKENS - len(tokens), 1, maxTokens)
    );
    if (completion and 'choices' in completion):
        cacheCompletion(prompt, completion);
        io.out(handleCompletion(completion));
        return;
    # API returned nothing usable.
    io.out('Sorry. I don\'t know that one.');
def APIQuery(query, APIEngine, maxTokens):
    """Public entry point: answer ``query`` via the GPT-3 prompt pipeline."""
    promptGPT3(query, APIEngine, maxTokens)
|
"""
APIQuery should start by checking if a cached completion exists. If it doesn't-
prompt the API for a completion, cache it and use it.
"""
import openai, os, json, re, IO.io as io
from encoder.encoder import get_encoder
MAX_TOKENS = 2048;  # model window: hard cap on prompt + completion tokens
# Load the OpenAI API key from the local config file at import time.
with open('config.json') as configFile:
    config = json.loads(configFile.read());
openai.api_key = config['OPENAI_API_KEY'];
encoder = get_encoder();  # BPE tokenizer used to count prompt tokens
def clamp(num, min_value, max_value): # https://www.tutorialspoint.com/How-to-clamp-floating-numbers-in-Pythons
    """Clamp ``num`` into the closed range [min_value, max_value]."""
    bounded_above = min(num, max_value)
    return max(bounded_above, min_value)
def tokenizePrompt(prompt):
    """Return the BPE token ids of ``prompt`` (module-level ``encoder``)."""
    return encoder.encode(prompt)
def handleCompletion(completion): # Get the text of a completion and prepare it
    """Extract and clean the text of the best (first) completion choice."""
    assert completion and completion != '', 'Unable to handle no/blank completion'
    best_choice = completion['choices'][0]
    text = best_choice['text']
    # Strip the leading whitespace/newlines the API tends to prepend.
    return re.sub(r'^\s+', '', text)
def getCachedCompletions():
    """Load the on-disk completion cache; {} when missing or corrupt."""
    if not os.path.exists('completionsCache.json'):
        return {}
    with open('completionsCache.json', 'r') as cacheFile:
        try:
            return json.loads(cacheFile.read())
        except json.JSONDecodeError:
            # Corrupt cache file: behave as if empty.
            return {}
def getCachedCompletion(prompt):
    """Return the cached raw completion for ``prompt``, or None."""
    return getCachedCompletions().get(prompt)
def cacheCompletion(prompt, completion):
    """Persist ``completion`` for ``prompt`` in the on-disk cache."""
    completions = getCachedCompletions()
    completions[prompt] = completion
    serialized = json.dumps(completions)
    with open('completionsCache.json', 'w') as cacheFile:
        cacheFile.write(serialized)
def promptGPT3(prompt, APIEngine, maxTokens):
    """Answer ``prompt`` with GPT-3 (served from cache when possible) and
    print the result via io.out."""
    # Serve from the on-disk cache first to avoid a paid API call.
    cachedCompletion = getCachedCompletion(prompt);
    if (cachedCompletion):
        io.out(handleCompletion(cachedCompletion));
        return;
    tokens = tokenizePrompt(prompt);
    completion = openai.Completion.create(
        engine = APIEngine,
        prompt = prompt,
        temperature = 0.65,
        # Never request more than the model window minus the prompt length.
        max_tokens = clamp(MAX_TOKENS - len(tokens), 1, maxTokens)
    );
    if (completion and 'choices' in completion):
        cacheCompletion(prompt, completion);
        io.out(handleCompletion(completion));
        return;
    # API returned nothing usable.
    io.out('Sorry. I don\'t know that one.');
def APIQuery(query, APIEngine, maxTokens):
    """Public entry point: answer ``query`` via the GPT-3 prompt pipeline."""
    promptGPT3(query, APIEngine, maxTokens)
|
en
| 0.85387
|
APIQuery should start by checking if a cached completion exists. If it doesn't- prompt the API for a completion, cache it and use it. # https://www.tutorialspoint.com/How-to-clamp-floating-numbers-in-Pythons # Get the text of a completion and prepare it # Get the text from the first (Best) completion choice # Remove the new lines from the start of the text
| 2.859187
| 3
|
aio_request/response_classifier.py
|
anna-money/aio-request
| 6
|
6629332
|
import abc
import enum
from typing import Dict, Optional
from .base import Header, Response
class ResponseVerdict(enum.Enum):
    """Outcome of response classification: ACCEPT keeps the response,
    REJECT signals it should be discarded (e.g. retried)."""
    ACCEPT = 1
    REJECT = 2
class ResponseClassifier(abc.ABC):
    """Strategy interface deciding whether a response is acceptable."""
    __slots__ = ()
    @abc.abstractmethod
    def classify(self, response: Response) -> ResponseVerdict:
        """Return the verdict for ``response``."""
        ...
class DefaultResponseClassifier(ResponseClassifier):
    """Default policy: honour per-status overrides and explicit no-retry
    responses; reject server errors, network failures, 408 and 429."""

    __slots__ = (
        "_network_errors_code",
        "_verdict_for_status",
    )

    def __init__(self, network_errors_code: int = 489, verdict_for_status: Optional[Dict[int, ResponseVerdict]] = None):
        self._network_errors_code = network_errors_code
        self._verdict_for_status = verdict_for_status or {}

    def classify(self, response: Response) -> ResponseVerdict:
        """Classify ``response``; explicit overrides win over built-in rules."""
        override = self._verdict_for_status.get(response.status)
        if override is not None:
            return override
        # The server explicitly asked us not to retry this request.
        if Header.X_DO_NOT_RETRY in response.headers:
            return ResponseVerdict.ACCEPT
        if response.is_server_error():
            return ResponseVerdict.REJECT
        # Transport failures (network_errors_code), request timeout (408)
        # and throttling (429) are all retryable.
        if response.status in (self._network_errors_code, 408, 429):
            return ResponseVerdict.REJECT
        return ResponseVerdict.ACCEPT
|
import abc
import enum
from typing import Dict, Optional
from .base import Header, Response
class ResponseVerdict(enum.Enum):
    """Outcome of response classification: ACCEPT keeps the response,
    REJECT signals it should be discarded (e.g. retried)."""
    ACCEPT = 1
    REJECT = 2
class ResponseClassifier(abc.ABC):
    """Strategy interface deciding whether a response is acceptable."""
    __slots__ = ()
    @abc.abstractmethod
    def classify(self, response: Response) -> ResponseVerdict:
        """Return the verdict for ``response``."""
        ...
class DefaultResponseClassifier(ResponseClassifier):
    """Default policy: honour per-status overrides and explicit no-retry
    responses; reject server errors, network failures, 408 and 429."""
    __slots__ = (
        "_network_errors_code",
        "_verdict_for_status",
    )
    def __init__(self, network_errors_code: int = 489, verdict_for_status: Optional[Dict[int, ResponseVerdict]] = None):
        # 489 is a non-standard status code used here for transport failures.
        self._network_errors_code = network_errors_code
        self._verdict_for_status = verdict_for_status or {}
    def classify(self, response: Response) -> ResponseVerdict:
        # Explicit per-status overrides win over every built-in rule.
        verdict = self._verdict_for_status.get(response.status)
        if verdict is not None:
            return verdict
        # The server explicitly asked us not to retry this request.
        if Header.X_DO_NOT_RETRY in response.headers:
            return ResponseVerdict.ACCEPT
        if response.is_server_error():
            return ResponseVerdict.REJECT
        if response.status == self._network_errors_code:
            return ResponseVerdict.REJECT
        # Request timeout.
        if response.status == 408:
            return ResponseVerdict.REJECT
        # Too many requests (throttled).
        if response.status == 429:
            return ResponseVerdict.REJECT
        return ResponseVerdict.ACCEPT
|
none
| 1
| 3.043765
| 3
|
|
pip_services3_data/IGetter.py
|
pip-services-python/pip-services-data-python
| 0
|
6629333
|
# -*- coding: utf-8 -*-
"""
pip_services3_data.IGetter
~~~~~~~~~~~~~~~~~~~~~~~~~
Interface for data getters.
:copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from typing import Any, Optional, TypeVar
from pip_services3_commons.data import IIdentifiable
T = TypeVar('T') # Declare type variable
class IGetter(IIdentifiable):
    """
    Interface for data processing components that can get data items.
    """
    def get_one_by_id(self, correlation_id: Optional[str], id: Any) -> T:
        """
        Gets a data item by its unique id.

        :param correlation_id: (optional) transaction id to trace execution through call chain.

        :param id: an id of item to be retrieved.

        :return: an item by its id.
        """
        # NOTE(review): ``id`` shadows the builtin, but renaming it would
        # break implementers/callers — kept as part of the interface.
        raise NotImplementedError('Method from interface definition')
|
# -*- coding: utf-8 -*-
"""
pip_services3_data.IGetter
~~~~~~~~~~~~~~~~~~~~~~~~~
Interface for data getters.
:copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from typing import Any, Optional, TypeVar
from pip_services3_commons.data import IIdentifiable
T = TypeVar('T') # Declare type variable
class IGetter(IIdentifiable):
    """
    Interface for data processing components that can get data items.
    """
    def get_one_by_id(self, correlation_id: Optional[str], id: Any) -> T:
        """
        Gets a data item by its unique id.

        :param correlation_id: (optional) transaction id to trace execution through call chain.

        :param id: an id of item to be retrieved.

        :return: an item by its id.
        """
        # NOTE(review): ``id`` shadows the builtin, but renaming it would
        # break implementers/callers — kept as part of the interface.
        raise NotImplementedError('Method from interface definition')
|
en
| 0.749155
|
# -*- coding: utf-8 -*- pip_services3_data.IGetter ~~~~~~~~~~~~~~~~~~~~~~~~~ Interface for data getters. :copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details. :license: MIT, see LICENSE for more details. # Declare type variable Interface for data processing components that can get data items. Gets a data items by its unique id. :param correlation_id: (optional) transaction id to trace execution through call chain. :param id: an id of item to be retrieved. :return: an item by its id.
| 2.413539
| 2
|
libp2p/routing/interfaces.py
|
swedneck/py-libp2p
| 0
|
6629334
|
from abc import ABC, abstractmethod
from typing import Iterable
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo
class IContentRouting(ABC):
    """Interface for announcing and discovering providers of content."""
    @abstractmethod
    def provide(self, cid: bytes, announce: bool = True) -> None:
        """
        Provide adds the given cid to the content routing system. If announce is True,
        it also announces it, otherwise it is just kept in the local
        accounting of which objects are being provided.
        """
    @abstractmethod
    def find_provider_iter(self, cid: bytes, count: int) -> Iterable[PeerInfo]:
        """
        Search for peers who are able to provide a given key
        returns an iterator of peer.PeerInfo
        """
        # NOTE(review): ``count`` presumably caps the number of providers
        # searched for — confirm against implementations.
class IPeerRouting(ABC):
    """Interface for locating a specific peer in the network."""
    @abstractmethod
    async def find_peer(self, peer_id: ID) -> PeerInfo:
        """
        Find specific Peer
        FindPeer searches for a peer with given peer_id, returns a peer.PeerInfo
        with relevant addresses.
        """
|
from abc import ABC, abstractmethod
from typing import Iterable
from libp2p.peer.id import ID
from libp2p.peer.peerinfo import PeerInfo
class IContentRouting(ABC):
    """Interface for announcing and discovering providers of content."""
    @abstractmethod
    def provide(self, cid: bytes, announce: bool = True) -> None:
        """
        Provide adds the given cid to the content routing system. If announce is True,
        it also announces it, otherwise it is just kept in the local
        accounting of which objects are being provided.
        """
    @abstractmethod
    def find_provider_iter(self, cid: bytes, count: int) -> Iterable[PeerInfo]:
        """
        Search for peers who are able to provide a given key
        returns an iterator of peer.PeerInfo
        """
        # NOTE(review): ``count`` presumably caps the number of providers
        # searched for — confirm against implementations.
class IPeerRouting(ABC):
    """Interface for locating a specific peer in the network."""
    @abstractmethod
    async def find_peer(self, peer_id: ID) -> PeerInfo:
        """
        Find specific Peer
        FindPeer searches for a peer with given peer_id, returns a peer.PeerInfo
        with relevant addresses.
        """
|
en
| 0.883183
|
Provide adds the given cid to the content routing system. If announce is True, it also announces it, otherwise it is just kept in the local accounting of which objects are being provided. Search for peers who are able to provide a given key returns an iterator of peer.PeerInfo Find specific Peer FindPeer searches for a peer with given peer_id, returns a peer.PeerInfo with relevant addresses.
| 3.149146
| 3
|
7/TKnapsack.py
|
pipSu/Algorithm_Assignment
| 0
|
6629335
|
def __main():
    """Driver: builds a small two-constraint (weight + bulk) knapsack
    instance and runs the DP solver ``tk``."""
    weight = [4, 2, 5,]   # per-item weights
    bulk = [1, 2, 3,]     # per-item bulk (second capacity dimension)
    value = [5, 2, 6,]    # per-item values
    cw = 5                # weight capacity
    cb = 4                # bulk capacity
    n = len(weight)
    # DP table: m[i][j] = [best value w.r.t. weight j, best value w.r.t. bulk j]
    # NOTE(review): sized with range(max(cw, cb)) columns, but tk() indexes
    # j up to max(cb, cw) *inclusive* — looks like an off-by-one that would
    # raise IndexError; confirm intended width (max(cw, cb) + 1?).
    m = [ [[0,0] for j in range(max(cw,cb))]
    for i in range(n)
    ]
    print(m)
    tk(weight,bulk,value,m, n,cw,cb)
def tk(w,b,v,m, n,cw,cb,):
    """Dynamic-programming pass for a two-constraint knapsack.

    NOTE(review): several apparent defects, left byte-identical for the
    author to confirm intent:
    * ``i == n`` can never be true (the loop runs n-1 .. 0), so the
      initialisation branch is dead; presumably ``i == n-1`` was intended.
    * the else-branch reads ``m[i+1]``, which is out of range when i == n-1.
    * ``m`` is built with max(cw, cb) columns but j runs to max(cb, cw)
      inclusive, so the indexing would raise IndexError.
    """
    for i in range(n-1,-1,-1):
        maxew = min(w[i], cw)   # effective weight bound for item i
        maxeb = min(b[i], cb)   # effective bulk bound for item i
        maxe = max(maxew, maxeb)
        if i == n:
            # Base-case initialisation (dead code, see note above).
            for j in range( maxe ):
                m[i][j] =[0, 0]
            #for j in range(maxe, max(cb, cw)+1):
            j = maxe
            while j <= max(cb, cw):
                m[i][j] = [v[i],v[i]]
                j += 1
        else:
            # Carry forward the row below, then try taking item i.
            for j in range( maxe ):
                m[i][j] =m[i+1][j]
            for j in range(maxe, max(cb, cw)+1):
                if j>b[i]+1:
                    m[i][j][0] = max(m[i+1][j][0], m[i+1][j-w[i]][0]+v[i] )
                    m[i][j][1] = max(m[i+1][j][1], m[i+1][j-b[i]][1]+v[i] )
                    # Keep both tracks at the feasible (smaller) value.
                    m[i][j][0] = min(m[i][j][0], m[i][j][1])
                    m[i][j][1] = m[i][j][0]
__main()  # NOTE(review): runs at import time and would crash (see tk notes); consider an ``if __name__ == '__main__'`` guard
|
def __main():
    """Driver: builds a small two-constraint (weight + bulk) knapsack
    instance and runs the DP solver ``tk``."""
    weight = [4, 2, 5,]   # per-item weights
    bulk = [1, 2, 3,]     # per-item bulk (second capacity dimension)
    value = [5, 2, 6,]    # per-item values
    cw = 5                # weight capacity
    cb = 4                # bulk capacity
    n = len(weight)
    # DP table: m[i][j] = [best value w.r.t. weight j, best value w.r.t. bulk j]
    # NOTE(review): sized with range(max(cw, cb)) columns, but tk() indexes
    # j up to max(cb, cw) *inclusive* — looks like an off-by-one that would
    # raise IndexError; confirm intended width (max(cw, cb) + 1?).
    m = [ [[0,0] for j in range(max(cw,cb))]
    for i in range(n)
    ]
    print(m)
    tk(weight,bulk,value,m, n,cw,cb)
def tk(w,b,v,m, n,cw,cb,):
    """Dynamic-programming pass for a two-constraint knapsack.

    NOTE(review): several apparent defects, left byte-identical for the
    author to confirm intent:
    * ``i == n`` can never be true (the loop runs n-1 .. 0), so the
      initialisation branch is dead; presumably ``i == n-1`` was intended.
    * the else-branch reads ``m[i+1]``, which is out of range when i == n-1.
    * ``m`` is built with max(cw, cb) columns but j runs to max(cb, cw)
      inclusive, so the indexing would raise IndexError.
    """
    for i in range(n-1,-1,-1):
        maxew = min(w[i], cw)   # effective weight bound for item i
        maxeb = min(b[i], cb)   # effective bulk bound for item i
        maxe = max(maxew, maxeb)
        if i == n:
            # Base-case initialisation (dead code, see note above).
            for j in range( maxe ):
                m[i][j] =[0, 0]
            #for j in range(maxe, max(cb, cw)+1):
            j = maxe
            while j <= max(cb, cw):
                m[i][j] = [v[i],v[i]]
                j += 1
        else:
            # Carry forward the row below, then try taking item i.
            for j in range( maxe ):
                m[i][j] =m[i+1][j]
            for j in range(maxe, max(cb, cw)+1):
                if j>b[i]+1:
                    m[i][j][0] = max(m[i+1][j][0], m[i+1][j-w[i]][0]+v[i] )
                    m[i][j][1] = max(m[i+1][j][1], m[i+1][j-b[i]][1]+v[i] )
                    # Keep both tracks at the feasible (smaller) value.
                    m[i][j][0] = min(m[i][j][0], m[i][j][1])
                    m[i][j][1] = m[i][j][0]
__main()  # NOTE(review): runs at import time and would crash (see tk notes); consider an ``if __name__ == '__main__'`` guard
|
zh
| 0.16912
|
#for j in range(maxe, max(cb, cw)+1):
| 2.830388
| 3
|
project2_jarvis_ai.py
|
shrihari272/Artificial_Intelligence
| 1
|
6629336
|
<reponame>shrihari272/Artificial_Intelligence<gh_stars>1-10
import pyttsx3
import time
import webbrowser
import random
import speech_recognition as sr
import wikipedia
import socket
import datetime
import wolframalpha
import os
import sys
import python_weather
import asyncio
from clint.textui import progress
import requests
import shutil
import subprocess
# Text-to-speech engine (Windows SAPI5 backend, first installed voice,
# slightly slowed speech rate).
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('rate',170)
engine.setProperty('voice', voices[0].id)
# NOTE(review): hard-coded WolframAlpha app id committed to source —
# move to config and rotate the key.
client = wolframalpha.Client('QAEXLK-RY9HY2PHAT')
class Ai:
    """Voice assistant: records microphone input, recognises it with the
    Google speech API, and dispatches the recognised text to handlers
    (websites/apps, weather, wiki/Wolfram search, system commands)."""
    future = True  # set False by a handler once the current command is consumed
    now = datetime.datetime.now()  # NOTE(review): captured once at class creation, so 'time'/'date' answers go stale
    def is_connected(self):
        """Return True when the internet is reachable (TCP to 1.1.1.1:53)."""
        try:
            socket.create_connection(("1.1.1.1", 53))
            return True
        except OSError:
            pass
        return False
    def mycommand(self):
        """Listen on the microphone and store the recognised text in
        ``self.word``; retries recursively until recognition succeeds."""
        r = sr.Recognizer()
        with sr.Microphone() as source:
            print("Listening...")
            audio = r.listen(source)
            try:
                print('Recognizing...')
                self.word = r.recognize_google(audio, language='en-in')
                print('User: ' + self.word)
            except:
                # Recognition failed: retry, sleeping while offline.
                connect = self.is_connected()
                if connect:
                    self.mycommand()
                else:
                    self.talk('Reconnecting...')
                    time.sleep(10)
                    self.mycommand()
    def talk(self,audio):
        """Print the given text and speak it through the TTS engine."""
        print('Computer: ' + audio)
        engine.say(audio)
        engine.runAndWait()
    def update_ai(self):
        """Download the latest version of this script from GitHub over
        the current file, showing a progress bar."""
        url = 'https://raw.githubusercontent.com/shrihari272/Artificial_intelligence/main/project2_jarvis_ai.py'
        r = requests.get(url, stream = True)
        with open("project2_jarvis_ai.py", "wb") as f:
            total_length = int(r.headers.get('content-length'))
            for update in progress.bar(r.iter_content(chunk_size = 2391975),expected_size =(total_length / 1024) + 1):
                if update:
                    f.write(update)
    def analyze(self):
        """Handle 'open <target>' commands: known websites first, then
        local applications listed in windict_list.txt (name line followed
        by path line)."""
        self.word =self.word.lower()
        if 'open' in self.word:
            self.future = False
            result = True
            # Strip the keyword and all spaces, leaving only the target name.
            self.word = self.word.replace('open' ,'')
            self.word = self.word.replace(' ' ,'')
            self.open_dict ={
                'amazon':'www.amazon.in',
                'flipkart':'www.flipkart.com',
                'google':'www.google.com',
                'youtube':'www.youtube.com',
                'whatsapp':'web.whatsapp.com',
                'instagram':'www.instagram.com',
                'facebook':'www.facebook.com',
                'sanpchat':'www.snapchat.com',
                'stackoverflow':'www.stackoverflow.com'
            }
            for linkkey in self.open_dict:
                if linkkey in self.word:
                    result = False
                    if 'stackoverflow' in linkkey:
                        self.talk('Here you go to Stack Over flow. Happy coding!')
                        webbrowser.open(self.open_dict.get(linkkey))
                    else:
                        self.talk('Opening ' + linkkey)
                        webbrowser.open(self.open_dict.get(linkkey))
            if result:
                # Not a known website: look it up in the local app registry.
                try:
                    with open('windict_list.txt') as f:
                        res = True
                        while res:
                            res = f.readline()
                            res = res.strip('\n')
                            if self.word in res:
                                # The next line holds the executable path.
                                res = f.readline()
                                res = res.strip('\n')
                                os.startfile(res)
                                result = False
                                break
                        if result:
                            self.talk('This application is not installed in your system.')
                            self.talk('If the application is installed.')
                            self.talk('Then please specify its Application name and path in windict_list text file.')
                except:
                    # Registry file missing: create an empty one.
                    with open('windict_list.txt','w') as f:
                        pass
                    self.talk('This application is not installed in your system.')
    def weathereport(self):
        """Handle 'weather' commands: ask for a city, then speak today's
        forecast from python_weather."""
        if 'weather' in self.word:
            self.future = False
            self.talk('Provide city name.')
            self.mycommand()
            print(self.word)
            try:
                async def getweather():
                    client = python_weather.Client(format=python_weather.IMPERIAL)
                    place = self.word
                    weather = await client.find(place)
                    for forecast in weather.forecasts:
                        # Match today's forecast by day-of-month.
                        if self.now.strftime('%d') in str(forecast.date):
                            self.talk(forecast.sky_text + ' and temperature will be ' + str(forecast.temperature) + ' degree celcius')
                    await client.close()
                loop = asyncio.get_event_loop()
                loop.run_until_complete(getweather())
            except:
                self.talk('Unable to find the city please try again.')
    def wiki_search(self):
        """Try WolframAlpha first, then Wikipedia, then plain Google."""
        try:
            # NOTE(review): ``client.self.word(...)`` looks wrong — Wolfram's
            # API is ``client.query(self.word)``; as written this raises and
            # always falls through to the Wikipedia branch. Confirm intent.
            res = client.self.word(self.word)
            answer = next(res.results).text
            if '(no data available)'in answer:
                self.google_search()
            else:
                self.talk('Got it.')
                self.future = False
                self.talk(answer)
        except:
            try:
                results = wikipedia.summary(self.word, sentences=2)
                self.talk('Searching Wikipedia...')
                self.talk('Got it.')
                self.future = False
                self.talk(results)
            except:
                # Last resort: open a browser search.
                self.future = False
                self.google_search()
    def search(self):
        """Dispatch location / explicit search / knowledge-base queries."""
        self.word =self.word.lower()
        if 'where is' in self.word or 'locate' in self.word:
            self.future = False
            self.google_map()
        if 'search' in self.word:
            self.future = False
            self.google_search()
        else:
            self.wiki_search()
    def google_map(self):
        """Open Google Maps on the location named in the command."""
        self.word =self.word.replace("where is", "")
        self.word =self.word.replace("locate", "")
        location = self.word
        self.talk("User asked to Locate")
        self.talk(location)
        webbrowser.open("https://www.google.nl/maps/place/" + location)
    def google_search(self):
        """Open a YouTube/Google search for the remaining command text."""
        self.word = self.word.replace('search' ,'')
        self.search_dict ={
            'youtube':'https://www.youtube.com/results?search_query=',
            'google':'https://www.google.com/search?q='
        }
        # NOTE(review): ``self.search = True`` overwrites the bound method
        # ``search`` on this instance after the first call — later
        # ``self.search()`` dispatches would fail. Rename the flag.
        self.search = True
        for key in self.search_dict:
            if key in self.word:
                self.search = False
                self.word = self.word.replace(key ,'')
                url = self.search_dict[key] + self.word
                webbrowser.open(url)
        if self.search:
            # No engine named: default to Google.
            url = f'https://www.google.com/search?q={self.word}'
            webbrowser.open(url)
    def greeting_user(self):
        """Handle small-talk and basic system commands."""
        if 'time' in self.word:
            self.future = False
            self.talk(f'Now the time is: {self.now.strftime("%H:%M:%S")}')
        elif 'date' in self.word:
            self.future = False
            self.talk('Todays date is: ' + self.now.strftime("%d"))
        elif "what\'s up" in self.word or 'how are you' in self.word:
            msg = ['Just doing my thing!', 'I am fine!', 'Nice!', 'I am nice and full of energy']
            self.future = False
            self.talk(random.choice(msg))
        elif 'fine' in self.word or "good" in self.word:
            self.future = False
            self.talk("It's good to know that your fine")
        elif "who made you" in self.word or "who created you" in self.word:
            self.future = False
            self.talk("I have been created by Shrihari.")
        elif "who are you" in self.word:
            self.future = False
            # NOTE(review): this calls the *string* self.word — presumably
            # ``self.talk(...)`` was intended; as written it raises TypeError.
            self.word("I am your virtual assistant created by Shrihari")
        elif "restart" in self.word:
            # Windows-only restart.
            subprocess.call(["shutdown", "/r"])
        elif "hibernate" in self.word or "sleep" in self.word:
            self.talk("Hibernating")
            subprocess.call("shutdown / h")
    def run_ai(self):
        """One listen/dispatch cycle; handlers clear ``future`` when they
        consume the command."""
        self.mycommand()
        self.future = True
        self.word =self.word.lower()
        if 'jarvis' in self.word:
            # Drop the wake word before dispatching.
            self.word = self.word.replace('jarvis' ,'')
        if 'exit' in self.word:
            self.talk('Okay')
            sys.exit()
        # NOTE(review): mixes the module-level ``ai`` instance with ``self``;
        # works only because a single global instance exists — prefer self.
        if ai.future:
            ai.greeting_user()
        if ai.future:
            ai.weathereport()
        if ai.future:
            self.analyze()
        if ai.future:
            self.search()
ai = Ai()
# Self-update from GitHub requires connectivity; refuse to start otherwise.
if ai.is_connected():
    ai.update_ai()
    os.system('cls')   # Windows-only console clear
else:
    ai.talk('Internet connection is required to run this application.')
    ai.talk('Please try again.')
    sys.exit()
# Load (or ask for and persist) the user's name in uname.txt.
try:
    f = open('uname.txt')
    r = f.readline()
    if r == '':
        # File exists but is empty: prompt and save.
        f.close()
        uname = input('Entre your name: ')
        f = open('uname.txt','w')
        f.write(uname)
        f.close()
    else:
        uname = r
        f.close()
except:
    # First run: the file does not exist yet.
    uname = input('Entre your name: ')
    f = open('uname.txt','w')
    f.write(uname)
    f.close()
ai.talk(f'Hi {uname}, Setting the environment please wait.')
os.system('cls')
# Centre the welcome banner in the current terminal width.
columns = shutil.get_terminal_size().columns
print("#####################".center(columns))
print(f"Welcome {uname}".center(columns))
print("#####################".center(columns))
ai.talk("Hello my name is Jarvis. Version 1.0")
ai.talk("I am your AI Assistant")
# Main loop: listen, recognise, dispatch — forever.
while True:
    ai.run_ai()
|
import pyttsx3
import time
import webbrowser
import random
import speech_recognition as sr
import wikipedia
import socket
import datetime
import wolframalpha
import os
import sys
import python_weather
import asyncio
from clint.textui import progress
import requests
import shutil
import subprocess
# Text-to-speech engine (Windows SAPI5 backend, first installed voice,
# slightly slowed speech rate).
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('rate',170)
engine.setProperty('voice', voices[0].id)
# NOTE(review): hard-coded WolframAlpha app id committed to source —
# move to config and rotate the key.
client = wolframalpha.Client('QAEXLK-RY9HY2PHAT')
class Ai:
future = True
now = datetime.datetime.now()
def is_connected(self):
try:
socket.create_connection(("1.1.1.1", 53))
return True
except OSError:
pass
return False
def mycommand(self):
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
audio = r.listen(source)
try:
print('Recognizing...')
self.word = r.recognize_google(audio, language='en-in')
print('User: ' + self.word)
except:
connect = self.is_connected()
if connect:
self.mycommand()
else:
self.talk('Reconnecting...')
time.sleep(10)
self.mycommand()
def talk(self,audio):
print('Computer: ' + audio)
engine.say(audio)
engine.runAndWait()
def update_ai(self):
url = 'https://raw.githubusercontent.com/shrihari272/Artificial_intelligence/main/project2_jarvis_ai.py'
r = requests.get(url, stream = True)
with open("project2_jarvis_ai.py", "wb") as f:
total_length = int(r.headers.get('content-length'))
for update in progress.bar(r.iter_content(chunk_size = 2391975),expected_size =(total_length / 1024) + 1):
if update:
f.write(update)
def analyze(self):
self.word =self.word.lower()
if 'open' in self.word:
self.future = False
result = True
self.word = self.word.replace('open' ,'')
self.word = self.word.replace(' ' ,'')
self.open_dict ={
'amazon':'www.amazon.in',
'flipkart':'www.flipkart.com',
'google':'www.google.com',
'youtube':'www.youtube.com',
'whatsapp':'web.whatsapp.com',
'instagram':'www.instagram.com',
'facebook':'www.facebook.com',
'sanpchat':'www.snapchat.com',
'stackoverflow':'www.stackoverflow.com'
}
for linkkey in self.open_dict:
if linkkey in self.word:
result = False
if 'stackoverflow' in linkkey:
self.talk('Here you go to Stack Over flow. Happy coding!')
webbrowser.open(self.open_dict.get(linkkey))
else:
self.talk('Opening ' + linkkey)
webbrowser.open(self.open_dict.get(linkkey))
if result:
try:
with open('windict_list.txt') as f:
res = True
while res:
res = f.readline()
res = res.strip('\n')
if self.word in res:
res = f.readline()
res = res.strip('\n')
os.startfile(res)
result = False
break
if result:
self.talk('This application is not installed in your system.')
self.talk('If the application is installed.')
self.talk('Then please specify its Application name and path in windict_list text file.')
except:
with open('windict_list.txt','w') as f:
pass
self.talk('This application is not installed in your system.')
def weathereport(self):
if 'weather' in self.word:
self.future = False
self.talk('Provide city name.')
self.mycommand()
print(self.word)
try:
async def getweather():
client = python_weather.Client(format=python_weather.IMPERIAL)
place = self.word
weather = await client.find(place)
for forecast in weather.forecasts:
if self.now.strftime('%d') in str(forecast.date):
self.talk(forecast.sky_text + ' and temperature will be ' + str(forecast.temperature) + ' degree celcius')
await client.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(getweather())
except:
self.talk('Unable to find the city please try again.')
def wiki_search(self):
try:
res = client.self.word(self.word)
answer = next(res.results).text
if '(no data available)'in answer:
self.google_search()
else:
self.talk('Got it.')
self.future = False
self.talk(answer)
except:
try:
results = wikipedia.summary(self.word, sentences=2)
self.talk('Searching Wikipedia...')
self.talk('Got it.')
self.future = False
self.talk(results)
except:
self.future = False
self.google_search()
def search(self):
self.word =self.word.lower()
if 'where is' in self.word or 'locate' in self.word:
self.future = False
self.google_map()
if 'search' in self.word:
self.future = False
self.google_search()
else:
self.wiki_search()
def google_map(self):
self.word =self.word.replace("where is", "")
self.word =self.word.replace("locate", "")
location = self.word
self.talk("User asked to Locate")
self.talk(location)
webbrowser.open("https://www.google.nl/maps/place/" + location)
def google_search(self):
self.word = self.word.replace('search' ,'')
self.search_dict ={
'youtube':'https://www.youtube.com/results?search_query=',
'google':'https://www.google.com/search?q='
}
self.search = True
for key in self.search_dict:
if key in self.word:
self.search = False
self.word = self.word.replace(key ,'')
url = self.search_dict[key] + self.word
webbrowser.open(url)
if self.search:
url = f'https://www.google.com/search?q={self.word}'
webbrowser.open(url)
def greeting_user(self):
    """Handle small-talk and system commands: time, date, chit-chat,
    identity questions, restart and hibernate."""
    if 'time' in self.word:
        self.future = False
        self.talk(f'Now the time is: {self.now.strftime("%H:%M:%S")}')
    elif 'date' in self.word:
        self.future = False
        self.talk('Todays date is: ' + self.now.strftime("%d"))
    elif "what\'s up" in self.word or 'how are you' in self.word:
        msg = ['Just doing my thing!', 'I am fine!', 'Nice!', 'I am nice and full of energy']
        self.future = False
        self.talk(random.choice(msg))
    elif 'fine' in self.word or "good" in self.word:
        self.future = False
        self.talk("It's good to know that your fine")
    elif "who made you" in self.word or "who created you" in self.word:
        self.future = False
        self.talk("I have been created by Shrihari.")
    elif "who are you" in self.word:
        self.future = False
        # Bug fix: the original called self.word(...) -- a string, not a
        # callable -- which raised TypeError; the reply must be spoken.
        self.talk("I am your virtual assistant created by Shrihari")
    elif "restart" in self.word:
        subprocess.call(["shutdown", "/r"])
    elif "hibernate" in self.word or "sleep" in self.word:
        self.talk("Hibernating")
        # Bug fix: "shutdown / h" as one string (with stray spaces) is not a
        # valid command; use an argument list like the restart branch above.
        subprocess.call(["shutdown", "/h"])
def run_ai(self):
    """Main-loop body: listen for one command and route it through the
    handlers until one marks it handled (self.future becomes False)."""
    self.mycommand()
    self.future = True  # True until some handler consumes the command
    self.word = self.word.lower()
    if 'jarvis' in self.word:
        self.word = self.word.replace('jarvis', '')
    if 'exit' in self.word:
        self.talk('Okay')
        sys.exit()
    # Consistency fix: the original referenced the module-level `ai` instance
    # from inside the method; a method should act on `self` so any instance
    # (not just the global singleton) works.
    if self.future:
        self.greeting_user()
    if self.future:
        self.weathereport()
    if self.future:
        self.analyze()
    if self.future:
        self.search()
def _load_or_ask_username(path='uname.txt'):
    """Return the saved user name from *path*, prompting for it and saving
    it on first run (missing or empty file)."""
    try:
        # Fix: use a context manager instead of leaking the file handle.
        with open(path) as fh:
            saved = fh.readline()
        if saved:
            return saved
    except OSError:
        pass
    # Typo fix: the prompt used to read "Entre your name".
    name = input('Enter your name: ')
    with open(path, 'w') as fh:
        fh.write(name)
    return name

ai = Ai()
# refuse to start without connectivity -- speech APIs need the network
if ai.is_connected():
    ai.update_ai()
    os.system('cls')
else:
    ai.talk('Internet connection is required to run this application.')
    ai.talk('Please try again.')
    sys.exit()
uname = _load_or_ask_username()
ai.talk(f'Hi {uname}, Setting the environment please wait.')
os.system('cls')
# centred welcome banner sized to the current terminal
columns = shutil.get_terminal_size().columns
print("#####################".center(columns))
print(f"Welcome {uname}".center(columns))
print("#####################".center(columns))
ai.talk("Hello my name is Jarvis. Version 1.0")
ai.talk("I am your AI Assistant")
while True:
    ai.run_ai()
|
de
| 0.243835
|
####################".center(columns)) ####################".center(columns))
| 2.702618
| 3
|
rpython/rlib/rstruct/test/test_runpack.py
|
jptomo/pypy-lang-scheme
| 34
|
6629337
|
from rpython.rtyper.test.tool import BaseRtypingTest
from rpython.rlib.rstruct.runpack import runpack
from rpython.rlib.rarithmetic import LONG_BIT
import struct
class TestRStruct(BaseRtypingTest):
    """Checks that rlib's runpack() agrees with CPython's struct module,
    both when executed directly and after RPython annotation/translation
    (via self.interpret)."""
    def test_unpack(self):
        # 's' consumes 1 byte; each 'l' is machine-word sized, so every
        # value is padded out to LONG_BIT//8 bytes.
        pad = '\x00' * (LONG_BIT//8-1) # 3 or 7 null bytes
        def fn():
            return runpack('sll', 'a'+pad+'\x03'+pad+'\x04'+pad)[1]
        assert fn() == 3
        assert self.interpret(fn, []) == 3
    def test_unpack_2(self):
        # multiple fields: fold all four ints into one comparable number
        data = struct.pack('iiii', 0, 1, 2, 4)
        def fn():
            a, b, c, d = runpack('iiii', data)
            return a * 1000 + b * 100 + c * 10 + d
        assert fn() == 124
        assert self.interpret(fn, []) == 124
    def test_unpack_single(self):
        # a single-field format returns the bare value, not a tuple
        data = struct.pack('i', 123)
        def fn():
            return runpack('i', data)
        assert fn() == 123
        assert self.interpret(fn, []) == 123
    def test_unpack_big_endian(self):
        # explicit '>' byte order must be honoured regardless of platform
        def fn():
            return runpack(">i", "\x01\x02\x03\x04")
        assert fn() == 0x01020304
        assert self.interpret(fn, []) == 0x01020304
    def test_unpack_double_big_endian(self):
        # big-endian double: compare bit-for-bit against CPython's struct
        def fn():
            return runpack(">d", "testtest")
        assert fn() == struct.unpack(">d", "testtest")[0]
        assert self.interpret(fn, []) == struct.unpack(">d", "testtest")[0]
|
from rpython.rtyper.test.tool import BaseRtypingTest
from rpython.rlib.rstruct.runpack import runpack
from rpython.rlib.rarithmetic import LONG_BIT
import struct
class TestRStruct(BaseRtypingTest):
def test_unpack(self):
pad = '\x00' * (LONG_BIT//8-1) # 3 or 7 null bytes
def fn():
return runpack('sll', 'a'+pad+'\x03'+pad+'\x04'+pad)[1]
assert fn() == 3
assert self.interpret(fn, []) == 3
def test_unpack_2(self):
data = struct.pack('iiii', 0, 1, 2, 4)
def fn():
a, b, c, d = runpack('iiii', data)
return a * 1000 + b * 100 + c * 10 + d
assert fn() == 124
assert self.interpret(fn, []) == 124
def test_unpack_single(self):
data = struct.pack('i', 123)
def fn():
return runpack('i', data)
assert fn() == 123
assert self.interpret(fn, []) == 123
def test_unpack_big_endian(self):
def fn():
return runpack(">i", "\x01\x02\x03\x04")
assert fn() == 0x01020304
assert self.interpret(fn, []) == 0x01020304
def test_unpack_double_big_endian(self):
def fn():
return runpack(">d", "testtest")
assert fn() == struct.unpack(">d", "testtest")[0]
assert self.interpret(fn, []) == struct.unpack(">d", "testtest")[0]
|
en
| 0.301816
|
# 3 or 7 null bytes
| 2.500109
| 3
|
archive/run_results_parser.py
|
sungchun12/airflow-dbt-cloud
| 9
|
6629338
|
"""Utilities to parse the results of the run_results.json"""
import json
class dbt_command_run_results_parser:
    """Builds a dbt retry command from a run_results.json artifact.

    Models whose status is in ``status_set`` are extracted and turned into
    a ``<dbt_command_override> --select model1 model2 ...`` string.
    """
    def __init__(self, status_set, dbt_command_override, run_downstream_nodes):
        # statuses that mark a model for re-running
        self.status_set = status_set # ex: {'warn', 'error', 'fail'}
        # dbt command prefix, e.g. "dbt run"
        self.dbt_command_override = dbt_command_override
        # when truthy, append '+' so downstream nodes are selected as well
        self.run_downstream_nodes = run_downstream_nodes
    def get_dbt_command_output(self, run_results) -> str:
        """Full pipeline: load the JSON file, filter by status, build the command."""
        run_results_dict = self.get_run_results_to_dict(run_results)
        filtered_run_results_set = self.filter_run_results_dict_by_status(run_results_dict)
        return self.parse_run_results_to_dbt_command(filtered_run_results_set)
    def get_run_results_to_dict(self, run_results) -> dict:
        """Load the run_results.json file at path *run_results* into a dict."""
        with open(run_results) as f:
            return json.load(f)
    def filter_run_results_dict_by_status(self, run_results_dict) -> set:
        """Return the set of model names whose status is in self.status_set.

        Raises when nothing matched, so a retry command is never built empty.
        """
        filtered_run_results_set = {
            # unique_id looks like "model.<project>.<name>"; keep the name
            model.get('unique_id').split('.')[2]
            for model in run_results_dict.get('results')
            if model.get('status') in self.status_set
        }
        # Idiom fix: `== set()` -> truthiness check.
        if not filtered_run_results_set:
            raise Exception(f"No models with status {self.status_set} found in run_results.json")
        return filtered_run_results_set
    def parse_run_results_to_dbt_command(self, filtered_run_results_set) -> str:
        """Join the model names into the final dbt command string.

        Output (including the trailing space) is byte-compatible with the
        original append-loop implementation.
        """
        # Idiom fix: `== True` -> truthiness; build the per-model suffix once.
        suffix = '+ ' if self.run_downstream_nodes else ' '
        return f"{self.dbt_command_override} --select " + ''.join(
            model + suffix for model in filtered_run_results_set)
|
"""Utilities to parse the results of the run_results.json"""
import json
class dbt_command_run_results_parser:
def __init__(self, status_set, dbt_command_override, run_downstream_nodes):
self.status_set = status_set # ex: {'warn', 'error', 'fail'}
self.dbt_command_override = dbt_command_override
self.run_downstream_nodes = run_downstream_nodes
def get_dbt_command_output(self, run_results) -> str:
run_results_dict = self.get_run_results_to_dict(run_results)
filtered_run_results_set = self.filter_run_results_dict_by_status(run_results_dict)
dbt_command_output = self.parse_run_results_to_dbt_command(filtered_run_results_set)
return dbt_command_output
def get_run_results_to_dict(self, run_results) -> dict:
with open(run_results) as f:
run_results_dict = json.load(f)
return run_results_dict
def filter_run_results_dict_by_status(self, run_results_dict) -> set:
filtered_run_results_set = set()
run_results_models = run_results_dict.get('results')
for model in run_results_models:
if model.get('status') in self.status_set:
filtered_model_id = model.get('unique_id')
filtered_model_name = filtered_model_id.split('.')[2]
filtered_run_results_set.add(filtered_model_name)
if filtered_run_results_set == set():
raise Exception(f"No models with status {self.status_set} found in run_results.json")
else:
return filtered_run_results_set
def parse_run_results_to_dbt_command(self, filtered_run_results_set) -> str:
dbt_command_output = f"{self.dbt_command_override} --select "
if self.run_downstream_nodes == True:
for model in filtered_run_results_set:
dbt_command_output += model + '+ '
else:
for model in filtered_run_results_set:
dbt_command_output += model + ' '
return dbt_command_output
|
en
| 0.367851
|
Utilities to parse the results of the run_results.json # ex: {'warn', 'error', 'fail'}
| 2.90047
| 3
|
workloads/bench1/src/stats.py
|
pacslab/conc-value-perf-modelling
| 0
|
6629339
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import time
import os
import socket
import sys
import uuid
import subprocess
try:
import urllib2
from urllib2 import urlopen
except BaseException:
from urllib.request import urlopen
import decimal
# Set it to your own servers
INST_PRIV_IP_DST = "8.8.8.8"
VM_PUB_ID_DST = "https://api.ipify.org/"
def fstr(f):
    """Render the float *f* as a plain (non-scientific) decimal string."""
    # A 20-digit Decimal context round-trips the repr() of any float
    # without falling back to exponent notation.
    context = decimal.Context()
    context.prec = 20
    as_decimal = context.create_decimal(repr(f))
    return format(as_decimal, 'f')
def get_meminfo():
    """
    Get and format the content of /proc/meminfo as one comma-separated
    line with all internal spaces removed.
    """
    # Fix: close the file deterministically instead of leaking the handle.
    with open('/proc/meminfo') as fh:
        buf = fh.read()
    return ','.join(v.replace(' ', '') for v in buf.split('\n') if v)
def get_vmstat():
    """
    Get and format the content of /proc/vmstat: each "key value" line
    becomes "key:value", all lines joined with ';'.  The trailing newline
    yields a trailing ';' -- kept for byte-compatibility.
    """
    # Fix: close the file deterministically instead of leaking the handle.
    with open("/proc/vmstat") as fh:
        buf = fh.read()
    return ";".join(v.replace(' ', ":") for v in buf.split("\n"))
def get_diskstat():
    """
    Get and format the content of /proc/diskstats: one entry per device,
    fields comma-separated, devices joined with ';'.
    """
    # Fix: close the file deterministically instead of leaking the handle.
    with open("/proc/diskstats") as fh:
        buf = fh.read()
    # Fix: the original collapsed space runs with a brittle chain of
    # str.replace() calls tuned to specific column widths (7 then 3 commas);
    # splitting on arbitrary whitespace handles any field alignment.
    return ";".join(
        ",".join(line.split())
        for line in buf.split("\n") if line
    )
def get_cpuinfo():
    """
    Get and format the content of /proc/cpuinfo with newlines turned
    into ';' and tabs removed.
    """
    # Fix: close the file deterministically; read() == "".join(readlines()).
    with open("/proc/cpuinfo") as fh:
        buf = fh.read()
    return buf.replace("\n", ";").replace("\t", "")
def get_cpuinfo_short():
    """Return "<logical cpu count>,<model name>" parsed from /proc/cpuinfo."""
    # Fix: close the file deterministically instead of leaking the handle.
    with open("/proc/cpuinfo") as fh:
        cpuinfo = fh.read().replace("\n", ";").replace("\t", "")
    count = cpuinfo.count("processor")
    # NOTE(review): field index 4 is assumed to be "model name: ..." -- this
    # is layout-dependent (x86-style cpuinfo); verify on other architectures.
    model = cpuinfo.split(";")[4].split(":")[1].strip()
    return "%s,%s" % (count, model)
def get_inst_id():
    """Return (persisted_id, fresh_id) for this instance.

    The first call persists a fresh UUID to /tmp/inst_id.txt; later calls
    read it back, so exist_id == new_id exactly on a cold start.
    """
    log_file = '/tmp/inst_id.txt'
    new_id = str(uuid.uuid4())
    try:
        # Fix: close handles deterministically; narrow the overly-broad
        # `except BaseException` to the actual file-access failures.
        with open(log_file) as fh:
            exist_id = fh.read().strip('\n')
    except OSError:
        with open(log_file, 'w') as fh:
            fh.write(new_id)
        exist_id = new_id
    return exist_id, new_id
def get_inst_priv_ip():
    """ Get inst private IP """
    # Connecting a UDP socket sends no packets; it just makes the OS pick
    # the outbound interface, whose local address we then read back.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect((INST_PRIV_IP_DST, 80))
    ip = s.getsockname()[0]
    s.close()
    return ip
def get_vm_priv_ip():
    """Resolve this host's own fully-qualified name to its private IP."""
    return socket.gethostbyname(socket.getfqdn())
def get_vm_pub_ip():
    """ Get VM public IP by querying external server """
    ip = "None"  # the literal string "None" (not the None object) on failure
    try:
        ip = str(urlopen(VM_PUB_ID_DST).read().decode())
    except BaseException:
        pass  # best-effort: offline hosts simply report "None"
    return ip
def get_vm_id():
    """ Get VM ID from /proc/self/cgroup """
    # assumes a cgroup-v1 style path ".../<vm_id>/<inst_id>" on the
    # third-to-last line -- TODO confirm; layout differs under cgroup v2.
    buf = open('/proc/self/cgroup').read().split('\n')[-3].split('/')
    vm_id, inst_id = buf[1], buf[2]
    return vm_id, inst_id
def get_uptime():
    """Return "<uptime_seconds>,<idle_seconds>" from /proc/uptime."""
    # Fix: close the file deterministically instead of leaking the handle.
    with open('/proc/uptime') as fh:
        return ','.join(fh.read().strip('\n').split(' '))
def stat_other():
    """Return [hostname, kernel_version] as reported by uname."""
    def read_uname(flag):
        return os.popen('uname ' + flag).read().strip('\n')
    return [read_uname('-n'), read_uname('-r')]
def stat_basic(argv=1):
    """Collect the basic identity/metadata record for this instance.

    Returns a dict of instance ids, VM ids, IP addresses, uptime and CPU
    info.  The *argv* parameter is unused (kept for CLI dispatch
    compatibility with the other stat_* entry points).
    """
    exist_id, new_id = get_inst_id()
    vm_id, inst_id = get_vm_id()
    uptime = get_uptime()
    vm_priv_ip = get_vm_priv_ip()
    # This causes some slow-downs!
    # vm_pub_ip = get_vm_pub_ip()
    vm_pub_ip = vm_priv_ip  # public-IP lookup disabled; reuse the private IP
    inst_priv_ip = get_inst_priv_ip()
    cpu_info = get_cpuinfo_short()
    res = {
        'exist_id': exist_id,
        'new_id': new_id,
        'vm_id': vm_id,
        'inst_id': inst_id,
        'vm_priv_ip': vm_priv_ip,
        'vm_pub_ip': vm_pub_ip,
        'inst_priv_ip': inst_priv_ip,
        'uptime': uptime,
        'cpu_info': cpu_info
    }
    # TEST
    # Simulate long cold start
    # if exist_id == new_id: # if cold start
    #     start_time = time.time()
    #     while time.time() - start_time < 20:
    #         continue
    return res
if __name__ == '__main__':
    # Dispatch "python stats.py <func_name>" to the matching collector.
    func_name = sys.argv[1]
    # Security fix: eval() would execute arbitrary expressions supplied on
    # the command line; restrict dispatch to names defined in this module.
    res = globals()[func_name]()
    print(res)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import time
import os
import socket
import sys
import uuid
import subprocess
try:
import urllib2
from urllib2 import urlopen
except BaseException:
from urllib.request import urlopen
import decimal
# Set it to your own servers
INST_PRIV_IP_DST = "8.8.8.8"
VM_PUB_ID_DST = "https://api.ipify.org/"
def fstr(f):
"""
Convert a float number to string
"""
ctx = decimal.Context()
ctx.prec = 20
d1 = ctx.create_decimal(repr(f))
return format(d1, 'f')
def get_meminfo():
"""
Get and format the content of /proc/meminfo
"""
buf = open('/proc/meminfo').read()
buf = ','.join([v.replace(' ', '') for v in
buf.split('\n') if v])
return buf
def get_vmstat():
"""
Get and format the content of /proc/vmstat
"""
buf = open("/proc/vmstat").read()
buf = [v.replace(' ', ":") for v in buf.split("\n")]
buf = ";".join(buf)
return buf
def get_diskstat():
"""
Get and format the content of /proc/diskstats
"""
buf = open("/proc/diskstats").read()
buf = [v for v in buf.split("\n") if v]
buf = [
v.replace(
" ",
",").replace(
",,,,,,,",
",").replace(
",,,",
"").lstrip(",") for v in buf]
buf = ";".join(buf)
return buf
def get_cpuinfo():
"""
Get and format the content of /proc/cpuinfo
"""
buf = "".join(open("/proc/cpuinfo").readlines())
cpuinfo = buf.replace("\n", ";").replace("\t", "")
return cpuinfo
def get_cpuinfo_short():
""" Get CPU version information """
buf = "".join(open("/proc/cpuinfo").readlines())
cpuinfo = buf.replace("\n", ";").replace("\t", "")
a1 = cpuinfo.count("processor")
a2 = cpuinfo.split(";")[4].split(":")[1].strip()
return "%s,%s" % (a1, a2)
# return [a1, a2.split(' @ ')[0], a2.split(' @ ')[1]]
def get_inst_id():
""" Get the inst ID """
log_file = '/tmp/inst_id.txt'
new_id = str(uuid.uuid4())
try:
exist_id = open(log_file).read().strip('\n')
except BaseException:
open(log_file, 'w').write(new_id)
exist_id = new_id
return exist_id, new_id
def get_inst_priv_ip():
""" Get inst private IP """
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((INST_PRIV_IP_DST, 80))
ip = s.getsockname()[0]
s.close()
return ip
def get_vm_priv_ip():
""" Get VM private IP """
ip = socket.gethostbyname(socket.getfqdn())
return ip
def get_vm_pub_ip():
""" Get VM public IP by querying external server """
ip = "None"
try:
ip = str(urlopen(VM_PUB_ID_DST).read().decode())
except BaseException:
pass
return ip
def get_vm_id():
""" Get VM ID from /proc/self/cgroup """
buf = open('/proc/self/cgroup').read().split('\n')[-3].split('/')
vm_id, inst_id = buf[1], buf[2]
return vm_id, inst_id
def get_uptime():
""" Get VM uptime """
uptime = ','.join(open('/proc/uptime').read().strip('\n').split(' '))
return uptime
def stat_other():
hostname = os.popen('uname -n').read().strip('\n')
kernel_ver = os.popen('uname -r').read().strip('\n')
return [hostname, kernel_ver]
def stat_basic(argv=1):
exist_id, new_id = get_inst_id()
vm_id, inst_id = get_vm_id()
uptime = get_uptime()
vm_priv_ip = get_vm_priv_ip()
# This causes some slow-downs!
# vm_pub_ip = get_vm_pub_ip()
vm_pub_ip = vm_priv_ip
inst_priv_ip = get_inst_priv_ip()
cpu_info = get_cpuinfo_short()
res = {
'exist_id': exist_id,
'new_id': new_id,
'vm_id': vm_id,
'inst_id': inst_id,
'vm_priv_ip': vm_priv_ip,
'vm_pub_ip': vm_pub_ip,
'inst_priv_ip': inst_priv_ip,
'uptime': uptime,
'cpu_info': cpu_info
}
# TEST
# Simulate long cold start
# if exist_id == new_id: # if cold start
# start_time = time.time()
# while time.time() - start_time < 20:
# continue
return res
if __name__ == '__main__':
func_name = sys.argv[1]
res = eval(func_name)()
print(res)
|
en
| 0.584185
|
#!/usr/bin/python # -*- coding: utf-8 -*- # Set it to your own servers Convert a float number to string Get and format the content of /proc/meminfo Get and format the content of /proc/vmstat Get and format the content of /proc/diskstats Get and format the content of /proc/cpuinfo Get CPU version information # return [a1, a2.split(' @ ')[0], a2.split(' @ ')[1]] Get the inst ID Get inst private IP Get VM private IP Get VM public IP by querying external server Get VM ID from /proc/self/cgroup Get VM uptime # This causes some slow-downs! # vm_pub_ip = get_vm_pub_ip() # TEST # Simulate long cold start # if exist_id == new_id: # if cold start # start_time = time.time() # while time.time() - start_time < 20: # continue
| 2.581217
| 3
|
members/forms.py
|
Freerk42/makerspaceleiden-crm
| 0
|
6629340
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import Tag, clean_tag_string, AuditRecord
from django.forms import ModelForm
from members.models import User
from mailinglists.models import Mailinglist, Subscription
from unknowntags.models import Unknowntag
import re
class NewTagForm(ModelForm):
    """Admin-side form for registering a brand-new RFID tag with an owner."""
    class Meta:
        model = Tag
        fields = [ 'tag','owner','description' ]
        help_texts = {
            'description': 'Set by default to who/when it was added',
        }
class TagForm(ModelForm):
    """Member-facing form for editing one of their own tags.

    Extra constructor kwargs (popped before the ModelForm sees them):
      canedittag -- whether the raw tag value may be edited
      isdelete   -- render everything read-only for delete confirmation
    """
    class Meta:
        model = Tag
        fields = [ 'tag', 'description', 'last_used' ]
        help_texts = {
            'description': 'Optional - e.g. something like "my ov card", or "blue fob".',
        }
    def __init__(self, *args, **kwargs):
        self.canedittag = False
        self.isdelete = False
        if 'canedittag' in kwargs:
            self.canedittag = kwargs.pop('canedittag')
        if 'isdelete' in kwargs:
            self.isdelete = kwargs.pop('isdelete')
        # NOTE(review): this unconditionally discards the canedittag kwarg
        # popped above, so the tag field is ALWAYS read-only.  Possibly a
        # deliberate lockdown, possibly a leftover -- confirm before removing.
        self.canedittag = False
        super(TagForm, self).__init__(*args, **kwargs)
        self.fields['last_used'].widget.attrs['readonly'] = True
        self.fields['last_used'].help_text = '(not editable)'
        if not self.canedittag:
            self.fields['tag'].help_text = '(not editable)'
            self.fields['tag'].widget.attrs['readonly'] = True
        if self.isdelete:
            self.fields['description'].widget.attrs['readonly'] = True
            for k,f in self.fields.items():
                f.help_text = '(not editable during a delete)'
    def clean_tag(self):
        # normalise user-entered tag strings to the canonical stored format
        return clean_tag_string(self.cleaned_data['tag'])
class NewUserForm(forms.Form):
    """Admin form for creating a member, optionally binding a previously
    unknown tag and pre-subscribing them to mailing lists."""
    first_name = forms.CharField(max_length=User._meta.get_field('first_name').max_length)
    last_name = forms.CharField(max_length=User._meta.get_field('last_name').max_length)
    email = forms.EmailField()
    phone_number = forms.CharField(max_length=User._meta.get_field('phone_number').max_length,
        required=False, help_text="Optional; only visible to the trustees and board delegated administrators")
    tag = forms.ModelChoiceField(queryset=Unknowntag.objects.all(), required = False, help_text="Optional. Leave blank to add later.")
    activate_doors = forms.BooleanField(initial = True, help_text='Also give this user door permits if they did not have it yet. Only applicable if above tag is specified.')
    # every list rendered pre-checked via the bare "checked" HTML attribute
    mailing_lists = forms.ModelMultipleChoiceField(
        queryset=Mailinglist.objects.all(),
        required=False,
        help_text="Lists to initially subscribe the user to.",
        widget=forms.CheckboxSelectMultiple(attrs={"checked":""}),
    )
class NewAuditRecordForm(ModelForm):
    """Form shown before privilege elevation; records why admin is needed."""
    # where to redirect the user after the audit record is saved
    return_to = forms.CharField(widget = forms.HiddenInput())
    class Meta:
        model = AuditRecord
        fields = [ 'action', ]
        help_texts = {
            'action': 'Reason why you need to become admin; e.g. "debug an issue", "fix user record", "add a mailinglist", "adjust a tag", etc.',
        }
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import Tag, clean_tag_string, AuditRecord
from django.forms import ModelForm
from members.models import User
from mailinglists.models import Mailinglist, Subscription
from unknowntags.models import Unknowntag
import re
class NewTagForm(ModelForm):
class Meta:
model = Tag
fields = [ 'tag','owner','description' ]
help_texts = {
'description': 'Set by default to who/when it was added',
}
class TagForm(ModelForm):
class Meta:
model = Tag
fields = [ 'tag', 'description', 'last_used' ]
help_texts = {
'description': 'Optional - e.g. something like "my ov card", or "blue fob".',
}
def __init__(self, *args, **kwargs):
self.canedittag = False
self.isdelete = False
if 'canedittag' in kwargs:
self.canedittag = kwargs.pop('canedittag')
if 'isdelete' in kwargs:
self.isdelete = kwargs.pop('isdelete')
self.canedittag = False
super(TagForm, self).__init__(*args, **kwargs)
self.fields['last_used'].widget.attrs['readonly'] = True
self.fields['last_used'].help_text = '(not editable)'
if not self.canedittag:
self.fields['tag'].help_text = '(not editable)'
self.fields['tag'].widget.attrs['readonly'] = True
if self.isdelete:
self.fields['description'].widget.attrs['readonly'] = True
for k,f in self.fields.items():
f.help_text = '(not editable during a delete)'
def clean_tag(self):
return clean_tag_string(self.cleaned_data['tag'])
class NewUserForm(forms.Form):
first_name = forms.CharField(max_length=User._meta.get_field('first_name').max_length)
last_name = forms.CharField(max_length=User._meta.get_field('last_name').max_length)
email = forms.EmailField()
phone_number = forms.CharField(max_length=User._meta.get_field('phone_number').max_length,
required=False, help_text="Optional; only visible to the trustees and board delegated administrators")
tag = forms.ModelChoiceField(queryset=Unknowntag.objects.all(), required = False, help_text="Optional. Leave blank to add later.")
activate_doors = forms.BooleanField(initial = True, help_text='Also give this user door permits if they did not have it yet. Only applicable if above tag is specified.')
mailing_lists = forms.ModelMultipleChoiceField(
queryset=Mailinglist.objects.all(),
required=False,
help_text="Lists to initially subscribe the user to.",
widget=forms.CheckboxSelectMultiple(attrs={"checked":""}),
)
class NewAuditRecordForm(ModelForm):
return_to = forms.CharField(widget = forms.HiddenInput())
class Meta:
model = AuditRecord
fields = [ 'action', ]
help_texts = {
'action': 'Reason why you need to become admin; e.g. "debug an issue", "fix user record", "add a mailinglist", "adjust a tag", etc.',
}
|
none
| 1
| 2.141053
| 2
|
|
Day 07 - The Treachery of Whales/part1-solution.py
|
CapOfCave/adventofcode-2021
| 0
|
6629341
|
<gh_stars>0
def calc_score(crabs, i):
    """Total fuel for every crab to reach position *i* (1 fuel per step)."""
    return sum(abs(position - i) for position in crabs)
# Fix: don't shadow the builtin `input`, and close the file deterministically.
with open('./input.txt') as input_file:
    crabs = [int(s) for s in input_file.readline().split(',')]
# The cost function is convex in the target position, so scanning positions
# in order and stopping at the first increase finds the global minimum.
# Fix: include position max(crabs) itself (range() was exclusive).
best_score = calc_score(crabs, 0)
for candidate in range(max(crabs) + 1):
    score = calc_score(crabs, candidate)
    if score < best_score:
        best_score = score
    elif score > best_score:
        break # it only gets worse from here
print(best_score) # 355592
|
def calc_score(crabs, i):
return sum([abs(c - i) for c in crabs])
with open('./input.txt') as input:
crabs = [int(s) for s in input.readline().split(',')]
best_score = max(crabs) * len(crabs)
for test in range(max(crabs)):
new_score = calc_score(crabs, test)
if (new_score < best_score):
best_score = new_score
elif (new_score > best_score):
break # it only gets worse from here
print(best_score) # 355592
|
en
| 0.893625
|
# it only gets worse from here # 355592
| 3.575686
| 4
|
plugins/filetime_from_git/git_wrapper.py
|
likev/gauravssnl.github.io
| 5
|
6629342
|
<filename>plugins/filetime_from_git/git_wrapper.py
# -*- coding: utf-8 -*-
"""
Wrap python git interface for compatibility with older/newer version
"""
try:
from itertools import zip_longest
except ImportError:
from six.moves import zip_longest
import logging
import os
from time import mktime
from datetime import datetime
from pelican.utils import set_date_tzinfo
from git import Git, Repo
DEV_LOGGER = logging.getLogger(__name__)
def grouper(iterable, n, fillvalue=None):
    '''
    Collect data into fixed-length chunks or blocks, padding the last
    chunk with *fillvalue* when the input length is not a multiple of n.
    '''
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    # n references to the SAME iterator advance in lockstep, so zip_longest
    # slices the input into consecutive n-tuples.
    chunks = [iter(iterable)] * n
    return zip_longest(*chunks, fillvalue=fillvalue)
class _GitWrapperCommon(object):
    '''
    Wrap git module to provide a more stable interface across versions
    '''
    def __init__(self, repo_path):
        # NOTE(review): repo_path is accepted but never used -- the repo is
        # discovered from the current working directory instead.  Confirm
        # whether callers rely on cwd before changing this.
        self.git = Git()
        # isolate from user/system git config so behaviour is reproducible
        self.git.update_environment(
            GIT_CONFIG_NOSYSTEM='true',
            HOME=os.getcwd(),
            XDG_CONFIG_HOME=os.getcwd()
        )
        self.repo = Repo(os.path.abspath("."), search_parent_directories=True)
    def is_file_managed_by_git(self, path):
        '''
        :param path: Path to check
        :returns: True if path is managed by git
        '''
        # `git ls-files --error-unmatch` exits 0 only for tracked files
        status, _stdout, _stderr = self.git.execute(
            ['git', 'ls-files', path, '--error-unmatch'],
            with_extended_output=True,
            with_exceptions=False)
        return status == 0
    def is_file_modified(self, path):
        '''
        Does a file have local changes not yet committed
        :returns: True if file has local changes
        '''
        # `git diff --quiet` exits non-zero when the file differs from HEAD
        status, _stdout, _stderr = self.git.execute(
            ['git', 'diff', '--quiet', 'HEAD', path],
            with_extended_output=True,
            with_exceptions=False)
        return status != 0
    def get_commits_following(self, path):
        '''
        Get all commits including path following the file through
        renames
        :param path: Path which we will find commits for
        :returns: Sequence of commit objects. Newest to oldest
        '''
        return [
            commit for commit, _ in self.get_commits_and_names_iter(
                path)]
    def get_commits_and_names_iter(self, path):
        '''
        Get all commits including a given path following renames,
        yielding (commit, filename-at-that-commit) pairs.
        '''
        log_result = self.git.log(
            '--pretty=%H',
            '--follow',
            '--name-only',
            '--',
            path).splitlines()
        # the log output repeats in groups of three lines: sha, blank, filename
        for commit_sha, _, filename in grouper(log_result, 3):
            yield self.repo.commit(commit_sha), filename
    def get_commits(self, path, follow=False):
        '''
        Get all commits including path
        :param path: Path which we will find commits for
        :param bool follow: If True we will follow path through renames
        :returns: Sequence of commit objects. Newest to oldest
        '''
        if follow:
            return self.get_commits_following(path)
        else:
            # _get_commits is supplied by the version-specific subclass
            return self._get_commits(path)
class _GitWrapperLegacy(_GitWrapperCommon):
    # Backend for old GitPython releases that expose Repo.commits().
    def _get_commits(self, path):
        '''
        Get all commits including path without following renames
        :param path: Path which we will find commits for
        :returns: Sequence of commit objects. Newest to oldest
        '''
        return self.repo.commits(path=path)
    @staticmethod
    def get_commit_date(commit, tz_name):
        '''
        Get datetime of commit comitted_date
        '''
        # presumably legacy GitPython exposed committed_date as a
        # time.struct_time, hence the mktime() conversion -- confirm against
        # the pinned GitPython version.
        return set_date_tzinfo(
            datetime.fromtimestamp(mktime(commit.committed_date)),
            tz_name=tz_name)
class _GitWrapper(_GitWrapperCommon):
    # Backend for modern GitPython releases (Repo.iter_commits()).
    def _get_commits(self, path):
        '''
        Get all commits including path without following renames
        :param path: Path which we will find commits for
        :returns: Sequence of commit objects. Newest to oldest
        .. NOTE ::
            If this fails it could be that your gitpython version is out of
            sync with the git binary on your distro.
            Make sure you use the correct gitpython version.
            Alternatively enabling GIT_FILETIME_FOLLOW may also make your
            problem go away.
        '''
        return list(self.repo.iter_commits(paths=path))
    @staticmethod
    def get_commit_date(commit, tz_name):
        '''
        Get datetime of commit comitted_date
        '''
        # modern GitPython: committed_date is already a Unix timestamp
        return set_date_tzinfo(
            datetime.fromtimestamp(commit.committed_date),
            tz_name=tz_name)
# one wrapper instance per absolute path, created lazily
_wrapper_cache = {}
def git_wrapper(path):
    '''
    Get appropriate wrapper factory and cache instance for path
    '''
    path = os.path.abspath(path)
    if path not in _wrapper_cache:
        # old GitPython exposes Repo.commits; pick the matching backend
        backend = _GitWrapperLegacy if hasattr(Repo, 'commits') else _GitWrapper
        _wrapper_cache[path] = backend(path)
    return _wrapper_cache[path]
|
<filename>plugins/filetime_from_git/git_wrapper.py
# -*- coding: utf-8 -*-
"""
Wrap python git interface for compatibility with older/newer version
"""
try:
from itertools import zip_longest
except ImportError:
from six.moves import zip_longest
import logging
import os
from time import mktime
from datetime import datetime
from pelican.utils import set_date_tzinfo
from git import Git, Repo
DEV_LOGGER = logging.getLogger(__name__)
def grouper(iterable, n, fillvalue=None):
'''
Collect data into fixed-length chunks or blocks
'''
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
class _GitWrapperCommon(object):
'''
Wrap git module to provide a more stable interface across versions
'''
def __init__(self, repo_path):
self.git = Git()
self.git.update_environment(
GIT_CONFIG_NOSYSTEM='true',
HOME=os.getcwd(),
XDG_CONFIG_HOME=os.getcwd()
)
self.repo = Repo(os.path.abspath("."), search_parent_directories=True)
def is_file_managed_by_git(self, path):
'''
:param path: Path to check
:returns: True if path is managed by git
'''
status, _stdout, _stderr = self.git.execute(
['git', 'ls-files', path, '--error-unmatch'],
with_extended_output=True,
with_exceptions=False)
return status == 0
def is_file_modified(self, path):
'''
Does a file have local changes not yet committed
:returns: True if file has local changes
'''
status, _stdout, _stderr = self.git.execute(
['git', 'diff', '--quiet', 'HEAD', path],
with_extended_output=True,
with_exceptions=False)
return status != 0
def get_commits_following(self, path):
'''
Get all commits including path following the file through
renames
:param path: Path which we will find commits for
:returns: Sequence of commit objects. Newest to oldest
'''
return [
commit for commit, _ in self.get_commits_and_names_iter(
path)]
def get_commits_and_names_iter(self, path):
'''
Get all commits including a given path following renames
'''
log_result = self.git.log(
'--pretty=%H',
'--follow',
'--name-only',
'--',
path).splitlines()
for commit_sha, _, filename in grouper(log_result, 3):
yield self.repo.commit(commit_sha), filename
def get_commits(self, path, follow=False):
'''
Get all commits including path
:param path: Path which we will find commits for
:param bool follow: If True we will follow path through renames
:returns: Sequence of commit objects. Newest to oldest
'''
if follow:
return self.get_commits_following(path)
else:
return self._get_commits(path)
class _GitWrapperLegacy(_GitWrapperCommon):
def _get_commits(self, path):
'''
Get all commits including path without following renames
:param path: Path which we will find commits for
:returns: Sequence of commit objects. Newest to oldest
'''
return self.repo.commits(path=path)
@staticmethod
def get_commit_date(commit, tz_name):
'''
Get datetime of commit comitted_date
'''
return set_date_tzinfo(
datetime.fromtimestamp(mktime(commit.committed_date)),
tz_name=tz_name)
class _GitWrapper(_GitWrapperCommon):
def _get_commits(self, path):
'''
Get all commits including path without following renames
:param path: Path which we will find commits for
:returns: Sequence of commit objects. Newest to oldest
.. NOTE ::
If this fails it could be that your gitpython version is out of
sync with the git binary on your distro.
Make sure you use the correct gitpython version.
Alternatively enabling GIT_FILETIME_FOLLOW may also make your
problem go away.
'''
return list(self.repo.iter_commits(paths=path))
@staticmethod
def get_commit_date(commit, tz_name):
'''
Get datetime of commit comitted_date
'''
return set_date_tzinfo(
datetime.fromtimestamp(commit.committed_date),
tz_name=tz_name)
_wrapper_cache = {}
def git_wrapper(path):
'''
Get appropriate wrapper factory and cache instance for path
'''
path = os.path.abspath(path)
if path not in _wrapper_cache:
if hasattr(Repo, 'commits'):
_wrapper_cache[path] = _GitWrapperLegacy(path)
else:
_wrapper_cache[path] = _GitWrapper(path)
return _wrapper_cache[path]
|
en
| 0.873738
|
# -*- coding: utf-8 -*- Wrap python git interface for compatibility with older/newer version Collect data into fixed-length chunks or blocks # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx Wrap git module to provide a more stable interface across versions :param path: Path to check :returns: True if path is managed by git Does a file have local changes not yet committed :returns: True if file has local changes Get all commits including path following the file through renames :param path: Path which we will find commits for :returns: Sequence of commit objects. Newest to oldest Get all commits including a given path following renames Get all commits including path :param path: Path which we will find commits for :param bool follow: If True we will follow path through renames :returns: Sequence of commit objects. Newest to oldest Get all commits including path without following renames :param path: Path which we will find commits for :returns: Sequence of commit objects. Newest to oldest Get datetime of commit comitted_date Get all commits including path without following renames :param path: Path which we will find commits for :returns: Sequence of commit objects. Newest to oldest .. NOTE :: If this fails it could be that your gitpython version is out of sync with the git binary on your distro. Make sure you use the correct gitpython version. Alternatively enabling GIT_FILETIME_FOLLOW may also make your problem go away. Get datetime of commit comitted_date Get appropriate wrapper factory and cache instance for path
| 2.217548
| 2
|
cmdix/command/cat.py
|
jaraco/pycoreutils
| 18
|
6629343
|
import sys
import itertools
from .. import lib
def parseargs(p):
    """
    Add arguments and `func` to `p`.
    :param p: ArgumentParser
    :return: ArgumentParser
    """
    p.description = "Concatenate FILE(s), or standard input, to standard output."
    p.epilog = ("If the FILE ends with '.bz2' or '.gz', the file will be "
                "decompressed automatically.")
    p.add_argument('FILE', nargs='*')
    p.set_defaults(func=func)
    return p
def func(args):
    # Stream every input file lazily (lib handles stdin and .bz2/.gz
    # decompression) and echo each line to stdout unchanged.
    for line in itertools.chain.from_iterable(lib.parsefilelist(args.FILE, True)):
        sys.stdout.write(line)
|
import sys
import itertools
from .. import lib
def parseargs(p):
    """
    Add arguments and `func` to `p`.
    :param p: ArgumentParser
    :return: ArgumentParser
    """
    p.set_defaults(func=func)
    p.description = "Concatenate FILE(s), or standard input, " + "to standard output."
    p.epilog = (
        "If the FILE ends with '.bz2' or '.gz', the file will be "
        + "decompressed automatically."
    )
    p.add_argument('FILE', nargs='*')
    return p
def func(args):
    """Concatenate the files named in ``args.FILE`` to standard output."""
    # lib.parsefilelist yields one iterable of lines per input file; chain
    # them into a single stream so files are read lazily, in order.
    lines = itertools.chain.from_iterable(lib.parsefilelist(args.FILE, True))
    for line in lines:
        sys.stdout.write(line)
|
es
| 0.08582
|
Add arguments and `func` to `p`. :param p: ArgumentParser :return: ArgumentParser
| 3.273536
| 3
|
python/oneibl/tests/test_one_tutorial.py
|
GaelleChapuis/ibllib
| 0
|
6629344
|
import oneibl.examples.tutorial_script
print(oneibl.examples.tutorial_script)
|
import oneibl.examples.tutorial_script
print(oneibl.examples.tutorial_script)
|
none
| 1
| 1.103005
| 1
|
|
packs/trello/sensors/list_actions_sensor.py
|
userlocalhost2000/st2contrib
| 164
|
6629345
|
<reponame>userlocalhost2000/st2contrib<gh_stars>100-1000
import dateutil.parser
from trello import TrelloClient
from st2reactor.sensor.base import PollingSensor
class TrelloListSensor(PollingSensor):
    """
    Sensor which monitors Trello list for a new actions (events).
    For reference see Trello API Docs:
    https://trello.com/docs/api/list/index.html#get-1-lists-idlist-actions
    """
    TRIGGER = 'trello.new_action'

    def __init__(self, sensor_service, config=None, poll_interval=None):
        """
        Set defaults and validate YAML config metadata.

        :raises ValueError: if the 'list_actions_sensor' config section
            is missing, or its 'lists' entry is empty.
        """
        super(TrelloListSensor, self).__init__(sensor_service, config, poll_interval)
        self._logger = self._sensor_service.get_logger(__name__)
        list_actions_sensor = self._config.get('list_actions_sensor')
        if not list_actions_sensor:
            # Bug fix: the message used to say "list_sensor", but the key
            # actually read from the config is "list_actions_sensor".
            raise ValueError('[TrelloListSensor]: "list_actions_sensor" config value is required!')
        self._lists = list_actions_sensor.get('lists', [])
        if not self._lists:
            raise ValueError('[TrelloListSensor]'
                             '"lists" config value should have at least one entry!')

    def setup(self):
        """
        Run the sensor initialization / setup code (if any).
        """
        pass

    def poll(self):
        """
        Iterate through all Trello lists from sensor config.
        Fetch latest actions for each Trello List, filter by type.
        Start reading feed where we stopped last time
        by passing `since` date parameter to Trello API.
        Save latest event `date` in st2 key-value storage for each Trello list.
        """
        self._logger.debug('[TrelloListSensor]: Entering into listen mode ...')
        for trello_list_config in self._lists:
            self._update_credentials_by_precedence(trello_list_config)
            # Renamed from 'l' (easily confused with '1'/'I') for readability.
            trello_list = TrelloList(**trello_list_config)
            self._logger.debug("[TrelloListSensor]: Processing queue for Trello list: '%s'"
                               % trello_list.list_id)
            actions = trello_list.fetch_actions(
                filter=trello_list_config.get('filter') or None,
                since=self._sensor_service.get_value(trello_list.key_name)
            )
            # The API returns actions newest-first; replay them reversed so
            # triggers are dispatched in chronological order.
            for action in reversed(actions):
                self._logger.debug("[TrelloListSensor]: Found new action for Trello list: '%r'"
                                   % action)
                self._sensor_service.dispatch(trigger=self.TRIGGER, payload=action)
                if is_date(action.get('date')):
                    self._sensor_service.set_value(trello_list.key_name, action.get('date'))

    def _update_credentials_by_precedence(self, trello_list_config):
        """
        Find Trello API credentials (`api_key` and `token`) from config.
        Precedence:
            1. First try to find `api_key` (list config)
            2. If not found - go to parent level (config for all lists)
            3. If not found - go to parent level (global config)
        It means that every Trello list can have its own unique API credentials to login.

        :param trello_list_config: Configuration for single Trello list
            (mutated in place with the discovered credentials)
        :type trello_list_config: ``dict``

        :rtype: ``None``
        """
        if not trello_list_config.get('api_key'):
            found_credentials = self._config['list_actions_sensor']\
                if self._config['list_actions_sensor'].get('api_key') else self._config
            trello_list_config['api_key'] = found_credentials.get('api_key')
            trello_list_config['token'] = found_credentials.get('token')

    def cleanup(self):
        """
        Run the sensor cleanup code (if any).
        """
        pass

    def add_trigger(self, trigger):
        """
        Runs when trigger is created
        """
        pass

    def update_trigger(self, trigger):
        """
        Runs when trigger is updated
        """
        pass

    def remove_trigger(self, trigger):
        """
        Runs when trigger is deleted
        """
        pass
class TrelloList(object):
    """
    Sugar class to work with Trello Lists.
    """
    def __init__(self, board_id, list_id, api_key, token=None, **kwargs):
        """
        Validate inputs and connect to Trello API.
        Exception is thrown if input details are not correct.
        :param board_id: Trello board ID where the List is located
        :type board_id: ``str``
        :param list_id: Trello List ID itself
        :type list_id: ``str``
        :param api_key: Trello API key
        :type api_key: ``str``
        :param token: Trello API token
        :type token: ``str``
        :param kwargs: extra per-list config entries (e.g. 'filter') that
            are accepted and ignored here so the config dict can be
            splatted directly into this constructor.
        """
        self.board_id = board_id
        self.list_id = list_id
        self.api_key = api_key
        # assume empty string '' as None
        self.token = token or None
        self.validate()
        self._client = TrelloClient(api_key=self.api_key, token=self.token)
        self._list = self._client.get_board(self.board_id).get_list(self.list_id)
    def validate(self):
        """
        Ensure that Trello list details are correct.
        Raise an exception if validation failed.
        """
        # NOTE(review): `basestring` exists only on Python 2; under Python 3
        # these asserts raise NameError. Also, asserts are stripped under
        # `python -O` — consider explicit TypeError raises instead.
        if not self.api_key:
            raise ValueError('[TrelloListSensor] "api_key" config value is required!')
        assert isinstance(self.api_key, basestring)
        if self.token:
            assert isinstance(self.token, basestring)
        if not self.board_id:
            raise ValueError('[TrelloListSensor]: "board_id" config value is required!')
        assert isinstance(self.board_id, basestring)
        if not self.list_id:
            raise ValueError('[TrelloListSensor]: "list_id" config value is required!')
        assert isinstance(self.list_id, basestring)
    @property
    def key_name(self):
        """
        Generate unique key name for built-in storage based on config values.
        :rtype: ``str``
        """
        return '{}.{}.date'.format(self.board_id, self.list_id)
    def fetch_actions(self, filter=None, since=None):
        """
        Fetch actions for Trello List with possibility to specify filters.
        Example API request:
        https://api.trello.com/1/lists/{list_id}/actions?filter=createCard&since=2015-09-14T21:45:56.850Z&key={key_id}&token={token_id}
        :param filter: Action types to filter, separated by comma or as a sequence.
        :type filter: ``str`` or ``list``
        :param since: Filter actions since specified date.
        :type since: ``str``
        :return: Events occurred in Trello list.
        :rtype: ``list`` of ``dict``
        """
        return self._client.fetch_json(
            '/lists/' + self._list.id + '/actions',
            query_params={
                'filter': filter,
                'since': since,
            })
def is_date(string):
    """
    Check if input string is date-formatted.
    :param string: Input date
    :type string: ``str``
    :rtype: ``bool``
    """
    try:
        dateutil.parser.parse(string)
        return True
    except (ValueError, OverflowError, TypeError):
        # ValueError/OverflowError: not a parseable date string.
        # TypeError: callers pass action.get('date'), which may be None;
        # dateutil raises TypeError for non-string input, which previously
        # escaped this handler and crashed the poll cycle.
        return False
|
import dateutil.parser
from trello import TrelloClient
from st2reactor.sensor.base import PollingSensor
class TrelloListSensor(PollingSensor):
    """
    Sensor which monitors Trello list for a new actions (events).
    For reference see Trello API Docs:
    https://trello.com/docs/api/list/index.html#get-1-lists-idlist-actions
    """
    TRIGGER = 'trello.new_action'

    def __init__(self, sensor_service, config=None, poll_interval=None):
        """
        Set defaults and validate YAML config metadata.

        :raises ValueError: if the 'list_actions_sensor' config section
            is missing, or its 'lists' entry is empty.
        """
        super(TrelloListSensor, self).__init__(sensor_service, config, poll_interval)
        self._logger = self._sensor_service.get_logger(__name__)
        list_actions_sensor = self._config.get('list_actions_sensor')
        if not list_actions_sensor:
            # Bug fix: the message used to say "list_sensor", but the key
            # actually read from the config is "list_actions_sensor".
            raise ValueError('[TrelloListSensor]: "list_actions_sensor" config value is required!')
        self._lists = list_actions_sensor.get('lists', [])
        if not self._lists:
            raise ValueError('[TrelloListSensor]'
                             '"lists" config value should have at least one entry!')

    def setup(self):
        """
        Run the sensor initialization / setup code (if any).
        """
        pass

    def poll(self):
        """
        Iterate through all Trello lists from sensor config.
        Fetch latest actions for each Trello List, filter by type.
        Start reading feed where we stopped last time
        by passing `since` date parameter to Trello API.
        Save latest event `date` in st2 key-value storage for each Trello list.
        """
        self._logger.debug('[TrelloListSensor]: Entering into listen mode ...')
        for trello_list_config in self._lists:
            self._update_credentials_by_precedence(trello_list_config)
            # Renamed from 'l' (easily confused with '1'/'I') for readability.
            trello_list = TrelloList(**trello_list_config)
            self._logger.debug("[TrelloListSensor]: Processing queue for Trello list: '%s'"
                               % trello_list.list_id)
            actions = trello_list.fetch_actions(
                filter=trello_list_config.get('filter') or None,
                since=self._sensor_service.get_value(trello_list.key_name)
            )
            # The API returns actions newest-first; replay them reversed so
            # triggers are dispatched in chronological order.
            for action in reversed(actions):
                self._logger.debug("[TrelloListSensor]: Found new action for Trello list: '%r'"
                                   % action)
                self._sensor_service.dispatch(trigger=self.TRIGGER, payload=action)
                if is_date(action.get('date')):
                    self._sensor_service.set_value(trello_list.key_name, action.get('date'))

    def _update_credentials_by_precedence(self, trello_list_config):
        """
        Find Trello API credentials (`api_key` and `token`) from config.
        Precedence:
            1. First try to find `api_key` (list config)
            2. If not found - go to parent level (config for all lists)
            3. If not found - go to parent level (global config)
        It means that every Trello list can have its own unique API credentials to login.

        :param trello_list_config: Configuration for single Trello list
            (mutated in place with the discovered credentials)
        :type trello_list_config: ``dict``

        :rtype: ``None``
        """
        if not trello_list_config.get('api_key'):
            found_credentials = self._config['list_actions_sensor']\
                if self._config['list_actions_sensor'].get('api_key') else self._config
            trello_list_config['api_key'] = found_credentials.get('api_key')
            trello_list_config['token'] = found_credentials.get('token')

    def cleanup(self):
        """
        Run the sensor cleanup code (if any).
        """
        pass

    def add_trigger(self, trigger):
        """
        Runs when trigger is created
        """
        pass

    def update_trigger(self, trigger):
        """
        Runs when trigger is updated
        """
        pass

    def remove_trigger(self, trigger):
        """
        Runs when trigger is deleted
        """
        pass
class TrelloList(object):
    """
    Sugar class to work with Trello Lists.
    """
    def __init__(self, board_id, list_id, api_key, token=None, **kwargs):
        """
        Validate inputs and connect to Trello API.
        Exception is thrown if input details are not correct.
        :param board_id: Trello board ID where the List is located
        :type board_id: ``str``
        :param list_id: Trello List ID itself
        :type list_id: ``str``
        :param api_key: Trello API key
        :type api_key: ``str``
        :param token: Trello API token
        :type token: ``str``
        :param kwargs: extra per-list config entries (e.g. 'filter') that
            are accepted and ignored here so the config dict can be
            splatted directly into this constructor.
        """
        self.board_id = board_id
        self.list_id = list_id
        self.api_key = api_key
        # assume empty string '' as None
        self.token = token or None
        self.validate()
        self._client = TrelloClient(api_key=self.api_key, token=self.token)
        self._list = self._client.get_board(self.board_id).get_list(self.list_id)
    def validate(self):
        """
        Ensure that Trello list details are correct.
        Raise an exception if validation failed.
        """
        # NOTE(review): `basestring` exists only on Python 2; under Python 3
        # these asserts raise NameError. Also, asserts are stripped under
        # `python -O` — consider explicit TypeError raises instead.
        if not self.api_key:
            raise ValueError('[TrelloListSensor] "api_key" config value is required!')
        assert isinstance(self.api_key, basestring)
        if self.token:
            assert isinstance(self.token, basestring)
        if not self.board_id:
            raise ValueError('[TrelloListSensor]: "board_id" config value is required!')
        assert isinstance(self.board_id, basestring)
        if not self.list_id:
            raise ValueError('[TrelloListSensor]: "list_id" config value is required!')
        assert isinstance(self.list_id, basestring)
    @property
    def key_name(self):
        """
        Generate unique key name for built-in storage based on config values.
        :rtype: ``str``
        """
        return '{}.{}.date'.format(self.board_id, self.list_id)
    def fetch_actions(self, filter=None, since=None):
        """
        Fetch actions for Trello List with possibility to specify filters.
        Example API request:
        https://api.trello.com/1/lists/{list_id}/actions?filter=createCard&since=2015-09-14T21:45:56.850Z&key={key_id}&token={token_id}
        :param filter: Action types to filter, separated by comma or as a sequence.
        :type filter: ``str`` or ``list``
        :param since: Filter actions since specified date.
        :type since: ``str``
        :return: Events occurred in Trello list.
        :rtype: ``list`` of ``dict``
        """
        return self._client.fetch_json(
            '/lists/' + self._list.id + '/actions',
            query_params={
                'filter': filter,
                'since': since,
            })
def is_date(string):
    """
    Check if input string is date-formatted.
    :param string: Input date
    :type string: ``str``
    :rtype: ``bool``
    """
    try:
        dateutil.parser.parse(string)
        return True
    except (ValueError, OverflowError, TypeError):
        # ValueError/OverflowError: not a parseable date string.
        # TypeError: callers pass action.get('date'), which may be None;
        # dateutil raises TypeError for non-string input, which previously
        # escaped this handler and crashed the poll cycle.
        return False
|
en
| 0.68544
|
Sensor which monitors Trello list for a new actions (events). For reference see Trello API Docs: https://trello.com/docs/api/list/index.html#get-1-lists-idlist-actions Set defaults and validate YAML config metadata. Run the sensor initialization / setup code (if any). Iterate through all Trello lists from sensor config. Fetch latest actions for each Trello List, filter by type. Start reading feed where we stopped last time by passing `since` date parameter to Trello API. Save latest event `date` in st2 key-value storage for each Trello list. Find Trello API credentials (`api_token` and `token`) from config. Precedence: 1. First try to find `api_key` (list config) 2. If not found - go to parent level (config for all lists) 3. If not found - go to parent level (global config) It means that every Trello list can have its own unique API credentials to login. :param trello_list_config: Configuration for single Trello list :type trello_list_config: ``dict`` :rtype: ``None`` Run the sensor cleanup code (if any). Runs when trigger is created Runs when trigger is updated Runs when trigger is deleted Sugar class to work with Trello Lists. Validate inputs and connect to Trello API. Exception is thrown if input details are not correct. :param board_id: Trello board ID where the List is located :type board_id: ``str`` :param list_id: Trello List ID itself :type list_id: ``str`` :param api_key: Trello API key :type api_key: ``str`` :param token: Trello API token :type token: ``str`` # assume empty string '' as None Ensure that Trello list details are correct. Raise an exception if validation failed. Generate unique key name for built-in storage based on config values. :rtype: ``str`` Fetch actions for Trello List with possibility to specify filters. Example API request: https://api.trello.com/1/lists/{list_id}/actions?filter=createCard&since=2015-09-14T21:45:56.850Z&key={key_id}&token={token_id} :param filter: Action types to filter, separated by comma or as a sequence. 
:type filter: ``str`` or ``list`` :param since: Filter actions since specified date. :type since: ``str`` :return: Events occurred in Trello list. :rtype: ``list`` of ``dict`` Check if input string is date-formatted. :param string: Input date :type string: ``str`` :rtype: ``bool``
| 2.914999
| 3
|
client.py
|
mjsir911/py-piemessage
| 0
|
6629346
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import uuid
import socket
import time
__appname__ = "pymessage"
__author__ = "<NAME>, <NAME>"
__copyright__ = ""
__credits__ = "<NAME>, <NAME>"
__license__ = "new BSD 3-Clause"
__version__ = "0.0.3"
__maintainers__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Prototype"
__module__ = ""
# Server endpoint and the last message GUID we have seen ('0' = none yet).
address = ('localhost', 5350)
lguid = '0'
def connect():
    """Connect to the pymessage server, identify this client by MAC
    address, send the latest known GUID, and print the server's reply.

    The socket is closed even if a send/recv raises (previously it leaked
    on any exception because close() was only reached on success).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect(address)
        # Identify this client by its hardware (MAC) address. The original
        # also appended bytes(False) here, which is b'' — a no-op, dropped.
        sock.send((hex(uuid.getnode()) + '\n').encode())
        print("sent")
        sock.send(lguid.encode())
        print('sent latest guid: {}'.format(lguid))
        # Read until the server closes the connection (recv returns b'').
        # Collect chunks and join once instead of quadratic str +=.
        chunks = []
        while True:
            msg = sock.recv(16).decode()  # low byte count for whatever reason
            if not msg:
                break
            chunks.append(msg)
        print('received message: {}'.format(''.join(chunks)))
    finally:
        sock.close()
connect()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import uuid
import socket
import time
__appname__ = "pymessage"
__author__ = "<NAME>, <NAME>"
__copyright__ = ""
__credits__ = "<NAME>, <NAME>"
__license__ = "new BSD 3-Clause"
__version__ = "0.0.3"
__maintainers__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__status__ = "Prototype"
__module__ = ""
# Server endpoint and the last message GUID we have seen ('0' = none yet).
address = ('localhost', 5350)
lguid = '0'
def connect():
    """Connect to the pymessage server, identify this client by MAC
    address, send the latest known GUID, and print the server's reply.

    The socket is closed even if a send/recv raises (previously it leaked
    on any exception because close() was only reached on success).
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect(address)
        # Identify this client by its hardware (MAC) address. The original
        # also appended bytes(False) here, which is b'' — a no-op, dropped.
        sock.send((hex(uuid.getnode()) + '\n').encode())
        print("sent")
        sock.send(lguid.encode())
        print('sent latest guid: {}'.format(lguid))
        # Read until the server closes the connection (recv returns b'').
        # Collect chunks and join once instead of quadratic str +=.
        chunks = []
        while True:
            msg = sock.recv(16).decode()  # low byte count for whatever reason
            if not msg:
                break
            chunks.append(msg)
        print('received message: {}'.format(''.join(chunks)))
    finally:
        sock.close()
connect()
|
en
| 0.510271
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # ik this is such BAD CODE # contents = "latest guid +5: {}".format(lguid + '5') # low byte count for whatever reason #print('mes rec: {}'.format(msg))
| 2.301695
| 2
|
aries_cloudagent/commands/tests/test_start.py
|
VersesGitHub/aries-cloudagent-python
| 1
|
6629347
|
import sys
from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from ...config.error import ArgsParseError
from .. import start as test_module
class TestStart(AsyncTestCase):
    """Tests for the aca-py 'start' command module."""

    def test_bad_args(self):
        """Empty args raise ArgsParseError; unknown args exit via argparse."""
        with self.assertRaises(ArgsParseError):
            test_module.execute([])
        with self.assertRaises(SystemExit):
            test_module.execute(["bad"])

    async def test_start_shutdown_app(self):
        """start_app/shutdown_app accept a conductor with async lifecycle hooks."""
        mock_conductor = async_mock.MagicMock(
            setup=async_mock.CoroutineMock(),
            start=async_mock.CoroutineMock(),
            stop=async_mock.CoroutineMock(),
        )
        await test_module.start_app(mock_conductor)
        await test_module.shutdown_app(mock_conductor)

    def test_exec_start(self):
        """execute() builds a Conductor and runs the start/shutdown loop."""
        with async_mock.patch.object(
            test_module, "start_app", autospec=True
        ) as start_app, async_mock.patch.object(
            test_module, "run_loop"
        ) as run_loop, async_mock.patch.object(
            test_module, "shutdown_app", autospec=True
        ) as shutdown_app, async_mock.patch.object(
            test_module, "uvloop", async_mock.MagicMock()
        ) as mock_uvloop:
            mock_uvloop.install = async_mock.MagicMock()
            test_module.execute(
                [
                    "-it",
                    "http",
                    "0.0.0.0",
                    "80",
                    "-ot",
                    "http",
                    "--endpoint",
                    "0.0.0.0",
                    "80",
                    "--no-ledger",
                ]
            )
            start_app.assert_called_once()
            assert isinstance(start_app.call_args[0][0], test_module.Conductor)
            shutdown_app.assert_called_once()
            assert isinstance(shutdown_app.call_args[0][0], test_module.Conductor)
            run_loop.assert_called_once()

    async def test_run_loop(self):
        """run_loop awaits startup, then shutdown cancels remaining tasks."""
        startup = async_mock.CoroutineMock()
        startup_call = startup()
        shutdown = async_mock.CoroutineMock()
        shutdown_call = shutdown()
        with async_mock.patch.object(
            test_module, "asyncio", autospec=True
        ) as mock_asyncio:
            test_module.run_loop(startup_call, shutdown_call)
            mock_add = mock_asyncio.get_event_loop.return_value.add_signal_handler
            mock_add.assert_called_once()
            init_coro = mock_asyncio.ensure_future.call_args[0][0]
            mock_asyncio.get_event_loop.return_value.run_forever.assert_called_once()
            await init_coro
            startup.assert_awaited_once()
            done_calls = (
                mock_asyncio.get_event_loop.return_value.add_signal_handler.call_args
            )
            done_calls[0][1]()  # exec partial
            done_coro = mock_asyncio.ensure_future.call_args[0][0]
            tasks = [
                async_mock.MagicMock(),
                async_mock.MagicMock(cancel=async_mock.MagicMock()),
            ]
            mock_asyncio.gather = async_mock.CoroutineMock()
            # asyncio.Task.all_tasks/current_task moved to module level in 3.7+
            if sys.version_info.major == 3 and sys.version_info.minor > 6:
                mock_asyncio.all_tasks.return_value = tasks
                mock_asyncio.current_task.return_value = tasks[0]
            else:
                mock_asyncio.Task.all_tasks.return_value = tasks
                mock_asyncio.Task.current_task.return_value = tasks[0]
            await done_coro
            shutdown.assert_awaited_once()

    async def test_run_loop_init_x(self):
        """A failing startup coroutine is logged and still shuts down cleanly."""
        startup = async_mock.CoroutineMock(side_effect=KeyError("the front fell off"))
        startup_call = startup()
        shutdown = async_mock.CoroutineMock()
        shutdown_call = shutdown()
        with async_mock.patch.object(
            test_module, "asyncio", autospec=True
        ) as mock_asyncio, async_mock.patch.object(
            test_module, "LOGGER", autospec=True
        ) as mock_logger:
            test_module.run_loop(startup_call, shutdown_call)
            mock_add = mock_asyncio.get_event_loop.return_value.add_signal_handler
            mock_add.assert_called_once()
            init_coro = mock_asyncio.ensure_future.call_args[0][0]
            mock_asyncio.get_event_loop.return_value.run_forever.assert_called_once()
            await init_coro
            startup.assert_awaited_once()
            done_calls = (
                mock_asyncio.get_event_loop.return_value.add_signal_handler.call_args
            )
            done_calls[0][1]()  # exec partial
            done_coro = mock_asyncio.ensure_future.call_args[0][0]
            task = async_mock.MagicMock()
            mock_asyncio.gather = async_mock.CoroutineMock()
            if sys.version_info.major == 3 and sys.version_info.minor > 6:
                mock_asyncio.all_tasks.return_value = [task]
                mock_asyncio.current_task.return_value = task
            else:
                mock_asyncio.Task.all_tasks.return_value = [task]
                mock_asyncio.Task.current_task.return_value = task
            await done_coro
            shutdown.assert_awaited_once()
            mock_logger.exception.assert_called_once()

    def test_main(self):
        """main() delegates to execute() when run as __main__."""
        with async_mock.patch.object(
            test_module, "__name__", "__main__"
        ) as mock_name, async_mock.patch.object(
            test_module, "execute", async_mock.MagicMock()
        ) as mock_execute:
            test_module.main()
            # Bug fix: this was 'mock_execute.assert_called_once' without
            # parentheses, which only fetched the bound method and asserted
            # nothing at all.
            mock_execute.assert_called_once()
|
import sys
from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from ...config.error import ArgsParseError
from .. import start as test_module
class TestStart(AsyncTestCase):
    """Tests for the aca-py 'start' command module."""

    def test_bad_args(self):
        """Empty args raise ArgsParseError; unknown args exit via argparse."""
        with self.assertRaises(ArgsParseError):
            test_module.execute([])
        with self.assertRaises(SystemExit):
            test_module.execute(["bad"])

    async def test_start_shutdown_app(self):
        """start_app/shutdown_app accept a conductor with async lifecycle hooks."""
        mock_conductor = async_mock.MagicMock(
            setup=async_mock.CoroutineMock(),
            start=async_mock.CoroutineMock(),
            stop=async_mock.CoroutineMock(),
        )
        await test_module.start_app(mock_conductor)
        await test_module.shutdown_app(mock_conductor)

    def test_exec_start(self):
        """execute() builds a Conductor and runs the start/shutdown loop."""
        with async_mock.patch.object(
            test_module, "start_app", autospec=True
        ) as start_app, async_mock.patch.object(
            test_module, "run_loop"
        ) as run_loop, async_mock.patch.object(
            test_module, "shutdown_app", autospec=True
        ) as shutdown_app, async_mock.patch.object(
            test_module, "uvloop", async_mock.MagicMock()
        ) as mock_uvloop:
            mock_uvloop.install = async_mock.MagicMock()
            test_module.execute(
                [
                    "-it",
                    "http",
                    "0.0.0.0",
                    "80",
                    "-ot",
                    "http",
                    "--endpoint",
                    "0.0.0.0",
                    "80",
                    "--no-ledger",
                ]
            )
            start_app.assert_called_once()
            assert isinstance(start_app.call_args[0][0], test_module.Conductor)
            shutdown_app.assert_called_once()
            assert isinstance(shutdown_app.call_args[0][0], test_module.Conductor)
            run_loop.assert_called_once()

    async def test_run_loop(self):
        """run_loop awaits startup, then shutdown cancels remaining tasks."""
        startup = async_mock.CoroutineMock()
        startup_call = startup()
        shutdown = async_mock.CoroutineMock()
        shutdown_call = shutdown()
        with async_mock.patch.object(
            test_module, "asyncio", autospec=True
        ) as mock_asyncio:
            test_module.run_loop(startup_call, shutdown_call)
            mock_add = mock_asyncio.get_event_loop.return_value.add_signal_handler
            mock_add.assert_called_once()
            init_coro = mock_asyncio.ensure_future.call_args[0][0]
            mock_asyncio.get_event_loop.return_value.run_forever.assert_called_once()
            await init_coro
            startup.assert_awaited_once()
            done_calls = (
                mock_asyncio.get_event_loop.return_value.add_signal_handler.call_args
            )
            done_calls[0][1]()  # exec partial
            done_coro = mock_asyncio.ensure_future.call_args[0][0]
            tasks = [
                async_mock.MagicMock(),
                async_mock.MagicMock(cancel=async_mock.MagicMock()),
            ]
            mock_asyncio.gather = async_mock.CoroutineMock()
            # asyncio.Task.all_tasks/current_task moved to module level in 3.7+
            if sys.version_info.major == 3 and sys.version_info.minor > 6:
                mock_asyncio.all_tasks.return_value = tasks
                mock_asyncio.current_task.return_value = tasks[0]
            else:
                mock_asyncio.Task.all_tasks.return_value = tasks
                mock_asyncio.Task.current_task.return_value = tasks[0]
            await done_coro
            shutdown.assert_awaited_once()

    async def test_run_loop_init_x(self):
        """A failing startup coroutine is logged and still shuts down cleanly."""
        startup = async_mock.CoroutineMock(side_effect=KeyError("the front fell off"))
        startup_call = startup()
        shutdown = async_mock.CoroutineMock()
        shutdown_call = shutdown()
        with async_mock.patch.object(
            test_module, "asyncio", autospec=True
        ) as mock_asyncio, async_mock.patch.object(
            test_module, "LOGGER", autospec=True
        ) as mock_logger:
            test_module.run_loop(startup_call, shutdown_call)
            mock_add = mock_asyncio.get_event_loop.return_value.add_signal_handler
            mock_add.assert_called_once()
            init_coro = mock_asyncio.ensure_future.call_args[0][0]
            mock_asyncio.get_event_loop.return_value.run_forever.assert_called_once()
            await init_coro
            startup.assert_awaited_once()
            done_calls = (
                mock_asyncio.get_event_loop.return_value.add_signal_handler.call_args
            )
            done_calls[0][1]()  # exec partial
            done_coro = mock_asyncio.ensure_future.call_args[0][0]
            task = async_mock.MagicMock()
            mock_asyncio.gather = async_mock.CoroutineMock()
            if sys.version_info.major == 3 and sys.version_info.minor > 6:
                mock_asyncio.all_tasks.return_value = [task]
                mock_asyncio.current_task.return_value = task
            else:
                mock_asyncio.Task.all_tasks.return_value = [task]
                mock_asyncio.Task.current_task.return_value = task
            await done_coro
            shutdown.assert_awaited_once()
            mock_logger.exception.assert_called_once()

    def test_main(self):
        """main() delegates to execute() when run as __main__."""
        with async_mock.patch.object(
            test_module, "__name__", "__main__"
        ) as mock_name, async_mock.patch.object(
            test_module, "execute", async_mock.MagicMock()
        ) as mock_execute:
            test_module.main()
            # Bug fix: this was 'mock_execute.assert_called_once' without
            # parentheses, which only fetched the bound method and asserted
            # nothing at all.
            mock_execute.assert_called_once()
|
en
| 0.676316
|
# exec partial # exec partial
| 2.314826
| 2
|
userbot/modules/animasi2.py
|
Yansaii/Bdrl-Ubot
| 0
|
6629348
|
<reponame>Yansaii/Bdrl-Ubot
from time import sleep
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern='^.sadboy(?: |$)(.*)')
async def typewriter(typew):
    """Edit the trigger message through a three-step 'sadboy' animation."""
    import asyncio  # local import; module-level imports are outside this block
    typew.pattern_match.group(1)
    # Bug fix: time.sleep() in an async handler blocks the entire event
    # loop, freezing every other handler while the animation runs; use
    # asyncio.sleep and await it instead.
    await asyncio.sleep(2)
    await typew.edit("`Pertama-tama kamu cantik`")
    await asyncio.sleep(2)
    await typew.edit("`Kedua kamu manis`")
    await asyncio.sleep(1)
    await typew.edit("`Dan yang terakhir kamu kek anjing`")
# Create by myself @localheart
@register(outgoing=True, pattern='^.punten(?: |$)(.*)')
async def typewriter(typew):
    # Replace the trigger message with an ASCII-art figure saying 'Punten'
    # (Sundanese for 'excuse me').
    typew.pattern_match.group(1)
    await typew.edit("`\n┻┳|―-∩`"
                     "`\n┳┻| ヽ`"
                     "`\n┻┳| ● |`"
                     "`\n┳┻|▼) _ノ`"
                     "`\n┻┳| ̄ )`"
                     "`\n┳ミ( ̄ /`"
                     "`\n┻┳T ̄|`"
                     "\n**Punten**")
# Create by myself @localheart
@register(outgoing=True, pattern='^.pantau(?: |$)(.*)')
async def typewriter(typew):
    # Replace the trigger message with an ASCII-art figure saying
    # 'Masih Ku Pantau' (Indonesian: 'I am still watching').
    typew.pattern_match.group(1)
    await typew.edit("`\n┻┳|―-∩`"
                     "`\n┳┻| ヽ`"
                     "`\n┻┳| ● |`"
                     "`\n┳┻|▼) _ノ`"
                     "`\n┻┳| ̄ )`"
                     "`\n┳ミ( ̄ /`"
                     "`\n┻┳T ̄|`"
                     "\n**Masih Ku Pantau**")
# Create by myself @localheart
@register(outgoing=True, pattern='^.idiot(?: |$)(.*)')
async def typewriter(typew):
    # Replace the trigger message with large box-drawing art spelling
    # 'You Are Edot No Doubt' line by line.
    typew.pattern_match.group(1)
    await typew.edit("\n╭╮╱╱╭╮"
                     "\n┃╰╮╭╯┃"
                     "\n╰╮╰╯╭┻━┳╮╭╮"
                     "\n╱╰╮╭┫╭╮┃┃┃┃"
                     "\n╱╱┃┃┃╰╯┃╰╯┃"
                     "\n╱╱╰╯╰━━┻━━╯"
                     "\nㅤㅤㅤ"
                     "\n╭━━━╮"
                     "\n┃╭━╮┃"
                     "\n┃┃╱┃┣━┳━━╮"
                     "\n┃╰━╯┃╭┫┃━┫"
                     "\n┃╭━╮┃┃┃┃━┫"
                     "\n╰╯╱╰┻╯╰━━╯"
                     "\nㅤㅤㅤ"
                     "\n╭━━━╮╱╭╮╱╱╱╭╮"
                     "\n┃╭━━╯╱┃┃╱╱╭╯╰╮"
                     "\n┃╰━━┳━╯┣┳━┻╮╭╯"
                     "\n┃╭━━┫╭╮┣┫╭╮┃┃"
                     "\n┃╰━━┫╰╯┃┃╰╯┃╰╮"
                     "\n╰━━━┻━━┻┻━━┻━╯"
                     "\nㅤㅤㅤ"
                     "\n╭━╮╱╭╮"
                     "\n┃┃╰╮┃┃"
                     "\n┃╭╮╰╯┣━━╮"
                     "\n┃┃╰╮┃┃╭╮┃"
                     "\n┃┃╱┃┃┃╰╯┃"
                     "\n╰╯╱╰━┻━━╯"
                     "\nㅤㅤㅤ"
                     "\n╭━━━╮╱╱╱╱╱╭╮╱╭╮"
                     "\n╰╮╭╮┃╱╱╱╱╱┃┃╭╯╰╮"
                     "\n╱┃┃┃┣━━┳╮╭┫╰┻╮╭╯"
                     "\n╱┃┃┃┃╭╮┃┃┃┃╭╮┃┃"
                     "\n╭╯╰╯┃╰╯┃╰╯┃╰╯┃╰╮"
                     "\n╰━━━┻━━┻━━┻━━┻━╯")
CMD_HELP.update({
"animasi3":
"𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `.sadboy`\
\n↳ : Biasalah sadboy hikss\
\n\n𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `.punten` dan `.pantau`\
\n↳ : Coba aja hehehe.\
\n\n𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `.idiot`\
\n↳ : u're ediot xixixi.\
\n\n𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `kosong`\
\n↳ : Tunggu update selanjutnya kawan."
})
|
from time import sleep
from userbot import CMD_HELP
from userbot.events import register
@register(outgoing=True, pattern='^.sadboy(?: |$)(.*)')
async def typewriter(typew):
    """Edit the trigger message through a three-step 'sadboy' animation."""
    import asyncio  # local import; module-level imports are outside this block
    typew.pattern_match.group(1)
    # Bug fix: time.sleep() in an async handler blocks the entire event
    # loop, freezing every other handler while the animation runs; use
    # asyncio.sleep and await it instead.
    await asyncio.sleep(2)
    await typew.edit("`Pertama-tama kamu cantik`")
    await asyncio.sleep(2)
    await typew.edit("`Kedua kamu manis`")
    await asyncio.sleep(1)
    await typew.edit("`Dan yang terakhir kamu kek anjing`")
# Create by myself @localheart
@register(outgoing=True, pattern='^.punten(?: |$)(.*)')
async def typewriter(typew):
    # Replace the trigger message with an ASCII-art figure saying 'Punten'
    # (Sundanese for 'excuse me').
    typew.pattern_match.group(1)
    await typew.edit("`\n┻┳|―-∩`"
                     "`\n┳┻| ヽ`"
                     "`\n┻┳| ● |`"
                     "`\n┳┻|▼) _ノ`"
                     "`\n┻┳| ̄ )`"
                     "`\n┳ミ( ̄ /`"
                     "`\n┻┳T ̄|`"
                     "\n**Punten**")
# Create by myself @localheart
@register(outgoing=True, pattern='^.pantau(?: |$)(.*)')
async def typewriter(typew):
    # Replace the trigger message with an ASCII-art figure saying
    # 'Masih Ku Pantau' (Indonesian: 'I am still watching').
    typew.pattern_match.group(1)
    await typew.edit("`\n┻┳|―-∩`"
                     "`\n┳┻| ヽ`"
                     "`\n┻┳| ● |`"
                     "`\n┳┻|▼) _ノ`"
                     "`\n┻┳| ̄ )`"
                     "`\n┳ミ( ̄ /`"
                     "`\n┻┳T ̄|`"
                     "\n**Masih Ku Pantau**")
# Create by myself @localheart
@register(outgoing=True, pattern='^.idiot(?: |$)(.*)')
async def typewriter(typew):
    # Replace the trigger message with large box-drawing art spelling
    # 'You Are Edot No Doubt' line by line.
    typew.pattern_match.group(1)
    await typew.edit("\n╭╮╱╱╭╮"
                     "\n┃╰╮╭╯┃"
                     "\n╰╮╰╯╭┻━┳╮╭╮"
                     "\n╱╰╮╭┫╭╮┃┃┃┃"
                     "\n╱╱┃┃┃╰╯┃╰╯┃"
                     "\n╱╱╰╯╰━━┻━━╯"
                     "\nㅤㅤㅤ"
                     "\n╭━━━╮"
                     "\n┃╭━╮┃"
                     "\n┃┃╱┃┣━┳━━╮"
                     "\n┃╰━╯┃╭┫┃━┫"
                     "\n┃╭━╮┃┃┃┃━┫"
                     "\n╰╯╱╰┻╯╰━━╯"
                     "\nㅤㅤㅤ"
                     "\n╭━━━╮╱╭╮╱╱╱╭╮"
                     "\n┃╭━━╯╱┃┃╱╱╭╯╰╮"
                     "\n┃╰━━┳━╯┣┳━┻╮╭╯"
                     "\n┃╭━━┫╭╮┣┫╭╮┃┃"
                     "\n┃╰━━┫╰╯┃┃╰╯┃╰╮"
                     "\n╰━━━┻━━┻┻━━┻━╯"
                     "\nㅤㅤㅤ"
                     "\n╭━╮╱╭╮"
                     "\n┃┃╰╮┃┃"
                     "\n┃╭╮╰╯┣━━╮"
                     "\n┃┃╰╮┃┃╭╮┃"
                     "\n┃┃╱┃┃┃╰╯┃"
                     "\n╰╯╱╰━┻━━╯"
                     "\nㅤㅤㅤ"
                     "\n╭━━━╮╱╱╱╱╱╭╮╱╭╮"
                     "\n╰╮╭╮┃╱╱╱╱╱┃┃╭╯╰╮"
                     "\n╱┃┃┃┣━━┳╮╭┫╰┻╮╭╯"
                     "\n╱┃┃┃┃╭╮┃┃┃┃╭╮┃┃"
                     "\n╭╯╰╯┃╰╯┃╰╯┃╰╯┃╰╮"
                     "\n╰━━━┻━━┻━━┻━━┻━╯")
CMD_HELP.update({
"animasi3":
"𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `.sadboy`\
\n↳ : Biasalah sadboy hikss\
\n\n𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `.punten` dan `.pantau`\
\n↳ : Coba aja hehehe.\
\n\n𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `.idiot`\
\n↳ : u're ediot xixixi.\
\n\n𝘾𝙤𝙢𝙢𝙖𝙣𝙙: `kosong`\
\n↳ : Tunggu update selanjutnya kawan."
})
|
en
| 0.892644
|
# Create by myself @localheart # Create by myself @localheart # Create by myself @localheart
| 2.457355
| 2
|
adv/berserker.py
|
pfleg/dl
| 0
|
6629349
|
from core.advbase import *
from slot.a import *
from slot.d import *
def module():
    """Return the adventurer class this file provides (simulator loader hook)."""
    return Berserker
class Berserker(Adv):
    # Ability-3 tuple — presumably ('lo', 0.3) keys a conditional modifier
    # in the Adv base class; TODO confirm semantics against core.advbase.
    a3 = ('lo',0.3)
    conf = {}
    # Wyrmprint slots for the default and poison configurations.
    conf['slots.a'] = The_Shining_Overlord()+Primal_Crisis()
    conf['slots.poison.a'] = The_Shining_Overlord()+Primal_Crisis()
    # Action priority list (ACL) interpreted by the simulator's parser.
    conf['acl'] = """
        `dragon.act('c3 s end'),fsc
        `s3, not self.s3_buff and fsc
        `s4
        `s1,fsc
        `fs, x=3
    """
    # Co-ability and skill-share companions for this run.
    coab = ['Berserker','Ieyasu','Wand','Curran']
    share = ['Curran']
    def s1_proc(self, e):
        # Skill 1 applies a Debuff on proc; argument meanings (value,
        # duration, chance, type) are presumed from the Debuff signature —
        # TODO confirm against core.advbase.
        Debuff(e.name, 0.05, 10, 0.4, 'attack')
if __name__ == '__main__':
    # 'sys' is never imported explicitly in this file; it was only reachable
    # via the star imports above. Import it locally so the CLI entry point
    # does not depend on what core.advbase happens to re-export.
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
|
from core.advbase import *
from slot.a import *
from slot.d import *
def module():
    """Return the adventurer class this file provides (simulator loader hook)."""
    return Berserker
class Berserker(Adv):
    # Ability-3 tuple — presumably ('lo', 0.3) keys a conditional modifier
    # in the Adv base class; TODO confirm semantics against core.advbase.
    a3 = ('lo',0.3)
    conf = {}
    # Wyrmprint slots for the default and poison configurations.
    conf['slots.a'] = The_Shining_Overlord()+Primal_Crisis()
    conf['slots.poison.a'] = The_Shining_Overlord()+Primal_Crisis()
    # Action priority list (ACL) interpreted by the simulator's parser.
    conf['acl'] = """
        `dragon.act('c3 s end'),fsc
        `s3, not self.s3_buff and fsc
        `s4
        `s1,fsc
        `fs, x=3
    """
    # Co-ability and skill-share companions for this run.
    coab = ['Berserker','Ieyasu','Wand','Curran']
    share = ['Curran']
    def s1_proc(self, e):
        # Skill 1 applies a Debuff on proc; argument meanings (value,
        # duration, chance, type) are presumed from the Debuff signature —
        # TODO confirm against core.advbase.
        Debuff(e.name, 0.05, 10, 0.4, 'attack')
if __name__ == '__main__':
    # 'sys' is never imported explicitly in this file; it was only reachable
    # via the star imports above. Import it locally so the CLI entry point
    # does not depend on what core.advbase happens to re-export.
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
|
en
| 0.392334
|
`dragon.act('c3 s end'),fsc `s3, not self.s3_buff and fsc `s4 `s1,fsc `fs, x=3
| 2.000803
| 2
|
lab_5/25.py
|
Mmalikov1337/python_labs
| 0
|
6629350
|
a = int(input('Enter number: '))
n = int(input('Enter degree: '))
# Accumulate from 1 so that n == 0 correctly yields 1. The original code
# started from 'a' and multiplied n-1 times, which printed 'a' itself for
# n == 0 — the wrong value for a zeroth power. (Negative degrees are not
# supported; they also yield 1 here.)
result = 1
for k in range(n):
    result *= a
print(result)
|
a = int(input('Enter number: '))
n = int(input('Enter degree: '))
# Accumulate from 1 so that n == 0 correctly yields 1. The original code
# started from 'a' and multiplied n-1 times, which printed 'a' itself for
# n == 0 — the wrong value for a zeroth power. (Negative degrees are not
# supported; they also yield 1 here.)
result = 1
for k in range(n):
    result *= a
print(result)
|
none
| 1
| 3.942759
| 4
|