after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
num_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
g = data.graph
# add self loop
g.remove_edges_from(nx.selfloop_edges(g))
g = DGLGraph(g)
g.add_edges(g.nodes(), g.nodes())
n_edges = g.number_of_edges()
# create model
heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
model = GAT(
g,
args.num_layers,
num_feats,
args.num_hidden,
n_classes,
heads,
F.elu,
args.in_drop,
args.attn_drop,
args.negative_slope,
args.residual,
)
print(model)
stopper = EarlyStopping(patience=100)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# initialize graph
dur = []
for epoch in range(args.epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
train_acc = accuracy(logits[train_mask], labels[train_mask])
if args.fastmode:
val_acc = accuracy(logits[val_mask], labels[val_mask])
else:
val_acc = evaluate(model, features, labels, val_mask)
if stopper.step(val_acc, model):
break
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | TrainAcc {:.4f} |"
" ValAcc {:.4f} | ETputs(KTEPS) {:.2f}".format(
epoch,
np.mean(dur),
loss.item(),
train_acc,
val_acc,
n_edges / np.mean(dur) / 1000,
)
)
print()
model.load_state_dict(torch.load("es_checkpoint.pt"))
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
num_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
g = data.graph
# add self loop
g.remove_edges_from(g.selfloop_edges())
g = DGLGraph(g)
g.add_edges(g.nodes(), g.nodes())
n_edges = g.number_of_edges()
# create model
heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
model = GAT(
g,
args.num_layers,
num_feats,
args.num_hidden,
n_classes,
heads,
F.elu,
args.in_drop,
args.attn_drop,
args.negative_slope,
args.residual,
)
print(model)
stopper = EarlyStopping(patience=100)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# initialize graph
dur = []
for epoch in range(args.epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
train_acc = accuracy(logits[train_mask], labels[train_mask])
if args.fastmode:
val_acc = accuracy(logits[val_mask], labels[val_mask])
else:
val_acc = evaluate(model, features, labels, val_mask)
if stopper.step(val_acc, model):
break
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | TrainAcc {:.4f} |"
" ValAcc {:.4f} | ETputs(KTEPS) {:.2f}".format(
epoch,
np.mean(dur),
loss.item(),
train_acc,
val_acc,
n_edges / np.mean(dur) / 1000,
)
)
print()
model.load_state_dict(torch.load("es_checkpoint.pt"))
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
# graph preprocess and calculate normalization factor
g = data.graph
g.remove_edges_from(nx.selfloop_edges(g))
g = DGLGraph(g)
# add self loop
g.add_edges(g.nodes(), g.nodes())
n_edges = g.number_of_edges()
# normalization
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0
if cuda:
norm = norm.cuda()
g.ndata["norm"] = norm.unsqueeze(1)
# create GCN model
model = GCN(
g, in_feats, args.n_hidden, n_classes, args.n_layers, F.relu, args.dropout
)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
# graph preprocess and calculate normalization factor
g = data.graph
g.remove_edges_from(g.selfloop_edges())
g = DGLGraph(g)
# add self loop
g.add_edges(g.nodes(), g.nodes())
n_edges = g.number_of_edges()
# normalization
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0
if cuda:
norm = norm.cuda()
g.ndata["norm"] = norm.unsqueeze(1)
# create GCN model
model = GCN(
g, in_feats, args.n_hidden, n_classes, args.n_layers, F.relu, args.dropout
)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
# graph preprocess and calculate normalization factor
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(nx.selfloop_edges(g))
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
n_edges = g.number_of_edges()
# normalization
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0
if cuda:
norm = norm.cuda()
g.ndata["norm"] = norm.unsqueeze(1)
# create GCN model
model = GCN(
g, in_feats, args.n_hidden, n_classes, args.n_layers, F.relu, args.dropout
)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
# graph preprocess and calculate normalization factor
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(g.selfloop_edges())
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
n_edges = g.number_of_edges()
# normalization
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0
if cuda:
norm = norm.cuda()
g.ndata["norm"] = norm.unsqueeze(1)
# create GCN model
model = GCN(
g, in_feats, args.n_hidden, n_classes, args.n_layers, F.relu, args.dropout
)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
print("use cuda:", args.gpu)
# graph preprocess and calculate normalization factor
g = data.graph
g.remove_edges_from(nx.selfloop_edges(g))
g = DGLGraph(g)
n_edges = g.number_of_edges()
# create GraphSAGE model
model = GraphSAGE(
g,
in_feats,
args.n_hidden,
n_classes,
args.n_layers,
F.relu,
args.dropout,
args.aggregator_type,
)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
print("use cuda:", args.gpu)
# graph preprocess and calculate normalization factor
g = data.graph
g.remove_edges_from(g.selfloop_edges())
g = DGLGraph(g)
n_edges = g.number_of_edges()
# create GraphSAGE model
model = GraphSAGE(
g,
in_feats,
args.n_hidden,
n_classes,
args.n_layers,
F.relu,
args.dropout,
args.aggregator_type,
)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
# graph preprocess and calculate normalization factor
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(nx.selfloop_edges(g))
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
n_edges = g.number_of_edges()
# normalization
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0
if cuda:
norm = norm.cuda()
g.ndata["norm"] = norm.unsqueeze(1)
# create GCN model
GNN, config = get_model_and_config(args.model)
model = GNN(g, in_feats, n_classes, *config["extra_args"])
if cuda:
model.cuda()
print(model)
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=config["lr"], weight_decay=config["weight_decay"]
)
# initialize graph
dur = []
for epoch in range(200):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
# graph preprocess and calculate normalization factor
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(g.selfloop_edges())
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
n_edges = g.number_of_edges()
# normalization
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0
if cuda:
norm = norm.cuda()
g.ndata["norm"] = norm.unsqueeze(1)
# create GCN model
GNN, config = get_model_and_config(args.model)
model = GNN(g, in_feats, n_classes, *config["extra_args"])
if cuda:
model.cuda()
print(model)
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=config["lr"], weight_decay=config["weight_decay"]
)
# initialize graph
dur = []
for epoch in range(200):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
# graph preprocess and calculate normalization factor
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(nx.selfloop_edges(g))
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
n_edges = g.number_of_edges()
# create TAGCN model
model = TAGCN(
g, in_feats, args.n_hidden, n_classes, args.n_layers, F.relu, args.dropout
)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
if hasattr(torch, "BoolTensor"):
train_mask = torch.BoolTensor(data.train_mask)
val_mask = torch.BoolTensor(data.val_mask)
test_mask = torch.BoolTensor(data.test_mask)
else:
train_mask = torch.ByteTensor(data.train_mask)
val_mask = torch.ByteTensor(data.val_mask)
test_mask = torch.ByteTensor(data.test_mask)
in_feats = features.shape[1]
n_classes = data.num_labels
n_edges = data.graph.number_of_edges()
print(
"""----Data statistics------'
#Edges %d
#Classes %d
#Train samples %d
#Val samples %d
#Test samples %d"""
% (
n_edges,
n_classes,
train_mask.sum().item(),
val_mask.sum().item(),
test_mask.sum().item(),
)
)
if args.gpu < 0:
cuda = False
else:
cuda = True
torch.cuda.set_device(args.gpu)
features = features.cuda()
labels = labels.cuda()
train_mask = train_mask.cuda()
val_mask = val_mask.cuda()
test_mask = test_mask.cuda()
# graph preprocess and calculate normalization factor
g = data.graph
# add self loop
if args.self_loop:
g.remove_edges_from(g.selfloop_edges())
g.add_edges_from(zip(g.nodes(), g.nodes()))
g = DGLGraph(g)
n_edges = g.number_of_edges()
# create TAGCN model
model = TAGCN(
g, in_feats, args.n_hidden, n_classes, args.n_layers, F.relu, args.dropout
)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
# initialize graph
dur = []
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(features)
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc = evaluate(model, features, labels, val_mask)
print(
"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
"ETputs(KTEPS) {:.2f}".format(
epoch, np.mean(dur), loss.item(), acc, n_edges / np.mean(dur) / 1000
)
)
print()
acc = evaluate(model, features, labels, test_mask)
print("Test Accuracy {:.4f}".format(acc))
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def load_cora_data():
data = citegrh.load_cora()
features = th.FloatTensor(data.features)
labels = th.LongTensor(data.labels)
mask = th.ByteTensor(data.train_mask)
g = data.graph
# add self loop
g.remove_edges_from(nx.selfloop_edges(g))
g = DGLGraph(g)
g.add_edges(g.nodes(), g.nodes())
return g, features, labels, mask
|
def load_cora_data():
data = citegrh.load_cora()
features = th.FloatTensor(data.features)
labels = th.LongTensor(data.labels)
mask = th.ByteTensor(data.train_mask)
g = data.graph
# add self loop
g.remove_edges_from(g.selfloop_edges())
g = DGLGraph(g)
g.add_edges(g.nodes(), g.nodes())
return g, features, labels, mask
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def load_cora_data():
data = citegrh.load_cora()
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
mask = torch.ByteTensor(data.train_mask)
g = data.graph
# add self loop
g.remove_edges_from(nx.selfloop_edges(g))
g = DGLGraph(g)
g.add_edges(g.nodes(), g.nodes())
return g, features, labels, mask
|
def load_cora_data():
data = citegrh.load_cora()
features = torch.FloatTensor(data.features)
labels = torch.LongTensor(data.labels)
mask = torch.ByteTensor(data.train_mask)
g = data.graph
# add self loop
g.remove_edges_from(g.selfloop_edges())
g = DGLGraph(g)
g.add_edges(g.nodes(), g.nodes())
return g, features, labels, mask
|
https://github.com/dmlc/dgl/issues/755
|
test_shared_mem_store.test_init ... /var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_node_test4 for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_in for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_feat for shared memory
[06:09:05] /var/jenkins_home/workspace/DGL_PR-752@2/src/runtime/shared_mem.cc:32: remove /test_graph1_edge_test4 for shared memory
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: Initializer is not set. Use zero initializer instead. To suppress this warning, use `set_initializer` to explicitly specify which initializer to use.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access node data of all nodes.It's recommended to node data of a subset of nodes directly.
warnings.warn(msg, warn_type)
/var/jenkins_home/workspace/DGL_PR-752@2/python/dgl/base.py:18: UserWarning: It may not be safe to access edge data of all edges.It's recommended to edge data of a subset of edges directly.
warnings.warn(msg, warn_type)
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
Traceback (most recent call last):
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 65, in check_init_func
check_array_shared_memory(g, worker_id, [g.nodes[:].data['test4'], g.edges[:].data['test4']])
File "/var/jenkins_home/workspace/DGL_PR-752@2/tests/distributed/test_shared_mem_store.py", line 28, in check_array_shared_memory
assert_almost_equal(F.asnumpy(arr[0]), i + 10)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 567, in assert_almost_equal
return assert_array_almost_equal(actual, desired, decimal, err_msg)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 965, in assert_array_almost_equal
precision=decimal)
File "/usr/local/lib/python3.5/dist-packages/numpy/testing/nose_tools/utils.py", line 781, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not almost equal to 7 decimals
(mismatch 100.0%)
x: array([20., 20., 20., 20., 20., 20., 20., 20., 20., 20.], dtype=float32)
y: array(11)
FAIL
|
AssertionError
|
def __init__(self, graph_list, node_attrs, edge_attrs):
# create batched graph index
batched_index = gi.disjoint_union([g._graph for g in graph_list])
# create batched node and edge frames
if len(node_attrs) == 0:
batched_node_frame = FrameRef(Frame(num_rows=batched_index.number_of_nodes()))
else:
# NOTE: following code will materialize the columns of the input graphs.
cols = {
key: F.cat(
[gr._node_frame[key] for gr in graph_list if gr.number_of_nodes() > 0],
dim=0,
)
for key in node_attrs
}
batched_node_frame = FrameRef(Frame(cols))
if len(edge_attrs) == 0:
batched_edge_frame = FrameRef(Frame(num_rows=batched_index.number_of_edges()))
else:
cols = {
key: F.cat(
[gr._edge_frame[key] for gr in graph_list if gr.number_of_edges() > 0],
dim=0,
)
for key in edge_attrs
}
batched_edge_frame = FrameRef(Frame(cols))
super(BatchedDGLGraph, self).__init__(
graph_data=batched_index,
node_frame=batched_node_frame,
edge_frame=batched_edge_frame,
)
# extra members
self._batch_size = 0
self._batch_num_nodes = []
self._batch_num_edges = []
for gr in graph_list:
if isinstance(gr, BatchedDGLGraph):
# handle the input is again a batched graph.
self._batch_size += gr._batch_size
self._batch_num_nodes += gr._batch_num_nodes
self._batch_num_edges += gr._batch_num_edges
else:
self._batch_size += 1
self._batch_num_nodes.append(gr.number_of_nodes())
self._batch_num_edges.append(gr.number_of_edges())
|
def __init__(self, graph_list, node_attrs, edge_attrs):
# create batched graph index
batched_index = gi.disjoint_union([g._graph for g in graph_list])
# create batched node and edge frames
# NOTE: following code will materialize the columns of the input graphs.
cols = {
key: F.cat(
[gr._node_frame[key] for gr in graph_list if gr.number_of_nodes() > 0],
dim=0,
)
for key in node_attrs
}
batched_node_frame = FrameRef(Frame(cols))
cols = {
key: F.cat(
[gr._edge_frame[key] for gr in graph_list if gr.number_of_edges() > 0],
dim=0,
)
for key in edge_attrs
}
batched_edge_frame = FrameRef(Frame(cols))
super(BatchedDGLGraph, self).__init__(
graph_data=batched_index,
node_frame=batched_node_frame,
edge_frame=batched_edge_frame,
)
# extra members
self._batch_size = 0
self._batch_num_nodes = []
self._batch_num_edges = []
for gr in graph_list:
if isinstance(gr, BatchedDGLGraph):
# handle the input is again a batched graph.
self._batch_size += gr._batch_size
self._batch_num_nodes += gr._batch_num_nodes
self._batch_num_edges += gr._batch_num_edges
else:
self._batch_size += 1
self._batch_num_nodes.append(gr.number_of_nodes())
self._batch_num_edges.append(gr.number_of_edges())
|
https://github.com/dmlc/dgl/issues/167
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/zy1404/repos/dgl/python/dgl/view.py", line 61, in __setitem__
self._graph.set_n_repr({key : val}, self._nodes)
File "/home/zy1404/repos/dgl/python/dgl/graph.py", line 821, in set_n_repr
self._node_frame[key] = val
File "/home/zy1404/repos/dgl/python/dgl/frame.py", line 618, in __setitem__
self.update_column(key, val, inplace=False)
File "/home/zy1404/repos/dgl/python/dgl/frame.py", line 647, in update_column
self._frame[name] = col
File "/home/zy1404/repos/dgl/python/dgl/frame.py", line 291, in __setitem__
self.update_column(name, data)
File "/home/zy1404/repos/dgl/python/dgl/frame.py", line 364, in update_column
(self.num_rows, len(col)))
dgl._ffi.base.DGLError: Expected data to have 0 rows, got 7.
|
dgl._ffi.base.DGLError
|
def node_should_be_modified(self, node):
"""Checks if the import statement imports ``get_image_uri`` from the correct module.
Args:
node (ast.ImportFrom): a node that represents a ``from ... import ... `` statement.
For more, see https://docs.python.org/3/library/ast.html#abstract-grammar.
Returns:
bool: If the import statement imports ``get_image_uri`` from the correct module.
"""
return (
node is not None
and node.module in GET_IMAGE_URI_NAMESPACES
and any(name.name == GET_IMAGE_URI_NAME for name in node.names)
)
|
def node_should_be_modified(self, node):
"""Checks if the import statement imports ``get_image_uri`` from the correct module.
Args:
node (ast.ImportFrom): a node that represents a ``from ... import ... `` statement.
For more, see https://docs.python.org/3/library/ast.html#abstract-grammar.
Returns:
bool: If the import statement imports ``get_image_uri`` from the correct module.
"""
return node.module in GET_IMAGE_URI_NAMESPACES and any(
name.name == GET_IMAGE_URI_NAME for name in node.names
)
|
https://github.com/aws/sagemaker-python-sdk/issues/1847
|
❯ cat v1.py
import sagemaker
from sagemaker.predictor import csv_serializer
csv_serializer.__doc___
❯ sagemaker-upgrade-v2 --in-file v1.py --out-file v2.py
Traceback (most recent call last):
File "~/testvenv/bin/sagemaker-upgrade-v2", line 8, in <module>
sys.exit(main())
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/sagemaker_upgrade_v2.py", line 78, in main
_update_file(args.in_file, args.out_file)
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/sagemaker_upgrade_v2.py", line 50, in _update_file
updater_cls(input_path=input_file, output_path=output_file).update()
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/files.py", line 72, in update
output = self._update_ast(self._read_input_file())
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/files.py", line 86, in _update_ast
return ASTTransformer().visit(input_ast)
File "/usr/lib/python3.8/ast.py", line 363, in visit
return visitor(node)
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/ast_transformer.py", line 136, in visit_Module
self.generic_visit(node)
File "/usr/lib/python3.8/ast.py", line 439, in generic_visit
value = self.visit(value)
File "/usr/lib/python3.8/ast.py", line 363, in visit
return visitor(node)
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/ast_transformer.py", line 155, in visit_ImportFrom
node = import_checker.check_and_modify_node(node)
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/modifiers/modifier.py", line 26, in check_and_modify_node
if self.node_should_be_modified(node):
File "~/testvenv/lib/python3.8/site-packages/sagemaker/cli/compatibility/v2/modifiers/image_uris.py", line 115, in node_should_be_modified
return node.module in GET_IMAGE_URI_NAMESPACES and any(
AttributeError: 'NoneType' object has no attribute 'module'
|
AttributeError
|
def _compose(self, detached=False):
"""
Args:
detached:
"""
compose_cmd = "docker-compose"
command = [
compose_cmd,
"-f",
os.path.join(self.container_root, DOCKER_COMPOSE_FILENAME),
"up",
"--build",
"--abort-on-container-exit"
if not detached
else "--detach", # mutually exclusive
]
logger.info("docker command: %s", " ".join(command))
return command
|
def _compose(self, detached=False):
"""
Args:
detached:
"""
compose_cmd = "docker-compose"
command = [
compose_cmd,
"-f",
os.path.join(self.container_root, DOCKER_COMPOSE_FILENAME),
"up",
"--build",
"--abort-on-container-exit",
]
if detached:
command.append("-d")
logger.info("docker command: %s", " ".join(command))
return command
|
https://github.com/aws/sagemaker-python-sdk/issues/1374
|
Exception in thread Thread-4:
Traceback (most recent call last):
File "/home/user/test/venv/lib/python3.6/site-packages/sagemaker/local/image.py", line 614, in run
_stream_output(self.process)
File "/home/user/test/venv/lib/python3.6/site-packages/sagemaker/local/image.py", line 673, in _stream_output
raise RuntimeError("Process exited with code: %s" % exit_code)
RuntimeError: Process exited with code: 1
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/user/test/venv/lib/python3.6/site-packages/sagemaker/local/image.py", line 619, in run
raise RuntimeError(msg)
RuntimeError: Failed to run: ['docker-compose', '-f', '/tmp/tmpxxte1jk0/docker-compose.yaml', 'up', '--build', '--abort-on-container-exit', '-d'], Process exited with code: 1
|
RuntimeError
|
def prepare_framework_container_def(model, instance_type, s3_operations):
"""Prepare the framework model container information. Specify related S3
operations for Airflow to perform. (Upload `source_dir` )
Args:
model (sagemaker.model.FrameworkModel): The framework model
instance_type (str): The EC2 instance type to deploy this Model to. For
example, 'ml.p2.xlarge'.
s3_operations (dict): The dict to specify S3 operations (upload
`source_dir` ).
Returns:
dict: The container information of this framework model.
"""
deploy_image = model.image
if not deploy_image:
region_name = model.sagemaker_session.boto_session.region_name
deploy_image = model.serving_image_uri(region_name, instance_type)
base_name = utils.base_name_from_image(deploy_image)
model.name = model.name or utils.name_from_base(base_name)
bucket = model.bucket or model.sagemaker_session._default_bucket
script = os.path.basename(model.entry_point)
key = "{}/source/sourcedir.tar.gz".format(model.name)
if model.source_dir and model.source_dir.lower().startswith("s3://"):
code_dir = model.source_dir
model.uploaded_code = fw_utils.UploadedCode(
s3_prefix=code_dir, script_name=script
)
else:
code_dir = "s3://{}/{}".format(bucket, key)
model.uploaded_code = fw_utils.UploadedCode(
s3_prefix=code_dir, script_name=script
)
s3_operations["S3Upload"] = [
{
"Path": model.source_dir or script,
"Bucket": bucket,
"Key": key,
"Tar": True,
}
]
deploy_env = dict(model.env)
deploy_env.update(model._framework_env_vars())
try:
if model.model_server_workers:
deploy_env[sagemaker.model.MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(
model.model_server_workers
)
except AttributeError:
# This applies to a FrameworkModel which is not SageMaker Deep Learning Framework Model
pass
return sagemaker.container_def(deploy_image, model.model_data, deploy_env)
|
def prepare_framework_container_def(model, instance_type, s3_operations):
"""Prepare the framework model container information. Specify related S3
operations for Airflow to perform. (Upload `source_dir` )
Args:
model (sagemaker.model.FrameworkModel): The framework model
instance_type (str): The EC2 instance type to deploy this Model to. For
example, 'ml.p2.xlarge'.
s3_operations (dict): The dict to specify S3 operations (upload
`source_dir` ).
Returns:
dict: The container information of this framework model.
"""
deploy_image = model.image
if not deploy_image:
region_name = model.sagemaker_session.boto_session.region_name
deploy_image = fw_utils.create_image_uri(
region_name,
model.__framework_name__,
instance_type,
model.framework_version,
model.py_version,
)
base_name = utils.base_name_from_image(deploy_image)
model.name = model.name or utils.name_from_base(base_name)
bucket = model.bucket or model.sagemaker_session._default_bucket
script = os.path.basename(model.entry_point)
key = "{}/source/sourcedir.tar.gz".format(model.name)
if model.source_dir and model.source_dir.lower().startswith("s3://"):
code_dir = model.source_dir
model.uploaded_code = fw_utils.UploadedCode(
s3_prefix=code_dir, script_name=script
)
else:
code_dir = "s3://{}/{}".format(bucket, key)
model.uploaded_code = fw_utils.UploadedCode(
s3_prefix=code_dir, script_name=script
)
s3_operations["S3Upload"] = [
{
"Path": model.source_dir or script,
"Bucket": bucket,
"Key": key,
"Tar": True,
}
]
deploy_env = dict(model.env)
deploy_env.update(model._framework_env_vars())
try:
if model.model_server_workers:
deploy_env[sagemaker.model.MODEL_SERVER_WORKERS_PARAM_NAME.upper()] = str(
model.model_server_workers
)
except AttributeError:
# This applies to a FrameworkModel which is not SageMaker Deep Learning Framework Model
pass
return sagemaker.container_def(deploy_image, model.model_data, deploy_env)
|
https://github.com/aws/sagemaker-python-sdk/issues/1201
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-42-23cc1e7f4991> in <module>()
3 role=workflow_execution_role,
4 inputs=training_data_uri,
----> 5 s3_bucket=model_output_path
6 )
~/anaconda3/envs/python3/lib/python3.6/site-packages/stepfunctions/template/pipeline/train.py in __init__(self, estimator, role, inputs, s3_bucket, client, **kwargs)
64 self.pipeline_name = 'training-pipeline-{date}'.format(date=self._generate_timestamp())
65
---> 66 self.definition = self.build_workflow_definition()
67 self.input_template = self._extract_input_template(self.definition)
68
~/anaconda3/envs/python3/lib/python3.6/site-packages/stepfunctions/template/pipeline/train.py in build_workflow_definition(self)
95 instance_type=train_instance_type,
96 model=model,
---> 97 model_name=default_name
98 )
99
~/anaconda3/envs/python3/lib/python3.6/site-packages/stepfunctions/steps/sagemaker.py in __init__(self, state_id, model, model_name, instance_type, **kwargs)
171 """
172 if isinstance(model, FrameworkModel):
--> 173 parameters = model_config(model=model, instance_type=instance_type, role=model.role, image=model.image)
174 if model_name:
175 parameters['ModelName'] = model_name
~/anaconda3/envs/python3/lib/python3.6/site-packages/sagemaker/workflow/airflow.py in model_config(instance_type, model, role, image)
577
578 if isinstance(model, sagemaker.model.FrameworkModel):
--> 579 container_def = prepare_framework_container_def(model, instance_type, s3_operations)
580 else:
581 container_def = model.prepare_container_def(instance_type)
~/anaconda3/envs/python3/lib/python3.6/site-packages/sagemaker/workflow/airflow.py in prepare_framework_container_def(model, instance_type, s3_operations)
519 deploy_image = fw_utils.create_image_uri(
520 region_name,
--> 521 model.__framework_name__,
522 instance_type,
523 model.framework_version,
AttributeError: 'Model' object has no attribute '__framework_name__'
|
AttributeError
|
def _get_transform_args(self, desc, inputs, name, volume_kms_key):
"""Format training args to pass in sagemaker_session.train.
Args:
desc (dict): the response from DescribeTrainingJob API.
inputs (str): an S3 uri where new input dataset is stored.
name (str): the name of the step job.
volume_kms_key (str): The KMS key id to encrypt data on the storage volume attached to
the ML compute instance(s).
Returns (dcit): a dictionary that can be used as args of
sagemaker_session.transform method.
"""
transform_args = {}
transform_args["job_name"] = name
transform_args["model_name"] = desc["ModelName"]
transform_args["output_config"] = desc["TransformOutput"]
transform_args["resource_config"] = desc["TransformResources"]
transform_args["data_processing"] = desc["DataProcessing"]
transform_args["tags"] = []
transform_args["strategy"] = None
transform_args["max_concurrent_transforms"] = None
transform_args["max_payload"] = None
transform_args["env"] = None
transform_args["experiment_config"] = None
input_config = desc["TransformInput"]
input_config["DataSource"]["S3DataSource"]["S3Uri"] = inputs
transform_args["input_config"] = input_config
if volume_kms_key is not None:
transform_args["resource_config"]["VolumeKmsKeyId"] = volume_kms_key
if "BatchStrategy" in desc:
transform_args["strategy"] = desc["BatchStrategy"]
if "MaxConcurrentTransforms" in desc:
transform_args["max_concurrent_transforms"] = desc["MaxConcurrentTransforms"]
if "MaxPayloadInMB" in desc:
transform_args["max_payload"] = desc["MaxPayloadInMB"]
if "Environment" in desc:
transform_args["env"] = desc["Environment"]
return transform_args
|
def _get_transform_args(self, desc, inputs, name, volume_kms_key):
"""Format training args to pass in sagemaker_session.train.
Args:
desc (dict): the response from DescribeTrainingJob API.
inputs (str): an S3 uri where new input dataset is stored.
name (str): the name of the step job.
volume_kms_key (str): The KMS key id to encrypt data on the storage volume attached to
the ML compute instance(s).
Returns (dcit): a dictionary that can be used as args of
sagemaker_session.transform method.
"""
transform_args = {}
transform_args["job_name"] = name
transform_args["model_name"] = desc["ModelName"]
transform_args["output_config"] = desc["TransformOutput"]
transform_args["resource_config"] = desc["TransformResources"]
transform_args["data_processing"] = desc["DataProcessing"]
transform_args["tags"] = []
transform_args["strategy"] = None
transform_args["max_concurrent_transforms"] = None
transform_args["max_payload"] = None
transform_args["env"] = None
input_config = desc["TransformInput"]
input_config["DataSource"]["S3DataSource"]["S3Uri"] = inputs
transform_args["input_config"] = input_config
if volume_kms_key is not None:
transform_args["resource_config"]["VolumeKmsKeyId"] = volume_kms_key
if "BatchStrategy" in desc:
transform_args["strategy"] = desc["BatchStrategy"]
if "MaxConcurrentTransforms" in desc:
transform_args["max_concurrent_transforms"] = desc["MaxConcurrentTransforms"]
if "MaxPayloadInMB" in desc:
transform_args["max_payload"] = desc["MaxPayloadInMB"]
if "Environment" in desc:
transform_args["env"] = desc["Environment"]
return transform_args
|
https://github.com/aws/sagemaker-python-sdk/issues/276
|
2018-07-05 11:30:31,569 INFO - root - running container entrypoint
2018-07-05 11:30:31,570 INFO - root - starting train task
2018-07-05 11:30:31,573 INFO - container_support.training - Training starting
2018-07-05 11:30:31,575 INFO - container_support.environment - starting metrics service
2018-07-05 11:30:31,578 ERROR - container_support.training - uncaught exception during training: [Errno 2] No such file or directory: 'telegraf'
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/container_support/training.py", line 32, in start
env.start_metrics_if_enabled()
File "/usr/local/lib/python3.5/dist-packages/container_support/environment.py", line 124, in start_metrics_if_enabled
subprocess.Popen(['telegraf', '--config', telegraf_conf])
File "/usr/lib/python3.5/subprocess.py", line 947, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.5/subprocess.py", line 1551, in _execute_child
raise child_exception_type(errno_num, err_msg)
FileNotFoundError: [Errno 2] No such file or directory: 'telegraf'
|
FileNotFoundError
|
def _generate_compose_file(
self, command, additional_volumes=None, additional_env_vars=None
):
"""Writes a config file describing a training/hosting environment.
This method generates a docker compose configuration file, it has an entry for each container
that will be created (based on self.hosts). it calls
:meth:~sagemaker.local_session.SageMakerContainer._create_docker_host to generate the config
for each individual container.
Args:
command (str): either 'train' or 'serve'
additional_volumes (list): a list of volumes that will be mapped to the containers
additional_env_vars (dict): a dictionary with additional environment variables to be
passed on to the containers.
Returns: (dict) A dictionary representation of the configuration that was written.
"""
boto_session = self.sagemaker_session.boto_session
additional_volumes = additional_volumes or []
additional_env_vars = additional_env_vars or {}
environment = []
optml_dirs = set()
aws_creds = _aws_credentials(boto_session)
if aws_creds is not None:
environment.extend(aws_creds)
additional_env_var_list = [
"{}={}".format(k, v) for k, v in additional_env_vars.items()
]
environment.extend(additional_env_var_list)
if command == "train":
optml_dirs = {"output", "output/data", "input"}
services = {
h: self._create_docker_host(
h, environment, optml_dirs, command, additional_volumes
)
for h in self.hosts
}
content = {
# Use version 2.3 as a minimum so that we can specify the runtime
"version": "2.3",
"services": services,
"networks": {"sagemaker-local": {"name": "sagemaker-local"}},
}
docker_compose_path = os.path.join(self.container_root, DOCKER_COMPOSE_FILENAME)
yaml_content = yaml.dump(content, default_flow_style=False)
logger.info("docker compose file: \n{}".format(yaml_content))
with open(docker_compose_path, "w") as f:
f.write(yaml_content)
return content
|
def _generate_compose_file(
self, command, additional_volumes=None, additional_env_vars=None
):
"""Writes a config file describing a training/hosting environment.
This method generates a docker compose configuration file, it has an entry for each container
that will be created (based on self.hosts). it calls
:meth:~sagemaker.local_session.SageMakerContainer._create_docker_host to generate the config
for each individual container.
Args:
command (str): either 'train' or 'serve'
additional_volumes (list): a list of volumes that will be mapped to the containers
additional_env_vars (dict): a dictionary with additional environment variables to be
passed on to the containers.
Returns: (dict) A dictionary representation of the configuration that was written.
"""
boto_session = self.sagemaker_session.boto_session
additional_env_vars = additional_env_vars or []
additional_volumes = additional_volumes or {}
environment = []
optml_dirs = set()
aws_creds = _aws_credentials(boto_session)
if aws_creds is not None:
environment.extend(aws_creds)
additional_env_var_list = [
"{}={}".format(k, v) for k, v in additional_env_vars.items()
]
environment.extend(additional_env_var_list)
if command == "train":
optml_dirs = {"output", "output/data", "input"}
services = {
h: self._create_docker_host(
h, environment, optml_dirs, command, additional_volumes
)
for h in self.hosts
}
content = {
# Use version 2.3 as a minimum so that we can specify the runtime
"version": "2.3",
"services": services,
"networks": {"sagemaker-local": {"name": "sagemaker-local"}},
}
docker_compose_path = os.path.join(self.container_root, DOCKER_COMPOSE_FILENAME)
yaml_content = yaml.dump(content, default_flow_style=False)
logger.info("docker compose file: \n{}".format(yaml_content))
with open(docker_compose_path, "w") as f:
f.write(yaml_content)
return content
|
https://github.com/aws/sagemaker-python-sdk/issues/421
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-34-104847d86296> in <module>()
61 m.set_hyperparameters(**hyperparameters)
62
---> 63 m.fit({k:'file://'+v for k,v in inputs.items()})
64
65 predictor = m.deploy(1, 'local_gpu')
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/estimator.py in fit(self, inputs, wait, logs, job_name)
189 self._prepare_for_training(job_name=job_name)
190
--> 191 self.latest_training_job = _TrainingJob.start_new(self, inputs)
192 if wait:
193 self.latest_training_job.wait(logs=logs)
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/estimator.py in start_new(cls, estimator, inputs)
415 resource_config=config['resource_config'], vpc_config=config['vpc_config'],
416 hyperparameters=hyperparameters, stop_condition=config['stop_condition'],
--> 417 tags=estimator.tags)
418
419 return cls(estimator.sagemaker_session, estimator._current_job_name)
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/session.py in train(self, image, input_mode, input_config, role, job_name, output_config, resource_config, vpc_config, hyperparameters, stop_condition, tags)
276 LOGGER.info('Creating training-job with name: {}'.format(job_name))
277 LOGGER.debug('train request: {}'.format(json.dumps(train_request, indent=4)))
--> 278 self.sagemaker_client.create_training_job(**train_request)
279
280 def tune(self, job_name, strategy, objective_type, objective_metric_name,
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/local/local_session.py in create_training_job(self, TrainingJobName, AlgorithmSpecification, InputDataConfig, OutputDataConfig, ResourceConfig, **kwargs)
73 training_job = _LocalTrainingJob(container)
74 hyperparameters = kwargs['HyperParameters'] if 'HyperParameters' in kwargs else {}
---> 75 training_job.start(InputDataConfig, hyperparameters)
76
77 LocalSagemakerClient._training_jobs[TrainingJobName] = training_job
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/local/entities.py in start(self, input_data_config, hyperparameters)
58 self.state = self._TRAINING
59
---> 60 self.model_artifacts = self.container.train(input_data_config, hyperparameters)
61 self.end = datetime.datetime.now()
62 self.state = self._COMPLETED
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/local/image.py in train(self, input_data_config, hyperparameters)
109 training_env_vars = {
110 REGION_ENV_NAME: self.sagemaker_session.boto_region_name,
--> 111 TRAINING_JOB_NAME_ENV_NAME: json.loads(hyperparameters.get(sagemaker.model.JOB_NAME_PARAM_NAME)),
112 }
113 compose_data = self._generate_compose_file('train', additional_volumes=volumes,
~/anaconda3/envs/mxnet_p36/lib/python3.6/json/__init__.py in loads(s, encoding, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)
346 if not isinstance(s, (bytes, bytearray)):
347 raise TypeError('the JSON object must be str, bytes or bytearray, '
--> 348 'not {!r}'.format(s.__class__.__name__))
349 s = s.decode(detect_encoding(s), 'surrogatepass')
350
TypeError: the JSON object must be str, bytes or bytearray, not 'NoneType'
|
TypeError
|
def secondary_training_status_message(job_description, prev_description):
"""Returns a string contains last modified time and the secondary training job status message.
Args:
job_description: Returned response from DescribeTrainingJob call
prev_description: Previous job description from DescribeTrainingJob call
Returns:
str: Job status string to be printed.
"""
if (
job_description is None
or job_description.get("SecondaryStatusTransitions") is None
or len(job_description.get("SecondaryStatusTransitions")) == 0
):
return ""
prev_description_secondary_transitions = (
prev_description.get("SecondaryStatusTransitions")
if prev_description is not None
else None
)
prev_transitions_num = (
len(prev_description["SecondaryStatusTransitions"])
if prev_description_secondary_transitions is not None
else 0
)
current_transitions = job_description["SecondaryStatusTransitions"]
if len(current_transitions) == prev_transitions_num:
# Secondary status is not changed but the message changed.
transitions_to_print = current_transitions[-1:]
else:
# Secondary status is changed we need to print all the entries.
transitions_to_print = current_transitions[
prev_transitions_num - len(current_transitions) :
]
status_strs = []
for transition in transitions_to_print:
message = transition["StatusMessage"]
time_str = datetime.utcfromtimestamp(
time.mktime(job_description["LastModifiedTime"].timetuple())
).strftime("%Y-%m-%d %H:%M:%S")
status_strs.append("{} {} - {}".format(time_str, transition["Status"], message))
return "\n".join(status_strs)
|
def secondary_training_status_message(job_description, prev_description):
"""Returns a string contains start time and the secondary training job status message.
Args:
job_description: Returned response from DescribeTrainingJob call
prev_description: Previous job description from DescribeTrainingJob call
Returns:
str: Job status string to be printed.
"""
if (
job_description is None
or job_description.get("SecondaryStatusTransitions") is None
or len(job_description.get("SecondaryStatusTransitions")) == 0
):
return ""
prev_description_secondary_transitions = (
prev_description.get("SecondaryStatusTransitions")
if prev_description is not None
else None
)
prev_transitions_num = (
len(prev_description["SecondaryStatusTransitions"])
if prev_description_secondary_transitions is not None
else 0
)
current_transitions = job_description["SecondaryStatusTransitions"]
transitions_to_print = (
current_transitions[-1:]
if len(current_transitions) == prev_transitions_num
else current_transitions[prev_transitions_num - len(current_transitions) :]
)
status_strs = []
for transition in transitions_to_print:
message = transition["StatusMessage"]
time_str = datetime.utcfromtimestamp(
time.mktime(job_description["LastModifiedTime"].timetuple())
).strftime("%Y-%m-%d %H:%M:%S")
status_strs.append("{} {} - {}".format(time_str, transition["Status"], message))
return "\n".join(status_strs)
|
https://github.com/aws/sagemaker-python-sdk/issues/421
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-34-104847d86296> in <module>()
61 m.set_hyperparameters(**hyperparameters)
62
---> 63 m.fit({k:'file://'+v for k,v in inputs.items()})
64
65 predictor = m.deploy(1, 'local_gpu')
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/estimator.py in fit(self, inputs, wait, logs, job_name)
189 self._prepare_for_training(job_name=job_name)
190
--> 191 self.latest_training_job = _TrainingJob.start_new(self, inputs)
192 if wait:
193 self.latest_training_job.wait(logs=logs)
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/estimator.py in start_new(cls, estimator, inputs)
415 resource_config=config['resource_config'], vpc_config=config['vpc_config'],
416 hyperparameters=hyperparameters, stop_condition=config['stop_condition'],
--> 417 tags=estimator.tags)
418
419 return cls(estimator.sagemaker_session, estimator._current_job_name)
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/session.py in train(self, image, input_mode, input_config, role, job_name, output_config, resource_config, vpc_config, hyperparameters, stop_condition, tags)
276 LOGGER.info('Creating training-job with name: {}'.format(job_name))
277 LOGGER.debug('train request: {}'.format(json.dumps(train_request, indent=4)))
--> 278 self.sagemaker_client.create_training_job(**train_request)
279
280 def tune(self, job_name, strategy, objective_type, objective_metric_name,
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/local/local_session.py in create_training_job(self, TrainingJobName, AlgorithmSpecification, InputDataConfig, OutputDataConfig, ResourceConfig, **kwargs)
73 training_job = _LocalTrainingJob(container)
74 hyperparameters = kwargs['HyperParameters'] if 'HyperParameters' in kwargs else {}
---> 75 training_job.start(InputDataConfig, hyperparameters)
76
77 LocalSagemakerClient._training_jobs[TrainingJobName] = training_job
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/local/entities.py in start(self, input_data_config, hyperparameters)
58 self.state = self._TRAINING
59
---> 60 self.model_artifacts = self.container.train(input_data_config, hyperparameters)
61 self.end = datetime.datetime.now()
62 self.state = self._COMPLETED
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/local/image.py in train(self, input_data_config, hyperparameters)
109 training_env_vars = {
110 REGION_ENV_NAME: self.sagemaker_session.boto_region_name,
--> 111 TRAINING_JOB_NAME_ENV_NAME: json.loads(hyperparameters.get(sagemaker.model.JOB_NAME_PARAM_NAME)),
112 }
113 compose_data = self._generate_compose_file('train', additional_volumes=volumes,
~/anaconda3/envs/mxnet_p36/lib/python3.6/json/__init__.py in loads(s, encoding, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)
346 if not isinstance(s, (bytes, bytearray)):
347 raise TypeError('the JSON object must be str, bytes or bytearray, '
--> 348 'not {!r}'.format(s.__class__.__name__))
349 s = s.decode(detect_encoding(s), 'surrogatepass')
350
TypeError: the JSON object must be str, bytes or bytearray, not 'NoneType'
|
TypeError
|
def __init__(self, training_job_name, metric_names=None, sagemaker_session=None):
"""Initialize a ``TrainingJobAnalytics`` instance.
Args:
training_job_name (str): name of the TrainingJob to analyze.
metric_names (list, optional): string names of all the metrics to collect for this training job.
If not specified, then it will use all metric names configured for this job.
sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
Amazon SageMaker APIs and any other AWS services needed. If not specified, one is specified
using the default AWS configuration chain.
"""
sagemaker_session = sagemaker_session or Session()
self._sage_client = sagemaker_session.sagemaker_client
self._cloudwatch = sagemaker_session.boto_session.client("cloudwatch")
self._training_job_name = training_job_name
if metric_names:
self._metric_names = metric_names
else:
self._metric_names = self._metric_names_for_training_job()
self.clear_cache()
|
def __init__(self, training_job_name, metric_names, sagemaker_session=None):
"""Initialize a ``TrainingJobAnalytics`` instance.
Args:
training_job_name (str): name of the TrainingJob to analyze.
metric_names (list): string names of all the metrics to collect for this training job
sagemaker_session (sagemaker.session.Session): Session object which manages interactions with
Amazon SageMaker APIs and any other AWS services needed. If not specified, one is specified
using the default AWS configuration chain.
"""
sagemaker_session = sagemaker_session or Session()
self._sage_client = sagemaker_session.sagemaker_client
self._cloudwatch = sagemaker_session.boto_session.client("cloudwatch")
self._training_job_name = training_job_name
self._metric_names = metric_names
self.clear_cache()
|
https://github.com/aws/sagemaker-python-sdk/issues/273
|
TypeError Traceback (most recent call last)
<ipython-input-145-5976c283bde7> in <module>()
----> 1 estimator.training_job_analytics
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/estimator.py in training_job_analytics(self)
325 if self._current_job_name is None:
326 raise ValueError('Estimator is not associated with a TrainingJob')
--> 327 return TrainingJobAnalytics(self._current_job_name)
328
329
TypeError: __init__() missing 1 required positional argument: 'metric_names'
|
TypeError
|
def training_job_analytics(self):
"""Return a ``TrainingJobAnalytics`` object for the current training job."""
if self._current_job_name is None:
raise ValueError("Estimator is not associated with a TrainingJob")
return TrainingJobAnalytics(
self._current_job_name, sagemaker_session=self.sagemaker_session
)
|
def training_job_analytics(self):
"""Return a ``TrainingJobAnalytics`` object for the current training job."""
if self._current_job_name is None:
raise ValueError("Estimator is not associated with a TrainingJob")
return TrainingJobAnalytics(self._current_job_name)
|
https://github.com/aws/sagemaker-python-sdk/issues/273
|
TypeError Traceback (most recent call last)
<ipython-input-145-5976c283bde7> in <module>()
----> 1 estimator.training_job_analytics
~/anaconda3/envs/mxnet_p36/lib/python3.6/site-packages/sagemaker/estimator.py in training_job_analytics(self)
325 if self._current_job_name is None:
326 raise ValueError('Estimator is not associated with a TrainingJob')
--> 327 return TrainingJobAnalytics(self._current_job_name)
328
329
TypeError: __init__() missing 1 required positional argument: 'metric_names'
|
TypeError
|
def _download_folder(self, bucket_name, prefix, target):
boto_session = self.sagemaker_session.boto_session
s3 = boto_session.resource("s3")
bucket = s3.Bucket(bucket_name)
for obj_sum in bucket.objects.filter(Prefix=prefix):
# if obj_sum is a folder object skip it.
if obj_sum.key != "" and obj_sum.key[-1] == "/":
continue
obj = s3.Object(obj_sum.bucket_name, obj_sum.key)
s3_relative_path = obj_sum.key[len(prefix) :].lstrip("/")
file_path = os.path.join(target, s3_relative_path)
try:
os.makedirs(os.path.dirname(file_path))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
obj.download_file(file_path)
|
def _download_folder(self, bucket_name, prefix, target):
boto_session = self.sagemaker_session.boto_session
s3 = boto_session.resource("s3")
bucket = s3.Bucket(bucket_name)
for obj_sum in bucket.objects.filter(Prefix=prefix):
obj = s3.Object(obj_sum.bucket_name, obj_sum.key)
s3_relative_path = obj_sum.key[len(prefix) :].lstrip("/")
file_path = os.path.join(target, s3_relative_path)
try:
os.makedirs(os.path.dirname(file_path))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
obj.download_file(file_path)
|
https://github.com/aws/sagemaker-python-sdk/issues/245
|
<sagemaker.tensorflow.estimator.TensorFlow object at 0x107b86610>
INFO:sagemaker:Creating training-job with name: sagemaker-tensorflow-2018-06-21-11-17-58-273
Traceback (most recent call last):
File "test.py", line 28, in <module>
iris_estimator.fit(train_data_location)
File "/Library/Python/2.7/site-packages/sagemaker/tensorflow/estimator.py", line 243, in fit
fit_super()
File "/Library/Python/2.7/site-packages/sagemaker/tensorflow/estimator.py", line 225, in fit_super
super(TensorFlow, self).fit(inputs, wait, logs, job_name)
File "/Library/Python/2.7/site-packages/sagemaker/estimator.py", line 177, in fit
self.latest_training_job = _TrainingJob.start_new(self, inputs)
File "/Library/Python/2.7/site-packages/sagemaker/estimator.py", line 362, in start_new
stop_condition=config['stop_condition'], tags=estimator.tags)
File "/Library/Python/2.7/site-packages/sagemaker/session.py", line 264, in train
self.sagemaker_client.create_training_job(**train_request)
File "/Library/Python/2.7/site-packages/sagemaker/local/local_session.py", line 75, in create_training_job
self.s3_model_artifacts = self.train_container.train(InputDataConfig, HyperParameters)
File "/Library/Python/2.7/site-packages/sagemaker/local/image.py", line 95, in train
volumes = self._prepare_training_volumes(data_dir, input_data_config, hyperparameters)
File "/Library/Python/2.7/site-packages/sagemaker/local/image.py", line 312, in _prepare_training_volumes
self._download_folder(bucket_name, key, channel_dir)
File "/Library/Python/2.7/site-packages/sagemaker/local/image.py", line 275, in _download_folder
obj.download_file(file_path)
File "/Library/Python/2.7/site-packages/boto3/s3/inject.py", line 314, in object_download_file
ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
File "/Library/Python/2.7/site-packages/boto3/s3/inject.py", line 172, in download_file
extra_args=ExtraArgs, callback=Callback)
File "/Library/Python/2.7/site-packages/boto3/s3/transfer.py", line 307, in download_file
future.result()
File "/Library/Python/2.7/site-packages/s3transfer/futures.py", line 73, in result
return self._coordinator.result()
File "/Library/Python/2.7/site-packages/s3transfer/futures.py", line 233, in result
raise self._exception
OSError: [Errno 21] Is a directory
|
OSError
|
def describe(self):
"""Prints out a response from the DescribeProcessingJob API call."""
return self.sagemaker_session.describe_processing_job(self.job_name)
|
def describe(self, print_response=True):
"""Prints out a response from the DescribeProcessingJob API call."""
describe_response = self.sagemaker_session.describe_processing_job(self.job_name)
if print_response:
print(describe_response)
return describe_response
|
https://github.com/aws/sagemaker-python-sdk/issues/245
|
<sagemaker.tensorflow.estimator.TensorFlow object at 0x107b86610>
INFO:sagemaker:Creating training-job with name: sagemaker-tensorflow-2018-06-21-11-17-58-273
Traceback (most recent call last):
File "test.py", line 28, in <module>
iris_estimator.fit(train_data_location)
File "/Library/Python/2.7/site-packages/sagemaker/tensorflow/estimator.py", line 243, in fit
fit_super()
File "/Library/Python/2.7/site-packages/sagemaker/tensorflow/estimator.py", line 225, in fit_super
super(TensorFlow, self).fit(inputs, wait, logs, job_name)
File "/Library/Python/2.7/site-packages/sagemaker/estimator.py", line 177, in fit
self.latest_training_job = _TrainingJob.start_new(self, inputs)
File "/Library/Python/2.7/site-packages/sagemaker/estimator.py", line 362, in start_new
stop_condition=config['stop_condition'], tags=estimator.tags)
File "/Library/Python/2.7/site-packages/sagemaker/session.py", line 264, in train
self.sagemaker_client.create_training_job(**train_request)
File "/Library/Python/2.7/site-packages/sagemaker/local/local_session.py", line 75, in create_training_job
self.s3_model_artifacts = self.train_container.train(InputDataConfig, HyperParameters)
File "/Library/Python/2.7/site-packages/sagemaker/local/image.py", line 95, in train
volumes = self._prepare_training_volumes(data_dir, input_data_config, hyperparameters)
File "/Library/Python/2.7/site-packages/sagemaker/local/image.py", line 312, in _prepare_training_volumes
self._download_folder(bucket_name, key, channel_dir)
File "/Library/Python/2.7/site-packages/sagemaker/local/image.py", line 275, in _download_folder
obj.download_file(file_path)
File "/Library/Python/2.7/site-packages/boto3/s3/inject.py", line 314, in object_download_file
ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
File "/Library/Python/2.7/site-packages/boto3/s3/inject.py", line 172, in download_file
extra_args=ExtraArgs, callback=Callback)
File "/Library/Python/2.7/site-packages/boto3/s3/transfer.py", line 307, in download_file
future.result()
File "/Library/Python/2.7/site-packages/s3transfer/futures.py", line 73, in result
return self._coordinator.result()
File "/Library/Python/2.7/site-packages/s3transfer/futures.py", line 233, in result
raise self._exception
OSError: [Errno 21] Is a directory
|
OSError
|
def make_blueprint(config):
view = Blueprint("main", __name__)
@view.route("/")
def index():
return render_template("index.html")
@view.route("/generate", methods=("GET", "POST"))
def generate():
if logged_in():
flash(
gettext(
"You were redirected because you are already logged in. "
"If you want to create a new account, you should log out "
"first."
),
"notification",
)
return redirect(url_for(".lookup"))
codename = generate_unique_codename(config)
# Generate a unique id for each browser tab and associate the codename with this id.
# This will allow retrieval of the codename displayed in the tab from which the source has
# clicked to proceed to /generate (ref. issue #4458)
tab_id = urlsafe_b64encode(os.urandom(64)).decode()
codenames = session.get("codenames", {})
codenames[tab_id] = codename
session["codenames"] = codenames
session["new_user"] = True
return render_template("generate.html", codename=codename, tab_id=tab_id)
@view.route("/org-logo")
def select_logo():
if os.path.exists(
os.path.join(current_app.static_folder, "i", "custom_logo.png")
):
return redirect(url_for("static", filename="i/custom_logo.png"))
else:
return redirect(url_for("static", filename="i/logo.png"))
@view.route("/create", methods=["POST"])
def create():
if session.get("logged_in", False):
flash(
gettext(
"You are already logged in. Please verify your codename above as it "
+ "may differ from the one displayed on the previous page."
),
"notification",
)
else:
tab_id = request.form["tab_id"]
codename = session["codenames"][tab_id]
session["codename"] = codename
del session["codenames"]
filesystem_id = current_app.crypto_util.hash_codename(codename)
try:
source = Source(filesystem_id, current_app.crypto_util.display_id())
except ValueError as e:
current_app.logger.error(e)
flash(
gettext(
"There was a temporary problem creating your account. "
"Please try again."
),
"error",
)
return redirect(url_for(".index"))
db.session.add(source)
try:
db.session.commit()
except IntegrityError as e:
db.session.rollback()
current_app.logger.error(
"Attempt to create a source with duplicate codename: %s" % (e,)
)
# Issue 2386: don't log in on duplicates
del session["codename"]
# Issue 4361: Delete 'logged_in' if it's in the session
try:
del session["logged_in"]
except KeyError:
pass
abort(500)
else:
os.mkdir(current_app.storage.path(filesystem_id))
session["logged_in"] = True
return redirect(url_for(".lookup"))
@view.route("/lookup", methods=("GET",))
@login_required
def lookup():
replies = []
source_inbox = (
Reply.query.filter(Reply.source_id == g.source.id)
.filter(Reply.deleted_by_source == False)
.all()
) # noqa
for reply in source_inbox:
reply_path = current_app.storage.path(
g.filesystem_id,
reply.filename,
)
try:
with io.open(reply_path, "rb") as f:
contents = f.read()
reply_obj = current_app.crypto_util.decrypt(g.codename, contents)
reply.decrypted = reply_obj
except UnicodeDecodeError:
current_app.logger.error("Could not decode reply %s" % reply.filename)
except FileNotFoundError:
current_app.logger.error("Reply file missing: %s" % reply.filename)
else:
reply.date = datetime.utcfromtimestamp(os.stat(reply_path).st_mtime)
replies.append(reply)
# Sort the replies by date
replies.sort(key=operator.attrgetter("date"), reverse=True)
# Generate a keypair to encrypt replies from the journalist
# Only do this if the journalist has flagged the source as one
# that they would like to reply to. (Issue #140.)
if (
not current_app.crypto_util.get_fingerprint(g.filesystem_id)
and g.source.flagged
):
db_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
async_genkey(current_app.crypto_util, db_uri, g.filesystem_id, g.codename)
return render_template(
"lookup.html",
allow_document_uploads=current_app.instance_config.allow_document_uploads,
codename=g.codename,
replies=replies,
flagged=g.source.flagged,
new_user=session.get("new_user", None),
haskey=current_app.crypto_util.get_fingerprint(g.filesystem_id),
form=SubmissionForm(),
)
@view.route("/submit", methods=("POST",))
@login_required
def submit():
allow_document_uploads = current_app.instance_config.allow_document_uploads
form = SubmissionForm()
if not form.validate():
for field, errors in form.errors.items():
for error in errors:
flash(error, "error")
return redirect(url_for("main.lookup"))
msg = request.form["msg"]
fh = None
if allow_document_uploads and "fh" in request.files:
fh = request.files["fh"]
# Don't submit anything if it was an "empty" submission. #878
if not (msg or fh):
if allow_document_uploads:
flash(
gettext("You must enter a message or choose a file to submit."),
"error",
)
else:
flash(gettext("You must enter a message."), "error")
return redirect(url_for("main.lookup"))
fnames = []
journalist_filename = g.source.journalist_filename
first_submission = g.source.interaction_count == 0
if msg:
g.source.interaction_count += 1
fnames.append(
current_app.storage.save_message_submission(
g.filesystem_id,
g.source.interaction_count,
journalist_filename,
msg,
)
)
if fh:
g.source.interaction_count += 1
fnames.append(
current_app.storage.save_file_submission(
g.filesystem_id,
g.source.interaction_count,
journalist_filename,
fh.filename,
fh.stream,
)
)
if first_submission:
flash_message = render_template("first_submission_flashed_message.html")
flash(Markup(flash_message), "success")
else:
if msg and not fh:
html_contents = gettext("Thanks! We received your message.")
elif fh and not msg:
html_contents = gettext("Thanks! We received your document.")
else:
html_contents = gettext(
"Thanks! We received your message and document."
)
flash_message = render_template(
"next_submission_flashed_message.html", html_contents=html_contents
)
flash(Markup(flash_message), "success")
new_submissions = []
for fname in fnames:
submission = Submission(g.source, fname)
db.session.add(submission)
new_submissions.append(submission)
if g.source.pending:
g.source.pending = False
# Generate a keypair now, if there's enough entropy (issue #303)
# (gpg reads 300 bytes from /dev/random)
entropy_avail = get_entropy_estimate()
if entropy_avail >= 2400:
db_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
async_genkey(
current_app.crypto_util, db_uri, g.filesystem_id, g.codename
)
current_app.logger.info(
"generating key, entropy: {}".format(entropy_avail)
)
else:
current_app.logger.warn(
"skipping key generation. entropy: {}".format(entropy_avail)
)
g.source.last_updated = datetime.utcnow()
db.session.commit()
for sub in new_submissions:
store.async_add_checksum_for_file(sub)
normalize_timestamps(g.filesystem_id)
return redirect(url_for("main.lookup"))
@view.route("/delete", methods=("POST",))
@login_required
def delete():
"""This deletes the reply from the source's inbox, but preserves
the history for journalists such that they can view conversation
history.
"""
query = Reply.query.filter_by(
filename=request.form["reply_filename"], source_id=g.source.id
)
reply = get_one_or_else(query, current_app.logger, abort)
reply.deleted_by_source = True
db.session.add(reply)
db.session.commit()
flash(gettext("Reply deleted"), "notification")
return redirect(url_for(".lookup"))
@view.route("/delete-all", methods=("POST",))
@login_required
def batch_delete():
replies = (
Reply.query.filter(Reply.source_id == g.source.id)
.filter(Reply.deleted_by_source == False)
.all()
) # noqa
if len(replies) == 0:
current_app.logger.error("Found no replies when at least one was expected")
return redirect(url_for(".lookup"))
for reply in replies:
reply.deleted_by_source = True
db.session.add(reply)
db.session.commit()
flash(gettext("All replies have been deleted"), "notification")
return redirect(url_for(".lookup"))
@view.route("/login", methods=("GET", "POST"))
def login():
form = LoginForm()
if form.validate_on_submit():
codename = request.form["codename"].strip()
if valid_codename(codename):
session.update(codename=codename, logged_in=True)
return redirect(url_for(".lookup", from_login="1"))
else:
current_app.logger.info("Login failed for invalid codename")
flash(gettext("Sorry, that is not a recognized codename."), "error")
return render_template("login.html", form=form)
@view.route("/logout")
def logout():
"""
If a user is logged in, show them a logout page that prompts them to
click the New Identity button in Tor Browser to complete their session.
Otherwise redirect to the main Source Interface page.
"""
if logged_in():
# Clear the session after we render the message so it's localized
# If a user specified a locale, save it and restore it
user_locale = g.locale
session.clear()
session["locale"] = user_locale
return render_template("logout.html")
else:
return redirect(url_for(".index"))
return view
|
def make_blueprint(config):
view = Blueprint("main", __name__)
@view.route("/")
def index():
return render_template("index.html")
@view.route("/generate", methods=("GET", "POST"))
def generate():
if logged_in():
flash(
gettext(
"You were redirected because you are already logged in. "
"If you want to create a new account, you should log out "
"first."
),
"notification",
)
return redirect(url_for(".lookup"))
codename = generate_unique_codename(config)
# Generate a unique id for each browser tab and associate the codename with this id.
# This will allow retrieval of the codename displayed in the tab from which the source has
# clicked to proceed to /generate (ref. issue #4458)
tab_id = urlsafe_b64encode(os.urandom(64)).decode()
codenames = session.get("codenames", {})
codenames[tab_id] = codename
session["codenames"] = codenames
session["new_user"] = True
return render_template("generate.html", codename=codename, tab_id=tab_id)
@view.route("/org-logo")
def select_logo():
if os.path.exists(
os.path.join(current_app.static_folder, "i", "custom_logo.png")
):
return redirect(url_for("static", filename="i/custom_logo.png"))
else:
return redirect(url_for("static", filename="i/logo.png"))
@view.route("/create", methods=["POST"])
def create():
if session.get("logged_in", False):
flash(
gettext(
"You are already logged in. Please verify your codename above as it "
+ "may differ from the one displayed on the previous page."
),
"notification",
)
else:
tab_id = request.form["tab_id"]
codename = session["codenames"][tab_id]
session["codename"] = codename
del session["codenames"]
filesystem_id = current_app.crypto_util.hash_codename(codename)
try:
source = Source(filesystem_id, current_app.crypto_util.display_id())
except ValueError as e:
current_app.logger.error(e)
flash(
gettext(
"There was a temporary problem creating your account. "
"Please try again."
),
"error",
)
return redirect(url_for(".index"))
db.session.add(source)
try:
db.session.commit()
except IntegrityError as e:
db.session.rollback()
current_app.logger.error(
"Attempt to create a source with duplicate codename: %s" % (e,)
)
# Issue 2386: don't log in on duplicates
del session["codename"]
# Issue 4361: Delete 'logged_in' if it's in the session
try:
del session["logged_in"]
except KeyError:
pass
abort(500)
else:
os.mkdir(current_app.storage.path(filesystem_id))
session["logged_in"] = True
return redirect(url_for(".lookup"))
@view.route("/lookup", methods=("GET",))
@login_required
def lookup():
replies = []
source_inbox = (
Reply.query.filter(Reply.source_id == g.source.id)
.filter(Reply.deleted_by_source == False)
.all()
) # noqa
for reply in source_inbox:
reply_path = current_app.storage.path(
g.filesystem_id,
reply.filename,
)
try:
with io.open(reply_path, "rb") as f:
contents = f.read()
reply_obj = current_app.crypto_util.decrypt(g.codename, contents)
reply.decrypted = reply_obj
except UnicodeDecodeError:
current_app.logger.error("Could not decode reply %s" % reply.filename)
else:
reply.date = datetime.utcfromtimestamp(os.stat(reply_path).st_mtime)
replies.append(reply)
# Sort the replies by date
replies.sort(key=operator.attrgetter("date"), reverse=True)
# Generate a keypair to encrypt replies from the journalist
# Only do this if the journalist has flagged the source as one
# that they would like to reply to. (Issue #140.)
if (
not current_app.crypto_util.get_fingerprint(g.filesystem_id)
and g.source.flagged
):
db_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
async_genkey(current_app.crypto_util, db_uri, g.filesystem_id, g.codename)
return render_template(
"lookup.html",
allow_document_uploads=current_app.instance_config.allow_document_uploads,
codename=g.codename,
replies=replies,
flagged=g.source.flagged,
new_user=session.get("new_user", None),
haskey=current_app.crypto_util.get_fingerprint(g.filesystem_id),
form=SubmissionForm(),
)
@view.route("/submit", methods=("POST",))
@login_required
def submit():
allow_document_uploads = current_app.instance_config.allow_document_uploads
form = SubmissionForm()
if not form.validate():
for field, errors in form.errors.items():
for error in errors:
flash(error, "error")
return redirect(url_for("main.lookup"))
msg = request.form["msg"]
fh = None
if allow_document_uploads and "fh" in request.files:
fh = request.files["fh"]
# Don't submit anything if it was an "empty" submission. #878
if not (msg or fh):
if allow_document_uploads:
flash(
gettext("You must enter a message or choose a file to submit."),
"error",
)
else:
flash(gettext("You must enter a message."), "error")
return redirect(url_for("main.lookup"))
fnames = []
journalist_filename = g.source.journalist_filename
first_submission = g.source.interaction_count == 0
if msg:
g.source.interaction_count += 1
fnames.append(
current_app.storage.save_message_submission(
g.filesystem_id,
g.source.interaction_count,
journalist_filename,
msg,
)
)
if fh:
g.source.interaction_count += 1
fnames.append(
current_app.storage.save_file_submission(
g.filesystem_id,
g.source.interaction_count,
journalist_filename,
fh.filename,
fh.stream,
)
)
if first_submission:
flash_message = render_template("first_submission_flashed_message.html")
flash(Markup(flash_message), "success")
else:
if msg and not fh:
html_contents = gettext("Thanks! We received your message.")
elif fh and not msg:
html_contents = gettext("Thanks! We received your document.")
else:
html_contents = gettext(
"Thanks! We received your message and document."
)
flash_message = render_template(
"next_submission_flashed_message.html", html_contents=html_contents
)
flash(Markup(flash_message), "success")
new_submissions = []
for fname in fnames:
submission = Submission(g.source, fname)
db.session.add(submission)
new_submissions.append(submission)
if g.source.pending:
g.source.pending = False
# Generate a keypair now, if there's enough entropy (issue #303)
# (gpg reads 300 bytes from /dev/random)
entropy_avail = get_entropy_estimate()
if entropy_avail >= 2400:
db_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
async_genkey(
current_app.crypto_util, db_uri, g.filesystem_id, g.codename
)
current_app.logger.info(
"generating key, entropy: {}".format(entropy_avail)
)
else:
current_app.logger.warn(
"skipping key generation. entropy: {}".format(entropy_avail)
)
g.source.last_updated = datetime.utcnow()
db.session.commit()
for sub in new_submissions:
store.async_add_checksum_for_file(sub)
normalize_timestamps(g.filesystem_id)
return redirect(url_for("main.lookup"))
@view.route("/delete", methods=("POST",))
@login_required
def delete():
"""This deletes the reply from the source's inbox, but preserves
the history for journalists such that they can view conversation
history.
"""
query = Reply.query.filter_by(
filename=request.form["reply_filename"], source_id=g.source.id
)
reply = get_one_or_else(query, current_app.logger, abort)
reply.deleted_by_source = True
db.session.add(reply)
db.session.commit()
flash(gettext("Reply deleted"), "notification")
return redirect(url_for(".lookup"))
@view.route("/delete-all", methods=("POST",))
@login_required
def batch_delete():
replies = (
Reply.query.filter(Reply.source_id == g.source.id)
.filter(Reply.deleted_by_source == False)
.all()
) # noqa
if len(replies) == 0:
current_app.logger.error("Found no replies when at least one was expected")
return redirect(url_for(".lookup"))
for reply in replies:
reply.deleted_by_source = True
db.session.add(reply)
db.session.commit()
flash(gettext("All replies have been deleted"), "notification")
return redirect(url_for(".lookup"))
@view.route("/login", methods=("GET", "POST"))
def login():
form = LoginForm()
if form.validate_on_submit():
codename = request.form["codename"].strip()
if valid_codename(codename):
session.update(codename=codename, logged_in=True)
return redirect(url_for(".lookup", from_login="1"))
else:
current_app.logger.info("Login failed for invalid codename")
flash(gettext("Sorry, that is not a recognized codename."), "error")
return render_template("login.html", form=form)
@view.route("/logout")
def logout():
"""
If a user is logged in, show them a logout page that prompts them to
click the New Identity button in Tor Browser to complete their session.
Otherwise redirect to the main Source Interface page.
"""
if logged_in():
# Clear the session after we render the message so it's localized
# If a user specified a locale, save it and restore it
user_locale = g.locale
session.clear()
session["locale"] = user_locale
return render_template("logout.html")
else:
return redirect(url_for(".index"))
return view
|
https://github.com/freedomofpress/securedrop/issues/5402
|
[Wed Jul 22 22:10:08.442888 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] [2020-07-22 22:10:08,442] ERROR in app: Exception on /lookup [GET]
[Wed Jul 22 22:10:08.442937 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] Traceback (most recent call last):
[Wed Jul 22 22:10:08.442946 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2292, in wsgi_app
[Wed Jul 22 22:10:08.442953 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] response = self.full_dispatch_request()
[Wed Jul 22 22:10:08.442959 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1815, in full_dispatch_request
[Wed Jul 22 22:10:08.442966 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] rv = self.handle_user_exception(e)
[Wed Jul 22 22:10:08.442972 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1718, in handle_user_exception
[Wed Jul 22 22:10:08.442979 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] reraise(exc_type, exc_value, tb)
[Wed Jul 22 22:10:08.442985 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
[Wed Jul 22 22:10:08.442992 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] raise value
[Wed Jul 22 22:10:08.442998 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1813, in full_dispatch_request
[Wed Jul 22 22:10:08.443005 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] rv = self.dispatch_request()
[Wed Jul 22 22:10:08.443011 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1799, in dispatch_request
[Wed Jul 22 22:10:08.443017 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] return self.view_functions[rule.endpoint](**req.view_args)
[Wed Jul 22 22:10:08.443023 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/var/www/securedrop/source_app/decorators.py", line 12, in decorated_function
[Wed Jul 22 22:10:08.443029 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] return f(*args, **kwargs)
[Wed Jul 22 22:10:08.443060 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/var/www/securedrop/source_app/main.py", line 115, in lookup
[Wed Jul 22 22:10:08.443067 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] with io.open(reply_path, "rb") as f:
[Wed Jul 22 22:10:08.443076 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] FileNotFoundError: [Errno 2] No such file or directory: '/var/lib/securedrop/store/YJSI6RQBP5JTHYYMZ4L4MUTQWAI7MNTK5ZUU4K2OXSD34PALIFT7DK6LTAYT43VLGAEOPWBBNV6JWDTHQPFITD6UFPLNYN25RHMJJOY=/2-damp_burner-reply.gpg'
[Wed Jul 22 22:10:08.443111 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108]
|
FileNotFoundError
|
def lookup():
replies = []
source_inbox = (
Reply.query.filter(Reply.source_id == g.source.id)
.filter(Reply.deleted_by_source == False)
.all()
) # noqa
for reply in source_inbox:
reply_path = current_app.storage.path(
g.filesystem_id,
reply.filename,
)
try:
with io.open(reply_path, "rb") as f:
contents = f.read()
reply_obj = current_app.crypto_util.decrypt(g.codename, contents)
reply.decrypted = reply_obj
except UnicodeDecodeError:
current_app.logger.error("Could not decode reply %s" % reply.filename)
except FileNotFoundError:
current_app.logger.error("Reply file missing: %s" % reply.filename)
else:
reply.date = datetime.utcfromtimestamp(os.stat(reply_path).st_mtime)
replies.append(reply)
# Sort the replies by date
replies.sort(key=operator.attrgetter("date"), reverse=True)
# Generate a keypair to encrypt replies from the journalist
# Only do this if the journalist has flagged the source as one
# that they would like to reply to. (Issue #140.)
if (
not current_app.crypto_util.get_fingerprint(g.filesystem_id)
and g.source.flagged
):
db_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
async_genkey(current_app.crypto_util, db_uri, g.filesystem_id, g.codename)
return render_template(
"lookup.html",
allow_document_uploads=current_app.instance_config.allow_document_uploads,
codename=g.codename,
replies=replies,
flagged=g.source.flagged,
new_user=session.get("new_user", None),
haskey=current_app.crypto_util.get_fingerprint(g.filesystem_id),
form=SubmissionForm(),
)
|
def lookup():
replies = []
source_inbox = (
Reply.query.filter(Reply.source_id == g.source.id)
.filter(Reply.deleted_by_source == False)
.all()
) # noqa
for reply in source_inbox:
reply_path = current_app.storage.path(
g.filesystem_id,
reply.filename,
)
try:
with io.open(reply_path, "rb") as f:
contents = f.read()
reply_obj = current_app.crypto_util.decrypt(g.codename, contents)
reply.decrypted = reply_obj
except UnicodeDecodeError:
current_app.logger.error("Could not decode reply %s" % reply.filename)
else:
reply.date = datetime.utcfromtimestamp(os.stat(reply_path).st_mtime)
replies.append(reply)
# Sort the replies by date
replies.sort(key=operator.attrgetter("date"), reverse=True)
# Generate a keypair to encrypt replies from the journalist
# Only do this if the journalist has flagged the source as one
# that they would like to reply to. (Issue #140.)
if (
not current_app.crypto_util.get_fingerprint(g.filesystem_id)
and g.source.flagged
):
db_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
async_genkey(current_app.crypto_util, db_uri, g.filesystem_id, g.codename)
return render_template(
"lookup.html",
allow_document_uploads=current_app.instance_config.allow_document_uploads,
codename=g.codename,
replies=replies,
flagged=g.source.flagged,
new_user=session.get("new_user", None),
haskey=current_app.crypto_util.get_fingerprint(g.filesystem_id),
form=SubmissionForm(),
)
|
https://github.com/freedomofpress/securedrop/issues/5402
|
[Wed Jul 22 22:10:08.442888 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] [2020-07-22 22:10:08,442] ERROR in app: Exception on /lookup [GET]
[Wed Jul 22 22:10:08.442937 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] Traceback (most recent call last):
[Wed Jul 22 22:10:08.442946 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2292, in wsgi_app
[Wed Jul 22 22:10:08.442953 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] response = self.full_dispatch_request()
[Wed Jul 22 22:10:08.442959 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1815, in full_dispatch_request
[Wed Jul 22 22:10:08.442966 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] rv = self.handle_user_exception(e)
[Wed Jul 22 22:10:08.442972 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1718, in handle_user_exception
[Wed Jul 22 22:10:08.442979 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] reraise(exc_type, exc_value, tb)
[Wed Jul 22 22:10:08.442985 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
[Wed Jul 22 22:10:08.442992 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] raise value
[Wed Jul 22 22:10:08.442998 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1813, in full_dispatch_request
[Wed Jul 22 22:10:08.443005 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] rv = self.dispatch_request()
[Wed Jul 22 22:10:08.443011 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1799, in dispatch_request
[Wed Jul 22 22:10:08.443017 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] return self.view_functions[rule.endpoint](**req.view_args)
[Wed Jul 22 22:10:08.443023 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/var/www/securedrop/source_app/decorators.py", line 12, in decorated_function
[Wed Jul 22 22:10:08.443029 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] return f(*args, **kwargs)
[Wed Jul 22 22:10:08.443060 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] File "/var/www/securedrop/source_app/main.py", line 115, in lookup
[Wed Jul 22 22:10:08.443067 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] with io.open(reply_path, "rb") as f:
[Wed Jul 22 22:10:08.443076 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108] FileNotFoundError: [Errno 2] No such file or directory: '/var/lib/securedrop/store/YJSI6RQBP5JTHYYMZ4L4MUTQWAI7MNTK5ZUU4K2OXSD34PALIFT7DK6LTAYT43VLGAEOPWBBNV6JWDTHQPFITD6UFPLNYN25RHMJJOY=/2-damp_burner-reply.gpg'
[Wed Jul 22 22:10:08.443111 2020] [wsgi:error] [pid 11154:tid 115647289255680] [remote 127.0.0.1:40108]
|
FileNotFoundError
|
def make_blueprint(config):
api = Blueprint("api", __name__)
@api.route("/")
def get_endpoints():
endpoints = {
"sources_url": "/api/v1/sources",
"current_user_url": "/api/v1/user",
"submissions_url": "/api/v1/submissions",
"replies_url": "/api/v1/replies",
"auth_token_url": "/api/v1/token",
}
return jsonify(endpoints), 200
# Before every post, we validate the payload before processing the request
@api.before_request
def validate_data():
if request.method == "POST":
# flag, star, and logout can have empty payloads
if not request.data:
dataless_endpoints = [
"add_star",
"remove_star",
"flag",
"logout",
]
for endpoint in dataless_endpoints:
if request.endpoint == "api." + endpoint:
return
return abort(400, "malformed request")
# other requests must have valid JSON payload
else:
try:
json.loads(request.data.decode("utf-8"))
except ValueError:
return abort(400, "malformed request")
@api.route("/token", methods=["POST"])
def get_token():
creds = json.loads(request.data.decode("utf-8"))
username = creds.get("username", None)
passphrase = creds.get("passphrase", None)
one_time_code = creds.get("one_time_code", None)
if username is None:
return abort(400, "username field is missing")
if passphrase is None:
return abort(400, "passphrase field is missing")
if one_time_code is None:
return abort(400, "one_time_code field is missing")
try:
journalist = Journalist.login(username, passphrase, one_time_code)
token_expiry = datetime.utcnow() + timedelta(
seconds=TOKEN_EXPIRATION_MINS * 60
)
response = jsonify(
{
"token": journalist.generate_api_token(
expiration=TOKEN_EXPIRATION_MINS * 60
),
"expiration": token_expiry.isoformat() + "Z",
"journalist_uuid": journalist.uuid,
"journalist_first_name": journalist.first_name,
"journalist_last_name": journalist.last_name,
}
)
# Update access metadata
journalist.last_access = datetime.utcnow()
db.session.add(journalist)
db.session.commit()
return response, 200
except (
LoginThrottledException,
InvalidUsernameException,
BadTokenException,
WrongPasswordException,
):
return abort(403, "Token authentication failed.")
@api.route("/sources", methods=["GET"])
@token_required
def get_all_sources():
sources = Source.query.filter_by(pending=False, deleted_at=None).all()
return jsonify({"sources": [source.to_json() for source in sources]}), 200
@api.route("/sources/<source_uuid>", methods=["GET", "DELETE"])
@token_required
def single_source(source_uuid):
if request.method == "GET":
source = get_or_404(Source, source_uuid, column=Source.uuid)
return jsonify(source.to_json()), 200
elif request.method == "DELETE":
source = get_or_404(Source, source_uuid, column=Source.uuid)
utils.delete_collection(source.filesystem_id)
return jsonify({"message": "Source and submissions deleted"}), 200
@api.route("/sources/<source_uuid>/add_star", methods=["POST"])
@token_required
def add_star(source_uuid):
source = get_or_404(Source, source_uuid, column=Source.uuid)
utils.make_star_true(source.filesystem_id)
db.session.commit()
return jsonify({"message": "Star added"}), 201
@api.route("/sources/<source_uuid>/remove_star", methods=["DELETE"])
@token_required
def remove_star(source_uuid):
source = get_or_404(Source, source_uuid, column=Source.uuid)
utils.make_star_false(source.filesystem_id)
db.session.commit()
return jsonify({"message": "Star removed"}), 200
@api.route("/sources/<source_uuid>/flag", methods=["POST"])
@token_required
def flag(source_uuid):
source = get_or_404(Source, source_uuid, column=Source.uuid)
source.flagged = True
db.session.commit()
return jsonify({"message": "Source flagged for reply"}), 200
@api.route("/sources/<source_uuid>/submissions", methods=["GET"])
@token_required
def all_source_submissions(source_uuid):
source = get_or_404(Source, source_uuid, column=Source.uuid)
return jsonify(
{"submissions": [submission.to_json() for submission in source.submissions]}
), 200
@api.route(
"/sources/<source_uuid>/submissions/<submission_uuid>/download", # noqa
methods=["GET"],
)
@token_required
def download_submission(source_uuid, submission_uuid):
get_or_404(Source, source_uuid, column=Source.uuid)
submission = get_or_404(Submission, submission_uuid, column=Submission.uuid)
# Mark as downloaded
submission.downloaded = True
db.session.commit()
return utils.serve_file_with_etag(submission)
@api.route("/sources/<source_uuid>/replies/<reply_uuid>/download", methods=["GET"])
@token_required
def download_reply(source_uuid, reply_uuid):
get_or_404(Source, source_uuid, column=Source.uuid)
reply = get_or_404(Reply, reply_uuid, column=Reply.uuid)
return utils.serve_file_with_etag(reply)
@api.route(
"/sources/<source_uuid>/submissions/<submission_uuid>",
methods=["GET", "DELETE"],
)
@token_required
def single_submission(source_uuid, submission_uuid):
if request.method == "GET":
get_or_404(Source, source_uuid, column=Source.uuid)
submission = get_or_404(Submission, submission_uuid, column=Submission.uuid)
return jsonify(submission.to_json()), 200
elif request.method == "DELETE":
get_or_404(Source, source_uuid, column=Source.uuid)
submission = get_or_404(Submission, submission_uuid, column=Submission.uuid)
utils.delete_file_object(submission)
return jsonify({"message": "Submission deleted"}), 200
@api.route("/sources/<source_uuid>/replies", methods=["GET", "POST"])
@token_required
def all_source_replies(source_uuid):
if request.method == "GET":
source = get_or_404(Source, source_uuid, column=Source.uuid)
return jsonify(
{"replies": [reply.to_json() for reply in source.replies]}
), 200
elif request.method == "POST":
source = get_or_404(Source, source_uuid, column=Source.uuid)
if request.json is None:
abort(400, "please send requests in valid JSON")
if "reply" not in request.json:
abort(400, "reply not found in request body")
user = get_user_object(request)
data = request.json
if not data["reply"]:
abort(400, "reply should not be empty")
source.interaction_count += 1
try:
filename = current_app.storage.save_pre_encrypted_reply(
source.filesystem_id,
source.interaction_count,
source.journalist_filename,
data["reply"],
)
except NotEncrypted:
return jsonify({"message": "You must encrypt replies client side"}), 400
# issue #3918
filename = path.basename(filename)
reply = Reply(user, source, filename)
reply_uuid = data.get("uuid", None)
if reply_uuid is not None:
# check that is is parseable
try:
UUID(reply_uuid)
except ValueError:
abort(400, "'uuid' was not a valid UUID")
reply.uuid = reply_uuid
try:
db.session.add(reply)
db.session.add(source)
db.session.commit()
except IntegrityError as e:
db.session.rollback()
if "UNIQUE constraint failed: replies.uuid" in str(e):
abort(409, "That UUID is already in use.")
else:
raise e
return jsonify(
{
"message": "Your reply has been stored",
"uuid": reply.uuid,
"filename": reply.filename,
}
), 201
@api.route("/sources/<source_uuid>/replies/<reply_uuid>", methods=["GET", "DELETE"])
@token_required
def single_reply(source_uuid, reply_uuid):
get_or_404(Source, source_uuid, column=Source.uuid)
reply = get_or_404(Reply, reply_uuid, column=Reply.uuid)
if request.method == "GET":
return jsonify(reply.to_json()), 200
elif request.method == "DELETE":
utils.delete_file_object(reply)
return jsonify({"message": "Reply deleted"}), 200
@api.route("/submissions", methods=["GET"])
@token_required
def get_all_submissions():
submissions = Submission.query.all()
return jsonify(
{
"submissions": [
submission.to_json()
for submission in submissions
if submission.source
]
}
), 200
@api.route("/replies", methods=["GET"])
@token_required
def get_all_replies():
replies = Reply.query.all()
return jsonify({"replies": [reply.to_json() for reply in replies]}), 200
@api.route("/user", methods=["GET"])
@token_required
def get_current_user():
user = get_user_object(request)
return jsonify(user.to_json()), 200
@api.route("/logout", methods=["POST"])
@token_required
def logout():
user = get_user_object(request)
auth_token = request.headers.get("Authorization").split(" ")[1]
utils.revoke_token(user, auth_token)
return jsonify({"message": "Your token has been revoked."}), 200
def _handle_api_http_exception(error):
# Workaround for no blueprint-level 404/5 error handlers, see:
# https://github.com/pallets/flask/issues/503#issuecomment-71383286
response = jsonify({"error": error.name, "message": error.description})
return response, error.code
for code in default_exceptions:
api.errorhandler(code)(_handle_api_http_exception)
return api
|
def make_blueprint(config):
api = Blueprint("api", __name__)
@api.route("/")
def get_endpoints():
endpoints = {
"sources_url": "/api/v1/sources",
"current_user_url": "/api/v1/user",
"submissions_url": "/api/v1/submissions",
"replies_url": "/api/v1/replies",
"auth_token_url": "/api/v1/token",
}
return jsonify(endpoints), 200
# Before every post, we validate the payload before processing the request
@api.before_request
def validate_data():
if request.method == "POST":
# flag, star, and logout can have empty payloads
if not request.data:
dataless_endpoints = [
"add_star",
"remove_star",
"flag",
"logout",
]
for endpoint in dataless_endpoints:
if request.endpoint == "api." + endpoint:
return
return abort(400, "malformed request")
# other requests must have valid JSON payload
else:
try:
json.loads(request.data.decode("utf-8"))
except ValueError:
return abort(400, "malformed request")
@api.route("/token", methods=["POST"])
def get_token():
creds = json.loads(request.data.decode("utf-8"))
username = creds.get("username", None)
passphrase = creds.get("passphrase", None)
one_time_code = creds.get("one_time_code", None)
if username is None:
return abort(400, "username field is missing")
if passphrase is None:
return abort(400, "passphrase field is missing")
if one_time_code is None:
return abort(400, "one_time_code field is missing")
try:
journalist = Journalist.login(username, passphrase, one_time_code)
token_expiry = datetime.utcnow() + timedelta(
seconds=TOKEN_EXPIRATION_MINS * 60
)
response = jsonify(
{
"token": journalist.generate_api_token(
expiration=TOKEN_EXPIRATION_MINS * 60
),
"expiration": token_expiry.isoformat() + "Z",
"journalist_uuid": journalist.uuid,
"journalist_first_name": journalist.first_name,
"journalist_last_name": journalist.last_name,
}
)
# Update access metadata
journalist.last_access = datetime.utcnow()
db.session.add(journalist)
db.session.commit()
return response, 200
except (
LoginThrottledException,
InvalidUsernameException,
BadTokenException,
WrongPasswordException,
):
return abort(403, "Token authentication failed.")
@api.route("/sources", methods=["GET"])
@token_required
def get_all_sources():
sources = Source.query.filter_by(pending=False, deleted_at=None).all()
return jsonify({"sources": [source.to_json() for source in sources]}), 200
@api.route("/sources/<source_uuid>", methods=["GET", "DELETE"])
@token_required
def single_source(source_uuid):
if request.method == "GET":
source = get_or_404(Source, source_uuid, column=Source.uuid)
return jsonify(source.to_json()), 200
elif request.method == "DELETE":
source = get_or_404(Source, source_uuid, column=Source.uuid)
utils.delete_collection(source.filesystem_id)
return jsonify({"message": "Source and submissions deleted"}), 200
@api.route("/sources/<source_uuid>/add_star", methods=["POST"])
@token_required
def add_star(source_uuid):
source = get_or_404(Source, source_uuid, column=Source.uuid)
utils.make_star_true(source.filesystem_id)
db.session.commit()
return jsonify({"message": "Star added"}), 201
@api.route("/sources/<source_uuid>/remove_star", methods=["DELETE"])
@token_required
def remove_star(source_uuid):
source = get_or_404(Source, source_uuid, column=Source.uuid)
utils.make_star_false(source.filesystem_id)
db.session.commit()
return jsonify({"message": "Star removed"}), 200
@api.route("/sources/<source_uuid>/flag", methods=["POST"])
@token_required
def flag(source_uuid):
source = get_or_404(Source, source_uuid, column=Source.uuid)
source.flagged = True
db.session.commit()
return jsonify({"message": "Source flagged for reply"}), 200
@api.route("/sources/<source_uuid>/submissions", methods=["GET"])
@token_required
def all_source_submissions(source_uuid):
source = get_or_404(Source, source_uuid, column=Source.uuid)
return jsonify(
{"submissions": [submission.to_json() for submission in source.submissions]}
), 200
@api.route(
"/sources/<source_uuid>/submissions/<submission_uuid>/download", # noqa
methods=["GET"],
)
@token_required
def download_submission(source_uuid, submission_uuid):
get_or_404(Source, source_uuid, column=Source.uuid)
submission = get_or_404(Submission, submission_uuid, column=Submission.uuid)
# Mark as downloaded
submission.downloaded = True
db.session.commit()
return utils.serve_file_with_etag(submission)
@api.route("/sources/<source_uuid>/replies/<reply_uuid>/download", methods=["GET"])
@token_required
def download_reply(source_uuid, reply_uuid):
get_or_404(Source, source_uuid, column=Source.uuid)
reply = get_or_404(Reply, reply_uuid, column=Reply.uuid)
return utils.serve_file_with_etag(reply)
@api.route(
"/sources/<source_uuid>/submissions/<submission_uuid>",
methods=["GET", "DELETE"],
)
@token_required
def single_submission(source_uuid, submission_uuid):
if request.method == "GET":
get_or_404(Source, source_uuid, column=Source.uuid)
submission = get_or_404(Submission, submission_uuid, column=Submission.uuid)
return jsonify(submission.to_json()), 200
elif request.method == "DELETE":
get_or_404(Source, source_uuid, column=Source.uuid)
submission = get_or_404(Submission, submission_uuid, column=Submission.uuid)
utils.delete_file_object(submission)
return jsonify({"message": "Submission deleted"}), 200
@api.route("/sources/<source_uuid>/replies", methods=["GET", "POST"])
@token_required
def all_source_replies(source_uuid):
if request.method == "GET":
source = get_or_404(Source, source_uuid, column=Source.uuid)
return jsonify(
{"replies": [reply.to_json() for reply in source.replies]}
), 200
elif request.method == "POST":
source = get_or_404(Source, source_uuid, column=Source.uuid)
if request.json is None:
abort(400, "please send requests in valid JSON")
if "reply" not in request.json:
abort(400, "reply not found in request body")
user = get_user_object(request)
data = request.json
if not data["reply"]:
abort(400, "reply should not be empty")
source.interaction_count += 1
try:
filename = current_app.storage.save_pre_encrypted_reply(
source.filesystem_id,
source.interaction_count,
source.journalist_filename,
data["reply"],
)
except NotEncrypted:
return jsonify({"message": "You must encrypt replies client side"}), 400
# issue #3918
filename = path.basename(filename)
reply = Reply(user, source, filename)
reply_uuid = data.get("uuid", None)
if reply_uuid is not None:
# check that is is parseable
try:
UUID(reply_uuid)
except ValueError:
abort(400, "'uuid' was not a valid UUID")
reply.uuid = reply_uuid
try:
db.session.add(reply)
db.session.add(source)
db.session.commit()
except IntegrityError as e:
db.session.rollback()
if "UNIQUE constraint failed: replies.uuid" in str(e):
abort(409, "That UUID is already in use.")
else:
raise e
return jsonify(
{
"message": "Your reply has been stored",
"uuid": reply.uuid,
"filename": reply.filename,
}
), 201
@api.route("/sources/<source_uuid>/replies/<reply_uuid>", methods=["GET", "DELETE"])
@token_required
def single_reply(source_uuid, reply_uuid):
get_or_404(Source, source_uuid, column=Source.uuid)
reply = get_or_404(Reply, reply_uuid, column=Reply.uuid)
if request.method == "GET":
return jsonify(reply.to_json()), 200
elif request.method == "DELETE":
utils.delete_file_object(reply)
return jsonify({"message": "Reply deleted"}), 200
@api.route("/submissions", methods=["GET"])
@token_required
def get_all_submissions():
submissions = Submission.query.all()
return jsonify(
{"submissions": [submission.to_json() for submission in submissions]}
), 200
@api.route("/replies", methods=["GET"])
@token_required
def get_all_replies():
replies = Reply.query.all()
return jsonify({"replies": [reply.to_json() for reply in replies]}), 200
@api.route("/user", methods=["GET"])
@token_required
def get_current_user():
user = get_user_object(request)
return jsonify(user.to_json()), 200
@api.route("/logout", methods=["POST"])
@token_required
def logout():
user = get_user_object(request)
auth_token = request.headers.get("Authorization").split(" ")[1]
utils.revoke_token(user, auth_token)
return jsonify({"message": "Your token has been revoked."}), 200
def _handle_api_http_exception(error):
# Workaround for no blueprint-level 404/5 error handlers, see:
# https://github.com/pallets/flask/issues/503#issuecomment-71383286
response = jsonify({"error": error.name, "message": error.description})
return response, error.code
for code in default_exceptions:
api.errorhandler(code)(_handle_api_http_exception)
return api
|
https://github.com/freedomofpress/securedrop/issues/5315
|
Traceback (most recent call last):
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2295, in wsgi_app
response = self.handle_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1741, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/journalist_app/api.py", line 48, in decorated_function
return f(*args, **kwargs)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/journalist_app/api.py", line 303, in get_all_submissions
submission in submissions]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/journalist_app/api.py", line 303, in <listcomp>
submission in submissions]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/models.py", line 218, in to_json
source_uuid=self.source.uuid),
AttributeError: 'NoneType' object has no attribute 'uuid'
|
AttributeError
|
def get_all_submissions():
submissions = Submission.query.all()
return jsonify(
{
"submissions": [
submission.to_json() for submission in submissions if submission.source
]
}
), 200
|
def get_all_submissions():
submissions = Submission.query.all()
return jsonify(
{"submissions": [submission.to_json() for submission in submissions]}
), 200
|
https://github.com/freedomofpress/securedrop/issues/5315
|
Traceback (most recent call last):
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2295, in wsgi_app
response = self.handle_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1741, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/journalist_app/api.py", line 48, in decorated_function
return f(*args, **kwargs)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/journalist_app/api.py", line 303, in get_all_submissions
submission in submissions]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/journalist_app/api.py", line 303, in <listcomp>
submission in submissions]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/models.py", line 218, in to_json
source_uuid=self.source.uuid),
AttributeError: 'NoneType' object has no attribute 'uuid'
|
AttributeError
|
def to_json(self) -> "Dict[str, Union[str, int, bool]]":
json_submission = {
"source_url": url_for("api.single_source", source_uuid=self.source.uuid)
if self.source
else None,
"submission_url": url_for(
"api.single_submission",
source_uuid=self.source.uuid,
submission_uuid=self.uuid,
)
if self.source
else None,
"filename": self.filename,
"size": self.size,
"is_read": self.downloaded,
"uuid": self.uuid,
"download_url": url_for(
"api.download_submission",
source_uuid=self.source.uuid,
submission_uuid=self.uuid,
)
if self.source
else None,
}
return json_submission
|
def to_json(self) -> "Dict[str, Union[str, int, bool]]":
json_submission = {
"source_url": url_for("api.single_source", source_uuid=self.source.uuid),
"submission_url": url_for(
"api.single_submission",
source_uuid=self.source.uuid,
submission_uuid=self.uuid,
),
"filename": self.filename,
"size": self.size,
"is_read": self.downloaded,
"uuid": self.uuid,
"download_url": url_for(
"api.download_submission",
source_uuid=self.source.uuid,
submission_uuid=self.uuid,
),
}
return json_submission
|
https://github.com/freedomofpress/securedrop/issues/5315
|
Traceback (most recent call last):
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2295, in wsgi_app
response = self.handle_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1741, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/journalist_app/api.py", line 48, in decorated_function
return f(*args, **kwargs)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/journalist_app/api.py", line 303, in get_all_submissions
submission in submissions]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/journalist_app/api.py", line 303, in <listcomp>
submission in submissions]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc1/securedrop/securedrop/models.py", line 218, in to_json
source_uuid=self.source.uuid),
AttributeError: 'NoneType' object has no attribute 'uuid'
|
AttributeError
|
def main(staging=False):
app = journalist_app.create_app(config)
with app.app_context():
# Add two test users
test_password = "correct horse battery staple profanity oil chewy"
test_otp_secret = "JHCOGO7VCER3EJ4L"
add_test_user("journalist", test_password, test_otp_secret, is_admin=True)
if staging:
return
add_test_user("dellsberg", test_password, test_otp_secret, is_admin=False)
journalist_tobe_deleted = add_test_user(
"clarkkent",
test_password,
test_otp_secret,
is_admin=False,
first_name="Clark",
last_name="Kent",
)
# Add test sources and submissions
num_sources = int(os.getenv("NUM_SOURCES", 2))
for i in range(num_sources):
if i == 0:
# For the first source, the journalist who replied will be deleted
create_source_and_submissions(
journalist_who_replied=journalist_tobe_deleted
)
continue
create_source_and_submissions()
# Now let us delete one journalist
db.session.delete(journalist_tobe_deleted)
db.session.commit()
|
def main(staging=False):
app = journalist_app.create_app(config)
with app.app_context():
# Add two test users
test_password = "correct horse battery staple profanity oil chewy"
test_otp_secret = "JHCOGO7VCER3EJ4L"
add_test_user("journalist", test_password, test_otp_secret, is_admin=True)
if staging:
return
add_test_user("dellsberg", test_password, test_otp_secret, is_admin=False)
# Add test sources and submissions
num_sources = int(os.getenv("NUM_SOURCES", 2))
for _ in range(num_sources):
create_source_and_submissions()
|
https://github.com/freedomofpress/securedrop/issues/5176
|
172.17.0.1 - - [31/Mar/2020 20:57:10] "GET /api/v1/replies HTTP/1.1" 500 -
Traceback (most recent call last):
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2295, in wsgi_app
response = self.handle_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1741, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/journalist_app/api.py", line 48, in decorated_function
return f(*args, **kwargs)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/journalist_app/api.py", line 310, in get_all_replies
{'replies': [reply.to_json() for reply in replies]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/journalist_app/api.py", line 310, in <listcomp>
{'replies': [reply.to_json() for reply in replies]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/models.py", line 289, in to_json
'journalist_username': self.journalist.username,
AttributeError: 'NoneType' object has no attribute 'username'
|
AttributeError
|
def create_source_and_submissions(
num_submissions=2, num_replies=2, journalist_who_replied=None
):
# Store source in database
codename = current_app.crypto_util.genrandomid()
filesystem_id = current_app.crypto_util.hash_codename(codename)
journalist_designation = current_app.crypto_util.display_id()
source = Source(filesystem_id, journalist_designation)
source.pending = False
db.session.add(source)
db.session.commit()
# Generate submissions directory and generate source key
os.mkdir(current_app.storage.path(source.filesystem_id))
current_app.crypto_util.genkeypair(source.filesystem_id, codename)
# Generate some test submissions
for _ in range(num_submissions):
source.interaction_count += 1
fpath = current_app.storage.save_message_submission(
source.filesystem_id,
source.interaction_count,
source.journalist_filename,
next(submissions),
)
source.last_updated = datetime.datetime.utcnow()
submission = Submission(source, fpath)
db.session.add(submission)
# Generate some test replies
for _ in range(num_replies):
source.interaction_count += 1
fname = "{}-{}-reply.gpg".format(
source.interaction_count, source.journalist_filename
)
current_app.crypto_util.encrypt(
next(replies),
[
current_app.crypto_util.getkey(source.filesystem_id),
config.JOURNALIST_KEY,
],
current_app.storage.path(source.filesystem_id, fname),
)
if not journalist_who_replied:
journalist = Journalist.query.first()
else:
journalist = journalist_who_replied
reply = Reply(journalist, source, fname)
db.session.add(reply)
db.session.commit()
print(
"Test source (codename: '{}', journalist designation '{}') "
"added with {} submissions and {} replies".format(
codename, journalist_designation, num_submissions, num_replies
)
)
|
def create_source_and_submissions(num_submissions=2, num_replies=2):
# Store source in database
codename = current_app.crypto_util.genrandomid()
filesystem_id = current_app.crypto_util.hash_codename(codename)
journalist_designation = current_app.crypto_util.display_id()
source = Source(filesystem_id, journalist_designation)
source.pending = False
db.session.add(source)
db.session.commit()
# Generate submissions directory and generate source key
os.mkdir(current_app.storage.path(source.filesystem_id))
current_app.crypto_util.genkeypair(source.filesystem_id, codename)
# Generate some test submissions
for _ in range(num_submissions):
source.interaction_count += 1
fpath = current_app.storage.save_message_submission(
source.filesystem_id,
source.interaction_count,
source.journalist_filename,
next(submissions),
)
source.last_updated = datetime.datetime.utcnow()
submission = Submission(source, fpath)
db.session.add(submission)
# Generate some test replies
for _ in range(num_replies):
source.interaction_count += 1
fname = "{}-{}-reply.gpg".format(
source.interaction_count, source.journalist_filename
)
current_app.crypto_util.encrypt(
next(replies),
[
current_app.crypto_util.getkey(source.filesystem_id),
config.JOURNALIST_KEY,
],
current_app.storage.path(source.filesystem_id, fname),
)
journalist = Journalist.query.first()
reply = Reply(journalist, source, fname)
db.session.add(reply)
db.session.commit()
print(
"Test source (codename: '{}', journalist designation '{}') "
"added with {} submissions and {} replies".format(
codename, journalist_designation, num_submissions, num_replies
)
)
|
https://github.com/freedomofpress/securedrop/issues/5176
|
172.17.0.1 - - [31/Mar/2020 20:57:10] "GET /api/v1/replies HTTP/1.1" 500 -
Traceback (most recent call last):
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2295, in wsgi_app
response = self.handle_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1741, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/journalist_app/api.py", line 48, in decorated_function
return f(*args, **kwargs)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/journalist_app/api.py", line 310, in get_all_replies
{'replies': [reply.to_json() for reply in replies]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/journalist_app/api.py", line 310, in <listcomp>
{'replies': [reply.to_json() for reply in replies]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/models.py", line 289, in to_json
'journalist_username': self.journalist.username,
AttributeError: 'NoneType' object has no attribute 'username'
|
AttributeError
|
def to_json(self):
# type: () -> Dict[str, Union[str, int, bool]]
username = "deleted"
first_name = ""
last_name = ""
uuid = "deleted"
if self.journalist:
username = self.journalist.username
first_name = self.journalist.first_name
last_name = self.journalist.last_name
uuid = self.journalist.uuid
json_submission = {
"source_url": url_for("api.single_source", source_uuid=self.source.uuid),
"reply_url": url_for(
"api.single_reply", source_uuid=self.source.uuid, reply_uuid=self.uuid
),
"filename": self.filename,
"size": self.size,
"journalist_username": username,
"journalist_first_name": first_name,
"journalist_last_name": last_name,
"journalist_uuid": uuid,
"uuid": self.uuid,
"is_deleted_by_source": self.deleted_by_source,
}
return json_submission
|
def to_json(self):
# type: () -> Dict[str, Union[str, int, bool]]
json_submission = {
"source_url": url_for("api.single_source", source_uuid=self.source.uuid),
"reply_url": url_for(
"api.single_reply", source_uuid=self.source.uuid, reply_uuid=self.uuid
),
"filename": self.filename,
"size": self.size,
"journalist_username": self.journalist.username,
"journalist_first_name": self.journalist.first_name,
"journalist_last_name": self.journalist.last_name,
"journalist_uuid": self.journalist.uuid,
"uuid": self.uuid,
"is_deleted_by_source": self.deleted_by_source,
}
return json_submission
|
https://github.com/freedomofpress/securedrop/issues/5176
|
172.17.0.1 - - [31/Mar/2020 20:57:10] "GET /api/v1/replies HTTP/1.1" 500 -
Traceback (most recent call last):
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2295, in wsgi_app
response = self.handle_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1741, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/journalist_app/api.py", line 48, in decorated_function
return f(*args, **kwargs)
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/journalist_app/api.py", line 310, in get_all_replies
{'replies': [reply.to_json() for reply in replies]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/journalist_app/api.py", line 310, in <listcomp>
{'replies': [reply.to_json() for reply in replies]}), 200
File "/Users/redshiftzero/Documents/Github/securedrop-1.2.0-rc2/securedrop/securedrop/models.py", line 289, in to_json
'journalist_username': self.journalist.username,
AttributeError: 'NoneType' object has no attribute 'username'
|
AttributeError
|
def make_blueprint(config):
view = Blueprint("main", __name__)
@view.route("/")
def index():
return render_template("index.html")
@view.route("/generate", methods=("GET", "POST"))
def generate():
if logged_in():
flash(
gettext(
"You were redirected because you are already logged in. "
"If you want to create a new account, you should log out "
"first."
),
"notification",
)
return redirect(url_for(".lookup"))
codename = generate_unique_codename(config)
session["codename"] = codename
session["new_user"] = True
return render_template("generate.html", codename=codename)
@view.route("/org-logo")
def select_logo():
if os.path.exists(
os.path.join(current_app.static_folder, "i", "custom_logo.png")
):
return redirect(url_for("static", filename="i/custom_logo.png"))
else:
return redirect(url_for("static", filename="i/logo.png"))
@view.route("/create", methods=["POST"])
def create():
filesystem_id = current_app.crypto_util.hash_codename(session["codename"])
source = Source(filesystem_id, current_app.crypto_util.display_id())
db.session.add(source)
try:
db.session.commit()
except IntegrityError as e:
db.session.rollback()
current_app.logger.error(
"Attempt to create a source with duplicate codename: %s" % (e,)
)
# Issue 2386: don't log in on duplicates
del session["codename"]
# Issue 4361: Delete 'logged_in' if it's in the session
try:
del session["logged_in"]
except KeyError:
pass
abort(500)
else:
os.mkdir(current_app.storage.path(filesystem_id))
session["logged_in"] = True
return redirect(url_for(".lookup"))
@view.route("/lookup", methods=("GET",))
@login_required
def lookup():
replies = []
source_inbox = (
Reply.query.filter(Reply.source_id == g.source.id)
.filter(Reply.deleted_by_source == False)
.all()
) # noqa
for reply in source_inbox:
reply_path = current_app.storage.path(
g.filesystem_id,
reply.filename,
)
try:
with io.open(reply_path, "rb") as f:
contents = f.read()
reply_obj = current_app.crypto_util.decrypt(g.codename, contents)
if six.PY2: # Python2
reply.decrypted = reply_obj.decode("utf-8")
else:
reply.decrypted = reply_obj
except UnicodeDecodeError:
current_app.logger.error("Could not decode reply %s" % reply.filename)
else:
reply.date = datetime.utcfromtimestamp(os.stat(reply_path).st_mtime)
replies.append(reply)
# Sort the replies by date
replies.sort(key=operator.attrgetter("date"), reverse=True)
# Generate a keypair to encrypt replies from the journalist
# Only do this if the journalist has flagged the source as one
# that they would like to reply to. (Issue #140.)
if not current_app.crypto_util.getkey(g.filesystem_id) and g.source.flagged:
db_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
async_genkey(current_app.crypto_util, db_uri, g.filesystem_id, g.codename)
return render_template(
"lookup.html",
codename=g.codename,
replies=replies,
flagged=g.source.flagged,
new_user=session.get("new_user", None),
haskey=current_app.crypto_util.getkey(g.filesystem_id),
)
@view.route("/submit", methods=("POST",))
@login_required
def submit():
msg = request.form["msg"]
fh = None
if "fh" in request.files:
fh = request.files["fh"]
# Don't submit anything if it was an "empty" submission. #878
if not (msg or fh):
flash(
gettext("You must enter a message or choose a file to submit."), "error"
)
return redirect(url_for("main.lookup"))
fnames = []
journalist_filename = g.source.journalist_filename
first_submission = g.source.interaction_count == 0
if msg:
g.source.interaction_count += 1
fnames.append(
current_app.storage.save_message_submission(
g.filesystem_id,
g.source.interaction_count,
journalist_filename,
msg,
)
)
if fh:
g.source.interaction_count += 1
fnames.append(
current_app.storage.save_file_submission(
g.filesystem_id,
g.source.interaction_count,
journalist_filename,
fh.filename,
fh.stream,
)
)
if first_submission:
msg = render_template("first_submission_flashed_message.html")
flash(Markup(msg), "success")
else:
if msg and not fh:
html_contents = gettext("Thanks! We received your message.")
elif not msg and fh:
html_contents = gettext("Thanks! We received your document.")
else:
html_contents = gettext(
"Thanks! We received your message and document."
)
msg = render_template(
"next_submission_flashed_message.html", html_contents=html_contents
)
flash(Markup(msg), "success")
new_submissions = []
for fname in fnames:
submission = Submission(g.source, fname)
db.session.add(submission)
new_submissions.append(submission)
if g.source.pending:
g.source.pending = False
# Generate a keypair now, if there's enough entropy (issue #303)
# (gpg reads 300 bytes from /dev/random)
entropy_avail = get_entropy_estimate()
if entropy_avail >= 2400:
db_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
async_genkey(
current_app.crypto_util, db_uri, g.filesystem_id, g.codename
)
current_app.logger.info(
"generating key, entropy: {}".format(entropy_avail)
)
else:
current_app.logger.warn(
"skipping key generation. entropy: {}".format(entropy_avail)
)
g.source.last_updated = datetime.utcnow()
db.session.commit()
for sub in new_submissions:
store.async_add_checksum_for_file(sub)
normalize_timestamps(g.filesystem_id)
return redirect(url_for("main.lookup"))
@view.route("/delete", methods=("POST",))
@login_required
def delete():
"""This deletes the reply from the source's inbox, but preserves
the history for journalists such that they can view conversation
history.
"""
query = Reply.query.filter_by(
filename=request.form["reply_filename"], source_id=g.source.id
)
reply = get_one_or_else(query, current_app.logger, abort)
reply.deleted_by_source = True
db.session.add(reply)
db.session.commit()
flash(gettext("Reply deleted"), "notification")
return redirect(url_for(".lookup"))
@view.route("/delete-all", methods=("POST",))
@login_required
def batch_delete():
replies = (
Reply.query.filter(Reply.source_id == g.source.id)
.filter(Reply.deleted_by_source == False)
.all()
) # noqa
if len(replies) == 0:
current_app.logger.error("Found no replies when at least one was expected")
return redirect(url_for(".lookup"))
for reply in replies:
reply.deleted_by_source = True
db.session.add(reply)
db.session.commit()
flash(gettext("All replies have been deleted"), "notification")
return redirect(url_for(".lookup"))
@view.route("/login", methods=("GET", "POST"))
def login():
form = LoginForm()
if form.validate_on_submit():
codename = request.form["codename"].strip()
if valid_codename(codename):
session.update(codename=codename, logged_in=True)
return redirect(url_for(".lookup", from_login="1"))
else:
current_app.logger.info("Login failed for invalid codename")
flash(gettext("Sorry, that is not a recognized codename."), "error")
return render_template("login.html", form=form)
@view.route("/logout")
def logout():
if logged_in():
msg = render_template("logout_flashed_message.html")
# Clear the session after we render the message so it's localized
# If a user specified a locale, save it and restore it
user_locale = g.locale
session.clear()
session["locale"] = user_locale
flash(Markup(msg), "important hide-if-not-tor-browser")
return redirect(url_for(".index"))
return view
|
def make_blueprint(config):
view = Blueprint("main", __name__)
@view.route("/")
def index():
return render_template("index.html")
@view.route("/generate", methods=("GET", "POST"))
def generate():
if logged_in():
flash(
gettext(
"You were redirected because you are already logged in. "
"If you want to create a new account, you should log out "
"first."
),
"notification",
)
return redirect(url_for(".lookup"))
codename = generate_unique_codename(config)
session["codename"] = codename
session["new_user"] = True
return render_template("generate.html", codename=codename)
@view.route("/org-logo")
def select_logo():
if os.path.exists(
os.path.join(current_app.static_folder, "i", "custom_logo.png")
):
return redirect(url_for("static", filename="i/custom_logo.png"))
else:
return redirect(url_for("static", filename="i/logo.png"))
@view.route("/create", methods=["POST"])
def create():
filesystem_id = current_app.crypto_util.hash_codename(session["codename"])
source = Source(filesystem_id, current_app.crypto_util.display_id())
db.session.add(source)
try:
db.session.commit()
except IntegrityError as e:
db.session.rollback()
current_app.logger.error(
"Attempt to create a source with duplicate codename: %s" % (e,)
)
# Issue 2386: don't log in on duplicates
del session["codename"]
abort(500)
else:
os.mkdir(current_app.storage.path(filesystem_id))
session["logged_in"] = True
return redirect(url_for(".lookup"))
@view.route("/lookup", methods=("GET",))
@login_required
def lookup():
replies = []
source_inbox = (
Reply.query.filter(Reply.source_id == g.source.id)
.filter(Reply.deleted_by_source == False)
.all()
) # noqa
for reply in source_inbox:
reply_path = current_app.storage.path(
g.filesystem_id,
reply.filename,
)
try:
with io.open(reply_path, "rb") as f:
contents = f.read()
reply_obj = current_app.crypto_util.decrypt(g.codename, contents)
if six.PY2: # Python2
reply.decrypted = reply_obj.decode("utf-8")
else:
reply.decrypted = reply_obj
except UnicodeDecodeError:
current_app.logger.error("Could not decode reply %s" % reply.filename)
else:
reply.date = datetime.utcfromtimestamp(os.stat(reply_path).st_mtime)
replies.append(reply)
# Sort the replies by date
replies.sort(key=operator.attrgetter("date"), reverse=True)
# Generate a keypair to encrypt replies from the journalist
# Only do this if the journalist has flagged the source as one
# that they would like to reply to. (Issue #140.)
if not current_app.crypto_util.getkey(g.filesystem_id) and g.source.flagged:
db_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
async_genkey(current_app.crypto_util, db_uri, g.filesystem_id, g.codename)
return render_template(
"lookup.html",
codename=g.codename,
replies=replies,
flagged=g.source.flagged,
new_user=session.get("new_user", None),
haskey=current_app.crypto_util.getkey(g.filesystem_id),
)
@view.route("/submit", methods=("POST",))
@login_required
def submit():
msg = request.form["msg"]
fh = None
if "fh" in request.files:
fh = request.files["fh"]
# Don't submit anything if it was an "empty" submission. #878
if not (msg or fh):
flash(
gettext("You must enter a message or choose a file to submit."), "error"
)
return redirect(url_for("main.lookup"))
fnames = []
journalist_filename = g.source.journalist_filename
first_submission = g.source.interaction_count == 0
if msg:
g.source.interaction_count += 1
fnames.append(
current_app.storage.save_message_submission(
g.filesystem_id,
g.source.interaction_count,
journalist_filename,
msg,
)
)
if fh:
g.source.interaction_count += 1
fnames.append(
current_app.storage.save_file_submission(
g.filesystem_id,
g.source.interaction_count,
journalist_filename,
fh.filename,
fh.stream,
)
)
if first_submission:
msg = render_template("first_submission_flashed_message.html")
flash(Markup(msg), "success")
else:
if msg and not fh:
html_contents = gettext("Thanks! We received your message.")
elif not msg and fh:
html_contents = gettext("Thanks! We received your document.")
else:
html_contents = gettext(
"Thanks! We received your message and document."
)
msg = render_template(
"next_submission_flashed_message.html", html_contents=html_contents
)
flash(Markup(msg), "success")
new_submissions = []
for fname in fnames:
submission = Submission(g.source, fname)
db.session.add(submission)
new_submissions.append(submission)
if g.source.pending:
g.source.pending = False
# Generate a keypair now, if there's enough entropy (issue #303)
# (gpg reads 300 bytes from /dev/random)
entropy_avail = get_entropy_estimate()
if entropy_avail >= 2400:
db_uri = current_app.config["SQLALCHEMY_DATABASE_URI"]
async_genkey(
current_app.crypto_util, db_uri, g.filesystem_id, g.codename
)
current_app.logger.info(
"generating key, entropy: {}".format(entropy_avail)
)
else:
current_app.logger.warn(
"skipping key generation. entropy: {}".format(entropy_avail)
)
g.source.last_updated = datetime.utcnow()
db.session.commit()
for sub in new_submissions:
store.async_add_checksum_for_file(sub)
normalize_timestamps(g.filesystem_id)
return redirect(url_for("main.lookup"))
@view.route("/delete", methods=("POST",))
@login_required
def delete():
"""This deletes the reply from the source's inbox, but preserves
the history for journalists such that they can view conversation
history.
"""
query = Reply.query.filter_by(
filename=request.form["reply_filename"], source_id=g.source.id
)
reply = get_one_or_else(query, current_app.logger, abort)
reply.deleted_by_source = True
db.session.add(reply)
db.session.commit()
flash(gettext("Reply deleted"), "notification")
return redirect(url_for(".lookup"))
@view.route("/delete-all", methods=("POST",))
@login_required
def batch_delete():
replies = (
Reply.query.filter(Reply.source_id == g.source.id)
.filter(Reply.deleted_by_source == False)
.all()
) # noqa
if len(replies) == 0:
current_app.logger.error("Found no replies when at least one was expected")
return redirect(url_for(".lookup"))
for reply in replies:
reply.deleted_by_source = True
db.session.add(reply)
db.session.commit()
flash(gettext("All replies have been deleted"), "notification")
return redirect(url_for(".lookup"))
@view.route("/login", methods=("GET", "POST"))
def login():
form = LoginForm()
if form.validate_on_submit():
codename = request.form["codename"].strip()
if valid_codename(codename):
session.update(codename=codename, logged_in=True)
return redirect(url_for(".lookup", from_login="1"))
else:
current_app.logger.info("Login failed for invalid codename")
flash(gettext("Sorry, that is not a recognized codename."), "error")
return render_template("login.html", form=form)
@view.route("/logout")
def logout():
if logged_in():
msg = render_template("logout_flashed_message.html")
# Clear the session after we render the message so it's localized
# If a user specified a locale, save it and restore it
user_locale = g.locale
session.clear()
session["locale"] = user_locale
flash(Markup(msg), "important hide-if-not-tor-browser")
return redirect(url_for(".index"))
return view
|
https://github.com/freedomofpress/securedrop/issues/4361
|
[Thu Apr 18 09:46:09.516056 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] [2019-04-18 09:46:09,510] ERROR in app: Exception on / [GET]
[Thu Apr 18 09:46:09.516238 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] Traceback (most recent call last):
[Thu Apr 18 09:46:09.516279 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2292, in wsgi_app
[Thu Apr 18 09:46:09.516317 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] response = self.full_dispatch_request()
[Thu Apr 18 09:46:09.516363 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1815, in full_dispatch_request
[Thu Apr 18 09:46:09.516442 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = self.handle_user_exception(e)
[Thu Apr 18 09:46:09.516479 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1718, in handle_user_exception
[Thu Apr 18 09:46:09.516514 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] reraise(exc_type, exc_value, tb)
[Thu Apr 18 09:46:09.516549 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1811, in full_dispatch_request
[Thu Apr 18 09:46:09.516584 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = self.preprocess_request()
[Thu Apr 18 09:46:09.516619 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2087, in preprocess_request
[Thu Apr 18 09:46:09.516654 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = func()
[Thu Apr 18 09:46:09.516688 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/var/www/securedrop/source_app/decorators.py", line 23, in decorated_function
[Thu Apr 18 09:46:09.516724 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] return f(*args, **kwargs)
[Thu Apr 18 09:46:09.516758 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/var/www/securedrop/source_app/__init__.py", line 159, in setup_g
[Thu Apr 18 09:46:09.516793 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] g.codename = session['codename']
[Thu Apr 18 09:46:09.516828 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/werkzeug/local.py", line 377, in <lambda>
[Thu Apr 18 09:46:09.516864 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] __getitem__ = lambda x, i: x._get_current_object()[i]
[Thu Apr 18 09:46:09.516899 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/sessions.py", line 83, in __getitem__
[Thu Apr 18 09:46:09.516933 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] return super(SecureCookieSession, self).__getitem__(key)
[Thu Apr 18 09:46:09.516968 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] KeyError: 'codename'
|
KeyError
|
def create():
filesystem_id = current_app.crypto_util.hash_codename(session["codename"])
source = Source(filesystem_id, current_app.crypto_util.display_id())
db.session.add(source)
try:
db.session.commit()
except IntegrityError as e:
db.session.rollback()
current_app.logger.error(
"Attempt to create a source with duplicate codename: %s" % (e,)
)
# Issue 2386: don't log in on duplicates
del session["codename"]
# Issue 4361: Delete 'logged_in' if it's in the session
try:
del session["logged_in"]
except KeyError:
pass
abort(500)
else:
os.mkdir(current_app.storage.path(filesystem_id))
session["logged_in"] = True
return redirect(url_for(".lookup"))
|
def create():
filesystem_id = current_app.crypto_util.hash_codename(session["codename"])
source = Source(filesystem_id, current_app.crypto_util.display_id())
db.session.add(source)
try:
db.session.commit()
except IntegrityError as e:
db.session.rollback()
current_app.logger.error(
"Attempt to create a source with duplicate codename: %s" % (e,)
)
# Issue 2386: don't log in on duplicates
del session["codename"]
abort(500)
else:
os.mkdir(current_app.storage.path(filesystem_id))
session["logged_in"] = True
return redirect(url_for(".lookup"))
|
https://github.com/freedomofpress/securedrop/issues/4361
|
[Thu Apr 18 09:46:09.516056 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] [2019-04-18 09:46:09,510] ERROR in app: Exception on / [GET]
[Thu Apr 18 09:46:09.516238 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] Traceback (most recent call last):
[Thu Apr 18 09:46:09.516279 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2292, in wsgi_app
[Thu Apr 18 09:46:09.516317 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] response = self.full_dispatch_request()
[Thu Apr 18 09:46:09.516363 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1815, in full_dispatch_request
[Thu Apr 18 09:46:09.516442 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = self.handle_user_exception(e)
[Thu Apr 18 09:46:09.516479 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1718, in handle_user_exception
[Thu Apr 18 09:46:09.516514 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] reraise(exc_type, exc_value, tb)
[Thu Apr 18 09:46:09.516549 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1811, in full_dispatch_request
[Thu Apr 18 09:46:09.516584 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = self.preprocess_request()
[Thu Apr 18 09:46:09.516619 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2087, in preprocess_request
[Thu Apr 18 09:46:09.516654 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = func()
[Thu Apr 18 09:46:09.516688 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/var/www/securedrop/source_app/decorators.py", line 23, in decorated_function
[Thu Apr 18 09:46:09.516724 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] return f(*args, **kwargs)
[Thu Apr 18 09:46:09.516758 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/var/www/securedrop/source_app/__init__.py", line 159, in setup_g
[Thu Apr 18 09:46:09.516793 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] g.codename = session['codename']
[Thu Apr 18 09:46:09.516828 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/werkzeug/local.py", line 377, in <lambda>
[Thu Apr 18 09:46:09.516864 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] __getitem__ = lambda x, i: x._get_current_object()[i]
[Thu Apr 18 09:46:09.516899 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/sessions.py", line 83, in __getitem__
[Thu Apr 18 09:46:09.516933 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] return super(SecureCookieSession, self).__getitem__(key)
[Thu Apr 18 09:46:09.516968 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] KeyError: 'codename'
|
KeyError
|
def translate_desktop(self, args):
messages_file = os.path.join(args.translations_dir, "desktop.pot")
if args.extract_update:
sources = args.sources.split(",")
k = {"_cwd": args.translations_dir}
xgettext(
"--output=desktop.pot",
"--language=Desktop",
"--keyword",
"--keyword=Name",
"--package-version",
args.version,
"--msgid-bugs-address=securedrop@freedom.press",
"--copyright-holder=Freedom of the Press Foundation",
*sources,
**k,
)
sed("-i", "-e", '/^"POT-Creation-Date/d', messages_file, **k)
if self.file_is_modified(messages_file):
for f in os.listdir(args.translations_dir):
if not f.endswith(".po"):
continue
po_file = os.path.join(args.translations_dir, f)
msgmerge("--update", po_file, messages_file)
log.warning("messages translations updated in " + messages_file)
else:
log.warning("desktop translations are already up to date")
if args.compile:
pos = filter(lambda f: f.endswith(".po"), os.listdir(args.translations_dir))
linguas = map(lambda l: l[:-3], pos)
content = "\n".join(linguas) + "\n"
open(join(args.translations_dir, "LINGUAS"), "w").write(content)
for source in args.sources.split(","):
target = source.rstrip(".in")
msgfmt(
"--desktop",
"--template",
source,
"-o",
target,
"-d",
".",
_cwd=args.translations_dir,
)
|
def translate_desktop(self, args):
messages_file = os.path.join(args.translations_dir, "desktop.pot")
if args.extract_update:
sources = args.sources.split(",")
k = {"_cwd": args.translations_dir}
xgettext(
"--output=desktop.pot",
"--language=Desktop",
"--keyword",
"--keyword=Name",
"--package-version",
args.version,
"--msgid-bugs-address=securedrop@freedom.press",
"--copyright-holder=Freedom of the Press Foundation",
*sources,
**k,
)
sed("-i", "-e", '/^"POT-Creation-Date/d', messages_file, **k)
if self.file_is_modified(messages_file):
for f in os.listdir(args.translations_dir):
if not f.endswith(".po"):
continue
po_file = os.path.join(args.translations_dir, f)
msgmerge("--update", po_file, messages_file)
log.warning("messages translations updated in " + messages_file)
else:
log.warning("desktop translations are already up to date")
if args.compile:
pos = filter(lambda f: f.endswith(".po"), os.listdir(args.translations_dir))
linguas = map(lambda l: l.rstrip(".po"), pos)
content = "\n".join(linguas) + "\n"
open(join(args.translations_dir, "LINGUAS"), "w").write(content)
for source in args.sources.split(","):
target = source.rstrip(".in")
msgfmt(
"--desktop",
"--template",
source,
"-o",
target,
"-d",
".",
_cwd=args.translations_dir,
)
|
https://github.com/freedomofpress/securedrop/issues/4192
|
$ securedrop/bin/dev-shell ./i18n_tool.py --verbose translate-desktop --compile
Run with DOCKER_BUILD_VERBOSE=true for more information
Docker image build in progress done !
2019-02-25 17:28:43,373 INFO <Command u'/usr/bin/msgfmt --desktop --template desktop-journalist-icon.j2.in -o desktop-journalist-icon.j2 -d .'>: starting process
2019-02-25 17:28:43,380 INFO <Command u'/usr/bin/msgfmt --desktop --template desktop-journalist-icon.j2.in -o desktop-journalist-icon.j2 -d .', pid 9>: process started
Traceback (most recent call last):
File "./i18n_tool.py", line 372, in <module>
sys.exit(I18NTool().main(sys.argv[1:]))
File "./i18n_tool.py", line 366, in main
return args.func(args)
File "./i18n_tool.py", line 139, in translate_desktop
_cwd=args.translations_dir)
File "/usr/local/lib/python2.7/dist-packages/sh.py", line 1427, in __call__
return RunningCommand(cmd, call_args, stdin, stdout, stderr)
File "/usr/local/lib/python2.7/dist-packages/sh.py", line 774, in __init__
self.wait()
File "/usr/local/lib/python2.7/dist-packages/sh.py", line 792, in wait
self.handle_command_exit_code(exit_code)
File "/usr/local/lib/python2.7/dist-packages/sh.py", line 815, in handle_command_exit_code
raise exc
sh.ErrorReturnCode_1:
RAN: /usr/bin/msgfmt --desktop --template desktop-journalist-icon.j2.in -o desktop-journalist-icon.j2 -d .
STDOUT:
STDERR:
/usr/bin/msgfmt: error while opening "r.po" for reading: No such file or directory
|
sh.ErrorReturnCode_1
|
def token_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
try:
auth_header = request.headers["Authorization"]
except KeyError:
return abort(403, "API token not found in Authorization header.")
if auth_header:
split = auth_header.split(" ")
if len(split) != 2 or split[0] != "Token":
abort(403, "Malformed authorization header.")
auth_token = split[1]
else:
auth_token = ""
if not Journalist.validate_api_token_and_get_user(auth_token):
return abort(403, "API token is invalid or expired.")
return f(*args, **kwargs)
return decorated_function
|
def token_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
try:
auth_header = request.headers["Authorization"]
except KeyError:
return abort(403, "API token not found in Authorization header.")
if auth_header:
auth_token = auth_header.split(" ")[1]
else:
auth_token = ""
if not Journalist.validate_api_token_and_get_user(auth_token):
return abort(403, "API token is invalid or expired.")
return f(*args, **kwargs)
return decorated_function
|
https://github.com/freedomofpress/securedrop/issues/4053
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2295, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1741, in handle_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_app/api.py", line 40, in decorated_function
auth_token = auth_header.split(" ")[1]
IndexError: list index out of range
|
IndexError
|
def decorated_function(*args, **kwargs):
try:
auth_header = request.headers["Authorization"]
except KeyError:
return abort(403, "API token not found in Authorization header.")
if auth_header:
split = auth_header.split(" ")
if len(split) != 2 or split[0] != "Token":
abort(403, "Malformed authorization header.")
auth_token = split[1]
else:
auth_token = ""
if not Journalist.validate_api_token_and_get_user(auth_token):
return abort(403, "API token is invalid or expired.")
return f(*args, **kwargs)
|
def decorated_function(*args, **kwargs):
try:
auth_header = request.headers["Authorization"]
except KeyError:
return abort(403, "API token not found in Authorization header.")
if auth_header:
auth_token = auth_header.split(" ")[1]
else:
auth_token = ""
if not Journalist.validate_api_token_and_get_user(auth_token):
return abort(403, "API token is invalid or expired.")
return f(*args, **kwargs)
|
https://github.com/freedomofpress/securedrop/issues/4053
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2295, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1741, in handle_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_app/api.py", line 40, in decorated_function
auth_token = auth_header.split(" ")[1]
IndexError: list index out of range
|
IndexError
|
def export_pubkey(self, name):
fingerprint = self.getkey(name)
if fingerprint:
return self.gpg.export_keys(fingerprint)
else:
return None
|
def export_pubkey(self, name):
fingerprint = self.getkey(name)
return self.gpg.export_keys(fingerprint)
|
https://github.com/freedomofpress/securedrop/issues/4005
|
Traceback (most recent call last):
File "/home/heartsucker/code/freedomofpress/securedrop-client/securedrop_client/logic.py", line 189, in <lambda>
lambda: self.completed_api_call(new_thread_id, callback))
File "/home/heartsucker/code/freedomofpress/securedrop-client/securedrop_client/logic.py", line 242, in completed_api_call
user_callback(result_data)
File "/home/heartsucker/code/freedomofpress/securedrop-client/securedrop_client/logic.py", line 419, in on_synced
self.gpg.import_key(source.uuid, pub_key)
File "/home/heartsucker/code/freedomofpress/securedrop-client/securedrop_client/crypto.py", line 116, in import_key
raise RuntimeError('Expected exactly one fingerprint. Found: {}'
RuntimeError: Expected exactly one fingerprint.
|
RuntimeError
|
def make_blueprint(config):
view = Blueprint("main", __name__)
@view.route("/login", methods=("GET", "POST"))
def login():
if request.method == "POST":
user = validate_user(
request.form["username"],
request.form["password"],
request.form["token"],
)
if user:
current_app.logger.info(
"'{}' logged in with the token {}".format(
request.form["username"], request.form["token"]
)
)
# Update access metadata
user.last_access = datetime.utcnow()
db.session.add(user)
db.session.commit()
session["uid"] = user.id
return redirect(url_for("main.index"))
return render_template("login.html")
@view.route("/logout")
def logout():
session.pop("uid", None)
session.pop("expires", None)
return redirect(url_for("main.index"))
@view.route("/org-logo")
def select_logo():
if os.path.exists(
os.path.join(current_app.static_folder, "i", "custom_logo.png")
):
return redirect(url_for("static", filename="i/custom_logo.png"))
else:
return redirect(url_for("static", filename="i/logo.png"))
@view.route("/")
def index():
unstarred = []
starred = []
# Long SQLAlchemy statements look best when formatted according to
# the Pocoo style guide, IMHO:
# http://www.pocoo.org/internal/styleguide/
sources = (
Source.query.filter_by(pending=False)
.filter(Source.last_updated.isnot(None))
.order_by(Source.last_updated.desc())
.all()
)
for source in sources:
star = SourceStar.query.filter_by(source_id=source.id).first()
if star and star.starred:
starred.append(source)
else:
unstarred.append(source)
source.num_unread = len(
Submission.query.filter_by(source_id=source.id, downloaded=False).all()
)
return render_template("index.html", unstarred=unstarred, starred=starred)
@view.route("/reply", methods=("POST",))
def reply():
"""Attempt to send a Reply from a Journalist to a Source. Empty
messages are rejected, and an informative error message is flashed
on the client. In the case of unexpected errors involving database
transactions (potentially caused by racing request threads that
modify the same the database object) logging is done in such a way
so as not to write potentially sensitive information to disk, and a
generic error message is flashed on the client.
Returns:
flask.Response: The user is redirected to the same Source
collection view, regardless if the Reply is created
successfully.
"""
form = ReplyForm()
if not form.validate_on_submit():
for error in form.message.errors:
flash(error, "error")
return redirect(url_for("col.col", filesystem_id=g.filesystem_id))
g.source.interaction_count += 1
filename = "{0}-{1}-reply.gpg".format(
g.source.interaction_count, g.source.journalist_filename
)
current_app.crypto_util.encrypt(
form.message.data,
[current_app.crypto_util.getkey(g.filesystem_id), config.JOURNALIST_KEY],
output=current_app.storage.path(g.filesystem_id, filename),
)
reply = Reply(g.user, g.source, filename)
try:
db.session.add(reply)
db.session.commit()
except Exception as exc:
flash(
gettext(
"An unexpected error occurred! Please inform your administrator."
),
"error",
)
# We take a cautious approach to logging here because we're dealing
# with responses to sources. It's possible the exception message
# could contain information we don't want to write to disk.
current_app.logger.error(
"Reply from '{}' (ID {}) failed: {}!".format(
g.user.username, g.user.id, exc.__class__
)
)
else:
flash(gettext("Thanks. Your reply has been stored."), "notification")
finally:
return redirect(url_for("col.col", filesystem_id=g.filesystem_id))
@view.route("/flag", methods=("POST",))
def flag():
g.source.flagged = True
db.session.commit()
return render_template(
"flag.html",
filesystem_id=g.filesystem_id,
codename=g.source.journalist_designation,
)
@view.route("/bulk", methods=("POST",))
def bulk():
action = request.form["action"]
doc_names_selected = request.form.getlist("doc_names_selected")
selected_docs = [
doc for doc in g.source.collection if doc.filename in doc_names_selected
]
if selected_docs == []:
if action == "download":
flash(gettext("No collections selected for download."), "error")
elif action in ("delete", "confirm_delete"):
flash(gettext("No collections selected for deletion."), "error")
return redirect(url_for("col.col", filesystem_id=g.filesystem_id))
if action == "download":
source = get_source(g.filesystem_id)
return download(source.journalist_filename, selected_docs)
elif action == "delete":
return bulk_delete(g.filesystem_id, selected_docs)
elif action == "confirm_delete":
return confirm_bulk_delete(g.filesystem_id, selected_docs)
else:
abort(400)
@view.route("/regenerate-code", methods=("POST",))
def regenerate_code():
original_journalist_designation = g.source.journalist_designation
g.source.journalist_designation = current_app.crypto_util.display_id()
for item in g.source.collection:
item.filename = current_app.storage.rename_submission(
g.filesystem_id, item.filename, g.source.journalist_filename
)
db.session.commit()
flash(
gettext(
"The source '{original_name}' has been renamed to '{new_name}'"
).format(
original_name=original_journalist_designation,
new_name=g.source.journalist_designation,
),
"notification",
)
return redirect(url_for("col.col", filesystem_id=g.filesystem_id))
@view.route("/download_unread/<filesystem_id>")
def download_unread_filesystem_id(filesystem_id):
id = Source.query.filter(Source.filesystem_id == filesystem_id).one().id
submissions = Submission.query.filter(
Submission.source_id == id, Submission.downloaded == false()
).all()
if submissions == []:
flash(gettext("No unread submissions for this source."))
return redirect(url_for("col.col", filesystem_id=filesystem_id))
source = get_source(filesystem_id)
return download(source.journalist_filename, submissions)
return view
|
def make_blueprint(config):
view = Blueprint("main", __name__)
@view.route("/login", methods=("GET", "POST"))
def login():
if request.method == "POST":
user = validate_user(
request.form["username"],
request.form["password"],
request.form["token"],
)
if user:
current_app.logger.info(
"'{}' logged in with the token {}".format(
request.form["username"], request.form["token"]
)
)
# Update access metadata
user.last_access = datetime.utcnow()
db.session.add(user)
db.session.commit()
session["uid"] = user.id
return redirect(url_for("main.index"))
return render_template("login.html")
@view.route("/logout")
def logout():
session.pop("uid", None)
session.pop("expires", None)
return redirect(url_for("main.index"))
@view.route("/org-logo")
def select_logo():
if os.path.exists(
os.path.join(current_app.static_folder, "i", "custom_logo.png")
):
return redirect(url_for("static", filename="i/custom_logo.png"))
else:
return redirect(url_for("static", filename="i/logo.png"))
@view.route("/")
def index():
unstarred = []
starred = []
# Long SQLAlchemy statements look best when formatted according to
# the Pocoo style guide, IMHO:
# http://www.pocoo.org/internal/styleguide/
sources = (
Source.query.filter_by(pending=False)
.order_by(Source.last_updated.desc())
.all()
)
for source in sources:
star = SourceStar.query.filter_by(source_id=source.id).first()
if star and star.starred:
starred.append(source)
else:
unstarred.append(source)
source.num_unread = len(
Submission.query.filter_by(source_id=source.id, downloaded=False).all()
)
return render_template("index.html", unstarred=unstarred, starred=starred)
@view.route("/reply", methods=("POST",))
def reply():
"""Attempt to send a Reply from a Journalist to a Source. Empty
messages are rejected, and an informative error message is flashed
on the client. In the case of unexpected errors involving database
transactions (potentially caused by racing request threads that
modify the same the database object) logging is done in such a way
so as not to write potentially sensitive information to disk, and a
generic error message is flashed on the client.
Returns:
flask.Response: The user is redirected to the same Source
collection view, regardless if the Reply is created
successfully.
"""
form = ReplyForm()
if not form.validate_on_submit():
for error in form.message.errors:
flash(error, "error")
return redirect(url_for("col.col", filesystem_id=g.filesystem_id))
g.source.interaction_count += 1
filename = "{0}-{1}-reply.gpg".format(
g.source.interaction_count, g.source.journalist_filename
)
current_app.crypto_util.encrypt(
form.message.data,
[current_app.crypto_util.getkey(g.filesystem_id), config.JOURNALIST_KEY],
output=current_app.storage.path(g.filesystem_id, filename),
)
reply = Reply(g.user, g.source, filename)
try:
db.session.add(reply)
db.session.commit()
except Exception as exc:
flash(
gettext(
"An unexpected error occurred! Please inform your administrator."
),
"error",
)
# We take a cautious approach to logging here because we're dealing
# with responses to sources. It's possible the exception message
# could contain information we don't want to write to disk.
current_app.logger.error(
"Reply from '{}' (ID {}) failed: {}!".format(
g.user.username, g.user.id, exc.__class__
)
)
else:
flash(gettext("Thanks. Your reply has been stored."), "notification")
finally:
return redirect(url_for("col.col", filesystem_id=g.filesystem_id))
@view.route("/flag", methods=("POST",))
def flag():
g.source.flagged = True
db.session.commit()
return render_template(
"flag.html",
filesystem_id=g.filesystem_id,
codename=g.source.journalist_designation,
)
@view.route("/bulk", methods=("POST",))
def bulk():
action = request.form["action"]
doc_names_selected = request.form.getlist("doc_names_selected")
selected_docs = [
doc for doc in g.source.collection if doc.filename in doc_names_selected
]
if selected_docs == []:
if action == "download":
flash(gettext("No collections selected for download."), "error")
elif action in ("delete", "confirm_delete"):
flash(gettext("No collections selected for deletion."), "error")
return redirect(url_for("col.col", filesystem_id=g.filesystem_id))
if action == "download":
source = get_source(g.filesystem_id)
return download(source.journalist_filename, selected_docs)
elif action == "delete":
return bulk_delete(g.filesystem_id, selected_docs)
elif action == "confirm_delete":
return confirm_bulk_delete(g.filesystem_id, selected_docs)
else:
abort(400)
@view.route("/regenerate-code", methods=("POST",))
def regenerate_code():
original_journalist_designation = g.source.journalist_designation
g.source.journalist_designation = current_app.crypto_util.display_id()
for item in g.source.collection:
item.filename = current_app.storage.rename_submission(
g.filesystem_id, item.filename, g.source.journalist_filename
)
db.session.commit()
flash(
gettext(
"The source '{original_name}' has been renamed to '{new_name}'"
).format(
original_name=original_journalist_designation,
new_name=g.source.journalist_designation,
),
"notification",
)
return redirect(url_for("col.col", filesystem_id=g.filesystem_id))
@view.route("/download_unread/<filesystem_id>")
def download_unread_filesystem_id(filesystem_id):
id = Source.query.filter(Source.filesystem_id == filesystem_id).one().id
submissions = Submission.query.filter(
Submission.source_id == id, Submission.downloaded == false()
).all()
if submissions == []:
flash(gettext("No unread submissions for this source."))
return redirect(url_for("col.col", filesystem_id=filesystem_id))
source = get_source(filesystem_id)
return download(source.journalist_filename, submissions)
return view
|
https://github.com/freedomofpress/securedrop/issues/3862
|
172.17.0.1 - - [10/Oct/2018 18:49:40] "GET / HTTP/1.1" 500 -
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2295, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1741, in handle_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_app/main.py", line 79, in index
starred=starred)
File "/usr/local/lib/python2.7/dist-packages/flask/templating.py", line 135, in render_template
context, ctx.app)
File "/usr/local/lib/python2.7/dist-packages/flask/templating.py", line 117, in _render
rv = template.render(context)
File "/usr/local/lib/python2.7/dist-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/usr/local/lib/python2.7/dist-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_templates/index.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_templates/base.html", line 50, in top-level template code
{% block body %}{% endblock %}
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_templates/index.html", line 25, in block "body"
{% include '_source_row.html' %}
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_templates/_source_row.html", line 4, in top-level template code
<time class="date" title="{{ source.last_updated|rel_datetime_format }}" datetime="{{ source.last_updated|rel_datetime_format(fmt="%Y-%m-%d %H:%M:%S%Z") }}">{{ source.last_updated|rel_datetime_format(relative=True) }}</time>
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/template_filters.py", line 12, in rel_datetime_format
time = dates.format_timedelta(datetime.utcnow() - dt,
TypeError: unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType'
|
TypeError
|
def index():
unstarred = []
starred = []
# Long SQLAlchemy statements look best when formatted according to
# the Pocoo style guide, IMHO:
# http://www.pocoo.org/internal/styleguide/
sources = (
Source.query.filter_by(pending=False)
.filter(Source.last_updated.isnot(None))
.order_by(Source.last_updated.desc())
.all()
)
for source in sources:
star = SourceStar.query.filter_by(source_id=source.id).first()
if star and star.starred:
starred.append(source)
else:
unstarred.append(source)
source.num_unread = len(
Submission.query.filter_by(source_id=source.id, downloaded=False).all()
)
return render_template("index.html", unstarred=unstarred, starred=starred)
|
def index():
unstarred = []
starred = []
# Long SQLAlchemy statements look best when formatted according to
# the Pocoo style guide, IMHO:
# http://www.pocoo.org/internal/styleguide/
sources = (
Source.query.filter_by(pending=False).order_by(Source.last_updated.desc()).all()
)
for source in sources:
star = SourceStar.query.filter_by(source_id=source.id).first()
if star and star.starred:
starred.append(source)
else:
unstarred.append(source)
source.num_unread = len(
Submission.query.filter_by(source_id=source.id, downloaded=False).all()
)
return render_template("index.html", unstarred=unstarred, starred=starred)
|
https://github.com/freedomofpress/securedrop/issues/3862
|
172.17.0.1 - - [10/Oct/2018 18:49:40] "GET / HTTP/1.1" 500 -
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2309, in __call__
return self.wsgi_app(environ, start_response)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2295, in wsgi_app
response = self.handle_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1741, in handle_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_app/main.py", line 79, in index
starred=starred)
File "/usr/local/lib/python2.7/dist-packages/flask/templating.py", line 135, in render_template
context, ctx.app)
File "/usr/local/lib/python2.7/dist-packages/flask/templating.py", line 117, in _render
rv = template.render(context)
File "/usr/local/lib/python2.7/dist-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/usr/local/lib/python2.7/dist-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_templates/index.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_templates/base.html", line 50, in top-level template code
{% block body %}{% endblock %}
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_templates/index.html", line 25, in block "body"
{% include '_source_row.html' %}
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/journalist_templates/_source_row.html", line 4, in top-level template code
<time class="date" title="{{ source.last_updated|rel_datetime_format }}" datetime="{{ source.last_updated|rel_datetime_format(fmt="%Y-%m-%d %H:%M:%S%Z") }}">{{ source.last_updated|rel_datetime_format(relative=True) }}</time>
File "/home/heartsucker/code/freedomofpress/securedrop/securedrop/template_filters.py", line 12, in rel_datetime_format
time = dates.format_timedelta(datetime.utcnow() - dt,
TypeError: unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType'
|
TypeError
|
def login():
if request.method == "POST":
codename = request.form["codename"]
if valid_codename(codename):
flagged = check_flagged(codename)
session.update(codename=codename, flagged=flagged, logged_in=True)
return redirect(url_for("lookup"))
else:
flash("Sorry, that is not a recognized codename.", "error")
return render_template("login.html")
|
def login():
if request.method == "POST":
codename = request.form["codename"]
if valid_codename(codename):
session.update(codename=codename, logged_in=True)
return redirect(url_for("lookup"))
else:
flash("Sorry, that is not a recognized codename.", "error")
return render_template("login.html")
|
https://github.com/freedomofpress/securedrop/issues/185
|
[Mon Dec 02 21:49:44 2013] [error] ERROR:source:Exception on / [GET]
[Mon Dec 02 21:49:44 2013] [error] Traceback (most recent call last):
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1817, in
wsgi_app
[Mon Dec 02 21:49:44 2013] [error] response =
self.full_dispatch_request()
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1477, in
full_dispatch_request
[Mon Dec 02 21:49:44 2013] [error] rv = self.handle_user_exception(e)
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1381, in
handle_user_exception
[Mon Dec 02 21:49:44 2013] [error] reraise(exc_type, exc_value, tb)
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1473, in
full_dispatch_request
[Mon Dec 02 21:49:44 2013] [error] rv = self.preprocess_request()
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1666, in
preprocess_request
[Mon Dec 02 21:49:44 2013] [error] rv = func()
[Mon Dec 02 21:49:44 2013] [error] File
"/var/www/securedrop/source.py", line 52, in decorated_function
[Mon Dec 02 21:49:44 2013] [error] return f(*args, **kwargs)
[Mon Dec 02 21:49:44 2013] [error] File
"/var/www/securedrop/source.py", line 64, in setup_g
[Mon Dec 02 21:49:44 2013] [error] g.flagged = session['flagged']
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/werkzeug/local.py", line 368, in
<lambda>
[Mon Dec 02 21:49:44 2013] [error] __getitem__ = lambda x, i:
x._get_current_object()[i]
[Mon Dec 02 21:49:44 2013] [error] KeyError: 'flagged'
|
KeyError
|
def setup_g():
"""Store commonly used values in Flask's special g object"""
# ignore_static here because `crypto_util.shash` is bcrypt (very time consuming),
# and we don't need to waste time running if we're just serving a static
# resource that won't need to access these common values.
if logged_in():
# We use session.get (which defaults to None if 'flagged' is not in the
# session) to avoid a KeyError on the redirect from login/ to lookup/
g.flagged = session.get("flagged")
g.codename = session["codename"]
g.sid = crypto_util.shash(g.codename)
g.loc = store.path(g.sid)
|
def setup_g():
"""Store commonly used values in Flask's special g object"""
# ignore_static here because `crypto_util.shash` is bcrypt (very time consuming),
# and we don't need to waste time running if we're just serving a static
# resource that won't need to access these common values.
if logged_in():
g.flagged = session["flagged"]
g.codename = session["codename"]
g.sid = crypto_util.shash(g.codename)
g.loc = store.path(g.sid)
|
https://github.com/freedomofpress/securedrop/issues/185
|
[Mon Dec 02 21:49:44 2013] [error] ERROR:source:Exception on / [GET]
[Mon Dec 02 21:49:44 2013] [error] Traceback (most recent call last):
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1817, in
wsgi_app
[Mon Dec 02 21:49:44 2013] [error] response =
self.full_dispatch_request()
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1477, in
full_dispatch_request
[Mon Dec 02 21:49:44 2013] [error] rv = self.handle_user_exception(e)
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1381, in
handle_user_exception
[Mon Dec 02 21:49:44 2013] [error] reraise(exc_type, exc_value, tb)
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1473, in
full_dispatch_request
[Mon Dec 02 21:49:44 2013] [error] rv = self.preprocess_request()
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1666, in
preprocess_request
[Mon Dec 02 21:49:44 2013] [error] rv = func()
[Mon Dec 02 21:49:44 2013] [error] File
"/var/www/securedrop/source.py", line 52, in decorated_function
[Mon Dec 02 21:49:44 2013] [error] return f(*args, **kwargs)
[Mon Dec 02 21:49:44 2013] [error] File
"/var/www/securedrop/source.py", line 64, in setup_g
[Mon Dec 02 21:49:44 2013] [error] g.flagged = session['flagged']
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/werkzeug/local.py", line 368, in
<lambda>
[Mon Dec 02 21:49:44 2013] [error] __getitem__ = lambda x, i:
x._get_current_object()[i]
[Mon Dec 02 21:49:44 2013] [error] KeyError: 'flagged'
|
KeyError
|
def lookup():
msgs = []
flagged = False
for fn in os.listdir(g.loc):
# TODO: make 'flag' a db column, so we can replace this with a db
# lookup in the future
if fn == "_FLAG":
flagged = True
continue
if fn.startswith("reply-"):
msgs.append(
dict(
id=fn,
date=str(
datetime.fromtimestamp(os.stat(store.path(g.sid, fn)).st_mtime)
),
msg=crypto_util.decrypt(
g.sid, g.codename, file(store.path(g.sid, fn)).read()
),
)
)
if flagged:
session["flagged"] = True
def async_genkey(sid, codename):
with app.app_context():
background.execute(lambda: crypto_util.genkeypair(sid, codename))
# Generate a keypair to encrypt replies from the journalist
# Only do this if the journalist has flagged the source as one
# that they would like to reply to. (Issue #140.)
if not crypto_util.getkey(g.sid) and flagged:
async_genkey(g.sid, g.codename)
return render_template(
"lookup.html",
codename=g.codename,
msgs=msgs,
flagged=flagged,
haskey=crypto_util.getkey(g.sid),
)
|
def lookup():
msgs = []
flagged = False
for fn in os.listdir(g.loc):
if fn == "_FLAG":
flagged = True
continue
if fn.startswith("reply-"):
msgs.append(
dict(
id=fn,
date=str(
datetime.fromtimestamp(os.stat(store.path(g.sid, fn)).st_mtime)
),
msg=crypto_util.decrypt(
g.sid, g.codename, file(store.path(g.sid, fn)).read()
),
)
)
if flagged:
session["flagged"] = True
def async_genkey(sid, codename):
with app.app_context():
background.execute(lambda: crypto_util.genkeypair(sid, codename))
# Generate a keypair to encrypt replies from the journalist
# Only do this if the journalist has flagged the source as one
# that they would like to reply to. (Issue #140.)
if not crypto_util.getkey(g.sid) and flagged:
async_genkey(g.sid, g.codename)
return render_template(
"lookup.html",
codename=g.codename,
msgs=msgs,
flagged=flagged,
haskey=crypto_util.getkey(g.sid),
)
|
https://github.com/freedomofpress/securedrop/issues/185
|
[Mon Dec 02 21:49:44 2013] [error] ERROR:source:Exception on / [GET]
[Mon Dec 02 21:49:44 2013] [error] Traceback (most recent call last):
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1817, in
wsgi_app
[Mon Dec 02 21:49:44 2013] [error] response =
self.full_dispatch_request()
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1477, in
full_dispatch_request
[Mon Dec 02 21:49:44 2013] [error] rv = self.handle_user_exception(e)
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1381, in
handle_user_exception
[Mon Dec 02 21:49:44 2013] [error] reraise(exc_type, exc_value, tb)
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1473, in
full_dispatch_request
[Mon Dec 02 21:49:44 2013] [error] rv = self.preprocess_request()
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1666, in
preprocess_request
[Mon Dec 02 21:49:44 2013] [error] rv = func()
[Mon Dec 02 21:49:44 2013] [error] File
"/var/www/securedrop/source.py", line 52, in decorated_function
[Mon Dec 02 21:49:44 2013] [error] return f(*args, **kwargs)
[Mon Dec 02 21:49:44 2013] [error] File
"/var/www/securedrop/source.py", line 64, in setup_g
[Mon Dec 02 21:49:44 2013] [error] g.flagged = session['flagged']
[Mon Dec 02 21:49:44 2013] [error] File
"/usr/local/lib/python2.7/dist-packages/werkzeug/local.py", line 368, in
<lambda>
[Mon Dec 02 21:49:44 2013] [error] __getitem__ = lambda x, i:
x._get_current_object()[i]
[Mon Dec 02 21:49:44 2013] [error] KeyError: 'flagged'
|
KeyError
|
def __getattr__(self, function_name: str) -> "ContractFunction":
if self.abi is None:
raise NoABIFound(
"There is no ABI found for this contract.",
)
if "_functions" not in self.__dict__:
raise NoABIFunctionsFound(
"The abi for this contract contains no function definitions. ",
"Are you sure you provided the correct contract abi?",
)
elif function_name not in self.__dict__["_functions"]:
raise ABIFunctionNotFound(
"The function '{}' was not found in this contract's abi. ".format(
function_name
),
"Are you sure you provided the correct contract abi?",
)
else:
return super().__getattribute__(function_name)
|
def __getattr__(self, function_name: str) -> "ContractFunction":
if self.abi is None:
raise NoABIFound(
"There is no ABI found for this contract.",
)
if "_functions" not in self.__dict__:
raise NoABIFunctionsFound(
"The abi for this contract contains no function definitions. ",
"Are you sure you provided the correct contract abi?",
)
elif function_name not in self.__dict__["_functions"]:
raise MismatchedABI(
"The function '{}' was not found in this contract's abi. ".format(
function_name
),
"Are you sure you provided the correct contract abi?",
)
else:
return super().__getattribute__(function_name)
|
https://github.com/ethereum/web3.py/issues/1560
|
Traceback (most recent call last):
File "tools/get_names.py", line 101, in <module>
main(parser.parse_args())
File "tools/get_names.py", line 89, in main
if hasattr(registrar.events, 'BidRevealed'):
File "/home/user/.local/lib/python3.7/site-packages/web3/contract.py", line 200, in __getattr__
"Are you sure you provided the correct contract abi?"
web3.exceptions.MismatchedABI: ("The event 'BidRevealed' was not found in this contract's abi. ", 'Are you sure you provided the correct contract abi?')
|
web3.exceptions.MismatchedABI
|
def __getattr__(self, event_name: str) -> "ContractEvent":
if "_events" not in self.__dict__:
raise NoABIEventsFound(
"The abi for this contract contains no event definitions. ",
"Are you sure you provided the correct contract abi?",
)
elif event_name not in self.__dict__["_events"]:
raise ABIEventFunctionNotFound(
"The event '{}' was not found in this contract's abi. ".format(event_name),
"Are you sure you provided the correct contract abi?",
)
else:
return super().__getattribute__(event_name)
|
def __getattr__(self, event_name: str) -> "ContractEvent":
if "_events" not in self.__dict__:
raise NoABIEventsFound(
"The abi for this contract contains no event definitions. ",
"Are you sure you provided the correct contract abi?",
)
elif event_name not in self.__dict__["_events"]:
raise MismatchedABI(
"The event '{}' was not found in this contract's abi. ".format(event_name),
"Are you sure you provided the correct contract abi?",
)
else:
return super().__getattribute__(event_name)
|
https://github.com/ethereum/web3.py/issues/1560
|
Traceback (most recent call last):
File "tools/get_names.py", line 101, in <module>
main(parser.parse_args())
File "tools/get_names.py", line 89, in main
if hasattr(registrar.events, 'BidRevealed'):
File "/home/user/.local/lib/python3.7/site-packages/web3/contract.py", line 200, in __getattr__
"Are you sure you provided the correct contract abi?"
web3.exceptions.MismatchedABI: ("The event 'BidRevealed' was not found in this contract's abi. ", 'Are you sure you provided the correct contract abi?')
|
web3.exceptions.MismatchedABI
|
def __getattr__(self, function_name: str) -> Any:
if self.abi is None:
raise NoABIFound(
"There is no ABI found for this contract.",
)
elif not self._functions or len(self._functions) == 0:
raise NoABIFunctionsFound(
"The ABI for this contract contains no function definitions. ",
"Are you sure you provided the correct contract ABI?",
)
elif function_name not in set(fn["name"] for fn in self._functions):
functions_available = ", ".join([fn["name"] for fn in self._functions])
raise ABIFunctionNotFound(
"The function '{}' was not found in this contract's ABI. ".format(
function_name
),
"Here is a list of all of the function names found: ",
"{}. ".format(functions_available),
"Did you mean to call one of those functions?",
)
else:
return super().__getattribute__(function_name)
|
def __getattr__(self, function_name: str) -> Any:
if self.abi is None:
raise NoABIFound(
"There is no ABI found for this contract.",
)
elif not self._functions or len(self._functions) == 0:
raise NoABIFunctionsFound(
"The ABI for this contract contains no function definitions. ",
"Are you sure you provided the correct contract ABI?",
)
elif function_name not in set(fn["name"] for fn in self._functions):
functions_available = ", ".join([fn["name"] for fn in self._functions])
raise MismatchedABI(
"The function '{}' was not found in this contract's ABI. ".format(
function_name
),
"Here is a list of all of the function names found: ",
"{}. ".format(functions_available),
"Did you mean to call one of those functions?",
)
else:
return super().__getattribute__(function_name)
|
https://github.com/ethereum/web3.py/issues/1560
|
Traceback (most recent call last):
File "tools/get_names.py", line 101, in <module>
main(parser.parse_args())
File "tools/get_names.py", line 89, in main
if hasattr(registrar.events, 'BidRevealed'):
File "/home/user/.local/lib/python3.7/site-packages/web3/contract.py", line 200, in __getattr__
"Are you sure you provided the correct contract abi?"
web3.exceptions.MismatchedABI: ("The event 'BidRevealed' was not found in this contract's abi. ", 'Are you sure you provided the correct contract abi?')
|
web3.exceptions.MismatchedABI
|
def update(self):
super().update()
if self.lifetime_elapsed <= self.in_duration:
u = self.lifetime_elapsed // self.in_duration
self.alpha = arcade.lerp(self.start_alpha, self.mid_alpha, u)
else:
u = (self.lifetime_elapsed - self.in_duration) // self.out_duration
self.alpha = arcade.lerp(self.mid_alpha, self.end_alpha, u)
|
def update(self):
super().update()
if self.lifetime_elapsed <= self.in_duration:
u = self.lifetime_elapsed / self.in_duration
self.alpha = arcade.lerp(self.start_alpha, self.mid_alpha, u)
else:
u = (self.lifetime_elapsed - self.in_duration) / self.out_duration
self.alpha = arcade.lerp(self.mid_alpha, self.end_alpha, u)
|
https://github.com/pythonarcade/arcade/issues/555
|
$ python -m arcade.examples.particle_fireworks
Traceback (most recent call last):
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 357, in <module>
arcade.run()
File "C:\cache\repo\arcade\arcade\window_commands.py", line 246, in run
pyglet.app.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\__init__.py", line 144, in run
event_loop.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 175, in run
self._run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 187, in _run
timeout = self.idle()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 314, in idle
window.dispatch_event('on_draw')
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\window\__init__.py", line 1330, in dispatch_event
if EventDispatcher.dispatch_event(self, *args) != False:
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 428, in dispatch_event
self._raise_dispatch_exception(event_type, args, getattr(self, event_type), exception)
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 482, in _raise_dispatch_exception
raise exception
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 423, in dispatch_event
if getattr(self, event_type)(*args):
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 332, in on_draw
e.draw()
File "C:\cache\repo\arcade\arcade\emitter.py", line 167, in draw
self._particles.draw()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 803, in draw
self._calculate_sprite_buffer()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 619, in _calculate_sprite_buffer
calculate_colors()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 490, in calculate_colors
self._sprite_color_data.append(sprite.alpha)
TypeError: integer argument expected, got float
Error in sys.excepthook:
Original exception was:
|
TypeError
|
def update(self):
"""Advance the Particle's simulation"""
super().update()
self.alpha = arcade.utils.lerp(
self.start_alpha,
self.end_alpha,
self.lifetime_elapsed // self.lifetime_original,
)
|
def update(self):
"""Advance the Particle's simulation"""
super().update()
self.alpha = arcade.utils.lerp(
self.start_alpha, self.end_alpha, self.lifetime_elapsed / self.lifetime_original
)
|
https://github.com/pythonarcade/arcade/issues/555
|
$ python -m arcade.examples.particle_fireworks
Traceback (most recent call last):
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 357, in <module>
arcade.run()
File "C:\cache\repo\arcade\arcade\window_commands.py", line 246, in run
pyglet.app.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\__init__.py", line 144, in run
event_loop.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 175, in run
self._run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 187, in _run
timeout = self.idle()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 314, in idle
window.dispatch_event('on_draw')
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\window\__init__.py", line 1330, in dispatch_event
if EventDispatcher.dispatch_event(self, *args) != False:
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 428, in dispatch_event
self._raise_dispatch_exception(event_type, args, getattr(self, event_type), exception)
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 482, in _raise_dispatch_exception
raise exception
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 423, in dispatch_event
if getattr(self, event_type)(*args):
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 332, in on_draw
e.draw()
File "C:\cache\repo\arcade\arcade\emitter.py", line 167, in draw
self._particles.draw()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 803, in draw
self._calculate_sprite_buffer()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 619, in _calculate_sprite_buffer
calculate_colors()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 490, in calculate_colors
self._sprite_color_data.append(sprite.alpha)
TypeError: integer argument expected, got float
Error in sys.excepthook:
Original exception was:
|
TypeError
|
def _calculate_sprite_buffer(self):
if self.is_static:
usage = "static"
else:
usage = "stream"
def calculate_pos_buffer():
self._sprite_pos_data = array.array("f")
# print("A")
for sprite in self.sprite_list:
self._sprite_pos_data.append(sprite.center_x)
self._sprite_pos_data.append(sprite.center_y)
self._sprite_pos_buf = shader.buffer(
self._sprite_pos_data.tobytes(), usage=usage
)
variables = ["in_pos"]
self._sprite_pos_desc = shader.BufferDescription(
self._sprite_pos_buf, "2f", variables, instanced=True
)
self._sprite_pos_changed = False
def calculate_size_buffer():
self._sprite_size_data = array.array("f")
for sprite in self.sprite_list:
self._sprite_size_data.append(sprite.width)
self._sprite_size_data.append(sprite.height)
self._sprite_size_buf = shader.buffer(
self._sprite_size_data.tobytes(), usage=usage
)
variables = ["in_size"]
self._sprite_size_desc = shader.BufferDescription(
self._sprite_size_buf, "2f", variables, instanced=True
)
self._sprite_size_changed = False
def calculate_angle_buffer():
self._sprite_angle_data = array.array("f")
for sprite in self.sprite_list:
self._sprite_angle_data.append(math.radians(sprite.angle))
self._sprite_angle_buf = shader.buffer(
self._sprite_angle_data.tobytes(), usage=usage
)
variables = ["in_angle"]
self._sprite_angle_desc = shader.BufferDescription(
self._sprite_angle_buf, "1f", variables, instanced=True
)
self._sprite_angle_changed = False
def calculate_colors():
self._sprite_color_data = array.array("B")
for sprite in self.sprite_list:
self._sprite_color_data.append(int(sprite.color[0]))
self._sprite_color_data.append(int(sprite.color[1]))
self._sprite_color_data.append(int(sprite.color[2]))
self._sprite_color_data.append(int(sprite.alpha))
self._sprite_color_buf = shader.buffer(
self._sprite_color_data.tobytes(), usage=usage
)
variables = ["in_color"]
self._sprite_color_desc = shader.BufferDescription(
self._sprite_color_buf,
"4B",
variables,
normalized=["in_color"],
instanced=True,
)
self._sprite_color_changed = False
def calculate_sub_tex_coords():
new_array_of_texture_names = []
new_array_of_images = []
new_texture = False
if self.array_of_images is None:
new_texture = True
# print()
# print("New texture start: ", new_texture)
for sprite in self.sprite_list:
# noinspection PyProtectedMember
if sprite.texture is None:
raise Exception(
"Error: Attempt to draw a sprite without a texture set."
)
name_of_texture_to_check = sprite.texture.name
if name_of_texture_to_check not in self.array_of_texture_names:
new_texture = True
# print("New because of ", name_of_texture_to_check)
if name_of_texture_to_check not in new_array_of_texture_names:
new_array_of_texture_names.append(name_of_texture_to_check)
image = sprite.texture.image
new_array_of_images.append(image)
# print("New texture end: ", new_texture)
# print(new_array_of_texture_names)
# print(self.array_of_texture_names)
# print()
if new_texture:
# Add back in any old textures. Chances are we'll need them.
for index, old_texture_name in enumerate(self.array_of_texture_names):
if (
old_texture_name not in new_array_of_texture_names
and self.array_of_images is not None
):
new_array_of_texture_names.append(old_texture_name)
image = self.array_of_images[index]
new_array_of_images.append(image)
self.array_of_texture_names = new_array_of_texture_names
self.array_of_images = new_array_of_images
# print(f"New Texture Atlas with names {self.array_of_texture_names}")
# Get their sizes
widths, heights = zip(*(i.size for i in self.array_of_images))
# Figure out what size a composite would be
total_width = sum(widths)
max_height = max(heights)
if new_texture:
# TODO: This code isn't valid, but I think some releasing might be in order.
# if self.texture is not None:
# shader.Texture.release(self.texture_id)
# Make the composite image
new_image = Image.new("RGBA", (total_width, max_height))
x_offset = 0
for image in self.array_of_images:
new_image.paste(image, (x_offset, 0))
x_offset += image.size[0]
# Create a texture out the composite image
texture_bytes = new_image.tobytes()
self._texture = shader.texture(
(new_image.width, new_image.height), 4, texture_bytes
)
if self.texture_id is None:
self.texture_id = SpriteList.next_texture_id
# Create a list with the coordinates of all the unique textures
tex_coords = []
start_x = 0.0
for image in self.array_of_images:
end_x = start_x + (image.width / total_width)
normalized_width = image.width / total_width
start_height = 1 - (image.height / max_height)
normalized_height = image.height / max_height
tex_coords.append(
[start_x, start_height, normalized_width, normalized_height]
)
start_x = end_x
# Go through each sprite and pull from the coordinate list, the proper
# coordinates for that sprite's image.
array_of_sub_tex_coords = array.array("f")
for sprite in self.sprite_list:
index = self.array_of_texture_names.index(sprite.texture.name)
for coord in tex_coords[index]:
array_of_sub_tex_coords.append(coord)
self._sprite_sub_tex_buf = shader.buffer(
array_of_sub_tex_coords.tobytes(), usage=usage
)
self._sprite_sub_tex_desc = shader.BufferDescription(
self._sprite_sub_tex_buf, "4f", ["in_sub_tex_coords"], instanced=True
)
self._sprite_sub_tex_changed = False
if len(self.sprite_list) == 0:
return
calculate_pos_buffer()
calculate_size_buffer()
calculate_angle_buffer()
calculate_sub_tex_coords()
calculate_colors()
vertices = array.array(
"f",
[
# x, y, u, v
-1.0,
-1.0,
0.0,
0.0,
-1.0,
1.0,
0.0,
1.0,
1.0,
-1.0,
1.0,
0.0,
1.0,
1.0,
1.0,
1.0,
],
)
self.vbo_buf = shader.buffer(vertices.tobytes())
vbo_buf_desc = shader.BufferDescription(
self.vbo_buf, "2f 2f", ("in_vert", "in_texture")
)
# Can add buffer to index vertices
vao_content = [
vbo_buf_desc,
self._sprite_pos_desc,
self._sprite_size_desc,
self._sprite_angle_desc,
self._sprite_sub_tex_desc,
self._sprite_color_desc,
]
self._vao1 = shader.vertex_array(self.program, vao_content)
|
def _calculate_sprite_buffer(self):
if self.is_static:
usage = "static"
else:
usage = "stream"
def calculate_pos_buffer():
self._sprite_pos_data = array.array("f")
# print("A")
for sprite in self.sprite_list:
self._sprite_pos_data.append(sprite.center_x)
self._sprite_pos_data.append(sprite.center_y)
self._sprite_pos_buf = shader.buffer(
self._sprite_pos_data.tobytes(), usage=usage
)
variables = ["in_pos"]
self._sprite_pos_desc = shader.BufferDescription(
self._sprite_pos_buf, "2f", variables, instanced=True
)
self._sprite_pos_changed = False
def calculate_size_buffer():
self._sprite_size_data = array.array("f")
for sprite in self.sprite_list:
self._sprite_size_data.append(sprite.width)
self._sprite_size_data.append(sprite.height)
self._sprite_size_buf = shader.buffer(
self._sprite_size_data.tobytes(), usage=usage
)
variables = ["in_size"]
self._sprite_size_desc = shader.BufferDescription(
self._sprite_size_buf, "2f", variables, instanced=True
)
self._sprite_size_changed = False
def calculate_angle_buffer():
self._sprite_angle_data = array.array("f")
for sprite in self.sprite_list:
self._sprite_angle_data.append(math.radians(sprite.angle))
self._sprite_angle_buf = shader.buffer(
self._sprite_angle_data.tobytes(), usage=usage
)
variables = ["in_angle"]
self._sprite_angle_desc = shader.BufferDescription(
self._sprite_angle_buf, "1f", variables, instanced=True
)
self._sprite_angle_changed = False
def calculate_colors():
self._sprite_color_data = array.array("B")
for sprite in self.sprite_list:
self._sprite_color_data.append(sprite.color[0])
self._sprite_color_data.append(sprite.color[1])
self._sprite_color_data.append(sprite.color[2])
self._sprite_color_data.append(sprite.alpha)
self._sprite_color_buf = shader.buffer(
self._sprite_color_data.tobytes(), usage=usage
)
variables = ["in_color"]
self._sprite_color_desc = shader.BufferDescription(
self._sprite_color_buf,
"4B",
variables,
normalized=["in_color"],
instanced=True,
)
self._sprite_color_changed = False
def calculate_sub_tex_coords():
new_array_of_texture_names = []
new_array_of_images = []
new_texture = False
if self.array_of_images is None:
new_texture = True
# print()
# print("New texture start: ", new_texture)
for sprite in self.sprite_list:
# noinspection PyProtectedMember
if sprite.texture is None:
raise Exception(
"Error: Attempt to draw a sprite without a texture set."
)
name_of_texture_to_check = sprite.texture.name
if name_of_texture_to_check not in self.array_of_texture_names:
new_texture = True
# print("New because of ", name_of_texture_to_check)
if name_of_texture_to_check not in new_array_of_texture_names:
new_array_of_texture_names.append(name_of_texture_to_check)
image = sprite.texture.image
new_array_of_images.append(image)
# print("New texture end: ", new_texture)
# print(new_array_of_texture_names)
# print(self.array_of_texture_names)
# print()
if new_texture:
# Add back in any old textures. Chances are we'll need them.
for index, old_texture_name in enumerate(self.array_of_texture_names):
if (
old_texture_name not in new_array_of_texture_names
and self.array_of_images is not None
):
new_array_of_texture_names.append(old_texture_name)
image = self.array_of_images[index]
new_array_of_images.append(image)
self.array_of_texture_names = new_array_of_texture_names
self.array_of_images = new_array_of_images
# print(f"New Texture Atlas with names {self.array_of_texture_names}")
# Get their sizes
widths, heights = zip(*(i.size for i in self.array_of_images))
# Figure out what size a composite would be
total_width = sum(widths)
max_height = max(heights)
if new_texture:
# TODO: This code isn't valid, but I think some releasing might be in order.
# if self.texture is not None:
# shader.Texture.release(self.texture_id)
# Make the composite image
new_image = Image.new("RGBA", (total_width, max_height))
x_offset = 0
for image in self.array_of_images:
new_image.paste(image, (x_offset, 0))
x_offset += image.size[0]
# Create a texture out the composite image
texture_bytes = new_image.tobytes()
self._texture = shader.texture(
(new_image.width, new_image.height), 4, texture_bytes
)
if self.texture_id is None:
self.texture_id = SpriteList.next_texture_id
# Create a list with the coordinates of all the unique textures
tex_coords = []
start_x = 0.0
for image in self.array_of_images:
end_x = start_x + (image.width / total_width)
normalized_width = image.width / total_width
start_height = 1 - (image.height / max_height)
normalized_height = image.height / max_height
tex_coords.append(
[start_x, start_height, normalized_width, normalized_height]
)
start_x = end_x
# Go through each sprite and pull from the coordinate list, the proper
# coordinates for that sprite's image.
array_of_sub_tex_coords = array.array("f")
for sprite in self.sprite_list:
index = self.array_of_texture_names.index(sprite.texture.name)
for coord in tex_coords[index]:
array_of_sub_tex_coords.append(coord)
self._sprite_sub_tex_buf = shader.buffer(
array_of_sub_tex_coords.tobytes(), usage=usage
)
self._sprite_sub_tex_desc = shader.BufferDescription(
self._sprite_sub_tex_buf, "4f", ["in_sub_tex_coords"], instanced=True
)
self._sprite_sub_tex_changed = False
if len(self.sprite_list) == 0:
return
calculate_pos_buffer()
calculate_size_buffer()
calculate_angle_buffer()
calculate_sub_tex_coords()
calculate_colors()
vertices = array.array(
"f",
[
# x, y, u, v
-1.0,
-1.0,
0.0,
0.0,
-1.0,
1.0,
0.0,
1.0,
1.0,
-1.0,
1.0,
0.0,
1.0,
1.0,
1.0,
1.0,
],
)
self.vbo_buf = shader.buffer(vertices.tobytes())
vbo_buf_desc = shader.BufferDescription(
self.vbo_buf, "2f 2f", ("in_vert", "in_texture")
)
# Can add buffer to index vertices
vao_content = [
vbo_buf_desc,
self._sprite_pos_desc,
self._sprite_size_desc,
self._sprite_angle_desc,
self._sprite_sub_tex_desc,
self._sprite_color_desc,
]
self._vao1 = shader.vertex_array(self.program, vao_content)
|
https://github.com/pythonarcade/arcade/issues/555
|
$ python -m arcade.examples.particle_fireworks
Traceback (most recent call last):
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 357, in <module>
arcade.run()
File "C:\cache\repo\arcade\arcade\window_commands.py", line 246, in run
pyglet.app.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\__init__.py", line 144, in run
event_loop.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 175, in run
self._run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 187, in _run
timeout = self.idle()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 314, in idle
window.dispatch_event('on_draw')
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\window\__init__.py", line 1330, in dispatch_event
if EventDispatcher.dispatch_event(self, *args) != False:
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 428, in dispatch_event
self._raise_dispatch_exception(event_type, args, getattr(self, event_type), exception)
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 482, in _raise_dispatch_exception
raise exception
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 423, in dispatch_event
if getattr(self, event_type)(*args):
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 332, in on_draw
e.draw()
File "C:\cache\repo\arcade\arcade\emitter.py", line 167, in draw
self._particles.draw()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 803, in draw
self._calculate_sprite_buffer()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 619, in _calculate_sprite_buffer
calculate_colors()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 490, in calculate_colors
self._sprite_color_data.append(sprite.alpha)
TypeError: integer argument expected, got float
Error in sys.excepthook:
Original exception was:
|
TypeError
|
def calculate_colors():
self._sprite_color_data = array.array("B")
for sprite in self.sprite_list:
self._sprite_color_data.append(int(sprite.color[0]))
self._sprite_color_data.append(int(sprite.color[1]))
self._sprite_color_data.append(int(sprite.color[2]))
self._sprite_color_data.append(int(sprite.alpha))
self._sprite_color_buf = shader.buffer(
self._sprite_color_data.tobytes(), usage=usage
)
variables = ["in_color"]
self._sprite_color_desc = shader.BufferDescription(
self._sprite_color_buf, "4B", variables, normalized=["in_color"], instanced=True
)
self._sprite_color_changed = False
|
def calculate_colors():
self._sprite_color_data = array.array("B")
for sprite in self.sprite_list:
self._sprite_color_data.append(sprite.color[0])
self._sprite_color_data.append(sprite.color[1])
self._sprite_color_data.append(sprite.color[2])
self._sprite_color_data.append(sprite.alpha)
self._sprite_color_buf = shader.buffer(
self._sprite_color_data.tobytes(), usage=usage
)
variables = ["in_color"]
self._sprite_color_desc = shader.BufferDescription(
self._sprite_color_buf, "4B", variables, normalized=["in_color"], instanced=True
)
self._sprite_color_changed = False
|
https://github.com/pythonarcade/arcade/issues/555
|
$ python -m arcade.examples.particle_fireworks
Traceback (most recent call last):
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 357, in <module>
arcade.run()
File "C:\cache\repo\arcade\arcade\window_commands.py", line 246, in run
pyglet.app.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\__init__.py", line 144, in run
event_loop.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 175, in run
self._run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 187, in _run
timeout = self.idle()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 314, in idle
window.dispatch_event('on_draw')
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\window\__init__.py", line 1330, in dispatch_event
if EventDispatcher.dispatch_event(self, *args) != False:
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 428, in dispatch_event
self._raise_dispatch_exception(event_type, args, getattr(self, event_type), exception)
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 482, in _raise_dispatch_exception
raise exception
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 423, in dispatch_event
if getattr(self, event_type)(*args):
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 332, in on_draw
e.draw()
File "C:\cache\repo\arcade\arcade\emitter.py", line 167, in draw
self._particles.draw()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 803, in draw
self._calculate_sprite_buffer()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 619, in _calculate_sprite_buffer
calculate_colors()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 490, in calculate_colors
self._sprite_color_data.append(sprite.alpha)
TypeError: integer argument expected, got float
Error in sys.excepthook:
Original exception was:
|
TypeError
|
def update_position(self, sprite: Sprite):
"""
Called by the Sprite class to update position, angle, size and color
of the specified sprite.
Necessary for batch drawing of items.
:param Sprite sprite: Sprite to update.
"""
if self._vao1 is None:
return
i = self.sprite_idx[sprite]
self._sprite_pos_data[i * 2] = sprite.position[0]
self._sprite_pos_data[i * 2 + 1] = sprite.position[1]
self._sprite_pos_changed = True
self._sprite_angle_data[i] = math.radians(sprite.angle)
self._sprite_angle_changed = True
self._sprite_color_data[i * 4] = int(sprite.color[0])
self._sprite_color_data[i * 4 + 1] = int(sprite.color[1])
self._sprite_color_data[i * 4 + 2] = int(sprite.color[2])
self._sprite_color_data[i * 4 + 3] = int(sprite.alpha)
self._sprite_color_changed = True
|
def update_position(self, sprite: Sprite):
"""
Called by the Sprite class to update position, angle, size and color
of the specified sprite.
Necessary for batch drawing of items.
:param Sprite sprite: Sprite to update.
"""
if self._vao1 is None:
return
i = self.sprite_idx[sprite]
self._sprite_pos_data[i * 2] = sprite.position[0]
self._sprite_pos_data[i * 2 + 1] = sprite.position[1]
self._sprite_pos_changed = True
self._sprite_angle_data[i] = math.radians(sprite.angle)
self._sprite_angle_changed = True
self._sprite_color_data[i * 4] = sprite.color[0]
self._sprite_color_data[i * 4 + 1] = sprite.color[1]
self._sprite_color_data[i * 4 + 2] = sprite.color[2]
self._sprite_color_data[i * 4 + 3] = sprite.alpha
self._sprite_color_changed = True
|
https://github.com/pythonarcade/arcade/issues/555
|
$ python -m arcade.examples.particle_fireworks
Traceback (most recent call last):
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 357, in <module>
arcade.run()
File "C:\cache\repo\arcade\arcade\window_commands.py", line 246, in run
pyglet.app.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\__init__.py", line 144, in run
event_loop.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 175, in run
self._run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 187, in _run
timeout = self.idle()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 314, in idle
window.dispatch_event('on_draw')
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\window\__init__.py", line 1330, in dispatch_event
if EventDispatcher.dispatch_event(self, *args) != False:
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 428, in dispatch_event
self._raise_dispatch_exception(event_type, args, getattr(self, event_type), exception)
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 482, in _raise_dispatch_exception
raise exception
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 423, in dispatch_event
if getattr(self, event_type)(*args):
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 332, in on_draw
e.draw()
File "C:\cache\repo\arcade\arcade\emitter.py", line 167, in draw
self._particles.draw()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 803, in draw
self._calculate_sprite_buffer()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 619, in _calculate_sprite_buffer
calculate_colors()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 490, in calculate_colors
self._sprite_color_data.append(sprite.alpha)
TypeError: integer argument expected, got float
Error in sys.excepthook:
Original exception was:
|
TypeError
|
def update(self):
super().update()
if self.lifetime_elapsed <= self.in_duration:
u = self.lifetime_elapsed / self.in_duration
self.alpha = clamp(arcade.lerp(self.start_alpha, self.mid_alpha, u), 0, 255)
else:
u = (self.lifetime_elapsed - self.in_duration) / self.out_duration
self.alpha = clamp(arcade.lerp(self.mid_alpha, self.end_alpha, u), 0, 255)
|
def update(self):
super().update()
if self.lifetime_elapsed <= self.in_duration:
u = self.lifetime_elapsed // self.in_duration
self.alpha = arcade.lerp(self.start_alpha, self.mid_alpha, u)
else:
u = (self.lifetime_elapsed - self.in_duration) // self.out_duration
self.alpha = arcade.lerp(self.mid_alpha, self.end_alpha, u)
|
https://github.com/pythonarcade/arcade/issues/555
|
$ python -m arcade.examples.particle_fireworks
Traceback (most recent call last):
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 357, in <module>
arcade.run()
File "C:\cache\repo\arcade\arcade\window_commands.py", line 246, in run
pyglet.app.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\__init__.py", line 144, in run
event_loop.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 175, in run
self._run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 187, in _run
timeout = self.idle()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 314, in idle
window.dispatch_event('on_draw')
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\window\__init__.py", line 1330, in dispatch_event
if EventDispatcher.dispatch_event(self, *args) != False:
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 428, in dispatch_event
self._raise_dispatch_exception(event_type, args, getattr(self, event_type), exception)
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 482, in _raise_dispatch_exception
raise exception
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 423, in dispatch_event
if getattr(self, event_type)(*args):
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 332, in on_draw
e.draw()
File "C:\cache\repo\arcade\arcade\emitter.py", line 167, in draw
self._particles.draw()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 803, in draw
self._calculate_sprite_buffer()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 619, in _calculate_sprite_buffer
calculate_colors()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 490, in calculate_colors
self._sprite_color_data.append(sprite.alpha)
TypeError: integer argument expected, got float
Error in sys.excepthook:
Original exception was:
|
TypeError
|
def update(self):
"""Advance the Particle's simulation"""
super().update()
a = arcade.utils.lerp(
self.start_alpha, self.end_alpha, self.lifetime_elapsed / self.lifetime_original
)
self.alpha = clamp(a, 0, 255)
|
def update(self):
"""Advance the Particle's simulation"""
super().update()
self.alpha = arcade.utils.lerp(
self.start_alpha,
self.end_alpha,
self.lifetime_elapsed // self.lifetime_original,
)
|
https://github.com/pythonarcade/arcade/issues/555
|
$ python -m arcade.examples.particle_fireworks
Traceback (most recent call last):
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\Users\scott\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 357, in <module>
arcade.run()
File "C:\cache\repo\arcade\arcade\window_commands.py", line 246, in run
pyglet.app.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\__init__.py", line 144, in run
event_loop.run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 175, in run
self._run()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 187, in _run
timeout = self.idle()
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\app\base.py", line 314, in idle
window.dispatch_event('on_draw')
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\window\__init__.py", line 1330, in dispatch_event
if EventDispatcher.dispatch_event(self, *args) != False:
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 428, in dispatch_event
self._raise_dispatch_exception(event_type, args, getattr(self, event_type), exception)
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 482, in _raise_dispatch_exception
raise exception
File "C:\cache\venv\arcadedev\lib\site-packages\pyglet\event.py", line 423, in dispatch_event
if getattr(self, event_type)(*args):
File "C:\cache\repo\arcade\arcade\examples\particle_fireworks.py", line 332, in on_draw
e.draw()
File "C:\cache\repo\arcade\arcade\emitter.py", line 167, in draw
self._particles.draw()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 803, in draw
self._calculate_sprite_buffer()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 619, in _calculate_sprite_buffer
calculate_colors()
File "C:\cache\repo\arcade\arcade\sprite_list.py", line 490, in calculate_colors
self._sprite_color_data.append(sprite.alpha)
TypeError: integer argument expected, got float
Error in sys.excepthook:
Original exception was:
|
TypeError
|
def read_tiled_map(tmx_file: str, scaling: float = 1, tsx_file: str = None) -> TiledMap:
"""
Given a tmx_file, this will read in a tiled map, and return
a TiledMap object.
Given a tsx_file, the map will use it as the tileset.
If tsx_file is not specified, it will use the tileset specified
within the tmx_file.
Important: Tiles must be a "collection" of images.
Hitboxes can be drawn around tiles in the tileset editor,
but only polygons are supported.
(This is a great area for PR's to improve things.)
:param str tmx_file: String with name of our TMX file
:param float scaling: Scaling factor. 0.5 will half all widths and heights
:param str tsx_file: Tileset to use (can be specified in TMX file)
:returns: Map
:rtype: TiledMap
"""
# Create a map object to store this stuff in
my_map = TiledMap()
# Read in and parse the file
tree = etree.parse(tmx_file)
# Root node should be 'map'
map_tag = tree.getroot()
# Pull attributes that should be in the file for the map
my_map.version = map_tag.attrib["version"]
my_map.orientation = map_tag.attrib["orientation"]
my_map.renderorder = map_tag.attrib["renderorder"]
my_map.width = int(map_tag.attrib["width"])
my_map.height = int(map_tag.attrib["height"])
my_map.tilewidth = int(map_tag.attrib["tilewidth"])
my_map.tileheight = int(map_tag.attrib["tileheight"])
# Background color is optional, and may or may not be in there
if "backgroundcolor" in map_tag.attrib:
# Decode the background color string
backgroundcolor_string = map_tag.attrib["backgroundcolor"]
red_hex = "0x" + backgroundcolor_string[1:3]
green_hex = "0x" + backgroundcolor_string[3:5]
blue_hex = "0x" + backgroundcolor_string[5:7]
red = int(red_hex, 16)
green = int(green_hex, 16)
blue = int(blue_hex, 16)
my_map.backgroundcolor = (red, green, blue)
my_map.nextobjectid = map_tag.attrib["nextobjectid"]
# Grab all the tilesets
tileset_tag_list = map_tag.findall("./tileset")
# --- Tileset Data ---
# Loop through each tileset
for tileset_tag in tileset_tag_list:
firstgid = int(tileset_tag.attrib["firstgid"])
if tsx_file is not None or "source" in tileset_tag.attrib:
if tsx_file is not None:
tileset_tree = etree.parse(tsx_file)
else:
source = tileset_tag.attrib["source"]
try:
tileset_tree = etree.parse(source)
except FileNotFoundError:
source = Path(tmx_file).parent / Path(source)
tileset_tree = etree.parse(source)
# Root node should be 'map'
tileset_root = tileset_tree.getroot()
tile_tag_list = tileset_root.findall("tile")
else:
# Grab each tile
tile_tag_list = tileset_tag.findall("tile")
# Loop through each tile
for tile_tag in tile_tag_list:
# Make a tile object
my_tile = Tile()
image = tile_tag.find("image")
my_tile.local_id = tile_tag.attrib["id"]
my_tile.width = int(image.attrib["width"])
my_tile.height = int(image.attrib["height"])
my_tile.source = image.attrib["source"]
key = str(int(my_tile.local_id) + 1)
my_map.global_tile_set[key] = my_tile
firstgid += 1
objectgroup = tile_tag.find("objectgroup")
if objectgroup:
my_object = objectgroup.find("object")
if my_object:
offset_x = round(float(my_object.attrib["x"]))
offset_y = round(float(my_object.attrib["y"]))
polygon = my_object.find("polygon")
if polygon is not None:
point_list = _parse_points(polygon.attrib["points"])
for point in point_list:
point[0] += offset_x
point[1] += offset_y
point[1] = my_tile.height - point[1]
point[0] -= my_tile.width // 2
point[1] -= my_tile.height // 2
point[0] *= scaling
point[1] *= scaling
point[0] = int(point[0])
point[1] = int(point[1])
my_tile.points = point_list
polygon = my_object.find("polyline")
if polygon is not None:
point_list = _parse_points(polygon.attrib["points"])
for point in point_list:
point[0] += offset_x
point[1] += offset_y
point[1] = my_tile.height - point[1]
point[0] -= my_tile.width // 2
point[1] -= my_tile.height // 2
point[0] *= scaling
point[1] *= scaling
point[0] = int(point[0])
point[1] = int(point[1])
if (
point_list[0][0] != point_list[-1][0]
or point_list[0][1] != point_list[-1][1]
):
point_list.append([point_list[0][0], point_list[0][1]])
my_tile.points = point_list
# --- Map Data ---
# Grab each layer
layer_tag_list = map_tag.findall("./layer")
for layer_tag in layer_tag_list:
layer_width = int(layer_tag.attrib["width"])
# Unzip and unencode each layer
data = layer_tag.find("data")
data_text = data.text.strip()
encoding = data.attrib["encoding"]
if "compression" in data.attrib:
compression = data.attrib["compression"]
else:
compression = None
if encoding == "csv":
layer_grid_ints = _process_csv_encoding(data_text)
elif encoding == "base64":
layer_grid_ints = _process_base64_encoding(
data_text, compression, layer_width
)
else:
print(f"Error, unexpected encoding: {encoding}.")
break
# Great, we have a grid of ints. Save that according to the layer name
my_map.layers_int_data[layer_tag.attrib["name"]] = layer_grid_ints
# Now create grid objects for each tile
layer_grid_objs = []
for row_index, row in enumerate(layer_grid_ints):
layer_grid_objs.append([])
for column_index, column in enumerate(row):
grid_loc = GridLocation()
if layer_grid_ints[row_index][column_index] != 0:
key = str(layer_grid_ints[row_index][column_index])
if key not in my_map.global_tile_set:
print(
f"Warning, tried to load '{key}' and it is not in the tileset."
)
else:
grid_loc.tile = my_map.global_tile_set[key]
if my_map.renderorder == "right-down":
adjusted_row_index = my_map.height - row_index - 1
else:
adjusted_row_index = row_index
if my_map.orientation == "orthogonal":
grid_loc.center_x = (
column_index * my_map.tilewidth + my_map.tilewidth // 2
)
grid_loc.center_y = (
adjusted_row_index * my_map.tileheight
+ my_map.tilewidth // 2
)
else:
grid_loc.center_x, grid_loc.center_y = (
isometric_grid_to_screen(
column_index,
row_index,
my_map.width,
my_map.height,
my_map.tilewidth,
my_map.tileheight,
)
)
layer_grid_objs[row_index].append(grid_loc)
my_map.layers[layer_tag.attrib["name"]] = layer_grid_objs
return my_map
|
def read_tiled_map(tmx_file: str, scaling: float = 1, tsx_file: str = None) -> TiledMap:
"""
Given a tmx_file, this will read in a tiled map, and return
a TiledMap object.
Given a tsx_file, the map will use it as the tileset.
If tsx_file is not specified, it will use the tileset specified
within the tmx_file.
Important: Tiles must be a "collection" of images.
Hitboxes can be drawn around tiles in the tileset editor,
but only polygons are supported.
(This is a great area for PR's to improve things.)
:param str tmx_file: String with name of our TMX file
:param float scaling: Scaling factor. 0.5 will half all widths and heights
:param str tsx_file: Tileset to use (can be specified in TMX file)
:returns: Map
:rtype: TiledMap
"""
# Create a map object to store this stuff in
my_map = TiledMap()
# Read in and parse the file
tree = etree.parse(tmx_file)
# Root node should be 'map'
map_tag = tree.getroot()
# Pull attributes that should be in the file for the map
my_map.version = map_tag.attrib["version"]
my_map.orientation = map_tag.attrib["orientation"]
my_map.renderorder = map_tag.attrib["renderorder"]
my_map.width = int(map_tag.attrib["width"])
my_map.height = int(map_tag.attrib["height"])
my_map.tilewidth = int(map_tag.attrib["tilewidth"])
my_map.tileheight = int(map_tag.attrib["tileheight"])
# Background color is optional, and may or may not be in there
if "backgroundcolor" in map_tag.attrib:
# Decode the background color string
backgroundcolor_string = map_tag.attrib["backgroundcolor"]
red_hex = "0x" + backgroundcolor_string[1:3]
green_hex = "0x" + backgroundcolor_string[3:5]
blue_hex = "0x" + backgroundcolor_string[5:7]
red = int(red_hex, 16)
green = int(green_hex, 16)
blue = int(blue_hex, 16)
my_map.backgroundcolor = (red, green, blue)
my_map.nextobjectid = map_tag.attrib["nextobjectid"]
# Grab all the tilesets
tileset_tag_list = map_tag.findall("./tileset")
# --- Tileset Data ---
# Loop through each tileset
for tileset_tag in tileset_tag_list:
firstgid = int(tileset_tag.attrib["firstgid"])
if tsx_file is not None or "source" in tileset_tag.attrib:
if tsx_file is not None:
tileset_tree = etree.parse(tsx_file)
else:
source = tileset_tag.attrib["source"]
try:
tileset_tree = etree.parse(source)
except FileNotFoundError:
source = Path(tmx_file).parent / Path(source)
tileset_tree = etree.parse(source)
# Root node should be 'map'
tileset_root = tileset_tree.getroot()
tile_tag_list = tileset_root.findall("tile")
else:
# Grab each tile
tile_tag_list = tileset_tag.findall("tile")
# Loop through each tile
for tile_tag in tile_tag_list:
# Make a tile object
my_tile = Tile()
image = tile_tag.find("image")
my_tile.local_id = tile_tag.attrib["id"]
my_tile.width = int(image.attrib["width"])
my_tile.height = int(image.attrib["height"])
my_tile.source = image.attrib["source"]
key = str(int(my_tile.local_id) + 1)
my_map.global_tile_set[key] = my_tile
firstgid += 1
objectgroup = tile_tag.find("objectgroup")
if objectgroup:
my_object = objectgroup.find("object")
if my_object:
offset_x = round(float(my_object.attrib["x"]))
offset_y = round(float(my_object.attrib["y"]))
polygon = my_object.find("polygon")
if polygon is not None:
point_list = _parse_points(polygon.attrib["points"])
for point in point_list:
point[0] += offset_x
point[1] += offset_y
point[1] = my_tile.height - point[1]
point[0] -= my_tile.width // 2
point[1] -= my_tile.height // 2
point[0] *= scaling
point[1] *= scaling
point[0] = int(point[0])
point[1] = int(point[1])
my_tile.points = point_list
# --- Map Data ---
# Grab each layer
layer_tag_list = map_tag.findall("./layer")
for layer_tag in layer_tag_list:
layer_width = int(layer_tag.attrib["width"])
# Unzip and unencode each layer
data = layer_tag.find("data")
data_text = data.text.strip()
encoding = data.attrib["encoding"]
if "compression" in data.attrib:
compression = data.attrib["compression"]
else:
compression = None
if encoding == "csv":
layer_grid_ints = _process_csv_encoding(data_text)
elif encoding == "base64":
layer_grid_ints = _process_base64_encoding(
data_text, compression, layer_width
)
else:
print(f"Error, unexpected encoding: {encoding}.")
break
# Great, we have a grid of ints. Save that according to the layer name
my_map.layers_int_data[layer_tag.attrib["name"]] = layer_grid_ints
# Now create grid objects for each tile
layer_grid_objs = []
for row_index, row in enumerate(layer_grid_ints):
layer_grid_objs.append([])
for column_index, column in enumerate(row):
grid_loc = GridLocation()
if layer_grid_ints[row_index][column_index] != 0:
key = str(layer_grid_ints[row_index][column_index])
if key not in my_map.global_tile_set:
print(
f"Warning, tried to load '{key}' and it is not in the tileset."
)
else:
grid_loc.tile = my_map.global_tile_set[key]
if my_map.renderorder == "right-down":
adjusted_row_index = my_map.height - row_index - 1
else:
adjusted_row_index = row_index
if my_map.orientation == "orthogonal":
grid_loc.center_x = (
column_index * my_map.tilewidth + my_map.tilewidth // 2
)
grid_loc.center_y = (
adjusted_row_index * my_map.tileheight
+ my_map.tilewidth // 2
)
else:
grid_loc.center_x, grid_loc.center_y = (
isometric_grid_to_screen(
column_index,
row_index,
my_map.width,
my_map.height,
my_map.tilewidth,
my_map.tileheight,
)
)
layer_grid_objs[row_index].append(grid_loc)
my_map.layers[layer_tag.attrib["name"]] = layer_grid_objs
return my_map
|
https://github.com/pythonarcade/arcade/issues/360
|
Traceback (most recent call last):
File "tsx_bug.py", line 5, in <module>
my_map = arcade.read_tiled_map(MAP_NAME, 1)
File "/$DIR/lib/python3.6/site-packages/arcade/read_tiled_map.py", line 160, in read_tiled_map
tileset_tree = etree.parse(source)
File "/usr/lib/python3.6/xml/etree/ElementTree.py", line 1196, in parse
tree.parse(source, parser)
File "/usr/lib/python3.6/xml/etree/ElementTree.py", line 586, in parse
source = open(source, "rb")
FileNotFoundError: [Errno 2] No such file or directory: 'tsx_test.tsx'
|
FileNotFoundError
|
def read_tiled_map(tmx_file: str, scaling, tsx_file=None) -> TiledMap:
"""
Given a tmx_file, this will read in a tiled map, and return
a TiledMap object.
Given a tsx_file, the map will use it as the tileset.
If tsx_file is not specified, it will use the tileset specified
within the tmx_file.
Important: Tiles must be a "collection" of images.
Hitboxes can be drawn around tiles in the tileset editor,
but only polygons are supported.
(This is a great area for PR's to improve things.)
"""
# Create a map object to store this stuff in
my_map = TiledMap()
# Read in and parse the file
tree = etree.parse(tmx_file)
# Root node should be 'map'
map_tag = tree.getroot()
# Pull attributes that should be in the file for the map
my_map.version = map_tag.attrib["version"]
my_map.orientation = map_tag.attrib["orientation"]
my_map.renderorder = map_tag.attrib["renderorder"]
my_map.width = int(map_tag.attrib["width"])
my_map.height = int(map_tag.attrib["height"])
my_map.tilewidth = int(map_tag.attrib["tilewidth"])
my_map.tileheight = int(map_tag.attrib["tileheight"])
# Background color is optional, and may or may not be in there
if "backgroundcolor" in map_tag.attrib:
# Decode the background color string
backgroundcolor_string = map_tag.attrib["backgroundcolor"]
red_hex = "0x" + backgroundcolor_string[1:3]
green_hex = "0x" + backgroundcolor_string[3:5]
blue_hex = "0x" + backgroundcolor_string[5:7]
red = int(red_hex, 16)
green = int(green_hex, 16)
blue = int(blue_hex, 16)
my_map.backgroundcolor = (red, green, blue)
my_map.nextobjectid = map_tag.attrib["nextobjectid"]
# Grab all the tilesets
tileset_tag_list = map_tag.findall("./tileset")
# --- Tileset Data ---
# Loop through each tileset
for tileset_tag in tileset_tag_list:
firstgid = int(tileset_tag.attrib["firstgid"])
if tsx_file is not None or "source" in tileset_tag.attrib:
if tsx_file is not None:
tileset_tree = etree.parse(tsx_file)
else:
source = tileset_tag.attrib["source"]
try:
tileset_tree = etree.parse(source)
except FileNotFoundError:
source = Path(tmx_file).parent / Path(source)
tileset_tree = etree.parse(source)
# Root node should be 'map'
tileset_root = tileset_tree.getroot()
tile_tag_list = tileset_root.findall("tile")
else:
# Grab each tile
tile_tag_list = tileset_tag.findall("tile")
# Loop through each tile
for tile_tag in tile_tag_list:
# Make a tile object
my_tile = Tile()
image = tile_tag.find("image")
my_tile.local_id = tile_tag.attrib["id"]
my_tile.width = int(image.attrib["width"])
my_tile.height = int(image.attrib["height"])
my_tile.source = image.attrib["source"]
key = str(int(my_tile.local_id) + 1)
my_map.global_tile_set[key] = my_tile
firstgid += 1
objectgroup = tile_tag.find("objectgroup")
if objectgroup:
my_object = objectgroup.find("object")
if my_object:
offset_x = round(float(my_object.attrib["x"]))
offset_y = round(float(my_object.attrib["y"]))
polygon = my_object.find("polygon")
if polygon is not None:
point_list = _parse_points(polygon.attrib["points"])
for point in point_list:
point[0] += offset_x
point[1] += offset_y
point[1] = my_tile.height - point[1]
point[0] -= my_tile.width // 2
point[1] -= my_tile.height // 2
point[0] *= scaling
point[1] *= scaling
point[0] = int(point[0])
point[1] = int(point[1])
my_tile.points = point_list
# --- Map Data ---
# Grab each layer
layer_tag_list = map_tag.findall("./layer")
for layer_tag in layer_tag_list:
layer_width = int(layer_tag.attrib["width"])
# Unzip and unencode each layer
data = layer_tag.find("data")
data_text = data.text.strip()
encoding = data.attrib["encoding"]
if "compression" in data.attrib:
compression = data.attrib["compression"]
else:
compression = None
if encoding == "csv":
layer_grid_ints = _process_csv_encoding(data_text)
elif encoding == "base64":
layer_grid_ints = _process_base64_encoding(
data_text, compression, layer_width
)
else:
print(f"Error, unexpected encoding: {encoding}.")
break
# Great, we have a grid of ints. Save that according to the layer name
my_map.layers_int_data[layer_tag.attrib["name"]] = layer_grid_ints
# Now create grid objects for each tile
layer_grid_objs = []
for row_index, row in enumerate(layer_grid_ints):
layer_grid_objs.append([])
for column_index, column in enumerate(row):
grid_loc = GridLocation()
if layer_grid_ints[row_index][column_index] != 0:
key = str(layer_grid_ints[row_index][column_index])
if key not in my_map.global_tile_set:
print(
f"Warning, tried to load '{key}' and it is not in the tileset."
)
else:
grid_loc.tile = my_map.global_tile_set[key]
if my_map.renderorder == "right-down":
adjusted_row_index = my_map.height - row_index - 1
else:
adjusted_row_index = row_index
if my_map.orientation == "orthogonal":
grid_loc.center_x = (
column_index * my_map.tilewidth + my_map.tilewidth // 2
)
grid_loc.center_y = (
adjusted_row_index * my_map.tileheight
+ my_map.tilewidth // 2
)
else:
grid_loc.center_x, grid_loc.center_y = (
isometric_grid_to_screen(
column_index,
row_index,
my_map.width,
my_map.height,
my_map.tilewidth,
my_map.tileheight,
)
)
layer_grid_objs[row_index].append(grid_loc)
my_map.layers[layer_tag.attrib["name"]] = layer_grid_objs
return my_map
|
def read_tiled_map(filename: str, scaling) -> TiledMap:
"""
Given a filename, this will read in a tiled map, and return
a TiledMap object.
Important: Tiles must be a "collection" of images and the tileset
must be embedded in the .tmx file. Hitboxes can be drawn around tiles
in the tileset editor, but only polygons are supported.
(This is a great area for PR's to improve things.)
"""
# Create a map object to store this stuff in
my_map = TiledMap()
# Read in and parse the file
tree = etree.parse(filename)
# Root node should be 'map'
map_tag = tree.getroot()
# Pull attributes that should be in the file for the map
my_map.version = map_tag.attrib["version"]
my_map.orientation = map_tag.attrib["orientation"]
my_map.renderorder = map_tag.attrib["renderorder"]
my_map.width = int(map_tag.attrib["width"])
my_map.height = int(map_tag.attrib["height"])
my_map.tilewidth = int(map_tag.attrib["tilewidth"])
my_map.tileheight = int(map_tag.attrib["tileheight"])
# Background color is optional, and may or may not be in there
if "backgroundcolor" in map_tag.attrib:
# Decode the background color string
backgroundcolor_string = map_tag.attrib["backgroundcolor"]
red_hex = "0x" + backgroundcolor_string[1:3]
green_hex = "0x" + backgroundcolor_string[3:5]
blue_hex = "0x" + backgroundcolor_string[5:7]
red = int(red_hex, 16)
green = int(green_hex, 16)
blue = int(blue_hex, 16)
my_map.backgroundcolor = (red, green, blue)
my_map.nextobjectid = map_tag.attrib["nextobjectid"]
# Grab all the tilesets
tileset_tag_list = map_tag.findall("./tileset")
# --- Tileset Data ---
# Loop through each tileset
for tileset_tag in tileset_tag_list:
firstgid = int(tileset_tag.attrib["firstgid"])
if "source" in tileset_tag.attrib:
source = tileset_tag.attrib["source"]
tileset_tree = etree.parse(source)
# Root node should be 'map'
tileset_root = tileset_tree.getroot()
tile_tag_list = tileset_root.findall("tile")
else:
# Grab each tile
tile_tag_list = tileset_tag.findall("tile")
# Loop through each tile
for tile_tag in tile_tag_list:
# Make a tile object
my_tile = Tile()
image = tile_tag.find("image")
my_tile.local_id = tile_tag.attrib["id"]
my_tile.width = int(image.attrib["width"])
my_tile.height = int(image.attrib["height"])
my_tile.source = image.attrib["source"]
key = str(int(my_tile.local_id) + 1)
my_map.global_tile_set[key] = my_tile
firstgid += 1
objectgroup = tile_tag.find("objectgroup")
if objectgroup:
my_object = objectgroup.find("object")
if my_object:
offset_x = round(float(my_object.attrib["x"]))
offset_y = round(float(my_object.attrib["y"]))
polygon = my_object.find("polygon")
if polygon is not None:
point_list = _parse_points(polygon.attrib["points"])
for point in point_list:
point[0] += offset_x
point[1] += offset_y
point[1] = my_tile.height - point[1]
point[0] -= my_tile.width // 2
point[1] -= my_tile.height // 2
point[0] *= scaling
point[1] *= scaling
point[0] = int(point[0])
point[1] = int(point[1])
my_tile.points = point_list
# --- Map Data ---
# Grab each layer
layer_tag_list = map_tag.findall("./layer")
for layer_tag in layer_tag_list:
layer_width = int(layer_tag.attrib["width"])
# Unzip and unencode each layer
data = layer_tag.find("data")
data_text = data.text.strip()
encoding = data.attrib["encoding"]
if "compression" in data.attrib:
compression = data.attrib["compression"]
else:
compression = None
if encoding == "csv":
layer_grid_ints = _process_csv_encoding(data_text)
elif encoding == "base64":
layer_grid_ints = _process_base64_encoding(
data_text, compression, layer_width
)
else:
print(f"Error, unexpected encoding: {encoding}.")
break
# Great, we have a grid of ints. Save that according to the layer name
my_map.layers_int_data[layer_tag.attrib["name"]] = layer_grid_ints
# Now create grid objects for each tile
layer_grid_objs = []
for row_index, row in enumerate(layer_grid_ints):
layer_grid_objs.append([])
for column_index, column in enumerate(row):
grid_loc = GridLocation()
if layer_grid_ints[row_index][column_index] != 0:
key = str(layer_grid_ints[row_index][column_index])
if key not in my_map.global_tile_set:
print(
f"Warning, tried to load '{key}' and it is not in the tileset."
)
else:
grid_loc.tile = my_map.global_tile_set[key]
if my_map.renderorder == "right-down":
adjusted_row_index = my_map.height - row_index - 1
else:
adjusted_row_index = row_index
if my_map.orientation == "orthogonal":
grid_loc.center_x = (
column_index * my_map.tilewidth + my_map.tilewidth // 2
)
grid_loc.center_y = (
adjusted_row_index * my_map.tileheight
+ my_map.tilewidth // 2
)
else:
grid_loc.center_x, grid_loc.center_y = (
isometric_grid_to_screen(
column_index,
row_index,
my_map.width,
my_map.height,
my_map.tilewidth,
my_map.tileheight,
)
)
layer_grid_objs[row_index].append(grid_loc)
my_map.layers[layer_tag.attrib["name"]] = layer_grid_objs
return my_map
|
https://github.com/pythonarcade/arcade/issues/360
|
Traceback (most recent call last):
File "tsx_bug.py", line 5, in <module>
my_map = arcade.read_tiled_map(MAP_NAME, 1)
File "/$DIR/lib/python3.6/site-packages/arcade/read_tiled_map.py", line 160, in read_tiled_map
tileset_tree = etree.parse(source)
File "/usr/lib/python3.6/xml/etree/ElementTree.py", line 1196, in parse
tree.parse(source, parser)
File "/usr/lib/python3.6/xml/etree/ElementTree.py", line 586, in parse
source = open(source, "rb")
FileNotFoundError: [Errno 2] No such file or directory: 'tsx_test.tsx'
|
FileNotFoundError
|
def generate_sprites(map_object, layer_name, scaling, base_directory=""):
sprite_list = SpriteList()
if layer_name not in map_object.layers_int_data:
print(f"Warning, no layer named '{layer_name}'.")
return sprite_list
map_array = map_object.layers_int_data[layer_name]
# Loop through the layer and add in the wall list
for row_index, row in enumerate(map_array):
for column_index, item in enumerate(row):
if str(item) in map_object.global_tile_set:
tile_info = map_object.global_tile_set[str(item)]
tmx_file = base_directory + tile_info.source
my_sprite = Sprite(tmx_file, scaling)
my_sprite.right = column_index * (map_object.tilewidth * scaling)
my_sprite.top = (map_object.height - row_index) * (
map_object.tileheight * scaling
)
if tile_info.points is not None:
my_sprite.set_points(tile_info.points)
sprite_list.append(my_sprite)
elif item != 0:
print(f"Warning, could not find {item} image to load.")
return sprite_list
|
def generate_sprites(map_object, layer_name, scaling, base_directory=""):
sprite_list = SpriteList()
if layer_name not in map_object.layers_int_data:
print(f"Warning, no layer named '{layer_name}'.")
return sprite_list
map_array = map_object.layers_int_data[layer_name]
# Loop through the layer and add in the wall list
for row_index, row in enumerate(map_array):
for column_index, item in enumerate(row):
if str(item) in map_object.global_tile_set:
tile_info = map_object.global_tile_set[str(item)]
filename = base_directory + tile_info.source
my_sprite = Sprite(filename, scaling)
my_sprite.right = column_index * (map_object.tilewidth * scaling)
my_sprite.top = (map_object.height - row_index) * (
map_object.tileheight * scaling
)
if tile_info.points is not None:
my_sprite.set_points(tile_info.points)
sprite_list.append(my_sprite)
elif item != 0:
print(f"Warning, could not find {item} image to load.")
return sprite_list
|
https://github.com/pythonarcade/arcade/issues/360
|
Traceback (most recent call last):
File "tsx_bug.py", line 5, in <module>
my_map = arcade.read_tiled_map(MAP_NAME, 1)
File "/$DIR/lib/python3.6/site-packages/arcade/read_tiled_map.py", line 160, in read_tiled_map
tileset_tree = etree.parse(source)
File "/usr/lib/python3.6/xml/etree/ElementTree.py", line 1196, in parse
tree.parse(source, parser)
File "/usr/lib/python3.6/xml/etree/ElementTree.py", line 586, in parse
source = open(source, "rb")
FileNotFoundError: [Errno 2] No such file or directory: 'tsx_test.tsx'
|
FileNotFoundError
|
def _load_sound_library():
"""
Special code for Windows so we grab the proper avbin from our directory.
Otherwise hope the correct package is installed.
"""
# lazy loading
if not _load_sound_library._sound_library_loaded:
_load_sound_library._sound_library_loaded = True
else:
return
import os
appveyor = not os.environ.get("APPVEYOR") is None
import platform
path_user = ""
my_system = platform.system()
if my_system == "Windows":
import sys
is64bit = sys.maxsize > 2**32
import site
user_packages = ""
if hasattr(site, "getsitepackages"):
packages = site.getsitepackages()
user_packages = site.getuserbase()
from distutils.sysconfig import get_python_lib
site_pkg_path = get_python_lib()
if appveyor:
if is64bit:
path_global = "Win64/avbin"
else:
path_global = "Win32/avbin"
else:
if is64bit:
path_global = os.path.join(site_pkg_path, r"arcade\Win64\avbin")
path_user = user_packages + "/lib/site-packages/arcade/Win64/avbin"
else:
path_global = os.path.join(site_pkg_path, r"arcade\Win32\avbin")
path_user = user_packages + "/lib/site-packages/arcade/Win64/avbin"
elif my_system == "Darwin":
from distutils.sysconfig import get_python_lib
path_global = (
get_python_lib() + "/lib/site-packages/arcade/lib/libavbin.10.dylib"
)
pyglet.options["audio"] = ("openal", "pulse", "silent")
else:
path_global = "avbin"
pyglet.options["audio"] = ("openal", "pulse", "silent")
pyglet.have_avbin = False
try:
pyglet.lib.load_library(path_user)
pyglet.have_avbin = True
except ImportError:
pass
if not pyglet.have_avbin:
try:
pyglet.lib.load_library(path_global)
pyglet.have_avbin = True
except ImportError:
pass
if not pyglet.have_avbin:
# Try loading like its never been installed, from current directory.
try:
import platform
mysys = platform.architecture()
post = "avbin"
if mysys[0] == "32bit":
post = "/../Win32/avbin"
elif mysys[0] == "64bit":
post = "/../Win64/avbin"
import os
dir_path = os.path.dirname(os.path.realpath(__file__)) + post
pyglet.lib.load_library(dir_path)
pyglet.have_avbin = True
except ImportError:
pass
if not pyglet.have_avbin:
print("Warning - Unable to load sound library.")
|
def _load_sound_library():
"""
Special code for Windows so we grab the proper avbin from our directory.
Otherwise hope the correct package is installed.
"""
# lazy loading
if not _load_sound_library._sound_library_loaded:
_load_sound_library._sound_library_loaded = True
else:
return
import os
appveyor = not os.environ.get("APPVEYOR") is None
import platform
path_user = ""
my_system = platform.system()
if my_system == "Windows":
import sys
is64bit = sys.maxsize > 2**32
import site
if hasattr(site, "getsitepackages"):
packages = site.getsitepackages()
user_packages = site.getuserbase()
if appveyor:
if is64bit:
path_global = "Win64/avbin"
else:
path_global = "Win32/avbin"
else:
if is64bit:
path_global = packages[0] + "/lib/site-packages/arcade/Win64/avbin"
path_user = user_packages + "/lib/site-packages/arcade/Win64/avbin"
else:
path_global = packages[0] + "/lib/site-packages/arcade/Win32/avbin"
path_user = user_packages + "/lib/site-packages/arcade/Win32/avbin"
else:
if is64bit:
path_global = "Win64/avbin"
else:
path_global = "Win32/avbin"
elif my_system == "Darwin":
from distutils.sysconfig import get_python_lib
path_global = (
get_python_lib() + "/lib/site-packages/arcade/lib/libavbin.10.dylib"
)
pyglet.options["audio"] = ("openal", "pulse", "silent")
else:
path_global = "avbin"
pyglet.options["audio"] = ("openal", "pulse", "silent")
pyglet.have_avbin = False
try:
pyglet.lib.load_library(path_user)
pyglet.have_avbin = True
except ImportError:
pass
if not pyglet.have_avbin:
try:
pyglet.lib.load_library(path_global)
pyglet.have_avbin = True
except ImportError:
pass
if not pyglet.have_avbin:
# Try loading like its never been installed, from current directory.
try:
import platform
mysys = platform.architecture()
post = "avbin"
if mysys[0] == "32bit":
post = "/../Win32/avbin"
elif mysys[0] == "64bit":
post = "/../Win64/avbin"
import os
dir_path = os.path.dirname(os.path.realpath(__file__)) + post
pyglet.lib.load_library(dir_path)
pyglet.have_avbin = True
except ImportError:
pass
if not pyglet.have_avbin:
print("Warning - Unable to load sound library.")
|
https://github.com/pythonarcade/arcade/issues/249
|
Traceback (most recent call last):
File "c:\Users\Ruth\Desktop\Max Anwendungen\Python Codes\Projects\Python RPG\game.py", line 1, in <module>
import arcade
File "c:\Users\Ruth\Desktop\Max Anwendungen\Python Codes\Projects\Python RPG\venv\lib\site-packages\arcade\__init__.py", line 21, in <module>
from arcade.sound import *
File "c:\Users\Ruth\Desktop\Max Anwendungen\Python Codes\Projects\Python RPG\venv\lib\site-packages\arcade\sound.py", line 193, in <module>
_load_sound_library()
File "c:\Users\Ruth\Desktop\Max Anwendungen\Python Codes\Projects\Python RPG\venv\lib\site-packages\arcade\sound.py", line 70, in _load_sound_library
pyglet.lib.load_library(path)
File "c:\Users\Ruth\Desktop\Max Anwendungen\Python Codes\Projects\Python RPG\venv\lib\site-packages\pyglet\lib.py", line 158, in load_library
raise ImportError('Library "%s" not found.' % names[0])
ImportError: Library "Win32/avbin" not found.
|
ImportError
|
def __init__(self, width, height):
super().__init__(width, height)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
arcade.set_background_color(arcade.color.AMAZON)
self.pause = False
self.coin_list = None
self.button_list = None
|
def __init__(self, width, height):
super().__init__(width, height)
arcade.set_background_color(arcade.color.AMAZON)
self.pause = False
self.coin_list = None
self.button_list = None
|
https://github.com/pythonarcade/arcade/issues/284
|
$ python3 -m arcade.examples.gui_text_button
Traceback (most recent call last):
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/USERNAME/.local/lib/python3.6/site-packages/arcade/examples/gui_text_button.py", line 228, in <module>
main()
File "/home/USERNAME/.local/lib/python3.6/site-packages/arcade/examples/gui_text_button.py", line 223, in main
game.setup()
File "/home/USERNAME/.local/lib/python3.6/site-packages/arcade/examples/gui_text_button.py", line 155, in setup
coin = arcade.Sprite("images/coin_01.png", 0.25)
File "/home/USERNAME/.local/lib/python3.6/site-packages/arcade/sprite.py", line 142, in __init__
image_width, image_height)
File "/home/USERNAME/.local/lib/python3.6/site-packages/arcade/draw_commands.py", line 286, in load_texture
source_image = PIL.Image.open(file_name)
File "/home/USERNAME/.local/lib/python3.6/site-packages/PIL/Image.py", line 2609, in open
fp = builtins.open(filename, "rb")
FileNotFoundError: [Errno 2] No such file or directory: 'images/coin_01.png'
|
FileNotFoundError
|
def update(self, x):
"""Move everything"""
self.frame_count += 1
if not self.game_over:
self.all_sprites_list.update()
for bullet in self.bullet_list:
asteroids_plain = arcade.check_for_collision_with_list(
bullet, self.asteroid_list
)
asteroids_spatial = arcade.check_for_collision_with_list(
bullet, self.asteroid_list
)
if len(asteroids_plain) != len(asteroids_spatial):
print("ERROR")
asteroids = asteroids_spatial
for asteroid in asteroids:
self.split_asteroid(asteroid)
asteroid.kill()
bullet.kill()
if not self.player_sprite.respawning:
asteroids = arcade.check_for_collision_with_list(
self.player_sprite, self.asteroid_list
)
if len(asteroids) > 0:
if self.lives > 0:
self.lives -= 1
self.player_sprite.respawn()
self.split_asteroid(asteroids[0])
asteroids[0].kill()
self.ship_life_list.pop().kill()
print("Crash")
else:
self.game_over = True
print("Game over")
|
def update(self, x):
"""Move everything"""
self.frame_count += 1
if not self.game_over:
self.all_sprites_list.update()
for bullet in self.bullet_list:
self.asteroid_list.use_spatial_hash = False
asteroids_plain = arcade.check_for_collision_with_list(
bullet, self.asteroid_list
)
self.asteroid_list.use_spatial_hash = True
asteroids_spatial = arcade.check_for_collision_with_list(
bullet, self.asteroid_list
)
if len(asteroids_plain) != len(asteroids_spatial):
print("ERROR")
asteroids = asteroids_spatial
for asteroid in asteroids:
self.split_asteroid(asteroid)
asteroid.kill()
bullet.kill()
if not self.player_sprite.respawning:
asteroids = arcade.check_for_collision_with_list(
self.player_sprite, self.asteroid_list
)
if len(asteroids) > 0:
if self.lives > 0:
self.lives -= 1
self.player_sprite.respawn()
self.split_asteroid(asteroids[0])
asteroids[0].kill()
self.ship_life_list.pop().kill()
print("Crash")
else:
self.game_over = True
print("Game over")
|
https://github.com/pythonarcade/arcade/issues/324
|
Traceback (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/examples/asteroid_smasher.py", line 394, in <module>
main()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/examples/asteroid_smasher.py", line 390, in main
arcade.run()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/window_commands.py", line 245, in run
pyglet.app.run()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/pyglet/app/__init__.py", line 142, in run
event_loop.run()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/pyglet/app/base.py", line 175, in run
self._run()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/pyglet/app/base.py", line 187, in _run
timeout = self.idle()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/pyglet/app/base.py", line 308, in idle
redraw_all = self.clock.call_scheduled_functions(dt)
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/pyglet/clock.py", line 314, in call_scheduled_functions
item.func(now - item.last_ts, *item.args, **item.kwargs)
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/examples/asteroid_smasher.py", line 355, in update
self.all_sprites_list.update()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/sprite_list.py", line 299, in update
sprite.update()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/examples/asteroid_smasher.py", line 103, in update
super().update()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/sprite.py", line 547, in update
self.set_position(self.center_x + self.change_x, self.center_y + self.change_y)
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/sprite.py", line 202, in set_position
self._set_position(self, (center_x, center_y))
TypeError: _set_position() takes 2 positional arguments but 3 were given
|
TypeError
|
def update(self):
"""
Update the sprite.
"""
self.position = [
self._position[0] + self.change_x,
self._position[1] + self.change_y,
]
self.angle += self.change_angle
|
def update(self):
"""
Update the sprite.
"""
self.position = (
self._position[0] + self.change_x,
self._position[1] + self.change_y,
)
self.angle += self.change_angle
|
https://github.com/pythonarcade/arcade/issues/324
|
Traceback (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/examples/asteroid_smasher.py", line 394, in <module>
main()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/examples/asteroid_smasher.py", line 390, in main
arcade.run()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/window_commands.py", line 245, in run
pyglet.app.run()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/pyglet/app/__init__.py", line 142, in run
event_loop.run()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/pyglet/app/base.py", line 175, in run
self._run()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/pyglet/app/base.py", line 187, in _run
timeout = self.idle()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/pyglet/app/base.py", line 308, in idle
redraw_all = self.clock.call_scheduled_functions(dt)
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/pyglet/clock.py", line 314, in call_scheduled_functions
item.func(now - item.last_ts, *item.args, **item.kwargs)
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/examples/asteroid_smasher.py", line 355, in update
self.all_sprites_list.update()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/sprite_list.py", line 299, in update
sprite.update()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/examples/asteroid_smasher.py", line 103, in update
super().update()
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/sprite.py", line 547, in update
self.set_position(self.center_x + self.change_x, self.center_y + self.change_y)
File "/home/srw/projects/pyarcade_lab/venv/lib/python3.7/site-packages/arcade/sprite.py", line 202, in set_position
self._set_position(self, (center_x, center_y))
TypeError: _set_position() takes 2 positional arguments but 3 were given
|
TypeError
|
def set_position(self, center_x: float, center_y: float):
"""
Set a sprite's position
>>> import arcade
>>> empty_sprite = arcade.Sprite()
>>> empty_sprite.set_position(10, 10)
"""
self.center_x = center_x
self.center_y = center_y
|
def set_position(self, new_position: (float, float)):
self.clear_spatial_hashes()
self._position[0] = new_position[0]
self._position[1] = new_position[1]
self._point_list_cache = None
|
https://github.com/pythonarcade/arcade/issues/208
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: set_position() takes 2 positional arguments but 3 were given
|
TypeError
|
def enable_dev_tools(
self,
debug=None,
dev_tools_ui=None,
dev_tools_props_check=None,
dev_tools_serve_dev_bundles=None,
dev_tools_hot_reload=None,
dev_tools_hot_reload_interval=None,
dev_tools_hot_reload_watch_interval=None,
dev_tools_hot_reload_max_retry=None,
dev_tools_silence_routes_logging=None,
dev_tools_prune_errors=None,
):
"""Activate the dev tools, called by `run_server`. If your application
is served by wsgi and you want to activate the dev tools, you can call
this method out of `__main__`.
All parameters can be set by environment variables as listed.
Values provided here take precedence over environment variables.
Available dev_tools environment variables:
- DASH_DEBUG
- DASH_UI
- DASH_PROPS_CHECK
- DASH_SERVE_DEV_BUNDLES
- DASH_HOT_RELOAD
- DASH_HOT_RELOAD_INTERVAL
- DASH_HOT_RELOAD_WATCH_INTERVAL
- DASH_HOT_RELOAD_MAX_RETRY
- DASH_SILENCE_ROUTES_LOGGING
- DASH_PRUNE_ERRORS
:param debug: Enable/disable all the dev tools unless overridden by the
arguments or environment variables. Default is ``True`` when
``enable_dev_tools`` is called directly, and ``False`` when called
via ``run_server``. env: ``DASH_DEBUG``
:type debug: bool
:param dev_tools_ui: Show the dev tools UI. env: ``DASH_UI``
:type dev_tools_ui: bool
:param dev_tools_props_check: Validate the types and values of Dash
component props. env: ``DASH_PROPS_CHECK``
:type dev_tools_props_check: bool
:param dev_tools_serve_dev_bundles: Serve the dev bundles. Production
bundles do not necessarily include all the dev tools code.
env: ``DASH_SERVE_DEV_BUNDLES``
:type dev_tools_serve_dev_bundles: bool
:param dev_tools_hot_reload: Activate hot reloading when app, assets,
and component files change. env: ``DASH_HOT_RELOAD``
:type dev_tools_hot_reload: bool
:param dev_tools_hot_reload_interval: Interval in seconds for the
client to request the reload hash. Default 3.
env: ``DASH_HOT_RELOAD_INTERVAL``
:type dev_tools_hot_reload_interval: float
:param dev_tools_hot_reload_watch_interval: Interval in seconds for the
server to check asset and component folders for changes.
Default 0.5. env: ``DASH_HOT_RELOAD_WATCH_INTERVAL``
:type dev_tools_hot_reload_watch_interval: float
:param dev_tools_hot_reload_max_retry: Maximum number of failed reload
hash requests before failing and displaying a pop up. Default 8.
env: ``DASH_HOT_RELOAD_MAX_RETRY``
:type dev_tools_hot_reload_max_retry: int
:param dev_tools_silence_routes_logging: Silence the `werkzeug` logger,
will remove all routes logging. Enabled with debugging by default
because hot reload hash checks generate a lot of requests.
env: ``DASH_SILENCE_ROUTES_LOGGING``
:type dev_tools_silence_routes_logging: bool
:param dev_tools_prune_errors: Reduce tracebacks to just user code,
stripping out Flask and Dash pieces. Only available with debugging.
`True` by default, set to `False` to see the complete traceback.
env: ``DASH_PRUNE_ERRORS``
:type dev_tools_prune_errors: bool
:return: debug
"""
if debug is None:
debug = get_combined_config("debug", None, True)
dev_tools = self._setup_dev_tools(
debug=debug,
ui=dev_tools_ui,
props_check=dev_tools_props_check,
serve_dev_bundles=dev_tools_serve_dev_bundles,
hot_reload=dev_tools_hot_reload,
hot_reload_interval=dev_tools_hot_reload_interval,
hot_reload_watch_interval=dev_tools_hot_reload_watch_interval,
hot_reload_max_retry=dev_tools_hot_reload_max_retry,
silence_routes_logging=dev_tools_silence_routes_logging,
prune_errors=dev_tools_prune_errors,
)
if dev_tools.silence_routes_logging:
logging.getLogger("werkzeug").setLevel(logging.ERROR)
self.logger.setLevel(logging.INFO)
if dev_tools.hot_reload:
_reload = self._hot_reload
_reload.hash = generate_hash()
# find_loader should return None on __main__ but doesn't
# on some python versions https://bugs.python.org/issue14710
packages = [
pkgutil.find_loader(x)
for x in list(ComponentRegistry.registry) + ["dash_renderer"]
if x != "__main__"
]
component_packages_dist = [
os.path.dirname(package.path)
if hasattr(package, "path")
else package.filename
for package in packages
]
_reload.watch_thread = threading.Thread(
target=lambda: _watch.watch(
[self.config.assets_folder] + component_packages_dist,
self._on_assets_change,
sleep_time=dev_tools.hot_reload_watch_interval,
)
)
_reload.watch_thread.daemon = True
_reload.watch_thread.start()
if debug and dev_tools.prune_errors:
@self.server.errorhandler(Exception)
def _wrap_errors(_):
# find the callback invocation, if the error is from a callback
# and skip the traceback up to that point
# if the error didn't come from inside a callback, we won't
# skip anything.
tb = get_current_traceback()
skip = 0
for i, line in enumerate(tb.plaintext.splitlines()):
if "%% callback invoked %%" in line:
skip = int((i + 1) / 2)
break
return get_current_traceback(skip=skip).render_full(), 500
if debug and dev_tools.serve_dev_bundles and not self.scripts.config.serve_locally:
# Dev bundles only works locally.
self.scripts.config.serve_locally = True
print(
"WARNING: dev bundles requested with serve_locally=False.\n"
"This is not supported, switching to serve_locally=True"
)
return debug
|
def enable_dev_tools(
self,
debug=None,
dev_tools_ui=None,
dev_tools_props_check=None,
dev_tools_serve_dev_bundles=None,
dev_tools_hot_reload=None,
dev_tools_hot_reload_interval=None,
dev_tools_hot_reload_watch_interval=None,
dev_tools_hot_reload_max_retry=None,
dev_tools_silence_routes_logging=None,
dev_tools_prune_errors=None,
):
"""Activate the dev tools, called by `run_server`. If your application
is served by wsgi and you want to activate the dev tools, you can call
this method out of `__main__`.
All parameters can be set by environment variables as listed.
Values provided here take precedence over environment variables.
Available dev_tools environment variables:
- DASH_DEBUG
- DASH_UI
- DASH_PROPS_CHECK
- DASH_SERVE_DEV_BUNDLES
- DASH_HOT_RELOAD
- DASH_HOT_RELOAD_INTERVAL
- DASH_HOT_RELOAD_WATCH_INTERVAL
- DASH_HOT_RELOAD_MAX_RETRY
- DASH_SILENCE_ROUTES_LOGGING
- DASH_PRUNE_ERRORS
:param debug: Enable/disable all the dev tools unless overridden by the
arguments or environment variables. Default is ``True`` when
``enable_dev_tools`` is called directly, and ``False`` when called
via ``run_server``. env: ``DASH_DEBUG``
:type debug: bool
:param dev_tools_ui: Show the dev tools UI. env: ``DASH_UI``
:type dev_tools_ui: bool
:param dev_tools_props_check: Validate the types and values of Dash
component props. env: ``DASH_PROPS_CHECK``
:type dev_tools_props_check: bool
:param dev_tools_serve_dev_bundles: Serve the dev bundles. Production
bundles do not necessarily include all the dev tools code.
env: ``DASH_SERVE_DEV_BUNDLES``
:type dev_tools_serve_dev_bundles: bool
:param dev_tools_hot_reload: Activate hot reloading when app, assets,
and component files change. env: ``DASH_HOT_RELOAD``
:type dev_tools_hot_reload: bool
:param dev_tools_hot_reload_interval: Interval in seconds for the
client to request the reload hash. Default 3.
env: ``DASH_HOT_RELOAD_INTERVAL``
:type dev_tools_hot_reload_interval: float
:param dev_tools_hot_reload_watch_interval: Interval in seconds for the
server to check asset and component folders for changes.
Default 0.5. env: ``DASH_HOT_RELOAD_WATCH_INTERVAL``
:type dev_tools_hot_reload_watch_interval: float
:param dev_tools_hot_reload_max_retry: Maximum number of failed reload
hash requests before failing and displaying a pop up. Default 8.
env: ``DASH_HOT_RELOAD_MAX_RETRY``
:type dev_tools_hot_reload_max_retry: int
:param dev_tools_silence_routes_logging: Silence the `werkzeug` logger,
will remove all routes logging. Enabled with debugging by default
because hot reload hash checks generate a lot of requests.
env: ``DASH_SILENCE_ROUTES_LOGGING``
:type dev_tools_silence_routes_logging: bool
:param dev_tools_prune_errors: Reduce tracebacks to just user code,
stripping out Flask and Dash pieces. Only available with debugging.
`True` by default, set to `False` to see the complete traceback.
env: ``DASH_PRUNE_ERRORS``
:type dev_tools_prune_errors: bool
:return: debug
"""
if debug is None:
debug = get_combined_config("debug", None, True)
dev_tools = self._setup_dev_tools(
debug=debug,
ui=dev_tools_ui,
props_check=dev_tools_props_check,
serve_dev_bundles=dev_tools_serve_dev_bundles,
hot_reload=dev_tools_hot_reload,
hot_reload_interval=dev_tools_hot_reload_interval,
hot_reload_watch_interval=dev_tools_hot_reload_watch_interval,
hot_reload_max_retry=dev_tools_hot_reload_max_retry,
silence_routes_logging=dev_tools_silence_routes_logging,
prune_errors=dev_tools_prune_errors,
)
if dev_tools.silence_routes_logging:
logging.getLogger("werkzeug").setLevel(logging.ERROR)
self.logger.setLevel(logging.INFO)
if dev_tools.hot_reload:
_reload = self._hot_reload
_reload.hash = generate_hash()
component_packages_dist = [
os.path.dirname(package.path)
if hasattr(package, "path")
else package.filename
for package in (
pkgutil.find_loader(x)
for x in list(ComponentRegistry.registry) + ["dash_renderer"]
)
]
_reload.watch_thread = threading.Thread(
target=lambda: _watch.watch(
[self.config.assets_folder] + component_packages_dist,
self._on_assets_change,
sleep_time=dev_tools.hot_reload_watch_interval,
)
)
_reload.watch_thread.daemon = True
_reload.watch_thread.start()
if debug and dev_tools.prune_errors:
@self.server.errorhandler(Exception)
def _wrap_errors(_):
# find the callback invocation, if the error is from a callback
# and skip the traceback up to that point
# if the error didn't come from inside a callback, we won't
# skip anything.
tb = get_current_traceback()
skip = 0
for i, line in enumerate(tb.plaintext.splitlines()):
if "%% callback invoked %%" in line:
skip = int((i + 1) / 2)
break
return get_current_traceback(skip=skip).render_full(), 500
if debug and dev_tools.serve_dev_bundles and not self.scripts.config.serve_locally:
# Dev bundles only works locally.
self.scripts.config.serve_locally = True
print(
"WARNING: dev bundles requested with serve_locally=False.\n"
"This is not supported, switching to serve_locally=True"
)
return debug
|
https://github.com/plotly/dash/issues/1285
|
Traceback (most recent call last):
File "C:\Users\gioxc\AppData\Local\Programs\Python\Python37\lib\pkgutil.py", line 493, in find_loader
spec = importlib.util.find_spec(fullname)
File "C:\Users\gioxc\AppData\Local\Programs\Python\Python37\lib\importlib\util.py", line 114, in find_spec
raise ValueError('{}.__spec__ is None'.format(name))
ValueError: __main__.__spec__ is None
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "apptest.py", line 108, in <module>
app.run_server(debug=True)
File "C:\Users\gioxc\AppData\Local\Programs\Python\Python37\lib\site-packages\dash\dash.py", line 1475, in run_server
dev_tools_prune_errors,
File "C:\Users\gioxc\AppData\Local\Programs\Python\Python37\lib\site-packages\dash\dash.py", line 1282, in enable_dev_tools
for x in list(ComponentRegistry.registry) + ["dash_renderer"]
File "C:\Users\gioxc\AppData\Local\Programs\Python\Python37\lib\site-packages\dash\dash.py", line 1277, in <listcomp>
os.path.dirname(package.path)
File "C:\Users\gioxc\AppData\Local\Programs\Python\Python37\lib\site-packages\dash\dash.py", line 1282, in <genexpr>
for x in list(ComponentRegistry.registry) + ["dash_renderer"]
File "C:\Users\gioxc\AppData\Local\Programs\Python\Python37\lib\pkgutil.py", line 499, in find_loader
raise ImportError(msg.format(fullname, type(ex), ex)) from ex
ImportError: Error while finding loader for '__main__' (<class 'ValueError'>: __main__.__spec__ is None)
|
ValueError
|
def callback(self, output, inputs=[], state=[]):
self._validate_callback(output, inputs, state)
callback_id = _create_callback_id(output)
multi = isinstance(output, (list, tuple))
self.callback_map[callback_id] = {
"inputs": [
{"id": c.component_id, "property": c.component_property} for c in inputs
],
"state": [
{"id": c.component_id, "property": c.component_property} for c in state
],
}
def wrap_func(func):
@wraps(func)
def add_context(*args, **kwargs):
# don't touch the comment on the next line - used by debugger
output_value = func(*args, **kwargs) # %% callback invoked %%
if multi:
if not isinstance(output_value, (list, tuple)):
raise exceptions.InvalidCallbackReturnValue(
"The callback {} is a multi-output.\n"
"Expected the output type to be a list"
" or tuple but got {}.".format(callback_id, repr(output_value))
)
if not len(output_value) == len(output):
raise exceptions.InvalidCallbackReturnValue(
"Invalid number of output values for {}.\n"
" Expected {} got {}".format(
callback_id, len(output), len(output_value)
)
)
component_ids = collections.defaultdict(dict)
has_update = False
for i, o in enumerate(output):
val = output_value[i]
if not isinstance(val, _NoUpdate):
has_update = True
o_id, o_prop = o.component_id, o.component_property
component_ids[o_id][o_prop] = val
if not has_update:
raise exceptions.PreventUpdate
response = {"response": component_ids, "multi": True}
else:
if isinstance(output_value, _NoUpdate):
raise exceptions.PreventUpdate
response = {
"response": {"props": {output.component_property: output_value}}
}
try:
jsonResponse = json.dumps(response, cls=plotly.utils.PlotlyJSONEncoder)
except TypeError:
self._validate_callback_output(output_value, output)
raise exceptions.InvalidCallbackReturnValue(
dedent(
"""
The callback for property `{property:s}`
of component `{id:s}` returned a value
which is not JSON serializable.
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
"""
).format(
property=output.component_property,
id=output.component_id,
)
)
return jsonResponse
self.callback_map[callback_id]["callback"] = add_context
return add_context
return wrap_func
|
def callback(self, output, inputs=[], state=[]):
self._validate_callback(output, inputs, state)
callback_id = _create_callback_id(output)
multi = isinstance(output, (list, tuple))
self.callback_map[callback_id] = {
"inputs": [
{"id": c.component_id, "property": c.component_property} for c in inputs
],
"state": [
{"id": c.component_id, "property": c.component_property} for c in state
],
}
def wrap_func(func):
@wraps(func)
def add_context(*args, **kwargs):
# don't touch the comment on the next line - used by debugger
output_value = func(*args, **kwargs) # %% callback invoked %%
if multi:
if not isinstance(output_value, (list, tuple)):
raise exceptions.InvalidCallbackReturnValue(
"The callback {} is a multi-output.\n"
"Expected the output type to be a list"
" or tuple but got {}.".format(callback_id, repr(output_value))
)
if not len(output_value) == len(output):
raise exceptions.InvalidCallbackReturnValue(
"Invalid number of output values for {}.\n"
" Expected {} got {}".format(
callback_id, len(output), len(output_value)
)
)
component_ids = collections.defaultdict(dict)
has_update = False
for i, o in enumerate(output):
val = output_value[i]
if val is not no_update:
has_update = True
o_id, o_prop = o.component_id, o.component_property
component_ids[o_id][o_prop] = val
if not has_update:
raise exceptions.PreventUpdate
response = {"response": component_ids, "multi": True}
else:
if output_value is no_update:
raise exceptions.PreventUpdate
response = {
"response": {"props": {output.component_property: output_value}}
}
try:
jsonResponse = json.dumps(response, cls=plotly.utils.PlotlyJSONEncoder)
except TypeError:
self._validate_callback_output(output_value, output)
raise exceptions.InvalidCallbackReturnValue(
dedent(
"""
The callback for property `{property:s}`
of component `{id:s}` returned a value
which is not JSON serializable.
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
"""
).format(
property=output.component_property,
id=output.component_id,
)
)
return jsonResponse
self.callback_map[callback_id]["callback"] = add_context
return add_context
return wrap_func
|
https://github.com/plotly/dash/issues/1014
|
Exception on /_dash-update-component [POST]
Traceback (most recent call last):
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1328, in add_context
response, cls=plotly.utils.PlotlyJSONEncoder
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/_plotly_utils/utils.py", line 44, in encode
encoded_o = super(PlotlyJSONEncoder, self).encode(o)
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/_plotly_utils/utils.py", line 113, in default
return _json.JSONEncoder.default(self, obj)
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type _NoUpdate is not JSON serializable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1404, in dispatch
response.set_data(self.callback_map[output]["callback"](*args))
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1331, in add_context
self._validate_callback_output(output_value, output)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1187, in _validate_callback_output
_validate_value(output_value)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1180, in _validate_value
toplevel=True,
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1125, in _raise_invalid
bad_val=bad_val,
dash.exceptions.InvalidCallbackReturnValue:
The callback for `<Output `approot.children`>`
returned a value having type `_NoUpdate`
which is not JSON serializable.
The value in question is either the only value returned,
or is in the top level of the returned list,
and has string representation
`<dash.dash._NoUpdate object at 0x7ff2ca37ccf8>`
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
|
TypeError
|
def wrap_func(func):
@wraps(func)
def add_context(*args, **kwargs):
# don't touch the comment on the next line - used by debugger
output_value = func(*args, **kwargs) # %% callback invoked %%
if multi:
if not isinstance(output_value, (list, tuple)):
raise exceptions.InvalidCallbackReturnValue(
"The callback {} is a multi-output.\n"
"Expected the output type to be a list"
" or tuple but got {}.".format(callback_id, repr(output_value))
)
if not len(output_value) == len(output):
raise exceptions.InvalidCallbackReturnValue(
"Invalid number of output values for {}.\n"
" Expected {} got {}".format(
callback_id, len(output), len(output_value)
)
)
component_ids = collections.defaultdict(dict)
has_update = False
for i, o in enumerate(output):
val = output_value[i]
if not isinstance(val, _NoUpdate):
has_update = True
o_id, o_prop = o.component_id, o.component_property
component_ids[o_id][o_prop] = val
if not has_update:
raise exceptions.PreventUpdate
response = {"response": component_ids, "multi": True}
else:
if isinstance(output_value, _NoUpdate):
raise exceptions.PreventUpdate
response = {
"response": {"props": {output.component_property: output_value}}
}
try:
jsonResponse = json.dumps(response, cls=plotly.utils.PlotlyJSONEncoder)
except TypeError:
self._validate_callback_output(output_value, output)
raise exceptions.InvalidCallbackReturnValue(
dedent(
"""
The callback for property `{property:s}`
of component `{id:s}` returned a value
which is not JSON serializable.
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
"""
).format(
property=output.component_property,
id=output.component_id,
)
)
return jsonResponse
self.callback_map[callback_id]["callback"] = add_context
return add_context
|
def wrap_func(func):
@wraps(func)
def add_context(*args, **kwargs):
# don't touch the comment on the next line - used by debugger
output_value = func(*args, **kwargs) # %% callback invoked %%
if multi:
if not isinstance(output_value, (list, tuple)):
raise exceptions.InvalidCallbackReturnValue(
"The callback {} is a multi-output.\n"
"Expected the output type to be a list"
" or tuple but got {}.".format(callback_id, repr(output_value))
)
if not len(output_value) == len(output):
raise exceptions.InvalidCallbackReturnValue(
"Invalid number of output values for {}.\n"
" Expected {} got {}".format(
callback_id, len(output), len(output_value)
)
)
component_ids = collections.defaultdict(dict)
has_update = False
for i, o in enumerate(output):
val = output_value[i]
if val is not no_update:
has_update = True
o_id, o_prop = o.component_id, o.component_property
component_ids[o_id][o_prop] = val
if not has_update:
raise exceptions.PreventUpdate
response = {"response": component_ids, "multi": True}
else:
if output_value is no_update:
raise exceptions.PreventUpdate
response = {
"response": {"props": {output.component_property: output_value}}
}
try:
jsonResponse = json.dumps(response, cls=plotly.utils.PlotlyJSONEncoder)
except TypeError:
self._validate_callback_output(output_value, output)
raise exceptions.InvalidCallbackReturnValue(
dedent(
"""
The callback for property `{property:s}`
of component `{id:s}` returned a value
which is not JSON serializable.
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
"""
).format(
property=output.component_property,
id=output.component_id,
)
)
return jsonResponse
self.callback_map[callback_id]["callback"] = add_context
return add_context
|
https://github.com/plotly/dash/issues/1014
|
Exception on /_dash-update-component [POST]
Traceback (most recent call last):
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1328, in add_context
response, cls=plotly.utils.PlotlyJSONEncoder
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/_plotly_utils/utils.py", line 44, in encode
encoded_o = super(PlotlyJSONEncoder, self).encode(o)
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/_plotly_utils/utils.py", line 113, in default
return _json.JSONEncoder.default(self, obj)
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type _NoUpdate is not JSON serializable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1404, in dispatch
response.set_data(self.callback_map[output]["callback"](*args))
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1331, in add_context
self._validate_callback_output(output_value, output)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1187, in _validate_callback_output
_validate_value(output_value)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1180, in _validate_value
toplevel=True,
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1125, in _raise_invalid
bad_val=bad_val,
dash.exceptions.InvalidCallbackReturnValue:
The callback for `<Output `approot.children`>`
returned a value having type `_NoUpdate`
which is not JSON serializable.
The value in question is either the only value returned,
or is in the top level of the returned list,
and has string representation
`<dash.dash._NoUpdate object at 0x7ff2ca37ccf8>`
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
|
TypeError
|
def add_context(*args, **kwargs):
# don't touch the comment on the next line - used by debugger
output_value = func(*args, **kwargs) # %% callback invoked %%
if multi:
if not isinstance(output_value, (list, tuple)):
raise exceptions.InvalidCallbackReturnValue(
"The callback {} is a multi-output.\n"
"Expected the output type to be a list"
" or tuple but got {}.".format(callback_id, repr(output_value))
)
if not len(output_value) == len(output):
raise exceptions.InvalidCallbackReturnValue(
"Invalid number of output values for {}.\n Expected {} got {}".format(
callback_id, len(output), len(output_value)
)
)
component_ids = collections.defaultdict(dict)
has_update = False
for i, o in enumerate(output):
val = output_value[i]
if not isinstance(val, _NoUpdate):
has_update = True
o_id, o_prop = o.component_id, o.component_property
component_ids[o_id][o_prop] = val
if not has_update:
raise exceptions.PreventUpdate
response = {"response": component_ids, "multi": True}
else:
if isinstance(output_value, _NoUpdate):
raise exceptions.PreventUpdate
response = {"response": {"props": {output.component_property: output_value}}}
try:
jsonResponse = json.dumps(response, cls=plotly.utils.PlotlyJSONEncoder)
except TypeError:
self._validate_callback_output(output_value, output)
raise exceptions.InvalidCallbackReturnValue(
dedent(
"""
The callback for property `{property:s}`
of component `{id:s}` returned a value
which is not JSON serializable.
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
"""
).format(
property=output.component_property,
id=output.component_id,
)
)
return jsonResponse
|
def add_context(*args, **kwargs):
# don't touch the comment on the next line - used by debugger
output_value = func(*args, **kwargs) # %% callback invoked %%
if multi:
if not isinstance(output_value, (list, tuple)):
raise exceptions.InvalidCallbackReturnValue(
"The callback {} is a multi-output.\n"
"Expected the output type to be a list"
" or tuple but got {}.".format(callback_id, repr(output_value))
)
if not len(output_value) == len(output):
raise exceptions.InvalidCallbackReturnValue(
"Invalid number of output values for {}.\n Expected {} got {}".format(
callback_id, len(output), len(output_value)
)
)
component_ids = collections.defaultdict(dict)
has_update = False
for i, o in enumerate(output):
val = output_value[i]
if val is not no_update:
has_update = True
o_id, o_prop = o.component_id, o.component_property
component_ids[o_id][o_prop] = val
if not has_update:
raise exceptions.PreventUpdate
response = {"response": component_ids, "multi": True}
else:
if output_value is no_update:
raise exceptions.PreventUpdate
response = {"response": {"props": {output.component_property: output_value}}}
try:
jsonResponse = json.dumps(response, cls=plotly.utils.PlotlyJSONEncoder)
except TypeError:
self._validate_callback_output(output_value, output)
raise exceptions.InvalidCallbackReturnValue(
dedent(
"""
The callback for property `{property:s}`
of component `{id:s}` returned a value
which is not JSON serializable.
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
"""
).format(
property=output.component_property,
id=output.component_id,
)
)
return jsonResponse
|
https://github.com/plotly/dash/issues/1014
|
Exception on /_dash-update-component [POST]
Traceback (most recent call last):
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1328, in add_context
response, cls=plotly.utils.PlotlyJSONEncoder
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/_plotly_utils/utils.py", line 44, in encode
encoded_o = super(PlotlyJSONEncoder, self).encode(o)
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/_plotly_utils/utils.py", line 113, in default
return _json.JSONEncoder.default(self, obj)
File "/home/kz/.local/pyenv/versions/3.7.3/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type _NoUpdate is not JSON serializable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1404, in dispatch
response.set_data(self.callback_map[output]["callback"](*args))
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1331, in add_context
self._validate_callback_output(output_value, output)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1187, in _validate_callback_output
_validate_value(output_value)
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1180, in _validate_value
toplevel=True,
File "/home/kz/Envs/dash_bleedingedge/lib/python3.7/site-packages/dash/dash.py", line 1125, in _raise_invalid
bad_val=bad_val,
dash.exceptions.InvalidCallbackReturnValue:
The callback for `<Output `approot.children`>`
returned a value having type `_NoUpdate`
which is not JSON serializable.
The value in question is either the only value returned,
or is in the top level of the returned list,
and has string representation
`<dash.dash._NoUpdate object at 0x7ff2ca37ccf8>`
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
|
TypeError
|
def __init__(
self,
name="__main__",
server=None,
static_folder="static",
assets_folder=None,
assets_url_path="/assets",
assets_ignore="",
include_assets_files=True,
url_base_pathname=None,
assets_external_path=None,
requests_pathname_prefix=None,
routes_pathname_prefix=None,
compress=True,
meta_tags=None,
index_string=_default_index,
external_scripts=None,
external_stylesheets=None,
suppress_callback_exceptions=None,
components_cache_max_age=None,
**kwargs,
):
# pylint-disable: too-many-instance-attributes
if "csrf_protect" in kwargs:
warnings.warn(
"""
`csrf_protect` is no longer used,
CSRF protection has been removed as it is no longer
necessary.
See https://github.com/plotly/dash/issues/141 for details.
""",
DeprecationWarning,
)
name = name if server is None else server.name
self._assets_folder = assets_folder or os.path.join(
flask.helpers.get_root_path(name), "assets"
)
self._assets_url_path = assets_url_path
# allow users to supply their own flask server
self.server = server or Flask(name, static_folder=static_folder)
if "assets" not in self.server.blueprints:
self.server.register_blueprint(
flask.Blueprint(
"assets",
"assets",
static_folder=self._assets_folder,
static_url_path=assets_url_path,
)
)
env_configs = _configs.env_configs()
url_base_pathname, routes_pathname_prefix, requests_pathname_prefix = (
_configs.pathname_configs(
url_base_pathname,
routes_pathname_prefix,
requests_pathname_prefix,
environ_configs=env_configs,
)
)
self.url_base_pathname = url_base_pathname
self.config = _AttributeDict(
{
"suppress_callback_exceptions": _configs.get_config(
"suppress_callback_exceptions",
suppress_callback_exceptions,
env_configs,
False,
),
"routes_pathname_prefix": routes_pathname_prefix,
"requests_pathname_prefix": requests_pathname_prefix,
"include_assets_files": _configs.get_config(
"include_assets_files", include_assets_files, env_configs, True
),
"assets_external_path": _configs.get_config(
"assets_external_path", assets_external_path, env_configs, ""
),
"components_cache_max_age": int(
_configs.get_config(
"components_cache_max_age",
components_cache_max_age,
env_configs,
2678400,
)
),
}
)
# list of dependencies
self.callback_map = {}
self._index_string = ""
self.index_string = index_string
self._meta_tags = meta_tags or []
self._favicon = None
if compress:
# gzip
Compress(self.server)
@self.server.errorhandler(exceptions.PreventUpdate)
def _handle_error(error):
"""Handle a halted callback and return an empty 204 response"""
print(error, file=sys.stderr)
return ("", 204)
# static files from the packages
self.css = Css()
self.scripts = Scripts()
self._external_scripts = external_scripts or []
self._external_stylesheets = external_stylesheets or []
self.assets_ignore = assets_ignore
self.registered_paths = {}
# urls
def add_url(name, view_func, methods=("GET",)):
self.server.add_url_rule(
name, view_func=view_func, endpoint=name, methods=list(methods)
)
add_url(
"{}_dash-layout".format(self.config["routes_pathname_prefix"]),
self.serve_layout,
)
add_url(
"{}_dash-dependencies".format(self.config["routes_pathname_prefix"]),
self.dependencies,
)
add_url(
"{}_dash-update-component".format(self.config["routes_pathname_prefix"]),
self.dispatch,
["POST"],
)
add_url(
(
"{}_dash-component-suites/<string:package_name>/<path:path_in_package_dist>"
).format(self.config["routes_pathname_prefix"]),
self.serve_component_suites,
)
add_url(
"{}_dash-routes".format(self.config["routes_pathname_prefix"]),
self.serve_routes,
)
add_url(self.config["routes_pathname_prefix"], self.index)
# catch-all for front-end routes
add_url("{}<path:path>".format(self.config["routes_pathname_prefix"]), self.index)
self.server.before_first_request(self._setup_server)
self._layout = None
self._cached_layout = None
self.routes = []
# add a handler for components suites errors to return 404
self.server.errorhandler(exceptions.InvalidResourceError)(
self._invalid_resources_handler
)
|
def __init__(
self,
name="__main__",
server=None,
static_folder="static",
assets_folder=None,
assets_url_path="/assets",
assets_ignore="",
include_assets_files=True,
url_base_pathname=None,
assets_external_path=None,
requests_pathname_prefix=None,
routes_pathname_prefix=None,
compress=True,
meta_tags=None,
index_string=_default_index,
external_scripts=None,
external_stylesheets=None,
suppress_callback_exceptions=None,
components_cache_max_age=None,
**kwargs,
):
# pylint-disable: too-many-instance-attributes
if "csrf_protect" in kwargs:
warnings.warn(
"""
`csrf_protect` is no longer used,
CSRF protection has been removed as it is no longer
necessary.
See https://github.com/plotly/dash/issues/141 for details.
""",
DeprecationWarning,
)
name = name if server is None else server.name
self._assets_folder = assets_folder or os.path.join(
flask.helpers.get_root_path(name), "assets"
)
self._assets_url_path = assets_url_path
# allow users to supply their own flask server
self.server = server or Flask(name, static_folder=static_folder)
if "assets" not in self.server.blueprints:
self.server.register_blueprint(
flask.Blueprint(
"assets",
"assets",
static_folder=self._assets_folder,
static_url_path=assets_url_path,
)
)
env_configs = _configs.env_configs()
url_base_pathname, routes_pathname_prefix, requests_pathname_prefix = (
_configs.pathname_configs(
url_base_pathname,
routes_pathname_prefix,
requests_pathname_prefix,
environ_configs=env_configs,
)
)
self.url_base_pathname = url_base_pathname
self.config = _AttributeDict(
{
"suppress_callback_exceptions": _configs.get_config(
"suppress_callback_exceptions",
suppress_callback_exceptions,
env_configs,
False,
),
"routes_pathname_prefix": routes_pathname_prefix,
"requests_pathname_prefix": requests_pathname_prefix,
"include_assets_files": _configs.get_config(
"include_assets_files", include_assets_files, env_configs, True
),
"assets_external_path": _configs.get_config(
"assets_external_path", assets_external_path, env_configs, ""
),
"components_cache_max_age": int(
_configs.get_config(
"components_cache_max_age",
components_cache_max_age,
env_configs,
2678400,
)
),
}
)
# list of dependencies
self.callback_map = {}
self._index_string = ""
self.index_string = index_string
self._meta_tags = meta_tags or []
self._favicon = None
if compress:
# gzip
Compress(self.server)
@self.server.errorhandler(exceptions.PreventUpdate)
def _handle_error(error):
"""Handle a halted callback and return an empty 204 response"""
print(error, file=sys.stderr)
return ("", 204)
# static files from the packages
self.css = Css()
self.scripts = Scripts()
self._external_scripts = external_scripts or []
self._external_stylesheets = external_stylesheets or []
self.assets_ignore = assets_ignore
self.registered_paths = {}
# urls
def add_url(name, view_func, methods=("GET",)):
self.server.add_url_rule(
name, view_func=view_func, endpoint=name, methods=list(methods)
)
add_url(
"{}_dash-layout".format(self.config["routes_pathname_prefix"]),
self.serve_layout,
)
add_url(
"{}_dash-dependencies".format(self.config["routes_pathname_prefix"]),
self.dependencies,
)
add_url(
"{}_dash-update-component".format(self.config["routes_pathname_prefix"]),
self.dispatch,
["POST"],
)
add_url(
(
"{}_dash-component-suites/<string:package_name>/<path:path_in_package_dist>"
).format(self.config["routes_pathname_prefix"]),
self.serve_component_suites,
)
add_url(
"{}_dash-routes".format(self.config["routes_pathname_prefix"]),
self.serve_routes,
)
add_url(self.config["routes_pathname_prefix"], self.index)
# catch-all for front-end routes
add_url("{}<path:path>".format(self.config["routes_pathname_prefix"]), self.index)
self.server.before_first_request(self._setup_server)
self._layout = None
self._cached_layout = None
self.routes = []
|
https://github.com/plotly/dash/issues/393
|
Exception on /_dash-component-suites/dash_renderer/foo.js [GET]
Traceback (most recent call last):
File "/app/venv/lib/python3.6/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/app/venv/lib/python3.6/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/app/venv/lib/python3.6/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/app/venv/lib/python3.6/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/app/venv/lib/python3.6/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/app/venv/lib/python3.6/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/app/venv/lib/python3.6/site-packages/dash/dash.py", line 417, in serve_component_suites
self.registered_paths
Exception: "dash_renderer" is registered but the path requested is not valid.
The path requested: "foo.js"
List of registered paths: {'dash_renderer': ['react@15.4.2.min.js', 'react-dom@15.4.2.min.js', 'bundle.js', 'react@15.4.2.min.js', 'react-dom@15.4.2.min.js', 'bundle.js', 'react@15.4.2.min.js', 'react-dom@15.4.2.min.js', 'bundle.js'], 'dash_html_components': ['bundle.js', 'bundle.js', 'bundle.js'], 'dash_table_experiments': ['bundle.js', 'dash_table_experiments.css', 'bundle.js', 'dash_table_experiments.css', 'bundle.js', 'dash_table_experiments.css'], 'dash_core_components': ['plotly-1.41.0.min.js', 'bundle.js', 'rc-slider@6.1.2.css', 'react-select@1.0.0-rc.3.min.css', 'react-virtualized@9.9.0.css', 'react-virtualized-select@3.1.0.css', 'react-dates@12.3.0.css', 'plotly-1.41.0.min.js', 'bundle.js', 'rc-slider@6.1.2.css', 'react-select@1.0.0-rc.3.min.css', 'react-virtualized@9.9.0.css', 'react-virtualized-select@3.1.0.css', 'react-dates@12.3.0.css', 'plotly-1.41.0.min.js', 'bundle.js', 'rc-slider@6.1.2.css', 'react-select@1.0.0-rc.3.min.css', 'react-virtualized@9.9.0.css', 'react-virtualized-select@3.1.0.css', 'react-dates@12.3.0.css']}
|
Exception
|
def serve_component_suites(self, package_name, path_in_package_dist):
if package_name not in self.registered_paths:
raise exceptions.InvalidResourceError(
"Error loading dependency.\n"
'"{}" is not a registered library.\n'
"Registered libraries are: {}".format(
package_name, list(self.registered_paths.keys())
)
)
elif path_in_package_dist not in self.registered_paths[package_name]:
raise exceptions.InvalidResourceError(
'"{}" is registered but the path requested is not valid.\n'
'The path requested: "{}"\n'
"List of registered paths: {}".format(
package_name, path_in_package_dist, self.registered_paths
)
)
mimetype = ({"js": "application/JavaScript", "css": "text/css"})[
path_in_package_dist.split(".")[-1]
]
headers = {
"Cache-Control": "public, max-age={}".format(
self.config.components_cache_max_age
)
}
return Response(
pkgutil.get_data(package_name, path_in_package_dist),
mimetype=mimetype,
headers=headers,
)
|
def serve_component_suites(self, package_name, path_in_package_dist):
if package_name not in self.registered_paths:
raise Exception(
"Error loading dependency.\n"
'"{}" is not a registered library.\n'
"Registered libraries are: {}".format(
package_name, list(self.registered_paths.keys())
)
)
elif path_in_package_dist not in self.registered_paths[package_name]:
raise Exception(
'"{}" is registered but the path requested is not valid.\n'
'The path requested: "{}"\n'
"List of registered paths: {}".format(
package_name, path_in_package_dist, self.registered_paths
)
)
mimetype = ({"js": "application/JavaScript", "css": "text/css"})[
path_in_package_dist.split(".")[-1]
]
headers = {
"Cache-Control": "public, max-age={}".format(
self.config.components_cache_max_age
)
}
return Response(
pkgutil.get_data(package_name, path_in_package_dist),
mimetype=mimetype,
headers=headers,
)
|
https://github.com/plotly/dash/issues/393
|
Exception on /_dash-component-suites/dash_renderer/foo.js [GET]
Traceback (most recent call last):
File "/app/venv/lib/python3.6/site-packages/flask/app.py", line 2292, in wsgi_app
response = self.full_dispatch_request()
File "/app/venv/lib/python3.6/site-packages/flask/app.py", line 1815, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/app/venv/lib/python3.6/site-packages/flask/app.py", line 1718, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/app/venv/lib/python3.6/site-packages/flask/_compat.py", line 35, in reraise
raise value
File "/app/venv/lib/python3.6/site-packages/flask/app.py", line 1813, in full_dispatch_request
rv = self.dispatch_request()
File "/app/venv/lib/python3.6/site-packages/flask/app.py", line 1799, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/app/venv/lib/python3.6/site-packages/dash/dash.py", line 417, in serve_component_suites
self.registered_paths
Exception: "dash_renderer" is registered but the path requested is not valid.
The path requested: "foo.js"
List of registered paths: {'dash_renderer': ['react@15.4.2.min.js', 'react-dom@15.4.2.min.js', 'bundle.js', 'react@15.4.2.min.js', 'react-dom@15.4.2.min.js', 'bundle.js', 'react@15.4.2.min.js', 'react-dom@15.4.2.min.js', 'bundle.js'], 'dash_html_components': ['bundle.js', 'bundle.js', 'bundle.js'], 'dash_table_experiments': ['bundle.js', 'dash_table_experiments.css', 'bundle.js', 'dash_table_experiments.css', 'bundle.js', 'dash_table_experiments.css'], 'dash_core_components': ['plotly-1.41.0.min.js', 'bundle.js', 'rc-slider@6.1.2.css', 'react-select@1.0.0-rc.3.min.css', 'react-virtualized@9.9.0.css', 'react-virtualized-select@3.1.0.css', 'react-dates@12.3.0.css', 'plotly-1.41.0.min.js', 'bundle.js', 'rc-slider@6.1.2.css', 'react-select@1.0.0-rc.3.min.css', 'react-virtualized@9.9.0.css', 'react-virtualized-select@3.1.0.css', 'react-dates@12.3.0.css', 'plotly-1.41.0.min.js', 'bundle.js', 'rc-slider@6.1.2.css', 'react-select@1.0.0-rc.3.min.css', 'react-virtualized@9.9.0.css', 'react-virtualized-select@3.1.0.css', 'react-dates@12.3.0.css']}
|
Exception
|
def index(self, *args, **kwargs): # pylint: disable=unused-argument
scripts = self._generate_scripts_html()
css = self._generate_css_dist_html()
config = self._generate_config_html()
title = getattr(self, "title", "Dash")
return """
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>{}</title>
{}
</head>
<body>
<div id="react-entry-point">
<div class="_dash-loading">
Loading...
</div>
</div>
<footer>
{}
{}
</footer>
</body>
</html>
""".format(title, css, config, scripts)
|
def index(self):
scripts = self._generate_scripts_html()
css = self._generate_css_dist_html()
config = self._generate_config_html()
title = getattr(self, "title", "Dash")
return """
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>{}</title>
{}
</head>
<body>
<div id="react-entry-point">
<div class="_dash-loading">
Loading...
</div>
</div>
<footer>
{}
{}
</footer>
</body>
</html>
""".format(title, css, config, scripts)
|
https://github.com/plotly/dash/issues/189
|
Traceback (most recent call last):
File "/home/nejl/.pyenv/versions/3.6.1/envs/dash/lib/python3.6/site-packages/flask/app.py", line 1998, in __call__
return self.wsgi_app(environ, start_response)
File "/home/nejl/.pyenv/versions/3.6.1/envs/dash/lib/python3.6/site-packages/flask/app.py", line 1986, in wsgi_app
response = self.handle_exception(e)
File "/home/nejl/.pyenv/versions/3.6.1/envs/dash/lib/python3.6/site-packages/flask/app.py", line 1540, in handle_exception
reraise(exc_type, exc_value, tb)
File "/home/nejl/.pyenv/versions/3.6.1/envs/dash/lib/python3.6/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/home/nejl/.pyenv/versions/3.6.1/envs/dash/lib/python3.6/site-packages/flask/app.py", line 1983, in wsgi_app
response = self.full_dispatch_request()
File "/home/nejl/.pyenv/versions/3.6.1/envs/dash/lib/python3.6/site-packages/flask/app.py", line 1615, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/home/nejl/.pyenv/versions/3.6.1/envs/dash/lib/python3.6/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/home/nejl/.pyenv/versions/3.6.1/envs/dash/lib/python3.6/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/home/nejl/.pyenv/versions/3.6.1/envs/dash/lib/python3.6/site-packages/flask/app.py", line 1613, in full_dispatch_request
rv = self.dispatch_request()
File "/home/nejl/.pyenv/versions/3.6.1/envs/dash/lib/python3.6/site-packages/flask/app.py", line 1599, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
TypeError: index() got an unexpected keyword argument 'path'
|
TypeError
|
def include(*args, **kwargs):
"""Used for including Django project settings from multiple files.
Note: Expects to get ``scope=globals()`` as a keyword argument.
Usage::
from split_settings.tools import optional, include
include(
'components/base.py',
'components/database.py',
optional('local_settings.py'),
scope=globals()
)
Parameters:
*args: File paths (``glob`` - compatible wildcards can be used)
**kwargs: The context for the settings,
should always contain ``scope=globals()``
Raises:
IOError: if a required settings file is not found
"""
scope = kwargs.pop("scope")
including_file = scope.get("__included_file__", scope["__file__"].rstrip("c"))
conf_path = os.path.dirname(including_file)
for conf_file in args:
saved_included_file = scope.get("__included_file__")
pattern = os.path.join(conf_path, conf_file)
# find files per pattern, raise an error if not found (unless file is
# optional)
files_to_include = glob.glob(pattern)
if not files_to_include and not isinstance(conf_file, _Optional):
raise IOError("No such file: %s" % pattern)
for included_file in files_to_include:
scope["__included_file__"] = included_file
with open(included_file, "rb") as to_compile:
exec(compile(to_compile.read(), included_file, "exec"), scope)
# add dummy modules to sys.modules to make runserver autoreload
# work with settings components
module_name = "_split_settings.%s" % conf_file[
: conf_file.rfind(".")
].replace("/", ".")
module = types.ModuleType(str(module_name))
module.__file__ = included_file
sys.modules[module_name] = module
if saved_included_file:
scope["__included_file__"] = saved_included_file
elif "__included_file__" in scope:
del scope["__included_file__"]
|
def include(*args, **kwargs):
"""Used for including Django project settings from multiple files.
Note: Expects to get ``scope=globals()`` as a keyword argument.
Usage::
from split_settings.tools import optional, include
include(
'components/base.py',
'components/database.py',
optional('local_settings.py'),
scope=globals()
)
Parameters:
*args: File paths (``glob`` - compatible wildcards can be used)
**kwargs: The context for the settings,
should always contain ``scope=globals()``
Raises:
IOError: if a required settings file is not found
"""
scope = kwargs.pop("scope")
including_file = scope.get("__included_file__", scope["__file__"].rstrip("c"))
conf_path = os.path.dirname(including_file)
for conf_file in args:
saved_included_file = scope.get("__included_file__")
pattern = os.path.join(conf_path, conf_file)
# find files per pattern, raise an error if not found (unless file is
# optional)
files_to_include = glob.glob(pattern)
if not files_to_include and not isinstance(conf_file, _Optional):
raise IOError("No such file: %s" % pattern)
for included_file in files_to_include:
scope["__included_file__"] = included_file
with open(included_file, "rb") as to_compile:
exec(compile(to_compile.read(), included_file, "exec"), scope)
# add dummy modules to sys.modules to make runserver autoreload
# work with settings components
module_name = "_split_settings.%s" % conf_file[
: conf_file.rfind(".")
].replace("/", ".")
module = types.ModuleType(module_name)
module.__file__ = included_file
sys.modules[module_name] = module
if saved_included_file:
scope["__included_file__"] = saved_included_file
elif "__included_file__" in scope:
del scope["__included_file__"]
|
https://github.com/sobolevn/django-split-settings/issues/9
|
Traceback (most recent call last):
File ".tox/py27/bin/django-admin.py", line 5, in <module>
management.execute_from_command_line()
File "/home/…/source/.tox/py27/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 353, in execute_from_command_line
utility.execute()
File "/home/…/source/.tox/py27/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 302, in execute
settings.INSTALLED_APPS
File "/home/…/source/.tox/py27/local/lib/python2.7/site-packages/django/conf/__init__.py", line 55, in __getattr__
self._setup(name)
File "/home/…/source/.tox/py27/local/lib/python2.7/site-packages/django/conf/__init__.py", line 43, in _setup
self._wrapped = Settings(settings_module)
File "/home/…/source/.tox/py27/local/lib/python2.7/site-packages/django/conf/__init__.py", line 99, in __init__
mod = importlib.import_module(self.SETTINGS_MODULE)
File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
__import__(name)
File "/home/…/source/streambox/streambox/settings/test.py", line 19, in <module>
scope=globals()
File "/home/…/source/.tox/py27/local/lib/python2.7/site-packages/split_settings/tools.py", line 84, in include
exec(compile(to_compile.read(), included_file, 'exec'), scope)
File "/home/…/source/streambox/streambox/settings/components/base.py", line 147, in <module>
scope=globals()
File "/home/…/source/.tox/py27/local/lib/python2.7/site-packages/split_settings/tools.py", line 90, in include
module = types.ModuleType(module_name)
TypeError: module.__init__() argument 1 must be string, not unicode
|
TypeError
|
def _append_compressed_format_query(handle):
# Convert the tuple from urlparse into list so it can be updated in place.
parsed = list(urlparse.urlparse(handle))
qsl = urlparse.parse_qsl(parsed[4])
qsl.append(_COMPRESSED_FORMAT_QUERY)
# NOTE: Cast to string to avoid urlunparse to deal with mixed types.
# This happens due to backport of urllib.parse into python2 returning an
# instance of <class 'future.types.newstr.newstr'>.
parsed[4] = str(urlencode(qsl))
return urlparse.urlunparse(parsed)
|
def _append_compressed_format_query(handle):
# Convert the tuple from urlparse into list so it can be updated in place.
parsed = list(urlparse.urlparse(handle))
qsl = urlparse.parse_qsl(parsed[4])
qsl.append(_COMPRESSED_FORMAT_QUERY)
parsed[4] = urlencode(qsl)
return urlparse.urlunparse(parsed)
|
https://github.com/tensorflow/hub/issues/76
|
[2018-06-12 20:47:19,845] {base_task_runner.py:98} INFO - Subtask: [2018-06-12 20:47:19,841] {tf_hub_encode_posts.py:118} INFO - ... save posts into list ...
[2018-06-12 20:47:19,845] {base_task_runner.py:98} INFO - Subtask: [2018-06-12 20:47:19,842] {tf_hub_encode_posts.py:121} INFO - ... look at posts ...
[2018-06-12 20:47:19,847] {base_task_runner.py:98} INFO - Subtask: [2018-06-12 20:47:19,842] {tf_hub_encode_posts.py:122} INFO - [u'11 Stars Who Looked Sexy Playing Home-Wreckers On-Screen: Taylor Swift &amp; More', u"Shania Twain Performs 'Life's About To Get Good' In Sexy Cheetah Print Suit On James Corden", u'Selena Gomez &amp; Justin Bieber: It\u2019s \u2018Painful\u2019 For Her To See Him With Hailey Baldwin', u"Meghan Markle Is Adjusting To Royal Life 'Faster Than Expected' -- Which Royal Is Her New Bestie?", u'Dennis Rodman Mocked After He Breaks Down On CNN After Trump Meets With Kim Jong Un', u'Kate Upton Shows Off Major Sideboob In Sexy Nude Video To Thank Fans For Their Birthday Wishes', u'Hailey Baldwin Playfully Dries Justin Bieber Off With A Towel After Taking A Dip In The Pool', u"'Criminal Minds': Kirsten Vangsness Reveals What\u2019s Really On Her Computer Screen On The Show", u"Mac Miller 'Devastated' Over Ariana's Engagement To Pete Davidson: 'It's A Punch To The Gut'", u'Bode Miller: 5 Things To Know About Olympian Whose 19-Month-Old Daughter Tragically Drowned']
[2018-06-12 20:47:19,847] {base_task_runner.py:98} INFO - Subtask: [2018-06-12 20:47:19,842] {tf_hub_encode_posts.py:125} INFO - ... begin - get module from tf-hub ...
[2018-06-12 20:47:19,858] {base_task_runner.py:98} INFO - Subtask: [2018-06-12 20:47:19,857] {tf_logging.py:160} INFO - Using /tmp/tfhub_modules to cache modules.
[2018-06-12 20:47:19,858] {base_task_runner.py:98} INFO - Subtask: [2018-06-12 20:47:19,858] {tf_logging.py:116} INFO - Downloading TF-Hub Module 'https://tfhub.dev/google/nnlm-en-dim50/1'.
[2018-06-12 20:47:19,935] {base_task_runner.py:98} INFO - Subtask: [2018-06-12 20:47:19,935] {sendgrid.py:84} INFO - Email with subject Airflow alert: <TaskInstance: tf_hub_encode_posts.hollywoodlife_encode_posts_nnlm_en_dim50 2018-06-12 20:30:00 [up_for_retry]> is successfully sent to recipients: [{'to': [{'email': 'andrew.maguire@pmc.com'}]}]
[2018-06-12 20:47:19,974] {base_task_runner.py:98} INFO - Subtask: Traceback (most recent call last):
[2018-06-12 20:47:19,975] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/bin/airflow", line 27, in <module>
[2018-06-12 20:47:19,975] {base_task_runner.py:98} INFO - Subtask: args.func(args)
[2018-06-12 20:47:19,975] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/airflow/bin/cli.py", line 392, in run
[2018-06-12 20:47:19,976] {base_task_runner.py:98} INFO - Subtask: pool=args.pool,
[2018-06-12 20:47:19,976] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/airflow/utils/db.py", line 50, in wrapper
[2018-06-12 20:47:19,976] {base_task_runner.py:98} INFO - Subtask: result = func(*args, **kwargs)
[2018-06-12 20:47:19,976] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/airflow/models.py", line 1492, in _run_raw_task
[2018-06-12 20:47:19,976] {base_task_runner.py:98} INFO - Subtask: result = task_copy.execute(context=context)
[2018-06-12 20:47:19,977] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/airflow/operators/python_operator.py", line 89, in execute
[2018-06-12 20:47:19,977] {base_task_runner.py:98} INFO - Subtask: return_value = self.execute_callable()
[2018-06-12 20:47:19,977] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/airflow/operators/python_operator.py", line 94, in execute_callable
[2018-06-12 20:47:19,977] {base_task_runner.py:98} INFO - Subtask: return self.python_callable(*self.op_args, **self.op_kwargs)
[2018-06-12 20:47:19,977] {base_task_runner.py:98} INFO - Subtask: File "/home/airflow/gcs/dags/tf_hub_encode_posts.py", line 127, in fn_encode_posts
[2018-06-12 20:47:19,978] {base_task_runner.py:98} INFO - Subtask: embed = hub.Module("https://tfhub.dev/google/nnlm-en-dim50/1")
[2018-06-12 20:47:19,978] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/tensorflow_hub/module.py", line 105, in __init__
[2018-06-12 20:47:19,979] {base_task_runner.py:98} INFO - Subtask: self._spec = as_module_spec(spec)
[2018-06-12 20:47:19,979] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/tensorflow_hub/module.py", line 31, in as_module_spec
[2018-06-12 20:47:19,980] {base_task_runner.py:98} INFO - Subtask: return native_module.load_module_spec(spec)
[2018-06-12 20:47:19,980] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/tensorflow_hub/native_module.py", line 99, in load_module_spec
[2018-06-12 20:47:19,981] {base_task_runner.py:98} INFO - Subtask: path = compressed_module_resolver.get_default().get_module_path(path)
[2018-06-12 20:47:19,982] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/tensorflow_hub/resolver.py", line 385, in get_module_path
[2018-06-12 20:47:19,982] {base_task_runner.py:98} INFO - Subtask: return self._get_module_path(handle)
[2018-06-12 20:47:19,983] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/tensorflow_hub/resolver.py", line 467, in _get_module_path
[2018-06-12 20:47:19,983] {base_task_runner.py:98} INFO - Subtask: return resolver.get_module_path(handle)
[2018-06-12 20:47:19,983] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/tensorflow_hub/resolver.py", line 385, in get_module_path
[2018-06-12 20:47:19,984] {base_task_runner.py:98} INFO - Subtask: return self._get_module_path(handle)
[2018-06-12 20:47:19,984] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/tensorflow_hub/compressed_module_resolver.py", line 105, in _get_module_path
[2018-06-12 20:47:19,985] {base_task_runner.py:98} INFO - Subtask: self._lock_file_timeout_sec())
[2018-06-12 20:47:19,985] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/tensorflow_hub/resolver.py", line 313, in atomic_download
[2018-06-12 20:47:19,985] {base_task_runner.py:98} INFO - Subtask: download_fn(handle, tmp_dir)
[2018-06-12 20:47:19,986] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/tensorflow_hub/compressed_module_resolver.py", line 86, in download
[2018-06-12 20:47:19,986] {base_task_runner.py:98} INFO - Subtask: request = url.Request(_append_compressed_format_query(handle))
[2018-06-12 20:47:19,986] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/tensorflow_hub/compressed_module_resolver.py", line 62, in _append_compressed_format_query
[2018-06-12 20:47:19,987] {base_task_runner.py:98} INFO - Subtask: return urlparse.urlunparse(parsed)
[2018-06-12 20:47:19,987] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/future/backports/urllib/parse.py", line 387, in urlunparse
[2018-06-12 20:47:19,987] {base_task_runner.py:98} INFO - Subtask: _coerce_args(*components))
[2018-06-12 20:47:19,988] {base_task_runner.py:98} INFO - Subtask: File "/usr/local/lib/python2.7/site-packages/future/backports/urllib/parse.py", line 115, in _coerce_args
[2018-06-12 20:47:19,988] {base_task_runner.py:98} INFO - Subtask: raise TypeError("Cannot mix str and non-str arguments")
[2018-06-12 20:47:19,988] {base_task_runner.py:98} INFO - Subtask: TypeError: Cannot mix str and non-str arguments
|
TypeError
|
def format_basic(df: dd.DataFrame) -> Dict[str, Any]:
# pylint: disable=too-many-statements
"""
Format basic version.
Parameters
----------
df
The DataFrame for which data are calculated.
Returns
-------
Dict[str, Any]
A dictionary in which formatted data is stored.
This variable acts like an API in passing data to the template engine.
"""
# pylint: disable=too-many-locals
# aggregate all computations
data, completions = basic_computations(df)
with catch_warnings():
filterwarnings(
"ignore",
"invalid value encountered in true_divide",
category=RuntimeWarning,
)
(data,) = dask.compute(data)
# results dictionary
res: Dict[str, Any] = {}
# overview
data["ov"].pop("ks_tests")
res["overview"] = format_ov_stats(data["ov"])
# variables
res["variables"] = {}
for col in df.columns:
stats: Any = None # needed for pylint
if is_dtype(detect_dtype(df[col]), Continuous()):
itmdt = Intermediate(
col=col, data=data[col], visual_type="numerical_column"
)
stats = format_num_stats(data[col])
elif is_dtype(detect_dtype(df[col]), Nominal()):
itmdt = Intermediate(
col=col, data=data[col], visual_type="categorical_column"
)
stats = format_cat_stats(
data[col]["stats"], data[col]["len_stats"], data[col]["letter_stats"]
)
elif is_dtype(detect_dtype(df[col]), DateTime()):
itmdt = Intermediate(
col=col,
data=data[col]["stats"],
line=data[col]["line"],
visual_type="datetime_column",
)
stats = stats_viz_dt(data[col]["stats"])
rndrd = render(itmdt, plot_height_lrg=250, plot_width_lrg=280)["layout"]
figs: List[Figure] = []
for tab in rndrd:
try:
fig = tab.children[0]
except AttributeError:
fig = tab
# fig.title = Title(text=tab.title, align="center")
figs.append(fig)
res["variables"][col] = {
"tabledata": stats,
"plots": components(figs),
"col_type": itmdt.visual_type.replace("_column", ""),
}
if len(data["num_cols"]) > 0:
# interactions
res["has_interaction"] = True
itmdt = Intermediate(data=data["scat"], visual_type="correlation_crossfilter")
rndrd = render_correlation(itmdt)
rndrd.sizing_mode = "stretch_width"
res["interactions"] = components(rndrd)
# correlations
res["has_correlation"] = True
dfs: Dict[str, pd.DataFrame] = {}
for method, corr in data["corrs"].items():
ndf = pd.DataFrame(
{
"x": data["num_cols"][data["cordx"]],
"y": data["num_cols"][data["cordy"]],
"correlation": corr.ravel(),
}
)
dfs[method.name] = ndf[data["cordy"] > data["cordx"]]
itmdt = Intermediate(
data=dfs,
axis_range=list(data["num_cols"]),
visual_type="correlation_heatmaps",
)
rndrd = render_correlation(itmdt)
figs.clear()
for tab in rndrd.tabs:
fig = tab.child
fig.sizing_mode = "stretch_width"
fig.title = Title(text=tab.title, align="center", text_font_size="20px")
figs.append(fig)
res["correlations"] = components(figs)
else:
res["has_interaction"], res["has_correlation"] = False, False
# missing
res["has_missing"] = True
itmdt = completions["miss"](data["miss"])
rndrd = render_missing(itmdt)
figs.clear()
for fig in rndrd["layout"]:
fig.sizing_mode = "stretch_width"
fig.title = Title(
text=rndrd["meta"][rndrd["layout"].index(fig)],
align="center",
text_font_size="20px",
)
figs.append(fig)
res["missing"] = components(figs)
return res
|
def format_basic(df: dd.DataFrame) -> Dict[str, Any]:
# pylint: disable=too-many-statements
"""
Format basic version.
Parameters
----------
df
The DataFrame for which data are calculated.
Returns
-------
Dict[str, Any]
A dictionary in which formatted data is stored.
This variable acts like an API in passing data to the template engine.
"""
# pylint: disable=too-many-locals
# aggregate all computations
data, completions = basic_computations(df)
with catch_warnings():
filterwarnings(
"ignore",
"invalid value encountered in true_divide",
category=RuntimeWarning,
)
(data,) = dask.compute(data)
# results dictionary
res: Dict[str, Any] = {}
# overview
data["ov"].pop("ks_tests")
res["overview"] = format_ov_stats(data["ov"])
# variables
res["variables"] = {}
for col in df.columns:
stats: Any = None # needed for pylint
if is_dtype(detect_dtype(df[col]), Continuous()):
itmdt = Intermediate(
col=col, data=data[col], visual_type="numerical_column"
)
rndrd = render(itmdt, plot_height_lrg=250, plot_width_lrg=280)["layout"]
stats = format_num_stats(data[col])
elif is_dtype(detect_dtype(df[col]), Nominal()):
itmdt = Intermediate(
col=col, data=data[col], visual_type="categorical_column"
)
rndrd = render(itmdt, plot_height_lrg=250, plot_width_lrg=280)["layout"]
stats = format_cat_stats(
data[col]["stats"], data[col]["len_stats"], data[col]["letter_stats"]
)
figs: List[Figure] = []
for tab in rndrd:
try:
fig = tab.children[0]
except AttributeError:
fig = tab
# fig.title = Title(text=tab.title, align="center")
figs.append(fig)
res["variables"][col] = {
"tabledata": stats,
"plots": components(figs),
"col_type": itmdt.visual_type.replace("_column", ""),
}
if len(data["num_cols"]) > 0:
# interactions
res["has_interaction"] = True
itmdt = Intermediate(data=data["scat"], visual_type="correlation_crossfilter")
rndrd = render_correlation(itmdt)
rndrd.sizing_mode = "stretch_width"
res["interactions"] = components(rndrd)
# correlations
res["has_correlation"] = True
dfs: Dict[str, pd.DataFrame] = {}
for method, corr in data["corrs"].items():
ndf = pd.DataFrame(
{
"x": data["num_cols"][data["cordx"]],
"y": data["num_cols"][data["cordy"]],
"correlation": corr.ravel(),
}
)
dfs[method.name] = ndf[data["cordy"] > data["cordx"]]
itmdt = Intermediate(
data=dfs,
axis_range=list(data["num_cols"]),
visual_type="correlation_heatmaps",
)
rndrd = render_correlation(itmdt)
figs.clear()
for tab in rndrd.tabs:
fig = tab.child
fig.sizing_mode = "stretch_width"
fig.title = Title(text=tab.title, align="center", text_font_size="20px")
figs.append(fig)
res["correlations"] = components(figs)
else:
res["has_interaction"], res["has_correlation"] = False, False
# missing
res["has_missing"] = True
itmdt = completions["miss"](data["miss"])
rndrd = render_missing(itmdt)
figs.clear()
for fig in rndrd["layout"]:
fig.sizing_mode = "stretch_width"
fig.title = Title(
text=rndrd["meta"][rndrd["layout"].index(fig)],
align="center",
text_font_size="20px",
)
figs.append(fig)
res["missing"] = components(figs)
return res
|
https://github.com/sfu-db/dataprep/issues/412
|
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-10-c4d17e3c9e92> in <module>
----> 1 report = eda.create_report(dist_df, title="Distribution Report")
/usr/local/lib/python3.8/site-packages/dataprep/eda/create_report/__init__.py in create_report(df, title, mode, progress)
52 "resources": INLINE.render(),
53 "title": title,
---> 54 "components": format_report(df, mode, progress),
55 }
56 template_base = ENV_LOADER.get_template("base.html")
/usr/local/lib/python3.8/site-packages/dataprep/eda/create_report/formatter.py in format_report(df, mode, progress)
61 df = string_dtype_to_object(df)
62 if mode == "basic":
---> 63 comps = format_basic(df)
64 # elif mode == "full":
65 # comps = format_full(df)
/usr/local/lib/python3.8/site-packages/dataprep/eda/create_report/formatter.py in format_basic(df)
121 )
122 figs: List[Figure] = []
--> 123 for tab in rndrd:
124 try:
125 fig = tab.children[0]
UnboundLocalError: local variable 'rndrd' referenced before assignment
|
UnboundLocalError
|
def basic_computations(df: dd.DataFrame) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Computations for the basic version.
Parameters
----------
df
The DataFrame for which data are calculated.
"""
data: Dict[str, Any] = {}
df = DataArray(df)
df_num = df.select_num_columns()
data["num_cols"] = df_num.columns
first_rows = df.select_dtypes(CATEGORICAL_DTYPES).head
# variables
for col in df.columns:
if is_dtype(detect_dtype(df.frame[col]), Continuous()):
data[col] = cont_comps(df.frame[col], 20)
elif is_dtype(detect_dtype(df.frame[col]), Nominal()):
# cast the column as string type if it contains a mutable type
try:
first_rows[col].apply(hash)
except TypeError:
df.frame[col] = df.frame[col].astype(str)
data[col] = nom_comps(
df.frame[col], first_rows[col], 10, True, 10, 20, True, False, False
)
elif is_dtype(detect_dtype(df.frame[col]), DateTime()):
data[col] = {}
data[col]["stats"] = calc_stats_dt(df.frame[col])
data[col]["line"] = dask.delayed(_calc_line_dt)(df.frame[[col]], "auto")
# overview
data["ov"] = calc_stats(df.frame, None)
# interactions
data["scat"] = df_num.frame.map_partitions(
lambda x: x.sample(min(1000, x.shape[0])), meta=df_num.frame
)
# correlations
data.update(zip(("cordx", "cordy", "corrs"), correlation_nxn(df_num)))
# missing values
(
delayed,
completion,
) = compute_missing_nullivariate( # pylint: disable=unexpected-keyword-arg
df, 30, _staged=True
)
data["miss"] = delayed
completions = {"miss": completion}
return data, completions
|
def basic_computations(df: dd.DataFrame) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Computations for the basic version.
Parameters
----------
df
The DataFrame for which data are calculated.
"""
data: Dict[str, Any] = {}
df = DataArray(df)
df_num = df.select_num_columns()
data["num_cols"] = df_num.columns
first_rows = df.select_dtypes(CATEGORICAL_DTYPES).head
# variables
for col in df.columns:
if is_dtype(detect_dtype(df.frame[col]), Continuous()):
data[col] = cont_comps(df.frame[col], 20)
elif is_dtype(detect_dtype(df.frame[col]), Nominal()):
# cast the column as string type if it contains a mutable type
try:
first_rows[col].apply(hash)
except TypeError:
df.frame[col] = df.frame[col].astype(str)
data[col] = nom_comps(
df.frame[col], first_rows[col], 10, True, 10, 20, True, False, False
)
# overview
data["ov"] = calc_stats(df.frame, None)
# interactions
data["scat"] = df_num.frame.map_partitions(
lambda x: x.sample(min(1000, x.shape[0])), meta=df_num.frame
)
# correlations
data.update(zip(("cordx", "cordy", "corrs"), correlation_nxn(df_num)))
# missing values
(
delayed,
completion,
) = compute_missing_nullivariate( # pylint: disable=unexpected-keyword-arg
df, 30, _staged=True
)
data["miss"] = delayed
completions = {"miss": completion}
return data, completions
|
https://github.com/sfu-db/dataprep/issues/412
|
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-10-c4d17e3c9e92> in <module>
----> 1 report = eda.create_report(dist_df, title="Distribution Report")
/usr/local/lib/python3.8/site-packages/dataprep/eda/create_report/__init__.py in create_report(df, title, mode, progress)
52 "resources": INLINE.render(),
53 "title": title,
---> 54 "components": format_report(df, mode, progress),
55 }
56 template_base = ENV_LOADER.get_template("base.html")
/usr/local/lib/python3.8/site-packages/dataprep/eda/create_report/formatter.py in format_report(df, mode, progress)
61 df = string_dtype_to_object(df)
62 if mode == "basic":
---> 63 comps = format_basic(df)
64 # elif mode == "full":
65 # comps = format_full(df)
/usr/local/lib/python3.8/site-packages/dataprep/eda/create_report/formatter.py in format_basic(df)
121 )
122 figs: List[Figure] = []
--> 123 for tab in rndrd:
124 try:
125 fig = tab.children[0]
UnboundLocalError: local variable 'rndrd' referenced before assignment
|
UnboundLocalError
|
def calc_stats_dt(srs: dd.Series) -> Dict[str, str]:
"""
Calculate stats from a datetime column
Parameters
----------
srs
a datetime column
Returns
-------
Dict[str, str]
Dictionary that contains Overview
"""
size = srs.shape[0] # include nan
count = srs.count() # exclude nan
uniq_count = srs.nunique()
overview_dict = {
"Distinct Count": uniq_count,
"Unique (%)": uniq_count / count,
"Missing": size - count,
"Missing (%)": 1 - (count / size),
"Memory Size": srs.memory_usage(deep=True),
"Minimum": srs.min(),
"Maximum": srs.max(),
}
return overview_dict
|
def calc_stats_dt(srs: dd.Series) -> Dict[str, str]:
"""
Calculate stats from a datetime column
Parameters
----------
srs
a datetime column
Returns
-------
Dict[str, str]
Dictionary that contains Overview
"""
size = len(srs) # include nan
count = srs.count() # exclude nan
uniq_count = srs.nunique()
overview_dict = {
"Distinct Count": uniq_count,
"Unique (%)": uniq_count / count,
"Missing": size - count,
"Missing (%)": 1 - (count / size),
"Memory Size": srs.memory_usage(),
"Minimum": srs.min(),
"Maximum": srs.max(),
}
return overview_dict
|
https://github.com/sfu-db/dataprep/issues/412
|
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-10-c4d17e3c9e92> in <module>
----> 1 report = eda.create_report(dist_df, title="Distribution Report")
/usr/local/lib/python3.8/site-packages/dataprep/eda/create_report/__init__.py in create_report(df, title, mode, progress)
52 "resources": INLINE.render(),
53 "title": title,
---> 54 "components": format_report(df, mode, progress),
55 }
56 template_base = ENV_LOADER.get_template("base.html")
/usr/local/lib/python3.8/site-packages/dataprep/eda/create_report/formatter.py in format_report(df, mode, progress)
61 df = string_dtype_to_object(df)
62 if mode == "basic":
---> 63 comps = format_basic(df)
64 # elif mode == "full":
65 # comps = format_full(df)
/usr/local/lib/python3.8/site-packages/dataprep/eda/create_report/formatter.py in format_basic(df)
121 )
122 figs: List[Figure] = []
--> 123 for tab in rndrd:
124 try:
125 fig = tab.children[0]
UnboundLocalError: local variable 'rndrd' referenced before assignment
|
UnboundLocalError
|
def nom_insights(data: Dict[str, Any], col: str) -> Dict[str, List[str]]:
"""
Format the insights for plot(df, Nominal())
"""
# pylint: disable=line-too-long
# insight dictionary, with a list associated with each plot
ins: Dict[str, List[str]] = {
"stat": [],
"bar": [],
"pie": [],
"cloud": [],
"wf": [],
"wl": [],
}
## if cfg.insight.constant_enable:
if data["nuniq"] == 1:
ins["stat"].append(f"{col} has a constant value")
## if cfg.insight.high_cardinality_enable:
if data["nuniq"] > 50: ## cfg.insght.high_cardinality_threshold
nuniq = data["nuniq"]
ins["stat"].append(f"{col} has a high cardinality: {nuniq} distinct values")
## if cfg.insight.missing_enable:
pmiss = round((data["nrows"] - data["stats"]["npres"]) / data["nrows"] * 100, 2)
if pmiss > 1: ## cfg.insight.missing_threshold
nmiss = data["nrows"] - data["stats"]["npres"]
ins["stat"].append(f"{col} has {nmiss} ({pmiss}%) missing values")
## if cfg.insight.constant_length_enable:
if data["stats"]["nuniq"] == data["stats"]["npres"]:
ins["stat"].append(f"{col} has all distinct values")
## if cfg.insight.evenness_enable:
if data["chisq"][1] > 0.999: ## cfg.insight.uniform_threshold
ins["bar"].append(f"{col} is relatively evenly distributed")
## if cfg.insight.outstanding_no1_enable
factor = data["bar"][0] / data["bar"][1] if len(data["bar"]) > 1 else 0
if factor > 1.5:
val1, val2 = data["bar"].index[0], data["bar"].index[1]
ins["bar"].append(
f"The largest value ({val1}) is over {factor} times larger than the second largest value ({val2})"
)
## if cfg.insight.attribution_enable
if data["pie"][:2].sum() / data["nrows"] > 0.5 and len(data["pie"]) >= 2:
vals = ", ".join(data["pie"].index[i] for i in range(2))
ins["pie"].append(f"The top 2 categories ({vals}) take over 50%")
## if cfg.insight.high_word_cardinlaity_enable
if data["nwords"] > 1000:
nwords = data["nwords"]
ins["cloud"].append(f"{col} contains many words: {nwords} words")
## if cfg.insight.outstanding_no1_word_enable
factor = (
data["word_cnts"][0] / data["word_cnts"][1] if len(data["word_cnts"]) > 1 else 0
)
if factor > 1.5:
val1, val2 = data["word_cnts"].index[0], data["word_cnts"].index[1]
ins["wf"].append(
f"The largest value ({val1}) is over {factor} times larger than the second largest value ({val2})"
)
## if cfg.insight.constant_word_length_enable
if data["len_stats"]["Minimum"] == data["len_stats"]["Maximum"]:
ins["wf"].append(f"{col} has words of constant length")
return ins
|
def nom_insights(data: Dict[str, Any], col: str) -> Dict[str, List[str]]:
"""
Format the insights for plot(df, Nominal())
"""
# pylint: disable=line-too-long
# insight dictionary, with a list associated with each plot
ins: Dict[str, List[str]] = {
"stat": [],
"bar": [],
"pie": [],
"cloud": [],
"wf": [],
"wl": [],
}
## if cfg.insight.constant_enable:
if data["nuniq"] == 1:
ins["stat"].append(f"{col} has a constant value")
## if cfg.insight.high_cardinality_enable:
if data["nuniq"] > 50: ## cfg.insght.high_cardinality_threshold
nuniq = data["nuniq"]
ins["stat"].append(f"{col} has a high cardinality: {nuniq} distinct values")
## if cfg.insight.missing_enable:
pmiss = round((data["nrows"] - data["stats"]["npres"]) / data["nrows"] * 100, 2)
if pmiss > 1: ## cfg.insight.missing_threshold
nmiss = data["nrows"] - data["stats"]["npres"]
ins["stat"].append(f"{col} has {nmiss} ({pmiss}%) missing values")
## if cfg.insight.constant_length_enable:
if data["stats"]["nuniq"] == data["stats"]["npres"]:
ins["stat"].append(f"{col} has all distinct values")
## if cfg.insight.evenness_enable:
if data["chisq"][1] > 0.999: ## cfg.insight.uniform_threshold
ins["bar"].append(f"{col} is relatively evenly distributed")
## if cfg.insight.outstanding_no1_enable
factor = data["bar"][0] / data["bar"][1] if len(data["bar"]) > 1 else 0
if factor > 1.5:
val1, val2 = data["bar"].index[0], data["bar"].index[1]
ins["bar"].append(
f"The largest value ({val1}) is over {factor} times larger than the second largest value ({val2})"
)
## if cfg.insight.attribution_enable
if data["pie"][:2].sum() / data["nrows"] > 0.5:
vals = ", ".join(data["pie"].index[i] for i in range(2))
ins["pie"].append(f"The top 2 categories ({vals}) take over 50%")
## if cfg.insight.high_word_cardinlaity_enable
if data["nwords"] > 1000:
nwords = data["nwords"]
ins["cloud"].append(f"{col} contains many words: {nwords} words")
## if cfg.insight.outstanding_no1_word_enable
factor = (
data["word_cnts"][0] / data["word_cnts"][1] if len(data["word_cnts"]) > 1 else 0
)
if factor > 1.5:
val1, val2 = data["word_cnts"].index[0], data["word_cnts"].index[1]
ins["wf"].append(
f"The largest value ({val1}) is over {factor} times larger than the second largest value ({val2})"
)
## if cfg.insight.constant_word_length_enable
if data["len_stats"]["Minimum"] == data["len_stats"]["Maximum"]:
ins["wf"].append(f"{col} has words of constant length")
return ins
|
https://github.com/sfu-db/dataprep/issues/321
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-31-463fb2fdfb17> in <module>
----> 1 create_report(df)
~/projects/dataprep/dataprep/eda/create_report/__init__.py in create_report(df, title, mode, progress)
50 "resources": INLINE.render(),
51 "title": title,
---> 52 "components": format_report(df, mode, progress),
53 }
54 template_base = ENV_LOADER.get_template("base.html")
~/projects/dataprep/dataprep/eda/create_report/formatter.py in format_report(df, mode, progress)
57 df = to_dask(df)
58 if mode == "basic":
---> 59 comps = format_basic(df)
60 # elif mode == "full":
61 # comps = format_full(df)
~/projects/dataprep/dataprep/eda/create_report/formatter.py in format_basic(df)
109 col=col, data=data[col], visual_type="categorical_column"
110 )
--> 111 rndrd = render(itmdt, plot_height_lrg=250, plot_width_lrg=280)
112 stats = format_cat_stats(
113 data[col]["stats"], data[col]["len_stats"], data[col]["letter_stats"]
~/projects/dataprep/dataprep/eda/distribution/render.py in render(itmdt, yscale, tile_size, plot_width_sml, plot_height_sml, plot_width_lrg, plot_height_lrg, plot_width_wide)
2111 )
2112 elif itmdt.visual_type == "categorical_column":
-> 2113 visual_elem = render_cat(itmdt, yscale, plot_width_lrg, plot_height_lrg)
2114 elif itmdt.visual_type == "numerical_column":
2115 visual_elem = render_num(itmdt, yscale, plot_width_lrg, plot_height_lrg)
~/projects/dataprep/dataprep/eda/distribution/render.py in render_cat(itmdt, yscale, plot_width, plot_height)
1674 tabs = Tabs(tabs=tabs)
1675 # insights
-> 1676 nom_insights(data, col)
1677 # TODO return insights
1678 return tabs
~/projects/dataprep/dataprep/eda/distribution/render.py in nom_insights(data, col)
1727 ## if cfg.insight.attribution_enable
1728 if data["pie"][:2].sum() / data["nrows"] > 0.5:
-> 1729 vals = ", ".join(data["pie"].index[i] for i in range(2))
1730 ins["pie"].append(f"The top 2 categories ({vals}) take over 50%")
1731
~/projects/dataprep/dataprep/eda/distribution/render.py in <genexpr>(.0)
1727 ## if cfg.insight.attribution_enable
1728 if data["pie"][:2].sum() / data["nrows"] > 0.5:
-> 1729 vals = ", ".join(data["pie"].index[i] for i in range(2))
1730 ins["pie"].append(f"The top 2 categories ({vals}) take over 50%")
1731
~/projects/dataprep/.venv/lib/python3.7/site-packages/pandas/core/indexes/base.py in __getitem__(self, key)
3928 if is_scalar(key):
3929 key = com.cast_scalar_indexer(key)
-> 3930 return getitem(key)
3931
3932 if isinstance(key, slice):
IndexError: index 1 is out of bounds for axis 0 with size 1
|
IndexError
|
def compute(
df: Union[pd.DataFrame, dd.DataFrame],
x: Optional[str] = None,
y: Optional[str] = None,
z: Optional[str] = None,
*,
bins: int = 10,
ngroups: int = 10,
largest: bool = True,
nsubgroups: int = 5,
timeunit: str = "auto",
agg: str = "mean",
sample_size: int = 1000,
top_words: int = 30,
stopword: bool = True,
lemmatize: bool = False,
stem: bool = False,
value_range: Optional[Tuple[float, float]] = None,
dtype: Optional[DTypeDef] = None,
) -> Intermediate:
"""All in one compute function.
Parameters
----------
df
Dataframe from which plots are to be generated
x: Optional[str], default None
A valid column name from the dataframe
y: Optional[str], default None
A valid column name from the dataframe
z: Optional[str], default None
A valid column name from the dataframe
bins: int, default 10
For a histogram or box plot with numerical x axis, it defines
the number of equal-width bins to use when grouping.
ngroups: int, default 10
When grouping over a categorical column, it defines the
number of groups to show in the plot. Ie, the number of
bars to show in a bar chart.
largest: bool, default True
If true, when grouping over a categorical column, the groups
with the largest count will be output. If false, the groups
with the smallest count will be output.
nsubgroups: int, default 5
If x and y are categorical columns, ngroups refers to
how many groups to show from column x, and nsubgroups refers to
how many subgroups to show from column y in each group in column x.
timeunit: str, default "auto"
Defines the time unit to group values over for a datetime column.
It can be "year", "quarter", "month", "week", "day", "hour",
"minute", "second". With default value "auto", it will use the
time unit such that the resulting number of groups is closest to 15.
agg: str, default "mean"
Specify the aggregate to use when aggregating over a numeric column
sample_size: int, default 1000
Sample size for the scatter plot
top_words: int, default 30
Specify the amount of words to show in the wordcloud and
word frequency bar chart
stopword: bool, default True
Eliminate the stopwords in the text data for plotting wordcloud and
word frequency bar chart
lemmatize: bool, default False
Lemmatize the words in the text data for plotting wordcloud and
word frequency bar chart
stem: bool, default False
Apply Potter Stem on the text data for plotting wordcloud and
word frequency bar chart
value_range: Optional[Tuple[float, float]], default None
The lower and upper bounds on the range of a numerical column.
Applies when column x is specified and column y is unspecified.
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
""" # pylint: disable=too-many-locals
df.columns = df.columns.astype(str)
df = to_dask(df)
if not any((x, y, z)):
return compute_overview(df, bins, ngroups, largest, timeunit, dtype)
if sum(v is None for v in (x, y, z)) == 2:
col: str = cast(str, x or y or z)
return compute_univariate(
df,
col,
bins,
ngroups,
largest,
timeunit,
top_words,
stopword,
lemmatize,
stem,
value_range,
dtype,
)
if sum(v is None for v in (x, y, z)) == 1:
x, y = (v for v in (x, y, z) if v is not None)
return compute_bivariate(
df,
x,
y,
bins,
ngroups,
largest,
nsubgroups,
timeunit,
agg,
sample_size,
dtype,
)
if x is not None and y is not None and z is not None:
return compute_trivariate(df, x, y, z, ngroups, largest, timeunit, agg, dtype)
raise ValueError("not possible")
|
def compute(
df: Union[pd.DataFrame, dd.DataFrame],
x: Optional[str] = None,
y: Optional[str] = None,
z: Optional[str] = None,
*,
bins: int = 10,
ngroups: int = 10,
largest: bool = True,
nsubgroups: int = 5,
timeunit: str = "auto",
agg: str = "mean",
sample_size: int = 1000,
top_words: int = 30,
stopword: bool = True,
lemmatize: bool = False,
stem: bool = False,
value_range: Optional[Tuple[float, float]] = None,
dtype: Optional[DTypeDef] = None,
) -> Intermediate:
"""All in one compute function.
Parameters
----------
df
Dataframe from which plots are to be generated
x: Optional[str], default None
A valid column name from the dataframe
y: Optional[str], default None
A valid column name from the dataframe
z: Optional[str], default None
A valid column name from the dataframe
bins: int, default 10
For a histogram or box plot with numerical x axis, it defines
the number of equal-width bins to use when grouping.
ngroups: int, default 10
When grouping over a categorical column, it defines the
number of groups to show in the plot. Ie, the number of
bars to show in a bar chart.
largest: bool, default True
If true, when grouping over a categorical column, the groups
with the largest count will be output. If false, the groups
with the smallest count will be output.
nsubgroups: int, default 5
If x and y are categorical columns, ngroups refers to
how many groups to show from column x, and nsubgroups refers to
how many subgroups to show from column y in each group in column x.
timeunit: str, default "auto"
Defines the time unit to group values over for a datetime column.
It can be "year", "quarter", "month", "week", "day", "hour",
"minute", "second". With default value "auto", it will use the
time unit such that the resulting number of groups is closest to 15.
agg: str, default "mean"
Specify the aggregate to use when aggregating over a numeric column
sample_size: int, default 1000
Sample size for the scatter plot
top_words: int, default 30
Specify the amount of words to show in the wordcloud and
word frequency bar chart
stopword: bool, default True
Eliminate the stopwords in the text data for plotting wordcloud and
word frequency bar chart
lemmatize: bool, default False
Lemmatize the words in the text data for plotting wordcloud and
word frequency bar chart
stem: bool, default False
Apply Potter Stem on the text data for plotting wordcloud and
word frequency bar chart
value_range: Optional[Tuple[float, float]], default None
The lower and upper bounds on the range of a numerical column.
Applies when column x is specified and column y is unspecified.
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
""" # pylint: disable=too-many-locals
df = to_dask(df)
if not any((x, y, z)):
return compute_overview(df, bins, ngroups, largest, timeunit, dtype)
if sum(v is None for v in (x, y, z)) == 2:
col: str = cast(str, x or y or z)
return compute_univariate(
df,
col,
bins,
ngroups,
largest,
timeunit,
top_words,
stopword,
lemmatize,
stem,
value_range,
dtype,
)
if sum(v is None for v in (x, y, z)) == 1:
x, y = (v for v in (x, y, z) if v is not None)
return compute_bivariate(
df,
x,
y,
bins,
ngroups,
largest,
nsubgroups,
timeunit,
agg,
sample_size,
dtype,
)
if x is not None and y is not None and z is not None:
return compute_trivariate(df, x, y, z, ngroups, largest, timeunit, agg, dtype)
raise ValueError("not possible")
|
https://github.com/sfu-db/dataprep/issues/299
|
nan_value = float("NaN")
df = pd.DataFrame(5 * [[1, nan_value]]
... )
df
0 1
0 1 NaN
1 1 NaN
2 1 NaN
3 1 NaN
4 1 NaN
rep = plot(df)
C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\numpy\lib\histograms.py:433: RuntimeWarning: invalid value encountered in greater
if np.any(bin_edges[:-1] > bin_edges[1:]):
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\__init__.py", line 165, in plot
figure = render(intermediate, yscale=yscale, tile_size=tile_size)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 1920, in render
visual_elem = render_distribution_grid(
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 1565, in render_distribution_grid
fig = hist_viz(data, nrows, col, yscale, plot_width, plot_height, False)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 441, in hist_viz
fig = Figure(
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\plotting\figure.py", line 155, in __init__
super().__init__(*arg, **kw)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\model.py", line 234, in __init__
super().__init__(**kwargs)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\has_props.py", line 247, in __init__
setattr(self, name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\has_props.py", line 274, in __setattr__
super().__setattr__(name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\descriptors.py", line 539, in __set__
self._internal_set(obj, value, setter=setter)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\descriptors.py", line 760, in _internal_set
value = self.property.prepare_value(obj, self.name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\bases.py", line 331, in prepare_value
raise e
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\bases.py", line 324, in prepare_value
self.validate(value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\instance.py", line 112, in validate
raise ValueError(msg)
ValueError: expected an instance of type Title, got 0 of type int
|
ValueError
|
def wordcloud_viz(
word_cnts: pd.Series,
plot_width: int,
plot_height: int,
) -> Panel:
"""
Visualize the word cloud
""" # pylint: disable=unsubscriptable-object
ellipse_mask = np.array(
Image.open(f"{Path(__file__).parent.parent.parent}/assets/ellipse.jpg")
)
wordcloud = WordCloud(background_color="white", mask=ellipse_mask)
wordcloud.generate_from_frequencies(word_cnts)
wcarr = wordcloud.to_array().astype(np.uint8)
# use image_rgba following this example
# https://docs.bokeh.org/en/latest/docs/gallery/image_rgba.html
img = np.empty(wcarr.shape[:2], dtype=np.uint32)
view = img.view(dtype=np.uint8).reshape((*wcarr.shape[:2], 4))
alpha = np.full((*wcarr.shape[:2], 1), 255, dtype=np.uint8)
view[:] = np.concatenate([wcarr, alpha], axis=2)[::-1]
fig = figure(
plot_width=plot_width,
plot_height=plot_height,
title="Word Cloud",
x_range=(0, 1),
y_range=(0, 1),
toolbar_location=None,
)
fig.image_rgba(image=[img], x=0, y=0, dw=1, dh=1)
fig.axis.visible = False
fig.grid.visible = False
return Panel(child=row(fig), title="Word Cloud")
|
def wordcloud_viz(
word_cnts: pd.Series,
plot_width: int,
plot_height: int,
) -> Panel:
"""
Visualize the word cloud
""" # pylint: disable=unsubscriptable-object
ellipse_mask = np.array(
Image.open(f"{Path(__file__).parent.parent.parent}/assets/ellipse.jpg")
)
wordcloud = WordCloud(
background_color="white", mask=ellipse_mask, width=800, height=400
)
wordcloud.generate_from_frequencies(word_cnts)
wcimg = wordcloud.to_array().astype(np.uint8)
alpha = np.full([*wcimg.shape[:2], 1], 255, dtype=np.uint8)
wcimg = np.concatenate([wcimg, alpha], axis=2)[::-1, :]
fig = figure(
plot_width=plot_width,
plot_height=plot_height,
title="Word Cloud",
x_range=(0, 1),
y_range=(0, 1),
toolbar_location=None,
)
fig.image_rgba(image=[wcimg], x=0, y=0, dh=1, dw=1)
fig.axis.visible = False
fig.grid.visible = False
return Panel(child=row(fig), title="Word Cloud")
|
https://github.com/sfu-db/dataprep/issues/299
|
nan_value = float("NaN")
df = pd.DataFrame(5 * [[1, nan_value]]
... )
df
0 1
0 1 NaN
1 1 NaN
2 1 NaN
3 1 NaN
4 1 NaN
rep = plot(df)
C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\numpy\lib\histograms.py:433: RuntimeWarning: invalid value encountered in greater
if np.any(bin_edges[:-1] > bin_edges[1:]):
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\__init__.py", line 165, in plot
figure = render(intermediate, yscale=yscale, tile_size=tile_size)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 1920, in render
visual_elem = render_distribution_grid(
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 1565, in render_distribution_grid
fig = hist_viz(data, nrows, col, yscale, plot_width, plot_height, False)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 441, in hist_viz
fig = Figure(
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\plotting\figure.py", line 155, in __init__
super().__init__(*arg, **kw)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\model.py", line 234, in __init__
super().__init__(**kwargs)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\has_props.py", line 247, in __init__
setattr(self, name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\has_props.py", line 274, in __setattr__
super().__setattr__(name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\descriptors.py", line 539, in __set__
self._internal_set(obj, value, setter=setter)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\descriptors.py", line 760, in _internal_set
value = self.property.prepare_value(obj, self.name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\bases.py", line 331, in prepare_value
raise e
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\bases.py", line 324, in prepare_value
self.validate(value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\instance.py", line 112, in validate
raise ValueError(msg)
ValueError: expected an instance of type Title, got 0 of type int
|
ValueError
|
def pie_viz(
df: pd.DataFrame,
nrows: int,
col: str,
plot_width: int,
plot_height: int,
) -> Panel:
"""
Render a pie chart
"""
npresent = df[col].sum()
if nrows > npresent:
df = df.append(pd.DataFrame({col: [nrows - npresent]}, index=["Others"]))
df["pct"] = df[col] / nrows * 100
df["angle"] = df[col] / nrows * 2 * np.pi
tooltips = [(col, "@index"), ("Count", f"@{col}"), ("Percent", "@pct{0.2f}%")]
fig = Figure(
plot_width=plot_width,
plot_height=plot_height,
title=col,
toolbar_location=None,
tools="hover",
tooltips=tooltips,
)
color_list = CATEGORY20 * (len(df) // len(CATEGORY20) + 1)
df["colour"] = color_list[0 : len(df)]
df.index = df.index.astype(str)
df.index = df.index.map(lambda x: x[0:13] + "..." if len(x) > 13 else x)
pie = fig.wedge(
x=0,
y=1,
radius=0.9,
start_angle=cumsum("angle", include_zero=True),
end_angle=cumsum("angle"),
line_color="white",
fill_color="colour",
source=df,
)
legend = Legend(items=[LegendItem(label=dict(field="index"), renderers=[pie])])
legend.label_text_font_size = "8pt"
fig.add_layout(legend, "right")
tweak_figure(fig, "pie")
fig.axis.major_label_text_font_size = "0pt"
fig.axis.major_tick_line_color = None
return Panel(child=row(fig), title="Pie Chart")
|
def pie_viz(
df: pd.DataFrame,
nrows: int,
col: str,
plot_width: int,
plot_height: int,
) -> Panel:
"""
Render a pie chart
"""
npresent = df[col].sum()
if nrows > npresent:
df = df.append(pd.DataFrame({col: [nrows - npresent]}, index=["Others"]))
df["pct"] = df[col] / nrows * 100
df["angle"] = df[col] / npresent * 2 * np.pi
tooltips = [(col, "@index"), ("Count", f"@{col}"), ("Percent", "@pct{0.2f}%")]
fig = Figure(
plot_width=plot_width,
plot_height=plot_height,
title=col,
toolbar_location=None,
tools="hover",
tooltips=tooltips,
)
color_list = CATEGORY20 * (len(df) // len(CATEGORY20) + 1)
df["colour"] = color_list[0 : len(df)]
df.index = df.index.astype(str)
df.index = df.index.map(lambda x: x[0:13] + "..." if len(x) > 13 else x)
pie = fig.wedge(
x=0,
y=1,
radius=0.9,
start_angle=cumsum("angle", include_zero=True),
end_angle=cumsum("angle"),
line_color="white",
fill_color="colour",
source=df,
)
legend = Legend(items=[LegendItem(label=dict(field="index"), renderers=[pie])])
legend.label_text_font_size = "8pt"
fig.add_layout(legend, "right")
tweak_figure(fig, "pie")
fig.axis.major_label_text_font_size = "0pt"
fig.axis.major_tick_line_color = None
return Panel(child=row(fig), title="Pie Chart")
|
https://github.com/sfu-db/dataprep/issues/299
|
nan_value = float("NaN")
df = pd.DataFrame(5 * [[1, nan_value]]
... )
df
0 1
0 1 NaN
1 1 NaN
2 1 NaN
3 1 NaN
4 1 NaN
rep = plot(df)
C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\numpy\lib\histograms.py:433: RuntimeWarning: invalid value encountered in greater
if np.any(bin_edges[:-1] > bin_edges[1:]):
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\__init__.py", line 165, in plot
figure = render(intermediate, yscale=yscale, tile_size=tile_size)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 1920, in render
visual_elem = render_distribution_grid(
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 1565, in render_distribution_grid
fig = hist_viz(data, nrows, col, yscale, plot_width, plot_height, False)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 441, in hist_viz
fig = Figure(
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\plotting\figure.py", line 155, in __init__
super().__init__(*arg, **kw)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\model.py", line 234, in __init__
super().__init__(**kwargs)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\has_props.py", line 247, in __init__
setattr(self, name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\has_props.py", line 274, in __setattr__
super().__setattr__(name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\descriptors.py", line 539, in __set__
self._internal_set(obj, value, setter=setter)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\descriptors.py", line 760, in _internal_set
value = self.property.prepare_value(obj, self.name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\bases.py", line 331, in prepare_value
raise e
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\bases.py", line 324, in prepare_value
self.validate(value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\instance.py", line 112, in validate
raise ValueError(msg)
ValueError: expected an instance of type Title, got 0 of type int
|
ValueError
|
def hist_viz(
hist: Tuple[np.ndarray, np.ndarray],
nrows: int,
col: str,
yscale: str,
plot_width: int,
plot_height: int,
show_yticks: bool,
) -> Figure:
"""
Render a histogram
"""
# pylint: disable=too-many-arguments,too-many-locals
counts, bins = hist
if sum(counts) == 0:
return _empty_figure(col, plot_height, plot_width)
intvls = _format_bin_intervals(bins)
df = pd.DataFrame(
{
"intvl": intvls,
"left": bins[:-1],
"right": bins[1:],
"freq": counts,
"pct": counts / nrows * 100,
}
)
tooltips = [("Bin", "@intvl"), ("Frequency", "@freq"), ("Percent", "@pct{0.2f}%")]
fig = Figure(
plot_width=plot_width,
plot_height=plot_height,
title=col,
toolbar_location=None,
tools="",
y_axis_type=yscale,
)
bottom = 0 if yscale == "linear" or df.empty else df["freq"].min() / 2
fig.quad(
source=df,
left="left",
right="right",
bottom=bottom,
alpha=0.5,
top="freq",
fill_color="#6baed6",
)
hover = HoverTool(
tooltips=tooltips,
mode="vline",
)
fig.add_tools(hover)
tweak_figure(fig, "hist", show_yticks)
fig.yaxis.axis_label = "Frequency"
_format_axis(fig, df.iloc[0]["left"], df.iloc[-1]["right"], "x")
if show_yticks:
fig.xaxis.axis_label = col
if yscale == "linear":
_format_axis(fig, 0, df["freq"].max(), "y")
return fig
|
def hist_viz(
hist: Tuple[np.ndarray, np.ndarray],
nrows: int,
col: str,
yscale: str,
plot_width: int,
plot_height: int,
show_yticks: bool,
) -> Figure:
"""
Render a histogram
"""
# pylint: disable=too-many-arguments,too-many-locals
counts, bins = hist
intvls = _format_bin_intervals(bins)
df = pd.DataFrame(
{
"intvl": intvls,
"left": bins[:-1],
"right": bins[1:],
"freq": counts,
"pct": counts / nrows * 100,
}
)
tooltips = [("Bin", "@intvl"), ("Frequency", "@freq"), ("Percent", "@pct{0.2f}%")]
fig = Figure(
plot_width=plot_width,
plot_height=plot_height,
title=col,
toolbar_location=None,
tools="",
y_axis_type=yscale,
)
bottom = 0 if yscale == "linear" or df.empty else df["freq"].min() / 2
fig.quad(
source=df,
left="left",
right="right",
bottom=bottom,
alpha=0.5,
top="freq",
fill_color="#6baed6",
)
hover = HoverTool(
tooltips=tooltips,
mode="vline",
)
fig.add_tools(hover)
tweak_figure(fig, "hist", show_yticks)
fig.yaxis.axis_label = "Frequency"
if not df.empty:
_format_axis(fig, df.iloc[0]["left"], df.iloc[-1]["right"], "x")
if show_yticks:
fig.xaxis.axis_label = col
if yscale == "linear":
_format_axis(fig, 0, df["freq"].max(), "y")
return fig
|
https://github.com/sfu-db/dataprep/issues/299
|
nan_value = float("NaN")
df = pd.DataFrame(5 * [[1, nan_value]]
... )
df
0 1
0 1 NaN
1 1 NaN
2 1 NaN
3 1 NaN
4 1 NaN
rep = plot(df)
C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\numpy\lib\histograms.py:433: RuntimeWarning: invalid value encountered in greater
if np.any(bin_edges[:-1] > bin_edges[1:]):
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\__init__.py", line 165, in plot
figure = render(intermediate, yscale=yscale, tile_size=tile_size)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 1920, in render
visual_elem = render_distribution_grid(
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 1565, in render_distribution_grid
fig = hist_viz(data, nrows, col, yscale, plot_width, plot_height, False)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\dataprep\eda\distribution\render.py", line 441, in hist_viz
fig = Figure(
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\plotting\figure.py", line 155, in __init__
super().__init__(*arg, **kw)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\model.py", line 234, in __init__
super().__init__(**kwargs)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\has_props.py", line 247, in __init__
setattr(self, name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\has_props.py", line 274, in __setattr__
super().__setattr__(name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\descriptors.py", line 539, in __set__
self._internal_set(obj, value, setter=setter)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\descriptors.py", line 760, in _internal_set
value = self.property.prepare_value(obj, self.name, value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\bases.py", line 331, in prepare_value
raise e
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\bases.py", line 324, in prepare_value
self.validate(value)
File "C:\Users\Lukas\AppData\Local\Programs\Python\Python38\lib\site-packages\bokeh\core\property\instance.py", line 112, in validate
raise ValueError(msg)
ValueError: expected an instance of type Title, got 0 of type int
|
ValueError
|
def histogram(
srs: dd.Series,
bins: Optional[int] = None,
return_edges: bool = True,
range: Optional[Tuple[int, int]] = None, # pylint: disable=redefined-builtin
dtype: Optional[DTypeDef] = None,
) -> Union[Tuple[da.Array, da.Array], Tuple[da.Array, da.Array, da.Array]]:
"""
Calculate "histogram" for both numerical and categorical
"""
if is_dtype(detect_dtype(srs, dtype), Continuous()):
if range is not None:
minimum, maximum = range
else:
minimum, maximum = srs.min(axis=0), srs.max(axis=0)
minimum, maximum = dask.compute(minimum, maximum)
assert bins is not None, (
"num_bins cannot be None if calculating numerical histograms"
)
counts, edges = da.histogram(
srs.to_dask_array(), bins, range=[minimum, maximum]
)
centers = (edges[:-1] + edges[1:]) / 2
if not return_edges:
return counts, centers
return counts, centers, edges
elif is_dtype(detect_dtype(srs, dtype), Nominal()):
value_counts = srs.value_counts()
counts = value_counts.to_dask_array()
# Dask array dones't understand the pandas dtypes such as categorical type.
# We convert these types into str before calling into `to_dask_array`.
if is_pandas_categorical(value_counts.index.dtype):
centers = value_counts.index.astype("str").to_dask_array()
else:
centers = value_counts.index.to_dask_array()
return (counts, centers)
else:
raise UnreachableError()
|
def histogram(
srs: dd.Series,
bins: Optional[int] = None,
return_edges: bool = True,
range: Optional[Tuple[int, int]] = None, # pylint: disable=redefined-builtin
dtype: Optional[DTypeDef] = None,
) -> Union[Tuple[da.Array, da.Array], Tuple[da.Array, da.Array, da.Array]]:
"""
Calculate histogram for both numerical and categorical
"""
if is_dtype(detect_dtype(srs, dtype), Continuous()):
if range is not None:
minimum, maximum = range
else:
minimum, maximum = srs.min(axis=0), srs.max(axis=0)
minimum, maximum = dask.compute(minimum, maximum)
assert bins is not None, (
"num_bins cannot be None if calculating numerical histograms"
)
counts, edges = da.histogram(
srs.to_dask_array(), bins, range=[minimum, maximum]
)
centers = (edges[:-1] + edges[1:]) / 2
if not return_edges:
return counts, centers
return counts, centers, edges
elif is_dtype(detect_dtype(srs, dtype), Nominal()):
value_counts = srs.value_counts()
counts = value_counts.to_dask_array()
# Dask array dones't understand the pandas dtypes such as categorical type.
# We convert these types into str before calling into `to_dask_array`.
if is_pandas_categorical(value_counts.index.dtype):
centers = value_counts.index.astype("str").to_dask_array()
else:
centers = value_counts.index.to_dask_array()
return (counts, centers)
else:
raise UnreachableError()
|
https://github.com/sfu-db/dataprep/issues/219
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-50-47c276e219b8> in <module>
----> 1 plot_missing(X)
~\miniconda3\lib\site-packages\dataprep\eda\missing\__init__.py in plot_missing(df, x, y, bins, ncols, ndist_sample, dtype)
63 df, x, y, dtype=dtype, bins=bins, ncols=ncols, ndist_sample=ndist_sample
64 )
---> 65 fig = render_missing(itmdt)
66 return Report(fig)
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_missing(itmdt, plot_width, plot_height, palette)
61 elif itmdt.visual_type == "missing_spectrum_heatmap":
62 return render_missing_heatmap(
---> 63 itmdt, plot_width, plot_height, palette or BIPALETTE
64 )
65
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_missing_heatmap(itmdt, plot_width, plot_height, palette)
287 pan_spectrum = Panel(child=fig_spectrum, title="Spectrum")
288 tabs.append(pan_spectrum)
--> 289 fig_heatmap = render_heatmaps_tab(itmdt, plot_width, plot_height, palette)
290 pan_heatmap = Panel(child=fig_heatmap, title="Heatmap")
291 tabs.append(pan_heatmap)
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_heatmaps_tab(itmdt, plot_width, plot_height, palette)
307 np.triu(np.ones(df.shape)).astype(np.bool) # pylint: disable=no-member
308 ).T
--> 309 df = df.unstack().reset_index(name="correlation")
310 df = df.rename(columns={"level_0": "x", "level_1": "y"})
311 df = df[df["x"] != df["y"]]
~\miniconda3\lib\site-packages\pandas\core\frame.py in unstack(self, level, fill_value)
6384 from pandas.core.reshape.reshape import unstack
6385
-> 6386 return unstack(self, level, fill_value)
6387
6388 _shared_docs[
~\miniconda3\lib\site-packages\pandas\core\reshape\reshape.py in unstack(obj, level, fill_value)
408 return _unstack_frame(obj, level, fill_value=fill_value)
409 else:
--> 410 return obj.T.stack(dropna=False)
411 else:
412 if is_extension_array_dtype(obj.dtype):
~\miniconda3\lib\site-packages\pandas\core\frame.py in stack(self, level, dropna)
6249 return stack_multiple(self, level, dropna=dropna)
6250 else:
-> 6251 return stack(self, level, dropna=dropna)
6252
6253 def explode(self, column: Union[str, Tuple]) -> "DataFrame":
~\miniconda3\lib\site-packages\pandas\core\reshape\reshape.py in stack(frame, level, dropna)
541 # we concatenate instead.
542 dtypes = list(frame.dtypes.values)
--> 543 dtype = dtypes[0]
544
545 if is_extension_array_dtype(dtype):
IndexError: list index out of range
|
IndexError
|
def missing_spectrum(
df: dd.DataFrame, bins: int, ncols: int
) -> Tuple[dd.DataFrame, dd.DataFrame]:
"""
Calculate a missing spectrum for each column
"""
# pylint: disable=too-many-locals
num_bins = min(bins, len(df) - 1)
df = df.iloc[:, :ncols]
cols = df.columns[:ncols]
ncols = len(cols)
nrows = len(df)
chunk_size = len(df) // num_bins
data = df.isnull().to_dask_array()
data.compute_chunk_sizes()
data = data.rechunk((chunk_size, None))
notnull_counts = data.sum(axis=0) / data.shape[0]
total_missing_percs = {col: notnull_counts[idx] for idx, col in enumerate(cols)}
spectrum_missing_percs = data.map_blocks(
missing_perc_blockwise, chunks=(1, data.shape[1]), dtype=float
)
nsegments = len(spectrum_missing_percs)
locs0 = da.arange(nsegments) * chunk_size
locs1 = da.minimum(locs0 + chunk_size, nrows)
locs_middle = locs0 + chunk_size / 2
df = dd.from_dask_array(
da.repeat(da.from_array(cols.values, (1,)), nsegments),
columns=["column"],
)
df = df.assign(
location=da.tile(locs_middle, ncols),
missing_rate=spectrum_missing_percs.T.ravel(),
loc_start=da.tile(locs0, ncols),
loc_end=da.tile(locs1, ncols),
)
return df, total_missing_percs
|
def missing_spectrum(df: dd.DataFrame, bins: int, ncols: int) -> Intermediate:
"""
Calculate a missing spectrum for each column
"""
# pylint: disable=too-many-locals
num_bins = min(bins, len(df) - 1)
df = df.iloc[:, :ncols]
cols = df.columns[:ncols]
ncols = len(cols)
nrows = len(df)
chunk_size = len(df) // num_bins
data = df.isnull().to_dask_array()
data.compute_chunk_sizes()
data = data.rechunk((chunk_size, None))
(notnull_counts,) = dd.compute(data.sum(axis=0) / data.shape[0])
missing_percent = {col: notnull_counts[idx] for idx, col in enumerate(cols)}
missing_percs = data.map_blocks(missing_perc_blockwise, dtype=float).compute()
locs0 = np.arange(len(missing_percs)) * chunk_size
locs1 = np.minimum(locs0 + chunk_size, nrows)
locs_middle = locs0 + chunk_size / 2
df = pd.DataFrame(
{
"column": np.repeat(cols.values, len(missing_percs)),
"location": np.tile(locs_middle, ncols),
"missing_rate": missing_percs.T.ravel(),
"loc_start": np.tile(locs0, ncols),
"loc_end": np.tile(locs1, ncols),
}
)
return Intermediate(
data=df,
missing_percent=missing_percent,
visual_type="missing_spectrum",
)
|
https://github.com/sfu-db/dataprep/issues/219
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-50-47c276e219b8> in <module>
----> 1 plot_missing(X)
~\miniconda3\lib\site-packages\dataprep\eda\missing\__init__.py in plot_missing(df, x, y, bins, ncols, ndist_sample, dtype)
63 df, x, y, dtype=dtype, bins=bins, ncols=ncols, ndist_sample=ndist_sample
64 )
---> 65 fig = render_missing(itmdt)
66 return Report(fig)
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_missing(itmdt, plot_width, plot_height, palette)
61 elif itmdt.visual_type == "missing_spectrum_heatmap":
62 return render_missing_heatmap(
---> 63 itmdt, plot_width, plot_height, palette or BIPALETTE
64 )
65
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_missing_heatmap(itmdt, plot_width, plot_height, palette)
287 pan_spectrum = Panel(child=fig_spectrum, title="Spectrum")
288 tabs.append(pan_spectrum)
--> 289 fig_heatmap = render_heatmaps_tab(itmdt, plot_width, plot_height, palette)
290 pan_heatmap = Panel(child=fig_heatmap, title="Heatmap")
291 tabs.append(pan_heatmap)
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_heatmaps_tab(itmdt, plot_width, plot_height, palette)
307 np.triu(np.ones(df.shape)).astype(np.bool) # pylint: disable=no-member
308 ).T
--> 309 df = df.unstack().reset_index(name="correlation")
310 df = df.rename(columns={"level_0": "x", "level_1": "y"})
311 df = df[df["x"] != df["y"]]
~\miniconda3\lib\site-packages\pandas\core\frame.py in unstack(self, level, fill_value)
6384 from pandas.core.reshape.reshape import unstack
6385
-> 6386 return unstack(self, level, fill_value)
6387
6388 _shared_docs[
~\miniconda3\lib\site-packages\pandas\core\reshape\reshape.py in unstack(obj, level, fill_value)
408 return _unstack_frame(obj, level, fill_value=fill_value)
409 else:
--> 410 return obj.T.stack(dropna=False)
411 else:
412 if is_extension_array_dtype(obj.dtype):
~\miniconda3\lib\site-packages\pandas\core\frame.py in stack(self, level, dropna)
6249 return stack_multiple(self, level, dropna=dropna)
6250 else:
-> 6251 return stack(self, level, dropna=dropna)
6252
6253 def explode(self, column: Union[str, Tuple]) -> "DataFrame":
~\miniconda3\lib\site-packages\pandas\core\reshape\reshape.py in stack(frame, level, dropna)
541 # we concatenate instead.
542 dtypes = list(frame.dtypes.values)
--> 543 dtype = dtypes[0]
544
545 if is_extension_array_dtype(dtype):
IndexError: list index out of range
|
IndexError
|
def compute_missing(
# pylint: disable=too-many-arguments
df: Union[pd.DataFrame, dd.DataFrame],
x: Optional[str] = None,
y: Optional[str] = None,
*,
bins: int = 30,
ncols: int = 30,
ndist_sample: int = 100,
dtype: Optional[DTypeDef] = None,
) -> Intermediate:
"""
This function is designed to deal with missing values
There are three functions: plot_missing(df), plot_missing(df, x)
plot_missing(df, x, y)
Parameters
----------
df
the pandas data_frame for which plots are calculated for each column
x
a valid column name of the data frame
y
a valid column name of the data frame
ncols
The number of columns in the figure
bins
The number of rows in the figure
ndist_sample
The number of sample points
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
Examples
--------
>>> from dataprep.eda.missing.computation import plot_missing
>>> import pandas as pd
>>> df = pd.read_csv("suicide-rate.csv")
>>> plot_missing(df, "HDI_for_year")
>>> plot_missing(df, "HDI_for_year", "population")
"""
df = to_dask(df)
# pylint: disable=no-else-raise
if x is None and y is not None:
raise ValueError("x cannot be None while y has value")
elif x is not None and y is None:
return missing_impact_1vn(df, dtype=dtype, x=x, bins=bins)
elif x is not None and y is not None:
return missing_impact_1v1(
df, dtype=dtype, x=x, y=y, bins=bins, ndist_sample=ndist_sample
)
else:
spectrum, total_missing, bars, heatmap = dd.compute(
*missing_spectrum(df, bins=bins, ncols=ncols),
missing_bars(df),
missing_heatmap(df),
)
return Intermediate(
data_total_missing=total_missing,
data_spectrum=spectrum,
data_bars=bars,
data_heatmap=heatmap,
visual_type="missing_impact",
)
|
def compute_missing(
# pylint: disable=too-many-arguments
df: Union[pd.DataFrame, dd.DataFrame],
x: Optional[str] = None,
y: Optional[str] = None,
*,
bins: int = 30,
ncols: int = 30,
ndist_sample: int = 100,
dtype: Optional[DTypeDef] = None,
) -> Intermediate:
"""
This function is designed to deal with missing values
There are three functions: plot_missing(df), plot_missing(df, x)
plot_missing(df, x, y)
Parameters
----------
df
the pandas data_frame for which plots are calculated for each column
x
a valid column name of the data frame
y
a valid column name of the data frame
ncols
The number of columns in the figure
bins
The number of rows in the figure
ndist_sample
The number of sample points
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
Examples
----------
>>> from dataprep.eda.missing.computation import plot_missing
>>> import pandas as pd
>>> df = pd.read_csv("suicide-rate.csv")
>>> plot_missing(df, "HDI_for_year")
>>> plot_missing(df, "HDI_for_year", "population")
"""
df = to_dask(df)
# pylint: disable=no-else-raise
if x is None and y is not None:
raise ValueError("x cannot be None while y has value")
elif x is not None and y is None:
return missing_impact_1vn(df, dtype=dtype, x=x, bins=bins)
elif x is not None and y is not None:
return missing_impact_1v1(
df, dtype=dtype, x=x, y=y, bins=bins, ndist_sample=ndist_sample
)
else:
# return missing_spectrum(df, bins=bins, ncols=ncols)
return missing_spectrum_tabs(df, bins=bins, ncols=ncols)
|
https://github.com/sfu-db/dataprep/issues/219
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-50-47c276e219b8> in <module>
----> 1 plot_missing(X)
~\miniconda3\lib\site-packages\dataprep\eda\missing\__init__.py in plot_missing(df, x, y, bins, ncols, ndist_sample, dtype)
63 df, x, y, dtype=dtype, bins=bins, ncols=ncols, ndist_sample=ndist_sample
64 )
---> 65 fig = render_missing(itmdt)
66 return Report(fig)
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_missing(itmdt, plot_width, plot_height, palette)
61 elif itmdt.visual_type == "missing_spectrum_heatmap":
62 return render_missing_heatmap(
---> 63 itmdt, plot_width, plot_height, palette or BIPALETTE
64 )
65
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_missing_heatmap(itmdt, plot_width, plot_height, palette)
287 pan_spectrum = Panel(child=fig_spectrum, title="Spectrum")
288 tabs.append(pan_spectrum)
--> 289 fig_heatmap = render_heatmaps_tab(itmdt, plot_width, plot_height, palette)
290 pan_heatmap = Panel(child=fig_heatmap, title="Heatmap")
291 tabs.append(pan_heatmap)
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_heatmaps_tab(itmdt, plot_width, plot_height, palette)
307 np.triu(np.ones(df.shape)).astype(np.bool) # pylint: disable=no-member
308 ).T
--> 309 df = df.unstack().reset_index(name="correlation")
310 df = df.rename(columns={"level_0": "x", "level_1": "y"})
311 df = df[df["x"] != df["y"]]
~\miniconda3\lib\site-packages\pandas\core\frame.py in unstack(self, level, fill_value)
6384 from pandas.core.reshape.reshape import unstack
6385
-> 6386 return unstack(self, level, fill_value)
6387
6388 _shared_docs[
~\miniconda3\lib\site-packages\pandas\core\reshape\reshape.py in unstack(obj, level, fill_value)
408 return _unstack_frame(obj, level, fill_value=fill_value)
409 else:
--> 410 return obj.T.stack(dropna=False)
411 else:
412 if is_extension_array_dtype(obj.dtype):
~\miniconda3\lib\site-packages\pandas\core\frame.py in stack(self, level, dropna)
6249 return stack_multiple(self, level, dropna=dropna)
6250 else:
-> 6251 return stack(self, level, dropna=dropna)
6252
6253 def explode(self, column: Union[str, Tuple]) -> "DataFrame":
~\miniconda3\lib\site-packages\pandas\core\reshape\reshape.py in stack(frame, level, dropna)
541 # we concatenate instead.
542 dtypes = list(frame.dtypes.values)
--> 543 dtype = dtypes[0]
544
545 if is_extension_array_dtype(dtype):
IndexError: list index out of range
|
IndexError
|
def render_missing(
itmdt: Intermediate,
plot_width: int = 500,
plot_height: int = 500,
) -> LayoutDOM:
"""
@Jinglin write here
"""
if itmdt.visual_type == "missing_impact":
return render_missing_impact(itmdt, plot_width, plot_height)
elif itmdt.visual_type == "missing_impact_1vn":
return render_missing_impact_1vn(itmdt, plot_width, plot_height)
elif itmdt.visual_type == "missing_impact_1v1":
return render_missing_impact_1v1(itmdt, plot_width, plot_height)
else:
raise UnreachableError
|
def render_missing(
itmdt: Intermediate,
plot_width: int = 500,
plot_height: int = 500,
palette: Optional[Sequence[str]] = None,
) -> LayoutDOM:
"""
@Jinglin write here
"""
if itmdt.visual_type == "missing_spectrum":
return render_missing_spectrum(itmdt, plot_width, plot_height)
elif itmdt.visual_type == "missing_impact_1vn":
return render_missing_impact_1vn(itmdt, plot_width, plot_height)
elif itmdt.visual_type == "missing_impact_1v1":
return render_missing_impact_1v1(itmdt, plot_width, plot_height)
elif itmdt.visual_type == "missing_spectrum_heatmap":
return render_missing_heatmap(
itmdt, plot_width, plot_height, palette or BIPALETTE
)
else:
raise UnreachableError
|
https://github.com/sfu-db/dataprep/issues/219
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-50-47c276e219b8> in <module>
----> 1 plot_missing(X)
~\miniconda3\lib\site-packages\dataprep\eda\missing\__init__.py in plot_missing(df, x, y, bins, ncols, ndist_sample, dtype)
63 df, x, y, dtype=dtype, bins=bins, ncols=ncols, ndist_sample=ndist_sample
64 )
---> 65 fig = render_missing(itmdt)
66 return Report(fig)
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_missing(itmdt, plot_width, plot_height, palette)
61 elif itmdt.visual_type == "missing_spectrum_heatmap":
62 return render_missing_heatmap(
---> 63 itmdt, plot_width, plot_height, palette or BIPALETTE
64 )
65
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_missing_heatmap(itmdt, plot_width, plot_height, palette)
287 pan_spectrum = Panel(child=fig_spectrum, title="Spectrum")
288 tabs.append(pan_spectrum)
--> 289 fig_heatmap = render_heatmaps_tab(itmdt, plot_width, plot_height, palette)
290 pan_heatmap = Panel(child=fig_heatmap, title="Heatmap")
291 tabs.append(pan_heatmap)
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_heatmaps_tab(itmdt, plot_width, plot_height, palette)
307 np.triu(np.ones(df.shape)).astype(np.bool) # pylint: disable=no-member
308 ).T
--> 309 df = df.unstack().reset_index(name="correlation")
310 df = df.rename(columns={"level_0": "x", "level_1": "y"})
311 df = df[df["x"] != df["y"]]
~\miniconda3\lib\site-packages\pandas\core\frame.py in unstack(self, level, fill_value)
6384 from pandas.core.reshape.reshape import unstack
6385
-> 6386 return unstack(self, level, fill_value)
6387
6388 _shared_docs[
~\miniconda3\lib\site-packages\pandas\core\reshape\reshape.py in unstack(obj, level, fill_value)
408 return _unstack_frame(obj, level, fill_value=fill_value)
409 else:
--> 410 return obj.T.stack(dropna=False)
411 else:
412 if is_extension_array_dtype(obj.dtype):
~\miniconda3\lib\site-packages\pandas\core\frame.py in stack(self, level, dropna)
6249 return stack_multiple(self, level, dropna=dropna)
6250 else:
-> 6251 return stack(self, level, dropna=dropna)
6252
6253 def explode(self, column: Union[str, Tuple]) -> "DataFrame":
~\miniconda3\lib\site-packages\pandas\core\reshape\reshape.py in stack(frame, level, dropna)
541 # we concatenate instead.
542 dtypes = list(frame.dtypes.values)
--> 543 dtype = dtypes[0]
544
545 if is_extension_array_dtype(dtype):
IndexError: list index out of range
|
IndexError
|
def render_missing_spectrum(
data_spectrum: pd.DataFrame,
data_total_missing: pd.DataFrame,
plot_width: int,
plot_height: int,
) -> Figure:
"""
Render the missing specturm
"""
mapper, color_bar = create_color_mapper()
df = data_spectrum.copy()
df["column_with_perc"] = df["column"].apply(
lambda c: fuse_missing_perc(cut_long_name(c), data_total_missing[c])
)
radius = (df["loc_end"][0] - df["loc_start"][0]) / 2
if (df["loc_end"] - df["loc_start"]).max() <= 1:
loc_tooltip = "@loc_start{1}"
else:
loc_tooltip = "@loc_start{1}~@loc_end{1}"
tooltips = [
("Column", "@column"),
("Loc", loc_tooltip),
("Missing%", "@missing_rate{1%}"),
]
x_range = FactorRange(*df["column_with_perc"].unique())
minimum, maximum = df["location"].min(), df["location"].max()
y_range = Range1d(maximum + radius, minimum - radius)
fig = tweak_figure(
Figure(
x_range=x_range,
y_range=y_range,
plot_width=plot_width,
plot_height=plot_height,
x_axis_location="below",
tools="hover",
toolbar_location=None,
tooltips=tooltips,
)
)
fig.xgrid.grid_line_color = None
fig.ygrid.grid_line_color = None
fig.rect(
x="column_with_perc",
y="location",
line_width=0,
width=0.95,
height=radius * 2,
source=df,
fill_color={"field": "missing_rate", "transform": mapper},
line_color=None,
)
fig.add_layout(color_bar, "right")
return fig
|
def render_missing_spectrum(
itmdt: Intermediate, plot_width: int, plot_height: int
) -> Figure:
"""
Render the missing specturm
"""
mapper, color_bar = create_color_mapper()
df = itmdt["data"].copy()
df["column_with_perc"] = df["column"].apply(
lambda c: fuse_missing_perc(cut_long_name(c), itmdt["missing_percent"][c])
)
radius = (df["loc_end"][0] - df["loc_start"][0]) / 2
if (df["loc_end"] - df["loc_start"]).max() <= 1:
loc_tooltip = "@loc_start{1}"
else:
loc_tooltip = "@loc_start{1}~@loc_end{1}"
tooltips = [
("Column", "@column"),
("Loc", loc_tooltip),
("Missing%", "@missing_rate{1%}"),
]
x_range = FactorRange(*df["column_with_perc"].unique())
minimum, maximum = df["location"].min(), df["location"].max()
y_range = Range1d(maximum + radius, minimum - radius)
fig = tweak_figure(
Figure(
x_range=x_range,
y_range=y_range,
plot_width=plot_width,
plot_height=plot_height,
x_axis_location="below",
tools="hover",
toolbar_location=None,
tooltips=tooltips,
)
)
fig.xgrid.grid_line_color = None
fig.ygrid.grid_line_color = None
fig.rect(
x="column_with_perc",
y="location",
line_width=0,
width=0.95,
height=radius * 2,
source=df,
fill_color={"field": "missing_rate", "transform": mapper},
line_color=None,
)
fig.add_layout(color_bar, "right")
return fig
|
https://github.com/sfu-db/dataprep/issues/219
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-50-47c276e219b8> in <module>
----> 1 plot_missing(X)
~\miniconda3\lib\site-packages\dataprep\eda\missing\__init__.py in plot_missing(df, x, y, bins, ncols, ndist_sample, dtype)
63 df, x, y, dtype=dtype, bins=bins, ncols=ncols, ndist_sample=ndist_sample
64 )
---> 65 fig = render_missing(itmdt)
66 return Report(fig)
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_missing(itmdt, plot_width, plot_height, palette)
61 elif itmdt.visual_type == "missing_spectrum_heatmap":
62 return render_missing_heatmap(
---> 63 itmdt, plot_width, plot_height, palette or BIPALETTE
64 )
65
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_missing_heatmap(itmdt, plot_width, plot_height, palette)
287 pan_spectrum = Panel(child=fig_spectrum, title="Spectrum")
288 tabs.append(pan_spectrum)
--> 289 fig_heatmap = render_heatmaps_tab(itmdt, plot_width, plot_height, palette)
290 pan_heatmap = Panel(child=fig_heatmap, title="Heatmap")
291 tabs.append(pan_heatmap)
~\miniconda3\lib\site-packages\dataprep\eda\missing\render.py in render_heatmaps_tab(itmdt, plot_width, plot_height, palette)
307 np.triu(np.ones(df.shape)).astype(np.bool) # pylint: disable=no-member
308 ).T
--> 309 df = df.unstack().reset_index(name="correlation")
310 df = df.rename(columns={"level_0": "x", "level_1": "y"})
311 df = df[df["x"] != df["y"]]
~\miniconda3\lib\site-packages\pandas\core\frame.py in unstack(self, level, fill_value)
6384 from pandas.core.reshape.reshape import unstack
6385
-> 6386 return unstack(self, level, fill_value)
6387
6388 _shared_docs[
~\miniconda3\lib\site-packages\pandas\core\reshape\reshape.py in unstack(obj, level, fill_value)
408 return _unstack_frame(obj, level, fill_value=fill_value)
409 else:
--> 410 return obj.T.stack(dropna=False)
411 else:
412 if is_extension_array_dtype(obj.dtype):
~\miniconda3\lib\site-packages\pandas\core\frame.py in stack(self, level, dropna)
6249 return stack_multiple(self, level, dropna=dropna)
6250 else:
-> 6251 return stack(self, level, dropna=dropna)
6252
6253 def explode(self, column: Union[str, Tuple]) -> "DataFrame":
~\miniconda3\lib\site-packages\pandas\core\reshape\reshape.py in stack(frame, level, dropna)
541 # we concatenate instead.
542 dtypes = list(frame.dtypes.values)
--> 543 dtype = dtypes[0]
544
545 if is_extension_array_dtype(dtype):
IndexError: list index out of range
|
IndexError
|
def compute_univariate(
df: dd.DataFrame,
x: str,
bins: int,
ngroups: int,
largest: bool,
timeunit: str,
value_range: Optional[Tuple[float, float]] = None,
dtype: Optional[DTypeDef] = None,
top_words: Optional[int] = 30,
stopword: Optional[bool] = True,
lemmatize: Optional[bool] = False,
stem: Optional[bool] = False,
) -> Intermediate:
"""
Compute functions for plot(df, x)
Parameters
----------
df
Dataframe from which plots are to be generated
x
A valid column name from the dataframe
bins
For a histogram or box plot with numerical x axis, it defines
the number of equal-width bins to use when grouping.
ngroups
When grouping over a categorical column, it defines the
number of groups to show in the plot. Ie, the number of
bars to show in a bar chart.
largest
If true, when grouping over a categorical column, the groups
with the largest count will be output. If false, the groups
with the smallest count will be output.
timeunit
Defines the time unit to group values over for a datetime column.
It can be "year", "quarter", "month", "week", "day", "hour",
"minute", "second". With default value "auto", it will use the
time unit such that the resulting number of groups is closest to 15.
value_range
The lower and upper bounds on the range of a numerical column.
Applies when column x is specified and column y is unspecified.
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
top_words: int, default 30
Specify the amount of words to show in the wordcloud and
word frequency bar chart
stopword: bool, default True
Eliminate the stopwords in the text data for plotting wordcloud and
word frequency bar chart
lemmatize: bool, default False
Lemmatize the words in the text data for plotting wordcloud and
word frequency bar chart
stem: bool, default False
Apply Potter Stem on the text data for plotting wordcloud and
word frequency bar chart
"""
# pylint: disable=too-many-locals, too-many-arguments
col_dtype = detect_dtype(df[x], dtype)
if is_dtype(col_dtype, Nominal()):
data_cat: List[Any] = []
# reset index for calculating quantile stats
df = df.reset_index()
# stats
data_cat.append(dask.delayed(calc_stats_cat)(df[x]))
# drop nan and empty spaces for plots
df[x].replace("", np.nan)
df = df.dropna(subset=[x])
# data for bar and pie charts
data_cat.append(dask.delayed(calc_bar_pie)(df[x], ngroups, largest))
statsdata_cat, data = dask.compute(*data_cat)
# wordcloud and word frequencies
word_cloud = cal_word_freq(df, x, top_words, stopword, lemmatize, stem)
# length_distribution
length_dist = cal_length_dist(df, x, bins)
return Intermediate(
col=x,
data=data,
statsdata=statsdata_cat,
word_cloud=word_cloud,
length_dist=length_dist,
visual_type="categorical_column",
)
elif is_dtype(col_dtype, Continuous()):
if value_range is not None:
if (
(value_range[0] <= np.nanmax(df[x]))
and (value_range[1] >= np.nanmin(df[x]))
and (value_range[0] < value_range[1])
):
df = df[df[x].between(value_range[0], value_range[1])]
else:
print("Invalid range of values for this column", file=stderr)
data_num: List[Any] = []
# qq plot
qqdata = calc_qqnorm(df[x].dropna())
# kde plot
kdedata = calc_hist_kde(df[x].dropna().values, bins)
# box plot
boxdata = calc_box(df[[x]].dropna(), bins, dtype=dtype)
# histogram
data_num.append(dask.delayed(calc_hist)(df[x], bins))
# stats
data_num.append(
dask.delayed(calc_stats_num)(
df[x],
mean=qqdata[2],
std=qqdata[3],
min=kdedata[3],
max=kdedata[4],
quantile=qqdata[0],
)
)
histdata, statsdata_num = dask.compute(*data_num)
return Intermediate(
col=x,
histdata=histdata,
kdedata=kdedata,
qqdata=qqdata,
boxdata=boxdata,
statsdata=statsdata_num,
visual_type="numerical_column",
)
elif is_dtype(col_dtype, DateTime()):
data_dt: List[Any] = []
# line chart
data_dt.append(dask.delayed(calc_line_dt)(df[[x]], timeunit))
# stats
data_dt.append(dask.delayed(calc_stats_dt)(df[x]))
data, statsdata_dt = dask.compute(*data_dt)
return Intermediate(
col=x,
data=data,
statsdata=statsdata_dt,
visual_type="datetime_column",
)
else:
raise UnreachableError
|
def compute_univariate(
df: dd.DataFrame,
x: str,
bins: int,
ngroups: int,
largest: bool,
timeunit: str,
value_range: Optional[Tuple[float, float]] = None,
dtype: Optional[DTypeDef] = None,
top_words: Optional[int] = 30,
stopword: Optional[bool] = True,
lemmatize: Optional[bool] = False,
stem: Optional[bool] = False,
) -> Intermediate:
"""
Compute functions for plot(df, x)
Parameters
----------
df
Dataframe from which plots are to be generated
x
A valid column name from the dataframe
bins
For a histogram or box plot with numerical x axis, it defines
the number of equal-width bins to use when grouping.
ngroups
When grouping over a categorical column, it defines the
number of groups to show in the plot. Ie, the number of
bars to show in a bar chart.
largest
If true, when grouping over a categorical column, the groups
with the largest count will be output. If false, the groups
with the smallest count will be output.
timeunit
Defines the time unit to group values over for a datetime column.
It can be "year", "quarter", "month", "week", "day", "hour",
"minute", "second". With default value "auto", it will use the
time unit such that the resulting number of groups is closest to 15.
value_range
The lower and upper bounds on the range of a numerical column.
Applies when column x is specified and column y is unspecified.
dtype: str or DType or dict of str or dict of DType, default None
Specify Data Types for designated column or all columns.
E.g. dtype = {"a": Continuous, "b": "Nominal"} or
dtype = {"a": Continuous(), "b": "nominal"}
or dtype = Continuous() or dtype = "Continuous" or dtype = Continuous()
top_words: int, default 30
Specify the amount of words to show in the wordcloud and
word frequency bar chart
stopword: bool, default True
Eliminate the stopwords in the text data for plotting wordcloud and
word frequency bar chart
lemmatize: bool, default False
Lemmatize the words in the text data for plotting wordcloud and
word frequency bar chart
stem: bool, default False
Apply Potter Stem on the text data for plotting wordcloud and
word frequency bar chart
"""
# pylint: disable=too-many-locals, too-many-arguments
col_dtype = detect_dtype(df[x], dtype)
if is_dtype(col_dtype, Nominal()):
# data for bar and pie charts
data_cat: List[Any] = []
data_cat.append(dask.delayed(calc_bar_pie)(df[x], ngroups, largest))
# stats
data_cat.append(dask.delayed(calc_stats_cat)(df[x]))
data, statsdata_cat = dask.compute(*data_cat)
# wordcloud and word frequencies
word_cloud = cal_word_freq(df, x, top_words, stopword, lemmatize, stem)
# length_distribution
length_dist = cal_length_dist(df, x, bins)
return Intermediate(
col=x,
data=data,
statsdata=statsdata_cat,
word_cloud=word_cloud,
length_dist=length_dist,
visual_type="categorical_column",
)
elif is_dtype(col_dtype, Continuous()):
if value_range is not None:
if (
(value_range[0] <= np.nanmax(df[x]))
and (value_range[1] >= np.nanmin(df[x]))
and (value_range[0] < value_range[1])
):
df = df[df[x].between(value_range[0], value_range[1])]
else:
print("Invalid range of values for this column", file=stderr)
data_num: List[Any] = []
# qq plot
qqdata = calc_qqnorm(df[x].dropna())
# kde plot
kdedata = calc_hist_kde(df[x].dropna().values, bins)
# box plot
boxdata = calc_box(df[[x]].dropna(), bins, dtype=dtype)
# histogram
data_num.append(dask.delayed(calc_hist)(df[x], bins))
# stats
data_num.append(
dask.delayed(calc_stats_num)(
df[x],
mean=qqdata[2],
std=qqdata[3],
min=kdedata[3],
max=kdedata[4],
quantile=qqdata[0],
)
)
histdata, statsdata_num = dask.compute(*data_num)
return Intermediate(
col=x,
histdata=histdata,
kdedata=kdedata,
qqdata=qqdata,
boxdata=boxdata,
statsdata=statsdata_num,
visual_type="numerical_column",
)
elif is_dtype(col_dtype, DateTime()):
data_dt: List[Any] = []
# line chart
data_dt.append(dask.delayed(calc_line_dt)(df[[x]], timeunit))
# stats
data_dt.append(dask.delayed(calc_stats_dt)(df[x]))
data, statsdata_dt = dask.compute(*data_dt)
return Intermediate(
col=x,
data=data,
statsdata=statsdata_dt,
visual_type="datetime_column",
)
else:
raise UnreachableError
|
https://github.com/sfu-db/dataprep/issues/208
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-43-f8a3846cc211> in <module>
1 df = pd.DataFrame({"A": ["01-02", "01-02"]})
----> 2 plot(df, "A", top_words=2)
~/dataprep/dataprep/eda/basic/__init__.py in plot(df, x, y, z, bins, ngroups, largest, nsubgroups, timeunit, agg, sample_size, value_range, yscale, tile_size, dtype, top_words, stopword, lemmatize, stem)
163 stem=stem,
164 )
--> 165 figure = render(intermediate, yscale=yscale, tile_size=tile_size)
166
167 return Report(figure)
~/dataprep/dataprep/eda/basic/render.py in render(itmdt, yscale, tile_size, plot_width_sml, plot_height_sml, plot_width_lrg, plot_height_lrg, plot_width_wide)
1684 visual_elem = render_basic(itmdt, yscale, plot_width_sml, plot_height_sml)
1685 elif itmdt.visual_type == "categorical_column":
-> 1686 visual_elem = render_cat(itmdt, yscale, plot_width_lrg, plot_height_lrg)
1687 elif itmdt.visual_type == "numerical_column":
1688 visual_elem = render_num(itmdt, yscale, plot_width_lrg, plot_height_lrg)
~/dataprep/dataprep/eda/basic/render.py in render_cat(itmdt, yscale, plot_width, plot_height)
1393 tabs.append(pie_viz(df, itmdt["col"], miss_pct, plot_width, plot_height))
1394 freq_tuple = itmdt["word_cloud"]
-> 1395 word_cloud = wordcloud_viz(freq_tuple, plot_width, plot_height)
1396 tabs.append(Panel(child=row(word_cloud), title="word cloud"))
1397 wordfreq = wordfreq_viz(freq_tuple, plot_width, plot_height, True)
~/dataprep/dataprep/eda/basic/render.py in wordcloud_viz(freq_tuple, plot_width, plot_height)
228 )
229 top_freq = freq_tuple[1]
--> 230 wordcloud.generate_from_frequencies(dict(top_freq))
231 wcimg = wordcloud.to_array().astype(np.uint8)
232 alpha = np.full([*wcimg.shape[:2], 1], 255, dtype=np.uint8)
~/dataprep/.venv/lib/python3.8/site-packages/wordcloud/wordcloud.py in generate_from_frequencies(self, frequencies, max_font_size)
401 frequencies = sorted(frequencies.items(), key=itemgetter(1), reverse=True)
402 if len(frequencies) <= 0:
--> 403 raise ValueError("We need at least 1 word to plot a word cloud, "
404 "got %d." % len(frequencies))
405 frequencies = frequencies[:self.max_words]
ValueError: We need at least 1 word to plot a word cloud, got 0.
|
ValueError
|
def clean_text(
freqdist: Dict[str, int],
non_single_word: int,
top_words: Optional[int] = 30,
stopword: Optional[bool] = True,
lemmatize: Optional[bool] = False,
stem: Optional[bool] = False,
) -> Dict[Any, Any]:
"""
clean the frequency dictionary by stopwords, lemmatization and stemming
""" # pylint: disable=too-many-arguments
freq_copy = copy.deepcopy(freqdist)
lemmatizer = WordNetLemmatizer()
porter = PorterStemmer()
for key in freq_copy.keys():
if stopword and non_single_word > top_words: # type: ignore
if key in english_stopwords.english_stopwords or len(key) <= 2:
del freqdist[key]
if lemmatize:
if lemmatizer.lemmatize(key) != key:
freqdist[lemmatizer.lemmatize(key)] = freqdist[key]
del freqdist[key]
if stem:
if porter.stem(key) != key:
freqdist[porter.stem(key)] = freqdist[key]
del freqdist[key]
return freqdist
|
def clean_text(
freqdist: Dict[str, int],
non_single_word: int,
top_words: Optional[int] = 30,
stopword: Optional[bool] = True,
lemmatize: Optional[bool] = False,
stem: Optional[bool] = False,
) -> Dict[Any, Any]:
"""
clean the frequency dictionary by stopwords, lemmatization and stemming
""" # pylint: disable=too-many-arguments
freq_copy = copy.deepcopy(freqdist)
lemmatizer = WordNetLemmatizer()
porter = PorterStemmer()
for key in freq_copy.keys():
if stopword and non_single_word >= top_words: # type: ignore
if key in english_stopwords.english_stopwords or len(key) <= 2:
del freqdist[key]
if lemmatize:
if lemmatizer.lemmatize(key) != key:
freqdist[lemmatizer.lemmatize(key)] = freqdist[key]
del freqdist[key]
if stem:
if porter.stem(key) != key:
freqdist[porter.stem(key)] = freqdist[key]
del freqdist[key]
return freqdist
|
https://github.com/sfu-db/dataprep/issues/208
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-43-f8a3846cc211> in <module>
1 df = pd.DataFrame({"A": ["01-02", "01-02"]})
----> 2 plot(df, "A", top_words=2)
~/dataprep/dataprep/eda/basic/__init__.py in plot(df, x, y, z, bins, ngroups, largest, nsubgroups, timeunit, agg, sample_size, value_range, yscale, tile_size, dtype, top_words, stopword, lemmatize, stem)
163 stem=stem,
164 )
--> 165 figure = render(intermediate, yscale=yscale, tile_size=tile_size)
166
167 return Report(figure)
~/dataprep/dataprep/eda/basic/render.py in render(itmdt, yscale, tile_size, plot_width_sml, plot_height_sml, plot_width_lrg, plot_height_lrg, plot_width_wide)
1684 visual_elem = render_basic(itmdt, yscale, plot_width_sml, plot_height_sml)
1685 elif itmdt.visual_type == "categorical_column":
-> 1686 visual_elem = render_cat(itmdt, yscale, plot_width_lrg, plot_height_lrg)
1687 elif itmdt.visual_type == "numerical_column":
1688 visual_elem = render_num(itmdt, yscale, plot_width_lrg, plot_height_lrg)
~/dataprep/dataprep/eda/basic/render.py in render_cat(itmdt, yscale, plot_width, plot_height)
1393 tabs.append(pie_viz(df, itmdt["col"], miss_pct, plot_width, plot_height))
1394 freq_tuple = itmdt["word_cloud"]
-> 1395 word_cloud = wordcloud_viz(freq_tuple, plot_width, plot_height)
1396 tabs.append(Panel(child=row(word_cloud), title="word cloud"))
1397 wordfreq = wordfreq_viz(freq_tuple, plot_width, plot_height, True)
~/dataprep/dataprep/eda/basic/render.py in wordcloud_viz(freq_tuple, plot_width, plot_height)
228 )
229 top_freq = freq_tuple[1]
--> 230 wordcloud.generate_from_frequencies(dict(top_freq))
231 wcimg = wordcloud.to_array().astype(np.uint8)
232 alpha = np.full([*wcimg.shape[:2], 1], 255, dtype=np.uint8)
~/dataprep/.venv/lib/python3.8/site-packages/wordcloud/wordcloud.py in generate_from_frequencies(self, frequencies, max_font_size)
401 frequencies = sorted(frequencies.items(), key=itemgetter(1), reverse=True)
402 if len(frequencies) <= 0:
--> 403 raise ValueError("We need at least 1 word to plot a word cloud, "
404 "got %d." % len(frequencies))
405 frequencies = frequencies[:self.max_words]
ValueError: We need at least 1 word to plot a word cloud, got 0.
|
ValueError
|
def calc_stats_cat(
srs: dd.Series,
) -> Tuple[Dict[str, str], Dict[str, str], Dict[str, str], Dict[str, str]]:
"""
Calculate stats from a categorical column
Parameters
----------
srs
a categorical column
Returns
-------
Dict[str, str]
Dictionary that contains Overview
"""
# overview stats
size = len(srs) # include nan
count = srs.count() # exclude nan
uniq_count = srs.nunique()
overview_dict = {
"Distinct Count": uniq_count,
"Unique (%)": uniq_count / count,
"Missing": size - count,
"Missing (%)": 1 - (count / size),
"Memory Size": srs.memory_usage(),
}
srs = srs.astype("str")
# quantile stats
max_lbl_len = 25
quantile_dict = {}
for label, centile in zip(
(
"1st Row",
"25% Row",
"50% Row",
"75% Row",
"Last Row",
),
(0, 0.25, 0.5, 0.75, 1),
):
if round(len(srs) * centile) == 0:
element = srs[round(len(srs) * centile)]
if len(element) > max_lbl_len:
quantile_dict[label] = element[0 : max_lbl_len - 2] + "..."
else:
quantile_dict[label] = element
else:
element = srs[round(len(srs) * centile) - 1]
if len(element) > max_lbl_len:
quantile_dict[label] = element[0 : max_lbl_len - 2] + "..."
else:
quantile_dict[label] = element
srs = srs.dropna()
# length stats
length = srs.str.len()
length_dict = {
"Mean": length.mean(),
"Standard Deviation": length.std(),
"Median": length.median(),
"Minimum": length.min(),
"Maximum": length.max(),
}
# letter stats
letter_dict = {
"Count": srs.str.count(r"[a-zA-Z]").sum(),
"Lowercase Letter": srs.str.count(r"[a-z]").sum(),
"Space Separator": srs.str.count(r"[ ]").sum(),
"Uppercase Letter": srs.str.count(r"[A-Z]").sum(),
"Dash Punctuation": srs.str.count(r"[-]").sum(),
"Decimal Number": srs.str.count(r"[0-9]").sum(),
}
return (
{k: _format_values(k, v) for k, v in overview_dict.items()},
{k: _format_values(k, v) for k, v in length_dict.items()},
quantile_dict,
{k: _format_values(k, v) for k, v in letter_dict.items()},
)
|
def calc_stats_cat(
srs: dd.Series,
) -> Tuple[Dict[str, str], Dict[str, str], Dict[str, str], Dict[str, str]]:
"""
Calculate stats from a categorical column
Parameters
----------
srs
a categorical column
Returns
-------
Dict[str, str]
Dictionary that contains Overview
"""
# overview stats
size = len(srs) # include nan
count = srs.count() # exclude nan
uniq_count = srs.nunique()
overview_dict = {
"Distinct Count": uniq_count,
"Unique (%)": uniq_count / count,
"Missing": size - count,
"Missing (%)": 1 - (count / size),
"Memory Size": srs.memory_usage(),
}
srs = srs.astype("str")
# length stats
length = srs.str.len()
length_dict = {
"mean": length.mean(),
"median": length.median(),
"minimum": length.min(),
"maximum": length.max(),
}
# quantile stats
max_lbl_len = 13
quantile_dict = {}
for label, centile in zip(
(
"Minimum",
"5-th Percentile",
"Q1",
"Median",
"Q3",
"95-th Percentile",
"Maximum",
),
(0, 0.05, 0.25, 0.5, 0.75, 0.95, 1),
):
if round(len(srs) * centile) == 0:
element = srs[round(len(srs) * centile)]
if len(element) > max_lbl_len:
quantile_dict[label] = element[0 : max_lbl_len - 2] + "..."
else:
quantile_dict[label] = element
else:
element = srs[round(len(srs) * centile) - 1]
if len(element) > max_lbl_len:
quantile_dict[label] = element[0 : max_lbl_len - 2] + "..."
else:
quantile_dict[label] = element
# letter stats
letter_dict = {
"count": srs.str.count(r"[a-zA-Z]").sum(),
"Lowercase Letter": srs.str.count(r"[a-z]").sum(),
"Space Separator": srs.str.count(r"[ ]").sum(),
"Uppercase Letter": srs.str.count(r"[A-Z]").sum(),
"Dash Punctuation": srs.str.count(r"[-]").sum(),
"Decimal Number": srs.str.count(r"[0-9]").sum(),
}
return (
{k: _format_values(k, v) for k, v in overview_dict.items()},
{k: _format_values(k, v) for k, v in length_dict.items()},
quantile_dict,
{k: _format_values(k, v) for k, v in letter_dict.items()},
)
|
https://github.com/sfu-db/dataprep/issues/208
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-43-f8a3846cc211> in <module>
1 df = pd.DataFrame({"A": ["01-02", "01-02"]})
----> 2 plot(df, "A", top_words=2)
~/dataprep/dataprep/eda/basic/__init__.py in plot(df, x, y, z, bins, ngroups, largest, nsubgroups, timeunit, agg, sample_size, value_range, yscale, tile_size, dtype, top_words, stopword, lemmatize, stem)
163 stem=stem,
164 )
--> 165 figure = render(intermediate, yscale=yscale, tile_size=tile_size)
166
167 return Report(figure)
~/dataprep/dataprep/eda/basic/render.py in render(itmdt, yscale, tile_size, plot_width_sml, plot_height_sml, plot_width_lrg, plot_height_lrg, plot_width_wide)
1684 visual_elem = render_basic(itmdt, yscale, plot_width_sml, plot_height_sml)
1685 elif itmdt.visual_type == "categorical_column":
-> 1686 visual_elem = render_cat(itmdt, yscale, plot_width_lrg, plot_height_lrg)
1687 elif itmdt.visual_type == "numerical_column":
1688 visual_elem = render_num(itmdt, yscale, plot_width_lrg, plot_height_lrg)
~/dataprep/dataprep/eda/basic/render.py in render_cat(itmdt, yscale, plot_width, plot_height)
1393 tabs.append(pie_viz(df, itmdt["col"], miss_pct, plot_width, plot_height))
1394 freq_tuple = itmdt["word_cloud"]
-> 1395 word_cloud = wordcloud_viz(freq_tuple, plot_width, plot_height)
1396 tabs.append(Panel(child=row(word_cloud), title="word cloud"))
1397 wordfreq = wordfreq_viz(freq_tuple, plot_width, plot_height, True)
~/dataprep/dataprep/eda/basic/render.py in wordcloud_viz(freq_tuple, plot_width, plot_height)
228 )
229 top_freq = freq_tuple[1]
--> 230 wordcloud.generate_from_frequencies(dict(top_freq))
231 wcimg = wordcloud.to_array().astype(np.uint8)
232 alpha = np.full([*wcimg.shape[:2], 1], 255, dtype=np.uint8)
~/dataprep/.venv/lib/python3.8/site-packages/wordcloud/wordcloud.py in generate_from_frequencies(self, frequencies, max_font_size)
401 frequencies = sorted(frequencies.items(), key=itemgetter(1), reverse=True)
402 if len(frequencies) <= 0:
--> 403 raise ValueError("We need at least 1 word to plot a word cloud, "
404 "got %d." % len(frequencies))
405 frequencies = frequencies[:self.max_words]
ValueError: We need at least 1 word to plot a word cloud, got 0.
|
ValueError
|
def pie_viz(
df: pd.DataFrame,
col: str,
miss_pct: float,
plot_width: int,
plot_height: int,
) -> Panel:
"""
Render a pie chart
"""
title = f"{col} ({miss_pct}% missing)" if miss_pct > 0 else f"{col}"
tooltips = [(f"{col}", "@col"), ("Count", "@cnt"), ("Percent", "@pct{0.2f}%")]
df["angle"] = df["cnt"] / df["cnt"].sum() * 2 * pi
fig = Figure(
title=title,
plot_width=plot_width,
plot_height=plot_height,
tools="hover",
toolbar_location=None,
tooltips=tooltips,
)
color_list = PALETTE * (len(df) // len(PALETTE) + 1)
df["colour"] = color_list[0 : len(df)]
df["col"] = df["col"].map(lambda x: x[0:13] + "..." if len(x) > 13 else x)
if df.iloc[-1]["cnt"] == 0: # no "Others" group
df = df.iloc[:-1]
pie = fig.wedge(
x=0,
y=1,
radius=0.9,
start_angle=cumsum("angle", include_zero=True),
end_angle=cumsum("angle"),
line_color="white",
fill_color="colour",
source=df,
)
legend = Legend(items=[LegendItem(label=dict(field="col"), renderers=[pie])])
legend.label_text_font_size = "8pt"
fig.add_layout(legend, "right")
tweak_figure(fig, "pie")
fig.axis.major_label_text_font_size = "0pt"
fig.axis.major_tick_line_color = None
return Panel(child=row(fig), title="pie chart")
|
def pie_viz(
df: pd.DataFrame,
col: str,
miss_pct: float,
plot_width: int,
plot_height: int,
) -> Panel:
"""
Render a pie chart
"""
title = f"{col} ({miss_pct}% missing)" if miss_pct > 0 else f"{col}"
tooltips = [(f"{col}", "@col"), ("Count", "@cnt"), ("Percent", "@pct{0.2f}%")]
df["angle"] = df["cnt"] / df["cnt"].sum() * 2 * pi
fig = Figure(
title=title,
plot_width=plot_width,
plot_height=plot_height,
tools="hover",
toolbar_location=None,
tooltips=tooltips,
)
color_list = PALETTE * (len(df) // len(PALETTE) + 1)
df["colour"] = color_list[0 : len(df)]
if df.iloc[-1]["cnt"] == 0: # no "Others" group
df = df[:-1]
df["col"] = df["col"].map(lambda x: x[0:13] + "..." if len(x) > 13 else x)
pie = fig.wedge(
x=0,
y=1,
radius=0.9,
start_angle=cumsum("angle", include_zero=True),
end_angle=cumsum("angle"),
line_color="white",
fill_color="colour",
source=df,
)
legend = Legend(items=[LegendItem(label=dict(field="col"), renderers=[pie])])
legend.label_text_font_size = "8pt"
fig.add_layout(legend, "right")
tweak_figure(fig, "pie")
fig.axis.major_label_text_font_size = "0pt"
fig.axis.major_tick_line_color = None
return Panel(child=row(fig), title="pie chart")
|
https://github.com/sfu-db/dataprep/issues/208
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-43-f8a3846cc211> in <module>
1 df = pd.DataFrame({"A": ["01-02", "01-02"]})
----> 2 plot(df, "A", top_words=2)
~/dataprep/dataprep/eda/basic/__init__.py in plot(df, x, y, z, bins, ngroups, largest, nsubgroups, timeunit, agg, sample_size, value_range, yscale, tile_size, dtype, top_words, stopword, lemmatize, stem)
163 stem=stem,
164 )
--> 165 figure = render(intermediate, yscale=yscale, tile_size=tile_size)
166
167 return Report(figure)
~/dataprep/dataprep/eda/basic/render.py in render(itmdt, yscale, tile_size, plot_width_sml, plot_height_sml, plot_width_lrg, plot_height_lrg, plot_width_wide)
1684 visual_elem = render_basic(itmdt, yscale, plot_width_sml, plot_height_sml)
1685 elif itmdt.visual_type == "categorical_column":
-> 1686 visual_elem = render_cat(itmdt, yscale, plot_width_lrg, plot_height_lrg)
1687 elif itmdt.visual_type == "numerical_column":
1688 visual_elem = render_num(itmdt, yscale, plot_width_lrg, plot_height_lrg)
~/dataprep/dataprep/eda/basic/render.py in render_cat(itmdt, yscale, plot_width, plot_height)
1393 tabs.append(pie_viz(df, itmdt["col"], miss_pct, plot_width, plot_height))
1394 freq_tuple = itmdt["word_cloud"]
-> 1395 word_cloud = wordcloud_viz(freq_tuple, plot_width, plot_height)
1396 tabs.append(Panel(child=row(word_cloud), title="word cloud"))
1397 wordfreq = wordfreq_viz(freq_tuple, plot_width, plot_height, True)
~/dataprep/dataprep/eda/basic/render.py in wordcloud_viz(freq_tuple, plot_width, plot_height)
228 )
229 top_freq = freq_tuple[1]
--> 230 wordcloud.generate_from_frequencies(dict(top_freq))
231 wcimg = wordcloud.to_array().astype(np.uint8)
232 alpha = np.full([*wcimg.shape[:2], 1], 255, dtype=np.uint8)
~/dataprep/.venv/lib/python3.8/site-packages/wordcloud/wordcloud.py in generate_from_frequencies(self, frequencies, max_font_size)
401 frequencies = sorted(frequencies.items(), key=itemgetter(1), reverse=True)
402 if len(frequencies) <= 0:
--> 403 raise ValueError("We need at least 1 word to plot a word cloud, "
404 "got %d." % len(frequencies))
405 frequencies = frequencies[:self.max_words]
ValueError: We need at least 1 word to plot a word cloud, got 0.
|
ValueError
|
def stats_viz_cat(
data: Tuple[Dict[str, str], Dict[str, str], Dict[str, str], Dict[str, str]],
plot_width: int,
plot_height: int,
) -> Panel:
"""
Render statistics panel for categorical data
"""
# pylint: disable=line-too-long
ov_content = ""
lens_content = ""
qs_content = ""
ls_content = ""
for key, value in data[0].items():
value = _sci_notation_superscript(value)
if "Distinct" in key and float(value) > 50:
ov_content += _create_table_row(key, value, True)
elif "Unique" in key and float(value.replace("%", "")) == 100:
ov_content += _create_table_row(key, value, True)
elif "Missing" in key and float(value.replace("%", "")) != 0:
ov_content += _create_table_row(key, value, True)
else:
ov_content += _create_table_row(key, value)
for key, value in data[1].items():
lens_content += _create_table_row(key, value)
for key, value in data[2].items():
qs_content += _create_table_row(key, value)
for key, value in data[3].items():
ls_content += _create_table_row(key, value)
ov_content = f"""
<div style="grid-area: a;">
<h3 style="text-align: center;">Overview</h3>
<table style="width: 100%; table-layout: auto; font-size:11px;">
<tbody>{ov_content}</tbody>
</table>
</div>
"""
qs_content = f"""
<div style="grid-area: b;">
<h3 style="text-align: center;">Sample</h3>
<table style="width: 100%; table-layout: auto; font-size:11px;">
<tbody>{qs_content}</tbody>
</table>
</div>
"""
ls_content = f"""
<div style="grid-area: c;">
<h3 style="text-align: center;">Letter</h3>
<table style="width: 100%; table-layout: auto; font-size:11px;">
<tbody>{ls_content}</tbody>
</table>
</div>
"""
lens_content = f"""
<div style="grid-area: d;">
<h3 style="text-align: center;">Length</h3>
<table style="width: 100%; table-layout: auto; font-size:11px;">
<tbody>{lens_content}</tbody>
</table>
</div>
"""
container = f"""<div style="display: grid;grid-template-columns: 1fr 1fr;grid-template-rows: 1fr 1fr;gap: 1px 1px;
grid-template-areas:\'a b\' \'c d\';">
{ov_content}{qs_content}{ls_content}{lens_content}</div>"""
div = Div(
text=container, width=plot_width, height=plot_height, style={"width": "100%"}
)
return Panel(child=div, title="stats")
|
def stats_viz_cat(
data: Tuple[Dict[str, str], Dict[str, str], Dict[str, str], Dict[str, str]],
plot_width: int,
plot_height: int,
) -> Panel:
"""
Render statistics panel for categorical data
"""
# pylint: disable=line-too-long
ov_content = ""
lens_content = ""
qs_content = ""
ls_content = ""
for key, value in data[0].items():
value = _sci_notation_superscript(value)
if "Distinct" in key and float(value) > 50:
ov_content += _create_table_row(key, value, True)
elif "Unique" in key and float(value.replace("%", "")) == 100:
ov_content += _create_table_row(key, value, True)
elif "Missing" in key and float(value.replace("%", "")) != 0:
ov_content += _create_table_row(key, value, True)
else:
ov_content += _create_table_row(key, value)
for key, value in data[1].items():
lens_content += _create_table_row(key, value)
for key, value in data[2].items():
qs_content += _create_table_row(key, value)
for key, value in data[3].items():
ls_content += _create_table_row(key, value)
ov_content = f"""
<div style="grid-area: a;">
<h3 style="text-align: center;">Overview</h3>
<table style="width: 100%; table-layout: auto;">
<tbody>{ov_content}</tbody>
</table>
</div>
"""
lens_content = f"""
<div style="grid-area: b;">
<h3 style="text-align: center;">Length</h3>
<table style="width: 100%; table-layout: auto; font-size:11px;">
<tbody>{lens_content}</tbody>
</table>
</div>
"""
qs_content = f"""
<div style="grid-area: c;">
<h3 style="text-align: center;margin-top: -10px;">Quantile Statistics</h3>
<table style="width: 100%; table-layout: auto; font-size:11px;">
<tbody>{qs_content}</tbody>
</table>
</div>
"""
ls_content = f"""
<div style="grid-area: d;">
<h3 style="text-align: center;margin-top: -10px;">Letter</h3>
<table style="width: 100%; table-layout: auto; font-size:11px;">
<tbody>{ls_content}</tbody>
</table>
</div>
"""
container = f"""<div style="display: grid;grid-template-columns: 1fr 1fr;grid-template-rows: 1fr 1fr;gap: 1px 1px;
grid-template-areas:\'a b\' \'c d\';">
{ov_content}{lens_content}{qs_content}{ls_content}</div>"""
div = Div(
text=container, width=plot_width, height=plot_height, style={"width": "100%"}
)
return Panel(child=div, title="stats")
|
https://github.com/sfu-db/dataprep/issues/208
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-43-f8a3846cc211> in <module>
1 df = pd.DataFrame({"A": ["01-02", "01-02"]})
----> 2 plot(df, "A", top_words=2)
~/dataprep/dataprep/eda/basic/__init__.py in plot(df, x, y, z, bins, ngroups, largest, nsubgroups, timeunit, agg, sample_size, value_range, yscale, tile_size, dtype, top_words, stopword, lemmatize, stem)
163 stem=stem,
164 )
--> 165 figure = render(intermediate, yscale=yscale, tile_size=tile_size)
166
167 return Report(figure)
~/dataprep/dataprep/eda/basic/render.py in render(itmdt, yscale, tile_size, plot_width_sml, plot_height_sml, plot_width_lrg, plot_height_lrg, plot_width_wide)
1684 visual_elem = render_basic(itmdt, yscale, plot_width_sml, plot_height_sml)
1685 elif itmdt.visual_type == "categorical_column":
-> 1686 visual_elem = render_cat(itmdt, yscale, plot_width_lrg, plot_height_lrg)
1687 elif itmdt.visual_type == "numerical_column":
1688 visual_elem = render_num(itmdt, yscale, plot_width_lrg, plot_height_lrg)
~/dataprep/dataprep/eda/basic/render.py in render_cat(itmdt, yscale, plot_width, plot_height)
1393 tabs.append(pie_viz(df, itmdt["col"], miss_pct, plot_width, plot_height))
1394 freq_tuple = itmdt["word_cloud"]
-> 1395 word_cloud = wordcloud_viz(freq_tuple, plot_width, plot_height)
1396 tabs.append(Panel(child=row(word_cloud), title="word cloud"))
1397 wordfreq = wordfreq_viz(freq_tuple, plot_width, plot_height, True)
~/dataprep/dataprep/eda/basic/render.py in wordcloud_viz(freq_tuple, plot_width, plot_height)
228 )
229 top_freq = freq_tuple[1]
--> 230 wordcloud.generate_from_frequencies(dict(top_freq))
231 wcimg = wordcloud.to_array().astype(np.uint8)
232 alpha = np.full([*wcimg.shape[:2], 1], 255, dtype=np.uint8)
~/dataprep/.venv/lib/python3.8/site-packages/wordcloud/wordcloud.py in generate_from_frequencies(self, frequencies, max_font_size)
401 frequencies = sorted(frequencies.items(), key=itemgetter(1), reverse=True)
402 if len(frequencies) <= 0:
--> 403 raise ValueError("We need at least 1 word to plot a word cloud, "
404 "got %d." % len(frequencies))
405 frequencies = frequencies[:self.max_words]
ValueError: We need at least 1 word to plot a word cloud, got 0.
|
ValueError
|
def render_cat(
itmdt: Intermediate, yscale: str, plot_width: int, plot_height: int
) -> Tabs:
"""
Render plots from plot(df, x) when x is a categorical column
"""
tabs: List[Panel] = []
osd = itmdt["statsdata"]
tabs.append(stats_viz_cat(osd, plot_width, plot_height))
df, total_grps, miss_pct = itmdt["data"]
fig = bar_viz(
df[:-1],
total_grps,
miss_pct,
itmdt["col"],
yscale,
plot_width,
plot_height,
True,
)
tabs.append(Panel(child=row(fig), title="bar chart"))
tabs.append(pie_viz(df, itmdt["col"], miss_pct, plot_width, plot_height))
freq_tuple = itmdt["word_cloud"]
if freq_tuple[0] != 0:
word_cloud = wordcloud_viz(freq_tuple, plot_width, plot_height)
tabs.append(Panel(child=row(word_cloud), title="word cloud"))
wordfreq = wordfreq_viz(freq_tuple, plot_width, plot_height, True)
tabs.append(Panel(child=row(wordfreq), title="words frequency"))
df, miss_pct = itmdt["length_dist"]
length_dist = hist_viz(
df, miss_pct, "length", yscale, plot_width, plot_height, True
)
tabs.append(Panel(child=row(length_dist), title="length"))
tabs = Tabs(tabs=tabs)
return tabs
|
def render_cat(
itmdt: Intermediate, yscale: str, plot_width: int, plot_height: int
) -> Tabs:
"""
Render plots from plot(df, x) when x is a categorical column
"""
tabs: List[Panel] = []
osd = itmdt["statsdata"]
tabs.append(stats_viz_cat(osd, plot_width, plot_height))
df, total_grps, miss_pct = itmdt["data"]
fig = bar_viz(
df[:-1],
total_grps,
miss_pct,
itmdt["col"],
yscale,
plot_width,
plot_height,
True,
)
tabs.append(Panel(child=row(fig), title="bar chart"))
tabs.append(pie_viz(df, itmdt["col"], miss_pct, plot_width, plot_height))
freq_tuple = itmdt["word_cloud"]
word_cloud = wordcloud_viz(freq_tuple, plot_width, plot_height)
tabs.append(Panel(child=row(word_cloud), title="word cloud"))
wordfreq = wordfreq_viz(freq_tuple, plot_width, plot_height, True)
tabs.append(Panel(child=row(wordfreq), title="words frequency"))
df, miss_pct = itmdt["length_dist"]
length_dist = hist_viz(
df, miss_pct, "length", yscale, plot_width, plot_height, True
)
tabs.append(Panel(child=row(length_dist), title="length"))
tabs = Tabs(tabs=tabs)
return tabs
|
https://github.com/sfu-db/dataprep/issues/208
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-43-f8a3846cc211> in <module>
1 df = pd.DataFrame({"A": ["01-02", "01-02"]})
----> 2 plot(df, "A", top_words=2)
~/dataprep/dataprep/eda/basic/__init__.py in plot(df, x, y, z, bins, ngroups, largest, nsubgroups, timeunit, agg, sample_size, value_range, yscale, tile_size, dtype, top_words, stopword, lemmatize, stem)
163 stem=stem,
164 )
--> 165 figure = render(intermediate, yscale=yscale, tile_size=tile_size)
166
167 return Report(figure)
~/dataprep/dataprep/eda/basic/render.py in render(itmdt, yscale, tile_size, plot_width_sml, plot_height_sml, plot_width_lrg, plot_height_lrg, plot_width_wide)
1684 visual_elem = render_basic(itmdt, yscale, plot_width_sml, plot_height_sml)
1685 elif itmdt.visual_type == "categorical_column":
-> 1686 visual_elem = render_cat(itmdt, yscale, plot_width_lrg, plot_height_lrg)
1687 elif itmdt.visual_type == "numerical_column":
1688 visual_elem = render_num(itmdt, yscale, plot_width_lrg, plot_height_lrg)
~/dataprep/dataprep/eda/basic/render.py in render_cat(itmdt, yscale, plot_width, plot_height)
1393 tabs.append(pie_viz(df, itmdt["col"], miss_pct, plot_width, plot_height))
1394 freq_tuple = itmdt["word_cloud"]
-> 1395 word_cloud = wordcloud_viz(freq_tuple, plot_width, plot_height)
1396 tabs.append(Panel(child=row(word_cloud), title="word cloud"))
1397 wordfreq = wordfreq_viz(freq_tuple, plot_width, plot_height, True)
~/dataprep/dataprep/eda/basic/render.py in wordcloud_viz(freq_tuple, plot_width, plot_height)
228 )
229 top_freq = freq_tuple[1]
--> 230 wordcloud.generate_from_frequencies(dict(top_freq))
231 wcimg = wordcloud.to_array().astype(np.uint8)
232 alpha = np.full([*wcimg.shape[:2], 1], 255, dtype=np.uint8)
~/dataprep/.venv/lib/python3.8/site-packages/wordcloud/wordcloud.py in generate_from_frequencies(self, frequencies, max_font_size)
401 frequencies = sorted(frequencies.items(), key=itemgetter(1), reverse=True)
402 if len(frequencies) <= 0:
--> 403 raise ValueError("We need at least 1 word to plot a word cloud, "
404 "got %d." % len(frequencies))
405 frequencies = frequencies[:self.max_words]
ValueError: We need at least 1 word to plot a word cloud, got 0.
|
ValueError
|
def __del__(self):
# We decrease the IDirectSound refcount
self.driver._ds_driver._native_dsound.Release()
|
def __del__(self):
assert _debug("Delete DirectSoundAudioPlayer")
# We decrease the IDirectSound refcount
self.driver._ds_driver._native_dsound.Release()
|
https://github.com/pyglet/pyglet/issues/113
|
Exception ignored in: <function DirectSoundBuffer.__del__ at 0x038B6070>
Traceback (most recent call last):
File "xxx\pyglet\media\drivers\directsound\interface.py", line 174, in __del__
self.delete()
File "xxx\pyglet\media\drivers\directsound\interface.py", line 180, in delete
self._native_buffer.Stop()
File "xxx\pyglet\com.py", line 131, in _call
ret = self.method.get_field()(self.i, self.name)(obj, *args)
OSError: exception: access violation writing 0x00000000
|
OSError
|
def delete(self):
# Make sure the _ds_listener is deleted before the _ds_driver
self._ds_listener = None
|
def delete(self):
assert _debug("Delete DirectSoundDriver")
# Make sure the _ds_listener is deleted before the _ds_driver
self._ds_listener = None
|
https://github.com/pyglet/pyglet/issues/113
|
Exception ignored in: <function DirectSoundBuffer.__del__ at 0x038B6070>
Traceback (most recent call last):
File "xxx\pyglet\media\drivers\directsound\interface.py", line 174, in __del__
self.delete()
File "xxx\pyglet\media\drivers\directsound\interface.py", line 180, in delete
self._native_buffer.Stop()
File "xxx\pyglet\com.py", line 131, in _call
ret = self.method.get_field()(self.i, self.name)(obj, *args)
OSError: exception: access violation writing 0x00000000
|
OSError
|
def __del__(self):
self.delete()
|
def __del__(self):
assert _debug("Delete DirectSoundListener")
|
https://github.com/pyglet/pyglet/issues/113
|
Exception ignored in: <function DirectSoundBuffer.__del__ at 0x038B6070>
Traceback (most recent call last):
File "xxx\pyglet\media\drivers\directsound\interface.py", line 174, in __del__
self.delete()
File "xxx\pyglet\media\drivers\directsound\interface.py", line 180, in delete
self._native_buffer.Stop()
File "xxx\pyglet\com.py", line 131, in _call
ret = self.method.get_field()(self.i, self.name)(obj, *args)
OSError: exception: access violation writing 0x00000000
|
OSError
|
def __del__(self):
self.primary_buffer = None
self._native_dsound.Release()
|
def __del__(self):
assert _debug("Delete interface.DirectSoundDriver")
self.primary_buffer = None
self._native_dsound.Release()
|
https://github.com/pyglet/pyglet/issues/113
|
Exception ignored in: <function DirectSoundBuffer.__del__ at 0x038B6070>
Traceback (most recent call last):
File "xxx\pyglet\media\drivers\directsound\interface.py", line 174, in __del__
self.delete()
File "xxx\pyglet\media\drivers\directsound\interface.py", line 180, in delete
self._native_buffer.Stop()
File "xxx\pyglet\com.py", line 131, in _call
ret = self.method.get_field()(self.i, self.name)(obj, *args)
OSError: exception: access violation writing 0x00000000
|
OSError
|
def delete(self):
if self._native_buffer is not None:
self._native_buffer.Stop()
self._native_buffer.Release()
self._native_buffer = None
if self._native_buffer3d is not None:
self._native_buffer3d.Release()
self._native_buffer3d = None
|
def delete(self):
assert _debug(
"Delete interface.DirectSoundBuffer from AudioFormat {}".format(
self.audio_format
)
)
if self._native_buffer is not None:
self._native_buffer.Stop()
self._native_buffer.Release()
self._native_buffer = None
if self._native_buffer3d is not None:
self._native_buffer3d.Release()
self._native_buffer3d = None
|
https://github.com/pyglet/pyglet/issues/113
|
Exception ignored in: <function DirectSoundBuffer.__del__ at 0x038B6070>
Traceback (most recent call last):
File "xxx\pyglet\media\drivers\directsound\interface.py", line 174, in __del__
self.delete()
File "xxx\pyglet\media\drivers\directsound\interface.py", line 180, in delete
self._native_buffer.Stop()
File "xxx\pyglet\com.py", line 131, in _call
ret = self.method.get_field()(self.i, self.name)(obj, *args)
OSError: exception: access violation writing 0x00000000
|
OSError
|
def delete(self):
if self._native_listener:
self._native_listener.Release()
self._native_listener = None
|
def delete(self):
assert _debug("Delete interface.DirectSoundListener")
if self._native_listener:
self._native_listener.Release()
self._native_listener = None
|
https://github.com/pyglet/pyglet/issues/113
|
Exception ignored in: <function DirectSoundBuffer.__del__ at 0x038B6070>
Traceback (most recent call last):
File "xxx\pyglet\media\drivers\directsound\interface.py", line 174, in __del__
self.delete()
File "xxx\pyglet\media\drivers\directsound\interface.py", line 180, in delete
self._native_buffer.Stop()
File "xxx\pyglet\com.py", line 131, in _call
ret = self.method.get_field()(self.i, self.name)(obj, *args)
OSError: exception: access violation writing 0x00000000
|
OSError
|
def __del__(self):
try:
self.delete()
except OSError:
pass
|
def __del__(self):
self.delete()
|
https://github.com/pyglet/pyglet/issues/113
|
Exception ignored in: <function DirectSoundBuffer.__del__ at 0x038B6070>
Traceback (most recent call last):
File "xxx\pyglet\media\drivers\directsound\interface.py", line 174, in __del__
self.delete()
File "xxx\pyglet\media\drivers\directsound\interface.py", line 180, in delete
self._native_buffer.Stop()
File "xxx\pyglet\com.py", line 131, in _call
ret = self.method.get_field()(self.i, self.name)(obj, *args)
OSError: exception: access violation writing 0x00000000
|
OSError
|
def send_super(receiver, selName, *args, superclass_name=None, **kwargs):
if hasattr(receiver, "_as_parameter_"):
receiver = receiver._as_parameter_
if superclass_name is None:
superclass = get_superclass_of_object(receiver)
else:
superclass = get_class(superclass_name)
super_struct = OBJC_SUPER(receiver, superclass)
selector = get_selector(selName)
restype = kwargs.get("restype", c_void_p)
argtypes = kwargs.get("argtypes", None)
objc.objc_msgSendSuper.restype = restype
if argtypes:
objc.objc_msgSendSuper.argtypes = [OBJC_SUPER_PTR, c_void_p] + argtypes
else:
objc.objc_msgSendSuper.argtypes = None
result = objc.objc_msgSendSuper(byref(super_struct), selector, *args)
if restype == c_void_p:
result = c_void_p(result)
return result
|
def send_super(receiver, selName, *args, **kwargs):
if hasattr(receiver, "_as_parameter_"):
receiver = receiver._as_parameter_
superclass = get_superclass_of_object(receiver)
superclass_ptr = c_void_p(objc.class_getSuperclass(superclass))
if superclass_ptr.value is not None:
superclass = superclass_ptr
super_struct = OBJC_SUPER(receiver, superclass)
selector = get_selector(selName)
restype = kwargs.get("restype", c_void_p)
argtypes = kwargs.get("argtypes", None)
objc.objc_msgSendSuper.restype = restype
if argtypes:
objc.objc_msgSendSuper.argtypes = [OBJC_SUPER_PTR, c_void_p] + argtypes
else:
objc.objc_msgSendSuper.argtypes = None
result = objc.objc_msgSendSuper(byref(super_struct), selector, *args)
if restype == c_void_p:
result = c_void_p(result)
return result
|
https://github.com/pyglet/pyglet/issues/5
|
$ python3 example.py
Traceback (most recent call last):
File "_ctypes/callbacks.c", line 232, in 'calling callback function'
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1120, in objc_method
args = convert_method_arguments(encoding, args)
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1000, in convert_method_arguments
new_args.append(ObjCInstance(a))
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 921, in __new__
if not isinstance(object_ptr, c_void_p):
RecursionError: maximum recursion depth exceeded while calling a Python object
Segmentation fault: 11
$
|
RecursionError
|
def getMouseDelta(nsevent):
dx = nsevent.deltaX()
dy = -nsevent.deltaY()
return dx, dy
|
def getMouseDelta(nsevent):
dx = nsevent.deltaX()
dy = -nsevent.deltaY()
return int(round(dx)), int(round(dy))
|
https://github.com/pyglet/pyglet/issues/5
|
$ python3 example.py
Traceback (most recent call last):
File "_ctypes/callbacks.c", line 232, in 'calling callback function'
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1120, in objc_method
args = convert_method_arguments(encoding, args)
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1000, in convert_method_arguments
new_args.append(ObjCInstance(a))
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 921, in __new__
if not isinstance(object_ptr, c_void_p):
RecursionError: maximum recursion depth exceeded while calling a Python object
Segmentation fault: 11
$
|
RecursionError
|
def setFrameSize_(self, size):
cocoapy.send_super(
self, "setFrameSize:", size, superclass_name="NSView", argtypes=[cocoapy.NSSize]
)
# This method is called when view is first installed as the
# contentView of window. Don't do anything on first call.
# This also helps ensure correct window creation event ordering.
if not self._window.context.canvas:
return
width, height = int(size.width), int(size.height)
self._window.switch_to()
self._window.context.update_geometry()
self._window.dispatch_event("on_resize", width, height)
self._window.dispatch_event("on_expose")
# Can't get app.event_loop.enter_blocking() working with Cocoa, because
# when mouse clicks on the window's resize control, Cocoa enters into a
# mini-event loop that only responds to mouseDragged and mouseUp events.
# This means that using NSTimer to call idle() won't work. Our kludge
# is to override NSWindow's nextEventMatchingMask_etc method and call
# idle() from there.
if self.inLiveResize():
from pyglet import app
if app.event_loop is not None:
app.event_loop.idle()
|
def setFrameSize_(self, size):
cocoapy.send_super(self, "setFrameSize:", size, argtypes=[cocoapy.NSSize])
# This method is called when view is first installed as the
# contentView of window. Don't do anything on first call.
# This also helps ensure correct window creation event ordering.
if not self._window.context.canvas:
return
width, height = int(size.width), int(size.height)
self._window.switch_to()
self._window.context.update_geometry()
self._window.dispatch_event("on_resize", width, height)
self._window.dispatch_event("on_expose")
# Can't get app.event_loop.enter_blocking() working with Cocoa, because
# when mouse clicks on the window's resize control, Cocoa enters into a
# mini-event loop that only responds to mouseDragged and mouseUp events.
# This means that using NSTimer to call idle() won't work. Our kludge
# is to override NSWindow's nextEventMatchingMask_etc method and call
# idle() from there.
if self.inLiveResize():
from pyglet import app
if app.event_loop is not None:
app.event_loop.idle()
|
https://github.com/pyglet/pyglet/issues/5
|
$ python3 example.py
Traceback (most recent call last):
File "_ctypes/callbacks.c", line 232, in 'calling callback function'
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1120, in objc_method
args = convert_method_arguments(encoding, args)
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1000, in convert_method_arguments
new_args.append(ObjCInstance(a))
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 921, in __new__
if not isinstance(object_ptr, c_void_p):
RecursionError: maximum recursion depth exceeded while calling a Python object
Segmentation fault: 11
$
|
RecursionError
|
def nextEventMatchingMask_untilDate_inMode_dequeue_(self, mask, date, mode, dequeue):
if self.inLiveResize():
# Call the idle() method while we're stuck in a live resize event.
from pyglet import app
if app.event_loop is not None:
app.event_loop.idle()
event = send_super(
self,
"nextEventMatchingMask:untilDate:inMode:dequeue:",
mask,
date,
mode,
dequeue,
superclass_name="NSWindow",
argtypes=[NSUInteger, c_void_p, c_void_p, c_bool],
)
if event.value is None:
return 0
else:
return event.value
|
def nextEventMatchingMask_untilDate_inMode_dequeue_(self, mask, date, mode, dequeue):
if self.inLiveResize():
# Call the idle() method while we're stuck in a live resize event.
from pyglet import app
if app.event_loop is not None:
app.event_loop.idle()
event = send_super(
self,
"nextEventMatchingMask:untilDate:inMode:dequeue:",
mask,
date,
mode,
dequeue,
argtypes=[NSUInteger, c_void_p, c_void_p, c_bool],
)
if event.value is None:
return 0
else:
return event.value
|
https://github.com/pyglet/pyglet/issues/5
|
$ python3 example.py
Traceback (most recent call last):
File "_ctypes/callbacks.c", line 232, in 'calling callback function'
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1120, in objc_method
args = convert_method_arguments(encoding, args)
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1000, in convert_method_arguments
new_args.append(ObjCInstance(a))
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 921, in __new__
if not isinstance(object_ptr, c_void_p):
RecursionError: maximum recursion depth exceeded while calling a Python object
Segmentation fault: 11
$
|
RecursionError
|
def send_super(receiver, selName, *args, superclass_name=None, **kwargs):
if hasattr(receiver, "_as_parameter_"):
receiver = receiver._as_parameter_
if superclass_name is None:
superclass = get_superclass_of_object(receiver)
else:
superclass = get_class(superclass_name)
super_struct = OBJC_SUPER(receiver, superclass)
selector = get_selector(selName)
restype = kwargs.get("restype", c_void_p)
argtypes = kwargs.get("argtypes", None)
objc.objc_msgSendSuper.restype = restype
if argtypes:
objc.objc_msgSendSuper.argtypes = [OBJC_SUPER_PTR, c_void_p] + argtypes
else:
objc.objc_msgSendSuper.argtypes = None
result = objc.objc_msgSendSuper(byref(super_struct), selector, *args)
if restype == c_void_p:
result = c_void_p(result)
return result
|
def send_super(receiver, selName, *args, preventSuperclassRecursion=False, **kwargs):
if hasattr(receiver, "_as_parameter_"):
receiver = receiver._as_parameter_
superclass = get_superclass_of_object(receiver)
if preventSuperclassRecursion:
supersuperclass = c_void_p(objc.class_getSuperclass(superclass))
if supersuperclass.value is not None:
superclass = supersuperclass
super_struct = OBJC_SUPER(receiver, superclass)
selector = get_selector(selName)
restype = kwargs.get("restype", c_void_p)
argtypes = kwargs.get("argtypes", None)
objc.objc_msgSendSuper.restype = restype
if argtypes:
objc.objc_msgSendSuper.argtypes = [OBJC_SUPER_PTR, c_void_p] + argtypes
else:
objc.objc_msgSendSuper.argtypes = None
result = objc.objc_msgSendSuper(byref(super_struct), selector, *args)
if restype == c_void_p:
result = c_void_p(result)
return result
|
https://github.com/pyglet/pyglet/issues/5
|
$ python3 example.py
Traceback (most recent call last):
File "_ctypes/callbacks.c", line 232, in 'calling callback function'
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1120, in objc_method
args = convert_method_arguments(encoding, args)
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1000, in convert_method_arguments
new_args.append(ObjCInstance(a))
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 921, in __new__
if not isinstance(object_ptr, c_void_p):
RecursionError: maximum recursion depth exceeded while calling a Python object
Segmentation fault: 11
$
|
RecursionError
|
def setFrameSize_(self, size):
cocoapy.send_super(
self, "setFrameSize:", size, superclass_name="NSView", argtypes=[cocoapy.NSSize]
)
# This method is called when view is first installed as the
# contentView of window. Don't do anything on first call.
# This also helps ensure correct window creation event ordering.
if not self._window.context.canvas:
return
width, height = int(size.width), int(size.height)
self._window.switch_to()
self._window.context.update_geometry()
self._window.dispatch_event("on_resize", width, height)
self._window.dispatch_event("on_expose")
# Can't get app.event_loop.enter_blocking() working with Cocoa, because
# when mouse clicks on the window's resize control, Cocoa enters into a
# mini-event loop that only responds to mouseDragged and mouseUp events.
# This means that using NSTimer to call idle() won't work. Our kludge
# is to override NSWindow's nextEventMatchingMask_etc method and call
# idle() from there.
if self.inLiveResize():
from pyglet import app
if app.event_loop is not None:
app.event_loop.idle()
|
def setFrameSize_(self, size):
# This method is called when view is first installed as the
# contentView of window. Don't do anything on first call.
# This also helps ensure correct window creation event ordering.
if not self._window.context.canvas:
cocoapy.send_super(
self,
"setFrameSize:",
size,
preventSuperclassRecursion=False,
argtypes=[cocoapy.NSSize],
)
return
cocoapy.send_super(
self,
"setFrameSize:",
size,
preventSuperclassRecursion=True,
argtypes=[cocoapy.NSSize],
)
width, height = int(size.width), int(size.height)
self._window.switch_to()
self._window.context.update_geometry()
self._window.dispatch_event("on_resize", width, height)
self._window.dispatch_event("on_expose")
# Can't get app.event_loop.enter_blocking() working with Cocoa, because
# when mouse clicks on the window's resize control, Cocoa enters into a
# mini-event loop that only responds to mouseDragged and mouseUp events.
# This means that using NSTimer to call idle() won't work. Our kludge
# is to override NSWindow's nextEventMatchingMask_etc method and call
# idle() from there.
if self.inLiveResize():
from pyglet import app
if app.event_loop is not None:
app.event_loop.idle()
|
https://github.com/pyglet/pyglet/issues/5
|
$ python3 example.py
Traceback (most recent call last):
File "_ctypes/callbacks.c", line 232, in 'calling callback function'
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1120, in objc_method
args = convert_method_arguments(encoding, args)
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1000, in convert_method_arguments
new_args.append(ObjCInstance(a))
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 921, in __new__
if not isinstance(object_ptr, c_void_p):
RecursionError: maximum recursion depth exceeded while calling a Python object
Segmentation fault: 11
$
|
RecursionError
|
def nextEventMatchingMask_untilDate_inMode_dequeue_(self, mask, date, mode, dequeue):
if self.inLiveResize():
# Call the idle() method while we're stuck in a live resize event.
from pyglet import app
if app.event_loop is not None:
app.event_loop.idle()
event = send_super(
self,
"nextEventMatchingMask:untilDate:inMode:dequeue:",
mask,
date,
mode,
dequeue,
superclass_name="NSWindow",
argtypes=[NSUInteger, c_void_p, c_void_p, c_bool],
)
if event.value is None:
return 0
else:
return event.value
|
def nextEventMatchingMask_untilDate_inMode_dequeue_(self, mask, date, mode, dequeue):
if self.inLiveResize():
# Call the idle() method while we're stuck in a live resize event.
from pyglet import app
if app.event_loop is not None:
app.event_loop.idle()
event = send_super(
self,
"nextEventMatchingMask:untilDate:inMode:dequeue:",
mask,
date,
mode,
dequeue,
preventSuperclassRecursion=True,
argtypes=[NSUInteger, c_void_p, c_void_p, c_bool],
)
if event.value is None:
return 0
else:
return event.value
|
https://github.com/pyglet/pyglet/issues/5
|
$ python3 example.py
Traceback (most recent call last):
File "_ctypes/callbacks.c", line 232, in 'calling callback function'
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1120, in objc_method
args = convert_method_arguments(encoding, args)
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1000, in convert_method_arguments
new_args.append(ObjCInstance(a))
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 921, in __new__
if not isinstance(object_ptr, c_void_p):
RecursionError: maximum recursion depth exceeded while calling a Python object
Segmentation fault: 11
$
|
RecursionError
|
def send_super(receiver, selName, *args, **kwargs):
if hasattr(receiver, "_as_parameter_"):
receiver = receiver._as_parameter_
superclass = get_superclass_of_object(receiver)
superclass_ptr = c_void_p(objc.class_getSuperclass(superclass))
if superclass_ptr.value is not None:
superclass = superclass_ptr
super_struct = OBJC_SUPER(receiver, superclass)
selector = get_selector(selName)
restype = kwargs.get("restype", c_void_p)
argtypes = kwargs.get("argtypes", None)
objc.objc_msgSendSuper.restype = restype
if argtypes:
objc.objc_msgSendSuper.argtypes = [OBJC_SUPER_PTR, c_void_p] + argtypes
else:
objc.objc_msgSendSuper.argtypes = None
result = objc.objc_msgSendSuper(byref(super_struct), selector, *args)
if restype == c_void_p:
result = c_void_p(result)
return result
|
def send_super(receiver, selName, *args, **kwargs):
# print 'send_super', receiver, selName, args
if hasattr(receiver, "_as_parameter_"):
receiver = receiver._as_parameter_
superclass = get_superclass_of_object(receiver)
super_struct = OBJC_SUPER(receiver, superclass)
selector = get_selector(selName)
restype = kwargs.get("restype", c_void_p)
argtypes = kwargs.get("argtypes", None)
objc.objc_msgSendSuper.restype = restype
if argtypes:
objc.objc_msgSendSuper.argtypes = [OBJC_SUPER_PTR, c_void_p] + argtypes
else:
objc.objc_msgSendSuper.argtypes = None
result = objc.objc_msgSendSuper(byref(super_struct), selector, *args)
if restype == c_void_p:
result = c_void_p(result)
return result
|
https://github.com/pyglet/pyglet/issues/5
|
$ python3 example.py
Traceback (most recent call last):
File "_ctypes/callbacks.c", line 232, in 'calling callback function'
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1120, in objc_method
args = convert_method_arguments(encoding, args)
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 1000, in convert_method_arguments
new_args.append(ObjCInstance(a))
File "/usr/local/lib/python3.7/site-packages/pyglet/libs/darwin/cocoapy/runtime.py", line 921, in __new__
if not isinstance(object_ptr, c_void_p):
RecursionError: maximum recursion depth exceeded while calling a Python object
Segmentation fault: 11
$
|
RecursionError
|
def __init__(self, auth_encoding="latin-1"):
self.auth_encoding = auth_encoding
self.proxies = {}
for type_, url in getproxies().items():
try:
self.proxies[type_] = self._get_proxy(url, type_)
# some values such as '/var/run/docker.sock' can't be parsed
# by _parse_proxy and as such should be skipped
except ValueError:
continue
|
def __init__(self, auth_encoding="latin-1"):
self.auth_encoding = auth_encoding
self.proxies = {}
for type_, url in getproxies().items():
self.proxies[type_] = self._get_proxy(url, type_)
|
https://github.com/scrapy/scrapy/issues/3331
|
10:11 $ scrapy runspider quotes.py
2018-07-11 10:12:04 [scrapy.utils.log] INFO: Scrapy 1.5.0 started (bot: scrapybot)
2018-07-11 10:12:04 [scrapy.utils.log] INFO: Versions: lxml 3.5.0.0, libxml2 2.9.3, cssselect 0.9.1, parsel 1.5.0, w3lib 1.19.0, Twisted 16.0.0, Python 2.7.12 (default, Dec 4 2017, 14:50:18) - [GCC 5.4.0 20160609], pyOpenSSL 0.15.1 (OpenSSL 1.0.2g 1 Mar 2016), cryptography 1.2.3, Platform Linux-4.4.0-130-generic-x86_64-with-Ubuntu-16.04-xenial
2018-07-11 10:12:04 [scrapy.crawler] INFO: Overridden settings: {'SPIDER_LOADER_WARN_ONLY': True}
2018-07-11 10:12:04 [scrapy.middleware] INFO: Enabled extensions:
['scrapy.extensions.memusage.MemoryUsage',
'scrapy.extensions.logstats.LogStats',
'scrapy.extensions.telnet.TelnetConsole',
'scrapy.extensions.corestats.CoreStats']
Unhandled error in Deferred:
2018-07-11 10:12:04 [twisted] CRITICAL: Unhandled error in Deferred:
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/scrapy/commands/runspider.py", line 88, in run
self.crawler_process.crawl(spidercls, **opts.spargs)
File "/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py", line 171, in crawl
return self._crawl(crawler, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py", line 175, in _crawl
d = crawler.crawl(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 1274, in unwindGenerator
return _inlineCallbacks(None, gen, Deferred())
--- <exception caught here> ---
File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 1128, in _inlineCallbacks
result = g.send(result)
File "/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py", line 98, in crawl
six.reraise(*exc_info)
File "/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py", line 80, in crawl
self.engine = self._create_engine()
File "/usr/local/lib/python2.7/dist-packages/scrapy/crawler.py", line 105, in _create_engine
return ExecutionEngine(self, lambda _: self.stop())
File "/usr/local/lib/python2.7/dist-packages/scrapy/core/engine.py", line 69, in __init__
self.downloader = downloader_cls(crawler)
File "/usr/local/lib/python2.7/dist-packages/scrapy/core/downloader/__init__.py", line 88, in __init__
self.middleware = DownloaderMiddlewareManager.from_crawler(crawler)
File "/usr/local/lib/python2.7/dist-packages/scrapy/middleware.py", line 58, in from_crawler
return cls.from_settings(crawler.settings, crawler)
File "/usr/local/lib/python2.7/dist-packages/scrapy/middleware.py", line 36, in from_settings
mw = mwcls.from_crawler(crawler)
File "/usr/local/lib/python2.7/dist-packages/scrapy/downloadermiddlewares/httpproxy.py", line 29, in from_crawler
return cls(auth_encoding)
File "/usr/local/lib/python2.7/dist-packages/scrapy/downloadermiddlewares/httpproxy.py", line 22, in __init__
self.proxies[type] = self._get_proxy(url, type)
File "/usr/local/lib/python2.7/dist-packages/scrapy/downloadermiddlewares/httpproxy.py", line 39, in _get_proxy
proxy_type, user, password, hostport = _parse_proxy(url)
File "/usr/lib/python2.7/urllib2.py", line 721, in _parse_proxy
raise ValueError("proxy URL with no authority: %r" % proxy)
exceptions.ValueError: proxy URL with no authority: '/var/run/docker.sock'
2018-07-11 10:12:04 [twisted] CRITICAL:
|
exceptions.ValueError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.