| id | content |
|---|---|
11460164
|
import contextlib
from datetime import datetime
import gym
from tqdm import tqdm
import os, sys
import segar
from segar.mdps.metrics import task_set_init_dist
RUN = int(sys.argv[1])
print(f"=== Running script on slice {RUN}")
all_envs = [ # 0-20 (21 items total)
"Segar-empty-easy-rgb-v0",
"Segar-empty-medium-rgb-v0",
"Segar-empty-hard-rgb-v0",
"Segar-objectsx1-easy-rgb-v0",
"Segar-objectsx2-easy-rgb-v0",
"Segar-objectsx3-easy-rgb-v0",
"Segar-objectsx1-medium-rgb-v0",
"Segar-objectsx2-medium-rgb-v0",
"Segar-objectsx3-medium-rgb-v0",
"Segar-objectsx1-hard-rgb-v0",
"Segar-objectsx2-hard-rgb-v0",
"Segar-objectsx3-hard-rgb-v0",
"Segar-tilesx1-easy-rgb-v0",
"Segar-tilesx2-easy-rgb-v0",
"Segar-tilesx3-easy-rgb-v0",
"Segar-tilesx1-medium-rgb-v0",
"Segar-tilesx2-medium-rgb-v0",
"Segar-tilesx3-medium-rgb-v0",
"Segar-tilesx1-hard-rgb-v0",
"Segar-tilesx2-hard-rgb-v0",
"Segar-tilesx3-hard-rgb-v0",
]
def get_task_list(env):
return env.unwrapped.env.envs[0].task_list
def dump_sim(env):
dt = datetime.today().strftime("%Y%m%d-%H%M%S")
env.unwrapped.env.envs[0].sim.save(f"sim-coredump-{dt}.pkl")
csv_lines = [",".join([" "] + all_envs)]
for envA_name in tqdm(all_envs[RUN : RUN + 1]):
output = [envA_name]
for envB_name in tqdm(all_envs):
try:
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
envA = gym.make(envA_name)
except ValueError:
print("it's in envA creation", envA_name)
dump_sim(envA)
envA = gym.make(envA_name)
try:
with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
envB = gym.make(envB_name)
except ValueError:
print("it's in envB creation", envB_name)
dump_sim(envB)
envB = gym.make(envB_name)
w2 = task_set_init_dist(get_task_list(envA), get_task_list(envB))
output.append(w2)
csv_lines.append(",".join([str(x) for x in output]))
with open(f"env-w2-distances-r{RUN}.csv", "w") as outfile:
outfile.write("\n".join(csv_lines))
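# Usage sketch (added note, not part of the original script): each invocation
# handles one slice of the environment list, e.g. `python <this script> 3`
# writes env-w2-distances-r3.csv. Assuming all 21 per-run CSVs exist, they
# could be merged with something like:
#
#   import pandas as pd
#   frames = [pd.read_csv(f"env-w2-distances-r{run}.csv", index_col=0)
#             for run in range(len(all_envs))]
#   pd.concat(frames).to_csv("env-w2-distances-all.csv")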
|
11460170
|
import pytest
from bplustree.const import TreeConf, ENDIAN
from bplustree.entry import Record, Reference, OpaqueData
from bplustree.node import (Node, LonelyRootNode, RootNode, InternalNode,
LeafNode, FreelistNode, OverflowNode)
from bplustree.serializer import IntSerializer
tree_conf = TreeConf(4096, 7, 16, 16, IntSerializer())
@pytest.mark.parametrize('klass,order,min_children,max_children', [
(LonelyRootNode, 7, 0, 6),
(LonelyRootNode, 100, 0, 99),
(RootNode, 7, 2, 7),
(RootNode, 100, 2, 100),
(InternalNode, 7, 4, 7),
(InternalNode, 100, 50, 100),
(LeafNode, 7, 3, 6),
(LeafNode, 100, 49, 99),
])
def test_node_limit_children(klass, order, min_children, max_children):
node = klass(TreeConf(4096, order, 16, 16, IntSerializer()))
assert node.min_children == min_children
assert node.max_children == max_children
@pytest.mark.parametrize('klass', [
LeafNode, InternalNode, RootNode, LonelyRootNode,
])
def test_empty_node_serialization(klass):
n1 = klass(tree_conf)
data = n1.dump()
n2 = klass(tree_conf, data=data)
assert n1.entries == n2.entries
n3 = Node.from_page_data(tree_conf, data)
assert isinstance(n3, klass)
assert n1.entries == n3.entries
def test_leaf_node_serialization():
n1 = LeafNode(tree_conf, next_page=66)
n1.insert_entry(Record(tree_conf, 43, b'43'))
n1.insert_entry(Record(tree_conf, 42, b'42'))
assert n1.entries == [Record(tree_conf, 42, b'42'),
Record(tree_conf, 43, b'43')]
data = n1.dump()
n2 = LeafNode(tree_conf, data=data)
assert n1.entries == n2.entries
assert n1.next_page == n2.next_page == 66
def test_leaf_node_serialization_no_next_page():
n1 = LeafNode(tree_conf)
data = n1.dump()
n2 = LeafNode(tree_conf, data=data)
assert n1.next_page is n2.next_page is None
def test_root_node_serialization():
n1 = RootNode(tree_conf)
n1.insert_entry(Reference(tree_conf, 43, 2, 3))
n1.insert_entry(Reference(tree_conf, 42, 1, 2))
assert n1.entries == [Reference(tree_conf, 42, 1, 2),
Reference(tree_conf, 43, 2, 3)]
data = n1.dump()
n2 = RootNode(tree_conf, data=data)
assert n1.entries == n2.entries
assert n1.next_page is n2.next_page is None
def test_node_slots():
n1 = RootNode(tree_conf)
with pytest.raises(AttributeError):
n1.foo = True
def test_get_node_from_page_data():
data = (2).to_bytes(1, ENDIAN) + bytes(4096 - 1)
tree_conf = TreeConf(4096, 7, 16, 16, IntSerializer())
assert isinstance(
Node.from_page_data(tree_conf, data, 4),
RootNode
)
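# Note (added): as the test above illustrates, the first byte of a page encodes
# the node type, and a leading byte of 2 deserializes as a RootNode; the full
# byte-to-class mapping lives in bplustree.node and is not reproduced here.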
def test_insert_find_get_remove_entries():
node = RootNode(tree_conf)
# Test empty _find_entry_index, get and remove
with pytest.raises(ValueError):
node._find_entry_index(42)
with pytest.raises(ValueError):
node.get_entry(42)
with pytest.raises(ValueError):
node.remove_entry(42)
# Test insert_entry
r42, r43 = Reference(tree_conf, 42, 1, 2), Reference(tree_conf, 43, 2, 3)
node.insert_entry_at_the_end(r43)
node.insert_entry(r42)
assert sorted(node.entries) == node.entries
# Test _find_entry_index
assert node._find_entry_index(42) == 0
assert node._find_entry_index(43) == 1
# Test get_entry
assert node.get_entry(42) == r42
assert node.get_entry(43) == r43
node.remove_entry(43)
assert node.entries == [r42]
node.remove_entry(42)
assert node.entries == []
def test_smallest_biggest():
node = RootNode(tree_conf)
with pytest.raises(IndexError):
node.pop_smallest()
r42, r43 = Reference(tree_conf, 42, 1, 2), Reference(tree_conf, 43, 2, 3)
node.insert_entry(r43)
node.insert_entry(r42)
# Smallest
assert node.smallest_entry == r42
assert node.smallest_key == 42
# Biggest
assert node.biggest_entry == r43
assert node.biggest_key == 43
assert node.pop_smallest() == r42
assert node.entries == [r43]
def test_freelist_node_serialization():
n1 = FreelistNode(tree_conf, next_page=3)
data = n1.dump()
n2 = FreelistNode(tree_conf, data=data)
assert n1.next_page == n2.next_page
def test_freelist_node_serialization_no_next_page():
n1 = FreelistNode(tree_conf, next_page=None)
data = n1.dump()
n2 = FreelistNode(tree_conf, data=data)
assert n1.next_page is n2.next_page is None
def test_overflow_node_serialization():
n1 = OverflowNode(tree_conf, next_page=3)
n1.insert_entry_at_the_end(OpaqueData(data=b'foo'))
data = n1.dump()
n2 = OverflowNode(tree_conf, data=data)
assert n1.next_page == n2.next_page
def test_overflow_node_serialization_no_next_page():
n1 = OverflowNode(tree_conf, next_page=None)
n1.insert_entry_at_the_end(OpaqueData(data=b'foo'))
data = n1.dump()
n2 = OverflowNode(tree_conf, data=data)
assert n1.next_page is n2.next_page is None
|
11460200
|
import unittest
from task_bash import TaskBashTestCase
class TaskDrushTestCase(TaskBashTestCase):
file = 'cmf/drupal/all/scripts/tasks/drush.yml'
tests = [
# Covers:
# - cim: ~
{
'result': {
'name': 'Running a Drush command',
'shell': 'drush cim -y',
},
'args': {
'cim': '',
},
},
# Covers:
# - name: "Installing Drupal"
# si: ["standard", "--db-url=mysql://root:root@localhost/database"]
{
'result': {
'name': 'Installing Drupal',
'shell': 'drush si standard --db-url=mysql://root:root@localhost/database -y',
},
'args': {
'name': 'Installing Drupal',
'si': [
'standard',
'--db-url=mysql://root:root@localhost/database'
],
},
},
# Covers:
# - name: "Getting a one-time login link"
# uli: 12
{
'result': {
'name': 'Getting a one-time login link',
'shell': 'drush uli 12 -y',
},
'args': {
'name': 'Getting a one-time login link',
'uli': '12',
},
},
]
if __name__ == '__main__':
unittest.main()
|
11460201
|
import os
import tensorflow as tf
import numpy as np
from sklearn.decomposition import TruncatedSVD
def combine_first_two_axes(tensor):
shape = tensor.shape
return tf.reshape(tensor, (shape[0] * shape[1], *shape[2:]))
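# Shape example (illustrative, not in the original file): a tensor of shape
# (5, 4, 84, 84, 3) -- e.g. (tasks, samples, height, width, channels) -- is
# reshaped to (20, 84, 84, 3), merging the first two axes into one batch axis.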
def average_gradients(tower_grads, losses):
average_grads = list()
for grads, loss in zip(tower_grads, losses):
grad = tf.math.reduce_mean(grads, axis=0)
average_grads.append(grad)
return average_grads
def convert_grayscale_images_to_rgb(instances):
"""Gets a list of full path to images and replaces the ones which are grayscale with the same image but in RGB
format."""
counter = 0
fixed_instances = list()
for instance in instances:
image = tf.image.decode_jpeg(tf.io.read_file(instance))
if image.shape[2] != 3:
print(f'Overwriting 2d instance with 3d data: {instance}')
fixed_instances.append(instance)
image = tf.squeeze(image, axis=2)
image = tf.stack((image, image, image), axis=2)
image_data = tf.image.encode_jpeg(image)
tf.io.write_file(instance, image_data)
counter += 1
return counter, fixed_instances
def keep_keys_with_greater_than_equal_k_items(folders_dict, k):
"""Gets a dictionary and just keeps the keys which have greater than equal k items."""
to_be_removed = list()
for folder in folders_dict.keys():
if len(folders_dict[folder]) < k:
to_be_removed.append(folder)
for folder in to_be_removed:
del folders_dict[folder]
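# Example (illustrative): with folders_dict = {'cat': ['a.jpg', 'b.jpg'],
# 'dog': ['c.jpg']} and k = 2, the 'dog' key is deleted in place and only
# 'cat' remains. Note the function mutates its argument and returns None.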
def get_folders_with_greater_than_equal_k_files(folders, k):
to_be_removed = list()
for folder in folders:
if len(os.listdir(folder)) < k:
to_be_removed.append(folder)
for folder in to_be_removed:
folders.remove(folder)
return folders
def SP(data, K):
A = data
indices = np.random.choice(range(data.shape[1]), K, replace=False)
indices = indices.astype(int)
iter = 0
for iter in range(0, K):
k = iter % K
inds = np.delete(np.copy(indices), k)
A3 = A[:, inds]
At = A - np.random.uniform(low=0.5, high=1) * np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
np.matmul(np.transpose(A3), A))
# Compute just the first column from U and V
svd = TruncatedSVD(n_components=1)
svd.fit(np.transpose(At))
# [U, S, V] = np.linalg.svd(At, full_matrices=False)
# u1 = U[:, 0]
# v = V[:, 1]
u = svd.components_.reshape(-1)
N = np.linalg.norm(At, axis=0)
B = At / N
B = np.transpose(B)
Cr = np.abs(np.matmul(B, u))
# ind = np.argsort(Cr)[::-1]
# p = ind[0]
p = np.argsort(Cr)[-1]
indices[k] = p
# ind2 = np.zeros(K - 1, );
# for iter in range(1, 5):
# for k in range(0, K):
# ind2 = np.delete(inds, k)
# A3 = A[:, ind2]
# At = A - np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
# np.matmul(np.transpose(A3), A))
# [U, S, V] = np.linalg.svd(At, full_matrices=False)
# u = U[:, 1]
# v = V[:, 1]
# N = np.linalg.norm(At, axis=0)
# B = At / N
# B = np.transpose(B)
# Cr = np.abs(np.matmul(B, u))
# ind = np.argsort(Cr)[::-1]
# p = ind[0]
# inds[k] = p
return indices
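# Note (added, hedged): SP is a randomized column subset selection routine. It
# starts from K random column indices and, on each pass, removes one index,
# projects A away from the remaining selected columns (scaled by a random
# weight in [0.5, 1)), and replaces the removed index with the column of the
# residual most correlated with the residual's leading singular vector. The
# returned array holds K column indices of `data`. SP_deterministic below
# builds the index set greedily, without the random initialization or weighting.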
def SP_deterministic(data, K):
A = data
At = data
inds = np.zeros(K, )
inds = inds.astype(int)
iter = 0
for k in range(0, K):
iter = iter + 1
# Compute just the first column from U and V
svd = TruncatedSVD(n_components=1)
svd.fit(np.transpose(At))
# [U, S, V] = np.linalg.svd(At, full_matrices=False)
# u1 = U[:, 0]
# v = V[:, 1]
u = svd.components_.reshape(-1)
N = np.linalg.norm(At, axis=0)
B = At / N
B = np.transpose(B)
Cr = np.abs(np.matmul(B, u))
ind = np.argsort(Cr)[::-1]
p = ind[0]
inds[k] = p
A3 = A[:, inds[0:k + 1]]
At = A - np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
np.matmul(np.transpose(A3), A))
# ind2 = np.zeros(K - 1, )
# for iter in range(1, 5):
# for k in range(0, K):
# ind2 = np.delete(inds, k)
# A3 = A[:, ind2]
# At = A - np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
# np.matmul(np.transpose(A3), A))
# [U, S, V] = np.linalg.svd(At, full_matrices=False)
# u = U[:, 1]
# v = V[:, 1]
# N = np.linalg.norm(At, axis=0)
# B = At / N
# B = np.transpose(B)
# Cr = np.abs(np.matmul(B, u))
# ind = np.argsort(Cr)[::-1]
# p = ind[0]
# inds[k] = p
return inds
def SSP_with_random_validation_set(features, labels, K, delta=20):
label_values = np.unique(labels)
num_classes = len(label_values)
label_matrix = np.zeros((len(label_values), len(labels)))
for i, label in enumerate(labels):
label_matrix[label, i] = delta
A = np.concatenate((features, label_matrix), axis=0)
At = np.copy(A)
inds = np.zeros(num_classes * K, )
inds = inds.astype(int)
iter = 0
counter = 0
chosen_indices = list()
for k in range(0, K // 2):
iter = iter + 1
# Compute just the first column from U and V
svd = TruncatedSVD(n_components=1)
svd.fit(np.transpose(At))
# [U, S, V] = np.linalg.svd(At, full_matrices=False)
# u1 = U[:, 0]
# v = V[:, 1]
u = svd.components_.reshape(-1)
new_At = At[:4096, :]
N = np.linalg.norm(new_At, axis=0)
B = new_At / N
B = np.transpose(B)
Cr = np.abs(np.matmul(B, u[:4096]))
for label_value in label_values:
x = np.multiply(Cr, A[features.shape[0] + label_value, ...])
ind = np.argsort(x)
inds[label_value * K // 2 + counter] = np.random.choice((ind[-1], ind[-2], ind[-3], ind[-4]), 1, p=(0.5, 0.3, 0.1, 0.1))
chosen_indices.append(inds[label_value * K // 2 + counter])
validation_choices = np.array(np.where(x != 0)).reshape((-1, ))
inds[label_value * K // 2 + counter + 2 * num_classes] = np.random.choice(validation_choices, 1)
counter += 1
# return inds
if k != K // 2 - 1:
A3 = A[:, chosen_indices]
At = A - np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
np.matmul(np.transpose(A3), A))
# print(inds)
return inds
def SSP(features, labels, K, delta=10):
label_values = np.unique(labels)
num_classes = len(label_values)
label_matrix = np.zeros((len(label_values), len(labels)))
for i, label in enumerate(labels):
label_matrix[label, i] = delta
A = np.concatenate((features, label_matrix), axis=0)
At = np.copy(A)
inds = np.zeros(num_classes * K, )
inds = inds.astype(int)
iter = 0
counter = 0
for k in range(0, K):
iter = iter + 1
# Compute just the first column from U and V
svd = TruncatedSVD(n_components=1)
svd.fit(np.transpose(At))
# [U, S, V] = np.linalg.svd(At, full_matrices=False)
# u1 = U[:, 0]
# v = V[:, 1]
u = svd.components_.reshape(-1)
N = np.linalg.norm(At, axis=0)
B = At / N
B = np.transpose(B)
Cr = np.abs(np.matmul(B, u))
for label_value in label_values:
x = np.multiply(Cr, A[features.shape[0] + label_value, ...])
ind = np.argsort(x)[::-1]
inds[counter] = np.random.choice((ind[0], ind[1], ind[2], ind[3]), 1, p=(0.5, 0.3, 0.1, 0.1))
counter += 1
A3 = A[:, inds[0:counter + 1]]
At = A - np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
np.matmul(np.transpose(A3), A))
return inds
if __name__ == '__main__':
features = np.random.rand(4096, 12000)
labels = [0] * 2000 + [1] * 4000 + [2] * 2600 + [3] * 2000 + [4] * 1400
while True:
indices = SSP_with_random_validation_set(features, labels, 4)
print(indices)
if indices[0] == indices[2] or indices[1] == indices[3] or indices[2] == indices[4]:
break
print(indices)
# data = np.random.rand(40, 73)
# A = data
#
# indices = SP(data, 5)
# A3 = A[:, indices]
# At = A - np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
# np.matmul(np.transpose(A3), A))
#
# norm = np.linalg.norm(At)
# print(norm)
#
# for test_case in range(1000):
# rand_numbers = np.random.randint(0, 73, size=5)
# A3 = A[:, rand_numbers]
# At = A - np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
# np.matmul(np.transpose(A3), A))
# current_norm = np.linalg.norm(At)
#
# print(current_norm)
# assert(current_norm >= norm)
#
# print(norm)
# indices = SP_deterministic(data, 5)
# A3 = A[:, indices]
# At = A - np.matmul(np.matmul(A3, np.linalg.pinv(np.matmul(np.transpose(A3), A3))),
# np.matmul(np.transpose(A3), A))
#
# print(np.linalg.norm(At))
|
11460206
|
import numpy as np
import tensorflow as tf
import sarnet_td3.common.tf_util as U
import sarnet_td3.common.buffer_util_td3 as butil
from sarnet_td3 import MAgentTrainer
from sarnet_td3.common.distributions import make_pdtype
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r
r = r*(1.-done)
discounted.append(r)
return discounted[::-1]
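# Worked example (illustrative): discount_with_dones([1., 1., 1.], [0, 0, 0], 0.5)
# returns [1.75, 1.5, 1.0]. A done flag of 1 zeroes the running return before it
# is stored, so nothing accumulated after a terminal step leaks into earlier steps.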
def make_update_exp(vals, target_vals, polyak):
polyak = 1.0 - polyak
expression = []
for var, var_target in zip(sorted(vals, key=lambda v: v.name), sorted(target_vals, key=lambda v: v.name)):
expression.append(var_target.assign(polyak * var_target + (1.0-polyak) * var))
expression = tf.group(*expression)
return U.function([], [], updates=[expression])
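# Numerical sketch (added): with an input rate of, say, polyak = 0.01, the line
# `polyak = 1.0 - polyak` makes the assign above compute
#   target <- 0.99 * target + 0.01 * source
# i.e. a standard soft (Polyak-averaged) target-network update.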
def create_placeholder_vpg(obs_shape_n, act_space_n, num_agents, args):
# Create placeholders
with tf.name_scope("placeholders"):
obs_ph_n = []
memory_ph_n = []
h_ph_n = []
c_ph_n = []
return_ph_n = []
for i in range(num_agents):
if args.env_type == "mpe":
obs_ph_n.append(U.BatchInput(obs_shape_n[i], name="observation" + str(i), traj=True).get())
else:
obs_ph_n.append(U.BatchInput((obs_shape_n[i],), name="observation" + str(i), traj=True).get())
h_ph_n.append(U.BatchInput((args.gru_units,), name="gru_ph1" + str(i)).get())
c_ph_n.append(U.BatchInput((args.gru_units,), name="gru_ph2" + str(i)).get())
memory_ph_n.append(U.BatchInput((args.value_units,), name="memory_ph" + str(i)).get())
return_ph_n.append(tf.compat.v1.placeholder(tf.float32, [None, None], name="returns" + str(i)))
act_pdtype_n = [make_pdtype(act_space, args.env_type) for act_space in act_space_n]
act_ph_n = [tf.compat.v1.placeholder(tf.int32, [None, None], name="act_one_hot" + str(i)) for i in range(len(act_space_n))]
return obs_ph_n, h_ph_n, c_ph_n, memory_ph_n, act_ph_n, act_space_n, return_ph_n
class CommAgentTrainerVPG(MAgentTrainer):
def __init__(self, name, p_model, obs_ph_n, h_ph_n, c_ph_n, memory_ph_n, act_ph_n,
action_space_n, return_in_ph, args, p_index, num_env=1, is_train=False):
self.name = name
self.args = args
self.p_index = p_index
self.reuse = False
self.num_adv = self.args.num_adversaries
self.n = len(obs_ph_n) # Total number of agents
self.n_start = 0
self.n_end = self.num_adv
self.comm_type = self.args.adv_test
# Update after this many steps
self.step_update_time = 10
if self.args.optimizer == "RMSProp":
self.optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=self.args.actor_lr, decay=0.97, epsilon=1e-6)
else:
self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.args.actor_lr)
# Setup weight sharing for first initialization of adv/good policy
if not(self.p_index == 0 or self.p_index == self.num_adv): self.reuse = True
# Prepare indexing parameters
if self.name == "good_agent":
self.comm_type = self.args.good_test
self.n_start = self.num_adv
self.n_end = self.n
# Batch size and number of agents/environments
self.num_env = num_env
# Initialize actor network for communication
actor_net = p_model(is_train, self.args, reuse=self.reuse)
pMA_model = self.agent_model(self.comm_type, actor_net)
self.max_replay_buffer_len = self.args.update_lag
self.act, self.p_train, self.v_train = self._pMA_VPG_train(
scope=self.name,
make_obs_ph_n=obs_ph_n,
make_memory_ph_n=memory_ph_n,
make_h_ph_n=h_ph_n,
make_c_ph_n=c_ph_n,
make_act_ph_n=act_ph_n,
action_space_n=action_space_n,
make_return_ph_n=return_in_ph,
p_func=pMA_model,
grad_norm_clipping=0.5,
reuse=self.reuse,
)
def agent_model(self, comm_type, p_model):
if comm_type == "SARNET":
return p_model.sarnet
elif comm_type == "TARMAC":
return p_model.tarmac
elif comm_type == "COMMNET":
return p_model.commnet
elif comm_type == "DDPG":
return p_model.ddpg
elif comm_type == "IC3NET":
return p_model.ic3net
def _p_setup_placeholder(self, obs_ph_n, h_ph_n, c_ph_n, memory_ph_n):
p_input = [None] * int(self.n * 4)
for i in range(self.n):
p_input[i] = obs_ph_n[i]
p_input[i + self.n] = h_ph_n[i]
p_input[i + int(2 * self.n)] = c_ph_n[i]
p_input[i + int(3 * self.n)] = memory_ph_n[i]
return p_input
def _pMA_VPG_train(self, make_obs_ph_n, make_memory_ph_n, make_h_ph_n, make_c_ph_n, make_act_ph_n, action_space_n, make_return_ph_n, p_func, grad_norm_clipping=None, scope="agent", reuse=None):
with tf.compat.v1.variable_scope(scope, reuse=reuse):
# create distributions
act_pdtype_n = [make_pdtype(act_space, self.args.env_type) for act_space in action_space_n]
# set up placeholders
obs_ph_n = make_obs_ph_n
memory_ph_n = make_memory_ph_n
h_ph_n = make_h_ph_n
c_ph_n = make_c_ph_n
act_onehot_ph = make_act_ph_n[self.p_index]
return_ph = make_return_ph_n[self.p_index]
# Feed all inputs. Let the model decide what to choose.
p_input = self._p_setup_placeholder(obs_ph_n, h_ph_n, c_ph_n, memory_ph_n)
p, enc_state, memory_state, attention, value = p_func(p_input, int(act_pdtype_n[self.p_index].param_shape()[0]), self.p_index, self.n, self.n_start, self.n_end, scope="p_func", reuse=reuse)
# wrap parameters in distribution and sample
act_pd = act_pdtype_n[self.p_index].pdfromflat(p)
act_soft_sample = act_pd.sample(noise=False, onehot=True)
# print(act_soft_sample)
act_onehot = tf.multinomial(act_soft_sample[-1,:,:], 1)
# print(act_onehot)
value_out = tf.squeeze(value, axis=0) # remove the time dimension from the output for storing in the buffer
return_ph_expd = tf.expand_dims(return_ph, axis=-1)
# Value Network Optimization
# value = tf.squeeze(value, axis=-1) # remove the last single out dim, to align with return (#trajlen, #batch)
target = return_ph_expd - value
loss_v = tf.reduce_mean(tf.math.squared_difference(value, return_ph_expd))
optim_v = self.optimizer.minimize(loss_v, name='adam_optim_v')
# Policy Network Optimization
# print(act_soft_sample)
target_pi = tf.squeeze(target, axis=-1)
loss_pi = tf.reduce_mean(tf.stop_gradient(target_pi) * tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=p, labels=act_onehot_ph), name='loss_pi')
optim_pi = self.optimizer.minimize(loss_pi, name='adam_optim_pi')
# Create callable functions
# policy network
# Use sess.run to feed the dictionary, since we are not calling it anywhere else
update_pi = optim_pi
update_v = optim_v
train_v = U.function(inputs=p_input + [return_ph], outputs=update_v)
train_pi = U.function(inputs=p_input + [act_onehot_ph] + [return_ph], outputs=update_pi)
act = U.function(inputs=p_input, outputs=[act_onehot, act_soft_sample, enc_state, memory_state, attention, value_out])
return act, train_pi, train_v
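# Note (added): in _pMA_VPG_train above, loss_v is a squared-error regression of
# the value head onto the returns, and loss_pi multiplies the stop-gradient
# advantage (return - value) by sparse_softmax_cross_entropy_with_logits, which
# equals -log pi(a|s); minimizing it therefore follows the REINFORCE/vanilla
# policy-gradient direction with a learned baseline.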
def prep_input(self, obs, h, c, memory, is_train=True):
input = [None] * int(self.n * 4)
for i in range(self.n):
input[i] = obs[i]
input[i + self.n] = h[i]
input[i + int(2 * self.n)] = c[i]
input[i + int(3 * self.n)] = memory[i]
return input
def action(self, input, is_train=False):
return self.act(*input)
def sample_experience(self, bufferop):
# Receive all the data for the sampled trajectories
data, index, importance = bufferop.return_exp()
return data, index, importance
def update(self, agents, buffer_data, t):
# Check if an update is needed
# if not (t % self.step_update_time == 0): # only update every 10 steps for policy, 5 for critic
# return "no_update"
# Get mini-batch of trajectories
# Returns the following indexing scheme
# Shape of the trajectory is [# numtraj, [agent, trajlen, numenv, dim] or
# [numtraj [agent, trajlen, num_env]] for rew/done
obs_n_buffer, h_n_buffer, c_n_buffer, memory_n_buffer, action_n_buffer, action_n_logits_buffer, rew_n_buffer, \
value_n_buffer, done_n_buffer = buffer_data
""" Prepare Inputs for network feed """
# Receives [batch_size, [trajlen, numenv, agent]] -> concat [trajlen, batch x numenv, agent]
# Reshape to - [agent, trajlen, batchsize x num_env]]
rew_n_buffer = np.transpose(np.concatenate(rew_n_buffer, axis=1), (2, 0, 1))
# done_n_buffer = np.transpose(np.concatenate(done_n_buffer, axis=1), (2, 0, 1))
# Receives [batch_size, [trajlen, agent, numenv]] -> concat [trajlen, agent, batch x numenv]
# Reshape to - [agent, trajlen, batchsize x num_env]]
# value_n_buffer = np.transpose(np.concatenate(value_n_buffer, axis=-1), (2, 0, 1))
# Receives [batch, [traj, agent, numevn, dim]] -> [traj, agent, numenv x batch, dim]
# Reshape to [agent, trajlen, numenv x batch, dim]
obs_n_buffer = np.swapaxes(np.concatenate(obs_n_buffer, axis=-2), 1, 0)
action_n_buffer = np.squeeze(np.swapaxes(np.concatenate(action_n_buffer, axis=-2), 1, 0))
# For hidden states we only feed the start (i.e. no trajlen)
h_n_buffer = np.swapaxes(np.concatenate(h_n_buffer, axis=-2), 1, 0)
h_n_buffer = h_n_buffer[:, 0, :, :]
c_n_buffer = np.swapaxes(np.concatenate(c_n_buffer, axis=-2), 1, 0)
c_n_buffer = c_n_buffer[:, 0, :, :]
memory_n_buffer = np.swapaxes(np.concatenate(memory_n_buffer, axis=-2), 1, 0)
memory_n_buffer = memory_n_buffer[:, 0, :, :]
returns = []
advantages = []
# Calculate returns
return_so_far = np.zeros(np.shape(rew_n_buffer[self.p_index, 0, :]))
# Get trajectory length to compute the returns in reverse
traj_len, _ = rew_n_buffer[self.p_index].shape
# Do returns calculation for individual agent
for traj_idx in reversed(range(traj_len)):
return_so_far = self.args.gamma * return_so_far + rew_n_buffer[self.p_index, traj_idx, :]
returns.append(return_so_far)
# Returns is of the form [trajlen, dim]
# We need first indexes as agents for easier data manipulation
# returns = np.stack(returns, axis=0)
train_input = self.prep_input(obs_n_buffer, h_n_buffer, c_n_buffer, memory_n_buffer)
#for i in range(5):
_ = self.v_train(*(train_input + [returns]))
_ = self.p_train(*(train_input + [action_n_buffer[self.p_index]] + [returns]))
return "update done"
|
11460242
|
import os
import sys
# noinspection PyUnresolvedReferences
import tests.mock_tables.dbconnector
modules_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(modules_path, 'src'))
from unittest import TestCase
from ax_interface import ValueType
from ax_interface.pdu_implementations import GetPDU, GetNextPDU
from ax_interface.encodings import ObjectIdentifier
from ax_interface.constants import PduTypes
from ax_interface.pdu import PDU, PDUHeader
from ax_interface.mib import MIBTable
from sonic_ax_impl.mibs.vendor.cisco import ciscoEntityFruControlMIB
class TestPsuStatus(TestCase):
@classmethod
def setUpClass(cls):
cls.lut = MIBTable(ciscoEntityFruControlMIB.cefcFruPowerStatusTable)
def test_getNextPsu0(self):
oid = ObjectIdentifier(2, 0, 0, 0, (1, 3, 6, 1, 4, 1, 9, 9, 117, 1, 1, 2, 1, 2))
expected_oid = ObjectIdentifier(2, 0, 0, 0, (1, 3, 6, 1, 4, 1, 9, 9, 117, 1, 1, 2, 1, 2, 1))
get_pdu = GetNextPDU(
header=PDUHeader(1, PduTypes.GET_NEXT, 16, 0, 42, 0, 0, 0),
oids=[oid]
)
encoded = get_pdu.encode()
response = get_pdu.make_response(self.lut)
value0 = response.values[0]
self.assertEqual(value0.type_, ValueType.INTEGER)
self.assertEqual(str(value0.name), str(expected_oid))
self.assertEqual(value0.data, 8)
def test_getPsu1Status(self):
oid = ObjectIdentifier(2, 0, 0, 0, (1, 3, 6, 1, 4, 1, 9, 9, 117, 1, 1, 2, 1, 2, 1))
get_pdu = GetPDU(
header=PDUHeader(1, PduTypes.GET, 16, 0, 42, 0, 0, 0),
oids=[oid]
)
encoded = get_pdu.encode()
response = get_pdu.make_response(self.lut)
value0 = response.values[0]
self.assertEqual(value0.type_, ValueType.INTEGER)
self.assertEqual(str(value0.name), str(oid))
self.assertEqual(value0.data, 8)
def test_getNextPsu1(self):
oid = ObjectIdentifier(2, 0, 0, 0, (1, 3, 6, 1, 4, 1, 9, 9, 117, 1, 1, 2, 1, 2, 1))
expected_oid = ObjectIdentifier(2, 0, 0, 0, (1, 3, 6, 1, 4, 1, 9, 9, 117, 1, 1, 2, 1, 2, 2))
get_pdu = GetNextPDU(
header=PDUHeader(1, PduTypes.GET_NEXT, 16, 0, 42, 0, 0, 0),
oids=[oid]
)
encoded = get_pdu.encode()
response = get_pdu.make_response(self.lut)
value0 = response.values[0]
self.assertEqual(value0.type_, ValueType.INTEGER)
self.assertEqual(str(value0.name), str(expected_oid))
self.assertEqual(value0.data, 2)
def test_getPsu2Status(self):
oid = ObjectIdentifier(2, 0, 0, 0, (1, 3, 6, 1, 4, 1, 9, 9, 117, 1, 1, 2, 1, 2, 2))
get_pdu = GetPDU(
header=PDUHeader(1, PduTypes.GET, 16, 0, 42, 0, 0, 0),
oids=[oid]
)
encoded = get_pdu.encode()
response = get_pdu.make_response(self.lut)
value0 = response.values[0]
self.assertEqual(value0.type_, ValueType.INTEGER)
self.assertEqual(str(value0.name), str(oid))
self.assertEqual(value0.data, 2)
def test_getNextPsu3(self):
oid = ObjectIdentifier(2, 0, 0, 0, (1, 3, 6, 1, 4, 1, 9, 9, 117, 1, 1, 2, 1, 2, 3))
expected_oid = None
get_pdu = GetNextPDU(
header=PDUHeader(1, PduTypes.GET_NEXT, 16, 0, 42, 0, 0, 0),
oids=[oid]
)
encoded = get_pdu.encode()
response = get_pdu.make_response(self.lut)
value0 = response.values[0]
self.assertEqual(value0.type_, ValueType.END_OF_MIB_VIEW)
self.assertEqual(str(value0.name), str(oid))
self.assertEqual(value0.data, None)
def test_getMissedPsu(self):
oid = ObjectIdentifier(2, 0, 0, 0, (1, 3, 6, 1, 4, 1, 9, 9, 117, 1, 1, 2, 1, 2, 5, 1))
expected_oid = None
get_pdu = GetNextPDU(
header=PDUHeader(1, PduTypes.GET_NEXT, 16, 0, 42, 0, 0, 0),
oids=[oid]
)
encoded = get_pdu.encode()
response = get_pdu.make_response(self.lut)
value0 = response.values[0]
self.assertEqual(value0.type_, ValueType.END_OF_MIB_VIEW)
self.assertEqual(str(value0.name), str(oid))
self.assertEqual(value0.data, None)
|
11460246
|
import scrapy
class BasicFormSpider(scrapy.Spider):
name = 'basic-form'
def start_requests(self):
formdata = {
'custname': 'Valdir',
'custtel': '99333322',
'custemail': '<EMAIL>',
'size': 'large',
'topping': ['bacon', 'cheese'],
'delivery': '21:00',
'comments': 'My dog is hungry!'
}
yield scrapy.FormRequest(
'http://httpbin.org/post',
formdata=formdata,
callback=self.parse_form_results
)
def parse_form_results(self, response):
# this website just returns a JSON response with the same
# contents as the ones passed in the form.
self.log(response.body)
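# Usage sketch (added, hedged): with Scrapy installed this spider can be run
# directly, e.g. `scrapy runspider basic_form.py` (hypothetical filename);
# httpbin.org/post echoes the submitted form fields back as JSON, which is what
# parse_form_results logs.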
|
11460290
|
import torch.nn as nn
import torch.nn.functional as F
class IBNIBComm(nn.Module):
def __init__(self, input_shape, args):
super(IBNIBComm, self).__init__()
self.args = args
self.n_agents = args.n_agents
self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
self.fc2 = nn.Linear(args.rnn_hidden_dim, args.rnn_hidden_dim)
self.fc3 = nn.Linear(args.rnn_hidden_dim, 2 * args.comm_embed_dim * self.n_agents)
self.inference_model = nn.Sequential(
nn.Linear(input_shape + 2 * args.comm_embed_dim * self.n_agents, 4 * args.comm_embed_dim * self.n_agents),
nn.ReLU(True),
nn.Linear(4 * args.comm_embed_dim * self.n_agents, 4 * args.comm_embed_dim * self.n_agents),
nn.ReLU(True),
nn.Linear(4 * args.comm_embed_dim * self.n_agents, args.n_actions)
)
def forward(self, inputs):
x = F.relu(self.fc1(inputs))
x = F.relu(self.fc2(x))
message = self.fc3(x)
return message
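# Shape sketch (added): for an input batch of shape (batch, input_shape) the
# returned message has shape (batch, 2 * args.comm_embed_dim * args.n_agents);
# the factor of 2 presumably carries a mean and a variance term per agent
# embedding (an assumption, not stated in this file), and inference_model maps
# the concatenated (input, message) back to args.n_actions logits.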
|
11460326
|
from mygrations.core.parse.parser import parser
from mygrations.formats.mysql.definitions.column import column
class type_character(parser, column):
allowed_collation_types = {'char': True, 'varchar': True}
# in this case we have much less disallowed than allowed
disallowed_types = {
'date': True,
'year': True,
'tinyblob': True,
'blob': True,
'mediumblob': True,
'longblob': True,
'tinytext': True,
'text': True,
'mediumtext': True,
'longtext': True,
'json': True
}
has_comma = False
# name varchar(255) NOT NULL DEFAULT '' CHARACTER SET utf8 COLLATE utf8
# hackish? maybe (i.e. yes). The repeated COLLATE and CHARACTER SET rules take care of uncertain
# ordering. I could also use `children`, which is order-agnostic, but I'm being lazy
rules = [{
'type': 'regexp',
'value': '[^\(\s\)]+',
'name': 'name'
}, {
'type': 'regexp',
'value': '\w+',
'name': 'type'
}, {
'type': 'literal',
'value': '('
}, {
'type': 'regexp',
'value': '\d+',
'name': 'length'
}, {
'type': 'literal',
'value': ')'
}, {
'type': 'regexp',
'value': 'COLLATE ([^\(\s\),]+)',
'name': 'collate',
'optional': True
}, {
'type': 'regexp',
'value': 'CHARACTER SET ([^\(\s\),]+)',
'name': 'character_set',
'optional': True
}, {
'type': 'regexp',
'value': 'COLLATE ([^\(\s\),]+)',
'name': 'collate',
'optional': True
}, {
'type': 'literal',
'value': 'NOT NULL',
'optional': True
}, {
'type': 'regexp',
'value': 'COLLATE ([^\(\s\),]+)',
'name': 'collate',
'optional': True
}, {
'type': 'regexp',
'value': 'CHARACTER SET ([^\(\s\),]+)',
'name': 'character_set',
'optional': True
}, {
'type': 'regexp',
'value': 'COLLATE ([^\(\s\),]+)',
'name': 'collate',
'optional': True
}, {
'type': 'regexp',
'value': 'DEFAULT ([^\(\s\),]+)',
'optional': True,
'name': 'default'
}, {
'type': 'regexp',
'value': 'COLLATE ([^\(\s\),]+)',
'name': 'collate',
'optional': True
}, {
'type': 'regexp',
'value': 'CHARACTER SET ([^\(\s\),]+)',
'name': 'character_set',
'optional': True
}, {
'type': 'regexp',
'value': 'COLLATE ([^\(\s\),]+)',
'name': 'collate',
'optional': True
}, {
'type': 'literal',
'value': ',',
'optional': True,
'name': 'ending_comma'
}]
def process(self):
self.has_comma = True if 'ending_comma' in self._values else False
self._errors = []
self._warnings = []
self._name = self._values['name'].strip('`')
self._column_type = self._values['type']
self._length = self._values['length']
self._null = False if 'NOT NULL' in self._values else True
self._default = self._values['default'] if 'default' in self._values else None
self._character_set = self._values['character_set'] if 'character_set' in self._values else None
self._collate = self._values['collate'] if 'collate' in self._values else None
# make sense of the default
if self._default and len(self._default) >= 2 and self._default[0] == "'" and self._default[-1] == "'":
self._default = self._default.strip("'")
elif self._default:
if self._default.lower() == 'null':
self._default = None
elif not self._default.isdigit():
self._warnings.append(
'Default value of "%s" should have quotes for field %s' % (self._default, self._name)
)
if self._character_set and len(self._character_set
) >= 2 and self._character_set[0] == "'" and self._character_set[-1] == "'":
self._character_set = self._character_set.strip("'")
if self._collate and len(self._collate) >= 2 and self._collate[0] == "'" and self._collate[-1] == "'":
self._collate = self._collate.strip("'")
if self._character_set or self._collate:
if not self._column_type.lower() in self.allowed_collation_types:
self._errors.append(
'Column of type %s is not allowed to have a collation or character set for column %s' %
(self._column_type, self._name)
)
if self._default is None and not self._null:
self._warnings.append(
'Column %s is not null and has no default: you should set a default to avoid MySQL warnings' %
(self._name)
)
if self._column_type.lower() in self.disallowed_types:
self._errors.append(
'Column of type %s is not allowed to have a length for column %s' % (self._column_type, self._name)
)
self._attributes = {}
if self._character_set:
self._attributes['CHARACTER SET'] = self._character_set
if self._collate:
self._attributes['COLLATE'] = self._collate
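# Parsing sketch (added, hedged -- assuming each named regexp rule stores its
# captured group): a definition such as
#   `name` varchar(255) NOT NULL DEFAULT '' CHARACTER SET utf8 COLLATE utf8_general_ci,
# would yield _name='name', _column_type='varchar', _length='255', _null=False,
# _default='' and _attributes={'CHARACTER SET': 'utf8', 'COLLATE': 'utf8_general_ci'},
# with has_comma=True because of the trailing comma.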
|
11460329
|
from tests import TestCase
from src.masonite.pipeline import Pipeline
from src.masonite.request import Request
import os
class PipeTestOne:
def handle(self, request, response):
request.one = 1
return request
class PipeTestTwo:
def handle(self, request, response):
request.two = 2
return request
class PipeTestBreak:
def handle(self, request, response):
return response
class PipeTestThree:
def handle(self, request, response):
request.three = 3
return request
class TestPipeline(TestCase):
def test_pipeline_sets_attributes(self):
request = Request({})
request2 = Request({})
pipeline = Pipeline(request, request2).through([PipeTestOne, PipeTestTwo])
self.assertTrue(request.one == 1)
self.assertTrue(request.two == 2)
def test_pipeline_exits(self):
request = Request({})
request2 = Request({})
pipeline = Pipeline(request, request2).through([PipeTestOne, PipeTestBreak])
self.assertTrue(request.one == 1)
with self.assertRaises(AttributeError):
self.assertTrue(request.two == 2)
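# A minimal Pipeline sketch consistent with these tests (an assumption; the real
# src.masonite.pipeline.Pipeline may differ): each pipe's handle() runs in order
# and the chain stops once a pipe returns something other than the request.
#
#   class Pipeline:
#       def __init__(self, request, response):
#           self.request = request
#           self.response = response
#
#       def through(self, pipes):
#           for pipe in pipes:
#               if pipe().handle(self.request, self.response) is not self.request:
#                   break
#           return self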
|
11460332
|
import sys
import warnings
import re
import xml.etree.ElementTree
import io
import uuid
import struct
import pathlib
import jnius_config
import numpy as np
import scipy.spatial.distance
import scipy.fft
import skimage.util
import skimage.util.dtype
import skimage.io
import skimage.exposure
import skimage.transform
import sklearn.linear_model
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.cm as mcm
import matplotlib.patches as mpatches
import matplotlib.patheffects as mpatheffects
from . import utils
from . import thumbnail
from . import __version__ as _version
if not jnius_config.vm_running:
pkg_root = pathlib.Path(__file__).parent.resolve()
bf_jar_path = pkg_root / 'jars' / 'loci_tools.jar'
if not bf_jar_path.exists():
raise RuntimeError("loci_tools.jar missing from distribution"
" (expected it at %s)" % bf_jar_path)
jnius_config.add_classpath(str(bf_jar_path))
import jnius
DebugTools = jnius.autoclass('loci.common.DebugTools')
IFormatReader = jnius.autoclass('loci.formats.IFormatReader')
MetadataRetrieve = jnius.autoclass('ome.xml.meta.MetadataRetrieve')
ServiceFactory = jnius.autoclass('loci.common.services.ServiceFactory')
OMEXMLService = jnius.autoclass('loci.formats.services.OMEXMLService')
ChannelSeparator = jnius.autoclass('loci.formats.ChannelSeparator')
DynamicMetadataOptions = jnius.autoclass('loci.formats.in.DynamicMetadataOptions')
UNITS = jnius.autoclass('ome.units.UNITS')
DebugTools.enableLogging("ERROR")
# TODO:
# - Write tables with summary information about alignments.
class Metadata(object):
@property
def _num_images(self):
raise NotImplementedError
@property
def num_channels(self):
raise NotImplementedError
@property
def pixel_size(self):
raise NotImplementedError
@property
def pixel_dtype(self):
raise NotImplementedError
def tile_position(self, i):
raise NotImplementedError
def tile_size(self, i):
raise NotImplementedError
@property
def grid_dimensions(self):
pos = self.positions
shape = np.array([len(set(pos[:, d])) for d in range(2)])
if np.prod(shape) != self.num_images:
raise ValueError("Series positions do not form a grid")
return shape
@property
def num_images(self):
return self._num_images
@property
def positions(self):
if not hasattr(self, '_positions'):
self._positions = np.vstack([
self.tile_position(i) for i in range(self._num_images)
])
return self._positions
@property
def size(self):
if not hasattr(self, '_size'):
s0 = self.tile_size(0)
image_ids = range(1, self._num_images)
if any(any(self.tile_size(i) != s0) for i in image_ids):
raise ValueError("Image series must all have the same dimensions")
self._size = s0
return self._size
@property
def centers(self):
return self.positions + self.size / 2
@property
def origin(self):
return self.positions.min(axis=0)
class PlateMetadata(Metadata):
def __init__(self):
super(PlateMetadata, self).__init__()
self.set_active_plate_well(None, None)
@property
def num_plates(self):
raise NotImplementedError
@property
def num_wells(self):
raise NotImplementedError
@property
def plate_well_series(self):
raise NotImplementedError
def plate_name(self, i):
raise NotImplementedError
def well_name(self, plate, i):
raise NotImplementedError
def set_active_plate_well(self, plate, well):
if (plate is None) ^ (well is None):
raise ValueError("plate and well must be both set or both None")
self.active_plate = plate
self.active_well = well
@property
def active_series(self):
if self.active_plate is None:
return range(self._num_images)
else:
return self.plate_well_series[self.active_plate][self.active_well]
@property
def plate_names(self):
if not hasattr(self, '_plate_names'):
self._plate_names = [
self.plate_name(i) for i in range(self.num_plates)
]
return self._plate_names
@property
def well_names(self):
if not hasattr(self, '_well_names'):
self._well_names = [
[self.well_name(p, i) for i in range(num_plate_wells)]
for p, num_plate_wells in enumerate(self.num_wells)
]
return self._well_names
@Metadata.num_images.getter
def num_images(self):
return len(self.active_series)
@Metadata.positions.getter
def positions(self):
return Metadata.positions.fget(self)[self.active_series]
# FIXME Metadata.grid_dimensions should be overridden here or removed.
class Reader(object):
def read(self, series, c):
raise NotImplementedError
class PlateReader(Reader):
# No API here, just a way to signal that a subclass's metadata class
# inherits from PlateMetadata. This is probably a sign that the
# architectural split between Metadata and Reader should be reconsidered.
pass
class BioformatsMetadata(PlateMetadata):
_pixel_dtypes = {
'uint8': np.dtype(np.uint8),
'uint16': np.dtype(np.uint16),
}
_ome_dtypes = {v: k for k, v in _pixel_dtypes.items()}
def __init__(self, path):
super(BioformatsMetadata, self).__init__()
self.path = path
self._init_metadata()
def __getstate__(self):
state = self.__dict__.copy()
del state['_reader'], state['_metadata'], state['_omexml_root']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._init_metadata()
def _init_metadata(self):
factory = ServiceFactory()
service = jnius.cast(OMEXMLService, factory.getInstance(OMEXMLService))
metadata = service.createOMEXMLMetadata()
self._reader = ChannelSeparator()
self._reader.setMetadataStore(metadata)
# For multi-scene .CZI files, we need raw tiles instead of the
# auto-stitched mosaic and we don't want labels or overview images
options = DynamicMetadataOptions()
options.setBoolean('zeissczi.autostitch', False)
options.setBoolean('zeissczi.attachments', False)
self._reader.setMetadataOptions(options)
self._reader.setId(self.path)
xml_content = service.getOMEXML(metadata)
self._metadata = jnius.cast(MetadataRetrieve, metadata)
self._omexml_root = xml.etree.ElementTree.fromstring(xml_content)
self.format_name = self._reader.getFormat()
@property
def _num_images(self):
count = self._metadata.imageCount
# Skip final overview slide in Metamorph Slide Scan data if present.
if (self.format_name == 'Metamorph STK'
and 'overview' in self._metadata.getImageName(count - 1).lower()):
count -= 1
return count
@property
def num_channels(self):
return self._metadata.getChannelCount(0)
@property
def num_plates(self):
return self._metadata.getPlateCount()
@property
def num_wells(self):
return [self._metadata.getWellCount(i) for i in range(self.num_plates)]
@property
def plate_well_series(self):
if hasattr(self, '_plate_well_series'):
return self._plate_well_series
# FIXME Store slice objects to save resources where possible.
series = [
[
np.array([
self._metadata.getWellSampleIndex(p, w, s).value
for s in range(self._metadata.getWellSampleCount(p, w))
], dtype=int)
for w in range(num_wells)
]
for p, num_wells in enumerate(self.num_wells)
]
return series
@property
def pixel_size(self):
values = []
for dim in ('Y', 'X'):
method = getattr(self._metadata, 'getPixelsPhysicalSize%s' % dim)
v_units = method(0)
if v_units is None:
warn_data(
"Pixel size undefined; falling back to 1.0 \u03BCm."
)
value = 1.0
else:
value = v_units.value(UNITS.MICROMETER).doubleValue()
values.append(value)
if values[0] != values[1]:
raise Exception("Can't handle non-square pixels (%f, %f)"
% tuple(values))
return values[0]
@property
def pixel_dtype(self):
return self._pixel_dtypes[self._metadata.getPixelsType(0).value]
def plate_name(self, i):
return self._metadata.getPlateName(i)
@property
def well_naming(self):
if not hasattr(self, '_well_naming'):
_well_naming = []
for p in range(self.num_plates):
row_nc = self._metadata.getPlateRowNamingConvention(p)
column_nc = self._metadata.getPlateColumnNamingConvention(p)
if row_nc is not None:
row_nc = row_nc.value
else:
row_nc = 'letter'
if column_nc is not None:
column_nc = column_nc.value
else:
column_nc = 'number'
if row_nc not in ('letter', 'number') or column_nc != 'number':
raise RuntimeError(
"Can't handle well naming convention row={} column={}"
.format(row_nc, column_nc)
)
_well_naming.append([row_nc, column_nc])
self._well_naming = _well_naming
return self._well_naming
def well_name(self, plate, i):
row = self._metadata.getWellRow(plate, i).value
column = self._metadata.getWellColumn(plate, i).value
row_nc, column_nc = self.well_naming[plate]
# FIXME Support formatting with 384/1536-well plates.
assert row_nc in ('letter', 'number')
assert column_nc == 'number'
if row_nc == 'number':
row_fmt = '{:02}'.format(row + 1)
else:
row_fmt = chr(ord('A') + row)
column_fmt = '{:02}'.format(column + 1)
return row_fmt + column_fmt
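# Formatting example (illustrative): with row_nc='letter' and column_nc='number',
# row=0 and column=4 produce 'A05'; with row_nc='number' the same well is '0105'.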
def tile_position(self, i):
planeCount = self._metadata.getPlaneCount(i)
values = []
for dim in ('Y', 'X'):
method = getattr(self._metadata, 'getPlanePosition%s' % dim)
# FIXME verify all planes have the same X,Y position.
if planeCount > 0:
# Returns None if planePositionX/Y not defined.
v_units = method(i, 0)
else:
# Simple file formats don't have planes at all.
v_units = None
if v_units is None:
warn_data(
"Stage coordinates undefined; falling back to (0, 0)."
)
values = [0.0, 0.0]
break
else:
v = v_units.value(UNITS.MICROMETER)
if v is None:
# Conversion failed, which usually happens when the unit is
# "reference frame". Proceed as if it's actually microns but
# emit a warning.
warn_data(
"Stage coordinates' measurement unit is undefined;"
" assuming \u03BCm."
)
v = v_units.value()
value = v.doubleValue()
values.append(value)
position_microns = np.array(values, dtype=float)
# Invert Y so that stage position coordinates and image pixel
# coordinates are aligned (most formats seem to work this way).
position_microns *= [-1, 1]
position_pixels = position_microns / self.pixel_size
return position_pixels
def tile_size(self, i):
values = []
for dim in ('Y', 'X'):
method = getattr(self._metadata, 'getPixelsSize%s' % dim)
v = method(i).value
values.append(v)
return np.array(values, dtype=int)
class BioformatsReader(PlateReader):
def __init__(self, path, plate=None, well=None):
self.path = path
self.metadata = BioformatsMetadata(self.path)
self.metadata.set_active_plate_well(plate, well)
def read(self, series, c):
self.metadata._reader.setSeries(self.metadata.active_series[series])
index = self.metadata._reader.getIndex(0, c, 0)
byte_array = self.metadata._reader.openBytes(index)
dtype = self.metadata.pixel_dtype
shape = self.metadata.tile_size(series)
img = np.frombuffer(byte_array.tostring(), dtype=dtype).reshape(shape)
return img
class CachingReader(Reader):
"""Wraps a reader to provide tile image caching."""
def __init__(self, reader, channel):
self.reader = reader
self.channel = channel
self._cache = {}
@property
def metadata(self):
return self.reader.metadata
def read(self, series, c):
if c == self.channel and series in self._cache:
img = self._cache[series]
else:
img = self.reader.read(series, c)
if c == self.channel and series not in self._cache:
self._cache[series] = img
return img
# TileStatistics = collections.namedtuple(
# 'TileStatistics',
# 'scan tile x_original y_original x y shift_x shift_y error'
# )
@property
def neighbors_graph(aligner):
"""Return graph of neighboring (overlapping) tiles.
Tiles are considered neighbors if the 'city block' distance between them
is less than the largest tile dimension.
"""
# FIXME: This should properly test for overlap, possibly via
# intersection of bounding rectangles.
if not hasattr(aligner, '_neighbors_graph'):
pdist = scipy.spatial.distance.pdist(aligner.metadata.positions,
metric='cityblock')
sp = scipy.spatial.distance.squareform(pdist)
max_distance = aligner.metadata.size.max() + 1
edges = zip(*np.nonzero((sp > 0) & (sp < max_distance)))
graph = nx.from_edgelist(edges)
graph.add_nodes_from(range(aligner.metadata.num_images))
aligner._neighbors_graph = graph
return aligner._neighbors_graph
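# Worked example (illustrative): with tile size (100, 100) and nominal positions
# [[0, 0], [0, 90], [200, 0]], the pairwise city-block distances are 90, 200 and
# 290; only the first pair is below max(size) + 1 = 101, so the graph contains
# the single edge (0, 1) plus node 2 as an isolated vertex.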
class EdgeAligner(object):
def __init__(
self, reader, channel=0, max_shift=15, false_positive_ratio=0.01,
randomize=False, filter_sigma=0.0, do_make_thumbnail=True, verbose=False
):
self.channel = channel
self.reader = CachingReader(reader, self.channel)
self.verbose = verbose
# Unit is micrometers.
self.max_shift = max_shift
self.max_shift_pixels = self.max_shift / self.metadata.pixel_size
self.false_positive_ratio = false_positive_ratio
self.randomize = randomize
self.filter_sigma = filter_sigma
self.do_make_thumbnail = do_make_thumbnail
self._cache = {}
neighbors_graph = neighbors_graph
def run(self):
self.make_thumbnail()
self.check_overlaps()
self.compute_threshold()
self.register_all()
self.build_spanning_tree()
self.calculate_positions()
self.fit_model()
def make_thumbnail(self):
if not self.do_make_thumbnail:
return
self.reader.thumbnail = thumbnail.make_thumbnail(
self.reader, channel=self.channel
)
def check_overlaps(self):
# This might be better addressed by removing the +1 from the
# neighbors_graph max_distance calculation and ensuring the graph is
# fully connected.
pos = self.metadata.positions
overlaps = np.array([
self.metadata.size - abs(pos[t1] - pos[t2])
for t1, t2 in self.neighbors_graph.edges
])
failures = np.any(overlaps < 1, axis=1) if len(overlaps) else []
if len(failures) and all(failures):
warn_data("No tiles overlap, attempting alignment anyway.")
elif any(failures):
warn_data("Some neighboring tiles have zero overlap.")
def compute_threshold(self):
# Compute error threshold for rejecting alignments. We generate a
# distribution of error scores for many known non-overlapping image
# regions and take a certain percentile as the maximum allowable error.
# The percentile becomes our accepted false-positive ratio.
edges = self.neighbors_graph.edges
num_tiles = self.metadata.num_images
# If not enough tiles overlap to matter, skip this whole thing.
if len(edges) <= 1:
self.errors_negative_sampled = np.empty(0)
self.max_error = np.inf
return
widths = np.array([
self.intersection(t1, t2).shape.min()
for t1, t2 in edges
])
w = widths.max()
max_offset = self.metadata.size[0] - w
# Number of possible pairs minus number of actual neighbor pairs.
num_distant_pairs = num_tiles * (num_tiles - 1) // 2 - len(edges)
# Reduce permutation count for small datasets -- there are fewer
# possible truly distinct strips with fewer tiles. The calculation here
# is just a heuristic, not rigorously derived.
n = 1000 if num_distant_pairs > 8 else (num_distant_pairs + 1) * 10
pairs = np.empty((n, 2), dtype=int)
offsets = np.empty((n, 2), dtype=int)
# Generate n random non-overlapping image strips. Strips are always
# horizontal, across the entire image width.
max_tries = 100
if self.randomize is False:
random_state = np.random.RandomState(0)
else:
random_state = np.random.RandomState()
for i in range(n):
# Limit tries to avoid infinite loop in pathological cases.
for current_try in range(max_tries):
t1, t2 = random_state.randint(self.metadata.num_images, size=2)
o1, o2 = random_state.randint(max_offset, size=2)
# Check for non-overlapping strips and abort the retry loop.
if t1 != t2 and (t1, t2) not in edges:
# Different, non-neighboring tiles -- always OK.
break
elif t1 == t2 and abs(o1 - o2) > w:
# Same tile OK if strips don't overlap within the image.
break
elif (t1, t2) in edges:
# Neighbors OK if either strip is entirely outside the
# expected overlap region (based on nominal positions).
its = self.intersection(t1, t2, np.repeat(w, 2))
ioff1, ioff2 = its.offsets[:, 0]
if (
its.shape[0] > its.shape[1]
or o1 < ioff1 - w or o1 > ioff1 + w
or o2 < ioff2 - w or o2 > ioff2 + w
):
break
else:
# Retries exhausted. This should be very rare.
warn_data(
"Could not find non-overlapping strips in {max_tries} tries"
)
pairs[i] = t1, t2
offsets[i] = o1, o2
errors = np.empty(n)
for i, ((t1, t2), (offset1, offset2)) in enumerate(zip(pairs, offsets)):
if self.verbose and (i % 10 == 9 or i == n - 1):
sys.stdout.write(
'\r quantifying alignment error %d/%d' % (i + 1, n)
)
sys.stdout.flush()
img1 = self.reader.read(t1, self.channel)[offset1:offset1+w, :]
img2 = self.reader.read(t2, self.channel)[offset2:offset2+w, :]
_, errors[i] = utils.register(img1, img2, self.filter_sigma, upsample=1)
if self.verbose:
print()
self.errors_negative_sampled = errors
self.max_error = np.percentile(errors, self.false_positive_ratio * 100)
def register_all(self):
n = self.neighbors_graph.size()
for i, (t1, t2) in enumerate(self.neighbors_graph.edges, 1):
if self.verbose:
sys.stdout.write('\r aligning edge %d/%d' % (i, n))
sys.stdout.flush()
self.register_pair(t1, t2)
if self.verbose:
print()
self.all_errors = np.array([x[1] for x in self._cache.values()])
# Set error values above the threshold to infinity.
for k, v in self._cache.items():
if v[1] > self.max_error or any(np.abs(v[0]) > self.max_shift_pixels):
self._cache[k] = (v[0], np.inf)
def build_spanning_tree(self):
# Note that this may be disconnected, so it's technically a forest.
g = nx.Graph()
g.add_nodes_from(self.neighbors_graph)
g.add_weighted_edges_from(
(t1, t2, error)
for (t1, t2), (_, error) in self._cache.items()
if np.isfinite(error)
)
spanning_tree = nx.Graph()
spanning_tree.add_nodes_from(g)
for c in nx.connected_components(g):
cc = g.subgraph(c)
center = nx.center(cc)[0]
paths = nx.single_source_dijkstra_path(cc, center).values()
for path in paths:
nx.add_path(spanning_tree, path)
self.spanning_tree = spanning_tree
def calculate_positions(self):
shifts = {}
for c in nx.connected_components(self.spanning_tree):
cc = self.spanning_tree.subgraph(c)
center = nx.center(cc)[0]
shifts[center] = np.array([0, 0])
for edge in nx.traversal.bfs_edges(cc, center):
source, dest = edge
if source not in shifts:
source, dest = dest, source
shift = self.register_pair(source, dest)[0]
shifts[dest] = shifts[source] + shift
if shifts:
self.shifts = np.array([s for _, s in sorted(shifts.items())])
self.positions = self.metadata.positions + self.shifts
else:
# TODO: fill in shifts and positions with 0x2 arrays
raise NotImplementedError("No images")
def fit_model(self):
components = sorted(
nx.connected_components(self.spanning_tree),
key=len, reverse=True
)
# Fit LR model on positions of largest connected component.
cc0 = list(components[0])
self.lr = sklearn.linear_model.LinearRegression()
self.lr.fit(self.metadata.positions[cc0], self.positions[cc0])
# Fix up degenerate transform matrix (e.g. when we have only one tile).
if (self.lr.coef_ == 0).all():
self.lr.coef_ = np.diag(np.ones(2))
# Adjust position of remaining components so their centroids match
# the predictions of the model.
for cc in components[1:]:
nodes = list(cc)
centroid_m = np.mean(self.metadata.positions[nodes], axis=0)
centroid_f = np.mean(self.positions[nodes], axis=0)
shift = self.lr.predict([centroid_m])[0] - centroid_f
self.positions[nodes] += shift
# Adjust positions and model intercept to put origin at 0,0.
self.origin = self.positions.min(axis=0)
self.positions -= self.origin
self.lr.intercept_ -= self.origin
self.centers = self.positions + self.metadata.size / 2
def register_pair(self, t1, t2):
"""Return relative shift between images and the alignment error."""
key = tuple(sorted((t1, t2)))
try:
shift, error = self._cache[key]
except KeyError:
# We test a series of increasing overlap window sizes to help avoid
# missing alignments when the stage position error is large relative
# to the tile overlap. Simply using a large overlap in all cases
# limits the maximum achievable correlation thus increasing the
# error metric, leading to worse overall results. The window size
# starts at the nominal size and doubles until it's at least 10% of
# the tile size. If the nominal overlap is already 10% or greater,
# we only use that one size.
smin = self.intersection(key[0], key[1]).shape
smax = np.round(self.metadata.size * 0.1)
sizes = [smin]
while any(sizes[-1] < smax):
sizes.append(sizes[-1] * 2)
results = [self._register(key[0], key[1], s) for s in sizes]
# Use the shift from the window size that gave the lowest error.
shift, _ = min(results, key=lambda r: r[1])
# Extract the images from the nominal overlap window but with the
# shift applied to the second tile's position, and compute the error
# metric on these images. This should be even lower than the error
# computed above.
_, o1, o2 = self.overlap(key[0], key[1], shift=shift)
error = utils.nccw(o1, o2, self.filter_sigma)
self._cache[key] = (shift, error)
if t1 > t2:
shift = -shift
# Return copy of shift to prevent corruption of cached values.
return shift.copy(), error
def _register(self, t1, t2, min_size=0):
its, img1, img2 = self.overlap(t1, t2, min_size)
# Account for padding, flipping the sign depending on the direction
# between the tiles.
p1, p2 = self.metadata.positions[[t1, t2]]
sx = 1 if p1[1] >= p2[1] else -1
sy = 1 if p1[0] >= p2[0] else -1
padding = its.padding * [sy, sx]
shift, error = utils.register(img1, img2, self.filter_sigma)
shift += padding
return shift, error
def intersection(self, t1, t2, min_size=0, shift=None):
corners1 = self.metadata.positions[[t1, t2]]
if shift is not None:
corners1[1] += shift
corners2 = corners1 + self.metadata.size
return Intersection(corners1, corners2, min_size)
def crop(self, tile, offset, shape):
img = self.reader.read(series=tile, c=self.channel)
return utils.crop(img, offset, shape)
def overlap(self, t1, t2, min_size=0, shift=None):
its = self.intersection(t1, t2, min_size, shift)
img1 = self.crop(t1, its.offsets[0], its.shape)
img2 = self.crop(t2, its.offsets[1], its.shape)
return its, img1, img2
@property
def best_edge(self):
ordered_keys = sorted(self._cache, key=lambda k: self._cache[k][1])
return ordered_keys[0]
@property
def metadata(self):
return self.reader.metadata
@property
def mosaic_shape(self):
upper_corners = self.positions + self.metadata.size
max_dimensions = upper_corners.max(axis=0)
return np.ceil(max_dimensions).astype(int)
def debug(self, t1, t2, min_size=0):
shift, _ = self._register(t1, t2, min_size)
its, o1, o2 = self.overlap(t1, t2, min_size)
w1 = utils.whiten(o1, self.filter_sigma)
w2 = utils.whiten(o2, self.filter_sigma)
corr = scipy.fft.fftshift(np.abs(scipy.fft.ifft2(
scipy.fft.fft2(w1) * scipy.fft.fft2(w2).conj()
)))
corr /= (np.linalg.norm(w1) * np.linalg.norm(w2))
stack = np.vstack
rows, cols = 3, 1
if corr.shape[0] > corr.shape[1]:
stack = np.hstack
rows, cols = cols, rows
plt.figure()
plt.subplot(rows, cols, 1)
plt.imshow(stack([o1, o2]))
ax = plt.subplot(rows, cols, 2)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(stack([w1, w2]).real)
ax = plt.subplot(rows, cols, 3)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(corr, vmin=np.exp(-10))
cbar = plt.colorbar()
cbar.ax.yaxis.set_major_locator(
plt.FixedLocator(cbar.mappable.get_clim())
)
cbar.ax.yaxis.set_major_formatter(
plt.FuncFormatter(lambda x, pos: "{:.2f}".format(-np.log(x)))
)
origin = np.array(corr.shape) // 2
plt.plot(origin[1], origin[0], 'r+')
# FIXME This is wrong when t1 > t2.
shift += origin + its.padding
plt.plot(shift[1], shift[0], 'rx')
plt.tight_layout()
class LayerAligner(object):
def __init__(self, reader, reference_aligner, channel=None, max_shift=15,
filter_sigma=0.0, verbose=False):
self.reader = reader
self.reference_aligner = reference_aligner
if channel is None:
channel = reference_aligner.channel
self.channel = channel
# Unit is micrometers.
self.max_shift = max_shift
self.max_shift_pixels = self.max_shift / self.metadata.pixel_size
self.filter_sigma = filter_sigma
self.verbose = verbose
# FIXME Still a bit muddled here on the use of metadata positions vs.
# corrected positions from the reference aligner. We probably want to
# use metadata positions to find the cycle-to-cycle tile
# correspondences, but the corrected positions for computing our
# corrected positions.
    neighbors_graph = EdgeAligner.neighbors_graph
def run(self):
self.make_thumbnail()
self.coarse_align()
self.register_all()
self.calculate_positions()
def make_thumbnail(self):
self.reader.thumbnail = thumbnail.make_thumbnail(
self.reader, channel=self.channel
)
def coarse_align(self):
self.cycle_offset = thumbnail.calculate_cycle_offset(
self.reference_aligner.reader, self.reader
)
self.corrected_nominal_positions = self.metadata.positions + self.cycle_offset
reference_positions = self.reference_aligner.metadata.positions
dist = scipy.spatial.distance.cdist(reference_positions,
self.corrected_nominal_positions)
self.reference_idx = np.argmin(dist, 0)
self.reference_positions = reference_positions[self.reference_idx]
self.reference_aligner_positions = self.reference_aligner.positions[self.reference_idx]
def register_all(self):
n = self.metadata.num_images
self.shifts = np.empty((n, 2))
self.errors = np.empty(n)
for i in range(n):
if self.verbose:
sys.stdout.write("\r aligning tile %d/%d" % (i + 1, n))
sys.stdout.flush()
shift, error = self.register(i)
self.shifts[i] = shift
self.errors[i] = error
if self.verbose:
print()
def calculate_positions(self):
self.positions = (
self.corrected_nominal_positions
+ self.shifts
+ self.reference_aligner_positions
- self.reference_positions
)
self.constrain_positions()
self.centers = self.positions + self.metadata.size / 2
def constrain_positions(self):
        # Discard any registration that would place a tile exactly on its
        # reference aligner position; such zero-difference results come from
        # strong self-correlation of the sensor dark current pattern, which
        # dominates in low-signal images.
position_diffs = np.absolute(
self.positions - self.reference_aligner_positions
)
# Round the diffs to one decimal point because the subpixel shifts are
# calculated by 10x upsampling.
position_diffs = np.rint(position_diffs * 10) / 10
discard = (position_diffs == 0).all(axis=1)
        # Discard any tile registration whose error is infinite.
discard |= np.isinf(self.errors)
# Take the median of registered shifts to determine the offset
# (translation) from the reference image to this one.
if discard.all():
offset = 0
else:
offset = np.nan_to_num(np.median(self.shifts[~discard], axis=0))
# Here we assume the fitted linear model from the reference image is
# still appropriate, apart from the extra offset we just computed.
predictions = self.reference_aligner.lr.predict(self.corrected_nominal_positions)
# Discard any tile registration that's too far from the linear model,
# replacing it with the relevant model prediction.
distance = np.linalg.norm(self.positions - predictions - offset, axis=1)
max_dist = self.max_shift_pixels
extremes = distance > max_dist
# Recalculate the mean shift, also ignoring the extreme values.
discard |= extremes
self.discard = discard
if discard.all():
self.offset = 0
else:
self.offset = np.nan_to_num(np.mean(self.shifts[~discard], axis=0))
# Fill in discarded shifts from the predictions.
self.positions[discard] = predictions[discard] + self.offset
def register(self, t):
"""Return relative shift between images and the alignment error."""
its, ref_img, img = self.overlap(t)
if np.any(np.array(its.shape) == 0):
return (0, 0), np.inf
shift, error = utils.register(ref_img, img, self.filter_sigma)
# We don't use padding and thus can skip the math to account for it.
assert (its.padding == 0).all(), "Unexpected non-zero padding"
return shift, error
def intersection(self, t):
corners1 = np.vstack([self.reference_positions[t],
self.corrected_nominal_positions[t]])
corners2 = corners1 + self.reader.metadata.size
its = Intersection(corners1, corners2)
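        # Round the overlap shape down to the nearest multiple of 32 pixels.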
its.shape = its.shape // 32 * 32
return its
def overlap(self, t):
its = self.intersection(t)
ref_t = self.reference_idx[t]
img1 = self.reference_aligner.reader.read(
series=ref_t, c=self.reference_aligner.channel
)
img2 = self.reader.read(series=t, c=self.channel)
ov1 = utils.crop(img1, its.offsets[0], its.shape)
ov2 = utils.crop(img2, its.offsets[1], its.shape)
return its, ov1, ov2
@property
def metadata(self):
return self.reader.metadata
def debug(self, t):
shift, _ = self.register(t)
its, o1, o2 = self.overlap(t)
w1 = utils.whiten(o1, self.filter_sigma)
w2 = utils.whiten(o2, self.filter_sigma)
corr = scipy.fft.fftshift(np.abs(scipy.fft.ifft2(
scipy.fft.fft2(w1) * scipy.fft.fft2(w2).conj()
)))
plt.figure()
plt.subplot(1, 3, 1)
plt.imshow(np.vstack([o1, o2]))
ax = plt.subplot(1, 3, 2)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(np.vstack([w1, w2]).real)
ax = plt.subplot(1, 3, 3)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(corr)
origin = np.array(corr.shape) // 2
plt.plot(origin[1], origin[0], 'r+')
shift += origin
plt.plot(shift[1], shift[0], 'rx')
        plt.tight_layout(pad=0, h_pad=0, w_pad=0)
class Intersection(object):
def __init__(self, corners1, corners2, min_size=0):
if np.isscalar(min_size):
min_size = np.repeat(min_size, 2)
self._calculate(corners1, corners2, min_size)
def _calculate(self, corners1, corners2, min_size):
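        # corners1/corners2 hold the upper-left and lower-right corners of the
        # two tiles. The intersection runs from the maximum upper-left corner
        # to the minimum lower-right corner; if it is smaller than min_size it
        # is grown and the extra amount recorded as `padding` so callers can
        # correct registered shifts accordingly.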
max_shape = (corners2 - corners1).max(axis=0)
min_size = min_size.clip(1, max_shape)
position = corners1.max(axis=0)
initial_shape = np.floor(corners2.min(axis=0) - position).astype(int)
clipped_shape = np.maximum(initial_shape, min_size)
self.shape = np.ceil(clipped_shape).astype(int)
self.padding = self.shape - initial_shape
self.offsets = np.maximum(position - corners1 - self.padding, 0)
def __repr__(self):
s = 'shape: {0.shape}\npadding: {0.padding}\noffsets:\n{0.offsets}'
return s.format(self)
class Mosaic(object):
def __init__(
self, aligner, shape, filename_format, channels=None,
ffp_path=None, dfp_path=None, flip_mosaic_x=False, flip_mosaic_y=False,
combined=False, tile_size=None, first=False, verbose=False
):
self.aligner = aligner
self.shape = tuple(shape)
self.filename_format = filename_format
self.channels = self._sanitize_channels(channels)
self.flip_mosaic_x = flip_mosaic_x
self.flip_mosaic_y = flip_mosaic_y
self.combined = combined
self.tile_size = tile_size
self.first = first
self.dtype = aligner.metadata.pixel_dtype
self._load_correction_profiles(dfp_path, ffp_path)
self.verbose = verbose
def _sanitize_channels(self, channels):
all_channels = range(self.aligner.metadata.num_channels)
if channels is None:
channels = all_channels
invalid_channels = sorted(set(channels) - set(all_channels))
if invalid_channels:
raise ValueError("invalid channels: %s" % invalid_channels)
return channels
def _load_single_profile(self, path, num_channels, img_size, profile_type):
"""Load, normalize, and validate illumination profile.
Parameters
----------
path : str
Path to the image being loaded.
num_channels : int
Expected number of channels in the profile image.
img_size : tuple
Shape of a 2D image in (row, column).
profile_type : str
Type of profile, only accepts 'dark' and 'flat'.
Returns
----------
ndarray
Image as numpy array in the (channel, row, column) arrangement.
If ``path`` is ``None``, return an array in (channel, 1, 1) shape.
The values in the array are 0 and 1 for dark- and flat-field profile, respectively.
"""
assert profile_type in ('dark', 'flat'), "profile_type must be either 'dark' or 'flat'."
if path is None:
profile_shape = (num_channels, 1, 1)
return (
np.zeros(profile_shape)
if profile_type == 'dark'
else np.ones(profile_shape)
)
expected_ndim = 2 if num_channels == 1 else 3
profile = skimage.io.imread(path)
if profile.ndim != expected_ndim:
raise ValueError(
                'Expected dimensionality is {} for {}-field profile, but {} has {} dimensions.'.format(
expected_ndim, profile_type, path, profile.ndim
)
)
profile = np.atleast_3d(profile)
        # skimage.io.imread reads images with 3 or 4 channels in (Y, X, C)
        # shape, but images with other channel counts as (C, Y, X). Normalize
        # the shape to (C, Y, X) regardless of the number of channels.
if num_channels in (1, 3, 4):
profile = np.moveaxis(profile, 2, 0)
if profile.shape != (num_channels,) + img_size:
raise ValueError(
'{}-field profile shape {} does not match target image shape {}.'.format(
profile_type.capitalize(), profile.shape, img_size
)
)
return profile
def _load_correction_profiles(self, dfp_path, ffp_path):
if dfp_path is None and ffp_path is None:
self.do_correction = False
else:
num_channels = self.aligner.metadata.num_channels
img_size = tuple(self.aligner.metadata.size)
self.dfp = self._load_single_profile(dfp_path, num_channels, img_size, 'dark')
self.ffp = self._load_single_profile(ffp_path, num_channels, img_size, 'flat')
# FIXME This assumes integer dtypes. Do we need to support floats?
self.dfp /= np.iinfo(self.dtype).max
self.do_correction = True
def run(self, mode='write', debug=False):
if mode not in ('write', 'return'):
raise ValueError('Invalid mode')
num_tiles = len(self.aligner.positions)
all_images = []
if debug:
node_colors = nx.greedy_color(self.aligner.neighbors_graph)
num_colors = max(node_colors.values()) + 1
if num_colors > 3:
raise ValueError("neighbor graph requires more than 3 colors")
for ci, channel in enumerate(self.channels):
if self.verbose:
print(' Channel %d:' % channel)
if not debug:
mosaic_image = np.zeros(self.shape, self.dtype)
else:
mosaic_image = np.zeros(self.shape + (3,), np.float32)
for tile, position in enumerate(self.aligner.positions):
if self.verbose:
sys.stdout.write('\r merging tile %d/%d'
% (tile + 1, num_tiles))
sys.stdout.flush()
tile_image = self.aligner.reader.read(c=channel, series=tile)
tile_image = self.correct_illumination(tile_image, channel)
if debug:
color_channel = node_colors[tile]
rgb_image = np.zeros(tile_image.shape + (3,),
tile_image.dtype)
rgb_image[:,:,color_channel] = tile_image
tile_image = rgb_image
func = utils.pastefunc_blend if not debug else np.add
utils.paste(mosaic_image, tile_image, position, func=func)
if debug:
np.clip(mosaic_image, 0, 1, out=mosaic_image)
w = int(1e6)
mi_flat = mosaic_image.reshape(-1, 3)
for p in np.arange(0, mi_flat.shape[0], w, dtype=int):
mi_flat[p:p+w] = skimage.exposure.adjust_gamma(
mi_flat[p:p+w], 1/2.2
)
if self.flip_mosaic_x:
mosaic_image = np.fliplr(mosaic_image)
if self.flip_mosaic_y:
mosaic_image = np.flipud(mosaic_image)
if self.verbose:
print()
if mode == 'write':
filename = self.filename_format.format(channel=channel)
kwargs = {}
if self.combined:
kwargs['bigtiff'] = True
# FIXME Propagate this from input files (esp. RGB).
kwargs['photometric'] = 'minisblack'
resolution = np.round(10000 / self.aligner.reader.metadata.pixel_size)
# FIXME Switch to "CENTIMETER" once we use tifffile directly.
kwargs['resolution'] = (resolution, resolution, 'cm')
kwargs['metadata'] = None
if self.first and ci == 0:
# Set description to a short placeholder that will fit
# within the IFD. We'll check for this string later.
kwargs['description'] = '!!xml!!'
kwargs['software'] = (
'Ashlar v{} (Glencoe/Faas pyramid output)'
.format(_version)
)
else:
                    # Only the first channel of the first cycle overwrites the
                    # file; for everything else, append.
kwargs['append'] = True
if self.tile_size:
kwargs['tile'] = (self.tile_size, self.tile_size)
if self.verbose:
print(" writing to %s" % filename)
utils.imsave(filename, mosaic_image, **kwargs)
elif mode == 'return':
all_images.append(mosaic_image)
if mode == 'return':
return all_images
def correct_illumination(self, img, channel):
if self.do_correction:
img = skimage.util.img_as_float(img, force_copy=True)
img -= self.dfp[channel, ...]
img /= self.ffp[channel, ...]
img.clip(0, 1, out=img)
return img
def build_pyramid(
path, num_channels, shape, dtype, pixel_size, tile_size, verbose=False
):
max_level = 0
shapes = [shape]
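    # Append 2x-downsampled levels until every dimension fits in one tile.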
while any(s > tile_size for s in shape):
prev_level = max_level
max_level += 1
if verbose:
print(" Level %d:" % max_level)
for i in range(num_channels):
if verbose:
sys.stdout.write('\r processing channel %d/%d'
% (i + 1, num_channels))
sys.stdout.flush()
img = skimage.io.imread(path, series=prev_level, key=i)
img = skimage.transform.pyramid_reduce(img, multichannel=False)
img = skimage.util.dtype.convert(img, dtype)
utils.imsave(
path, img, bigtiff=True, metadata=None, append=True,
tile=(tile_size, tile_size), photometric='minisblack'
)
shapes.append(img.shape)
if verbose:
print()
shape = img.shape
# Now that we have the number and dimensions of all levels, we can generate
# the corresponding OME-XML and patch it into the Image Description tag of
# the first IFD.
filename = pathlib.Path(path).name
img_uuid = uuid.uuid4().urn
ome_dtype = BioformatsMetadata._ome_dtypes[dtype]
ifd = 0
xml = io.StringIO()
xml.write(u'<?xml version="1.0" encoding="UTF-8"?>')
xml.write(
(u'<OME xmlns="http://www.openmicroscopy.org/Schemas/OME/2016-06"'
' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
' UUID="{uuid}"'
' xsi:schemaLocation="http://www.openmicroscopy.org/Schemas/OME/2016-06'
' http://www.openmicroscopy.org/Schemas/OME/2016-06/ome.xsd">')
.format(uuid=img_uuid)
)
for level in range(max_level + 1):
shape = shapes[level]
if level == 0:
psize_xml = (
u'PhysicalSizeX="{0}" PhysicalSizeXUnit="\u00b5m"'
u' PhysicalSizeY="{0}" PhysicalSizeYUnit="\u00b5m"'
.format(pixel_size)
)
else:
psize_xml = u''
xml.write(u'<Image ID="Image:{}">'.format(level))
xml.write(
(u'<Pixels BigEndian="false" DimensionOrder="XYZCT"'
' ID="Pixels:{level}" {psize_xml} SizeC="{num_channels}" SizeT="1"'
' SizeX="{sizex}" SizeY="{sizey}" SizeZ="1" Type="{ome_dtype}">')
.format(
level=level, psize_xml=psize_xml, num_channels=num_channels,
sizex=shape[1], sizey=shape[0], ome_dtype=ome_dtype
)
)
for channel in range(num_channels):
xml.write(
(u'<Channel ID="Channel:{level}:{channel}"'
+ (u' Name="Channel {channel}"' if level == 0 else u'')
+ u' SamplesPerPixel="1"><LightPath/></Channel>')
.format(level=level, channel=channel)
)
for channel in range(num_channels):
xml.write(
(u'<TiffData FirstC="{channel}" FirstT="0" FirstZ="0"'
' IFD="{ifd}" PlaneCount="1">'
'<UUID FileName="{filename}">{uuid}</UUID>'
'</TiffData>')
.format(
channel=channel, ifd=ifd, filename=filename, uuid=img_uuid
)
)
ifd += 1
if level == 0:
for channel in range(num_channels):
xml.write(
u'<Plane TheC="{channel}" TheT="0" TheZ="0"/>'
.format(channel=channel)
)
xml.write(u'</Pixels>')
xml.write(u'</Image>')
xml.write(u'</OME>')
xml_bytes = xml.getvalue().encode('utf-8') + b'\x00'
# Append the XML and patch up the Image Description tag in the first IFD.
with open(path, 'rb+') as f:
f.seek(0, io.SEEK_END)
xml_offset = f.tell()
f.write(xml_bytes)
f.seek(0)
ifd_block = f.read(500)
match = re.search(b'!!xml!!\x00', ifd_block)
if match is None:
raise RuntimeError("Did not find placeholder string in IFD")
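        # Assuming a BigTIFF layout (8-byte tag fields), the tag's count field
        # immediately precedes its inline placeholder value, so overwrite both
        # with the new XML length and the offset where the XML was appended.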
f.seek(match.start() - 8)
f.write(struct.pack('<Q', len(xml_bytes)))
f.write(struct.pack('<Q', xml_offset))
class DataWarning(UserWarning):
"""Warnings about the content of user-provided image data."""
pass
def warn_data(message):
warnings.warn(message, DataWarning)
def plot_edge_shifts(aligner, img=None, bounds=True, im_kwargs=None):
if im_kwargs is None:
im_kwargs = {}
fig = plt.figure()
ax = plt.gca()
draw_mosaic_image(ax, aligner, img, **im_kwargs)
h, w = aligner.reader.metadata.size
if bounds:
# Bounding boxes denoting new tile positions.
for xy in np.fliplr(aligner.positions):
rect = mpatches.Rectangle(xy, w, h, color='black', fill=False,
lw=0.5)
ax.add_patch(rect)
# Compute per-edge relative shifts from tile positions.
edges = np.array(list(aligner.spanning_tree.edges))
dist = aligner.metadata.positions - aligner.positions
shifts = dist[edges[:, 0]] - dist[edges[:, 1]]
shift_distances = np.linalg.norm(shifts, axis=1)
# Spanning tree with nodes at new tile positions, edges colored by shift
# distance (brighter = farther).
nx.draw(
aligner.spanning_tree, ax=ax, with_labels=True,
pos=np.fliplr(aligner.centers), edge_color=shift_distances,
edge_cmap=plt.get_cmap('Blues_r'), width=2, node_size=100, font_size=6
)
fig.set_facecolor('black')
def plot_edge_quality(
aligner, img=None, show_tree=True, pos='metadata', im_kwargs=None, nx_kwargs=None
):
if pos == 'metadata':
centers = aligner.metadata.centers - aligner.metadata.origin
elif pos == 'aligner':
centers = aligner.centers
else:
raise ValueError("pos must be either 'metadata' or 'aligner'")
if im_kwargs is None:
im_kwargs = {}
if nx_kwargs is None:
nx_kwargs = {}
final_nx_kwargs = dict(width=2, node_size=100, font_size=6)
final_nx_kwargs.update(nx_kwargs)
if show_tree:
nrows, ncols = 1, 2
if aligner.mosaic_shape[1] * 2 / aligner.mosaic_shape[0] > 2 * 4 / 3:
nrows, ncols = ncols, nrows
else:
nrows, ncols = 1, 1
fig = plt.figure()
ax = plt.subplot(nrows, ncols, 1)
draw_mosaic_image(ax, aligner, img, **im_kwargs)
error = np.array([aligner._cache[tuple(sorted(e))][1]
for e in aligner.neighbors_graph.edges])
# Manually center and scale data to 0-1, except infinity which is set to -1.
# This lets us use the purple-green diverging color map to color the graph
# edges and cause the "infinity" edges to disappear into the background
# (which is itself purple).
infs = error == np.inf
error[infs] = -1
if not infs.all():
error_f = error[~infs]
emin = np.min(error_f)
emax = np.max(error_f)
if emin == emax:
# Always true when there's only one edge. Otherwise it's unlikely
# but theoretically possible.
erange = 1
else:
erange = emax - emin
error[~infs] = (error_f - emin) / erange
# Neighbor graph colored by edge alignment quality (brighter = better).
nx.draw(
aligner.neighbors_graph, ax=ax, with_labels=True,
pos=np.fliplr(centers), edge_color=error, edge_vmin=-1, edge_vmax=1,
edge_cmap=plt.get_cmap('PRGn'), **final_nx_kwargs
)
if show_tree:
ax = plt.subplot(nrows, ncols, 2)
draw_mosaic_image(ax, aligner, img, **im_kwargs)
# Spanning tree with nodes at original tile positions.
nx.draw(
aligner.spanning_tree, ax=ax, with_labels=True,
pos=np.fliplr(centers), edge_color='royalblue',
**final_nx_kwargs
)
fig.set_facecolor('black')
def plot_edge_scatter(aligner, annotate=True):
import seaborn as sns
xdata = aligner.all_errors
ydata = np.clip(
[np.linalg.norm(v[0]) for v in aligner._cache.values()], 0.01, np.inf
)
pdata = np.clip(aligner.errors_negative_sampled, 0, 10)
g = sns.JointGrid(xdata, ydata)
g.plot_joint(sns.scatterplot, alpha=0.5)
_, xbins = np.histogram(np.hstack([xdata, pdata]), bins=40)
sns.distplot(
xdata, ax=g.ax_marg_x, kde=False, bins=xbins, norm_hist=True
)
sns.distplot(
pdata, ax=g.ax_marg_x, kde=False, bins=xbins, norm_hist=True,
hist_kws=dict(histtype='step')
)
g.ax_joint.axvline(aligner.max_error, c='k', ls=':')
g.ax_joint.axhline(aligner.max_shift_pixels, c='k', ls=':')
g.ax_joint.set_yscale('log')
g.set_axis_labels('error', 'shift')
if annotate:
for pair, x, y in zip(aligner.neighbors_graph.edges, xdata, ydata):
plt.annotate(str(pair), (x, y), alpha=0.1)
plt.tight_layout()
def plot_layer_shifts(aligner, img=None, im_kwargs=None):
if im_kwargs is None:
im_kwargs = {}
fig = plt.figure()
ax = plt.gca()
draw_mosaic_image(ax, aligner, img, **im_kwargs)
h, w = aligner.metadata.size
# Bounding boxes denoting new tile positions.
for xy in np.fliplr(aligner.positions):
rect = mpatches.Rectangle(xy, w, h, color='black', fill=False, lw=0.5)
ax.add_patch(rect)
# Neighbor graph with edges hidden, i.e. just show nodes.
nx.draw(
aligner.neighbors_graph, ax=ax, with_labels=True,
pos=np.fliplr(aligner.centers), edge_color='none',
node_size=100, font_size=6
)
fig.set_facecolor('black')
def plot_layer_quality(
aligner, img=None, scale=1.0, artist='patches', annotate=True, im_kwargs=None
):
if im_kwargs is None:
im_kwargs = {}
fig = plt.figure()
ax = plt.gca()
draw_mosaic_image(ax, aligner, img, **im_kwargs)
h, w = aligner.metadata.size
positions, centers, shifts = aligner.positions, aligner.centers, aligner.shifts
if scale != 1.0:
h, w, positions, centers, shifts = [
scale * i for i in [h, w, positions, centers, shifts]
]
# Bounding boxes denoting new tile positions.
color_index = skimage.exposure.rescale_intensity(
aligner.errors, out_range=np.uint8
).astype(np.uint8)
color_map = mcm.magma_r
for xy, c_idx in zip(np.fliplr(positions), color_index):
rect = mpatches.Rectangle(
xy, w, h, color=color_map(c_idx), fill=False, lw=0.5
)
ax.add_patch(rect)
# Annotate tile numbering.
if annotate:
for idx, (x, y) in enumerate(np.fliplr(positions)):
text = plt.annotate(str(idx), (x+0.1*w, y+0.9*h), alpha=0.7)
# Add outline to text for better contrast in different background color.
text_outline = mpatheffects.Stroke(linewidth=1, foreground='#AAA')
text.set_path_effects(
[text_outline, mpatheffects.Normal()]
)
if artist == 'quiver':
ax.quiver(
*centers.T[::-1], *shifts.T[::-1], aligner.discard,
units='dots', width=2, scale=1, scale_units='xy', angles='xy',
cmap='Greys'
)
if artist == 'patches':
for xy, dxy, is_discarded in zip(
np.fliplr(centers), np.fliplr(shifts), aligner.discard
):
arrow = mpatches.FancyArrowPatch(
xy, np.array(xy) + np.array(dxy),
arrowstyle='->', color='0' if is_discarded else '1',
mutation_scale=8,
)
ax.add_patch(arrow)
ax.axis('off')
def draw_mosaic_image(ax, aligner, img, **kwargs):
if img is None:
img = [[0]]
h, w = aligner.mosaic_shape
ax.imshow(img, extent=(-0.5, w-0.5, h-0.5, -0.5), **kwargs)
|
11460343
|
import time
import pwmio
def fade(pin):
led = pwmio.PWMOut(pin, frequency=5000, duty_cycle=0)
# LED setup for QT Py M0:
# led = pwmio.PWMOut(board.SCK, frequency=5000, duty_cycle=0)
while True:
for i in range(100):
# PWM LED up and down
if i < 50:
led.duty_cycle = int(i * 2 * 65535 / 100) # Up
else:
led.duty_cycle = 65535 - int((i - 50) * 2 * 65535 / 100) # Down
time.sleep(0.01)
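# Example call (assumes a CircuitPython board whose onboard LED pin is
# available as board.LED; `board` is not imported above):
#   import board
#   fade(board.LED)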
|
11460349
|
import torch.nn as nn
class CRLoss(nn.Module):
"""
    CRLoss: weighted sum of a cross-entropy classification loss and an MSE
    score-regression loss.
"""
def __init__(self, cls_w=0.4, reg_w=0.6):
super(CRLoss, self).__init__()
self.cls_w = cls_w
self.reg_w = reg_w
self.class_criterion = nn.CrossEntropyLoss()
self.regression_criterion = nn.MSELoss()
def forward(self, cls_pred, cls_gt, score_pred, score_gt):
class_loss = self.class_criterion(cls_pred, cls_gt)
regression_loss = self.regression_criterion(score_pred, score_gt)
cr_loss = self.cls_w * class_loss + self.reg_w * regression_loss
return cr_loss
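# Illustrative usage (tensor names are assumptions, not part of this module):
#   criterion = CRLoss(cls_w=0.4, reg_w=0.6)
#   loss = criterion(cls_logits, cls_targets, score_pred, score_target)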
|
11460354
|
from __future__ import absolute_import, division, print_function
from six.moves import range
from xfel.ui.db import db_proxy
from scitbx.array_family import flex
from six.moves import zip
class Event(db_proxy):
def __init__(self, app, event_id = None, **kwargs):
db_proxy.__init__(self, app, "%s_event" % app.params.experiment_tag, id = event_id, **kwargs)
self.event_id = self.id
class Experiment(db_proxy):
def __init__(self, app, experiment_id = None, experiment = None, **kwargs):
assert [experiment_id, experiment].count(None) == 1
if experiment is not None:
self.imageset = Imageset(app)
self.beam = Beam(app, beam = experiment.beam)
self.detector = Detector(app, detector = experiment.detector)
self.crystal = Crystal(app, crystal = experiment.crystal)
kwargs['imageset_id'] = self.imageset.id
kwargs['beam_id'] = self.beam.id
kwargs['detector_id'] = self.detector.id
kwargs['crystal_id'] = self.crystal.id
kwargs['crystal_cell_id'] = self.crystal.cell_id
db_proxy.__init__(self, app, "%s_experiment" % app.params.experiment_tag, id = experiment_id, **kwargs)
self.experiment_id = self.id
if experiment is None:
self.imageset = Imageset(app, imageset_id=self.imageset_id)
self.beam = Beam(app, beam_id=self.beam_id)
self.detector = Detector(app, self.detector_id)
self.crystal = Crystal(app, self.crystal_id)
class Imageset(db_proxy):
def __init__(self, app, imageset_id = None, **kwargs):
db_proxy.__init__(self, app, "%s_imageset" % app.params.experiment_tag, id=imageset_id, **kwargs)
self.imageset_id = self.id
class Beam(db_proxy):
def __init__(self, app, beam_id = None, beam = None, **kwargs):
assert [beam_id, beam].count(None) == 1
if beam is not None:
u_s0 = beam.get_unit_s0()
kwargs['direction_1'] = u_s0[0]
kwargs['direction_2'] = u_s0[1]
kwargs['direction_3'] = u_s0[2]
kwargs['wavelength'] = beam.get_wavelength()
db_proxy.__init__(self, app, "%s_beam" % app.params.experiment_tag, id=beam_id, **kwargs)
self.beam_id = self.id
class Detector(db_proxy):
def __init__(self, app, detector_id = None, detector = None, **kwargs):
assert [detector_id, detector].count(None) == 1
if detector is not None:
kwargs['distance'] = flex.mean(flex.double([p.get_distance() for p in detector]))
db_proxy.__init__(self, app, "%s_detector" % app.params.experiment_tag, id=detector_id, **kwargs)
self.detector_id = self.id
class Crystal(db_proxy):
def __init__(self, app, crystal_id = None, crystal = None, make_cell = True, **kwargs):
from scitbx import matrix
assert [crystal_id, crystal].count(None) == 1
if crystal is not None:
u = matrix.sqr(crystal.get_U()) # orientation matrix
for i in range(len(u)):
kwargs['ori_%d' % (i + 1)] = u[i]
try:
kwargs['mosaic_block_rotation'] = crystal.get_half_mosaicity_deg()
kwargs['mosaic_block_size'] = crystal.get_domain_size_ang()
except AttributeError:
pass
if hasattr(crystal, 'identified_isoform'):
print("Warning, isoforms no longer have custom support in the database logger.")
#tag = app.params.experiment_tag
#query = """SELECT cell.id from `%s_cell` cell
# JOIN `%s_isoform` isoform ON cell.isoform_id = isoform.id
# JOIN `%s_trial` trial ON isoform.trial_id = trial.id
# WHERE isoform.name = '%s' AND trial.trial = %d""" % (
# tag, tag, tag, isoform_name, app.params.input.trial)
#cursor = app.execute_query(query)
#results = cursor.fetchall()
#assert len(results) == 1
#self.cell = Cell(app, cell_id = results[0][0])
if make_cell:
self.cell = Cell(app, crystal=crystal, isoform_id = None)
kwargs['cell_id'] = self.cell.id
else:
self.cell = None
db_proxy.__init__(self, app, "%s_crystal" % app.params.experiment_tag, id=crystal_id, **kwargs)
self.crystal_id = self.id
if crystal is None:
self.cell = Cell(app, cell_id = self.cell_id)
class Isoform(db_proxy):
def __init__(self, app, isoform_id=None, **kwargs):
db_proxy.__init__(self, app, "%s_isoform" % app.params.experiment_tag, id=isoform_id, **kwargs)
self.isoform_id = self.id
def __getattr__(self, key):
if key == "cell":
cells = self.app.get_all_x(Cell, 'cell', where = "WHERE isoform_id = %d"%self.id)
assert len(cells) == 1
return cells[0]
else:
return super(Isoform, self).__getattr__(key)
class Cell(db_proxy):
def __init__(self, app, cell_id = None, crystal = None, init_bins = False, **kwargs):
assert [cell_id, crystal].count(None) in [1,2]
if crystal is not None:
for key, p in zip(['a', 'b', 'c', 'alpha', 'beta', 'gamma'], crystal.get_unit_cell().parameters()):
kwargs['cell_%s'%key] = p
kwargs['lookup_symbol'] = crystal.get_space_group().type().lookup_symbol()
db_proxy.__init__(self, app, "%s_cell" % app.params.experiment_tag, id=cell_id, **kwargs)
self.cell_id = self.id
assert [self.isoform_id, self.trial_id].count(None) in [1, 2]
if self.isoform_id is not None:
self.isoform = Isoform(app, isoform_id = self.isoform_id)
else:
self.isoform = None
if init_bins:
self._bins = app.get_cell_bins(self.id)
self._bins_set = True
else:
self._bins = []
self._bins_set = False
def __getattr__(self, key):
if key == "unit_cell":
from cctbx.uctbx import unit_cell
return unit_cell([self.cell_a, self.cell_b, self.cell_c,
self.cell_alpha, self.cell_beta, self.cell_gamma])
if key == "bins":
if len(self._bins) == 0 and not self._bins_set:
self._bins = self.app.get_cell_bins(self.id)
self._bins_set = True
return self._bins
else:
return super(Cell, self).__getattr__(key)
def __setattr__(self, key, value):
if key == "bins":
self._bins = value
else:
return super(Cell, self).__setattr__(key, value)
class Bin(db_proxy):
def __init__(self, app, bin_id = None, **kwargs):
db_proxy.__init__(self, app, "%s_bin" % app.params.experiment_tag, id=bin_id, **kwargs)
self.bin_id = self.id
class Cell_Bin(db_proxy):
def __init__(self, app, cell_bin_id = None, **kwargs):
db_proxy.__init__(self, app, "%s_cell_bin" % app.params.experiment_tag, id=cell_bin_id, **kwargs)
self.cell_bin_id = self.id
|
11460365
|
from pyglet_gui.controllers import ContinuousStateController
from pyglet_gui.core import Viewer
class Slider(ContinuousStateController, Viewer):
PATH = 'slider'
IMAGE_BAR = 'bar'
IMAGE_KNOB = 'knob'
IMAGE_STEP = 'step'
def __init__(self, value=0.0, min_value=0.0, max_value=1.0, on_set=None, steps=None, width=0, height=0):
ContinuousStateController.__init__(self, value=value,
min_value=min_value,
max_value=max_value,
on_set=on_set)
Viewer.__init__(self, width, height)
self._bar = None # a bar where the knob slides.
self._knob = None # the knob that moves along the bar.
self._offset = (0, 0) # offset of the knob image to its central position
self._padding = (0, 0, 0, 0) # padding of the bar image to its central position
self.steps = steps
self._markers = [] # markers in case of discrete steps.
self._step_offset = (0, 0)
def get_path(self):
return self.PATH
def load_graphics(self):
theme = self.theme[self.get_path()]
color = theme['gui_color']
self._bar = theme[self.IMAGE_BAR]['image'].generate(color, **self.get_batch('foreground'))
self._padding = theme[self.IMAGE_BAR]['padding']
self._knob = theme[self.IMAGE_KNOB]['image'].generate(color, **self.get_batch('highlight'))
self._offset = theme[self.IMAGE_KNOB]['offset']
if self.steps is not None:
image_path = self.IMAGE_STEP
for n in range(0, self.steps + 1):
self._markers.append(theme[image_path]['image'].generate(color, **self.get_batch('background')))
self._step_offset = theme[image_path]['offset']
def unload_graphics(self):
self._knob.unload()
self._bar.unload()
for marker in self._markers:
marker.unload()
self._markers = []
def hit_test(self, x, y):
return self.is_inside(x, y)
def set_knob_pos(self, pos):
"""
A setter for value, but using normalized values.
"""
pos = max(min(pos, 1.0), 0.0)
self.set_value(self._min_value + (self._max_value - self._min_value) * pos)
if self._bar is not None and self._knob is not None:
x, y, width, height = self._bar.get_content_region()
offset_x, offset_y = self._offset
self._knob.update(x + int(width * pos) + offset_x,
y + offset_y,
self._knob.width, self._knob.height)
def _knob_pos(self):
"""
        The normalized position of the knob along the bar, computed from the
        current value.
"""
return max(min(float(self._value - self._min_value) / (self._max_value - self._min_value), 1.0), 0.0)
def _snap_to_nearest(self):
"""
Snaps the knob and value to a discrete value dictated by steps.
"""
assert self.steps is not None
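        # Round the continuous knob position to the nearest of the
        # (steps + 1) equally spaced stops.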
pos = float(int(self._knob_pos() * self.steps + 0.5))/self.steps
self.set_knob_pos(pos)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
raise NotImplementedError
def on_mouse_press(self, x, y, button, modifiers):
return self.on_mouse_drag(x, y, 0, 0, button, modifiers)
def on_mouse_release(self, x, y, button, modifiers):
if self.steps is not None:
self._snap_to_nearest()
def delete(self):
ContinuousStateController.delete(self)
Viewer.delete(self)
class HorizontalSlider(Slider):
def __init__(self, value=0.0, min_value=0.0, max_value=1.0, steps=None,
width=100, on_set=None):
Slider.__init__(self, value=value,
min_value=min_value,
max_value=max_value,
steps=steps,
on_set=on_set)
self.min_width = width
def layout(self):
left, right, top, bottom = self._padding
self._bar.update(self.x + left, self.y + bottom,
self.width - left - right,
self.height - top - bottom)
x, y, width, height = self._bar.get_content_region()
# knob is positioned with an (x,y) offset
# since its graphics are on its bottom-left corner.
offset_x, offset_y = self._offset
self._knob.update(x + int(width * self._knob_pos()) + offset_x,
y + offset_y,
self._knob.width, self._knob.height)
if self.steps is not None:
step = float(width) / self.steps
offset_x, offset_y = self._step_offset
for n in range(0, self.steps + 1):
self._markers[n].update(int(x + step * n) + offset_x,
y + offset_y,
self._markers[n].width,
self._markers[n].height)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
bar_x, bar_y, bar_width, bar_height = self._bar.get_content_region()
self.set_knob_pos(float(x - bar_x) / bar_width)
return True
def compute_size(self):
width, height = self._bar.get_needed_size(self.min_width, 0)
left, right, top, bottom = self._padding
return width + left + right, height + top + bottom
|
11460373
|
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
import torch.nn as nn
from torch.utils.data import DataLoader
import etw_pytorch_utils as pt_utils
import pprint
import os.path as osp
import os
import argparse
from pointnet2.models import Pointnet2SemMSG as Pointnet
from pointnet2.models.pointnet2_msg_sem import model_fn_decorator
from pointnet2.data import Indoor3DSemSeg
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument(
"-batch_size", type=int, default=32, help="Batch size [default: 32]"
)
parser.add_argument(
"-num_points",
type=int,
default=4096,
help="Number of points to train with [default: 4096]",
)
parser.add_argument(
"-weight_decay",
type=float,
default=0,
help="L2 regularization coeff [default: 0.0]",
)
parser.add_argument(
"-lr", type=float, default=1e-2, help="Initial learning rate [default: 1e-2]"
)
parser.add_argument(
"-lr_decay",
type=float,
default=0.5,
help="Learning rate decay gamma [default: 0.5]",
)
parser.add_argument(
"-decay_step",
type=float,
default=2e5,
help="Learning rate decay step [default: 20]",
)
parser.add_argument(
"-bn_momentum",
type=float,
default=0.9,
help="Initial batch norm momentum [default: 0.9]",
)
parser.add_argument(
"-bn_decay",
type=float,
default=0.5,
help="Batch norm momentum decay gamma [default: 0.5]",
)
parser.add_argument(
"-checkpoint", type=str, default=None, help="Checkpoint to start from"
)
parser.add_argument(
"-epochs", type=int, default=200, help="Number of epochs to train for"
)
parser.add_argument(
"-run_name",
type=str,
default="sem_seg_run_1",
help="Name for run in tensorboard_logger",
)
parser.add_argument("--visdom-port", type=int, default=8097)
parser.add_argument("--visdom", action="store_true")
lr_clip = 1e-5
bnm_clip = 1e-2
if __name__ == "__main__":
args = parser.parse_args()
test_set = Indoor3DSemSeg(args.num_points, train=False)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=True,
pin_memory=True,
num_workers=2,
)
train_set = Indoor3DSemSeg(args.num_points)
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
pin_memory=True,
num_workers=2,
shuffle=True,
)
model = Pointnet(num_classes=13, input_channels=6, use_xyz=True)
model.cuda()
optimizer = optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
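    # Step-decay schedules as functions of the global iteration: the LR and BN
    # momentum multipliers decay by their gamma once per `decay_step` training
    # examples seen, floored so the effective LR never drops below lr_clip and
    # BN momentum never below bnm_clip.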
lr_lbmd = lambda it: max(
args.lr_decay ** (int(it * args.batch_size / args.decay_step)),
lr_clip / args.lr,
)
bnm_lmbd = lambda it: max(
args.bn_momentum
* args.bn_decay ** (int(it * args.batch_size / args.decay_step)),
bnm_clip,
)
    # Default values.
    it = -1  # initial value for `LambdaLR` and `BNMomentumScheduler`
best_loss = 1e10
start_epoch = 1
# load status from checkpoint
if args.checkpoint is not None:
checkpoint_status = pt_utils.load_checkpoint(
model, optimizer, filename=args.checkpoint.split(".")[0]
)
if checkpoint_status is not None:
it, start_epoch, best_loss = checkpoint_status
lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lambda=lr_lbmd, last_epoch=it)
bnm_scheduler = pt_utils.BNMomentumScheduler(
model, bn_lambda=bnm_lmbd, last_epoch=it
)
    it = max(it, 0)  # initial value for `trainer.train`
model_fn = model_fn_decorator(nn.CrossEntropyLoss())
if args.visdom:
viz = pt_utils.VisdomViz(port=args.visdom_port)
else:
viz = pt_utils.CmdLineViz()
viz.text(pprint.pformat(vars(args)))
if not osp.isdir("checkpoints"):
os.makedirs("checkpoints")
trainer = pt_utils.Trainer(
model,
model_fn,
optimizer,
checkpoint_name="checkpoints/pointnet2_semseg",
best_name="checkpoints/pointnet2_semseg_best",
lr_scheduler=lr_scheduler,
bnm_scheduler=bnm_scheduler,
viz=viz,
)
trainer.train(
it, start_epoch, args.epochs, train_loader, test_loader, best_loss=best_loss
)
if start_epoch == args.epochs:
_ = trainer.eval_epoch(test_loader)
|
11460377
|
from runners.python import Submission
class MathieuSubmission(Submission):
def run(self, s):
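        # Each input line names a node followed by its children (tokens from
        # index 3 onward, after stripping commas). The root is the only node
        # that has children but never appears as anyone's child.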
inputs = [line.replace(',','').split() for line in s.split('\n')]
roots = set()
sons = set()
for line in inputs:
if len(line) > 2:
roots.add(line[0])
for son in line[3:]:
sons.add(son)
return (roots-sons).pop()
|
11460378
|
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
if TYPE_CHECKING:
from cleo.testers.command_tester import CommandTester
from tests.types import CommandTesterFactory
@pytest.fixture()
def tester(command_tester_factory: CommandTesterFactory) -> CommandTester:
return command_tester_factory("plugin show")
def test_deprecation_warning(tester: CommandTester) -> None:
tester.execute("")
assert (
tester.io.fetch_error()
== "This command is deprecated. Use self show plugins command instead.\n"
)
|
11460476
|
import tensorflow as tf
import numpy as np
def predict_with_model(model, imgpath):
image = tf.io.read_file(imgpath)
image = tf.image.decode_png(image, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.image.resize(image, [60,60]) # (60,60,3)
image = tf.expand_dims(image, axis=0) # (1,60,60,3)
predictions = model.predict(image) # [0.005, 0.00003, 0.99, 0.00 ....]
predictions = np.argmax(predictions) # 2
return predictions
if __name__=="__main__":
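    # Two sample test-image paths; the second assignment overrides the first.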
img_path = "D:\\Datasets\\GTSRB\\raw_downloaded_dataset\\GTSRB-GermanTrafficSignRecognitionBenchmark\\Test\\2\\00409.png"
img_path = "D:\\Datasets\\GTSRB\\raw_downloaded_dataset\\GTSRB-GermanTrafficSignRecognitionBenchmark\\Test\\0\\00807.png"
model = tf.keras.models.load_model('./Models')
prediction = predict_with_model(model, img_path)
print(f"prediction = {prediction}")
|
11460478
|
import ray
import math
import click
from torch import cuda
from pathlib import Path
from quince.application import workflows
@click.group(chain=True)
@click.pass_context
def cli(context):
context.obj = {"n_gpu": cuda.device_count()}
@cli.command("train")
@click.option(
"--job-dir",
type=str,
required=True,
help="location for writing checkpoints and results",
)
@click.option("--num-trials", default=1, type=int, help="number of trials, default=1")
@click.option(
"--gpu-per-trial",
default=0.0,
type=float,
help="number of gpus for each trial, default=0",
)
@click.option(
"--cpu-per-trial",
default=1.0,
type=float,
help="number of cpus for each trial, default=1",
)
@click.option("--verbose", default=False, type=bool, help="verbosity default=False")
@click.option(
"--seed", default=1331, type=int, help="random number generator seed, default=1331",
)
@click.pass_context
def train(
context, job_dir, num_trials, gpu_per_trial, cpu_per_trial, verbose, seed,
):
ray.init(
num_gpus=context.obj["n_gpu"],
dashboard_host="127.0.0.1",
ignore_reinit_error=True,
)
gpu_per_trial = 0 if context.obj["n_gpu"] == 0 else gpu_per_trial
context.obj.update(
{
"job_dir": job_dir,
"num_trials": num_trials,
"gpu_per_trial": gpu_per_trial,
"cpu_per_trial": cpu_per_trial,
"verbose": verbose,
"seed": seed,
"tune": False,
}
)
@cli.command("tune")
@click.option(
"--job-dir",
type=str,
required=True,
help="location for writing checkpoints and results",
)
@click.option(
"--max-samples",
default=100,
type=int,
help="maximum number of search space samples, default=100",
)
@click.option(
"--gpu-per-trial",
default=0.0,
type=float,
help="number of gpus for each trial, default=0",
)
@click.option(
"--cpu-per-trial",
default=1.0,
type=float,
help="number of cpus for each trial, default=1",
)
@click.option(
"--seed", default=1331, type=int, help="random number generator seed, default=1331",
)
@click.pass_context
def tune(
context, job_dir, max_samples, gpu_per_trial, cpu_per_trial, seed,
):
ray.init(
num_gpus=context.obj["n_gpu"],
dashboard_host="127.0.0.1",
ignore_reinit_error=True,
)
gpu_per_trial = 0 if context.obj["n_gpu"] == 0 else gpu_per_trial
context.obj.update(
{
"job_dir": job_dir,
"max_samples": max_samples,
"gpu_per_trial": gpu_per_trial,
"cpu_per_trial": cpu_per_trial,
"seed": seed,
"tune": True,
}
)
@cli.command("ihdp")
@click.pass_context
@click.option(
"--root",
type=str,
default=None,
help="location of dataset, default=~/quince_datasets/",
)
@click.option(
"--hidden-confounding",
default=True,
type=bool,
help="Censor hidden confounder, default=True",
)
@click.option(
"--beta-u",
default=None,
type=float,
help="Coefficient value for hidden confounder, random if None, default=None",
)
def ihdp(
context, root, hidden_confounding, beta_u,
):
job_dir = Path(context.obj.get("job_dir"))
dataset_name = "ihdp"
experiment_dir = job_dir / dataset_name / f"hc-{hidden_confounding}_beta-{beta_u}"
context.obj.update(
{
"dataset_name": dataset_name,
"experiment_dir": str(experiment_dir),
"ds_train": {
"root": root,
"split": "train",
"mode": "mu",
"hidden_confounding": hidden_confounding,
"beta_u": beta_u,
"seed": context.obj.get("seed"),
},
"ds_valid": {
"root": root,
"split": "valid",
"mode": "mu",
"hidden_confounding": hidden_confounding,
"beta_u": beta_u,
"seed": context.obj.get("seed"),
},
"ds_test": {
"root": root,
"split": "test",
"mode": "mu",
"hidden_confounding": hidden_confounding,
"beta_u": beta_u,
"seed": context.obj.get("seed"),
},
}
)
@cli.command("hcmnist")
@click.pass_context
@click.option(
"--root",
type=str,
default=None,
help="location of dataset, default=~/quince_datasets/",
)
@click.option(
"--gamma-star",
default=math.exp(1.0),
type=float,
help="Ground truth level of hidden confounding, default=2.7",
)
@click.option(
"--theta",
default=4.0,
type=float,
help="Coefficient for u effect on y, default=4.0",
)
@click.option(
"--beta",
default=0.75,
type=float,
help="Coefficient for x effect on t, default=2.0",
)
@click.option(
"--sigma",
default=1.0,
type=float,
help="standard deviation of random noise in y, default=1.0",
)
@click.option(
"--domain-limit",
default=2.0,
type=float,
help="Domain of x is [-domain_limit, domain_limit], default=2.5",
)
def hcmnist(
context, root, gamma_star, theta, beta, sigma, domain_limit,
):
job_dir = Path(context.obj.get("job_dir"))
dataset_name = "hcmnist"
experiment_dir = (
job_dir
/ dataset_name
/ f"gs-{gamma_star:.02f}_th-{theta:.02f}_be-{beta:.02f}_si-{sigma:.02f}_dl-{domain_limit:.02f}"
)
context.obj.update(
{
"dataset_name": dataset_name,
"experiment_dir": str(experiment_dir),
"ds_train": {
"root": root,
"gamma_star": gamma_star,
"split": "train",
"theta": theta,
"beta": beta,
"mode": "mu",
"p_u": "bernoulli",
"sigma_y": sigma,
"domain": domain_limit,
"seed": context.obj.get("seed"),
},
"ds_valid": {
"root": root,
"gamma_star": gamma_star,
"split": "valid",
"theta": theta,
"beta": beta,
"mode": "mu",
"p_u": "bernoulli",
"sigma_y": sigma,
"domain": domain_limit,
"seed": context.obj.get("seed") + 1,
},
"ds_test": {
"root": root,
"gamma_star": gamma_star,
"split": "test",
"theta": theta,
"beta": beta,
"mode": "mu",
"p_u": "bernoulli",
"sigma_y": sigma,
"domain": domain_limit,
"seed": context.obj.get("seed") + 2,
},
}
)
@cli.command("synthetic")
@click.pass_context
@click.option(
"--num-examples",
default=1000,
type=int,
help="number of training examples, defaul=1000",
)
@click.option(
"--gamma-star",
default=math.exp(1.0),
type=float,
help="Ground truth level of hidden confounding, default=2.7",
)
@click.option(
"--theta",
default=4.0,
type=float,
help="Coefficient for u effect on y, default=4.0",
)
@click.option(
"--beta",
default=0.75,
type=float,
help="Coefficient for x effect on t, default=2.0",
)
@click.option(
"--sigma",
default=1.0,
type=float,
help="standard deviation of random noise in y, default=1.0",
)
@click.option(
"--domain-limit",
default=2.0,
type=float,
help="Domain of x is [-domain_limit, domain_limit], default=2.5",
)
def synthetic(
context, num_examples, gamma_star, theta, beta, sigma, domain_limit,
):
job_dir = Path(context.obj.get("job_dir"))
dataset_name = "synthetic"
experiment_dir = (
job_dir
/ dataset_name
/ f"ne-{num_examples}_gs-{gamma_star:.02f}_th-{theta:.02f}_be-{beta:.02f}_si-{sigma:.02f}_dl-{domain_limit:.02f}"
)
context.obj.update(
{
"dataset_name": dataset_name,
"experiment_dir": str(experiment_dir),
"ds_train": {
"num_examples": num_examples,
"gamma_star": gamma_star,
"theta": theta,
"beta": beta,
"mode": "mu",
"p_u": "bernoulli",
"sigma_y": sigma,
"domain": domain_limit,
"seed": context.obj.get("seed"),
},
"ds_valid": {
"num_examples": num_examples // 10,
"gamma_star": gamma_star,
"theta": theta,
"beta": beta,
"mode": "mu",
"p_u": "bernoulli",
"sigma_y": sigma,
"domain": domain_limit,
"seed": context.obj.get("seed") + 1,
},
"ds_test": {
"num_examples": min(num_examples, 2000),
"gamma_star": gamma_star,
"theta": theta,
"beta": beta,
"mode": "mu",
"p_u": "bernoulli",
"sigma_y": sigma,
"domain": domain_limit,
"seed": context.obj.get("seed") + 2,
},
}
)
@cli.command("ensemble")
@click.pass_context
@click.option("--dim-hidden", default=400, type=int, help="num neurons")
@click.option("--num-components", default=5, type=int, help="num mixture components")
@click.option("--depth", default=3, type=int, help="depth of feature extractor")
@click.option(
"--negative-slope",
default=-1,
type=float,
help="negative slope of leaky relu, default=-1 use elu",
)
@click.option(
"--dropout-rate", default=0.15, type=float, help="dropout rate, default=0.1"
)
@click.option(
"--spectral-norm",
default=0.95,
type=float,
help="Spectral normalization coefficient. If 0.0 do not use spectral norm, default=0.0",
)
@click.option(
"--learning-rate",
default=1e-3,
type=float,
help="learning rate for gradient descent, default=1e-3",
)
@click.option(
"--batch-size",
default=32,
type=int,
help="number of examples to read during each training step, default=100",
)
@click.option(
"--epochs", type=int, default=500, help="number of training epochs, default=50"
)
@click.option(
"--ensemble-size",
type=int,
default=10,
help="number of models in ensemble, default=1",
)
def ensemble(
context,
dim_hidden,
num_components,
depth,
negative_slope,
dropout_rate,
spectral_norm,
learning_rate,
batch_size,
epochs,
ensemble_size,
):
if context.obj["tune"]:
context.obj.update(
{"epochs": epochs, "ensemble_size": ensemble_size,}
)
workflows.tuning.hyper_tune(config=context.obj)
else:
context.obj.update(
{
"dim_hidden": dim_hidden,
"depth": depth,
"num_components": num_components,
"negative_slope": negative_slope,
"dropout_rate": dropout_rate,
"spectral_norm": spectral_norm,
"learning_rate": learning_rate,
"batch_size": batch_size,
"epochs": epochs,
"ensemble_size": ensemble_size,
}
)
@ray.remote(
num_gpus=context.obj.get("gpu_per_trial"),
num_cpus=context.obj.get("cpu_per_trial"),
)
def trainer(**kwargs):
func = workflows.training.ensemble_trainer(**kwargs)
return func
results = []
for trial in range(context.obj.get("num_trials")):
for ensemble_id in range(ensemble_size):
results.append(
trainer.remote(
config=context.obj,
experiment_dir=context.obj.get("experiment_dir"),
trial=trial,
ensemble_id=ensemble_id,
)
)
ray.get(results)
@cli.command("evaluate")
@click.option(
"--experiment-dir",
type=str,
required=True,
help="location for reading checkpoints",
)
@click.option(
"--output-dir",
type=str,
required=False,
default=None,
help="location for writing results",
)
@click.pass_context
def evaluate(
context, experiment_dir, output_dir,
):
output_dir = experiment_dir if output_dir is None else output_dir
context.obj.update(
{"experiment_dir": experiment_dir, "output_dir": output_dir,}
)
@cli.command("compute-intervals")
@click.option(
"--mc-samples",
type=int,
required=False,
default=300,
help="Number of samples from p(y | x, t), default=100",
)
@click.option(
"--gpu-per-trial",
default=0.0,
type=float,
help="number of gpus for each trial, default=0",
)
@click.option(
"--cpu-per-trial",
default=1.0,
type=float,
help="number of cpus for each trial, default=1",
)
@click.pass_context
def compute_intervals(
context, mc_samples, gpu_per_trial, cpu_per_trial,
):
ray.init(
num_gpus=context.obj["n_gpu"],
dashboard_host="127.0.0.1",
ignore_reinit_error=True,
)
@ray.remote(
num_gpus=gpu_per_trial, num_cpus=cpu_per_trial,
)
def evaluator(**kwargs):
func = workflows.evaluation.compute_intervals_ensemble(**kwargs)
return func
experiment_dir = Path(context.obj.get("experiment_dir"))
results = []
for trial_dir in sorted(experiment_dir.iterdir()):
if "trial-" not in str(trial_dir):
continue
results.append(evaluator.remote(trial_dir=trial_dir, mc_samples=mc_samples))
ray.get(results)
@cli.command("compute-intervals-kernel")
@click.option(
"--gpu-per-trial",
default=0.0,
type=float,
help="number of gpus for each trial, default=0",
)
@click.option(
"--cpu-per-trial",
default=1.0,
type=float,
help="number of cpus for each trial, default=1",
)
@click.pass_context
def compute_intervals_kernel(
context, gpu_per_trial, cpu_per_trial,
):
ray.init(
num_gpus=context.obj["n_gpu"],
dashboard_host="127.0.0.1",
ignore_reinit_error=True,
)
@ray.remote(
num_gpus=gpu_per_trial, num_cpus=cpu_per_trial,
)
def evaluator(**kwargs):
func = workflows.evaluation.compute_intervals_kernel(**kwargs)
return func
experiment_dir = Path(context.obj.get("experiment_dir"))
results = []
for trial_dir in sorted(experiment_dir.iterdir()):
if "trial-" not in str(trial_dir):
continue
results.append(evaluator.remote(trial_dir=trial_dir))
ray.get(results)
@cli.command("print-summary")
@click.pass_context
def print_summary(context,):
experiment_dir = Path(context.obj.get("experiment_dir"))
workflows.evaluation.print_summary(experiment_dir=experiment_dir, kernel=False)
@cli.command("paired-t-test")
@click.pass_context
def paired_t_test(context,):
experiment_dir = Path(context.obj.get("experiment_dir"))
workflows.evaluation.paired_t_test(experiment_dir=experiment_dir)
@cli.command("print-summary-kernel")
@click.pass_context
def print_summary_kernel(context,):
experiment_dir = Path(context.obj.get("experiment_dir"))
workflows.evaluation.print_summary(experiment_dir=experiment_dir, kernel=True)
@cli.command("plot-deferral")
@click.pass_context
def plot_deferral(context,):
experiment_dir = Path(context.obj.get("experiment_dir"))
workflows.evaluation.plot_deferral(experiment_dir=experiment_dir)
@cli.command("plot-ignorance")
@click.option(
"--trial", default=0, type=int, help="trial, default=0",
)
@click.pass_context
def plot_ignorance(context, trial):
trial_dir = Path(context.obj.get("experiment_dir")) / f"trial-{trial:03d}"
workflows.evaluation.plot_ignorance(trial_dir=trial_dir)
@cli.command("plot-errorbars")
@click.option(
"--trial", default=0, type=int, help="trial, default=0",
)
@click.pass_context
def plot_errorbars(context, trial):
trial_dir = Path(context.obj.get("experiment_dir")) / f"trial-{trial:03d}"
workflows.evaluation.plot_errorbars(trial_dir=trial_dir)
@cli.command("plot-errorbars-kernel")
@click.option(
"--trial", default=0, type=int, help="trial, default=0",
)
@click.pass_context
def plot_errorbars_kernel(context, trial):
trial_dir = Path(context.obj.get("experiment_dir")) / f"trial-{trial:03d}"
workflows.evaluation.plot_errorbars_kernel(trial_dir=trial_dir)
if __name__ == "__main__":
cli()
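# Illustrative invocation (command and option names as defined above; the
# entry-point module name is an assumption):
#   python cli.py train --job-dir output/ synthetic ensemble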
|
11460511
|
import unittest
from Vintageous.vi.utils import modes
from Vintageous.tests import set_text
from Vintageous.tests import add_sel
from Vintageous.tests import get_sel
from Vintageous.tests import first_sel
from Vintageous.tests import ViewTest
class Test_vi_dd_InNormalMode(ViewTest):
def testDeletesLastLine(self):
self.write('abc\nabc\nabc')
self.clear_sel()
self.add_sel(self.R((2, 0), (2, 0)))
self.view.run_command('_vi_dd', {'mode': modes.INTERNAL_NORMAL})
expected = self.view.substr(self.R(0, self.view.size()))
self.assertEqual(expected, 'abc\nabc')
|
11460526
|
import sys
sys.path.append('../../python')
import caffe
from caffe import layers as L, params as P
from caffe.coord_map import crop
def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
num_output=nout, pad=pad, group=group)
return conv, L.ReLU(conv, in_place=True)
def max_pool(bottom, ks, stride=1):
return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)
def fcn(split):
n = caffe.NetSpec()
pydata_params = dict(split=split, mean=(104.00699, 116.66877, 122.67892),
seed=1337)
if split == 'train':
pydata_params['sbdd_dir'] = '../data/sbdd/dataset'
pylayer = 'SBDDSegDataLayer'
else:
pydata_params['voc_dir'] = '../data/pascal/VOC2011'
pylayer = 'VOCSegDataLayer'
n.data, n.label = L.Python(module='voc_layers', layer=pylayer,
ntop=2, param_str=str(pydata_params))
# the base net
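    # pad=100 on the first convolution is the standard FCN trick: it ensures
    # the coarse output covers the full input so the final crop() can align
    # the upsampled score map with the data layer.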
n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, pad=100)
n.pool1 = max_pool(n.relu1, 3, stride=2)
n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2)
n.pool2 = max_pool(n.relu2, 3, stride=2)
n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1)
n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2)
n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2)
n.pool5 = max_pool(n.relu5, 3, stride=2)
# fully conv
n.fc6, n.relu6 = conv_relu(n.pool5, 6, 4096)
n.drop6 = L.Dropout(n.relu6, dropout_ratio=0.5, in_place=True)
n.fc7, n.relu7 = conv_relu(n.drop6, 1, 4096)
n.drop7 = L.Dropout(n.relu7, dropout_ratio=0.5, in_place=True)
n.score_fr = L.Convolution(n.drop7, num_output=21, kernel_size=1, pad=0,
param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])
n.upscore = L.Deconvolution(n.score_fr,
convolution_param=dict(num_output=21, kernel_size=63, stride=32,
bias_term=False),
param=[dict(lr_mult=0)])
n.score = crop(n.upscore, n.data)
n.loss = L.SoftmaxWithLoss(n.score, n.label,
loss_param=dict(normalize=True, ignore_label=255))
return n.to_proto()
def make_net():
with open('train.prototxt', 'w') as f:
f.write(str(fcn('train')))
with open('val.prototxt', 'w') as f:
f.write(str(fcn('seg11valid')))
if __name__ == '__main__':
make_net()
|
11460537
|
from collections import defaultdict, OrderedDict
from utils.sortedcollection import SortedCollection
from bisect import bisect_left
from math import log, exp
import sampling
from sortedcontainers import SortedListWithKey
measure_average = "Average_Restore_Old_Err"
class Sampling_Frequency(sampling.Sampling):
def __init__(self, save_counts = False,
name = None, **kwargs):
super(Sampling_Frequency, self).__init__(name = name, **kwargs)
if save_counts:
self.name += ".keepCounts"
self.saving_counts = save_counts
self.historical_nodes = dict()
self.average_restore = (0, 0)
def touch(self, node):
super(Sampling_Frequency, self).touch(node)
node.count += 1
def add_node(self, value, cost):
if self.saving_counts and value in self.historical_nodes:
new_node = self.restore_node(self.historical_nodes[value])
del self.historical_nodes[value]
self.Nodes[value] = new_node
else:
new_node = super(Sampling_Frequency, self).add_node(value, cost)
new_node.count = 1
new_node.is_restored = False
new_node.is_restoring = False
return new_node
def restore_node(self, old_node):
old_node.is_restoring = True
old_obj_f = self.objective_f(old_node)
self.touch(old_node)
old_node.is_restoring = False
old_node.is_restored = True
new_obj_f = self.objective_f(old_node)
if measure_average == "Average_Restore_ObjF":
update = new_obj_f
elif measure_average == "Average_Restore_Err":
if new_obj_f == 0:
return old_node
update = abs(self.last_obj_f - new_obj_f) / new_obj_f
elif measure_average == "Average_Restore_Old_Err":
if old_obj_f == 0:
return old_node
update = abs(self.last_obj_f - old_obj_f) / old_obj_f
self.average_restore = (self.average_restore[0] + update,
self.average_restore[1] + 1)
return old_node
def evict_node(self):
n = super(Sampling_Frequency, self).evict_node()
if self.saving_counts:
self.historical_nodes[n.value] = n
return n
def objective_f(self, node):
return node.cost * node.count
def Velo_Sample_Hyperbolic(name = "SV_Hyper", **kwargs):
strategy = Sample_Hyperbolic(name = name, **kwargs)
strategy.retention_strategy = sampling.SAMPLING_VELOCITY_RETENTION
return strategy
def Size_Aware_Hyperbolic(name = "S_Hyper_Sz", **kwargs):
strategy = Sample_Hyperbolic(name = name, **kwargs)
strategy.size_aware = True
return strategy
class Sample_Hyperbolic(Sampling_Frequency):
def __init__(self, leeway = 0.1, degrade = 1,
name = "S_Hyper", **kwargs):
name = "%s(%.0e; %.3f)" % (name, 1-degrade, leeway)
super(Sample_Hyperbolic, self).__init__(name = name, **kwargs)
self.degrade = degrade
self.leeway = leeway
self.last_miss = 0
self.count_min = 10 ** -10
self.size_aware = False
def touch(self, node):
if self.degrade != 1:
count_minus_min = (node.count - self.count_min) * (self.degrade ** (self.time - node.last_degrade))
node.count = self.count_min + count_minus_min
node.last_degrade = self.time
node.count += 1
self.time += 1
return node
def add_node(self, value, cost, size = 1):
# gdcost heuristic : cost = 1 / size
if self.size_aware and size != 1:
cost = float(cost) / size
new_node = super(Sample_Hyperbolic, self).add_node(value, cost)
# guess the count and delta for the object
new_node.last_degrade = self.time
new_node.entry_time = self.time - 1
self.last_miss = self.time
if not new_node.is_restored:
if self.leeway == 1 or cost == 0 or self.last_obj_f == 0:
return new_node
new_node.count = self.leeway + (1.0 - self.leeway) * (self.last_obj_f / cost)
# leeway = 1 - (proportion to weight guess)
# new_node.LA -= (time_guess * (1.0 - self.leeway))
# new_node.LA -= 1
# new_node.count = 1.0 - self.leeway
return new_node
def objective_f(self, node):
return self.objective_f_tdelta(node, 0)
def objective_f_tdelta(self, node, t_delta):
time = self.time + t_delta
if self.degrade != 1:
degrade_by = self.degrade ** (time - node.last_degrade)
return float(node.cost) * (self.count_min + ((node.count - self.count_min) * degrade_by)) / (time - node.entry_time)
else:
return float(node.cost * (node.count)) / (time - node.entry_time)
def moving_average(const = 0.97):
return (lambda a, b : ((const * a) + ((1.0 - const) * b)))
def replacement(a, b):
return b
class Sample_Hyperbolic_ClassTrack(Sample_Hyperbolic):
def __init__(self, update_cost = moving_average(), **kwargs):
super(Sample_Hyperbolic_ClassTrack, self).__init__(
name = "S_Hyper_ClassTrack", **kwargs)
self.costs = {}
self.update_cost_F = update_cost
def update_cost(self, cost_class, cost):
if cost_class in self.costs:
self.costs[cost_class] = self.update_cost_F(
self.costs[cost_class], cost)
else:
self.costs[cost_class] = cost
def add_node(self, value, cost, cost_class):
new_node = super(Sample_Hyperbolic_ClassTrack, self).add_node(value, cost)
self.update_cost(cost_class, cost)
new_node.cost_class = cost_class
return new_node
def objective_f_tdelta(self, node, t_delta):
node.cost = self.costs[node.cost_class]
return super(Sample_Hyperbolic_ClassTrack, self).objective_f_tdelta(node, t_delta)
class Sample_Hyperbolic_Pooled(Sample_Hyperbolic):
def __init__(self, *args, **kwargs):
super(Sample_Hyperbolic_Pooled, self).__init__(*args, name = "SP_Hyper", **kwargs)
self.retention_strategy = sampling.SAMPLING_POOLED_RETENTION
self.retain = 0
def touch(self, node):
super(Sample_Hyperbolic_Pooled, self).touch(node)
self.pooled_retain_recheck(node)
def add_node(self, *args, **kwargs):
new_node = super(Sample_Hyperbolic_Pooled, self).add_node(*args, **kwargs)
self.pooled_retain_recheck(new_node)
return new_node
class Sampling_LNC_R_W3(sampling.Sampling):
def __init__(self, k_access = 4, name = "S_LNC", **kwargs):
name = "%s(%d)" % (name, k_access)
super(Sampling_LNC_R_W3, self).__init__(name = name, **kwargs)
assert k_access >= 1
self.k_access = k_access
def add_node(self, value, cost):
new_node = super(Sampling_LNC_R_W3, self).add_node(value, cost)
new_node.accesses = [new_node.LA]
new_node.rate = (1.0 , new_node.LA)
return new_node
def touch(self, node):
super(Sampling_LNC_R_W3, self).touch(node)
if len(node.accesses) >= self.k_access:
node.accesses.pop(0)
node.accesses.append(node.LA)
node.rate = (float(len(node.accesses)), node.accesses[0])
def objective_f(self, node):
return node.rate[0] * node.cost / (self.time - node.rate[1])
class Sampling_TSP(sampling.Sampling):
def __init__(self, name = "S_TSP", **kwargs):
super(Sampling_TSP, self).__init__(name = name, **kwargs)
assert k_access >= 1
self.k_access = k_access
def add_node(self, value, cost):
new_node = super(Sampling_TSP, self).add_node(value, cost)
new_node.accesses = [new_node.LA]
new_node.rate_of_access = 1.0
return new_node
def touch(self, node):
super(Sampling_TSP, self).touch(node)
if len(node.accesses) >= self.k_access:
node.accesses.pop(0)
node.accesses.append(node.LA)
node.rate_of_access = float(len(node.accesses)) / node.accesses[0]
def objective_f(self, node):
return node.rate_of_access * node.cost
class Windowed_Hyper(Sample_Hyperbolic):
def __init__(self, window_size = 10**4, name = "W(%0.e)DegF", **kwargs):
super(Windowed_Hyper, self).__init__(degrade = 1.0, name = name % window_size, **kwargs)
self.window_size = window_size
def add_node(self, value, cost):
new_node = super(Windowed_Hyper, self).add_node(value, cost)
new_node.touches = [self.time - 1]
new_node.cur_entry_time = new_node.touches[0]
assert new_node.count >= 0.0 # should be set by parent!
return new_node
def touch(self, node):
if node.count <= 0:
node.cur_entry_time = self.time
node.touches.append(self.time)
node.count += 1.0
self.time += 1
return node
def objective_f(self, node):
# cutoff = bisect_left(node.touches, self.time - self.window_size)
# del node.touches[:cutoff]
window_start = self.time - self.window_size
while node.count > 0 and node.cur_entry_time < window_start:
node.touches.pop(0)
node.count -= 1.0
if node.count > 0:
node.cur_entry_time = node.touches[0]
else:
node.count = 0.0 # min to 0, can drop lower if we have "init-guess"
if node.count == 0:
return 0
return (node.cost * node.count) / (self.time - node.cur_entry_time)
class Windowed_Freq(Sampling_Frequency):
def __init__(self, window_size = 10**4, name = "W(%0.e)LFU", **kwargs):
super(Windowed_Freq, self).__init__(name = name % window_size, **kwargs)
self.window_size = window_size
def add_node(self, value, cost):
new_node = super(Windowed_Freq, self).add_node(value, cost)
new_node.touches = [self.time - 1]
new_node.cur_entry_time = new_node.touches[0]
assert new_node.count >= 0.0 # should be set by parent!
return new_node
def touch(self, node):
if node.count <= 0:
node.cur_entry_time = self.time
node.touches.append(self.time)
node.count += 1.0
self.time += 1
return node
def objective_f(self, node):
# cutoff = bisect_left(node.touches, self.time - self.window_size)
# del node.touches[:cutoff]
window_start = self.time - self.window_size
while node.count > 0 and node.cur_entry_time < window_start:
node.touches.pop(0)
node.count -= 1.0
if node.count > 0:
node.cur_entry_time = node.touches[0]
else:
node.count = 0.0 # min to 0, can drop lower if we have "init-guess"
if node.count == 0:
return 0
return (node.cost * node.count)
class Decrement_DegF(Sample_Hyperbolic):
def __init__(self, window_size = 10**4, name = "Dec(%0.e)DegF", **kwargs):
        super(Decrement_DegF, self).__init__(degrade = 1.0, name = name % window_size, **kwargs)
self.decr_window = window_size
def add_node(self, value, cost):
new_node = super(Sample_Hyperbolic, self).add_node(value, cost)
new_node.count = 1.0
new_node.cur_entry_time = self.time - 1
new_node.last_decrement = float(self.time - 1)
return new_node
def decr_node(self, node):
decr_by = int(( self.time - node.last_decrement ) / self.decr_window)
if decr_by <= 0:
return
new_count = node.count - decr_by
if new_count <= 0:
node.count = 0
node.cur_entry_time = self.time - 1
else:
new_entry_time = self.time - ((float(new_count) / node.count) * (self.time - node.cur_entry_time))
node.count = new_count
node.cur_entry_time = new_entry_time
node.last_decrement = self.time - 1
def touch(self, node):
self.time += 1
self.decr_node(node)
if node.count <= 0:
node.cur_entry_time = self.time - 1
node.count += 1.0
return node
def objective_f(self, node):
self.decr_node(node)
if node.count == 0:
return 0
return (node.cost * node.count) / (self.time - node.cur_entry_time)
class AppWin_DegF(Sample_Hyperbolic):
def __init__(self, window_size = 10**4, name = "AppWin(%0.e)DegF", **kwargs):
super(AppWin_DegF, self).__init__(degrade = 1.0, name = name % window_size, **kwargs)
self.decr_window = window_size
def add_node(self, value, cost):
new_node = super(Sample_Hyperbolic, self).add_node(value, cost)
new_node.count = 1.0
new_node.cur_entry_time = self.time - 1
return new_node
def decr_node(self, node, window_start):
node.count *= float(self.decr_window) / (self.time - node.cur_entry_time)
node.cur_entry_time = window_start
def touch(self, node):
self.time += 1
window_start = self.time - self.decr_window
if node.cur_entry_time < window_start:
self.decr_node(node, window_start)
node.count += 1.0
return node
def objective_f(self, node):
window_start = self.time - self.decr_window
if node.cur_entry_time < window_start:
self.decr_node(node, window_start)
return (node.cost * node.count) / (self.time - node.cur_entry_time)
class GhostCache(object):
def __init__(self, gc_prop):
self.gc_prop = gc_prop
self.cache_size = -1
self.gc_sz = -1
self.ghost_cache = OrderedDict()
def touch(self, node):
return self.s_policy.touch(node)
def add_node(self, value, cost):
if self.gc_sz == -1:
self.gc_sz = int(self.gc_prop * self.cache_size)
assert self.cache_size != -1
if value in self.ghost_cache:
new_node = self.ghost_cache.pop(value)
self.touch(new_node) # should increment time.
return self.s_policy.add_existing_node(new_node)
return self.s_policy.add_node(value, cost)
def evict_node(self):
evicted = self.s_policy.evict_node()
if len(self.ghost_cache) >= self.gc_sz:
self.ghost_cache.popitem(last=False)
self.ghost_cache[evicted.value] = evicted
return evicted
class GC_RMDegF(GhostCache):
def __init__(self, gc_prop = 0.01, name = "GC(%f)RM_DegF", **kwargs):
super(GC_RMDegF, self).__init__(gc_prop)
name = name % gc_prop
self.s_policy = RealMin_DegF(name = name, **kwargs)
self.name = self.s_policy.name
class Sample_TimeAwareLRFU(Sampling_Frequency):
def __init__(self, degrade = 0.9, leeway = 1, name = None, **kwargs):
if name == None:
name = "S_TA_LRFU(%f; %f)" % (degrade, leeway)
super(Sample_TimeAwareLRFU, self).__init__(name = name, **kwargs)
self.degrade = degrade
self.leeway = leeway
self.last_miss = 0
def touch(self, node):
time_delta = (self.time - node.LA)
node.count += 1
node.denom_degrade_part *= (self.degrade ** time_delta)
node.denom_degrade_part += time_delta
node.numer_degrade_part *= (self.degrade ** time_delta)
node.numer_degrade_part += 1
node.LA = self.time
self.time += 1
def add_node(self, value, cost):
new_node = super(Sample_TimeAwareLRFU, self).add_node(value, cost)
# TODO: guess the count and delta for the object ?
if not new_node.is_restored:
new_node.numer_degrade_part = 0.0
new_node.denom_degrade_part = 0.0
self.last_miss = self.time
return new_node
def objective_f(self, node):
time_delta = (self.time - node.LA)
degrade_by = (self.degrade ** time_delta)
pr_denom = (node.denom_degrade_part * degrade_by) + time_delta
pr_numer = (node.numer_degrade_part * degrade_by) + 1
return ((pr_numer / pr_denom) * node.cost)
class Sample_LRFU(Sampling_Frequency):
"""
the degrade constant used here is a function of the \lambda
defined in the LRFU '96 paper.
degrade = (1/2)^(\lambda)
"""
def __init__(self, degrade = 0.99999, name = "S_LRFU", **kwargs):
name = "%s(%.0e)" % (name, 1-degrade)
super(Sample_LRFU, self).__init__(name = name, **kwargs)
self.degrade = degrade
def touch(self, node):
time_delta = (self.time - node.LA)
old_counts = node.count
new_counts = 1 + (old_counts * (self.degrade ** time_delta))
node.count = new_counts
node.LA = self.time
self.time += 1
def add_node(self, value, cost):
new_node = super(Sample_LRFU, self).add_node(value, cost)
new_node.insert_time = self.time
# if self.last_obj_f > 0 and cost > 0:
# self.counts[value] = self.last_obj_f / cost
return new_node
def objective_f(self, node):
scale_counts = self.degrade ** (self.time - node.LA)
return node.cost * node.count * scale_counts
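# Hedged numerical illustration of the docstring above: with the default degrade = 0.99999,
# the implied LRFU lambda is -log2(degrade), roughly 1.44e-5, so a reference loses half of
# its weight after about ln(2) / (1 - degrade) ~ 69,000 touches. These figures are for
# intuition only and are not taken from the LRFU paper.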
class Sample_Frequency_Expiry(Sample_Hyperbolic):
def __init__(self, degrade_expiry = 1.01, name = "S_HyperExpiry",
**kwargs):
super(Sample_Frequency_Expiry, self).__init__(
name = "%s(%.3f)" % (name, degrade_expiry), **kwargs)
self.degrade_expiry = degrade_expiry
def add_node(self, value, cost, expires_at = -1):
node = super(Sample_Frequency_Expiry, self).add_node(value, cost)
node.expires_at = expires_at
return node
def set_expiry(self, node, expires_at):
node.expires_at = expires_at
def _expiry_weighted(self, node, time_expires):
return (1 - exp(-self.degrade_expiry * time_expires))
def expiry_weighted(self, node):
expires_at = node.expires_at
if expires_at == -1:
return 1
time_expires = expires_at - (self.time + 1)
if time_expires <= 0:
return 0
return self._expiry_weighted(node, time_expires)
def _expiry_weighted_poisson(self, node, time_expires):
lamb = float(node.count) / ((self.time + 1) - node.insert_time)
lamb_te = lamb * time_expires
rhs = exp(-1 * lamb_te) * (lamb_te + 1)
return (1.0 - rhs)
def _expiry_weighted_linear(self, node, time_expires):
midpoint = 10000.0
return min(midpoint, time_expires)
def objective_f(self, node):
base_f = super(Sample_Frequency_Expiry, self).objective_f(node)
return base_f * self.expiry_weighted(node)
class DummyList:
""" this class will pretend it's a list for you, but always disappoint."""
def add(self, key, value):
pass
class PQ_Frequency(Sampling_Frequency):
def __init__(self):
super(PQ_Frequency, self).__init__(sampling = 0)
self.nodes = SortedListWithKey(key = self.objective_f)
self.Nodes = DummyList()
def add_node(self, item, cost):
new_node = super(PQ_Frequency, self).add_node(item, cost)
self.nodes.add(new_node)
return new_node
def touch(self, node):
if not node.is_restoring:
self.nodes.remove(node)
super(PQ_Frequency, self).touch(node)
if not node.is_restoring:
self.nodes.add(node)
def evict_node(self):
to_evict = self.nodes.pop(0)
if self.saving_counts:
self.historical_nodes[to_evict.value] = to_evict
self.last_obj_f = self.objective_f(to_evict)
return to_evict
class RealMin_Frequency_Expiry(Sample_Frequency_Expiry):
def __init__(self, name = "RM_HyperExpiry", **kwargs):
super(RealMin_Frequency_Expiry, self).__init__(name = name,
**kwargs)
self.get_true_minimum = True
class RealMin_Hyper(Sample_Hyperbolic):
def __init__(self, name = "RM_Hyper", **kwargs):
super(RealMin_Hyper, self).__init__(name = name, **kwargs)
self.get_true_minimum = True
class PerfectKnowledge_Expiry(RealMin_Frequency_Expiry):
def __init__(self, degrade_expiry = 2.01):
super(PerfectKnowledge_Expiry, self).__init__(degrade_expiry = degrade_expiry,
name = "PK_Expiry")
def add_node(self, item, cost, popularity, expires_at):
new_node = super(PerfectKnowledge_Expiry, self).add_node(item, cost,
expires_at = expires_at)
new_node.popularity = popularity
return new_node
def objective_f(self, node):
return node.popularity * node.cost * self.expiry_weighted(node)
class Bucket_Frequency(sampling.LastAccess):
"""
bucket. perform frequency updates *per* bucket.
"""
def __init__(self, bucket_bounds):
        super(Bucket_Frequency, self).__init__(0)
self.bucket_bounds = bucket_bounds
self.bucket_queues = [ SortedCollection(key = self._get_count)
for _ in bucket_bounds ]
self.bucket_values = [ (0, 0) for _ in bucket_bounds ]
self.counts = defaultdict(lambda : 0)
self.generator = None
self.Nodes = DummyList()
def _get_count(self, node):
return self.counts[node.value]
    def touch(self, node):
        super(Bucket_Frequency, self).touch(node)
        self.bucket_queues[node.bucket_ix].remove(node)
        self.counts[node.value] += 1
        self.bucket_queues[node.bucket_ix].insert(node)
def evict_node(self):
numerators = [ ((float(b_numer) * self.counts[q[0].value] / b_denom), ix)
for (ix, ((b_numer, b_denom), q)) in
enumerate(zip(self.bucket_values, self.bucket_queues)) ]
objective_f, evict_from = min(numerators)
to_evict = self.bucket_queues[evict_from][0]
del self.bucket_queues[evict_from][0]
old_numer, old_denom = self.bucket_values[evict_from]
self.bucket_values[evict_from] = (old_numer - to_evict.cost, old_denom - 1)
return to_evict
def add_node(self, value, cost):
        new_node = super(Bucket_Frequency, self).add_node(value, cost)
bucket_ix = bisect_left(self.bucket_bounds, cost)
new_node.bucket_ix = bucket_ix
self.counts[value] += 1
self.bucket_queues[bucket_ix].insert(new_node)
old_numer , old_denom = self.bucket_values[bucket_ix]
self.bucket_values[bucket_ix] = (old_numer + cost, old_denom + 1)
return new_node
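# Hedged usage sketch for Bucket_Frequency (the boundaries below are illustrative): objects
# are binned by cost via bisect_left over bucket_bounds, and evict_node picks the bucket whose
# mean cost times the count of its least-frequent member is smallest.
#   policy = Bucket_Frequency(bucket_bounds=[10, 100, float('inf')])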
|
11460543
|
import boto3
import io
from PIL import Image, ImageDraw, ExifTags, ImageColor
if __name__ == "__main__":
photo='b1.jpg'
client=boto3.client('rekognition',region_name='us-east-1')
image = Image.open(open(photo,'rb'))
stream = io.BytesIO()
image.save(stream, format=image.format)
image_binary = stream.getvalue()
response = client.detect_faces(Image={'Bytes': image_binary}, Attributes=['ALL'])
imgWidth, imgHeight = image.size
draw = ImageDraw.Draw(image)
print('Detected faces for ' + photo)
for faceDetail in response['FaceDetails']:
print('The detected face is between ' + str(faceDetail['AgeRange']['Low'])
+ ' and ' + str(faceDetail['AgeRange']['High']) + ' years old')
box = faceDetail['BoundingBox']
left = imgWidth * box['Left']
top = imgHeight * box['Top']
width = imgWidth * box['Width']
height = imgHeight * box['Height']
print('Left: ' + '{0:.0f}'.format(left))
print('Top: ' + '{0:.0f}'.format(top))
print('Face Width: ' + "{0:.0f}".format(width))
print('Face Height: ' + "{0:.0f}".format(height))
points = (
(left,top),
(left + width, top),
(left + width, top + height),
(left , top + height),
(left, top)
)
draw.line(points, fill='#00d400', width=5)
image.show()
|
11460574
|
class DataPointType(object):
"""Data contract class for type DataPointType."""
# Enumeration value measurement
measurement = 0
# Enumeration value aggregation
aggregation = 1
|
11460661
|
import os
import unittest
from collections import defaultdict
from context import modules
class TestDocParser(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Set up test environment"""
cls.sgm_doc_path = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'test_data/test.sgm')
cls.tsv_doc_path = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'test_data/test.tsv')
cls.txt_doc_path = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'test_data/test.txt')
cls.txt_doc_mapping_path = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'test_data/test.doc_mapping.tsv')
cls.docs = defaultdict(list)
cls.sents = []
with open(cls.tsv_doc_path) as tsv_f:
for line in tsv_f:
doc_id, doc_sent = line.strip().split('\t')
cls.docs[doc_id].append(doc_sent)
cls.sents.append(doc_sent)
cls.total_sents = sum([len(doc) for doc in cls.docs.values()])
cls.total_docs = len(cls.docs.keys())
def test_init(self):
"""Test __init__ function of DocParser"""
sgm_doc_parser = modules.DocParser(self.sgm_doc_path)
txt_doc_parser = modules.DocParser(self.txt_doc_path, self.txt_doc_mapping_path)
txt_doc_parser2 = modules.DocParser(self.txt_doc_path, doc_length=2)
with self.assertRaises(Exception):
bad_doc_path = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'test_data/this_file_does_not_exists')
modules.DocParser(bad_doc_path)
self.assertEqual(sgm_doc_parser.total_docs, self.total_docs)
self.assertEqual(sgm_doc_parser.total_sents, self.total_sents)
self.assertEqual(txt_doc_parser.total_docs, self.total_docs)
self.assertEqual(txt_doc_parser.total_sents, self.total_sents)
self.assertEqual(txt_doc_parser2.total_docs, self.total_sents/2)
self.assertEqual(txt_doc_parser2.total_sents, self.total_sents)
def test_get_file_type(self):
"""Test whether it is getting correct file_type"""
doc_parser = modules.DocParser(self.sgm_doc_path)
self.assertEqual(doc_parser.get_file_type(self.sgm_doc_path), 'sgml')
self.assertEqual(doc_parser.get_file_type(self.txt_doc_path), 'txt')
def test_parse_sgml(self):
"""Test sgml parser"""
docs, total_sents = modules.DocParser.parse_sgml(self.sgm_doc_path)
self.assertEqual(self.total_sents, total_sents)
for doc_id, doc in docs:
self.assertEqual("\n".join(self.docs[doc_id]), "\n".join(doc))
def test_parse_txt(self):
"""Test txt parser"""
docs, total_sents = modules.DocParser.parse_txt(self.txt_doc_path, self.txt_doc_mapping_path)
self.assertEqual(self.total_sents, total_sents)
for doc_id, doc in docs:
self.assertEqual("\n".join(self.docs[doc_id]), "\n".join(doc))
docs, total_sents = modules.DocParser.parse_txt(self.txt_doc_path, doc_length=1)
for sentence, (doc_id, doc) in zip(self.sents, docs):
self.assertEqual(sentence, "\n".join(doc))
def test_get_docs(self):
"""Test get docs"""
docs = modules.DocParser(self.sgm_doc_path).get_docs()
for doc_id, doc in docs:
self.assertEqual("\n".join(self.docs[doc_id]), "\n".join(doc))
def test_get_queries(self):
"""Test get queries"""
queries = modules.DocParser(self.sgm_doc_path).get_queries()
for doc_id in self.docs:
for i, sent in enumerate(self.docs[doc_id]):
self.assertIn(("%s_%d" % (doc_id, i), sent), queries)
|
11460688
|
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QColor
from brainframe.api.bf_codecs import Zone
from brainframe_qt.ui.resources.config import RenderSettings
from brainframe_qt.ui.resources.video_items.base import PolygonItem, \
VideoItem
from .abstract_zone_item import AbstractZoneItem
class ZoneRegionItem(AbstractZoneItem, PolygonItem):
def __init__(self, zone: Zone, *,
render_config: RenderSettings,
parent: VideoItem,
color: QColor = AbstractZoneItem.BORDER_COLOR,
thickness: int = AbstractZoneItem.BORDER_THICKNESS,
line_style: Qt.PenStyle = Qt.SolidLine):
self.color = color
self.thickness = thickness
self.line_style = line_style
AbstractZoneItem.__init__(self, zone)
PolygonItem.__init__(self, zone.coords, border_color=color,
parent=parent)
def _init_style(self):
super()._init_style()
self.border_linetype = self.line_style
self.border_thickness = self.thickness
|
11460721
|
from .mesh import Mesh
from .main_parser import MainParser
__version__ = "0.2.0"
__author__ = "<NAME> <<EMAIL>>"
def parse(filename: str) -> Mesh:
"""Parse Gmsh .msh file and return `Mesh` object."""
mesh = Mesh()
mesh.set_name(filename)
parser = MainParser()
with open(filename, "r") as io:
parser.parse(mesh, io)
return mesh
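# Hedged usage sketch (the path below is illustrative and not shipped with the package):
#   mesh = parse("examples/plate.msh")
#   # `mesh` is a Mesh named after the file, populated by MainParser.parse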
|
11460752
|
import theano
import theano.tensor as T
from nn.initializers import Zero, One, Identity, Uniform, Normal, Xavier, Orthonormal
from nn.activations import sigmoid, tanh, relu, softmax
class Unit(object):
def __init__(self, name='unit'):
self.name = name
@staticmethod
def _set_param(shape, init_type=None, name=None):
if init_type == 'zero':
init = Zero()
elif init_type == 'one':
init = One()
elif init_type == 'xavier':
init = Xavier()
elif init_type == 'orth':
init = Orthonormal()
elif init_type == 'identity':
init = Identity()
elif init_type == 'uniform':
init = Uniform()
else:
init = Normal()
return init(shape=shape, name=name)
@staticmethod
def _set_activation(activation_type):
if activation_type == 'sigmoid':
return sigmoid
elif activation_type == 'tanh':
return tanh
elif activation_type == 'relu':
return relu
elif activation_type == 'softmax':
return softmax
return None
class Dense(Unit):
def __init__(self,
input_dim,
output_dim,
activation=None,
use_bias=True,
weight_init='xavier',
bias_init='zero'):
super(Dense, self).__init__(name='Dense(%dx%d,%s)' % (input_dim, output_dim, activation))
self.W = self._set_param(shape=(input_dim, output_dim),
init_type=weight_init,
name='W_dense')
if use_bias:
self.b = self._set_param(shape=output_dim,
init_type=bias_init,
name='b_dense')
self.params = [self.W, self.b]
else:
self.b = None
self.params = [self.W]
self.activation = self._set_activation(activation)
def forward(self, x):
h = T.dot(x, self.W)
if self.b:
h = h + self.b
if self.activation:
h = self.activation(h)
return h
class Dropout(Unit):
"""
Reference: [Dropout: A Simple Way to Prevent Neural Networks from Overfitting]
"""
def __init__(self, rate, seed=0):
super(Dropout, self).__init__(name='Dropout(p={:>1.1})'.format(rate))
self.rate = min(1., max(0., rate))
self.srng = T.shared_randomstreams.RandomStreams(seed=seed)
def forward(self, x, is_train):
drop_mask = self.srng.binomial(size=x.shape, n=1, p=1 - self.rate, dtype=theano.config.floatX)
return T.switch(T.eq(is_train, 1), x * drop_mask, x * (1 - self.rate))
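# Hedged composition sketch showing how Dense and Dropout are meant to be chained into a
# symbolic Theano graph (the layer sizes and variable names are illustrative only):
#   x = T.matrix('x')
#   h = Dense(784, 256, activation='relu').forward(x)
#   h = Dropout(0.5).forward(h, is_train=1)
#   y = Dense(256, 10, activation='softmax').forward(h)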
|
11460762
|
ISDEBUG = False
DEFAULT_CONCURENCY_LEVEL = '4'
DEFAULT_CASSANDRA_PORT = '9042'
ENABLE_TRACE_STATEMENTS_DEFAULT = 'False'
ALLOW_FILTERING_DEFAULT = 'False'
DEFAULT_CONNECTION_TIMEOUT = '2'
PREPARE_SELECTS_DEFAULT = 'False'
PER_TRANSACTION_CONNECTION = 'False'
BATCH_MODIFY_THRESHOLD = 10000
DEFAULT_TTL = '0'
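# Hedged note: the defaults above are kept as strings, presumably so they can be merged with
# externally supplied configuration before type conversion, e.g. (the environment variable
# name here is an assumption for illustration only):
#   import os
#   port = int(os.environ.get("CASSANDRA_PORT", DEFAULT_CASSANDRA_PORT))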
|
11460776
|
import numpy as np
import h5py
def read_sdf_file_as_3d_array(name):
fp = open(name, 'rb')
line = fp.readline().strip()
if not line.startswith(b'#sdf'):
raise IOError('Not a sdf file')
dims = list(map(int, fp.readline().strip().split(b' ')[1:]))
line = fp.readline()
data = np.frombuffer(fp.read(), dtype=np.float32)
data = data.reshape(dims)
fp.close()
return data
def read_data_input_only(hdf5_dir,grid_size,input_type,out_bool,out_float):
hdf5_file = h5py.File(hdf5_dir, 'r')
if out_bool:
LOD_gt_int = np.zeros([grid_size+1,grid_size+1,grid_size+1,1],np.int32)
else:
LOD_gt_int = None
if out_float:
LOD_gt_float = np.zeros([grid_size+1,grid_size+1,grid_size+1,3],np.float32)
else:
LOD_gt_float = None
if input_type=="sdf":
LOD_input = hdf5_file[str(grid_size)+"_sdf"][:]
LOD_input = LOD_input*grid_size #denormalize
elif input_type=="voxel":
LOD_input = hdf5_file[str(grid_size)+"_voxel"][:]
hdf5_file.close()
return LOD_gt_int, LOD_gt_float, LOD_input
def read_data_bool_only(hdf5_dir,grid_size,input_type,out_bool,out_float):
hdf5_file = h5py.File(hdf5_dir, 'r')
if out_bool:
LOD_gt_int = hdf5_file[str(grid_size)+"_int"][:]
else:
LOD_gt_int = None
if out_float:
LOD_gt_float = np.zeros([grid_size+1,grid_size+1,grid_size+1,3],np.float32)
else:
LOD_gt_float = None
if input_type=="sdf":
LOD_input = hdf5_file[str(grid_size)+"_sdf"][:]
LOD_input = LOD_input*grid_size #denormalize
elif input_type=="voxel":
LOD_input = hdf5_file[str(grid_size)+"_voxel"][:]
hdf5_file.close()
return LOD_gt_int, LOD_gt_float, LOD_input
def read_data(hdf5_dir,grid_size,input_type,out_bool,out_float):
hdf5_file = h5py.File(hdf5_dir, 'r')
if out_bool:
LOD_gt_int = hdf5_file[str(grid_size)+"_int"][:]
else:
LOD_gt_int = None
if out_float:
LOD_gt_float = hdf5_file[str(grid_size)+"_float"][:]
else:
LOD_gt_float = None
if input_type=="sdf":
LOD_input = hdf5_file[str(grid_size)+"_sdf"][:]
LOD_input = LOD_input*grid_size #denormalize
elif input_type=="voxel":
LOD_input = hdf5_file[str(grid_size)+"_voxel"][:]
hdf5_file.close()
return LOD_gt_int, LOD_gt_float, LOD_input
def read_and_augment_data(hdf5_dir,grid_size,input_type,out_bool,out_float,aug_permutation=True,aug_reversal=True,aug_inversion=True):
grid_size_1 = grid_size+1
#read input hdf5
LOD_gt_int, LOD_gt_float, LOD_input = read_data(hdf5_dir,grid_size,input_type,out_bool,out_float)
newdict = {}
if out_bool:
newdict['int_V_signs'] = LOD_gt_int[:,:,:,0]
if out_float:
newdict['float_center_x_'] = LOD_gt_float[:-1,:-1,:-1,0]
newdict['float_center_y_'] = LOD_gt_float[:-1,:-1,:-1,1]
newdict['float_center_z_'] = LOD_gt_float[:-1,:-1,:-1,2]
if input_type=="sdf":
newdict['input_sdf'] = LOD_input[:,:,:]
elif input_type=="voxel":
newdict['input_voxel'] = LOD_input[:-1,:-1,:-1]
#augment data
permutation_list = [ [0,1,2], [0,2,1], [1,0,2], [1,2,0], [2,0,1], [2,1,0] ]
reversal_list = [ [0,0,0],[0,0,1],[0,1,0],[0,1,1], [1,0,0],[1,0,1],[1,1,0],[1,1,1] ]
if aug_permutation:
permutation = permutation_list[np.random.randint(len(permutation_list))]
else:
permutation = permutation_list[0]
if aug_reversal:
reversal = reversal_list[np.random.randint(len(reversal_list))]
else:
reversal = reversal_list[0]
if aug_inversion:
inversion_flag = np.random.randint(2)
else:
inversion_flag = 0
if reversal[0]:
for k in newdict: #inverse
newdict[k] = newdict[k][::-1,:,:]
if '_x_' in k:
mask = (newdict[k]>=0)
newdict[k] = newdict[k]*(1-mask)+(1-newdict[k])*mask
if reversal[1]:
for k in newdict: #inverse
newdict[k] = newdict[k][:,::-1,:]
if '_y_' in k:
mask = (newdict[k]>=0)
newdict[k] = newdict[k]*(1-mask)+(1-newdict[k])*mask
if reversal[2]:
for k in newdict: #inverse
newdict[k] = newdict[k][:,:,::-1]
if '_z_' in k:
mask = (newdict[k]>=0)
newdict[k] = newdict[k]*(1-mask)+(1-newdict[k])*mask
if permutation == [0,1,2]:
pass
else:
for k in newdict: #transpose
newdict[k] = np.transpose(newdict[k], permutation)
if out_float:
olddict = newdict
newdict = {}
for k in olddict:
newdict[k] = olddict[k]
if permutation == [0,2,1]:
newdict['float_center_y_'] = olddict['float_center_z_']
newdict['float_center_z_'] = olddict['float_center_y_']
elif permutation == [1,0,2]:
newdict['float_center_x_'] = olddict['float_center_y_']
newdict['float_center_y_'] = olddict['float_center_x_']
elif permutation == [2,1,0]:
newdict['float_center_x_'] = olddict['float_center_z_']
newdict['float_center_z_'] = olddict['float_center_x_']
elif permutation == [1,2,0]:
newdict['float_center_x_'] = olddict['float_center_y_']
newdict['float_center_y_'] = olddict['float_center_z_']
newdict['float_center_z_'] = olddict['float_center_x_']
elif permutation == [2,0,1]:
newdict['float_center_x_'] = olddict['float_center_z_']
newdict['float_center_y_'] = olddict['float_center_x_']
newdict['float_center_z_'] = olddict['float_center_y_']
#store outputs
if out_bool:
LOD_gt_int = np.zeros([grid_size_1,grid_size_1,grid_size_1,1], np.int32)
if inversion_flag:
LOD_gt_int[:,:,:,0] = 1-newdict['int_V_signs']
else:
LOD_gt_int[:,:,:,0] = newdict['int_V_signs']
else:
LOD_gt_int = None
if out_float:
LOD_gt_float = np.full([grid_size_1,grid_size_1,grid_size_1,3], -1, np.float32)
LOD_gt_float[:-1,:-1,:-1,0] = newdict['float_center_x_']
LOD_gt_float[:-1,:-1,:-1,1] = newdict['float_center_y_']
LOD_gt_float[:-1,:-1,:-1,2] = newdict['float_center_z_']
else:
LOD_gt_float = None
if input_type=="sdf":
LOD_input = np.ones([grid_size_1,grid_size_1,grid_size_1], np.float32)
LOD_input[:,:,:] = newdict['input_sdf']
if inversion_flag:
LOD_input = -LOD_input
elif input_type=="voxel":
LOD_input = np.zeros([grid_size_1,grid_size_1,grid_size_1], np.uint8)
LOD_input[:-1,:-1,:-1] = newdict['input_voxel']
if inversion_flag:
LOD_input = 1-LOD_input
return LOD_gt_int, LOD_gt_float, LOD_input
#this is not an efficient implementation. just for testing!
def dual_contouring_ndc_test(int_grid, float_grid):
all_vertices = []
all_triangles = []
int_grid = np.squeeze(int_grid)
dimx,dimy,dimz = int_grid.shape
vertices_grid = np.full([dimx,dimy,dimz], -1, np.int32)
#all vertices
for i in range(0,dimx-1):
for j in range(0,dimy-1):
for k in range(0,dimz-1):
v0 = int_grid[i,j,k]
v1 = int_grid[i+1,j,k]
v2 = int_grid[i+1,j+1,k]
v3 = int_grid[i,j+1,k]
v4 = int_grid[i,j,k+1]
v5 = int_grid[i+1,j,k+1]
v6 = int_grid[i+1,j+1,k+1]
v7 = int_grid[i,j+1,k+1]
if v1!=v0 or v2!=v0 or v3!=v0 or v4!=v0 or v5!=v0 or v6!=v0 or v7!=v0:
#add a vertex
vertices_grid[i,j,k] = len(all_vertices)
pos = float_grid[i,j,k]+np.array([i,j,k], np.float32)
all_vertices.append(pos)
all_vertices = np.array(all_vertices, np.float32)
#all triangles
#i-direction
for i in range(0,dimx-1):
for j in range(1,dimy-1):
for k in range(1,dimz-1):
v0 = int_grid[i,j,k]
v1 = int_grid[i+1,j,k]
if v0!=v1:
if v0==0:
all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k],vertices_grid[i,j,k-1]])
all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j-1,k],vertices_grid[i,j,k]])
else:
all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k-1],vertices_grid[i,j,k]])
all_triangles.append([vertices_grid[i,j-1,k-1],vertices_grid[i,j,k],vertices_grid[i,j-1,k]])
#j-direction
for i in range(1,dimx-1):
for j in range(0,dimy-1):
for k in range(1,dimz-1):
v0 = int_grid[i,j,k]
v1 = int_grid[i,j+1,k]
if v0!=v1:
if v0==0:
all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k-1],vertices_grid[i,j,k]])
all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k],vertices_grid[i-1,j,k]])
else:
all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i,j,k],vertices_grid[i,j,k-1]])
all_triangles.append([vertices_grid[i-1,j,k-1],vertices_grid[i-1,j,k],vertices_grid[i,j,k]])
#k-direction
for i in range(1,dimx-1):
for j in range(1,dimy-1):
for k in range(0,dimz-1):
v0 = int_grid[i,j,k]
v1 = int_grid[i,j,k+1]
if v0!=v1:
if v0==0:
all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i-1,j,k],vertices_grid[i,j,k]])
all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j,k],vertices_grid[i,j-1,k]])
else:
all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j,k],vertices_grid[i-1,j,k]])
all_triangles.append([vertices_grid[i-1,j-1,k],vertices_grid[i,j-1,k],vertices_grid[i,j,k]])
all_triangles = np.array(all_triangles, np.int32)
return all_vertices, all_triangles
def write_obj_triangle(name, vertices, triangles):
fout = open(name, 'w')
for ii in range(len(vertices)):
fout.write("v "+str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
for ii in range(len(triangles)):
fout.write("f "+str(int(triangles[ii,0]+1))+" "+str(int(triangles[ii,1]+1))+" "+str(int(triangles[ii,2]+1))+"\n")
fout.close()
def write_ply_triangle(name, vertices, triangles):
fout = open(name, 'w')
fout.write("ply\n")
fout.write("format ascii 1.0\n")
fout.write("element vertex "+str(len(vertices))+"\n")
fout.write("property float x\n")
fout.write("property float y\n")
fout.write("property float z\n")
fout.write("element face "+str(len(triangles))+"\n")
fout.write("property list uchar int vertex_index\n")
fout.write("end_header\n")
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
for ii in range(len(triangles)):
fout.write("3 "+str(triangles[ii,0])+" "+str(triangles[ii,1])+" "+str(triangles[ii,2])+"\n")
fout.close()
def write_ply_point(name, vertices):
fout = open(name, 'w')
fout.write("ply\n")
fout.write("format ascii 1.0\n")
fout.write("element vertex "+str(len(vertices))+"\n")
fout.write("property float x\n")
fout.write("property float y\n")
fout.write("property float z\n")
fout.write("end_header\n")
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+"\n")
fout.close()
def write_ply_point_normal(name, vertices, normals=None):
fout = open(name, 'w')
fout.write("ply\n")
fout.write("format ascii 1.0\n")
fout.write("element vertex "+str(len(vertices))+"\n")
fout.write("property float x\n")
fout.write("property float y\n")
fout.write("property float z\n")
fout.write("property float nx\n")
fout.write("property float ny\n")
fout.write("property float nz\n")
fout.write("end_header\n")
if normals is None:
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+" "+str(vertices[ii,3])+" "+str(vertices[ii,4])+" "+str(vertices[ii,5])+"\n")
else:
for ii in range(len(vertices)):
fout.write(str(vertices[ii,0])+" "+str(vertices[ii,1])+" "+str(vertices[ii,2])+" "+str(normals[ii,0])+" "+str(normals[ii,1])+" "+str(normals[ii,2])+"\n")
fout.close()
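# Hedged end-to-end sketch tying the helpers above together (the file name and grid size are
# placeholders; dual_contouring_ndc_test is the slow reference implementation noted above):
#   gt_int, gt_float, _ = read_data("shape.hdf5", 64, "sdf", out_bool=True, out_float=True)
#   verts, tris = dual_contouring_ndc_test(gt_int, gt_float)
#   write_ply_triangle("shape_ndc.ply", verts, tris)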
|
11460787
|
import sqlite3
from abstractas import *
class takosdb(BaseAbstracta):
def __init__(self):
self.conn=sqlite3.connect("Tako_db.db")
self.cursor = self.conn.cursor()
def insertar_tako(self,id_tako,Tako):
t = (id_tako,Tako.nombre,Tako.principal,Tako.salsas,Tako.condimentos,Tako.sasonadores,Tako.shell)
self.cursor.execute(''' INSERT INTO Takos VALUES (?,?,?,?,?,?,?)''',t)
self.conn.commit()
return True
def insertar_Cliente(self,id_cliente,Cliente):
c = (id_cliente,Cliente.name,Cliente.email,Cliente.phone,Cliente.picture,Cliente.location)
self.cursor.execute(''' INSERT INTO Clientes VALUES (?,?,?,?,?,?)''',c)
self.conn.commit()
return True
def insertar_Orden(self,id_orden,total,fecha,tipo,id_tako,id_user):
o = (id_orden,total,fecha,tipo,id_tako,id_user)
self.cursor.execute(''' INSERT INTO orden VALUES (?,?,?,?,?,?)''',o)
self.conn.commit()
        print ('id_orden: {}\nTotal: {}\nFecha: {}\nTipo: {}\nTako: {}\nCliente: {}'.format(id_orden,total,fecha,tipo,id_tako,id_user))
return True
def delete_Orden(self,id_orden):
self.cursor.execute("DELETE FROM orden WHERE id_orden = ?",(id_orden,))
self.conn.commit()
return True
def select_tako(self):
self.cursor.execute("SELECT id_tako,nombre FROM Takos group by Nombre order by id_tako")
datos = self.cursor.fetchall()
if datos:
return(datos)
else:
return('Ya cerramos')
def select_Orden(self,id_orden):
odn = id_orden
self.cursor.execute("SELECT * FROM orden WHERE id_orden = ?",(odn,))
datos = self.cursor.fetchone()
if datos:
return('ID: {} \nTotal: {} \nFecha: {} \nTipo: {} \nTako: {} \nCliente: {}'.\
format(datos[0],datos[1],datos[2],datos[3],datos[4],datos[5]))
else:
return('La orden: {} ,no existe'.format(odn))
def select_Cliente(self,id_cliente):
cli = id_cliente
self.cursor.execute("SELECT * FROM Clientes WHERE id_user = ?",(cli,))
datos = self.cursor.fetchone()
if datos:
print('ID: {} \nNombre: {} \nEmail: {} \nTelefono: {} \nPicture: {} \nDireccion: {}'.\
format(datos[0],datos[1],datos[2],datos[3],datos[4],datos[5]))
return True
else:
print('El Cliente: {} ,no existe'.format(cli))
return False
if __name__ == '__main__':
    db=takosdb()
|
11460807
|
from rb_status_plugin.core.report_repo import VariablesReportRepo
from rb_status_plugin.core.report import Report
import json
import pytest
@pytest.mark.compatibility
class TestVariablesReportRepo:
dummy_test = """
{
"report_title": "my report title",
"report_title_id": "my-report-title",
"description": "my report description",
"owner_name": "my name",
"owner_email": "<EMAIL>",
"subscribers":
[
"<EMAIL>",
"<EMAIL>",
"<EMAIL>"
],
"tests":
[
"example_dag.python_print_date_0",
"example_dag.python_random_0"
],
"schedule_type": "custom",
"schedule": "* * * 1 *",
"report_id": "rb_status_my report title"
}
"""
def test_parse_variable_value(self):
parsed = VariablesReportRepo.parse_variable_val(self.dummy_test)
assert parsed["tests"] == [
"example_dag.python_print_date_0",
"example_dag.python_random_0",
]
def test_parse_variable_value_no_json(self):
parsed = VariablesReportRepo.parse_variable_val("not_json")
assert parsed is None
def test_parse_variable_name(self):
parsed = VariablesReportRepo.parse_variable_name("rb_status_bob")
assert parsed == "bob"
def test_parse_variable_name_ci(self):
parsed = VariablesReportRepo.parse_variable_name("RB_STATUS_BOB")
assert parsed == "BOB"
def test_parse_variable_name_none(self):
parsed = VariablesReportRepo.parse_variable_name("not_correct")
assert parsed is None
def test_return_report(self):
parsed = json.loads(self.dummy_test)
r = VariablesReportRepo.to_report("bob", parsed)
assert type(r) == Report
|
11460850
|
import torch
import torch.nn.functional as F
from ignite.engine.engine import Engine, State, Events
from ignite._utils import convert_tensor
from utils.helpers import BinaryClassificationMeter, accuracy
class DefaultTrainer:
def __init__(self, model, optimizer, loss_fn, logger, config):
self.opts = config.trainer_config
self.model = model["Model"]
if optimizer:
self.optimizer = optimizer["Optim"]
if loss_fn:
self.loss_fn = loss_fn["Loss"][0]
self.logger = logger
self.device = config.device
self.log_freq = config.log_freq
self.attached = {}
self.curr_epoch = 0
self.metric = BinaryClassificationMeter()
self.metric_train = BinaryClassificationMeter()
def _prepare_batch(self, batch):
xs, ys = batch
if isinstance(xs, list):
xs = [convert_tensor(x, self.device).float() for x in xs]
else:
xs = [convert_tensor(xs, self.device).float()]
if isinstance(ys, list):
ys = [convert_tensor(y, self.device).float() for y in ys]
else:
ys = [convert_tensor(ys, self.device).float()]
return xs, ys
def train(self, engine, batch):
self.model.train()
curr_step = self.logger.counters["train"]
self.optimizer.zero_grad()
xs, ys = self._prepare_batch(batch)
y_pred = self.model(*xs)
if not (isinstance(y_pred, list) or isinstance(y_pred, tuple)):
ys = ys[0]
loss = self.loss_fn(y_pred, ys, pos_weight=torch.Tensor([1.5]).to("cuda"))
self.logger.add_scalars('train/loss', {'L': loss.item()}, curr_step)
if engine.state.iteration % 1000 == 0:
self.logger.log_image_grid("Input", xs[0], "train")
y_img = torch.ones_like(xs[0])*ys.view(ys.size(0),1,1,1)
self.logger.log_image_grid("Label", y_img, "train", normalize=False)
y2_img = torch.ones_like(xs[0])*torch.sigmoid(y_pred).view(y_pred.size(0),1,1,1)
self.logger.log_image_grid("Prediction", y2_img, "train", normalize=False)
loss.backward()
self.optimizer.step()
return loss.item()
def on_epoch_start(self, engine, phase=None):
self.log_batch = True
self.metric_train.reset()
if phase == "train":
self.curr_epoch = engine.state.epoch
def on_epoch_end(self, engine, phase=None):
if phase in ["evaluate", "test"]:
metrics = engine.state.metrics
log = ""
for k, v in metrics.items():
log += "{}: {:.2f} ".format(k, v)
print("{} Results - Epoch: {} {}".format(phase.capitalize(), self.curr_epoch, log))
if phase in ["evaluate"]:
curr_step = self.logger.counters["evaluate"]
self.logger.add_scalars('evaluate/metrics', {'Acc': self.metric.acc, 'Precision': self.metric.pre, 'f1':self.metric.f1, 'Recall': self.metric.rec}, curr_step)
self.metric.reset()
def on_iteration_start(self, engine, phase=None):
if phase == "train":
curr_iter = (engine.state.iteration - 1) % len(self.attached["train_loader"]) + 1
if curr_iter % self.log_freq == 0:
print("Epoch[{}] Iteration[{}/{}] Loss: {:.2f}".format(engine.state.epoch, curr_iter, len(self.attached["train_loader"]), engine.state.output))
elif phase == "test":
curr_iter = (engine.state.iteration - 1) % len(self.attached["test_loader"]) + 1
if curr_iter % self.log_freq == 0:
print("Iteration[{}/{}]".format(curr_iter, len(self.attached["test_loader"])))
def on_iteration_end(self, engine, phase=None):
pass
def infer_batch(self, batch):
self.model.eval()
with torch.no_grad():
xs, ys = self._prepare_batch(batch)
y_pred = self.model(*xs)
return xs, ys, y_pred
def evaluate(self, engine, batch):
curr_step = self.logger.counters["evaluate"]
xs, ys, y_pred = self.infer_batch(batch)
if not (isinstance(y_pred, list) or isinstance(y_pred, tuple)):
ys = ys[0]
if self.log_batch:
self.logger.log_image_grid("evInput", xs[0], "evaluate")
y_img = torch.ones_like(xs[0])*ys.view(ys.size(0),1,1,1)
self.logger.log_image_grid("evLabel", y_img, "evaluate", normalize=False)
y2_img = torch.ones_like(xs[0])*torch.sigmoid(y_pred).view(y_pred.size(0),1,1,1)
self.logger.log_image_grid("evPrediction", y2_img, "evaluate", normalize=False)
self.log_batch = False
loss = self.loss_fn(y_pred, ys)
self.metric.update(torch.sigmoid(y_pred), ys)
self.logger.add_scalars('evaluate/loss', {'L': loss.item()}, curr_step)
return y_pred.float(), ys.float()
def attach(self, name, obj):
self.attached[name] = obj
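# Hedged wiring sketch (the loader and config attribute names are assumptions; the actual
# setup lives outside this file). The methods above are written to be used as ignite handlers:
#   trainer = DefaultTrainer(model, optimizer, loss_fn, logger, config)
#   train_engine = Engine(trainer.train)
#   train_engine.add_event_handler(Events.EPOCH_STARTED, trainer.on_epoch_start, phase="train")
#   trainer.attach("train_loader", train_loader)
#   train_engine.run(train_loader, max_epochs=config.max_epochs)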
|
11460885
|
import time
import codecs
import platform
import sys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
class Topic(object):
# Arguments passed through the batch file topic.bat
email, passw, url = sys.argv[1:]
# Opening PhantomJS webdriver
options = ['--proxy-type=none']
if "Windows" == platform.system():
driver = webdriver.PhantomJS('..\phantomjs.exe', service_args=options)
else:
driver = webdriver.PhantomJS(executable_path='../phantomjs',
service_args=options)
wait = WebDriverWait(driver, 60)
# Access to Quora and Login
driver.get("http://www.quora.com/")
driver.refresh()
time.sleep(2)
print ('Login to Quora..')
while True:
# Entering your username and password
form = driver.find_element_by_class_name('login')
username = form.find_element_by_name('email')
username.send_keys(email)
time.sleep(2)
password = form.find_element_by_name('password')
        password.send_keys(passw)
time.sleep(2)
form.find_element_by_xpath(
".//input[contains(@value, 'Login')]").click()
time.sleep(2)
try:
if driver.find_element_by_css_selector(
'div[id*="_error"]').is_displayed():
driver.refresh()
print ('Login Error.Retry')
email = raw_input("Insert username: ")
passw = raw_input("Insert password: ")
except NoSuchElementException:
break
# Open Section Organize of a Topic
while True:
try:
driver.get(url)
if driver.find_element_by_xpath(
'//div[contains(@class, "TopicNavigationChildTree' +
' section_top")]').is_displayed():
break
except Exception:
print ('Error, page not avaible or wrong url')
url = raw_input("Re-Insert URL-ORGANIZE_TOPIC:")
filename = url.replace('https://www.quora.com/topic/', '')
filename = filename.replace('/organize', '')
filename += ".txt"
target = codecs.open(filename, 'w+', encoding='utf-8')
target.truncate()
top = driver.find_element_by_xpath(
'//div[contains(@class, "TopicNavigationChildTree section_top")]')
topics = top.find_elements_by_xpath(
'.//span[contains(@class, "TopicNameSpan TopicName")]')
show_more_list = top.find_elements_by_xpath(
'//div[contains(@class, "TopicTreeItemToggled SimpleToggle Toggle")]' +
'//small/span[not(contains(@class,"hidden"))]' +
'/a[contains(text(), "Show ")]')
# Expansion of the hierarchy of topics with Selenium
while True:
if len(show_more_list) > 0:
for elem in show_more_list:
driver.execute_script("arguments[0].scrollIntoView(true);",
elem)
driver.execute_script("window.scrollBy(0,-250);")
time.sleep(0.5)
# Click on "Show more" button
webdriver.ActionChains(driver).move_to_element(elem).click(
elem).perform()
wait.until(ec.invisibility_of_element_located(
(By.CLASS_NAME, 'loading')))
while len(topics) == len(top.find_elements_by_xpath(
'.//span[contains(@class, "TopicNameSpan TopicName")]')):
time.sleep(1)
time.sleep(2)
print "Topic found: " + str(len(driver.find_elements_by_xpath(
'//div[contains(@class, "TopicNavigationChildTree ' +
'section_top")]//span[contains(@class, ' +
'"TopicNameSpan TopicName")]')))
show_more_list = top.find_elements_by_xpath(
'//div[contains(@class, "TopicTreeItemToggled '
'SimpleToggle Toggle")]//small/' +
'span[not(contains(@class,"hidden"))]' +
'/a[contains(text(), "Show ")]')
print "Other " + str(len(show_more_list)) + " to expand"
else:
break
topics = top.find_elements_by_xpath(
'.//span[contains(@class, "TopicNameSpan TopicName")]')
topics_text = []
print ('Please Wait..')
for topic in topics:
topics_text.append(topic.text.encode('ascii', 'ignore'))
print ('Number of different Topic: ' + str(len(set(topics_text))))
print ('Writing on file the list of Topic..')
for topic in set(topics_text):
target.write(topic + '\n')
print ('Finish')
target.close()
driver.close()
|
11460888
|
import bpy
import math
import numpy as np
def get_calibration_matrix_K_from_blender(camd):
f_in_mm = camd.lens
scene = bpy.context.scene
resolution_x_in_px = scene.render.resolution_x
resolution_y_in_px = scene.render.resolution_y
scale = scene.render.resolution_percentage / 100
sensor_width_in_mm = camd.sensor_width
sensor_height_in_mm = camd.sensor_height
pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
if (camd.sensor_fit == 'VERTICAL'):
        # the sensor height is fixed (sensor fit is vertical),
        # the sensor width is effectively changed with the pixel aspect ratio
s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
s_v = resolution_y_in_px * scale / sensor_height_in_mm
else: # 'HORIZONTAL' and 'AUTO'
# the sensor width is fixed (sensor fit is horizontal),
# the sensor height is effectively changed with the pixel aspect ratio
pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
s_u = resolution_x_in_px * scale / sensor_width_in_mm
s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm
# Parameters of intrinsic calibration matrix K
alpha_u = f_in_mm * s_u
alpha_v = f_in_mm * s_v
u_0 = resolution_x_in_px*scale / 2
v_0 = resolution_y_in_px*scale / 2
skew = 0 # only use rectangular pixels
K = np.array(
[[alpha_u, skew, u_0],
[ 0 , alpha_v, v_0],
[ 0 , 0, 1 ]])
return K
def camPosToQuaternion(cx, cy, cz):
camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
cx = cx / camDist
cy = cy / camDist
cz = cz / camDist
axis = (-cz, 0, cx)
angle = math.acos(cy)
a = math.sqrt(2) / 2
b = math.sqrt(2) / 2
w1 = axis[0]
w2 = axis[1]
w3 = axis[2]
c = math.cos(angle / 2)
d = math.sin(angle / 2)
q1 = a * c - b * d * w1
q2 = b * c + a * d * w1
q3 = a * d * w2 + b * d * w3
q4 = -b * d * w2 + a * d * w3
return (q1, q2, q3, q4)
def quaternionFromYawPitchRoll(yaw, pitch, roll):
c1 = math.cos(yaw / 2.0)
c2 = math.cos(pitch / 2.0)
c3 = math.cos(roll / 2.0)
s1 = math.sin(yaw / 2.0)
s2 = math.sin(pitch / 2.0)
s3 = math.sin(roll / 2.0)
q1 = c1 * c2 * c3 + s1 * s2 * s3
q2 = c1 * c2 * s3 - s1 * s2 * c3
q3 = c1 * s2 * c3 + s1 * c2 * s3
q4 = s1 * c2 * c3 - c1 * s2 * s3
return (q1, q2, q3, q4)
def camPosToQuaternion(cx, cy, cz):
q1a = 0
q1b = 0
q1c = math.sqrt(2) / 2
q1d = math.sqrt(2) / 2
camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
cx = cx / camDist
cy = cy / camDist
cz = cz / camDist
t = math.sqrt(cx * cx + cy * cy)
tx = cx / t
ty = cy / t
yaw = math.acos(ty)
if tx > 0:
yaw = 2 * math.pi - yaw
pitch = 0
tmp = min(max(tx*cx + ty*cy, -1),1)
#roll = math.acos(tx * cx + ty * cy)
roll = math.acos(tmp)
if cz < 0:
roll = -roll
# print("%f %f %f" % (yaw, pitch, roll))
q2a, q2b, q2c, q2d = quaternionFromYawPitchRoll(yaw, pitch, roll)
q1 = q1a * q2a - q1b * q2b - q1c * q2c - q1d * q2d
q2 = q1b * q2a + q1a * q2b + q1d * q2c - q1c * q2d
q3 = q1c * q2a - q1d * q2b + q1a * q2c + q1b * q2d
q4 = q1d * q2a + q1c * q2b - q1b * q2c + q1a * q2d
return (q1, q2, q3, q4)
def camRotQuaternion(cx, cy, cz, theta):
theta = theta / 180.0 * math.pi
camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
cx = -cx / camDist
cy = -cy / camDist
cz = -cz / camDist
q1 = math.cos(theta * 0.5)
q2 = -cx * math.sin(theta * 0.5)
q3 = -cy * math.sin(theta * 0.5)
q4 = -cz * math.sin(theta * 0.5)
return (q1, q2, q3, q4)
def quaternionProduct(qx, qy):
a = qx[0]
b = qx[1]
c = qx[2]
d = qx[3]
e = qy[0]
f = qy[1]
g = qy[2]
h = qy[3]
q1 = a * e - b * f - c * g - d * h
q2 = a * f + b * e + c * h - d * g
q3 = a * g - b * h + c * e + d * f
q4 = a * h + b * g - c * f + d * e
return (q1, q2, q3, q4)
def obj_centened_camera_pos(dist, azimuth_deg, elevation_deg):
phi = float(elevation_deg) / 180 * math.pi
theta = float(azimuth_deg) / 180 * math.pi
x = (dist * math.cos(theta) * math.cos(phi))
y = (dist * math.sin(theta) * math.cos(phi))
z = (dist * math.sin(phi))
return (x, y, z)
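# Hedged worked example of obj_centened_camera_pos (the numbers are illustrative only):
# a camera 2 units away at azimuth 45 deg and elevation 30 deg sits at roughly
#   x = 2*cos(45)*cos(30) ~ 1.225, y = 2*sin(45)*cos(30) ~ 1.225, z = 2*sin(30) = 1.0,
# and that position would then be passed to camPosToQuaternion / camRotQuaternion to orient
# the Blender camera.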
|
11460898
|
from __future__ import division
import string
from plag import main_func
#from plag import *
import codecs
from nltk.tokenize import sent_tokenize,word_tokenize
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
def pre_processing(text,flag=True):
'''This function cleans out the unnecessary information from the text and does the required pre processing .
Pre processing steps:
*Sentence segmentation (Seg)*
Split text in the document into sentences and thereby allowing line-by-line processing in the subsequent tests.
*Tokenisation (Tok)*
Determine token (words, punctuation symbols, etc.) boundaries in sentences.
*Lowercase (Low)*
Substitute every uppercase letters with lowercase to generalise the matching.
*Stop-word removal (Stop)*
    Remove functional words (articles, pronouns, prepositions, complementisers and determiners).
*Punctuation removal (Pun)*
Remove punctuation symbols.
*Stemming (Stem)*
Transform words into their stems in order to generalise the comparison analysis
*Lemmatisation (Lem)*
Transform words into their dictionary base forms in order to generalise the comparison analysis.
:Argument1: text {string} -- text to be pre-processed
:Argument2: flag {bool} -- stop-word arg . (default: {True})
:returns: string -- pre-processed string
'''
text=text.lower()
#sent_tokenize_list = sent_tokenize(text)
#print sent_tokenize_list
#print len(sent_tokenize_list)
#tokenise words
a=stopwords.words('english')
stop_words=set(a)
#stop_words.append('u')
words=word_tokenize(text)
print words
result = []
#remove stop words
if flag:
for item in words:
if item not in stop_words:
result.append(item)
#print "Filtered",result
fil=str(result)
else:
result
#remove punctuation
repstr=" " * 32
table=string.maketrans(string.punctuation,repstr)
s=fil.translate(table)
#return s
#lemmatizing
lemmatizer=WordNetLemmatizer()
h=lemmatizer.lemmatize(s)
#print "Lemma",lemmatizer.lemmatize(s)
#stemming
wordss=word_tokenize(h)
ps=PorterStemmer()
list1=[]
for i in wordss:
k=(ps.stem(i))
list1.append(k)
#print list1
final= ' '.join(list1)
finall=str(final)
finallstr=''
sanwrd = 'u'
splitfinall = finall.split()
for wrd in splitfinall:
if wrd != sanwrd:
finallstr += str(wrd)+str(' ')
finallstr=str(finallstr)
#print finallstr
return finallstr
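# Hedged illustration of the pipeline above (the exact output depends on the installed NLTK data):
#   pre_processing("The cats were running quickly!")
#   -> roughly "cat run quickli" after lowercasing, stop-word and punctuation removal,
#      lemmatisation and Porter stemming.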
def main_method(file_list,inputFile):
"""
This function takes a list of original files which is to be compared with input file and displays the similar text and similarity
score.
:Argument1: file_list (list of files) -- A list of original files .
:Argument2: inputFile (file) -- Input file which is suspected to have plagiarism
"""
fileLastIndex=[]
combinedFile=''
inputText=''
for file in file_list:
with codecs.open(str(file), 'r', encoding='utf-8', errors='ignore') as rd:
originalFile = rd.read()
combinedFile= combinedFile+originalFile+'\n'
fileLastIndex.append((len(combinedFile.split()),file))
with codecs.open(str(inputFile), 'r', encoding='utf-8', errors='ignore') as rd:
inputText = rd.read()
main_func(inputText, combinedFile,fileLastIndex)
#main_method(['orig_taska.txt','g1pA_taska.txt','g2pE_taska.txt'],'g0pA_taska.txt')
|
11460932
|
import numpy as np
def allow_1d(which_argument):
def allow_1d_(function):
def decorated(*args, **kwargs):
args = list(args)
ndim = np.ndim(args[which_argument])
if ndim == 1:
args[which_argument] = np.atleast_2d(args[which_argument])
return function(*args, **kwargs)[0]
if ndim == 2:
return function(*args, **kwargs)
raise ValueError(
f"Argument number {which_argument} has to be 1d or 2d array"
)
return decorated
return allow_1d_
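# Hedged usage sketch: the decorated function below is illustrative and not part of the module.
if __name__ == "__main__":
    @allow_1d(0)
    def row_sums(x):
        # expects a 2d array and returns one sum per row
        return x.sum(axis=1)

    print(row_sums(np.array([1.0, 2.0, 3.0])))            # 1d input is promoted, scalar 6.0 comes back
    print(row_sums(np.array([[1.0, 2.0], [3.0, 4.0]])))   # 2d input passes through, prints [3. 7.]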
|
11460963
|
import os
class DynetSaver():
def __init__(self, parameter_collection, checkpoint_dir):
self.parameter_collection = parameter_collection
self.checkpoint_dir = checkpoint_dir
def save(self, epoch=None, n_bests=None):
        assert epoch is not None or (n_bests is not None and n_bests >= 0), "One of epoch or n_bests should be specified"
model_dir_path = "model-epoch-%08d" % epoch if epoch is not None else ("best-models-%08d" % n_bests)
model_checkpoint_dir_path = os.path.join(self.checkpoint_dir, model_dir_path)
if not os.path.exists(model_checkpoint_dir_path):
os.mkdir(model_checkpoint_dir_path)
self.parameter_collection.save(os.path.join(model_checkpoint_dir_path,
"model.ckpt"))
def restore(self, filepath):
self.parameter_collection.populate(filepath)
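# Hedged usage sketch (assumes the dynet package; the paths and collection below are illustrative):
#   import dynet as dy
#   pc = dy.ParameterCollection()
#   saver = DynetSaver(pc, "./checkpoints")
#   saver.save(epoch=3)   # writes ./checkpoints/model-epoch-00000003/model.ckpt
#   saver.restore("./checkpoints/model-epoch-00000003/model.ckpt")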
|
11460987
|
class StackVariableRecovery:
def __init__(self, fun):
assert fun
self._vars = {0: fun.binary.arch.bytes}
self._fun = fun
self._bp = None
self._tmp_bps = []
self._tmp_sp = None
def _detect_bp(self, stmt):
arch = self._fun.binary.arch
if self._tmp_bps and stmt.tag == 'Ist_Put':
if hasattr(stmt.data, 'tmp') and stmt.data.tmp in self._tmp_bps:
if 'sp' not in arch.register_names[stmt.offset]:
self._bp = stmt.offset
# accessing the tmp_sp and adding an offset -> tmp_bp
if self._tmp_sp is not None and hasattr(stmt, 'data'):
rd_tmps = [x for x in stmt.data.child_expressions if x.tag == 'Iex_RdTmp']
for rd_tmp in rd_tmps:
if rd_tmp.tmp == self._tmp_sp or rd_tmp.tmp in self._tmp_bps:
self._tmp_bps.append(stmt.tmp)
break
# accessing_sp and putting the result in tmp
if hasattr(stmt, 'data') and stmt.data.tag == 'Iex_Get' \
and 'sp' in arch.register_names[stmt.data.offset]:
self._tmp_sp = stmt.tmp
def _detect_var(self, stmt):
# new bp tmp
if hasattr(stmt, 'data') and stmt.data.tag == 'Iex_Get' \
and self._bp == stmt.data.offset:
self._tmp_bps.append(stmt.tmp)
# accessing a var
if hasattr(stmt, 'data') and hasattr(stmt.data, 'op'):
if 'Iop_Sub' in stmt.data.op or 'Iop_Add' in stmt.data.op:
rd_tmps = [x for x in stmt.data.child_expressions if x.tag == 'Iex_RdTmp']
if any([x for x in rd_tmps if x.tmp in self._tmp_bps]):
const = [x for x in stmt.data.child_expressions if x.tag == 'Iex_Const']
assert len(const) == 1, 'stack_variable_recovery: too many constants'
const = const[0].con.value
if const not in self._vars:
if 'Sub' in stmt.data.op:
const = -const
self._vars[const] = None
def _estimate_sizes(self):
for off in self._vars.keys():
if off == 0:
continue
if off > 0:
val = min([off - x for x in self._vars.keys() if x != off and off > x >= 0])
else:
val = -max([off - x for x in self._vars.keys() if x != off and off < x <= 0])
self._vars[off] = val
def _analyze(self):
topological_blocks = [x for x in self._fun.blocks]
topological_blocks.sort(key=lambda x: x.addr)
for block in topological_blocks:
for stmt in block.vex.statements:
# the base pointer is not set yet
if not self._bp:
self._detect_bp(stmt)
# it is
else:
self._detect_var(stmt)
self._estimate_sizes()
def run(self):
self._analyze()
return self._vars
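# Hedged usage sketch (illustrative, not part of the original module): assumes
# `fun` is a function object exposing `binary.arch`, iterable `blocks`, and
# per-block `vex.statements` (an angr/pyvex-style interface), as used above.
# recovery = StackVariableRecovery(fun)
# stack_vars = recovery.run()   # {offset from base pointer: estimated size in bytes}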
|
11461003
|
import os
from datetime import datetime, timezone
from typing import Optional
from pyspark.sql import DataFrame
class ModelProfileSession:
def __init__(self, prediction_field: str, target_field: str, score_field: str):
self.prediction_field = prediction_field
self.target_field = target_field
self.score_field = score_field
class WhyProfileSession:
"""
A class that enable easy access to the profiling API
"""
def __init__(self, dataframe: DataFrame, name: str, time_column: Optional[str] = None, group_by_columns=None, model_profile: ModelProfileSession = None):
if group_by_columns is None:
group_by_columns = []
self._group_by_columns = group_by_columns
self._df = dataframe
self._name = name
        self._time_column = time_column
self._model_profile = model_profile
def withTimeColumn(self, time_column: str): # noqa
"""
Set the column for grouping by time. This column must be of Timestamp type in Spark SQL.
Note that WhyLogs uses this column to group data together, so please make sure you truncate the
        data to the appropriate level of precision (e.g. daily, hourly) before calling this.
        The API only accepts a column name (string) at the moment.
        :rtype: WhyProfileSession
"""
return WhyProfileSession(dataframe=self._df, name=self._name, time_column=time_column, group_by_columns=self._group_by_columns)
def withClassificationModel(self, prediction_field: str, target_field: str, score_field: str): # noqa
"""
        Track classification model performance. Specify the prediction field, target field and score field.
        :rtype: WhyProfileSession
"""
model_profile = ModelProfileSession(prediction_field, target_field, score_field)
return WhyProfileSession(
            dataframe=self._df, name=self._name, time_column=self._time_column, group_by_columns=self._group_by_columns, model_profile=model_profile
)
def withRegressionModel(self, prediction_field: str, target_field: str): # noqa
"""
        Track regression model performance. Specify the prediction field and target field.
        :rtype: WhyProfileSession
"""
model_profile = ModelProfileSession(prediction_field, target_field, None)
return WhyProfileSession(
            dataframe=self._df, name=self._name, time_column=self._time_column, group_by_columns=self._group_by_columns, model_profile=model_profile
)
def groupBy(self, col: str, *cols): # noqa
        return WhyProfileSession(dataframe=self._df, name=self._name, time_column=self._time_column, group_by_columns=[col] + list(cols))
def aggProfiles(self, datetime_ts: Optional[datetime] = None, timestamp_ms: int = None) -> DataFrame: # noqa
if datetime_ts is not None:
timestamp_ms = int(datetime_ts.timestamp() * 1000)
elif timestamp_ms is None:
timestamp_ms = int(datetime.now(tz=timezone.utc).timestamp() * 1000)
jdf = self._create_j_session().aggProfiles(timestamp_ms)
return DataFrame(jdf=jdf, sql_ctx=self._df.sql_ctx)
def _create_j_session(self):
jvm = self._df.sql_ctx._sc._jvm # noqa
j_session = jvm.com.whylogs.spark.WhyLogs.newProfilingSession(self._df._jdf, self._name) # noqa
        if self._time_column is not None:
            j_session = j_session.withTimeColumn(self._time_column)
if len(self._group_by_columns) > 0:
j_session = j_session.groupBy(list(self._group_by_columns))
if self._model_profile is not None:
mp = self._model_profile
if mp.score_field:
j_session = j_session.withClassificationModel(mp.prediction_field, mp.target_field, mp.score_field)
else:
j_session = j_session.withRegressionModel(mp.prediction_field, mp.target_field)
return j_session
def aggParquet(self, path: str, datetime_ts: Optional[datetime] = None, timestamp_ms: int = None): # noqa
"""
A helper method to aggregate data and write to a parquet path
:param path: the Parquet path. In a file system that Spark supports
:param datetime_ts: Optional. The session timestamp as a datetime object
:param timestamp_ms: Optional. The session timestamp in milliseconds
"""
df = self.aggProfiles(datetime_ts=datetime_ts, timestamp_ms=timestamp_ms)
df.write.parquet(path)
def log(self, dt: Optional[datetime] = None, org_id: str = None, model_id: str = None, api_key: str = None, endpoint: str = "https://api.whylabsapp.com"):
"""
Run profiling and send results to WhyLabs using the WhyProfileSession's configurations.
Users must specify the organization ID, the model ID and the API key.
You can specify via WHYLABS_ORG_ID, WHYLABS_MODEL_ID and WHYLABS_API_KEY environment variables as well.
:param dt: the datetime of the dataset. Default to the current time
:param org_id: the WhyLabs organization ID. Defaults to WHYLABS_ORG_ID environment variable
:param model_id: the model or dataset ID. Defaults to WHYLABS_MODEL_ID environment variable
:param api_key: the whylabs API key. Defaults to WHYLABS_API_KEY environment variable
        :param endpoint: the API endpoint
"""
if dt is not None:
timestamp_ms = int(dt.timestamp() * 1000)
else:
timestamp_ms = int(datetime.now(tz=timezone.utc).timestamp() * 1000)
if org_id is None:
org_id = os.environ.get("WHYLABS_ORG_ID")
if org_id is None:
raise RuntimeError("Please specify the org ID")
if model_id is None:
model_id = os.environ.get("WHYLABS_MODEL_ID")
if model_id is None:
raise RuntimeError("Please specify the model ID")
if api_key is None:
api_key = os.environ.get("WHYLABS_API_KEY")
if api_key is None:
raise RuntimeError("Please specify the API key")
j_session = self._create_j_session()
j_session.log(timestamp_ms, org_id, model_id, api_key, endpoint)
def new_profiling_session(df: DataFrame, name: str, time_column: Optional[str] = None):
if time_column is None:
return WhyProfileSession(dataframe=df, name=name)
else:
return WhyProfileSession(dataframe=df, name=name, time_column=time_column)
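# Hedged usage sketch (illustrative, not part of the original module): assumes
# an active SparkSession with the whylogs-spark JAR on the classpath, a
# DataFrame `df`, and a timestamp column named "ts"; the output path is made up.
# session = new_profiling_session(df, name="my_dataset", time_column="ts")
# profiles_df = session.groupBy("region").aggProfiles()   # one profile row per group and time bucket
# session.aggParquet("/tmp/whylogs-profiles")             # or write the aggregated profiles to Parquet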
|
11461005
|
from msgpackrpc import Loop
from msgpackrpc import session
from msgpackrpc.transport import tcp
class Client(session.Session):
"""\
    Client for issuing MessagePack-RPC calls to a remote server.
"""
def __init__(self, address, timeout=10, loop=None, builder=tcp, reconnect_limit=5, pack_encoding='utf-8', unpack_encoding=None):
loop = loop or Loop()
session.Session.__init__(self, address, timeout, loop, builder, reconnect_limit, pack_encoding, unpack_encoding)
if timeout:
loop.attach_periodic_callback(self.step_timeout, 1000) # each 1s
@classmethod
def open(cls, *args):
        assert cls is Client, "should not be called on sub-classes"
client = Client(*args)
return Client.Context(client)
class Context(object):
"""\
For with statement
"""
def __init__(self, client):
self._client = client
def __enter__(self):
return self._client
def __exit__(self, type, value, traceback):
self._client.close()
if type:
return False
return True
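# Hedged usage sketch (illustrative, not part of the original module): assumes
# a MessagePack-RPC server listening on localhost:18800 that exposes a "sum"
# method; Address comes from the msgpackrpc package.
# from msgpackrpc import Address
# with Client.open(Address("localhost", 18800)) as c:
#     print(c.call("sum", 1, 2))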
|
11461052
|
import pdoc
s = pdoc.html('bayesian_bootstrap.bootstrap')
with open('bootstrap_documentation.html', 'w') as f:
f.write(s)
|
11461094
|
import subprocess
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--os",
help="your os (default is mac).",
type=str,
choices=["mac", "linux"],
default="mac",
)
parser.add_argument(
"--version",
help="Chromedriver version (need to be compatibility with your chrome)",
type=str,
default="79.0.3945.36",
)
args = parser.parse_args()
# https://chromedriver.chromium.org/downloads
version = args.version
url = f"https://chromedriver.storage.googleapis.com/{version}/chromedriver_{args.os}64.zip"
drivers_prefix = "drivers"
command = f"mkdir {drivers_prefix}".split(" ")
subprocess.call(command)
command = f"wget -P ./{drivers_prefix}/ {url}".split(" ")
subprocess.call(command)
command = f"unzip ./{drivers_prefix}/chromedriver_{args.os}64.zip -d ./{drivers_prefix}/".split(
" "
)
subprocess.call(command)
command = f"rm ./{drivers_prefix}/chromedriver_{args.os}64.zip".split(" ")
subprocess.call(command)
|
11461096
|
import numpy as np
from bayesian_bootstrap.bootstrap import mean, highest_density_interval
from matplotlib import pyplot as plt
import seaborn as sns # noqa: F401
def plot_group_hdis(samples, labels, alpha, n_replications):
    for i, (s, label) in enumerate(zip(samples, labels)):
        posterior = mean(s, n_replications)
        # pass alpha through instead of silently ignoring it, and avoid shadowing the label
        lo, hi = highest_density_interval(posterior, alpha)
        plt.plot([i, i], [lo, hi])
plt.plot([i], [np.mean(posterior)], marker="o")
plt.xticks(range(len(labels)), labels)
if __name__ == "__main__":
samples = [
np.random.normal(0, 1, 100),
np.random.normal(0, 2, 100),
np.random.normal(1, 1, 100),
]
labels = ["0,1", "0,2", "1,1"]
plot_group_hdis(samples, labels, 0.05, 10000)
plt.show()
|
11461106
|
from ctypes import *
from ctypes.util import find_library
import sys
WIN=False
if sys.platform.startswith('win'):
WIN=True
if WIN:
SOCKET = c_uint
_lib=CDLL('dnet')
else:
SOCKET = c_int
_lib_name = find_library('dnet')
if not _lib_name:
raise OSError("Cannot find libdnet.so")
_lib=CDLL(_lib_name)
ETH_ADDR_LEN = 6
INTF_NAME_LEN = 16
INTF_NAME_COUNT = 20
INTF_ALIAS_COUNT = 20
IP6_ADDR_LEN = 16
ADDR_TYPE_NONE = 0
ADDR_TYPE_ETH = 1
ADDR_TYPE_IP = 2
ADDR_TYPE_IP6 = 3
INTF_TYPE_OTHER = 1
INTF_TYPE_ETH = 6
INTF_TYPE_TOKENRING = 9
INTF_TYPE_FDDI = 15
INTF_TYPE_PPP = 23
INTF_TYPE_LOOPBACK = 24
INTF_TYPE_SLIP = 28
INTF_TYPE_TUN = 53
uint8_t = c_ubyte
uint16_t = c_ushort
uint32_t = c_uint
ssize_t = c_long
dnet_ip_addr_t = uint32_t
dnet_intf_name = c_char * INTF_NAME_LEN
class dnet_intf_list(Structure):
pass
dnet_intf_list._fields_ = [ ('length', c_int),
('interfaces', dnet_intf_name * 20) ]
class dnet_eth_addr(Structure):
pass
dnet_eth_addr._fields_ = [ ('data', uint8_t * ETH_ADDR_LEN) ]
dnet_eth_addr_t = dnet_eth_addr
class dnet_ip6_addr(Structure):
pass
dnet_ip6_addr._fields_ = [ ('data', uint8_t * IP6_ADDR_LEN) ]
dnet_ip6_addr_t = dnet_ip6_addr
class dnet_addr_u(Union):
pass
dnet_addr_u._fields_ = [ ('eth', dnet_eth_addr_t),
('ip', dnet_ip_addr_t),
('ip6', dnet_ip6_addr_t),
('data8', uint8_t * 16),
('data16', uint16_t * 8),
('data32', uint32_t * 4) ]
class dnet_addr(Structure):
pass
dnet_addr._anonymous_ = ('__addr_u', )
dnet_addr._fields_ = [ ('addr_type', uint16_t),
('addr_bits', uint16_t),
('__addr_u', dnet_addr_u) ]
class dnet_intf_entry(Structure):
pass
dnet_intf_entry._fields_ = [ ('intf_len', c_uint),
('intf_name', c_char * INTF_NAME_LEN),
('intf_type', c_ushort),
('intf_flags', c_ushort),
('intf_mtu', c_uint),
('intf_addr', dnet_addr),
('intf_dst_addr', dnet_addr),
('intf_link_addr', dnet_addr),
('intf_alias_num', c_uint),
('intf_alias_addrs', dnet_addr * INTF_ALIAS_COUNT) ]
eth_t = c_void_p
intf_t = c_void_p
ip_t = c_void_p
dnet_intf_handler = CFUNCTYPE(c_int, POINTER(dnet_intf_entry), POINTER(c_void_p))
dnet_eth_open = _lib.eth_open
dnet_eth_open.restype = POINTER(eth_t)
dnet_eth_open.argtypes = [ POINTER(c_char) ]
dnet_eth_get = _lib.eth_get
dnet_eth_get.restype = c_int
dnet_eth_get.argtypes = [ POINTER(eth_t), POINTER(dnet_eth_addr_t) ]
dnet_eth_set = _lib.eth_set
dnet_eth_set.restype = c_int
dnet_eth_set.argtypes = [ POINTER(eth_t), POINTER(dnet_eth_addr_t) ]
dnet_eth_send = _lib.eth_send
dnet_eth_send.restype = ssize_t
dnet_eth_send.argtypes = [ POINTER(eth_t), c_void_p, c_size_t ]
dnet_eth_close = _lib.eth_close
dnet_eth_close.restype = POINTER(eth_t)
dnet_eth_close.argtypes = [ POINTER(eth_t) ]
dnet_intf_open = _lib.intf_open
dnet_intf_open.restype = POINTER(intf_t)
dnet_intf_open.argtypes = [ ]
dnet_intf_get = _lib.intf_get
dnet_intf_get.restype = c_int
dnet_intf_get.argtypes = [ POINTER(intf_t), POINTER(dnet_intf_entry) ]
dnet_intf_get_src = _lib.intf_get_src
dnet_intf_get_src.restype = c_int
dnet_intf_get_src.argtypes = [ POINTER(intf_t), POINTER(dnet_intf_entry), POINTER(dnet_addr) ]
dnet_intf_get_dst = _lib.intf_get_dst
dnet_intf_get_dst.restype = c_int
dnet_intf_get_dst.argtypes = [ POINTER(intf_t), POINTER(dnet_intf_entry), POINTER(dnet_addr) ]
dnet_intf_set = _lib.intf_set
dnet_intf_set.restype = c_int
dnet_intf_set.argtypes = [ POINTER(intf_t), POINTER(dnet_intf_entry) ]
dnet_intf_loop = _lib.intf_loop
dnet_intf_loop.restype = POINTER(intf_t)
dnet_intf_loop.argtypes = [ POINTER(intf_t), dnet_intf_handler, c_void_p ]
dnet_intf_close = _lib.intf_close
dnet_intf_close.restype = POINTER(intf_t)
dnet_intf_close.argtypes = [ POINTER(intf_t) ]
dnet_ip_open = _lib.ip_open
dnet_ip_open.restype = POINTER(ip_t)
dnet_ip_open.argtypes = [ ]
dnet_ip_add_option = _lib.ip_add_option
dnet_ip_add_option.restype = ssize_t
dnet_ip_add_option.argtypes = [ POINTER(c_void_p), c_size_t, c_int, POINTER(c_void_p), c_size_t ]
dnet_ip_checksum = _lib.ip_checksum
dnet_ip_checksum.restype = None
dnet_ip_checksum.argtypes = [ POINTER(c_void_p), c_size_t ]
dnet_ip_send = _lib.ip_send
dnet_ip_send.restype = ssize_t
dnet_ip_send.argtypes = [ POINTER(ip_t), c_void_p, c_size_t ]
dnet_ip_close = _lib.ip_close
dnet_ip_close.restype = POINTER(ip_t)
dnet_ip_close.argtypes = [ POINTER(ip_t) ]
class dnet_eth:
def __init__(self, iface):
self.iface_b = create_string_buffer(iface.encode('ascii'))
self.eth = dnet_eth_open(self.iface_b)
def send(self, sx):
dnet_eth_send(self.eth, sx, len(sx))
def close(self):
return dnet_eth_close(self.eth)
class dnet_ip:
def __init__(self):
self.ip = dnet_ip_open()
def send(self, sx):
dnet_ip_send(self.ip, sx, len(sx))
def close(self):
return dnet_ip_close(self.ip)
def dnet_intf_name_loop(entry, intf_list):
l = cast(intf_list, POINTER(dnet_intf_list))
if l.contents.length >= INTF_NAME_COUNT:
return -1
    for idx, ch in enumerate(entry.contents.intf_name):
        l.contents.interfaces[l.contents.length][idx] = ch
l.contents.length += 1
return 0
class dnet_intf:
def __init__(self):
self.intf = dnet_intf_open()
intf_list = dnet_intf_list()
intf_list.length = 0
dnet_intf_loop(self.intf, dnet_intf_handler(dnet_intf_name_loop), pointer(intf_list))
self.names = []
for i in range(INTF_NAME_COUNT):
if i >= intf_list.length:
break
self.names.append(intf_list.interfaces[i].value.decode('ascii').strip('\0'))
def close(self):
return dnet_intf_close(self.intf)
def get(self, iface):
ret = {}
entry = dnet_intf_entry()
entry.intf_name = iface.encode('ascii')
entry.intf_len = sizeof(entry)
r = dnet_intf_get(self.intf, byref(entry))
if r < 0:
return {}
ret['addr6'] = []
for i in range(entry.intf_alias_num):
if entry.intf_alias_addrs[i].addr_type == ADDR_TYPE_IP6:
ret['addr6'].append(bytes(entry.intf_alias_addrs[i].data8[:16]))
ret['type'] = entry.intf_type
ret['addr'] = bytes(entry.intf_addr.data8[:4])
ret['link_addr'] = bytes(entry.intf_link_addr.data8[:6])
return ret
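# Hedged usage sketch (illustrative, not part of the original module): assumes
# libdnet is installed and the current user is allowed to open the interface
# handle (typically requires elevated privileges).
# intf = dnet_intf()
# print(intf.names)              # e.g. ['lo', 'eth0', ...]
# print(intf.get(intf.names[0])) # {'type': ..., 'addr': ..., 'link_addr': ..., 'addr6': [...]}
# intf.close()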
|
11461115
|
from PyQt5 import QtWidgets
from moviepy.editor import *
from threading import Thread
import settings
import clientUI
import os
import sys
import client
import requests
import scriptwrapper
from time import sleep
current_path = os.path.dirname(os.path.realpath(__file__))
script = None
menu = None
class App():
def __init__(self):
global menu
app = QtWidgets.QApplication(sys.argv)
app.processEvents()
login = clientUI.LoginWindow()
login.show()
Thread(target=client.VideoGeneratorRenderStatus).start()
sys.exit(app.exec_())
def init():
app = App()
sys._excepthook = sys.excepthook
def exception_hook(exctype, value, traceback):
print(exctype, value, traceback)
sys._excepthook(exctype, value, traceback)
sys.exit(1)
sys.excepthook = exception_hook
def getFileNames(file_path):
files = [os.path.splitext(filename)[0] for filename in os.listdir(file_path)]
return files
def deleteAllFilesInPath(path):
for file in os.listdir(path):
file_path = os.path.join(path, file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print(e)
if __name__ == "__main__":
current_directory = os.path.dirname(os.path.realpath(__file__))
os.chdir(current_directory)
settings.generateConfigFile()
if not os.path.exists("TempClips"):
os.mkdir("TempClips")
os.mkdir("FirstClips")
os.mkdir("Intros")
os.mkdir("Outros")
os.mkdir("Finished Videos")
os.mkdir("Intervals")
os.mkdir("Save Data")
else:
deleteAllFilesInPath("TempClips")
client.requestGames()
init()
#requestGames()
#requestClips("Warzone", 10)
#connectFTP()
pass
#
# while len(getFileNames(f'{current_path}/Assets/Music')) == 0:
# print(f"No music files in directory: '{current_path}/Assets/Music'. Please add some!")
# sleep(5)
#
# while len(getFileNames(f'{current_path}/Assets/Intros')) == 0:
# print(f"No intro videos in directory: '{current_path}/Assets/Intros'. Please add some!")
# sleep(5)
#
# while len(getFileNames(f'{current_path}/Assets/Intervals')) == 0:
# print(f"No intro videos in directory: '{current_path}/Assets/Intervals'. Please add some!")
# sleep(5)
#
#init()
|
11461146
|
def get_position(initial_position, distance, qnt_jumps):
return initial_position + distance * qnt_jumps
def main(cang1_position, cang1_distance, cang2_position, cang2_distance):
count = 0
cang1Pos = get_position(cang1_position, cang1_distance, count)
cang2Pos = get_position(cang2_position, cang2_distance, count)
if cang1Pos == cang2Pos:
return True
while cang2Pos > cang1Pos:
count = count + 1
cang1Pos = get_position(cang1_position, cang1_distance, count)
cang2Pos = get_position(cang2_position, cang2_distance, count)
if cang1Pos == cang2Pos:
return True
return False
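# A minimal usage sketch (illustrative): main() reports whether the two
# kangaroos ever land on the same position after the same number of jumps.
if __name__ == "__main__":
    print(main(0, 3, 4, 2))   # True: both land on position 12 after 4 jumps
    print(main(0, 4, 1, 2))   # False: the first kangaroo jumps past the second without meeting it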
|
11461170
|
import collections
class HashTrie(object):
"""A fast trie, for short items."""
def __init__(self):
self.mapper = collections.defaultdict(set)
self.len_longest = 0
self.lengths = range(1, self.len_longest + 1)
def __getitem__(self, key):
        # union over every stored prefix of the key; starting from an empty set also covers an empty trie
        return set().union(*[self.mapper[key[:i]] for i in self.lengths])
def __setitem__(self, key, value):
self.mapper[key].add(value)
self.len_longest = max(len(key), self.len_longest)
self.lengths = range(1, self.len_longest + 1)
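# A minimal usage sketch (illustrative): values are stored under short keys and
# a lookup returns every value stored under any prefix of the query key.
if __name__ == "__main__":
    trie = HashTrie()
    trie["ab"] = 1
    trie["abc"] = 2
    trie["x"] = 3
    print(trie["abcd"])   # {1, 2}: both "ab" and "abc" are prefixes of "abcd"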
|
11461183
|
import unittest
from hamcrest import assert_that, equal_to
from mock import MagicMock, call
from mac_os_scripts.enable_discrete_graphics import DiscreteGraphicsEnabler
from mac_os_scripts.utils import RunCommandOutput
class DiscreteGraphicsEnablerTest(unittest.TestCase):
def setUp(self):
self._subject = DiscreteGraphicsEnabler(
sudo_password=None,
)
self._subject.run_command = MagicMock()
def test_enable_discrete_graphics(self):
self._subject.run_command.return_value = RunCommandOutput(
stdout='', stderr='', error_level=-9
)
assert_that(
self._subject.enable_discrete_graphics(),
equal_to(True)
)
assert_that(
self._subject.run_command.mock_calls,
equal_to([
call(command_line='/usr/local/zetta/mac_os_scripts/external/gfxCardStatus.app/Contents/MacOS/gfxCardStatus --discrete',
quiet=True, sudo_password_override=False, timeout=5, send_lines=None)
])
)
def test_remove_gfxcardstatus_login_item(self):
self._subject.run_command.return_value = RunCommandOutput(
stdout='', stderr='', error_level=-9
)
assert_that(
self._subject.remove_gfxcardstatus_login_item(),
equal_to(True)
)
assert_that(
self._subject.run_command.mock_calls,
equal_to([
call(command_line='osascript -e \'tell application "System Events" to delete login item "gfxCardStatus"\'',
quiet=True, send_lines=None, sudo_password_override=False, timeout=None)
])
)
def test_run_pass(self):
self._subject.enable_discrete_graphics = MagicMock()
self._subject.enable_discrete_graphics.return_value = True
self._subject.remove_gfxcardstatus_login_item = MagicMock()
self._subject.remove_gfxcardstatus_login_item.return_value = True
assert_that(
self._subject.run(),
equal_to(True)
)
|
11461185
|
from __future__ import absolute_import, unicode_literals
import os
from contextlib import contextmanager
@contextmanager
def set_os_env_var(env_var_name, value):
"""Set an environment variable with unrolling once the context exists"""
prev_value = os.environ.get(env_var_name)
try:
os.environ[env_var_name] = str(value)
yield
finally:
if prev_value is None:
del os.environ[env_var_name]
else:
os.environ[env_var_name] = prev_value
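# A minimal usage sketch (illustrative, assumes DEMO_FLAG is not already set):
# the variable exists only inside the with-block and its previous state is
# restored afterwards.
if __name__ == "__main__":
    with set_os_env_var("DEMO_FLAG", 1):
        print(os.environ["DEMO_FLAG"])    # "1"
    print("DEMO_FLAG" in os.environ)      # False: restored to its previous absence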
|
11461206
|
import re
import inspect
from enum import Enum
from datetime import datetime
from pykintone.account import kintoneService as ks
import pykintone.structure_field as sf
class kintoneStructure(object):
def __init__(self):
self._property_details = []
def _pd(self, name, field_type=None, sub_type=None, unsent=False, field_name="", name_style_conversion=False):
# sugar syntax for adding property detail
snake_to_camel = lambda fn: "".join([n if i == 0 else n.capitalize() for i, n in enumerate(fn.split("_"))])
_field_name = field_name if (field_name and not name_style_conversion) else snake_to_camel(name)
self._property_details.append(PropertyDetail(name, field_type, sub_type, unsent, _field_name))
@classmethod
def _get_property_names(cls, instance):
properties = inspect.getmembers(instance, lambda m: not (inspect.isbuiltin(m) or inspect.isroutine(m)))
# exclude private attribute
public_properties = [p for p in properties if not (p[0].startswith("_"))]
names = [p[0] for p in public_properties]
return names
@classmethod
def deserialize(cls, json_body):
return cls._deserialize(json_body, lambda f: (f, ""))
@classmethod
def _deserialize(cls, json_body, get_value_and_type):
"""
deserialize json to model
:param json_body: json data
:param get_value_and_type: function(f: json_field) -> value, field_type_string(see FieldType)
:return:
"""
instance = cls()
is_set = False
properties = cls._get_property_names(instance)
def get_property_detail(name):
p = [p for p in instance._property_details if p.name == name or p.field_name == name]
return None if len(p) == 0 else p[0]
for k in json_body:
field = json_body[k]
pd = get_property_detail(k)
pn = k if not pd else pd.to_property_name(k)
if pn in properties:
v, t = get_value_and_type(field)
initial_value = getattr(instance, pn)
value = instance._field_to_property(v, t, pd, initial_value)
setattr(instance, pn, value)
is_set = True
return instance if is_set else None
@classmethod
def _field_to_property(cls, field_value, field_type=None, property_detail=None, initial_value=None):
value = field_value
# configure property's field type
# from user definition
_field_type = None if not property_detail else property_detail.field_type
# from type value in field
if not _field_type and field_type:
f = [e for e in list(FieldType) if e.value == field_type]
if len(f) > 0:
_field_type = f[0]
_field_type = _field_type if _field_type else cls._estimate_type_from_property(initial_value)
if not _field_type:
pass
elif _field_type in (FieldType.ID, FieldType.REVISION, FieldType.RECORD_NUMBER):
value = int(value)
elif _field_type == FieldType.NUMBER:
value = float(value)
elif _field_type == FieldType.DATE:
value = ks.value_to_date(value)
elif _field_type == FieldType.TIME:
value = ks.value_to_time(value)
elif _field_type in [FieldType.DATETIME, FieldType.CREATED_TIME, FieldType.UPDATED_TIME]:
value = ks.value_to_datetime(value)
elif _field_type == FieldType.TIME_STAMP:
value = ks.value_to_timestamp(value)
elif _field_type == FieldType.USER_SELECT:
value = cls.__map(value, lambda v: sf.UserSelect.deserialize(v), flatten=True)
elif _field_type in [FieldType.CREATOR, FieldType.MODIFIER]:
value = sf.UserSelect.deserialize(value)
elif _field_type == FieldType.SUBTABLE:
if property_detail and property_detail.sub_type:
table = []
for r in value:
row = property_detail.sub_type().record_to_model(r["value"])
row.record_id = int(r["id"])
table.append(row)
value = table
elif _field_type == FieldType.FILE:
value = cls.__map(value, lambda v: sf.File.deserialize(v), flatten=True)
elif _field_type == FieldType.STRUCTURE:
cls_type = None
if property_detail and property_detail.sub_type:
cls_type = property_detail.sub_type
elif initial_value:
cls_type = cls.__get_type(initial_value)
if cls_type:
def deserialize(v):
ins = cls_type()
ds = getattr(ins, "deserialize", None)
return None if not (ds and callable(ds)) else ds(v)
value = cls.__map(value, lambda v: deserialize(v))
return value
def serialize(self):
return self._serialize(lambda name, value, pd: (name, value))
def _serialize(self, convert_to_key_and_value, ignore_missing=False):
"""
serialize model object to dictionary
:param convert_to_key_and_value: function(field_name, value, property_detail) -> key, value
:return:
"""
serialized = {}
properties = self._get_property_names(self)
def get_property_detail(name):
p = [p for p in self._property_details if p.name == name]
return None if len(p) == 0 else p[0]
for p in properties:
pd = get_property_detail(p)
value = self._property_to_field(p, pd)
field_name = p if not pd else pd.to_field_name()
if value is None or (ignore_missing and not value) or (pd and pd.unsent):
continue
else:
key, value = convert_to_key_and_value(field_name, value, pd)
if key:
serialized[key] = value
return serialized
def _property_to_field(self, name, property_detail=None):
value = getattr(self, name)
if value is None:
return None
# configure field's type
# from user definition
field_type = None if not property_detail else property_detail.field_type
field_type = field_type if field_type else self._estimate_type_from_property(value)
if not field_type:
pass
elif field_type == FieldType.DATE:
value = ks.date_to_value(value)
elif field_type == FieldType.TIME:
value = ks.time_to_value(value)
elif field_type in [FieldType.DATETIME, FieldType.CREATED_TIME, FieldType.UPDATED_TIME, FieldType.TIME_STAMP]:
# time stamp is same as datetime format (there is no field for timestamp in kintone)
value = ks.datetime_to_value(value)
elif field_type == FieldType.USER_SELECT:
value = self.__map(value, lambda u: u.serialize(), to_list=True)
elif field_type in [FieldType.CREATOR, FieldType.MODIFIER]:
value = value.serialize()
elif field_type == FieldType.SUBTABLE:
if property_detail and property_detail.sub_type:
table = []
for r in value:
values = r.to_record()
row = {}
if "id" in values:
_id = values.pop("id")
row["id"] = _id["value"]
row["value"] = values
table.append(row)
value = table
elif field_type == FieldType.FILE:
value = self.__map(value, lambda v: v.serialize(), to_list=True)
elif field_type == FieldType.STRUCTURE:
def serialize(v):
s = getattr(v, "serialize", None)
return None if not (s and callable(s)) else s()
value = self.__map(value, lambda v: serialize(v))
return value
@classmethod
def _estimate_type_from_property(cls, value):
field_type = None
if isinstance(value, datetime):
field_type = FieldType.DATE
elif issubclass(cls.__get_type(value), kintoneStructure):
field_type = FieldType.STRUCTURE
elif issubclass(cls.__get_type(value), sf.UserSelect):
field_type = FieldType.USER_SELECT
elif issubclass(cls.__get_type(value), sf.File):
field_type = FieldType.FILE
return field_type
@classmethod
def __map(cls, value, func, flatten=False, to_list=False):
result = None
is_none = False
if isinstance(value, (list, tuple)):
result = [func(v) for v in value]
result = [r for r in result if r is not None]
if len(result) == 0:
is_none = True
else:
result = func(value)
if not result:
is_none = True
if is_none:
return None
else:
if flatten:
if isinstance(result, (list, tuple)) and len(result) == 1:
return result[0]
else:
return result
elif to_list:
if isinstance(result, (list, tuple)):
return result
else:
return [result]
else:
return result
@classmethod
def __get_type(cls, value):
get_type = lambda v: v if type(v) == type else type(v)
if isinstance(value, (list, tuple)):
if len(value) > 0:
return get_type(value[0])
else:
return type(None)
else:
return get_type(value)
class PropertyDetail(object):
def __init__(self, name, field_type=None, sub_type=None, unsent=False, field_name=""):
self.name = name
self.field_type = field_type
self.sub_type = sub_type
self.unsent = unsent
self.field_name = field_name
def to_property_name(self, field_name):
if self.field_name == field_name:
return self.name
else:
return field_name
def to_field_name(self):
if self.field_name:
return self.field_name
else:
return self.name
class FieldType(Enum):
DATE = "DATE"
TIME = "TIME"
DATETIME = "DATETIME"
CREATED_TIME = "CREATED_TIME"
UPDATED_TIME = "UPDATED_TIME"
USER_SELECT = "USER_SELECT"
CREATOR = "CREATOR"
MODIFIER = "MODIFIER"
FILE = "FILE"
RECORD_NUMBER = "RECORD_NUMBER"
NUMBER = "NUMBER"
SUBTABLE = "SUBTABLE"
CALC = "CALC"
CATEGORY = "CATEGORY"
CHECK_BOX = "CHECK_BOX"
DROP_DOWN = "DROP_DOWN"
HR = "HR"
LABEL = "LABEL"
LINK = "LINK"
MULTI_LINE_TEXT = "MULTI_LINE_TEXT"
MULTI_SELECT = "MULTI_SELECT"
RADIO_BUTTON = "RADIO_BUTTON"
RICH_TEXT = "RICH_TEXT"
SINGLE_LINE_TEXT = "SINGLE_LINE_TEXT"
SPACER = "SPACER"
STATUS = "STATUS"
STATUS_ASSIGNEE = "STATUS_ASSIGNEE"
ID = "__ID__"
REVISION = "__REVISION__"
TIME_STAMP = "__TIME_STAMP__"
STRUCTURE = "__STRUCTURE__"
class LayoutType(Enum):
ROW = "ROW"
SUBTABLE = "SUBTABLE"
GROUP = "GROUP"
|
11461208
|
import pickle
from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.builder import (
preprocess_builder,
)
from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.core.preprocess import (
DataBasePreprocessor,
)
from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.core.sample_ops import (
DataBaseSamplerV2,
)
def build(sampler_config):
cfg = sampler_config
groups = list(cfg.sample_groups)
prepors = [
preprocess_builder.build_db_preprocess(c) for c in cfg.database_prep_steps
]
db_prepor = DataBasePreprocessor(prepors)
rate = cfg.rate
grot_range = cfg.global_random_rotation_range_per_object
groups = [dict(g.name_to_max_num) for g in groups]
info_path = cfg.database_info_path
with open(info_path, "rb") as f:
db_infos = pickle.load(f)
grot_range = list(grot_range)
if len(grot_range) == 0:
grot_range = None
sampler = DataBaseSamplerV2(db_infos, groups, db_prepor, rate, grot_range)
return sampler
|
11461228
|
from pygin.components.text_mesh import TextMesh
from pygin.game_object import GameObject
from pygame.math import Vector2
import pygame
class Text(GameObject):
def __init__(self, position, message, material, size, font_path, layer=10):
super(Text, self).__init__(position, 0, Vector2(1, 1), layer=layer)
self.material = material
font = pygame.font.Font(font_path, size)
self.text_mesh = TextMesh(self, message, size, font)
|
11461267
|
import json
import os
import bson
import networkx as nx
from networkx.readwrite import json_graph
class NetworkAnalysis:
def __init__(self, population, tolerance=0.1):
self.population = population
self.tolerance = tolerance
self.pcdb = self.population.pcdb
def get_network(self):
# print 'Population evaluated', len(self.population.evaluated)
lista = list(self.population.ids_sorted(self.population.evaluated))
graph = nx.Graph()
for change in self.pcdb.db.generation_changes.find({'change': 'replace_by_other'}):
graph.add_edge(str(change['from']), str(change['to']))
for change in self.pcdb.db.generation_changes.find({'change': 'duplicate'}):
graph.add_edge(str(change['from']), str(change['to']))
# print 'Number of nodes:', graph.number_of_nodes()
# print 'Number of edges:', graph.number_of_edges()
for n in graph:
graph.node[n]['name'] = n
for n in graph:
entry_id = bson.ObjectId(n)
if 'spacegroup' in self.population.get_entry(bson.ObjectId(n))['properties']:
graph.node[n]['group'] = str(self.population.get_entry(bson.ObjectId(n))['properties']['spacegroup'])
graph.node[n]['name'] = graph.node[n]['group']
else:
graph.node[n]['group'] = str(20 * lista.index(entry_id) / len(lista))
graph.node[n]['name'] = str(lista.index(entry_id))
# print 'Computing distances...'
for n in graph.edges():
graph[n[0]][n[1]]['value'] = int(10 * self.population.distance(bson.ObjectId(n[0]), bson.ObjectId(n[1])))
# print 'done'
d = json_graph.node_link_data(graph)
json.dump(d, open('Network_%s.json' % self.population.name, 'w'), sort_keys=True, indent=4,
separators=(',', ': '))
if not os.path.isfile('NetworkBasins_%s.json' % self.population.name):
dupes_dict, dupes_list = self.population.dict_duplicates(self.population.evaluated, fast=True)
new_dupes_dict = {}
for i in dupes_dict:
new_dupes_dict[str(i)] = []
for j in dupes_dict[i]:
new_dupes_dict[str(i)].append(str(j))
wf = open('NetworkBasins_%s.json' % self.population.name, 'w')
json.dump(new_dupes_dict, wf, indent=2, separators=(',', ': '))
wf.close()
else:
rf = open('NetworkBasins_%s.json' % self.population.name, 'r')
new_dupes_dict = json.load(rf)
# print 'Number of non-duplicates', len(new_dupes_dict)
# print 'Reverting dictionary...'
graph_basins = nx.Graph()
tabla_reversa = {}
for i in lista:
if str(i) in new_dupes_dict:
for j in new_dupes_dict[str(i)]:
# print lista.index(bson.ObjectId(i)),' : ', lista.index(bson.ObjectId(j))
if j not in tabla_reversa:
tabla_reversa[j] = str(i)
# print 'done'
for i in graph.edges_iter():
if i[0] in tabla_reversa and i[1] in tabla_reversa:
graph_basins.add_edge(tabla_reversa[i[0]], tabla_reversa[i[1]])
# print 'Number of nodes:', graph_basins.number_of_nodes()
# print 'Number of edges:', graph_basins.number_of_edges()
for n in graph_basins:
entry_id = bson.ObjectId(n)
if 'spacegroup' in self.population.get_entry(entry_id)['properties']:
spacegroup = str(self.population.get_entry(entry_id)['properties']['spacegroup'])
graph_basins.node[n]['group'] = spacegroup
graph_basins.node[n]['name'] = spacegroup
else:
graph_basins.node[n]['group'] = str(20 * lista.index(entry_id) / len(lista))
graph_basins.node[n]['name'] = str(lista.index(entry_id))
for n in graph_basins.edges():
graph_basins[n[0]][n[1]]['value'] = int(
10 * self.population.distance(bson.ObjectId(n[0]), bson.ObjectId(n[1])))
d2 = json_graph.node_link_data(graph_basins)
json.dump(d2, open('Network_%s_basins.json' % self.population.name, 'w'), sort_keys=True, indent=4,
separators=(',', ': '))
best = lista[0]
line = 'Population: %10s Evaluated: %4d N:%5d E:%5d BN:%4d BE:%4d %7.2f GG: %8s %8s'
print(line % (self.population.name,
len(self.population),
graph.number_of_nodes(),
                      graph.number_of_edges(),
graph_basins.number_of_nodes(),
graph_basins.number_of_edges(),
float(graph_basins.number_of_edges()) / (1.0 + graph_basins.number_of_nodes()),
str(best) in graph.nodes(),
str(best) in graph_basins.nodes()))
|
11461276
|
import renderdoc as rd
import rdtest
class D3D12_List_Types(rdtest.TestCase):
demos_test_name = 'D3D12_List_Types'
def check_capture(self):
action = self.find_action("Draw")
self.controller.SetFrameEvent(action.eventId, False)
self.check_triangle(out=action.outputs[0], fore=[0.0, 1.0, 1.0, 1.0])
postvs_data = self.get_postvs(action, rd.MeshDataStage.VSOut, 0, action.numIndices)
postvs_ref = {
0: {
'vtx': 0,
'idx': 0,
'SV_POSITION': [-0.5, -0.5, 0.0, 1.0],
'COLOR': [0.0, 1.0, 1.0, 1.0],
'TEXCOORD': [1234.0, 5678.0],
},
1: {
'vtx': 1,
'idx': 1,
'SV_POSITION': [0.0, 0.5, 0.0, 1.0],
'COLOR': [0.0, 1.0, 1.0, 1.0],
'TEXCOORD': [1234.0, 5678.0],
},
2: {
'vtx': 2,
'idx': 2,
'SV_POSITION': [0.5, -0.5, 0.0, 1.0],
'COLOR': [0.0, 1.0, 1.0, 1.0],
'TEXCOORD': [1234.0, 5678.0],
},
}
self.check_mesh_data(postvs_ref, postvs_data)
|
11461314
|
import argparse
import os
import time
from datetime import datetime, timezone, timedelta
from os.path import join, dirname, abspath
from os import walk
from typing import Union
import psycopg2 as pg
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
sql_path = abspath(join(dirname(abspath(__file__)), '..', 'sql'))
ddl_path = join(sql_path, 'ddl')
dml_path = join(sql_path, 'dml')
partial_update_path = join(sql_path, 'partial_update')
keepalive_kwargs = {
"keepalives": 1,
"keepalives_idle": 30,
"keepalives_interval": 5,
"keepalives_count": 5,
}
def _read_and_execute(
conn,
path: str,
vacuum: str = 'once',
temp_set_workmem: str = None,
query_parameters: Union[tuple, dict, None] = None,
commit_mode: str = 'once'
) -> None:
"""Internal method that reads sql query from file, connects to the database, and executes it."""
print(datetime.now(timezone.utc).astimezone().isoformat(), '- executing script:', path)
# start counter to measure execution time
sts = time.perf_counter()
cur = conn.cursor()
    with open(path, 'r', encoding='UTF-8') as sql_file:
        sql = sql_file.read()
old_isolation_level = conn.isolation_level
if commit_mode == 'autocommit':
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
if temp_set_workmem is not None:
cur.execute('show work_mem')
old_workmem: str = cur.fetchall()[0][0]
if old_workmem != temp_set_workmem:
print('old work_mem:', old_workmem, '- setting to:', temp_set_workmem)
cur.execute('set work_mem = %s', (temp_set_workmem,))
if query_parameters:
cur.execute(sql, query_parameters)
else:
cur.execute(sql)
if commit_mode == 'always':
conn.commit()
if vacuum == 'always':
print(datetime.now(timezone.utc).astimezone().isoformat(), '- running vacuum analyze...')
old_isolation_level2 = conn.isolation_level
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur.execute('VACUUM ANALYZE;')
conn.set_isolation_level(old_isolation_level2)
if commit_mode == 'autocommit':
conn.set_isolation_level(old_isolation_level)
for notice in conn.notices:
print(str(notice).strip())
conn.notices = []
ets = time.perf_counter()
delta = ets - sts
print(datetime.now(timezone.utc).astimezone().isoformat(),
'- finished executing:', path,
'- ex. time:', str(timedelta(seconds=delta)))
def execute_scripts_from_files(
conn,
paths: Union[str, list, tuple],
vacuum: str = 'once',
temp_set_workmem: str = None,
query_parameters: Union[tuple, dict, None] = None,
commit_mode: str = 'once'
) -> None:
"""Method executes sql script from given file path(s)."""
if len(paths) == 0:
raise AttributeError('You need to specify at least one path for file with an sql script.')
try:
if type(paths) == str:
_read_and_execute(conn, paths, vacuum, temp_set_workmem, query_parameters, commit_mode)
elif type(paths) in (tuple, list):
if type(paths[0]) in (tuple, list):
for lst in paths:
for path in lst:
_read_and_execute(conn, path, vacuum, temp_set_workmem, query_parameters, commit_mode)
else:
for path in paths:
_read_and_execute(conn, path, vacuum, temp_set_workmem, query_parameters, commit_mode)
else:
            raise AttributeError(f'Wrong arguments: paths should be a string or a list/tuple of strings, got: {paths}')
except FileNotFoundError:
print(datetime.now(timezone.utc).astimezone().isoformat(), '- Query file not found. Last transaction rolled back.')
conn.rollback()
raise
except:
conn.rollback()
raise
if commit_mode in ('always', 'once'):
conn.commit()
if vacuum in ('always', 'once'):
cur = conn.cursor()
print(datetime.now(timezone.utc).astimezone().isoformat(), '- running vacuum analyze...')
old_isolation_level = conn.isolation_level
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur.execute('VACUUM ANALYZE;')
conn.set_isolation_level(old_isolation_level)
cur.close()
print(datetime.now(timezone.utc).astimezone().isoformat(), '- Done.')
def full_process(dsn: str, starting: str = '000', force: bool = False) -> None:
final_status: str = 'SUCCESS'
ddls: list = []
dmls: list = []
# get paths of sql files
# r=root, d=directories, f = files
if starting == '000':
for r, d, f in walk(ddl_path):
for file in f:
if file.endswith('.sql'):
ddls.append(join(r, file))
for r, d, f in walk(dml_path):
for file in f:
if file.endswith('.sql') and file >= starting:
dmls.append(join(r, file))
# make sure dml files are sorted by names, ddl files should not require any specific order
    dmls = sorted(dmls)
# execute sql scripts
conn = pg.connect(dsn, **keepalive_kwargs)
cur = conn.cursor()
try:
cur.execute('SELECT in_progress FROM process_locks WHERE process_name = %s', ('prg_full_update',))
full_update_in_progress = cur.fetchone()[0] if not force else False
if not full_update_in_progress:
print(datetime.now(timezone.utc).astimezone().isoformat(), '- starting full update process.')
cur.execute('UPDATE process_locks SET (in_progress, start_time, end_time) = (true, \'now\', null) ' +
'WHERE process_name = %s',
('prg_full_update',))
conn.commit()
try:
if len(ddls) > 0:
execute_scripts_from_files(conn=conn, vacuum='never', paths=ddls, commit_mode='once')
execute_scripts_from_files(conn=conn, vacuum='once', paths=dmls, temp_set_workmem='2GB', commit_mode='always')
except Exception as e:
print(datetime.now(timezone.utc).astimezone().isoformat(), '- failure in full update process.')
print(e)
final_status = 'FAIL'
conn.rollback()
finally:
cur.execute('UPDATE process_locks SET (in_progress, end_time, last_status) = (false, \'now\', %s) ' +
'WHERE process_name = %s',
(final_status, 'prg_full_update'))
conn.commit()
print(datetime.now(timezone.utc).astimezone().isoformat(), '- finished full update process.')
else:
print(datetime.now(timezone.utc).astimezone().isoformat(),
'- full update in progress already. Not starting another one.')
finally:
conn.close()
def partial_update(dsn: str) -> None:
final_status: str = 'SUCCESS'
queries_paths: list = []
for r, d, f in walk(partial_update_path):
for file in f:
if file.endswith('.sql'):
queries_paths.append(join(r, file))
# make sure query files are sorted by names
    sorted_queries_paths = sorted(queries_paths)
conn = pg.connect(dsn, **keepalive_kwargs)
cur = conn.cursor()
try:
cur.execute('SELECT in_progress FROM process_locks WHERE process_name in (%s, %s)',
('prg_full_update', 'prg_partial_update'))
update_in_progress = [x[0] for x in cur.fetchall()]
if not any(update_in_progress):
print(datetime.now(timezone.utc).astimezone().isoformat(), '- starting partial update process.')
cur.execute('UPDATE process_locks SET (in_progress, start_time, end_time) = (true, \'now\', null) ' +
'WHERE process_name = %s',
('prg_partial_update',))
conn.commit()
try:
execute_scripts_from_files(conn=conn, vacuum='never', paths=sorted_queries_paths, temp_set_workmem='128MB', commit_mode='autocommit')
except Exception as e:
print(datetime.now(timezone.utc).astimezone().isoformat(), '- failure in partial update process.')
print(e)
final_status = 'FAIL'
conn.rollback()
finally:
cur.execute('UPDATE process_locks SET (in_progress, end_time, last_status) = (false, \'now\', %s) ' +
'WHERE process_name = %s',
(final_status, 'prg_partial_update'))
conn.commit()
print(datetime.now(timezone.utc).astimezone().isoformat(), '- finished partial update process.')
else:
print(datetime.now(timezone.utc).astimezone().isoformat(),
'- update in progress skipping partial update.')
finally:
conn.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--full', help='Launch full process', nargs='?', const=True)
parser.add_argument('--update', help='Launch partial update process', nargs='?', const=True)
parser.add_argument('--force', help='Ignore checking if another process is running. Applies to full process.', nargs='?', const=True)
parser.add_argument('--dsn', help='Connection string for PostgreSQL DB.', nargs='?')
parser.add_argument('--dotenv', help='Path to .env file with credentials for PostgreSQL DB.', nargs='?')
parser.add_argument('--starting', help='Start from this query (DML). Must match name exactly.', nargs=1)
args = vars(parser.parse_args())
if args.get('dotenv'):
from dotenv import load_dotenv
dotenv_path = args['dotenv']
load_dotenv(dotenv_path, verbose=True)
PGHOSTADDR = os.environ['PGHOSTADDR']
PGPORT = os.environ['PGPORT']
PGDATABASE = os.environ['PGDATABASE']
PGUSER = os.environ['PGUSER']
PGPASSWORD = os.environ['PGPASSWORD']
        dsn = f'host={PGHOSTADDR} port={PGPORT} dbname={PGDATABASE} user={PGUSER} password={PGPASSWORD}'
else:
dsn = args['dsn']
if 'full' in args and args.get('full'):
if args.get('starting'):
full_process(dsn, starting=args.get('starting')[0], force=args.get('force'))
else:
full_process(dsn, force=args.get('force'))
elif 'update' in args and args.get('update'):
partial_update(dsn)
|
11461329
|
from nose import with_setup
from pybbn.graph.dag import BbnUtil
from pybbn.pptc.moralizer import Moralizer
from pybbn.pptc.potentialinitializer import PotentialInitializer
def setup():
"""
Setup.
:return: None.
"""
pass
def teardown():
"""
Teardown.
:return: None.
"""
pass
@with_setup(setup, teardown)
def test_moralizer():
"""
Tests moralization.
:return: None.
"""
bbn = BbnUtil.get_huang_graph()
PotentialInitializer.init(bbn)
ug = Moralizer.moralize(bbn)
e_edges = set([
'0--1',
'0--2',
'1--3',
'2--4',
'3--5',
'4--5',
'2--6',
'4--7',
'6--7',
'3--4',
'4--6'
])
o_edges = set([str(edge) for edge in ug.get_edges()])
assert len(e_edges) == len(o_edges)
for e in e_edges:
assert e in o_edges
|
11461338
|
from flask import Flask
from web.Tasks import tasks_api
from web.Environments import environments_api
from web.Storage import storage_api
from web.AutoBuild import autobuild_api
app = Flask(__name__)
app.register_blueprint(tasks_api)
app.register_blueprint(environments_api)
app.register_blueprint(storage_api)
app.register_blueprint(autobuild_api)
if __name__ == "__main__":
app.run(host='0.0.0.0')
|
11461340
|
from numbers import Real
from unittest import TestCase
import numpy as np
from diffprivlib.validation import check_bounds
class TestCheckBounds(TestCase):
def test_none(self):
self.assertRaises(TypeError, check_bounds, None)
def test_non_tuple(self):
with self.assertRaises(TypeError):
check_bounds([1, 2, 3])
def test_incorrect_entries(self):
with self.assertRaises(ValueError):
check_bounds(([1, 2], 1))
with self.assertRaises(ValueError):
check_bounds(([1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
check_bounds(([1, 2], [1, 2], [1, 2]))
def test_consistency(self):
bounds = check_bounds(([1, 1], [2, 2]), shape=2)
bounds2 = check_bounds(bounds, shape=2)
self.assertTrue(np.all(bounds[0] == bounds2[0]))
self.assertTrue(np.all(bounds[1] == bounds2[1]))
def test_array_output(self):
bounds = check_bounds(([1, 1], [2, 2]), shape=2)
self.assertIsInstance(bounds[0], np.ndarray)
self.assertIsInstance(bounds[1], np.ndarray)
def test_scalar_output(self):
bounds = check_bounds((1, 2), shape=0)
self.assertIsInstance(bounds[0], Real)
self.assertIsInstance(bounds[1], Real)
bounds = check_bounds((1, 2), shape=0, dtype=int)
self.assertIsInstance(bounds[0], int)
self.assertIsInstance(bounds[1], int)
bounds = check_bounds((1, 2), shape=0, dtype=float)
self.assertIsInstance(bounds[0], float)
self.assertIsInstance(bounds[1], float)
def test_wrong_dims(self):
with self.assertRaises(ValueError):
check_bounds(([1, 1], [2, 2]), shape=3)
with self.assertRaises(ValueError):
check_bounds(([[1, 1]], [[2, 2]]), shape=2)
def test_bad_shape(self):
with self.assertRaises(ValueError):
check_bounds(([1, 1], [2, 2]), shape=-2)
with self.assertRaises(TypeError):
check_bounds(([1, 1], [2, 2]), shape=2.0)
def test_wrong_order(self):
with self.assertRaises(ValueError):
check_bounds((2, 1))
def test_non_numeric(self):
with self.assertRaises(ValueError):
check_bounds(("One", "Two"))
def test_complex(self):
with self.assertRaises(TypeError):
check_bounds((1.0, 1+2j), dtype=complex)
def test_min_separation(self):
bounds = check_bounds((1, 1), min_separation=2)
self.assertEqual(0, bounds[0])
self.assertEqual(2, bounds[1])
bounds = check_bounds((1., 1.), min_separation=1)
self.assertEqual(0.5, bounds[0])
self.assertEqual(1.5, bounds[1])
bounds = check_bounds((0.9, 1.1), min_separation=1)
self.assertEqual(0.5, bounds[0])
self.assertEqual(1.5, bounds[1])
|
11461343
|
import unittest
from decorator import CharacterConcreteComponent, SwordConcreteDecorator, ArmorConcreteDecorator, Character, RingConcreteDecorator, ShieldConcreteDecorator
class DecoratorTest(unittest.TestCase):
def test_character_without_equipment(self):
character = CharacterConcreteComponent(name='Luxor')
self.assertEqual(
character.equip(),
"Luxor equipment: Empty"
)
def test_character_with_armor(self):
character = CharacterConcreteComponent(name='Luxor')
armor = ArmorConcreteDecorator(character)
self.assertEqual(
armor.equip(),
"Luxor equipment:\nArmor: Yes"
)
def test_character_with_sword(self):
character = CharacterConcreteComponent(name='Luxor')
sword = SwordConcreteDecorator(character)
self.assertEqual(
sword.equip(),
"Luxor equipment:\nSword: Yes"
)
def test_character_with_armor_and_sword(self):
character = CharacterConcreteComponent(name='Luxor')
armor = ArmorConcreteDecorator(character)
sword = SwordConcreteDecorator(armor)
self.assertEqual(
sword.equip(),
"Luxor equipment:\nArmor: Yes\nSword: Yes"
)
def test_character_with_ring(self):
character = CharacterConcreteComponent(name='Luxor')
ring = RingConcreteDecorator(character)
self.assertEqual(
ring.equip(),
"Luxor equipment:\nRing: Yes"
)
def test_character_with_shield(self):
character = CharacterConcreteComponent(name='Luxor')
shield = ShieldConcreteDecorator(character)
self.assertEqual(
shield.equip(),
"Luxor equipment:\nShield: Yes"
)
if __name__ == "__main__":
unittest.main()
|
11461366
|
import os
import time
from collections import deque
import numpy as np
import torch
from a2c_ppo_acktr import algo, utils
from a2c_ppo_acktr.arguments import get_args
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.storage import RolloutStorage
from tianshou.data import Batch, to_numpy
from utils import make_policy, make_img_adv_attack, make_atari_env_watch, make_victim_network
import random as rd
from copy import deepcopy
from typing import Dict, List, Union, Optional, Callable
def main():
args = get_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_num_threads(1)
device = args.device
envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
args.gamma, None, device, False)
if args.resume_path is None:
actor_critic = Policy(
envs.observation_space.shape,
envs.action_space,
device=args.device,
base_kwargs={'recurrent': args.recurrent_policy})
actor_critic.to(device)
actor_critic.init(device)
else:
actor_critic = make_policy(args, args.algo, args.resume_path)
assert args.resume_path is not None, \
"You are training with adversarial training but you haven't declared a base trained model"
if args.target_model_path:
victim_policy = make_policy(args, args.algo, args.target_model_path)
else:
victim_policy = actor_critic
args.target_policy, args.policy = args.algo, args.algo
args.perfect_attack = False
adv_net = make_victim_network(args, victim_policy)
adv_atk, _ = make_img_adv_attack(args, adv_net, targeted=False)
# watch agent's performance
def watch():
print("Testing agent ...")
actor_critic.eval()
args.task, args.frames_stack = args.env_name, 4
env = make_atari_env_watch(args)
obs = env.reset()
n_ep, tot_rew = 0, 0
succ_attacks, n_attacks = 0, 0
while True:
inputs = Batch(obs=np.expand_dims(obs, axis=0))
with torch.no_grad():
result = actor_critic(inputs)
action = result.act
# START ADVERSARIAL ATTACK
x = rd.uniform(0, 1)
if x < args.atk_freq:
ori_act = action
obs = torch.FloatTensor(inputs.obs).to(device)
data = Batch(obs=obs)
adv_act, adv_obs = obs_attacks(data, ori_act, adv_atk, actor_critic)
for i in range(len(adv_act)):
if adv_act[i] != ori_act[i]:
succ_attacks += 1
n_attacks += args.num_processes
action = adv_act
# Observe reward and next obs
obs, reward, done, _ = env.step(action)
tot_rew += reward
if done:
n_ep += 1
obs = env.reset()
if n_ep == args.test_num:
break
if n_attacks == 0:
n_attacks = 1
print("Evaluation using {} episodes: mean reward {:.5f}, succ_atks(%) {:.3f}\n".format(
n_ep, tot_rew / n_ep, succ_attacks / n_attacks))
if args.watch:
watch()
exit(0)
if args.algo == 'a2c':
agent = algo.A2C_ACKTR(
actor_critic,
args.value_loss_coef,
args.entropy_coef,
lr=args.lr,
eps=args.rms_eps,
alpha=args.alpha,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'ppo':
agent = algo.PPO(
actor_critic,
args.clip_param,
args.ppo_epoch,
args.num_mini_batch,
args.value_loss_coef,
args.entropy_coef,
lr=args.lr,
eps=args.rms_eps,
max_grad_norm=args.max_grad_norm)
elif args.algo == 'acktr':
agent = algo.A2C_ACKTR(
actor_critic, args.value_loss_coef, args.entropy_coef, acktr=True)
rollouts = RolloutStorage(args.num_steps, args.num_processes,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size)
obs = envs.reset()
rollouts.obs[0].copy_(obs)
rollouts.to(device)
episode_rewards = deque(maxlen=10)
acc_rewards = np.zeros(args.num_processes)
best_reward = -np.inf
start = time.time()
num_updates = int(
args.num_env_steps) // args.num_steps // args.num_processes
print("start training")
succ_attacks = 0
n_attacks = 0
for j in range(num_updates):
if args.use_linear_lr_decay:
# decrease learning rate linearly
utils.update_linear_schedule(
agent.optimizer, j, num_updates,
agent.optimizer.lr if args.algo == "acktr" else args.lr)
for step in range(args.num_steps):
# Get action and value of original observation
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
rollouts.obs[step], rollouts.recurrent_hidden_states[step],
rollouts.masks[step])
# Given original action, generate adversarial observation
x = rd.uniform(0, 1)
if x < args.atk_freq:
ori_act = action.flatten()
data = Batch(obs=obs)
adv_act, adv_obs = obs_attacks(data, ori_act, adv_atk, actor_critic)
for i in range(len(adv_act)):
if adv_act[i] != ori_act[i]:
succ_attacks += 1
n_attacks += len(adv_act)
adv_obs = torch.FloatTensor(adv_obs).to(device)
rollouts.obs[step] = adv_obs
# Get action and value of adversarial observation
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
rollouts.obs[step], rollouts.recurrent_hidden_states[step],
rollouts.masks[step])
# Observe reward and next obs given adversarial action
obs, reward, done, infos = envs.step(action)
for i, d in enumerate(done):
acc_rewards[i] += reward[i].detach().cpu()[0]
if d:
episode_rewards.append(acc_rewards[i])
acc_rewards[i] = 0
# If done then clean the history of observations.
masks = torch.FloatTensor(
[[0.0] if done_ else [1.0] for done_ in done])
bad_masks = torch.FloatTensor(
[[0.0] if 'bad_transition' in info.keys() else [1.0]
for info in infos])
# Insert in memory adversarial action and value
rollouts.insert(obs, recurrent_hidden_states, action,
action_log_prob, value, reward, masks, bad_masks)
with torch.no_grad():
next_value = actor_critic.get_value(
rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1]).detach()
rollouts.compute_returns(next_value, args.use_gae, args.gamma,
args.gae_lambda, args.use_proper_time_limits)
value_loss, action_loss, dist_entropy = agent.update(rollouts)
rollouts.after_update()
# save for every interval-th episode or for the last epoch
if len(episode_rewards) > 0 and np.mean(episode_rewards) >= best_reward and args.save_dir != "":
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
best_reward = np.mean(episode_rewards)
torch.save([
actor_critic,
getattr(utils.get_vec_normalize(envs), 'ob_rms', None)
], os.path.join(save_path, "policy.pth"))
if j % args.log_interval == 0 and len(episode_rewards) > 0:
total_num_steps = (j + 1) * args.num_processes * args.num_steps
end = time.time()
if n_attacks == 0:
n_attacks = 1
print(
"Updates {}, num timesteps {}, FPS {} \nLast {} training episodes: mean/median reward {:.1f}/{:.1f},"
" min/max reward {:.1f}/{:.1f} (best avg reward {:.1f}), succ_atks(%) {:.3f}\n"
.format(j, total_num_steps,
int(total_num_steps / (end - start)),
len(episode_rewards), np.mean(episode_rewards),
np.median(episode_rewards), np.min(episode_rewards),
np.max(episode_rewards), best_reward, succ_attacks / n_attacks))
n_attacks, succ_attacks = 0, 0
print("model saved to " + str(os.path.join(args.save_dir, args.algo, "policy.pth")))
watch()
def obs_attacks(data: Batch,
target_action: List[int],
obs_adv_atk,
policy
):
"""
    Performs an image adversarial attack on the observation stored in 'obs' with respect to
the action 'target_action' using the method defined in 'self.obs_adv_atk'
"""
data = deepcopy(data)
obs = data.obs
act = target_action
adv_obs = obs_adv_atk.perturb(obs, act) # create adversarial observation
with torch.no_grad():
adv_obs = adv_obs.cpu().detach().numpy()
data.obs = adv_obs
result = policy(data, last_state=None)
return to_numpy(result.act), adv_obs
if __name__ == "__main__":
main()
|
11461379
|
from subprocess import check_output
import importlib
def exec_in_terminal(command):
"""Run a command in the terminal and get the
output stripping the last newline.
Args:
command: a string or list of strings
"""
return check_output(command).strip().decode("utf8")
def is_available(lib_name: str) -> bool:
"""
Checks if a library can be imported
"""
try:
importlib.import_module(lib_name)
available = True
except ImportError:
available = False
return available
def version(lib_name) -> str:
"""
Returns the version of a library as a string or
unavailable if it cannot be imported
"""
if is_available(lib_name):
return _version(lib_name)
else:
return "unavailable"
def _version(lib_name):
"""
Returns the version of a package.
If version cannot be determined returns "available"
"""
lib = importlib.import_module(lib_name)
if hasattr(lib, "__version__"):
return lib.__version__
else:
return "available"
|
11461412
|
from pywatts.core.base_step import BaseStep
class EitherOrStep(BaseStep):
"""
This step merges the results of multiple input steps by choosing the first step in the input list
which contains data for the requested time range.
:param input_steps: The input steps for the EitherOrStep
:type input_steps: List[BaseStep]
"""
def __init__(self, input_steps):
super().__init__(input_steps)
self.name = "EitherOr"
def _compute(self, start, end):
input_data = self._get_input(start, end)
return self._transform(input_data)
def _get_input(self, start, batch):
inputs = []
for step in self.input_steps.values():
inp = step.get_result(start, batch)
inputs.append(inp)
return inputs
def _transform(self, input_step):
# Chooses the first input_step which calculation is not stopped.
for in_step in input_step:
if in_step is not None:
# This buffer is never changed in this step. Consequently, no copy is necessary.
return self._post_transform(in_step)
@classmethod
def load(cls, stored_step: dict, inputs, targets, module, file_manager):
"""
Load the Either or step from a stored step.
:param stored_step: Information about the stored either or step
:param inputs: the input steps
:param targets: Does not exist for eitherOr
:param module: Does not exist for either or step
:param file_manager: The filemanager used for saving information.
:return: The restored eitherOrStep
"""
step = cls(inputs)
step.id = stored_step["id"]
step.name = stored_step["name"]
step.last = stored_step["last"]
return step
def _should_stop(self, start, end):
input_data = self._get_input(start, end)
return input_data and (all(map(lambda x: x is None, input_data)))
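# --- Illustrative sketch (added, not part of the original pywatts step) ---
# The selection rule used by _transform, shown on plain values: the first entry
# that is not None wins. The inputs here are hypothetical stand-ins for the
# results that other pywatts steps would normally provide.
if __name__ == "__main__":
    candidates = [None, None, "first usable result", "ignored"]
    chosen = next((c for c in candidates if c is not None), None)
    print(chosen)  # "first usable result"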
|
11461445
|
from injector import inject
from domain.operation.execution.adapters.execution.ExecuteAdapter import ExecuteAdapter
from domain.operation.execution.adapters.execution.integration.ExecuteIntegrationStrategyFactory import ExecuteIntegrationStrategyFactory
from domain.operation.execution.services.IntegrationExecutionService import IntegrationExecutionService
from domain.operation.execution.services.OperationCacheService import OperationCacheService
from domain.operation.services.DataOperationJobExecutionIntegrationService import \
DataOperationJobExecutionIntegrationService
from infrastructure.dependency.scopes import IScoped
from infrastructure.logging.SqlLogger import SqlLogger
from models.enums.events import EVENT_EXECUTION_INTEGRATION_EXECUTE_OPERATION
class ExecuteIntegrationAdapter(ExecuteAdapter, IScoped):
@inject
def __init__(self,
sql_logger: SqlLogger,
operation_cache_service: OperationCacheService,
data_operation_job_execution_integration_service: DataOperationJobExecutionIntegrationService,
integration_execution_service: IntegrationExecutionService,
execute_integration_strategy_factory: ExecuteIntegrationStrategyFactory,
):
self.execute_integration_strategy_factory = execute_integration_strategy_factory
self.operation_cache_service = operation_cache_service
self.integration_execution_service = integration_execution_service
self.sql_logger = sql_logger
self.data_operation_job_execution_integration_service = data_operation_job_execution_integration_service
def execute(self,
data_operation_integration_id: int,
data_operation_job_execution_id: int,
data_operation_job_execution_integration_id: int) -> int:
data_operation_integration = self.operation_cache_service.get_data_operation_integration_by_id(
data_operation_integration_id=data_operation_integration_id)
data_integration_id = data_operation_integration.DataIntegrationId
self.integration_execution_service.clear_data(
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
data_integration_id=data_integration_id)
affected_row_count = self.execute_integration(
data_operation_job_execution_id=data_operation_job_execution_id,
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
data_operation_integration_id=data_operation_integration_id)
return affected_row_count
def get_start_log(self, data_integration_id: int):
target_connection = self.operation_cache_service.get_target_connection(data_integration_id=data_integration_id)
if target_connection.Database is not None:
log = f"{target_connection.Database.Schema}.{target_connection.Database.TableName} integration execute started"
elif target_connection.File is not None:
log = f"{target_connection.File.Folder}\\{target_connection.File.FileName} integration execute started"
elif target_connection.Queue is not None:
log = f"{target_connection.Queue.TopicName} integration execute started"
else:
log = f"Integration execute started"
return log
def get_finish_log(self, data_integration_id: int, data_count: int):
target_connection = self.operation_cache_service.get_target_connection(data_integration_id=data_integration_id)
if target_connection.Database is not None:
log = f"{target_connection.Database.Schema}.{target_connection.Database.TableName} integration execute finished. (Source Data Count:{data_count})"
elif target_connection.File is not None:
log = f"{target_connection.File.Folder}\\{target_connection.File.FileName} integration execute finished. (Source Data Count:{data_count})"
elif target_connection.Queue is not None:
log = f"{target_connection.Queue.TopicName} integration execute finished. (Source Data Count:{data_count})"
else:
log = f"Integration execute finished"
return log
def get_error_log(self, data_integration_id: int):
target_connection = self.operation_cache_service.get_target_connection(data_integration_id=data_integration_id)
if target_connection.Database is not None:
log = f"{target_connection.Database.Schema}.{target_connection.Database.TableName} integration execute getting error"
elif target_connection.File is not None:
log = f"{target_connection.File.Folder}\\{target_connection.File.FileName} integration execute getting error"
elif target_connection.Queue is not None:
log = f"{target_connection.Queue.TopicName} integration execute getting error"
else:
log = f"Integration execute getting error"
return log
def check_error_raise(self) -> bool:
return True
def execute_integration(self,
data_operation_job_execution_id: int,
data_operation_job_execution_integration_id: int,
data_operation_integration_id: int) -> int:
data_operation_integration = self.operation_cache_service.get_data_operation_integration_by_id(
data_operation_integration_id=data_operation_integration_id)
limit = data_operation_integration.Limit
process_count = data_operation_integration.ProcessCount
data_operation_integration_order = data_operation_integration.Order
data_integration_code = data_operation_integration.DataIntegration.Code
execute_integration_strategy = self.execute_integration_strategy_factory.get(
data_operation_integration_id=data_operation_integration_id)
strategy_name = type(execute_integration_strategy).__name__
self.sql_logger.info(
f"{data_operation_integration_order}-{data_integration_code} - integration will execute on {strategy_name}. {process_count}-{limit}",
job_id=data_operation_job_execution_id)
affected_row_count = execute_integration_strategy.execute(
data_operation_job_execution_id=data_operation_job_execution_id,
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
data_operation_integration_id=data_operation_integration_id)
self.data_operation_job_execution_integration_service.update_source_data_count(
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
source_data_count=affected_row_count)
self.data_operation_job_execution_integration_service.create_event(
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
event_code=EVENT_EXECUTION_INTEGRATION_EXECUTE_OPERATION, affected_row=affected_row_count)
return affected_row_count
|
11461519
|
import sys
from com.automationpanda.example.calc import Calculator
# Attempt to use back-ported unittest2 for Python 2.6 and earlier
# However, it is strongly recommended to use Python 2.7 or 3.<latest>
try:
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
raise ImportError()
except ImportError:
import unittest
NUMBER_1 = 3.0
NUMBER_2 = 2.0
FAILURE = 'incorrect value'
class CalculatorTest(unittest.TestCase):
def setUp(self):
self.calc = Calculator()
def test_last_answer_init(self):
value = self.calc.last_answer
self.assertEqual(value, 0.0, FAILURE)
def test_add(self):
value = self.calc.add(NUMBER_1, NUMBER_2)
self.assertEqual(value, 5.0, FAILURE)
self.assertEqual(value, self.calc.last_answer, FAILURE)
def test_subtract(self):
value = self.calc.subtract(NUMBER_1, NUMBER_2)
self.assertEqual(value, 1.0, FAILURE)
self.assertEqual(value, self.calc.last_answer, FAILURE)
def test_subtract_negative(self):
value = self.calc.subtract(NUMBER_2, NUMBER_1)
self.assertEqual(value, -1.0, FAILURE)
self.assertEqual(value, self.calc.last_answer, FAILURE)
def test_multiply(self):
value = self.calc.multiply(NUMBER_1, NUMBER_2)
self.assertEqual(value, 6.0, FAILURE)
self.assertEqual(value, self.calc.last_answer, FAILURE)
def test_divide(self):
value = self.calc.divide(NUMBER_1, NUMBER_2)
self.assertEqual(value, 1.5, FAILURE)
self.assertEqual(value, self.calc.last_answer, FAILURE)
def test_divide_by_zero(self):
self.assertRaises(ZeroDivisionError, self.calc.divide, NUMBER_1, 0)
def test_max_greater(self):
value = self.calc.maximum(NUMBER_1, NUMBER_2)
self.assertEqual(value, NUMBER_1, FAILURE)
self.assertEqual(value, self.calc.last_answer, FAILURE)
def test_max_less(self):
value = self.calc.maximum(NUMBER_2, NUMBER_1)
self.assertEqual(value, NUMBER_1, FAILURE)
self.assertEqual(value, self.calc.last_answer, FAILURE)
def test_max_equal(self):
value = self.calc.maximum(NUMBER_1, NUMBER_1)
self.assertEqual(value, NUMBER_1, FAILURE)
self.assertEqual(value, self.calc.last_answer, FAILURE)
def test_min_greater(self):
value = self.calc.minimum(NUMBER_1, NUMBER_2)
self.assertEqual(value, NUMBER_2, FAILURE)
self.assertEqual(value, self.calc.last_answer, FAILURE)
def test_min_less(self):
value = self.calc.minimum(NUMBER_2, NUMBER_1)
self.assertEqual(value, NUMBER_2, FAILURE)
self.assertEqual(value, self.calc.last_answer, FAILURE)
def test_min_equal(self):
value = self.calc.minimum(NUMBER_2, NUMBER_2)
self.assertEqual(value, NUMBER_2, FAILURE)
self.assertEqual(value, self.calc.last_answer, FAILURE)
if __name__ == '__main__':
import xmlrunner
unittest.main(
testRunner=xmlrunner.XMLTestRunner(output='test-reports'),
failfast=False,
buffer=False,
catchbreak=False)
|
11461537
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
from fnmatch import fnmatchcase
irc.ERR_CHANNOTALLOWED = "926"
class DenyChannels(ModuleData):
implements(IPlugin, IModuleData)
name = "DenyChannels"
def actions(self):
return [ ("joinpermission", 50, self.blockNonDenied) ]
def verifyConfig(self, config):
for option in ("deny_channels", "allow_channels"):
if option in config:
if not isinstance(config[option], list):
raise ConfigValidationError(option, "value must be a list")
for chanName in config[option]:
if not isinstance(chanName, basestring) or not chanName:
raise ConfigValidationError(option, "\"{}\" is an invalid channel name".format(chanName))
def blockNonDenied(self, channel, user):
if self.ircd.runActionUntilValue("userhasoperpermission", user, "channel-denied", users=[user]) is True:
return None
deniedChannels = self.ircd.config.get("deny_channels", [])
allowedChannels = self.ircd.config.get("allow_channels", [])
for name in allowedChannels:
if fnmatchcase(channel.name, name):
return None
for name in deniedChannels:
if fnmatchcase(channel.name, name):
user.sendMessage(irc.ERR_CHANNOTALLOWED, channel.name, "Channel {} is forbidden".format(channel.name))
return False
return None
denyChans = DenyChannels()
|
11461599
|
import platform
import time
class Buffer:
"""A context manager that waits for a guaranteed minimum interval
between the previous exit and next enter.
"""
def __init__(self, logger, interval=0.1):
self.logger = logger
self.default = float(interval)
self.interval = self.default
self.previous = 0.0
def __call__(self, interval=None):
if interval is not None:
self.interval = float(interval)
return self
def __enter__(self):
duration = time.time() - self.previous
if duration < self.interval:
sleep_time = self.interval - duration
self.logger.debug("Buffering input for %.2f seconds", sleep_time)
time.sleep(sleep_time)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.previous = time.time()
self.interval = self.default
def is_windows():
return platform.system() == "Windows"
def is_macos():
return platform.system() == "Darwin"
def is_linux():
return platform.system() == "Linux"
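# --- Illustrative usage (added sketch, not part of the original helpers) ---
# A minimal sketch of Buffer as a throttle between two actions; any object with
# a debug() method works as the logger, a standard logging.Logger is used here.
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.DEBUG)
    throttle = Buffer(logging.getLogger(__name__), interval=0.5)
    with throttle:
        pass  # first enter never sleeps: the previous exit time is still 0.0
    with throttle(0.2):  # one-off interval override; resets to 0.5 on exit
        pass  # sleeps until at least 0.2 s has passed since the previous exit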
|
11461605
|
import glob
import os
from chainer.dataset import download
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.utils import read_image
class CityscapesTestImageDataset(GetterDataset):
"""Image dataset for test split of `Cityscapes dataset`_.
.. _`Cityscapes dataset`: https://www.cityscapes-dataset.com
.. note::
Please manually download the data because it is not allowed to
re-distribute Cityscapes dataset.
Args:
data_dir (string): Path to the dataset directory. The directory should
contain the :obj:`leftImg8bit` directory. If :obj:`auto` is given,
it uses :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/cityscapes` by
default.
This dataset returns the following data.
.. csv-table::
:header: name, shape, dtype, format
:obj:`img`, ":math:`(3, H, W)`", :obj:`float32`, \
"RGB, :math:`[0, 255]`"
"""
def __init__(self, data_dir='auto'):
super(CityscapesTestImageDataset, self).__init__()
if data_dir == 'auto':
data_dir = download.get_dataset_directory(
'pfnet/chainercv/cityscapes')
img_dir = os.path.join(data_dir, os.path.join('leftImg8bit', 'test'))
if not os.path.exists(img_dir):
raise ValueError(
'Cityscapes dataset does not exist at the expected location. '
'Please download it from https://www.cityscapes-dataset.com/. '
'Then place directory leftImg8bit at {}.'.format(
os.path.join(data_dir, 'leftImg8bit')))
self.img_paths = []
for city_dname in sorted(glob.glob(os.path.join(img_dir, '*'))):
for img_path in sorted(glob.glob(
os.path.join(city_dname, '*_leftImg8bit.png'))):
self.img_paths.append(img_path)
self.add_getter('img', self._get_image)
self.keys = 'img' # do not return tuple
def __len__(self):
return len(self.img_paths)
def _get_image(self, i):
return read_image(self.img_paths[i])
|
11461626
|
import cv2
import numpy as np
import tensorflow as tf
GAMMA = 2.2 # LDR and HDR domain transform parameter
MU = 5000. # tonemapping parameter
def write_hdr(out_path, image):
if len(image.shape) == 4:
assert image.shape[0] == 1, 'invalid shape: {}'.format(image)
image = image[0]
assert len(image.shape) == 3 and image.shape[-1] == 3
with open(out_path, "wb") as f:
f.write(b"#?RADIANCE\n# Made with Python & Numpy\nFORMAT=32-bit_rle_rgbe\n\n")
f.write(b"-Y %d +X %d\n" % (image.shape[0], image.shape[1]))
brightest = np.maximum(np.maximum(image[..., 0], image[..., 1]), image[..., 2])
mantissa = np.zeros_like(brightest)
exponent = np.zeros_like(brightest)
np.frexp(brightest, mantissa, exponent)
scaled_mantissa = mantissa * 255.0 / brightest
rgbe = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8)
rgbe[..., 0:3] = np.around(image[..., 0:3] * scaled_mantissa[..., None])
rgbe[..., 3] = np.around(exponent + 128)
rgbe.flatten().tofile(f)
def read_hdr(hdr_path): # output -1~1
im = cv2.imread(hdr_path, cv2.IMREAD_UNCHANGED).astype(np.float32)
return (im * 2. - 1.)[..., ::-1]
def hdr2ldr(hdr, expo): # input/output -1~1
return (tf.clip_by_value(((hdr + 1) / 2. * expo), 0, 1) ** (1 / GAMMA)) * 2. - 1
def ldr2hdr(ldr, expo): # input/output -1~1
return (((ldr + 1.) / 2.) ** GAMMA / expo) * 2. - 1
def tonemap_np(hdr, mu=MU): # input/output -1~1
if mu is None:
mu = MU
return np.log(1 + mu * (hdr + 1.) / 2.) / np.log(1 + mu) * 2. - 1
def itonemap_np(tp, mu=MU):
if mu is None:
mu = MU
return ((1. + mu) ** ((tp + 1.) / 2) - 1) / mu * 2 - 1
def tonemap(hdr, mu=MU, name='tonemap'): # input/output -1~1
if mu is None:
mu = MU
with tf.name_scope(name):
return tf.log(1 + mu * (hdr + 1.) / 2.) / tf.log(1 + mu) * 2. - 1
def itonemap(tp, mu=MU, name='itonemap'):
if mu is None:
mu = MU
with tf.name_scope(name):
return (tf.pow((1. + mu), ((tp + 1.) / 2)) - 1) / mu * 2 - 1
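# --- Illustrative self-check (added sketch, not part of the original module) ---
# itonemap_np should invert tonemap_np for values in the [-1, 1] range used
# throughout this file; only NumPy is exercised here, although the cv2 and
# tensorflow imports at the top must still resolve for the module to load.
if __name__ == "__main__":
    hdr = np.linspace(-1., 1., 5)
    recovered = itonemap_np(tonemap_np(hdr))
    print(np.allclose(recovered, hdr))  # True, up to floating point error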
|
11461693
|
from plugins.adversary.app.commands import net
from plugins.adversary.app.operation.operation import Step, OPUser, OPDomain, OPCredential, OPHost, OPRat, OPVar, OPShare
class NetUse(Step):
"""
Description:
This step mounts a C$ network share on a target remote machine using net use. This can then be leveraged
for a host of machine-to-machine techniques.
Requirements:
Requires administrative credentials for target machine ((needs both administrator enumeration 'GetAdmin',
and credential data 'Credentials') and domain enumeration.
"""
attack_mapping = [('T1077', 'Lateral Movement'), ('T1106', 'Execution')]
display_name = "net_use"
summary = "Mount a C$ network share using net use"
# prevents net_use
value = 0
preconditions = [("rat", OPRat),
('host', OPHost),
("cred", OPCredential({'$in': {'user': OPVar("host.admins")}})),
('user', OPUser(OPVar("cred.user"))),
('domain', OPDomain(OPVar("user.domain")))]
# These post-conditions create a weird behavior where the planner will think it has paths ahead due to Remove
# Net Share being an option. Will not break anything.
# postconditions = [('share_g', OPShare({"src_host": OPVar("rat.host"), "dest_host": OPVar("host"),
# 'share_name': 'C$', 'share_removed': False}))]
postconditions = [('share_g', OPShare({"src_host": OPVar("rat.host"), "dest_host": OPVar("host"),
'share_name': 'C$'}))]
not_equal = [('host', 'rat.host')]
preproperties = ['domain.windows_domain', 'cred.password', 'host.fqdn', 'user.username']
postproperties = ["share_g.share_path", "share_g.mount_point", "share_g.share_removed"]
deterministic = True
cddl = """
Knowns:
rat: OPRat[host]
host: OPHost[fqdn]
cred: OPCredential[password, user[username, domain[windows_domain]]]
Where:
rat.host != host
Effects:
if not exist rat {
forget rat
} elif cred.user in host.admins {
create OPShare[src_host=rat.host, dest_host=host, share_name="C$", share_path="whatever", \
share_removed="False"]
}
"""
@staticmethod
def description(rat, host):
return "Mounting {}'s C$ network share on {} with net use".format(host.fqdn, rat.host.fqdn)
@staticmethod
async def action(operation, rat, host, cred, user, domain, share_g):
await operation.execute_shell_command(rat, *net.use(host.fqdn, 'C$', user=user.username,
user_domain=domain.windows_domain, password=<PASSWORD>))
await share_g({'share_path': '\\\\{}\\C$'.format(host.fqdn), 'mount_point': 'C:', 'share_removed': False})
return True
@staticmethod
async def cleanup(cleaner, share_g):
for share in share_g:
if not share.share_removed:
await cleaner.delete(share)
|
11461740
|
from replacy.test_helper import MatchDictTestHelper
if __name__ == '__main__':
test = MatchDictTestHelper()
test.run()
|
11461772
|
import openpyxl
from . import patch, xltypes
class Reader():
def __init__(self, file_name):
self.excel_file_name = file_name
def read(self):
with patch.openpyxl_WorksheetReader_patch():
self.book = openpyxl.load_workbook(self.excel_file_name)
def read_defined_names(self, ignore_sheets=[], ignore_hidden=False):
return {
defn.name: defn.value
for defn in self.book.defined_names.definedName
if defn.hidden is None and defn.value != '#REF!'
}
def read_cells(self, ignore_sheets=[], ignore_hidden=False):
cells = {}
formulae = {}
ranges = {}
for sheet_name in self.book.sheetnames:
if sheet_name in ignore_sheets:
continue
sheet = self.book[sheet_name]
for cell in sheet._cells.values():
addr = f'{sheet_name}!{cell.coordinate}'
if cell.data_type == 'f':
formula = xltypes.XLFormula(cell.value, sheet_name)
formulae[addr] = formula
value = cell.cvalue
else:
formula = None
value = cell.value
cells[addr] = xltypes.XLCell(
addr, value=value, formula=formula)
return [cells, formulae, ranges]
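# --- Illustrative usage (added sketch, not part of the original reader) ---
# "example.xlsx" is a hypothetical workbook path; the accompanying patch module
# must be importable, since plain openpyxl cells do not expose `cvalue`.
if __name__ == "__main__":
    reader = Reader("example.xlsx")
    reader.read()
    cells, formulae, ranges = reader.read_cells()
    print(len(cells), "cells,", len(formulae), "formulae")
    print(reader.read_defined_names())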
|
11461781
|
from fractions import gcd
class Fractions:
"""Some comment
In a galaxy far, far away~"""
def __init__(self, nom, denom):
self.nom = nom
self.denom = denom
self.__reduce()
def __reduce(self):
GCD = gcd(self.nom, self.denom)
self.nom, self.denom = self.nom // GCD, self.denom // GCD
def __str__(self):
if self.nom/self.denom >= 1:
integer = self.nom // self.denom
flt = Fractions(self.nom % self.denom, self.denom)
if flt.nom == 0:
return str(integer)
else:
return str(integer)+' '+str(flt)
else:
return str(self.nom)+'/'+str(self.denom)
def __add__(self, other):
nom = self.nom*other.denom + self.denom*other.nom
denom = self.denom * other.denom
return Fractions(nom, denom)
def __sub__(self, other):
nom = self.nom*other.denom - self.denom*other.nom
denom = self.denom * other.denom
return Fractions(nom, denom)
def __truediv__(self, other):
nom, denom = self.nom*other.denom, self.denom*other.nom
return Fractions(nom, denom)
def __mul__(self, other):
nom, denom = self.nom * other.nom, self.denom * other.denom
return Fractions(nom, denom)
def __copy__(self):
return Fractions(self.nom, self.denom)
def __pow__(self, power, modulo=None):
ret = self.__copy__()
if power == 0:
return Fractions(1,1)
elif power < 0:
for _ in range(abs(power)-1):
ret *= self
return Fractions(1,1)/ret
else:
for _ in range(abs(power)):
ret *= self
return ret
def __eq__(self, other):
return self.nom == other.nom and self.denom == other.denom
def astuple(self):
return self.nom, self.denom
def destroy(self):
self.nom = None
self.denom = None
my_frac1 = Fractions(20, 30)
my_frac2 = Fractions(5, 173)
#my_frac1.destroy()
print(my_frac1)
print(my_frac1 == my_frac1.__copy__())
print(my_frac1 / (my_frac1 - my_frac2))
|
11461784
|
from pypy.tool.pairtype import pairtype
from pypy.annotation import model as annmodel
from pypy.rpython.lltypesystem.lltype import Signed, Unsigned, Bool, Float
from pypy.rpython.error import TyperError
from pypy.rpython.rmodel import IntegerRepr, BoolRepr
from pypy.rpython.robject import PyObjRepr, pyobj_repr
from pypy.rpython.rmodel import log
class __extend__(annmodel.SomeBool):
def rtyper_makerepr(self, rtyper):
return bool_repr
def rtyper_makekey(self):
return self.__class__,
bool_repr = BoolRepr()
class __extend__(BoolRepr):
def convert_const(self, value):
if not isinstance(value, bool):
raise TyperError("not a bool: %r" % (value,))
return value
def rtype_is_true(_, hop):
vlist = hop.inputargs(Bool)
return vlist[0]
def rtype_int(_, hop):
vlist = hop.inputargs(Signed)
hop.exception_cannot_occur()
return vlist[0]
def rtype_float(_, hop):
vlist = hop.inputargs(Float)
return vlist[0]
#
# _________________________ Conversions _________________________
class __extend__(pairtype(BoolRepr, IntegerRepr)):
def convert_from_to((r_from, r_to), v, llops):
if r_from.lowleveltype == Bool and r_to.lowleveltype == Unsigned:
log.debug('explicit cast_bool_to_uint')
return llops.genop('cast_bool_to_uint', [v], resulttype=Unsigned)
if r_from.lowleveltype == Bool and r_to.lowleveltype == Signed:
return llops.genop('cast_bool_to_int', [v], resulttype=Signed)
if r_from.lowleveltype == Bool:
from pypy.rpython.rint import signed_repr
v_int = llops.genop('cast_bool_to_int', [v], resulttype=Signed)
return llops.convertvar(v_int, signed_repr, r_to)
return NotImplemented
class __extend__(pairtype(IntegerRepr, BoolRepr)):
def convert_from_to((r_from, r_to), v, llops):
if r_from.lowleveltype == Unsigned and r_to.lowleveltype == Bool:
log.debug('explicit cast_uint_to_bool')
return llops.genop('uint_is_true', [v], resulttype=Bool)
if r_from.lowleveltype == Signed and r_to.lowleveltype == Bool:
log.debug('explicit cast_int_to_bool')
return llops.genop('int_is_true', [v], resulttype=Bool)
return NotImplemented
class __extend__(pairtype(PyObjRepr, BoolRepr)):
def convert_from_to((r_from, r_to), v, llops):
if r_to.lowleveltype == Bool:
# xxx put in table
return llops.gencapicall('PyObject_IsTrue', [v], resulttype=Bool,
_callable=lambda pyo: bool(pyo._obj.value))
return NotImplemented
class __extend__(pairtype(BoolRepr, PyObjRepr)):
def convert_from_to((r_from, r_to), v, llops):
if r_from.lowleveltype == Bool:
return llops.gencapicall('PyBool_FromLong', [v],
resulttype = pyobj_repr)
return NotImplemented
|
11461854
|
import pytest
from rest_framework.status import (
HTTP_200_OK,
HTTP_204_NO_CONTENT,
HTTP_400_BAD_REQUEST,
HTTP_401_UNAUTHORIZED,
HTTP_404_NOT_FOUND,
)
from django.shortcuts import reverse
from baserow.contrib.database.tokens.handler import TokenHandler
from baserow.contrib.database.tokens.models import Token, TokenPermission
@pytest.mark.django_db
def test_list_tokens(api_client, data_fixture):
user, token = data_fixture.create_user_and_token()
group_1 = data_fixture.create_group(user=user)
group_2 = data_fixture.create_group(user=user)
token_1 = data_fixture.create_token(user=user, group=group_1)
token_2 = data_fixture.create_token(user=user, group=group_1)
token_3 = data_fixture.create_token(user=user, group=group_2)
url = reverse("api:database:tokens:list")
response = api_client.get(url, HTTP_AUTHORIZATION=f"JWT random")
assert response.status_code == HTTP_401_UNAUTHORIZED
url = reverse("api:database:tokens:list")
response = api_client.get(url, HTTP_AUTHORIZATION=f"JWT {token}")
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json) == 3
assert len(response_json[0]) == 5
assert response_json[0]["id"] == token_1.id
assert response_json[0]["name"] == token_1.name
assert response_json[0]["key"] == token_1.key
assert response_json[0]["group"] == token_1.group_id
assert response_json[0]["permissions"] == {
"create": False,
"read": False,
"update": False,
"delete": False,
}
assert len(response_json[1]) == 5
assert response_json[1]["id"] == token_2.id
assert response_json[1]["name"] == token_2.name
assert response_json[1]["key"] == token_2.key
assert response_json[1]["group"] == token_2.group_id
assert response_json[0]["permissions"] == {
"create": False,
"read": False,
"update": False,
"delete": False,
}
assert len(response_json[2]) == 5
assert response_json[2]["id"] == token_3.id
assert response_json[2]["name"] == token_3.name
assert response_json[2]["key"] == token_3.key
assert response_json[2]["group"] == token_3.group_id
assert response_json[0]["permissions"] == {
"create": False,
"read": False,
"update": False,
"delete": False,
}
@pytest.mark.django_db
def test_create_token(api_client, data_fixture):
user, token = data_fixture.create_user_and_token()
group_1 = data_fixture.create_group(user=user)
group_2 = data_fixture.create_group()
url = reverse("api:database:tokens:list")
response = api_client.post(
url,
{"name": "<NAME>", "group": group_1.id},
format="json",
HTTP_AUTHORIZATION=f"JWT random",
)
assert response.status_code == HTTP_401_UNAUTHORIZED
url = reverse("api:database:tokens:list")
response = api_client.post(
url, {}, format="json", HTTP_AUTHORIZATION=f"JWT {token}"
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["detail"]["name"][0]["code"] == "required"
assert response_json["detail"]["group"][0]["code"] == "required"
url = reverse("api:database:tokens:list")
response = api_client.post(
url,
{"name": "Test", "group": 9999},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["detail"]["group"][0]["code"] == "does_not_exist"
url = reverse("api:database:tokens:list")
response = api_client.post(
url,
{"name": "Test", "group": group_2.id},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_USER_NOT_IN_GROUP"
url = reverse("api:database:tokens:list")
response = api_client.post(
url,
{"name": "Test", "group": group_1.id},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert Token.objects.all().count() == 1
token = Token.objects.all().first()
assert response_json["id"] == token.id
assert response_json["name"] == token.name
assert response_json["group"] == token.group_id == group_1.id
assert response_json["key"] == token.key
assert len(response_json["key"]) == 32
assert response_json["permissions"] == {
"create": True,
"read": True,
"update": True,
"delete": True,
}
@pytest.mark.django_db
def test_get_token(api_client, data_fixture):
user, token = data_fixture.create_user_and_token()
group_1 = data_fixture.create_group(user=user)
group_2 = data_fixture.create_group()
token_1 = data_fixture.create_token(user=user, group=group_1)
token_2 = data_fixture.create_token(user=user, group=group_2)
token_3 = data_fixture.create_token()
database_1 = data_fixture.create_database_application(group=group_1)
database_2 = data_fixture.create_database_application(group=group_1)
data_fixture.create_database_table(database=database_1, create_table=False)
data_fixture.create_database_table(database=database_1, create_table=False)
table_3 = data_fixture.create_database_table(
database=database_2, create_table=False
)
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.get(url, format="json", HTTP_AUTHORIZATION=f"JWT random")
assert response.status_code == HTTP_401_UNAUTHORIZED
url = reverse("api:database:tokens:item", kwargs={"token_id": 99999})
response = api_client.get(url, format="json", HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_404_NOT_FOUND
assert response.json()["error"] == "ERROR_TOKEN_DOES_NOT_EXIST"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_3.id})
response = api_client.get(url, format="json", HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_404_NOT_FOUND
assert response.json()["error"] == "ERROR_TOKEN_DOES_NOT_EXIST"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_2.id})
response = api_client.get(url, format="json", HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_USER_NOT_IN_GROUP"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.get(url, format="json", HTTP_AUTHORIZATION=f"JWT {token}")
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["id"] == token_1.id
assert response_json["name"] == token_1.name
assert response_json["group"] == token_1.group_id
assert response_json["key"] == token_1.key
assert len(response_json["key"]) == 32
assert response_json["permissions"] == {
"create": False,
"read": False,
"update": False,
"delete": False,
}
TokenHandler().update_token_permissions(
user,
token_1,
create=True,
read=[database_2],
update=[database_1, table_3],
delete=False,
)
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.get(url, format="json", HTTP_AUTHORIZATION=f"JWT {token}")
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["permissions"]["create"] is True
assert len(response_json["permissions"]["read"]) == 1
assert response_json["permissions"]["read"][0] == ["database", database_2.id]
assert len(response_json["permissions"]["update"]) == 2
assert response_json["permissions"]["update"][0] == ["database", database_1.id]
assert response_json["permissions"]["update"][1] == ["table", table_3.id]
assert response_json["permissions"]["delete"] is False
TokenHandler().update_token_permissions(
user,
token_1,
create=[database_1, database_2],
read=False,
update=True,
delete=[table_3],
)
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.get(url, format="json", HTTP_AUTHORIZATION=f"JWT {token}")
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json["permissions"]["create"]) == 2
assert response_json["permissions"]["create"][0] == ["database", database_1.id]
assert response_json["permissions"]["create"][1] == ["database", database_2.id]
assert response_json["permissions"]["read"] is False
assert response_json["permissions"]["update"] is True
assert len(response_json["permissions"]["delete"]) == 1
assert response_json["permissions"]["delete"][0] == ["table", table_3.id]
@pytest.mark.django_db
def test_update_token(api_client, data_fixture):
user, token = data_fixture.create_user_and_token()
group_1 = data_fixture.create_group(user=user)
group_2 = data_fixture.create_group()
token_1 = data_fixture.create_token(user=user, group=group_1)
token_2 = data_fixture.create_token(user=user, group=group_2)
token_3 = data_fixture.create_token()
database_1 = data_fixture.create_database_application(group=group_1)
database_2 = data_fixture.create_database_application(group=group_1)
database_3 = data_fixture.create_database_application()
table_1 = data_fixture.create_database_table(
database=database_1, create_table=False
)
data_fixture.create_database_table(database=database_1, create_table=False)
table_3 = data_fixture.create_database_table(
database=database_2, create_table=False
)
table_4 = data_fixture.create_database_table(
database=database_3, create_table=False
)
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.patch(
url, {"name": "New name"}, format="json", HTTP_AUTHORIZATION=f"JWT random"
)
assert response.status_code == HTTP_401_UNAUTHORIZED
url = reverse("api:database:tokens:item", kwargs={"token_id": 99999})
response = api_client.patch(
url, {"name": "New name"}, format="json", HTTP_AUTHORIZATION=f"JWT {token}"
)
assert response.status_code == HTTP_404_NOT_FOUND
assert response.json()["error"] == "ERROR_TOKEN_DOES_NOT_EXIST"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_3.id})
response = api_client.patch(
url, {"name": "New name"}, format="json", HTTP_AUTHORIZATION=f"JWT {token}"
)
assert response.status_code == HTTP_404_NOT_FOUND
assert response.json()["error"] == "ERROR_TOKEN_DOES_NOT_EXIST"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_2.id})
response = api_client.patch(
url, {"name": "New name"}, format="json", HTTP_AUTHORIZATION=f"JWT {token}"
)
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_USER_NOT_IN_GROUP"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_2.id})
response = api_client.patch(
url, {"name": ""}, format="json", HTTP_AUTHORIZATION=f"JWT {token}"
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["detail"]["name"][0]["code"] == "blank"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_3.id})
response = api_client.patch(
url, {}, format="json", HTTP_AUTHORIZATION=f"JWT {token}"
)
response_json = response.json()
assert response.status_code == HTTP_404_NOT_FOUND
assert response_json["error"] == "ERROR_TOKEN_DOES_NOT_EXIST"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.patch(
url, {"name": "New name"}, format="json", HTTP_AUTHORIZATION=f"JWT {token}"
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
token_1.refresh_from_db()
assert response_json["id"] == token_1.id
assert response_json["name"] == "New name" == token_1.name
assert response_json["group"] == token_1.group_id
assert response_json["key"] == token_1.key
assert len(response_json["key"]) == 32
assert response_json["permissions"] == {
"create": False,
"read": False,
"update": False,
"delete": False,
}
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.patch(
url,
{
"permissions": {
"create": "something",
"read": False,
"update": False,
"delete": False,
}
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_REQUEST_BODY_VALIDATION"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.patch(
url,
{
"permissions": {
"create": True,
"read": [["something", 1]],
"update": False,
"delete": False,
}
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_REQUEST_BODY_VALIDATION"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.patch(
url,
{
"permissions": {
"create": True,
"read": [1],
"update": False,
"delete": False,
}
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_REQUEST_BODY_VALIDATION"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.patch(
url,
{"permissions": {"create": True, "update": False, "delete": False}},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_REQUEST_BODY_VALIDATION"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.patch(
url, {"permissions": {}}, format="json", HTTP_AUTHORIZATION=f"JWT {token}"
)
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_REQUEST_BODY_VALIDATION"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.patch(
url,
{
"permissions": {
"create": True,
"read": [["database", database_3.id]],
"update": True,
"delete": True,
}
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_DATABASE_DOES_NOT_BELONG_TO_GROUP"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.patch(
url,
{
"permissions": {
"create": True,
"read": [["table", table_4.id]],
"update": True,
"delete": True,
}
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_TABLE_DOES_NOT_BELONG_TO_GROUP"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.patch(
url,
{
"name": "New name 2",
"permissions": {
"create": True,
"read": [["database", database_1.id]],
"update": False,
"delete": [["table", table_1.id], ["table", table_3.id]],
},
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
token_1.refresh_from_db()
assert response_json["id"] == token_1.id
assert response_json["name"] == "New name 2" == token_1.name
assert response_json["group"] == token_1.group_id
assert response_json["key"] == token_1.key
assert len(response_json["key"]) == 32
assert response_json["permissions"]["create"] is True
assert len(response_json["permissions"]["read"]) == 1
assert response_json["permissions"]["read"][0] == ["database", database_1.id]
assert response_json["permissions"]["update"] is False
assert len(response_json["permissions"]["delete"]) == 2
assert response_json["permissions"]["delete"][0] == ["table", table_1.id]
assert response_json["permissions"]["delete"][1] == ["table", table_3.id]
assert TokenPermission.objects.all().count() == 4
assert TokenPermission.objects.filter(
token=token_1, type="create", database__isnull=True, table__isnull=True
).exists()
assert TokenPermission.objects.filter(
token=token_1, type="read", database_id=database_1.id, table__isnull=True
).exists()
assert TokenPermission.objects.filter(
token=token_1, type="delete", database__isnull=True, table_id=table_1.id
).exists()
assert TokenPermission.objects.filter(
    token=token_1, type="delete", database__isnull=True, table_id=table_3.id
).exists()
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.patch(
url, {"rotate_key": True}, format="json", HTTP_AUTHORIZATION=f"JWT {token}"
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["key"] != token_1.key
token_1.refresh_from_db()
assert response_json["key"] == token_1.key
@pytest.mark.django_db
def test_delete_token(api_client, data_fixture):
user, token = data_fixture.create_user_and_token()
group_1 = data_fixture.create_group(user=user)
group_2 = data_fixture.create_group()
token_1 = data_fixture.create_token(user=user, group=group_1)
token_2 = data_fixture.create_token(user=user, group=group_2)
token_3 = data_fixture.create_token()
TokenHandler().update_token_permissions(
user, token_1, create=True, read=True, update=True, delete=True
)
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.delete(url, format="json", HTTP_AUTHORIZATION=f"JWT random")
assert response.status_code == HTTP_401_UNAUTHORIZED
url = reverse("api:database:tokens:item", kwargs={"token_id": 99999})
response = api_client.delete(url, format="json", HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_404_NOT_FOUND
assert response.json()["error"] == "ERROR_TOKEN_DOES_NOT_EXIST"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_3.id})
response = api_client.delete(url, format="json", HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_404_NOT_FOUND
assert response.json()["error"] == "ERROR_TOKEN_DOES_NOT_EXIST"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_2.id})
response = api_client.delete(url, format="json", HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_USER_NOT_IN_GROUP"
url = reverse("api:database:tokens:item", kwargs={"token_id": token_3.id})
response = api_client.delete(url, format="json", HTTP_AUTHORIZATION=f"JWT {token}")
response_json = response.json()
assert response.status_code == HTTP_404_NOT_FOUND
assert response_json["error"] == "ERROR_TOKEN_DOES_NOT_EXIST"
assert Token.objects.all().count() == 3
assert TokenPermission.objects.all().count() == 4
url = reverse("api:database:tokens:item", kwargs={"token_id": token_1.id})
response = api_client.delete(url, format="json", HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_204_NO_CONTENT
assert Token.objects.all().count() == 2
assert TokenPermission.objects.all().count() == 0
|
11461861
|
from pylot.planning.planner import Planner
from rrt_star_planner.RRTStar.rrt_star_wrapper import apply_rrt_star
class RRTStarPlanner(Planner):
"""Wrapper around the RRT* planner.
Note:
Details can be found at `RRT* Planner`_.
Args:
world: (:py:class:`~pylot.planning.world.World`): A reference to the
planning world.
flags (absl.flags): Object to be used to access absl flags.
.. _RRT* Planner:
https://github.com/erdos-project/rrt_star_planner
"""
def __init__(self, world, flags, logger):
super().__init__(world, flags, logger)
self._hyperparameters = {
"step_size": flags.step_size,
"max_iterations": flags.max_iterations,
"end_dist_threshold": flags.end_dist_threshold,
"obstacle_clearance": flags.obstacle_clearance_rrt,
"lane_width": flags.lane_width,
}
def run(self, timestamp, ttd=None):
"""Runs the planner.
Note:
The planner assumes that the world is up-to-date.
Returns:
:py:class:`~pylot.planning.waypoints.Waypoints`: Waypoints of the
planned trajectory.
"""
obstacle_list = self._world.get_obstacle_list()
if len(obstacle_list) == 0:
# Do not use RRT* if there are no obstacles.
output_wps = self._world.follow_waypoints(self._flags.target_speed)
else:
# RRT* does not take into account the driveable region.
# It constructs search space as a top down, minimum bounding
# rectangle with padding in each dimension.
self._logger.debug("@{}: Hyperparameters: {}".format(
timestamp, self._hyperparameters))
initial_conditions = self._compute_initial_conditions(
obstacle_list)
self._logger.debug("@{}: Initial conditions: {}".format(
timestamp, initial_conditions))
path_x, path_y, success = apply_rrt_star(initial_conditions,
self._hyperparameters)
if success:
self._logger.debug("@{}: RRT* succeeded".format(timestamp))
speeds = [self._flags.target_speed] * len(path_x)
self._logger.debug("@{}: RRT* Path X: {}".format(
timestamp, path_x.tolist()))
self._logger.debug("@{}: RRT* Path Y: {}".format(
timestamp, path_y.tolist()))
self._logger.debug("@{}: RRT* Speeds: {}".format(
timestamp, speeds))
output_wps = self.build_output_waypoints(
path_x, path_y, speeds)
else:
self._logger.error("@{}: RRT* failed. "
"Sending emergency stop.".format(timestamp))
output_wps = self._world.follow_waypoints(0)
return output_wps
def _compute_initial_conditions(self, obstacles):
ego_transform = self._world.ego_transform
self._world.waypoints.remove_completed(ego_transform.location)
end_index = min(self._flags.num_waypoints_ahead,
len(self._world.waypoints.waypoints) - 1)
if end_index < 0:
# If no more waypoints left. Then our location is our end wp.
self._logger.debug("@{}: No more waypoints left")
end_wp = ego_transform
else:
end_wp = self._world.waypoints.waypoints[end_index]
initial_conditions = {
"start": ego_transform.location.as_numpy_array_2D(),
"end": end_wp.location.as_numpy_array_2D(),
"obs": obstacles,
}
return initial_conditions
|
11461864
|
import argparse
from typing import NoReturn
from pipconf import __version__
from pipconf import __module__
from pipconf import __license__
from pipconf import __author_github__
from pipconf import __index__
from pipconf import kernel
from pipconf import environment
NAME = __module__
VERSION = __version__
LICENSE = __license__
AUTHOR = __author_github__
INDEX = __index__
DESCRIPTION = """
\033[93m______ ___________ _____ _____ _ _ ______ \033[0m
\033[93m| ___ \_ _| ___ \/ __ \ _ | \ | || ___|\033[0m
\033[93m| |_/ / | | | |_/ /| / \/ | | | \| || |_ \033[0m
\033[93m| __/ | | | __/ | | | | | | . ` || _|\033[0m
\033[93m| | _| |_| | | \__/\ \_/ / |\ || |\033[0m
\033[93m\_| \___/\_| \____/\___/\_| \_/\_|\033[0m v{}
Under {} License, by {}
Contribute at {}
""".format(VERSION, LICENSE, AUTHOR, INDEX)
def init_argparse() -> argparse.ArgumentParser:
"""
Function that initializes the `ArgumentParser` and returns it.
"""
parser = argparse.ArgumentParser(
prog=NAME,
description=DESCRIPTION,
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument("-v", "--version", action="store_true", help="show the version of the module")
display = parser.add_argument_group("display informations")
display.add_argument("--current", action="store_true", help="show the current pip configuration file")
display.add_argument("--list", action="store_true", help="list all user configurations avaliable at $HOME/.pip")
change = parser.add_argument_group("change configuration")
change.add_argument("--set", type=str, dest="filename", help="set the global configuration for pip from a file in $HOME/.pip")
change.add_argument("--local", action="store_true", help="set the pip configuration for the current directory file")
return parser
def handle_arguments(args) -> NoReturn:
"""
Function that verifies all CLI arguments and handles them.
"""
# Module version
if args.version:
# --version
print(f"{NAME} {VERSION}")
# Display arguments
if args.current:
# --current
kernel.print_current_configuration()
if args.list:
# --list
kernel.print_user_configurations()
# Change arguments
if args.filename:
# --set [filename]
kernel.set_user_configuration(args.filename)
if args.local:
# --local
kernel.set_local_configuration()
def main() -> NoReturn:
environment.initialize_environment()
parser = init_argparse()
args = parser.parse_args()
handle_arguments(args)
if __name__ == "__main__":
main()
|
11461866
|
from django.contrib import admin
from models import *
admin.site.register(Set)
admin.site.register(Commit)
admin.site.register(Comment)
admin.site.register(Paste)
admin.site.register(Favorite)
admin.site.register(Preference)
|
11461898
|
from builtins import str
import mwparserfromhell as mwp
import time
# Unclean code
def extract_indent_blocks(wikicode):
old_indent = 0
wc_block_list = []
cur_block_wc_lines = []
for wc_line in _split_wikicode_on_endlines(wikicode):
line = str(wc_line)
indent = _find_line_indent(line)
if indent != old_indent and line.strip() != "":
wc_block = _join_wikicode(cur_block_wc_lines)
block = str(wc_block)
if block.strip() != "":
wc_block_list.append(wc_block)
cur_block_wc_lines = []
old_indent = indent
cur_block_wc_lines.append(wc_line)
wc_block = _join_wikicode(cur_block_wc_lines)
block = str(wc_block)
if block.strip() != "":
wc_block_list.append(wc_block)
return wc_block_list
# Unclean code
def _split_wikicode_on_endlines(wikicode):
divided = []
cur = []
for node in wikicode.nodes:
if type(node) is mwp.nodes.text.Text:
split_nodes = _split_text_node_on_endline(node)
for sn in split_nodes:
cur.append(sn)
if "\n" in sn.value:
divided.append(mwp.wikicode.Wikicode(cur))
cur = []
else:
cur.append(node)
if len(cur) > 0:
divided.append(mwp.wikicode.Wikicode(cur))
return divided
def _split_text_node_on_endline(text_node):
text = text_node.value
lines = _split_text_and_leave_delimiter(text, "\n")
results = []
for line in lines:
if line != "":
results.append(mwp.nodes.text.Text(line))
return results
def _split_text_and_leave_delimiter(text, delimiter):
result = []
lines = text.split(delimiter)
for i, line in enumerate(lines):
if i == (len(lines) - 1):
break
result.append(line + delimiter)
result.append(lines[i])
return result
def _join_wikicode(wikicode_list):
nodes = []
for wc in wikicode_list:
nodes.extend(wc.nodes)
return mwp.wikicode.Wikicode(nodes)
def find_min_indent(wikicode):
text = str(wikicode)
lines = text.split('\n')
non_empty = [line for line in lines if line.strip() != ""]
indents = [_find_line_indent(line) for line in non_empty]
return min(indents)
def find_line_indent(wcode):
text = str(wcode)
if text.strip() != "":
return _find_line_indent(text)
return None
def _find_line_indent(line):
return _count_indent_in_some_order(line)
def _count_indent_in_some_order(line):
line = line.strip()
count = 0
count_star = 0
indent_chars = [':', '*', '#']
while len(indent_chars) > 0:
if len(line) > count and line[count] in indent_chars:
char = line[count]
count += _count_leading_char(line[count:], line[count])
if char == '*' or char =='#':
count_star += count
indent_chars.remove(char)
else:
break
if count_star > 0:
return count - 1
return count
def _count_leading_char(line, char):
line = line.strip()
if len(line) == 0 or line[0] != char:
return 0
else:
return 1 + _count_leading_char(line[1:], char)
def has_continuation_indent(wikicode):
if len(wikicode.nodes) > 0:
start_node = wikicode.nodes[0]
if type(start_node) is mwp.nodes.template.Template:
return "outdent" in str(start_node).lower() or "undent" in str(start_node).lower() or "od" in str(start_node).lower()
if type(start_node) is mwp.nodes.text.Text:
return "outdent" in str(start_node).lower() or "undent" in str(start_node).lower()
return False
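# --- Illustrative sketch (added, not part of the original module) ---
# _find_line_indent works on plain strings, so it can be exercised without
# parsing anything with mwparserfromhell; the lines below are made-up examples
# in the style of talk-page replies.
if __name__ == "__main__":
    print(_find_line_indent("plain reply"))       # 0
    print(_find_line_indent("::indented reply"))  # 2
    print(_find_line_indent("*bulleted reply"))   # 0 (count - 1 when '*'/'#' leads)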
|
11461899
|
import os
from .base_video_dataset import BaseVideoDataset
from ltr.data.image_loader import jpeg4py_loader
import torch
import random
from pycocotools.coco import COCO
from collections import OrderedDict
from ltr.admin.environment import env_settings
class MSCOCOSeq(BaseVideoDataset):
""" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.
Publication:
Microsoft COCO: Common Objects in Context.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME> and <NAME>
ECCV, 2014
https://arxiv.org/pdf/1405.0312.pdf
Download the images along with annotations from http://cocodataset.org/#download. The root folder should be
organized as follows.
- coco_root
- annotations
- instances_train2014.json
- instances_train2017.json
- images
- train2014
- train2017
Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.
"""
def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split="train", version="2014"):
"""
args:
root - path to the coco dataset.
image_loader (default_image_loader) - The function to read the images. If installed,
jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,
opencv's imread is used.
data_fraction (None) - Fraction of images to be used. The images are selected randomly. If None, all the
images will be used
split - 'train' or 'val'.
version - version of coco dataset (2014 or 2017)
"""
root = env_settings().coco_dir if root is None else root
super().__init__('COCO', root, image_loader)
self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))
self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))
# Load the COCO set.
self.coco_set = COCO(self.anno_path)
self.cats = self.coco_set.cats
self.class_list = self.get_class_list()
self.sequence_list = self._get_sequence_list()
if data_fraction is not None:
self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))
self.seq_per_class = self._build_seq_per_class()
def _get_sequence_list(self):
ann_list = list(self.coco_set.anns.keys())
seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]
return seq_list
def is_video_sequence(self):
return False
def get_num_classes(self):
return len(self.class_list)
def get_name(self):
return 'coco'
def has_class_info(self):
return True
def get_class_list(self):
class_list = []
for cat_id in self.cats.keys():
class_list.append(self.cats[cat_id]['name'])
return class_list
def has_segmentation_info(self):
return True
def get_num_sequences(self):
return len(self.sequence_list)
def _build_seq_per_class(self):
seq_per_class = {}
for i, seq in enumerate(self.sequence_list):
class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']
if class_name not in seq_per_class:
seq_per_class[class_name] = [i]
else:
seq_per_class[class_name].append(i)
return seq_per_class
def get_sequences_in_class(self, class_name):
return self.seq_per_class[class_name]
def get_sequence_info(self, seq_id):
anno = self._get_anno(seq_id)
bbox = torch.Tensor(anno['bbox']).view(1, 4)
mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)
valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
visible = valid.clone().byte()
return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}
def _get_anno(self, seq_id):
anno = self.coco_set.anns[self.sequence_list[seq_id]]
return anno
def _get_frames(self, seq_id):
path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']
img = self.image_loader(os.path.join(self.img_pth, path))
return img
def get_meta_info(self, seq_id):
try:
cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],
'motion_class': None,
'major_class': cat_dict_current['supercategory'],
'root_class': None,
'motion_adverb': None})
except:
object_meta = OrderedDict({'object_class_name': None,
'motion_class': None,
'major_class': None,
'root_class': None,
'motion_adverb': None})
return object_meta
def get_class_name(self, seq_id):
cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]
return cat_dict_current['name']
def get_frames(self, seq_id=None, frame_ids=None, anno=None):
# COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a
# list containing these replicated images.
frame = self._get_frames(seq_id)
frame_list = [frame.copy() for _ in frame_ids]
if anno is None:
anno = self.get_sequence_info(seq_id)
anno_frames = {}
for key, value in anno.items():
anno_frames[key] = [value[0, ...] for _ in frame_ids]
object_meta = self.get_meta_info(seq_id)
return frame_list, anno_frames, object_meta
|
11461902
|
import os
from niaaml import Pipeline
"""
This example presents how to load a saved Pipeline object from a file. You can use all of its methods after it has been loaded successfully.
"""
# load Pipeline object from a file
pipeline = Pipeline.load(
os.path.dirname(os.path.abspath(__file__)) + "/example_files/pipeline.ppln"
)
# all of the Pipeline's classes methods can be called after a successful load
|
11461929
|
from django.db import migrations
def insert_default_config(apps, schema_editor):
# We can't import the migrated model directly as it may be a newer
# version than this migration expects. We use the historical version.
MainConfigModel = apps.get_model('dfirtrack_config', 'MainConfigModel')
MainConfigModel.objects.get_or_create(main_config_name='MainConfig')
ArtifactExporterSpreadsheetXlsConfigModel = apps.get_model(
'dfirtrack_config', 'ArtifactExporterSpreadsheetXlsConfigModel'
)
ArtifactExporterSpreadsheetXlsConfigModel.objects.get_or_create(
artifact_exporter_spreadsheet_xls_config_name='ArtifactExporterSpreadsheetXlsConfig'
)
SystemExporterMarkdownConfigModel = apps.get_model(
'dfirtrack_config', 'SystemExporterMarkdownConfigModel'
)
SystemExporterMarkdownConfigModel.objects.get_or_create(
system_exporter_markdown_config_name='SystemExporterMarkdownConfig'
)
SystemExporterSpreadsheetCsvConfigModel = apps.get_model(
'dfirtrack_config', 'SystemExporterSpreadsheetCsvConfigModel'
)
SystemExporterSpreadsheetCsvConfigModel.objects.get_or_create(
system_exporter_spreadsheet_csv_config_name='SystemExporterSpreadsheetCsvConfig'
)
SystemExporterSpreadsheetXlsConfigModel = apps.get_model(
'dfirtrack_config', 'SystemExporterSpreadsheetXlsConfigModel'
)
SystemExporterSpreadsheetXlsConfigModel.objects.get_or_create(
system_exporter_spreadsheet_xls_config_name='SystemExporterSpreadsheetXlsConfig'
)
SystemImporterFileCsvConfigModel = apps.get_model(
'dfirtrack_config', 'SystemImporterFileCsvConfigModel'
)
SystemImporterFileCsvConfigModel.objects.get_or_create(
system_importer_file_csv_config_name='SystemImporterFileCsvConfig'
)
class Migration(migrations.Migration):
dependencies = [
('dfirtrack_config', '0018_add_defaults'),
]
operations = [
migrations.RunPython(insert_default_config),
]
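# Hedged sketch, not part of the original migration: RunPython also accepts a
# reverse callable, so a reversible variant of the operation above could be
# declared like this (Django's built-in no-op is used for the backward step).
_reversible_insert_default_config = migrations.RunPython(
    insert_default_config, migrations.RunPython.noop
)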
|
11461949
|
from defusedxml import ElementTree as ET
from html2text import html2text
from dojo.models import Endpoint, Finding
class ZapParser(object):
"""Parser for XML file generated by the OWASP Zed Attacl Proxy (ZAP) tool https://www.zaproxy.org/."""
MAPPING_SEVERITY = {"0": "Info", "1": "Low", "2": "Medium", "3": "High"}
MAPPING_CONFIDENCE = {
# "0": ??? CONFIDENCE_FALSE_POSITIVE => we don't do anything for now. it seems that the tool doesn't export them (filtered)
"1": 7, # CONFIDENCE_LOW => Tentative
"2": 4, # CONFIDENCE_MEDIUM => Firm
"3": 1, # CONFIDENCE_HIGH => Certain
"4": 1, # CONFIDENCE_USER_CONFIRMED => Certain
}
def get_scan_types(self):
return ["ZAP Scan"]
def get_label_for_scan_types(self, scan_type):
return "ZAP Scan"
def get_description_for_scan_types(self, scan_type):
return "ZAP XML report format."
def get_findings(self, file, test):
tree = ET.parse(file)
items = list()
for node in tree.findall("site"):
for item in node.findall("alerts/alertitem"):
finding = Finding(
test=test,
title=item.findtext("alert"),
description=html2text(item.findtext("desc")),
severity=self.MAPPING_SEVERITY.get(item.findtext("riskcode")),
                    scanner_confidence=self.MAPPING_CONFIDENCE.get(item.findtext("confidence")),
mitigation=html2text(item.findtext("solution")),
references=html2text(item.findtext("reference")),
dynamic_finding=True,
static_finding=False,
vuln_id_from_tool=item.findtext("pluginid"),
)
if item.findtext("cweid") is not None and item.findtext("cweid").isdigit():
finding.cwe = int(item.findtext("cweid"))
finding.unsaved_endpoints = []
finding.unsaved_req_resp = []
for instance in item.findall("instances/instance"):
endpoint = Endpoint.from_uri(instance.findtext("uri"))
request = f"{instance.findtext('method')} {endpoint.query}#{endpoint.fragment}"
# we remove query and fragment because with some configuration
# the tool generate them on-the-go and it produces a lot of fake endpoints
endpoint.query = None
endpoint.fragment = None
finding.unsaved_endpoints.append(endpoint)
finding.unsaved_req_resp.append({"req": request, "resp": f"{instance.findtext('evidence')}"})
items.append(finding)
return items
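# Hedged usage sketch, not part of the original parser: the severity string set
# on each Finding comes straight from MAPPING_SEVERITY keyed by ZAP's riskcode.
def _demo_severity_mapping():
    parser = ZapParser()
    return [parser.MAPPING_SEVERITY.get(code) for code in ("0", "1", "2", "3")]
    # -> ['Info', 'Low', 'Medium', 'High']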
|
11461951
|
class App():
name = None
link = None
rating = 0
free = False
IAP = False
update_date = None
def __init__(self):
pass
|
11461976
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
from common_functions import load_data, add_zero_feature, lr_accuracy, cf_lr as cost_function, gf_lr as grad_function
if __name__ == '__main__':
X, y = load_data('ex2data1.txt')
x1, x2 = X.T
f_y = y.ravel()
plt.plot(x1[f_y==0], x2[f_y==0], 'yo')
plt.plot(x1[f_y==1], x2[f_y==1], 'bx')
plt.show()
X = add_zero_feature(X)
m, n = X.shape
initial_theta = np.ones((n, 1))
theta = minimize(cost_function, initial_theta, method='BFGS', jac=grad_function, options={'disp': False},
args=(X, y)).x
    print(theta)
    print(cost_function(theta, X, y))
x1_boundery = np.array([np.min(x1)-2, np.max(x1)+2])
x2_boundery = (-1/theta[2])*(theta[1]*x1_boundery + theta[0])
plt.plot(x1[f_y==0], x2[f_y==0], 'yo')
plt.plot(x1[f_y==1], x2[f_y==1], 'bx')
plt.plot(x1_boundery, x2_boundery)
plt.show()
    print('Train Accuracy: {}'.format(lr_accuracy(X, y, theta)))
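# --- hedged sketch, not taken from the original common_functions module ---
# cf_lr / gf_lr are imported above but their source is not shown here; a
# standard logistic-regression cost and gradient of roughly this shape is assumed.
def _sketch_cost_function(theta, X, y):
    h = 1.0 / (1.0 + np.exp(-X.dot(theta.reshape(-1, 1))))  # sigmoid hypothesis
    return float(-np.mean(y * np.log(h) + (1 - y) * np.log(1 - h)))

def _sketch_grad_function(theta, X, y):
    h = 1.0 / (1.0 + np.exp(-X.dot(theta.reshape(-1, 1))))
    return (X.T.dot(h - y) / X.shape[0]).ravel()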
|
11461980
|
import os
import shutil
import pdb
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
def save_checkpoint(state, is_best, path, filename='checkpoint.pth.tar'):
filename = os.path.join(path, filename)
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(path,'model_best.pth.tar'))
def load_checkpoint(model, checkpoint):
m_keys = list(model.state_dict().keys())
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
c_keys = list(checkpoint['state_dict'].keys())
not_m_keys = [i for i in c_keys if i not in m_keys]
not_c_keys = [i for i in m_keys if i not in c_keys]
model.load_state_dict(checkpoint['state_dict'], strict=False)
else:
c_keys = list(checkpoint.keys())
not_m_keys = [i for i in c_keys if i not in m_keys]
not_c_keys = [i for i in m_keys if i not in c_keys]
model.load_state_dict(checkpoint, strict=False)
print("--------------------------------------\n LOADING PRETRAINING \n")
print("Not in Model: ")
print(not_m_keys)
print("Not in Checkpoint")
print(not_c_keys)
print('\n\n')
def get_cifar100_dataloaders(train_batch_size, test_batch_size):
transform_train = transforms.Compose([
transforms.Pad(4, padding_mode='reflect'),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor(),
transforms.Normalize(mean=[x / 255.0 for x in [129.3, 124.1, 112.4]],
std=[x / 255.0 for x in [68.2, 65.4, 70.4]])
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[x / 255.0 for x in [129.3, 124.1, 112.4]],
std=[x / 255.0 for x in [68.2, 65.4, 70.4]])])
trainset = torchvision.datasets.CIFAR100(root='~/data', train=True, download=True,
transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True, num_workers=4)
testset = torchvision.datasets.CIFAR100(root='~/data', train=False, download=True,
transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, num_workers=4)
subset_idx = np.random.randint(0, len(trainset), size=10000)
valloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(subset_idx))
return trainloader, valloader, testloader
def get_cifar100_dataloaders_disjoint(train_batch_size, test_batch_size):
np.random.seed(0)
transform_train = transforms.Compose([
transforms.Pad(4, padding_mode='reflect'),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor(),
transforms.Normalize(mean=[x / 255.0 for x in [129.3, 124.1, 112.4]],
std=[x / 255.0 for x in [68.2, 65.4, 70.4]])
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[x / 255.0 for x in [129.3, 124.1, 112.4]],
std=[x / 255.0 for x in [68.2, 65.4, 70.4]])])
trainset = torchvision.datasets.CIFAR100(root='~/data', train=True, download=True,transform=transform_train)
total_idx = np.arange(0,len(trainset))
np.random.shuffle(total_idx)
subset_idx = total_idx[:10000]
_subset_idx = total_idx[~np.in1d(total_idx, subset_idx)]
valloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(subset_idx))
trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(_subset_idx))
testset = torchvision.datasets.CIFAR100(root='~/data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, num_workers=4)
return trainloader, valloader, testloader
class KLLoss(nn.Module):
def __init__(self):
super(KLLoss, self).__init__()
def forward(self, pred, label):
# pred: 2D matrix (batch_size, num_classes)
# label: 1D vector indicating class number
T=3
predict = F.log_softmax(pred/T,dim=1)
target_data = F.softmax(label/T,dim=1)
target_data =target_data+10**(-7)
target = Variable(target_data.data.cuda(),requires_grad=False)
loss=T*T*((target*(target.log()-predict)).sum(1).sum()/target.size()[0])
return loss
def sigmoid_rampup(current, rampup_length):
"""Exponential rampup from https://arxiv.org/abs/1610.02242"""
if rampup_length == 0:
return 1.0
else:
current = np.clip(current, 0.0, rampup_length)
phase = 1.0 - current / rampup_length
return float(np.exp(-5.0 * phase * phase))
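# Hedged usage sketch, not part of the original module: the rampup weight grows
# from exp(-5) ~= 0.007 at step 0 to 1.0 once `current` reaches `rampup_length`.
def _demo_sigmoid_rampup():
    weights = [sigmoid_rampup(step, 40) for step in (0, 10, 20, 40, 100)]
    # roughly [0.007, 0.060, 0.287, 1.0, 1.0]
    return weights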
|
11462012
|
import torch
import torch.nn as nn
from functools import partial
# from timm.models.vision_transformer import VisionTransformer, _cfg
from vision_transformer import VisionTransformer, _cfg
from conformer import Conformer
from timm.models.registry import register_model
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_med_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=576, depth=12, num_heads=9, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
raise NotImplementedError
return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def Conformer_tiny_patch16(pretrained=False, **kwargs):
model = Conformer(patch_size=16, channel_ratio=1, embed_dim=384, depth=12,
num_heads=6, mlp_ratio=4, qkv_bias=True, **kwargs)
if pretrained:
raise NotImplementedError
return model
@register_model
def Conformer_small_patch16(pretrained=False, **kwargs):
model = Conformer(patch_size=16, channel_ratio=4, embed_dim=384, depth=12,
num_heads=6, mlp_ratio=4, qkv_bias=True, **kwargs)
if pretrained:
raise NotImplementedError
return model
@register_model
def Conformer_small_patch32(pretrained=False, **kwargs):
model = Conformer(patch_size=32, channel_ratio=4, embed_dim=384, depth=12,
num_heads=6, mlp_ratio=4, qkv_bias=True, **kwargs)
if pretrained:
raise NotImplementedError
return model
@register_model
def Conformer_base_patch16(pretrained=False, **kwargs):
model = Conformer(patch_size=16, channel_ratio=6, embed_dim=576, depth=12,
num_heads=9, mlp_ratio=4, qkv_bias=True, **kwargs)
if pretrained:
raise NotImplementedError
return model
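# Hedged usage sketch, not part of the original file: because each factory above
# is decorated with @register_model, timm can build it by name via create_model.
# The 224x224 input size is an assumption based on the patch-16 ViT defaults.
def _demo_create_deit_tiny():
    import timm
    model = timm.create_model('deit_tiny_patch16_224', pretrained=False)
    dummy = torch.randn(1, 3, 224, 224)
    return model(dummy).shape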
|
11462013
|
def pargen(left, right, ans):
    if left == 0 and right == 0:
        print(ans)
    if left > 0:
        pargen(left - 1, right + 1, ans + '(')
    if right > 0:
        pargen(left, right - 1, ans + ')')
pargen(3, 0, '')  # can pass any starting value as left; the initial value of right is always 0
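# Hedged companion sketch, not part of the original script: the same recursion,
# collecting results instead of printing. For n pairs it yields the n-th Catalan
# number of strings; pargen(3, 0, '') prints ((())), (()()), (())(), ()(()), ()()().
def pargen_collect(n=3):
    results = []
    def collect(left, right, ans):
        if left == 0 and right == 0:
            results.append(ans)
        if left > 0:
            collect(left - 1, right + 1, ans + '(')
        if right > 0:
            collect(left, right - 1, ans + ')')
    collect(n, 0, '')
    return results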
|
11462024
|
from justgood import imjustgood
text1 = "top"
text2 = "bottom"
image = "http://www.gstatic.com/webp/gallery/1.png"
api = imjustgood("YOUR_APIKEY_HERE")
data = api.meme(text1, text2, image)
result = data["result"]
print(result)
|
11462031
|
from typing import List, Tuple
from pyrep.objects.joint import Joint
from pyrep.objects.dummy import Dummy
from rlbench.backend.task import Task
from rlbench.backend.conditions import JointCondition, NothingGrasped
OPTIONS = ['right', 'left']
class SlideCabinetOpen(Task):
def init_task(self):
self.left_joint = Joint('left_joint')
self.right_joint = Joint('right_joint')
self.waypoint0 = Dummy('waypoint0')
self.waypoint1 = Dummy('waypoint1')
self.waypoint2 = Dummy('waypoint2')
self.left_initial_waypoint = Dummy('waypoint4')
self.left_close_waypoint = Dummy('waypoint5')
self.left_far_waypoint = Dummy('waypoint6')
def init_episode(self, index: int) -> List[str]:
option = OPTIONS[index]
if option == 'left':
self.waypoint0.set_position(
self.left_initial_waypoint.get_position())
self.waypoint1.set_position(self.left_close_waypoint.get_position())
self.waypoint2.set_position(self.left_far_waypoint.get_position())
self.register_success_conditions(
[JointCondition(self.left_joint, 0.06),
NothingGrasped(self.robot.gripper)])
else:
self.register_success_conditions(
[JointCondition(self.right_joint, 0.06),
NothingGrasped(self.robot.gripper)])
return ['slide %s cabinet open' % option,
'open the %s door' % option,
'open the %s half of the cabinet' % option,
                'slide open the %s side of the cabinet' % option,
'grip the %s handle and slide the door open' % option,
                'grasp the %s door\'s handle and drag it towards the middle'
' of the cabinet in order to slide that door open' % option]
def variation_count(self) -> int :
return 2
def base_rotation_bounds(self) -> Tuple[List[float], List[float]]:
return [0.0, 0.0, -3.14 / 4.], [0.0, 0.0, 3.14 / 4.]
|
11462079
|
from library.api.db import EntityWithNameModel, db
class InterfaceTask(EntityWithNameModel):
ACTIVE = 0
DISABLE = 1
    num = db.Column(db.Integer(), comment='task sequence number')
    task_name = db.Column(db.String(64), comment='task name')
    task_config_time = db.Column(db.String(256), nullable=True, comment='cron expression')
    set_id = db.Column(db.String(2048))
    case_id = db.Column(db.String(2048))
    task_type = db.Column(db.String(16))
    task_to_email_address = db.Column(db.String(256), comment='recipient email address')
    task_send_email_address = db.Column(db.String(256), comment='sender email address')
    email_password = db.Column(db.String(256), comment='sender email password')
    status = db.Column(db.String(16), default=u'创建', comment="task run status; the default value u'创建' means 'created'")
    project_id = db.Column(db.String(16), nullable=True)
    delete_status = db.Column(db.Integer, default=ACTIVE)  # deletion status flag
|
11462115
|
import subprocess
from collections import defaultdict
from tempfile import NamedTemporaryFile as Temp
from . import data
def compare_trees(*trees):
entries = defaultdict(lambda: [None] * len(trees))
for i, tree in enumerate(trees):
for path, oid in tree.items():
entries[path][i] = oid
for path, oids in entries.items():
yield (path, *oids)
def iter_changed_files(t_from, t_to):
for path, o_from, o_to in compare_trees(t_from, t_to):
if o_from != o_to:
action = ('new file' if not o_from else
'deleted' if not o_to else
'modified')
yield path, action
def diff_trees(t_from, t_to):
output = b''
for path, o_from, o_to in compare_trees(t_from, t_to):
if o_from != o_to:
output += diff_blobs(o_from, o_to, path)
return output
def diff_blobs(o_from, o_to, path='blob'):
with Temp() as f_from, Temp() as f_to:
for oid, f in ((o_from, f_from), (o_to, f_to)):
if oid:
f.write(data.get_object(oid))
f.flush()
with subprocess.Popen(
['diff', '--unified', '--show-c-function',
'--label', f'a/{path}', f_from.name,
'--label', f'b/{path}', f_to.name],
stdout=subprocess.PIPE) as proc:
output, _ = proc.communicate()
return output
def merge_trees(t_base, t_HEAD, t_other):
tree = {}
for path, o_base, o_HEAD, o_other in compare_trees(t_base, t_HEAD, t_other):
tree[path] = data.hash_object(merge_blobs(o_base, o_HEAD, o_other))
return tree
def merge_blobs(o_base, o_HEAD, o_other):
with Temp() as f_base, Temp() as f_HEAD, Temp() as f_other:
# Write blobs to files
for oid, f in ((o_base, f_base), (o_HEAD, f_HEAD), (o_other, f_other)):
if oid:
f.write(data.get_object(oid))
f.flush()
with subprocess.Popen(
['diff3', '-m',
'-L', 'HEAD', f_HEAD.name,
'-L', 'BASE', f_base.name,
'-L', 'MERGE_HEAD', f_other.name,
], stdout=subprocess.PIPE) as proc:
output, _ = proc.communicate()
assert proc.returncode in (0, 1)
return output
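# Hedged usage sketch, not part of the original module: compare_trees merges
# several {path: oid} mappings into rows of (path, oid_in_tree_1, oid_in_tree_2, ...),
# filling missing paths with None. Toy oids are used instead of real object hashes.
def _demo_compare_trees():
    t_from = {'a.txt': 'oid1', 'b.txt': 'oid2'}
    t_to = {'a.txt': 'oid1', 'c.txt': 'oid3'}
    return sorted(compare_trees(t_from, t_to))
    # [('a.txt', 'oid1', 'oid1'), ('b.txt', 'oid2', None), ('c.txt', None, 'oid3')]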
|
11462127
|
from datetime import datetime
from .serving_line import ServingLine
from .line_operator import LineOperator
class Arrival:
r"""
Arrival object from a arrival request of one station.
Attributes
-----------
raw :class:`dict`
Raw dict received by the API.
stop_id :class:`str`
Station_id of the arrival.
x :class:`str`
Coordinates of the station.
y :class:`str`
Coordinates of the station.
map_name :class:`str`
Map name the API works on.
area :class:`str`
The area of the station (unsure atm)
platform :class:`str`
Platform / track of the arrival.
platform_name :class:`str`
name of the platform.
stop_name :class:`str`
name of the station.
name_wo :class:`str`
name of the station.
countdown :class:`int`
minutes until arrival.
datetime :class:`datetime.datetime`
Planned arrival datetime.
real_datetime :class:`datetime.datetime`
Estimated arrival datetime (equal to ``self.datetime`` if no realtime data is available).
delay :class:`int`
Delay of arrival in minutes.
serving_line :class:`ServingLine`
line of the incoming arrival.
operator :class:`LineOperator`
Operator of the incoming arrival.
stop_infos Optional[:class:`dict`]
All related info to the station (e.g. maintenance work).
line_infos Optional[:class:`dict`]
All related info to the station (e.g. maintenance work).
"""
def __init__(self, **kwargs):
self.stop_id = kwargs.get("stopID")
self.x = kwargs.get("x")
self.y = kwargs.get("y")
self.map_name = kwargs.get("mapName")
self.area = kwargs.get("area")
self.platform = kwargs.get("platform")
self.platform_name = kwargs.get("platformName")
self.stop_name = kwargs.get("stopName")
self.name_wo = kwargs.get("nameWO")
self.point_type = kwargs.get("pointType")
self.countdown = int(kwargs.get("countdown", "0"))
dt = kwargs.get("dateTime")
if dt:
try:
self.datetime = datetime(
year=int(dt.get("year", datetime.now().year)),
month=int(dt.get("month", datetime.now().month)),
day=int(dt.get("day", datetime.now().day)),
hour=int(dt.get("hour", datetime.now().hour)),
minute=int(dt.get("minute", datetime.now().minute))
)
except ValueError:
pass
else:
self.datetime = None
r_dt = kwargs.get("realDateTime")
if r_dt:
try:
self.real_datetime = datetime(
year=int(r_dt.get("year", datetime.now().year)),
month=int(r_dt.get("month", datetime.now().month)),
day=int(r_dt.get("day", datetime.now().day)),
hour=int(r_dt.get("hour", datetime.now().hour)),
minute=int(r_dt.get("minute", datetime.now().minute))
)
except ValueError:
pass
else:
self.real_datetime = self.datetime
        if self.real_datetime and self.datetime:
            self.delay = int((self.real_datetime - self.datetime).total_seconds() / 60)
        else:
            self.delay = 0
self.serving_line = ServingLine(**kwargs.get("servingLine", {}))
self.operator = LineOperator(**kwargs.get("operator", {}))
# inserted raw
self.raw = kwargs
self.stop_infos = kwargs.get("stopInfos")
self.line_infos = kwargs.get("lineInfos")
def __str__(self):
pre = "[Delayed] " if self.delay else ""
if self.real_datetime.date() == datetime.now().date():
return f"{pre}[{str(self.real_datetime.strftime('%H:%M'))}] {self.serving_line}"
return f"{pre}[{str(self.real_datetime)}] {self.serving_line}"
|
11462168
|
from DB.Connection.BaseConnection import BaseConnection
from Config.Run.config import Config
class LoginConnection(BaseConnection):
def __init__(self):
super().__init__(
user=Config.Database.Connection.username,
password=Config.Database.Connection.password,
host=Config.Database.Connection.host,
db_name=Config.Database.DBNames.login_db
)
|
11462184
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from six.moves import xrange
import tensorflow as tf
import os, sys, pickle, argparse
sys.path.append('../utils/')
from model_eval import model_eval
from scipy.special import logsumexp
import keras.backend
sys.path.append('load/')
from load_classifier import load_classifier
def comp_logp(logit, y, text, comp_logit_dist = False):
logpx = logsumexp(logit, axis=1)
logpx_mean = np.mean(logpx)
logpx_std = np.sqrt(np.var(logpx))
logpxy = np.sum(y * logit, axis=1)
logpxy_mean = []; logpxy_std = []
for i in xrange(y.shape[1]):
ind = np.where(y[:, i] == 1)[0]
logpxy_mean.append(np.mean(logpxy[ind]))
logpxy_std.append(np.sqrt(np.var(logpxy[ind])))
print('%s: logp(x) = %.3f +- %.3f, logp(x|y) = %.3f +- %.3f' \
% (text, logpx_mean, logpx_std, np.mean(logpxy_mean), np.mean(logpxy_std)))
results = [logpx, logpx_mean, logpx_std, logpxy, logpxy_mean, logpxy_std]
# compute distribution of the logits
if comp_logit_dist:
logit_mean = []
logit_std = []
logit_kl_mean = []
logit_kl_std = []
softmax_mean = []
for i in xrange(y.shape[1]):
ind = np.where(y[:, i] == 1)[0]
logit_mean.append(np.mean(logit[ind], 0))
logit_std.append(np.sqrt(np.var(logit[ind], 0)))
logit_tmp = logit[ind] - logsumexp(logit[ind], axis=1)[:, np.newaxis]
softmax_mean.append(np.mean(np.exp(logit_tmp), 0))
logit_kl = np.sum(softmax_mean[i] * (np.log(softmax_mean[i]) - logit_tmp), 1)
logit_kl_mean.append(np.mean(logit_kl))
logit_kl_std.append(np.sqrt(np.var(logit_kl)))
results.extend([logit_mean, logit_std, logit_kl_mean, logit_kl_std, softmax_mean])
return results
def comp_detect(x, x_mean, x_std, alpha, plus):
if plus:
detect_rate = np.mean(x > x_mean + alpha * x_std)
else:
detect_rate = np.mean(x < x_mean - alpha * x_std)
return detect_rate * 100
def search_alpha(x, x_mean, x_std, target_rate = 5.0, plus = False):
alpha_min = 0.0
alpha_max = 3.0
alpha_now = 1.5
detect_rate = comp_detect(x, x_mean, x_std, alpha_now, plus)
T = 0
while np.abs(detect_rate - target_rate) > 0.01 and T < 20:
if detect_rate > target_rate:
alpha_min = alpha_now
else:
alpha_max = alpha_now
alpha_now = 0.5 * (alpha_min + alpha_max)
detect_rate = comp_detect(x, x_mean, x_std, alpha_now, plus)
T += 1
return alpha_now, detect_rate
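# Hedged sketch, not part of the original script: search_alpha bisects alpha so
# the threshold mean - alpha*std flags roughly `target_rate` percent of the
# in-distribution statistics. Toy Gaussian data is used here for illustration.
def _demo_search_alpha():
    rng = np.random.RandomState(0)
    stats = rng.randn(10000)
    alpha, rate = search_alpha(stats, stats.mean(), stats.std(), target_rate=5.0, plus=False)
    return alpha, rate  # alpha lands near 1.64 for a 5% one-sided tail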
def test_attacks(batch_size, conv, guard_name, targeted, attack_method, victim_name, data_name, save):
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Create TF session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
print("Created TensorFlow session.")
# Get MNIST test data
use_data = True
if use_data:
if data_name == 'mnist':
img_rows, img_cols, channels = 28, 28, 1
from cleverhans.utils_mnist import data_mnist
x_train, y_train, x_clean, y_clean = data_mnist(train_start=0,
train_end=60000,
test_start=0,
test_end=10000)
if data_name in ['cifar10', 'plane_frog']:
img_rows, img_cols, channels = 32, 32, 3
from import_data_cifar10 import load_data_cifar10
labels = None
if data_name == 'plane_frog':
labels = [0, 6]
datapath = '../cifar_data/'
x_train, x_clean, y_train, y_clean = load_data_cifar10(datapath, labels=labels)
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(batch_size, img_rows, img_cols, channels))
y = tf.placeholder(tf.float32, shape=(batch_size, nb_classes))
# Define TF model graph
gen = load_classifier(sess, guard_name, data_name)
if 'bayes' in guard_name and 'distill' not in guard_name and 'query' not in guard_name:
vae_type = guard_name[-1]
guard_name += '_cnn'
# now perform detection
path = 'raw_attack_results/' + victim_name + '/'
print(path)
assert os.path.isdir(path)
filename = data_name + '_' + attack_method
if targeted:
filename = filename + '_targeted'
else:
filename = filename + '_untargeted'
filename = path + filename + '.pkl'
x_adv, _, y_clean, adv_logits = pickle.load(open(filename, 'rb'))
# for cifar-binary, need to extract test data that all the classifiers agree on
if data_name == 'plane_frog':
load_path = 'data_ind/'
ind = range(x_clean.shape[0])
classifiers = ['bayes_K10_A_cnn', 'bayes_K10_B_cnn', 'bayes_K10_C_cnn',
'bayes_K10_D_cnn', 'bayes_K10_E_cnn', 'bayes_K10_F_cnn',
'bayes_K10_G_cnn']#, 'bnn_K10']
for c in classifiers:
filename = load_path + data_name + '_' + c + '.pkl'
tmp = pickle.load(open(filename, 'rb'))
ind = list(set(ind) & set(tmp))
        print('crafting adversarial examples only on correctly predicted images...')
print('%d / %d in total' % (len(ind), x_clean.shape[0]))
x_clean = x_clean[ind]; y_clean = y_clean[ind]
print(len(ind), x_adv.shape, adv_logits.shape)
x_adv = x_adv[ind]; adv_logits = adv_logits[ind]
print("data loaded from %s, %d samples in total" % (filename, x_adv.shape[0]))
print(x_clean.shape, x_adv.shape)
if 'bnn' not in guard_name:
keras.backend.set_learning_phase(0)
else:
keras.backend.set_learning_phase(1)
y_logit_op = gen.predict(x, softmax=False)
# compute classification
y_logit_adv = []
for i in xrange(int(x_adv.shape[0] / batch_size)):
X_batch = x_adv[i*batch_size:(i+1)*batch_size]
y_logit_adv.append(sess.run(y_logit_op, feed_dict={x: X_batch}))
y_logit_adv = np.concatenate(y_logit_adv, 0)
N_adv_total = y_logit_adv.shape[0]
x_clean = x_clean[:N_adv_total]; y_clean = y_clean[:N_adv_total]
x_adv = x_adv[:N_adv_total]; adv_logits = adv_logits[:N_adv_total]
test_attack = False
if guard_name != victim_name:
if guard_name + '_cnn' != victim_name:
print('test transfer attack: attack crafted on victim model')
test_attack = True
if 'distill' in victim_name:
print('test gray-box attack: attack crafted on a distilled model')
test_attack = True
if test_attack:
# test adversarial example transfer, compute the classification again
print('test adversarial example transfer from %s to %s' % (victim_name, guard_name))
y_adv = np.zeros((y_logit_adv.shape[0], nb_classes), dtype=np.float32)
y_adv[np.arange(y_logit_adv.shape[0]), np.argmax(y_logit_adv, 1)] = 1
# get index of victim sucessful attacks
y_adv_victim = np.zeros((adv_logits.shape[0], nb_classes), dtype=np.float32)
y_adv_victim[np.arange(adv_logits.shape[0]), np.argmax(adv_logits, 1)] = 1
correct_prediction = (np.argmax(y_adv_victim, 1) == np.argmax(y_clean, 1))
ind_success_victim = np.where(correct_prediction==0)[0]
else:
y_adv = np.zeros((adv_logits.shape[0], nb_classes), dtype=np.float32)
y_adv[np.arange(adv_logits.shape[0]), np.argmax(adv_logits, 1)] = 1
correct_prediction = (np.argmax(y_adv, 1) == np.argmax(y_clean, 1))
accuracy = np.mean(correct_prediction)
success_rate = 100.0 * (1 - accuracy)
ind_success = np.where(correct_prediction==0)[0]
if not test_attack:
ind_success_victim = ind_success
# compute success rate on successful victim attack
success_rate_victim = 100.0 * (1 - np.mean( ( np.argmax(y_adv[ind_success_victim], 1) \
== np.argmax(y_clean[ind_success_victim], 1) ) ))
print("attack success rate (all/victim) = %.4f / %.4f" % (success_rate, success_rate_victim))
# compute the perturbation on successful attacks
if len(ind_success) > 0:
diff = x_adv[ind_success] - x_clean[ind_success]
l2_diff = np.sqrt(np.sum(diff**2, axis=(1, 2, 3)))
li_diff = np.max(np.abs(diff), axis=(1, 2, 3))
l0_diff = np.sum((diff != 0), axis=(1, 2, 3))
        print('perturbation for successful attack: L_2 = %.3f +- %.3f' % (np.mean(l2_diff), np.sqrt(np.var(l2_diff))))
        print('perturbation for successful attack: L_inf = %.3f +- %.3f' % (np.mean(li_diff), np.sqrt(np.var(li_diff))))
        print('perturbation for successful attack: L_0 = %.3f +- %.3f' % (np.mean(l0_diff), np.sqrt(np.var(l0_diff))))
# confidence of the attack (using entropy)
tmp_logp = adv_logits - logsumexp(adv_logits, 1)[:, np.newaxis]
tmp_p = np.exp(tmp_logp)
print(tmp_logp.mean(), tmp_p.mean())
entropy = -np.sum(tmp_p * tmp_logp, 1)
print('entropy successful attack: %.3f +- %.3f' % (np.mean(entropy), np.sqrt(np.var(entropy))))
else:
print('no successful attack, abort...')
return 0
# then compute logit on both clean and adv samples
y_logit_train = []
print('-------------------------------------')
print('compute statistics on data')
for i in xrange(int(x_train.shape[0] / batch_size)):
X_batch = x_train[i*batch_size:(i+1)*batch_size]
y_logit_train.append(sess.run(y_logit_op, feed_dict={x: X_batch}))
y_logit_train = np.concatenate(y_logit_train)
y_train = y_train[:y_logit_train.shape[0]]
results_train = comp_logp(y_logit_train, y_train, 'train', comp_logit_dist = True)
y_logit_clean = []
for i in xrange(int(x_clean.shape[0] / batch_size)):
X_batch = x_clean[i*batch_size:(i+1)*batch_size]
y_logit_clean.append(sess.run(y_logit_op, feed_dict={x: X_batch}))
y_logit_clean = np.concatenate(y_logit_clean, 0)
# now produce the logits!
results_clean = comp_logp(y_logit_clean, y_clean, 'clean')
results_adv = comp_logp(y_logit_adv[ind_success], y_adv[ind_success], 'adv (wrong)')
tmp_logp = y_logit_adv[ind_success] - logsumexp(y_logit_adv[ind_success], 1)[:, np.newaxis]
tmp_p = np.exp(tmp_logp)
entropy = -np.sum(tmp_p * tmp_logp, 1)
print('entropy on ind_success: %.3f +- %.3f' % (np.mean(entropy), np.sqrt(np.var(entropy))))
# use mean as rejection
print("-------------------------------------")
results = {}
results['success_rate'] = success_rate
results['success_rate_victim'] = success_rate_victim
results['mean_dist_l2'] = np.mean(l2_diff)
results['std_dist_l2'] = np.sqrt(np.var(l2_diff))
results['mean_dist_l0'] = np.mean(l0_diff)
results['std_dist_l0'] = np.sqrt(np.var(l0_diff))
results['mean_dist_li'] = np.mean(li_diff)
results['std_dist_li'] = np.sqrt(np.var(li_diff))
if guard_name in ['mlp', 'cnn']:
plus = True
else:
plus = False
alpha, detect_rate = search_alpha(results_train[0], results_train[1], results_train[2], plus=plus)
detect_rate = comp_detect(results_train[0], results_train[1], results_train[2], alpha, plus=plus)
delta_marginal = -(results_train[1] - alpha * results_train[2])
print('delta_marginal:', delta_marginal)
print('false alarm rate (reject < mean of logp(x) - %.2f * std): %.4f' % (alpha, detect_rate))
results['FP_logpx'] = detect_rate
detect_rate = comp_detect(results_adv[0], results_train[1], results_train[2], alpha, plus=plus)
print('detection rate (reject < mean of logp(x) - %.2f * std): %.4f' % (alpha, detect_rate))
results['TP_logpx'] = detect_rate
fp_rate = []
tp_rate = []
delta_logit = []
for i in xrange(nb_classes):
ind = np.where(y_train[:, i] == 1)[0]
alpha, detect_rate = search_alpha(results_train[3][ind], results_train[4][i], results_train[5][i], plus=plus)
detect_rate = comp_detect(results_train[3][ind], results_train[4][i], results_train[5][i], alpha, plus=plus)
fp_rate.append(detect_rate)
delta_logit.append(-(results_train[4][i] - alpha * results_train[5][i]))
ind = np.where(y_adv[ind_success][:, i] == 1)[0]
if len(ind) == 0: # no success attack, skip
continue
detect_rate = comp_detect(results_adv[3][ind], results_train[4][i], results_train[5][i], alpha, plus=plus)
tp_rate.append(detect_rate)
delta_logit = np.asarray(delta_logit, dtype='f')
print('delta_logit:', delta_logit)
tp_rate = np.mean(tp_rate)
fp_rate = np.mean(fp_rate)
print('false alarm rate (reject < mean of logp(x|y) - %.2f * std): %.4f' % (alpha, fp_rate))
results['FP_logpxy'] = fp_rate
print('detection rate (reject < mean of logp(x|y) - %.2f * std): %.4f' % (alpha, tp_rate))
results['TP_logpxy'] = tp_rate
# now test the kl rejection scheme
logit_mean, _, kl_mean, kl_std, softmax_mean = results_train[-5:]
fp_rate = []
tp_rate = []
delta_kl = []
for i in xrange(nb_classes):
ind = np.where(y_train[:, i] == 1)[0]
logit_tmp = y_logit_train[ind] - logsumexp(y_logit_train[ind], axis=1)[:, np.newaxis]
kl = np.sum(softmax_mean[i] * (np.log(softmax_mean[i]) - logit_tmp), 1)
alpha, detect_rate = search_alpha(kl, kl_mean[i], kl_std[i], plus=True)
detect_rate = comp_detect(kl, kl_mean[i], kl_std[i], alpha, plus=True)
fp_rate.append(detect_rate)
delta_kl.append(kl_mean[i] + alpha * kl_std[i])
ind = np.where(y_adv[ind_success][:, i] == 1)[0]
if len(ind) == 0: # no success attack, skip
continue
logit_tmp = y_logit_adv[ind] - logsumexp(y_logit_adv[ind], axis=1)[:, np.newaxis]
kl = np.sum(softmax_mean[i] * (np.log(softmax_mean[i]) - logit_tmp), 1)
detect_rate = comp_detect(kl, kl_mean[i], kl_std[i], alpha, plus=True)
tp_rate.append(detect_rate)
delta_kl = np.asarray(delta_kl, dtype='f')
print('delta_kl:', delta_kl)
tp_rate = np.mean(tp_rate)
fp_rate = np.mean(fp_rate)
print('false alarm rate (reject > mean of conditional KL + %.2f * std): %.4f' % (alpha, fp_rate))
results['FP_kl'] = fp_rate
print('detection rate (reject > mean of conditional KL + %.2f * std): %.4f' % (alpha, tp_rate))
results['TP_kl'] = tp_rate
# save results
if save:
if not os.path.isdir('detection_results/'):
os.mkdir('detection_results/')
print('create path detection_results/')
path = 'detection_results/' + guard_name + '/'
if not os.path.isdir(path):
os.mkdir(path)
print('create path ' + path)
filename = data_name + '_' + victim_name + '_' + attack_method
if targeted:
filename = filename + '_targeted'
else:
filename = filename + '_untargeted'
pickle.dump(results, open(path+filename+'.pkl', 'wb'))
print("results saved at %s.pkl" % (path+filename))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run RVAE experiments.')
parser.add_argument('--batch_size', '-B', type=int, default=100)
parser.add_argument('--data', '-D', type=str, default='mnist')
parser.add_argument('--conv', '-C', action='store_true', default=False)
parser.add_argument('--guard', '-G', type=str, default='bayes_K10')
parser.add_argument('--targeted', '-T', action='store_true', default=False)
parser.add_argument('--attack', '-A', type=str, default='fgsm_eps0.10')
parser.add_argument('--victim', '-V', type=str, default='mlp')
parser.add_argument('--save', '-S', action='store_true', default=False)
args = parser.parse_args()
test_attacks(args.batch_size, args.conv, args.guard, args.targeted,
args.attack, args.victim, args.data, args.save)
|
11462367
|
import re
from random import shuffle
positive_reviews_file_path = "data/amazon-reviews/pos.txt"
negative_reviews_file_path = "data/amazon-reviews/neg.txt"
train_text_file_path = "data/amazon-reviews/reviews-train.txt"
train_labels_file_path = "data/amazon-reviews/sentiment-train.txt"
val_text_file_path = "data/amazon-reviews/reviews-val.txt"
val_labels_file_path = "data/amazon-reviews/sentiment-val.txt"
test_text_file_path = "data/amazon-reviews/reviews-test.txt"
test_labels_file_path = "data/amazon-reviews/sentiment-test.txt"
train_size = 65536
val_size = 1024
test_size = 16384
def clean_text(string):
string = re.sub(r"\\n", " ", string)
string = re.sub(r"\'m", " am", string)
string = re.sub(r"\'ve", " have", string)
string = re.sub(r"n\'t", " not", string)
string = re.sub(r"\'re", " are", string)
string = re.sub(r"\'d", " would", string)
string = re.sub(r"\'ll", " will", string)
string = re.sub(r'\d+', "number", string)
string = string.replace("\r", " ")
string = string.replace("\n", " ")
string = string.strip().lower()
return string
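# Hedged usage sketch, not part of the original script: a quick check of the
# normalisation rules above on a made-up review string.
def _demo_clean_text():
    sample = "I've read 3 books and I'll buy more!\\n"
    return clean_text(sample)
    # -> "i have read number books and i will buy more!"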
count = 0
total_reviews = train_size + val_size + test_size
print("Total Review size: {}".format(total_reviews))
collected_positive_reviews = list()
collected_negative_reviews = list()
with open(positive_reviews_file_path, 'r') as positive_reviews_file, \
open(negative_reviews_file_path, 'r') as negative_reviews_file:
for positive_review, negative_review in zip(positive_reviews_file, negative_reviews_file):
collected_positive_reviews.append(clean_text(positive_review))
collected_negative_reviews.append(clean_text(negative_review))
shuffle(collected_positive_reviews)
shuffle(collected_negative_reviews)
def write_file(text_file_path, labels_file_path, positive_reviews, negative_reviews):
print("Positive reviews: {}, Negative reviews : {}".format(len(positive_reviews), len(negative_reviews)))
with open(text_file_path, 'w') as text_file, open(labels_file_path, 'w') as label_file:
for review in positive_reviews:
text_file.write(review + "\n")
label_file.write("pos" + "\n")
for review in negative_reviews:
text_file.write(review + "\n")
label_file.write("neg" + "\n")
write_file(train_text_file_path, train_labels_file_path, collected_positive_reviews[:train_size],
collected_negative_reviews[:train_size])
print("Training files saved")
write_file(val_text_file_path, val_labels_file_path, collected_positive_reviews[train_size:train_size + val_size],
collected_negative_reviews[train_size:train_size + val_size])
print("Validation files saved")
write_file(test_text_file_path, test_labels_file_path,
collected_positive_reviews[train_size + val_size:total_reviews],
collected_negative_reviews[train_size + val_size:total_reviews])
print("Testing files saved")
|
11462374
|
import unittest
import numpy as np
import pandas as pd
from pyrolite.geochem.ind import REE
from pyrolite.geochem.parse import *
class TestIsChem(unittest.TestCase):
"""Checks the 'is a chem' function."""
def setUp(self):
self.ree = REE()
def test_ischem_str(self):
ret = ischem(self.ree[0])
self.assertTrue(isinstance(ret, bool))
self.assertTrue(ret)
def test_notchem_str(self):
ret = ischem("Notachemical")
self.assertTrue(isinstance(ret, bool))
self.assertFalse(ret)
def test_ischem_list(self):
ret = ischem(self.ree)
self.assertTrue(isinstance(ret, list))
self.assertTrue(all([isinstance(i, bool) for i in ret]))
class TestToChem(unittest.TestCase):
"""Checks the 'transform to chem' function."""
def setUp(self):
self.ree = REE()
def test_tochem_str(self):
ret = tochem([self.ree[0]])
self.assertTrue(ret == [str(self.ree[0])])
def test_tonotchem_str(self):
ret = tochem(["Notachemical"])
self.assertTrue(ret == ["Notachemical"])
def test_tochem_list(self):
ret = tochem(self.ree)
self.assertTrue(ret == list(map(str, self.ree)))
class TestMultipleCationInclusion(unittest.TestCase):
"""Tests the pandas dataframe multiple inclusion checking."""
def setUp(self):
self.cols = ["MgO", "FeO", "Fe2O3", "Mg", "Fe", "FeOT"]
self.df = pd.DataFrame(
{k: v for k, v in zip(self.cols, np.random.rand(len(self.cols), 10))}
)
def test_none(self):
"""Check the function copes with no records."""
# Note that this function runs from columns - doesn't need records
df = self.df.head(0)
self.assertTrue(len(check_multiple_cation_inclusion(df)) > 0)
self.assertTrue(
all(
[
i.__str__() in ["Mg", "Fe"]
for i in check_multiple_cation_inclusion(df)
]
)
)
def test_one(self):
"""Check the transformation functions for one record."""
df = self.df.head(1)
self.assertTrue(len(check_multiple_cation_inclusion(df)) > 0)
self.assertTrue(
all(
[
i.__str__() in ["Mg", "Fe"]
for i in check_multiple_cation_inclusion(df)
]
)
)
def test_multiple(self):
"""Check the transformation functions for multiple records."""
df = self.df
self.assertTrue(len(check_multiple_cation_inclusion(df)) > 0)
self.assertTrue(
all(
[
i.__str__() in ["Mg", "Fe"]
for i in check_multiple_cation_inclusion(df)
]
)
)
def test_exclusion(self):
"""Checks that exclusions are properly handled."""
# Check that excluded components aren't considered
pass
def test_output(self):
"""Checks that the list returned is complete."""
# Check complete
# Check precise
pass
class TestReprIsotopeRatio(unittest.TestCase):
def setUp(self):
self.expect = "87Sr/86Sr"
self.expect_succeed = [
"87Sr/86Sr",
"87Sr_86Sr",
"Sr87/Sr86",
"Sr87_Sr86",
"87Sr 86Sr",
]
self.expect_fail = ["87Sr/Sr86", "87Sr_Sr86", "87Sr Sr86"]
def test_default(self):
for ratio in self.expect_succeed:
with self.subTest(ratio=ratio):
out = repr_isotope_ratio(ratio)
self.assertEqual(out, self.expect)
for ratio in self.expect_fail:
with self.subTest(ratio=ratio):
out = repr_isotope_ratio(ratio)
self.assertEqual(out, ratio) # hasn't changed the string
if __name__ == "__main__":
unittest.main()
|
11462377
|
from pytest import mark # type: ignore
from graphql import graphql, graphql_sync
from graphql.type import (
GraphQLField,
GraphQLInputField,
GraphQLInt,
GraphQLObjectType,
GraphQLSchema,
)
from graphql_relay import mutation_with_client_mutation_id
class Result:
# noinspection PyPep8Naming
def __init__(self, result, clientMutationId=None):
self.clientMutationId = clientMutationId
self.result = result
def dummy_resolve(_info, **_input):
return Result(1)
simple_mutation = mutation_with_client_mutation_id(
"SimpleMutation",
input_fields={},
output_fields={"result": GraphQLField(GraphQLInt)},
mutate_and_get_payload=dummy_resolve,
)
simple_mutation_with_description = mutation_with_client_mutation_id(
"SimpleMutationWithDescription",
description="Simple Mutation Description",
input_fields={},
output_fields={"result": GraphQLField(GraphQLInt)},
mutate_and_get_payload=dummy_resolve,
)
simple_mutation_with_deprecation_reason = mutation_with_client_mutation_id(
"SimpleMutationWithDeprecationReason",
input_fields={},
output_fields={"result": GraphQLField(GraphQLInt)},
mutate_and_get_payload=dummy_resolve,
deprecation_reason="Just because",
)
# noinspection PyPep8Naming
simple_mutation_with_thunk_fields = mutation_with_client_mutation_id(
"SimpleMutationWithThunkFields",
input_fields=lambda: {"inputData": GraphQLInputField(GraphQLInt)},
output_fields=lambda: {"result": GraphQLField(GraphQLInt)},
mutate_and_get_payload=lambda _info, inputData, **_input: Result(inputData),
)
# noinspection PyPep8Naming
async def mutate_and_get_one_as_payload_async(_info, **_input):
return Result(1)
simple_async_mutation = mutation_with_client_mutation_id(
"SimpleAsyncMutation",
input_fields={},
output_fields={"result": GraphQLField(GraphQLInt)},
mutate_and_get_payload=mutate_and_get_one_as_payload_async,
)
simple_root_value_mutation = mutation_with_client_mutation_id(
"SimpleRootValueMutation",
input_fields={},
output_fields={"result": GraphQLField(GraphQLInt)},
mutate_and_get_payload=lambda info, **_input: info.root_value,
)
query_type: GraphQLObjectType = GraphQLObjectType(
"Query", lambda: {"query": GraphQLField(query_type)}
)
mutation_type = GraphQLObjectType(
"Mutation",
fields={
"simpleMutation": simple_mutation,
"simpleMutationWithDescription": simple_mutation_with_description,
"simpleMutationWithDeprecationReason": simple_mutation_with_deprecation_reason,
"simpleMutationWithThunkFields": simple_mutation_with_thunk_fields,
"simpleAsyncMutation": simple_async_mutation,
"simpleRootValueMutation": simple_root_value_mutation,
},
)
schema = GraphQLSchema(query=query_type, mutation=mutation_type)
def describe_mutation_with_client_mutation_id():
def requires_an_argument():
source = """
mutation {
simpleMutation {
result
}
}
"""
assert graphql_sync(schema, source) == (
None,
[
{
"message": "Field 'simpleMutation' argument 'input'"
" of type 'SimpleMutationInput!' is required,"
" but it was not provided.",
"locations": [(3, 15)],
}
],
)
def returns_the_same_client_mutation_id():
source = """
mutation {
simpleMutation(input: {clientMutationId: "abc"}) {
result
clientMutationId
}
}
"""
assert graphql_sync(schema, source) == (
{"simpleMutation": {"result": 1, "clientMutationId": "abc"}},
None,
)
def supports_thunks_as_input_and_output_fields():
source = """
mutation {
simpleMutationWithThunkFields(
input: {inputData: 1234, clientMutationId: "abc"}) {
result
clientMutationId
}
}
"""
assert graphql_sync(schema, source) == (
{
"simpleMutationWithThunkFields": {
"result": 1234,
"clientMutationId": "abc",
}
},
None,
)
@mark.asyncio
async def supports_async_mutations():
source = """
mutation {
simpleAsyncMutation(input: {clientMutationId: "abc"}) {
result
clientMutationId
}
}
"""
assert await graphql(schema, source) == (
{"simpleAsyncMutation": {"result": 1, "clientMutationId": "abc"}},
None,
)
def can_access_root_value():
source = """
mutation {
simpleRootValueMutation(input: {clientMutationId: "abc"}) {
result
clientMutationId
}
}
"""
assert graphql_sync(schema, source, root_value=Result(1)) == (
{"simpleRootValueMutation": {"result": 1, "clientMutationId": "abc"}},
None,
)
def supports_mutations_returning_null():
source = """
mutation {
simpleRootValueMutation(input: {clientMutationId: "abc"}) {
result
clientMutationId
}
}
"""
assert graphql_sync(schema, source, root_value=None) == (
{"simpleRootValueMutation": {"result": None, "clientMutationId": "abc"}},
None,
)
def describe_introspection():
def contains_correct_input():
source = """
{
__type(name: "SimpleMutationInput") {
name
kind
inputFields {
name
type {
name
kind
}
}
}
}
"""
assert graphql_sync(schema, source) == (
{
"__type": {
"name": "SimpleMutationInput",
"kind": "INPUT_OBJECT",
"inputFields": [
{
"name": "clientMutationId",
"type": {"name": None, "kind": "NON_NULL"},
}
],
}
},
None,
)
def contains_correct_payload():
source = """
{
__type(name: "SimpleMutationPayload") {
name
kind
fields {
name
type {
name
kind
}
}
}
}
"""
assert graphql_sync(schema, source) == (
{
"__type": {
"name": "SimpleMutationPayload",
"kind": "OBJECT",
"fields": [
{
"name": "result",
"type": {"name": "Int", "kind": "SCALAR"},
},
{
"name": "clientMutationId",
"type": {"name": None, "kind": "NON_NULL"},
},
],
}
},
None,
)
def contains_correct_field():
source = """
{
__schema {
mutationType {
fields {
name
args {
name
type {
name
kind
ofType {
name
kind
}
}
}
type {
name
kind
}
}
}
}
}
"""
assert graphql_sync(schema, source) == (
{
"__schema": {
"mutationType": {
"fields": [
{
"name": "simpleMutation",
"args": [
{
"name": "input",
"type": {
"name": None,
"kind": "NON_NULL",
"ofType": {
"name": "SimpleMutationInput",
"kind": "INPUT_OBJECT",
},
},
}
],
"type": {
"name": "SimpleMutationPayload",
"kind": "OBJECT",
},
},
{
"name": "simpleMutationWithDescription",
"args": [
{
"name": "input",
"type": {
"name": None,
"kind": "NON_NULL",
"ofType": {
"name": "SimpleMutation"
"WithDescriptionInput",
"kind": "INPUT_OBJECT",
},
},
}
],
"type": {
"name": "SimpleMutationWithDescriptionPayload",
"kind": "OBJECT",
},
},
{
"name": "simpleMutationWithThunkFields",
"args": [
{
"name": "input",
"type": {
"name": None,
"kind": "NON_NULL",
"ofType": {
"name": "SimpleMutation"
"WithThunkFieldsInput",
"kind": "INPUT_OBJECT",
},
},
}
],
"type": {
"name": "SimpleMutationWithThunkFieldsPayload",
"kind": "OBJECT",
},
},
{
"name": "simpleAsyncMutation",
"args": [
{
"name": "input",
"type": {
"name": None,
"kind": "NON_NULL",
"ofType": {
"name": "SimpleAsyncMutationInput",
"kind": "INPUT_OBJECT",
},
},
}
],
"type": {
"name": "SimpleAsyncMutationPayload",
"kind": "OBJECT",
},
},
{
"name": "simpleRootValueMutation",
"args": [
{
"name": "input",
"type": {
"name": None,
"kind": "NON_NULL",
"ofType": {
"name": "SimpleRootValueMutationInput", # noqa: E501
"kind": "INPUT_OBJECT",
},
},
}
],
"type": {
"name": "SimpleRootValueMutationPayload",
"kind": "OBJECT",
},
},
]
}
}
},
None,
)
def contains_correct_descriptions():
source = """
{
__schema {
mutationType {
fields {
name
description
}
}
}
}
"""
assert graphql_sync(schema, source) == (
{
"__schema": {
"mutationType": {
"fields": [
{"name": "simpleMutation", "description": None},
{
"name": "simpleMutationWithDescription",
"description": "Simple Mutation Description",
},
{
"name": "simpleMutationWithThunkFields",
"description": None,
},
{"name": "simpleAsyncMutation", "description": None},
{
"name": "simpleRootValueMutation",
"description": None,
},
]
}
}
},
None,
)
def contains_correct_deprecation_reason():
source = """
{
__schema {
mutationType {
fields(includeDeprecated: true) {
name
isDeprecated
deprecationReason
}
}
}
}
"""
assert graphql_sync(schema, source) == (
{
"__schema": {
"mutationType": {
"fields": [
{
"name": "simpleMutation",
"isDeprecated": False,
"deprecationReason": None,
},
{
"name": "simpleMutationWithDescription",
"isDeprecated": False,
"deprecationReason": None,
},
{
"name": "simpleMutationWithDeprecationReason",
"isDeprecated": True,
"deprecationReason": "Just because",
},
{
"name": "simpleMutationWithThunkFields",
"isDeprecated": False,
"deprecationReason": None,
},
{
"name": "simpleAsyncMutation",
"isDeprecated": False,
"deprecationReason": None,
},
{
"name": "simpleRootValueMutation",
"isDeprecated": False,
"deprecationReason": None,
},
]
}
}
},
None,
)
|
11462382
|
import networkx as nx
import numpy as np
import itertools, logging
import pandas as pd
#from .nodes import ProteinInteractionNode
# These are the key metapath functions; some may be removed over time.
def listCompute(graph,falseP,trueP,middleNode,edgeNode):
# this is now the slowest operation ... followed up w/ this loop
result = pd.DataFrame(data={"protein_id":edgeNode,"pathway_id":middleNode})
# we need to sort this frame into....proteins, and pathways ...
right = result[result.protein_id.isin(trueP)]
left = result[result.protein_id.isin(falseP)|result.protein_id.isin(trueP)]
#print ('right:', right)
#print ('left:', left)
merged = pd.merge(left,right,on='pathway_id')
deduplicates = merged[merged.protein_id_x != merged.protein_id_y].copy()
UNIQUE_DISEASE = len(set(deduplicates.protein_id_y))
deduplicates['middle'] = deduplicates.groupby(['protein_id_y'])['protein_id_y'].transform('count')
deduplicates['edge'] = deduplicates.groupby(['protein_id_x'])['protein_id_x'].transform('count')
deduplicates['endSet'] = deduplicates['middle']**(-0.5) * deduplicates['edge']**(-0.5) * (UNIQUE_DISEASE**(-0.5))
# pathway ID??
final = deduplicates.pivot_table(index=['protein_id_x'],columns=['pathway_id'], values='endSet').fillna(0)
return final
# we need every path from A to B ... B has to be true, but thats it
def singleHop(graph,nodes,trueP,falseP, idDescription, fh):
filterNodes = [k for k in nodes if len(set(graph.adj[k]).intersection(trueP)) > 0]
# lets get all of the nodes, that connect to true nodes
#print(len(filterNodes),len(trueP)) # you've cross with EVERY true, only some exist
edgeFinal = list(itertools.product(filterNodes, list(trueP)))
filteredEdges = []
for e in edgeFinal:
if graph.has_edge(e[0],e[1]): # and (e[1],e[0]) not in savedEdges and (e[0],e[1]) not in savedEdges: # make sure edge is one of a kind? #ie if both true, push once
filteredEdges.append(e)
savedEdges = {}
finalEdges = []
count = 0
for e in filteredEdges: # filter down to edges we've got
if e not in savedEdges:
finalEdges.append(e)
savedEdges[(e[0],e[1])] = True
savedEdges[(e[1],e[0])] = True
middleNodes = []
edgeNodes = []
combinedScores = []
for e in finalEdges:
middleNodes.append(e[1])
edgeNodes.append(e[0])
try:
combinedScores.append(graph.get_edge_data(e[0],e[1])['combined_score'])
        except (KeyError, TypeError):  # missing edge or absent combined_score
combinedScores.append(0)
dataset = pd.DataFrame(data={"protein_id":edgeNodes,"protein_m_id":middleNodes,"scores":combinedScores})
#result.to_csv('STUFF')
###write the edgenodes and middlenodes in a log file
setA = set(edgeNodes)
setB = set(middleNodes)
for node in setA:
try:
line = str(node) + ' : ' + idDescription[node] + '\n'
fh.write(line)
except Exception as e:
logging.error('Node not found: {0}; {1}'.format(node, e))
for node in setB:
try:
line = str(node) + ' : ' + idDescription[node] + '\n'
fh.write(line)
except Exception as e:
logging.error('Node not found: {0}; {1}'.format(node, e))
#return listCompute({},falseP,trueP,middleNodes,edgeNodes)
UNIQUE_DISEASE = len(set(dataset.protein_m_id))
dataset['middle'] = dataset.groupby(['protein_id'])['protein_id'].transform('count').astype(float)
dataset['edge'] = dataset.groupby(['protein_m_id'])['protein_m_id'].transform('count').astype(float)
#print(dataset.dtypes,type(UNIQUE_DISEASE))
# .astype(float) prevents "ZeroDivisionError: 0.0 cannot be raised to a negative power
UNIQUE_DISEASE = float(UNIQUE_DISEASE)
dataset['pdp'] = dataset['middle']**(-0.5) * dataset['edge']**(-0.5) * UNIQUE_DISEASE**(-0.5) * dataset['scores']
final = dataset.pivot_table(index=['protein_id'],columns=['protein_m_id'], values='pdp').fillna(0)
return final
# filter this list-no
def computeType(graph,nodes,trueP,falseP,idDescription,fh):
filterNodes = [k for k in nodes if len(set(graph.adj[k]).intersection(trueP)) > 0]
sub = graph.subgraph(filterNodes+list(falseP|trueP))
# we can actually filter out edges which matter, only those connected to kegg
#for path in nx.all_simple_paths(G, source=0, target=3)
proteinEdges = set()
edgeNode = set()
middleNodeList = []
edgeNodeList = []
for n in filterNodes:
edges = itertools.product([n], list(sub.adj[n]))
middleNodes,edgeNodes = zip(*edges)
middleNodeList = middleNodeList + list(middleNodes)
edgeNodeList = edgeNodeList + list(edgeNodes)
proteinEdges = proteinEdges | set(edges)
edges = list(proteinEdges)
middleNode = filterNodes
newG = {}
###write the edgenodes and middlenodes in a log file
setA = set(edgeNodeList)
setB = set(middleNodeList)
for node in setA:
try:
line = str(node) + ' : ' + idDescription[node] + '\n'
fh.write(line)
except Exception as e:
logging.error('Node not found: {0}; {1}'.format(node, e))
for node in setB:
try:
line = str(node) + ' : ' + idDescription[node] + '\n'
fh.write(line)
except Exception as e:
logging.error('Node not found: {0}; {1}'.format(node, e))
return listCompute(newG,falseP,trueP,middleNodeList,edgeNodeList)
def metapathMatrix(adjMatrix,weight=-0.5):
across = np.sum(adjMatrix,axis=1) # compute count for each base node
down = np.sum(adjMatrix,axis=0) # compute count for each connection
uniqueCount = sum(np.where(down>0,1,0)) #scalar ... compute unique values of the connection (in total graph)
uniqueVector = np.full_like(np.arange(len(down),dtype=float),uniqueCount) # make a vector of the unique count
return adjMatrix * (down**weight) * (across[:,np.newaxis]**weight) * (uniqueVector**weight) # lets perform the computations
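# Hedged toy example, not part of the original module: metapathMatrix applies a
# -0.5 degree weighting per row, per column, and per count of non-empty columns.
def _demo_metapath_matrix():
    adj = np.array([[1.0, 1.0],
                    [0.0, 1.0]])
    return metapathMatrix(adj)
    # approximately [[0.5, 0.354], [0.0, 0.5]]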
def completePPI(graph,trues,allNodes,adjGraph):
scoredGraph = adjGraph[trues].loc[allNodes]
resultsGraph = np.nan_to_num(np.divide(scoredGraph,scoredGraph))
final = metapathMatrix(resultsGraph) * scoredGraph
final = final.fillna(0)
combinedScores = list(itertools.product(trues,allNodes))
return final
def sPPICompute(graph,proteinNodes,trueP,falseP,idDescription,fh):
#computeType(graph,nodes,trueP,falseP)
return singleHop(graph,proteinNodes,trueP,falseP,idDescription,fh) #computeType(graph,proteinNodes,trueP,falseP)
def PPICompute(graph,proteinNodes,trueP,falseP):
#print('PPI - starting subgraph')
subPROTEIN = graph.subgraph(proteinNodes) # did the PPI filter out some proteins?
#subPROTEIN = subPROTEIN.subgraph( set(subPROTEIN.nodes) - set(nx.isolates(subPROTEIN)) )
trues = set()
neighborSet = set()
#print('PPI - making neighborSet')
for protein in trueP:
if protein in set(proteinNodes):
trues.add(protein)
neighborSet = neighborSet | set(nx.all_neighbors(subPROTEIN, protein))
nodesList = trues | neighborSet
finalGraph = subPROTEIN.subgraph(nodesList)
#print('PPI - making adj subgraph') # this is slow
adjIt = nx.to_pandas_adjacency(finalGraph,weight='combined_score')
# this does our PPI computations with the values we want
#print('PPI - matrix and loop')
RR = completePPI(finalGraph,trues,nodesList,adjIt)
#print('PPI - done with these computations')
return RR
|