| id | content |
|---|---|
11534944
|
import inspect
import subprocess
import importlib
import importlib.util
from typing import Callable
import sys
def pip_install(package, upgrade=False):
if upgrade:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', package, '-U'])
else:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
def import_package(package):
return importlib.import_module(package)
def load_callable(module, className) -> Callable:
return getattr(module, className)
def is_installed(package_name):
    return package_name in sys.modules or importlib.util.find_spec(package_name) is not None
def is_coroutine(obj):
    # 'obj' avoids shadowing the builtin 'object'
    return inspect.iscoroutinefunction(obj)
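# Hedged usage sketch (not part of the original helpers); the 'requests'
# package name is assumed purely for illustration.
if __name__ == '__main__':
    if not is_installed('requests'):
        pip_install('requests')
    requests_mod = import_package('requests')
    get = load_callable(requests_mod, 'get')
    print(is_coroutine(get))  # False: requests.get is a plain function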
|
11534960
|
import random
from tree import AVLTree
from gtree import GraphicalTree
n = 20
random.seed(0)
arr = random.sample(range(n), n)
avl_tree = AVLTree(arr)
print("n={0} Размер <NAME> Высота Средн.высота".format(n))
print("АВЛ {0} {1} {2} {3}".format(avl_tree.size(), avl_tree.check_sum(),
avl_tree.height(), avl_tree.medium_height()))
g_tree = GraphicalTree(avl_tree, "AVL", 1000, 400).start()
|
11535005
|
from django.conf import settings
from django.core.mail import send_mail
from django.shortcuts import render, redirect
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from oldp.apps.contact.forms import ContactForm, ReportContentForm
def form_view(request):
subject_tpl = 'Contact: %(name)s'
message_tpl = 'Name: %(name)s\nEmail: %(email)s\nMessage:\n\n%(message)s'
if request.method == 'POST':
contact_form = ContactForm(request.POST)
if contact_form.is_valid():
subject = subject_tpl % contact_form.cleaned_data
message = message_tpl % contact_form.cleaned_data
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [settings.SITE_EMAIL])
return redirect(reverse('contact:thankyou'))
else:
contact_form = ContactForm()
return render(request, 'contact/form.html', {
'title': _('Contact'),
'form': contact_form
})
def report_content_view(request):
subject_tpl = 'Reported content: %(name)s'
message_tpl = 'Subject: %(subject)s\nSource: %(source)s\nName: %(name)s\nEmail: %(email)s\nMessage:\n\n%(message)s'
if request.method == 'POST':
form = ReportContentForm(request.POST)
if form.is_valid():
subject = subject_tpl % form.cleaned_data
message = message_tpl % form.cleaned_data
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [settings.SITE_EMAIL])
return redirect(reverse('contact:thankyou'))
else:
form = ReportContentForm(initial={'source': request.GET.get('source')})
return render(request, 'contact/report_content.html', {
'title': _('Report content'),
'form': form
})
def thankyou_view(request):
return render(request, 'contact/thankyou.html', {
'title': _('Contact'),
})
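# Hedged wiring sketch (assumed, not taken from the project): a minimal
# urls.py consistent with the reverse('contact:thankyou') calls above.
#
#     from django.urls import path
#     from oldp.apps.contact import views
#
#     app_name = 'contact'
#     urlpatterns = [
#         path('', views.form_view, name='form'),
#         path('report/', views.report_content_view, name='report_content'),
#         path('thankyou/', views.thankyou_view, name='thankyou'),
#     ]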
|
11535065
|
import time
import unittest
import threading
import socket
import struct
from contextlib import closing
from pyads import add_route_to_plc
from pyads.constants import PORT_REMOTE_UDP
from pyads.utils import platform_is_linux
class PLCRouteTestCase(unittest.TestCase):
SENDER_AMS = "1.2.3.4.1.1"
PLC_IP = "127.0.0.1"
USERNAME = "user"
PASSWORD = "password"
ROUTE_NAME = "Route"
ADDING_AMS_ID = "5.6.7.8.1.1"
HOSTNAME = "Host"
PLC_AMS_ID = "11.22.33.44.1.1"
def setUp(self):
pass
def tearDown(self):
pass
def plc_route_receiver(self):
with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
sock.bind(("", PORT_REMOTE_UDP))
# Keep looping until we get an add address packet
addr = [0]
while addr[0] != self.PLC_IP:
data, addr = sock.recvfrom(1024)
# Decipher data and 'add route'
data = data[12:] # Remove our data header
sending_ams_bytes = data[:6] # Sending AMS address
sending_ams = ".".join(map(str, struct.unpack(">6B", sending_ams_bytes)))
data = data[6:]
comm_port = struct.unpack("<H", data[:2])[
0
] # Internal communication port (PORT_SYSTEMSERVICE)
data = data[2:]
command_code = struct.unpack("<H", data[:2])[0]  # Command code to write to PLC
data = data[2:]
data = data[4:] # Remove protocol bytes
len_route_name = struct.unpack("<H", data[:2])[0] # Length of route name
data = data[2:]
route_name = data[:len_route_name].decode("utf-8")  # Null-terminated route name
data = data[len_route_name:]
data = data[2:] # Remove protocol bytes
len_ams_id = struct.unpack("<H", data[:2])[0] # Length of adding AMS ID
data = data[2:]
adding_ams_id_bytes = data[:len_ams_id] # AMS ID being added to PLC
adding_ams_id = ".".join(
map(str, struct.unpack(">6B", adding_ams_id_bytes))
)
data = data[len_ams_id:]
data = data[2:] # Remove protocol bytes
len_username = struct.unpack("<H", data[:2])[0] # Length of PLC username
data = data[2:]
username = data[:len_username].decode("utf-8") # Null terminated username
data = data[len_username:]
data = data[2:] # Remove protocol bytes
len_password = struct.unpack("<H", data[:2])[0] # Length of PLC password
data = data[2:]
password = data[:len_password].decode("utf-8")  # Null-terminated password
data = data[len_password:]
data = data[2:] # Remove protocol bytes
len_sending_host = struct.unpack("<H", data[:2])[0] # Length of host name
data = data[2:]
hostname = data[:len_sending_host].decode(
"utf-8"
) # Null terminated hostname
data = data[len_sending_host:]
self.assertEqual(len(data), 0) # We should have popped everything from data
self.assertEqual(sending_ams, self.SENDER_AMS)
self.assertEqual(comm_port, 10000)
self.assertEqual(command_code, 5)
self.assertEqual(
len_sending_host, len(self.HOSTNAME) + 1
) # +1 for the null terminator
self.assertEqual(hostname, self.HOSTNAME + "\0")
self.assertEqual(adding_ams_id, self.ADDING_AMS_ID)
self.assertEqual(
len_username, len(self.USERNAME) + 1
) # +1 for the null terminator
self.assertEqual(username, self.USERNAME + "\0")
# Don't check the password since that's part of the correct/incorrect response test
# We can also assume that if the data after the password is correct, then the password was sent/read correctly
# self.assertEqual(len_password, len(self.PASSWORD) + 1) # +1 for the null terminator
# self.assertEqual(password, self.PASSWORD + '\0')
self.assertEqual(
len_route_name, len(self.ROUTE_NAME) + 1
) # +1 for the null terminator
self.assertEqual(route_name, self.ROUTE_NAME + "\0")
password_correct = password == self.PASSWORD + "\0"
# Build response
response = struct.pack(
">12s", b"\x03\x66\x14\x71\x00\x00\x00\x00\x06\x00\x00\x80"
) # Same header as being sent to the PLC, but with 80 at the end
response += struct.pack(
">6B", *map(int, self.PLC_AMS_ID.split("."))
) # PLC AMS id
response += struct.pack(
"<H", 10000
) # Internal communication port (PORT_SYSTEMSERVICE)
response += struct.pack(">2s", b"\x01\x00") # Command code read
response += struct.pack(
">4s", b"\x00\x00\x01\x04"
) # Block of unknown protocol
if password_correct:
response += struct.pack(">3s", b"\x04\x00\x00") # Password Correct
else:
response += struct.pack(">3s", b"\x00\x04\x07") # Password Incorrect
response += struct.pack(">2s", b"\x00\x00") # Block of unknown protocol
# Send our response back to sender
sock.sendto(response, addr)
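# Sketch of the add-route UDP packet layout, reconstructed from the parsing
# above (byte widths follow the struct format strings; this is an inference,
# not official documentation):
#   12 bytes   header
#    6 bytes   sender AMS net id      (">6B")
#    2 bytes   communication port     ("<H", PORT_SYSTEMSERVICE = 10000)
#    2 bytes   command code           ("<H", 5 = add route)
#    4 bytes   protocol block
#   then length-prefixed ("<H"), null-terminated fields in this order:
#   route name, adding AMS net id, username, password, hostname,
#   each field after the first preceded by 2 further protocol bytes.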
def test_correct_route(self):
if platform_is_linux():
# Start receiving listener
route_thread = threading.Thread(target=self.plc_route_receiver)
route_thread.daemon = True
route_thread.start()
time.sleep(1)
# Try to set up a route with ourselves using all the optionals
try:
result = add_route_to_plc(
self.SENDER_AMS,
self.HOSTNAME,
self.PLC_IP,
self.USERNAME,
self.PASSWORD,
route_name=self.ROUTE_NAME,
added_net_id=self.ADDING_AMS_ID,
)
except Exception:
result = None
self.assertTrue(result)
def test_incorrect_route(self):
if platform_is_linux():
# Start receiving listener
route_thread = threading.Thread(target=self.plc_route_receiver)
route_thread.daemon = True
route_thread.start()
# Try to set up a route with ourselves using all the optionals AND an incorrect password
try:
result = add_route_to_plc(
self.SENDER_AMS,
self.HOSTNAME,
self.PLC_IP,
self.USERNAME,
"<PASSWORD>",
route_name=self.ROUTE_NAME,
added_net_id=self.ADDING_AMS_ID,
)
except Exception:
result = None
self.assertFalse(result)
if __name__ == "__main__":
unittest.main()
|
11535085
|
import re
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlenlp.ops import einsum
global_dtype = paddle.get_default_dtype()
def sample_logits(embedding, bias, labels, inputs, sampler):
true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
n_sample = neg_samples.shape[0]
b1, b2 = labels.shape[0], labels.shape[1]
all_ids = paddle.concat([paddle.reshape(labels, shape=[-1]), neg_samples])
all_w = embedding(all_ids)
true_w = paddle.reshape(all_w[:-n_sample], shape=[b1, b2, -1])
sample_w = paddle.reshape(all_w[-n_sample:], shape=[n_sample, -1])
all_b = paddle.gather(bias, all_ids)
true_b = paddle.reshape(all_b[:-n_sample], shape=[b1, b2])
sample_b = all_b[-n_sample:]
hit = paddle.cast(
(labels.unsqueeze([2]) == neg_samples), dtype=global_dtype).detach()
true_logits = paddle.sum(true_w * inputs, axis=-1) + true_b - true_log_probs
sample_logits = paddle.transpose(
paddle.matmul(sample_w, paddle.transpose(inputs, [0, 2, 1])),
[0, 2, 1]) + sample_b - samp_log_probs
sample_logits = sample_logits - 1e30 * hit
logits = paddle.concat([true_logits.unsqueeze([2]), sample_logits], -1)
return logits
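# Shape sketch for sample_logits, inferred from the ops above (an assumption,
# not from the original docs): inputs is [bsz, seq_len, d_proj] and labels is
# [bsz, seq_len]; the returned logits are [bsz, seq_len, 1 + n_sample], with
# the true-class logit at index 0 and the sampled negatives after it.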
class ProjAdaptiveSoftmax(nn.Layer):
"""
Combine projection and logsoftmax.
"""
def __init__(self,
n_token,
d_embed,
d_proj,
cutoffs,
div_val=1,
keep_order=False):
super(ProjAdaptiveSoftmax, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.num_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.num_clusters
if self.num_clusters > 0:
self.cluster_weight = paddle.create_parameter(
shape=[self.num_clusters, self.d_embed],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Normal(
mean=0.0, std=0.01))
self.cluster_bias = paddle.create_parameter(
shape=[self.num_clusters],
dtype=global_dtype,
is_bias=True,
default_initializer=paddle.nn.initializer.Constant(0.0))
self.out_layers_weight = nn.ParameterList()
self.out_layers_bias = nn.ParameterList()
self.out_projs = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(
paddle.create_parameter(
shape=[d_proj, d_embed],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Normal(
mean=0.0, std=0.01)))
else:
self.out_projs.append(None)
self.out_layers_weight.append(
paddle.create_parameter(
shape=[n_token, d_embed],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Constant(0.0)))
self.out_layers_bias.append(
paddle.create_parameter(
shape=[n_token],
dtype=global_dtype,
is_bias=True,
default_initializer=paddle.nn.initializer.Constant(0.0)))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val**i)
self.out_projs.append(
paddle.create_parameter(
shape=[d_proj, d_emb_i],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Normal(
mean=0.0, std=0.01)))
self.out_layers_weight.append(
paddle.create_parameter(
shape=[r_idx - l_idx, d_emb_i],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Uniform(
low=-(r_idx - l_idx)**(-1.0 / 2.0),
high=(r_idx - l_idx)**(-1.0 / 2.0))))
self.out_layers_bias.append(
paddle.create_parameter(
shape=[r_idx - l_idx],
dtype=global_dtype,
is_bias=True,
default_initializer=paddle.nn.initializer.Uniform(
low=-(r_idx - l_idx)**(-1.0 / 2.0),
high=(r_idx - l_idx)**(-1.0 / 2.0))))
self.keep_order = keep_order
def _compute_logits(self, hidden, weight, bias, proj=None):
if proj is None:
logit = F.linear(hidden, weight.t(), bias=bias)
else:
proj_hid = F.linear(hidden, proj)
logit = F.linear(proj_hid, weight.t(), bias=bias)
return logit
def forward(self, hidden, target, keep_order=False):
assert (hidden.shape[0] == target.shape[0])
if self.num_clusters == 0:
logit = self._compute_logits(hidden, self.out_layers_weight[0],
self.out_layers_bias[0],
self.out_projs[0])
nll = -F.log_softmax(logit, axis=-1)  # log_softmax is numerically stabler than log(softmax)
idx = paddle.concat(
[
paddle.arange(0, nll.shape[0]).unsqueeze([1]),
target.unsqueeze(1)
],
axis=1)
nll = paddle.gather_nd(nll, idx)
else:
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weight[0][l_idx:r_idx]
bias_i = self.out_layers_bias[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weight[i]
bias_i = self.out_layers_bias[i]
if i == 0:
weight_i = paddle.concat(
[weight_i, self.cluster_weight], axis=0)
bias_i = paddle.concat([bias_i, self.cluster_bias], axis=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[
0], self.out_projs[0]
head_logit = self._compute_logits(hidden, head_weight, head_bias,
head_proj)
head_logprob = F.log_softmax(head_logit, axis=-1)
nll = paddle.zeros_like(target, dtype=hidden.dtype)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = paddle.cast(target >= l_idx, dtype="int64") * paddle.cast(target < r_idx, dtype="int64")
indices_i = paddle.nonzero(mask_i).squeeze([1])
if paddle.numel(indices_i) == 0:
continue
target_i = paddle.gather(target, indices_i, axis=0) - l_idx
head_logprob_i = paddle.gather(head_logprob, indices_i, axis=0)
if i == 0:
target_i_idx = paddle.concat(
[
paddle.arange(0, head_logprob_i.shape[0]).unsqueeze(
[1]), target_i.unsqueeze([1])
],
axis=1)
logprob_i = head_logprob_i.gather_nd(target_i_idx)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]  # out_projs stores raw parameters (or None), so no .weight lookup
hidden_i = paddle.gather(hidden, indices_i, axis=0)
tail_logit_i = self._compute_logits(hidden_i, weight_i,
bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, axis=-1)
target_i_idx = paddle.concat(
[
paddle.arange(0, tail_logprob_i.shape[0]).unsqueeze(
[1]), target_i.unsqueeze([1])
],
axis=1)
logprob_i = tail_logprob_i.gather_nd(target_i_idx)
logprob_i = head_logprob_i[:, -i] + logprob_i
if self.keep_order or keep_order:
nll = paddle.scatter(nll, indices_i, -logprob_i)
else:
index = paddle.arange(offset, offset + logprob_i.shape[0],
1)
nll = paddle.scatter(nll, index, -logprob_i)
offset += logprob_i.shape[0]
return nll
class LogUniformSampler(object):
def __init__(self, range_max, n_sample):
with paddle.no_grad():
self.range_max = range_max
log_indices = paddle.log(
paddle.arange(
1., range_max + 2., 1., dtype=global_dtype))
self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
self.log_q = paddle.cast(
paddle.log(
paddle.exp(-(paddle.log1p(-paddle.cast(
self.dist, dtype=global_dtype)) * 2 * n_sample)) - 1),
dtype=global_dtype)
self.n_sample = n_sample
def sample(self, labels):
n_sample = self.n_sample
n_tries = 2 * n_sample
batch_size = labels.shape[0]
with paddle.no_grad():
neg_samples = paddle.unique(
paddle.multinomial(
self.dist, n_tries, replacement=True))
true_log_probs = paddle.gather(self.log_q, labels.flatten())
true_log_probs = paddle.reshape(
true_log_probs, shape=[batch_size, -1])
samp_log_probs = paddle.gather(self.log_q, neg_samples)
return true_log_probs, samp_log_probs, neg_samples
class PositionEmbedding(nn.Layer):
def __init__(self, emb_dim):
super(PositionEmbedding, self).__init__()
self.emb_dim = emb_dim
self.inv_freq = 1.0 / (10000.0**(paddle.arange(
0.0, emb_dim, 2.0, dtype=global_dtype) / emb_dim))
def forward(self, pos_seq, bsz=None):
sinusoid_inp = paddle.matmul(
pos_seq.unsqueeze([1]), self.inv_freq.unsqueeze([0]))
pos_emb = paddle.concat(
[paddle.sin(sinusoid_inp), paddle.cos(sinusoid_inp)], axis=-1)
if bsz is not None:
pos_emb = pos_emb.unsqueeze([0]).expand([bsz, -1, -1])
pos_emb.stop_gradient = True
return pos_emb
else:
pos_emb = pos_emb.unsqueeze([0])
pos_emb.stop_gradient = True
return pos_emb
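# Usage sketch (shapes inferred from the code above): for pos_seq of length
# klen, forward() returns a [1, klen, emb_dim] tensor whose first half along
# the last axis holds sin features and second half cos features; passing bsz
# expands it to [bsz, klen, emb_dim]. Gradients are stopped in both cases.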
class PositionwiseFFN(nn.Layer):
def __init__(self, d_model, d_inner, dropout, normalize_before=False):
super(PositionwiseFFN, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.CoreNet = nn.Sequential(
nn.Linear(
d_model,
d_inner,
weight_attr=paddle.nn.initializer.Normal(
mean=0.0, std=0.01),
bias_attr=paddle.nn.initializer.Constant(0.0)),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(
d_inner,
d_model,
weight_attr=paddle.nn.initializer.Normal(
mean=0.0, std=0.01),
bias_attr=paddle.nn.initializer.Constant(0.0)),
nn.Dropout(dropout))
self.layer_norm = nn.LayerNorm(
d_model,
weight_attr=paddle.nn.initializer.Normal(
mean=1.0, std=0.01),
bias_attr=paddle.nn.initializer.Constant(0.0))
self.normalize_before = normalize_before
def forward(self, inp):
if self.normalize_before:
core_out = self.CoreNet(self.layer_norm(inp))
output = core_out + inp
else:
core_out = self.CoreNet(inp)
output = self.layer_norm(inp + core_out)
return output
class MultiHeadAttn(nn.Layer):
def __init__(self,
n_head,
d_model,
d_head,
dropout,
attn_dropout=0,
normalize_before=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.q_proj = nn.Linear(
d_model,
n_head * d_head,
weight_attr=paddle.nn.initializer.Normal(
mean=0.0, std=0.01),
bias_attr=False)
self.kv_proj = nn.Linear(
d_model,
2 * n_head * d_head,
weight_attr=paddle.nn.initializer.Normal(
mean=0.0, std=0.01),
bias_attr=False)
self.drop = nn.Dropout(p=dropout)
self.attn_drop = nn.Dropout(p=attn_dropout)
self.o_proj = nn.Linear(
n_head * d_head,
d_model,
weight_attr=paddle.nn.initializer.Normal(
mean=0.0, std=0.01),
bias_attr=False)
self.layer_norm = nn.LayerNorm(
d_model,
weight_attr=paddle.nn.initializer.Normal(
mean=1.0, std=0.01),
bias_attr=paddle.nn.initializer.Constant(0.0))
self.scale = 1 / (d_head**0.5)
self.normalize_before = normalize_before
def forward(self, h, attn_mask=None, mems=None):
if mems is not None:
c = paddle.concat([mems, h], axis=1)
else:
c = h
if self.normalize_before:
c = self.layer_norm(c)
head_q = self.q_proj(h)
head_k, head_v = paddle.chunk(self.kv_proj(c), chunks=2, axis=-1)
head_q = paddle.reshape(
head_q, shape=[h.shape[0], h.shape[1], self.n_head, self.d_head])
head_k = paddle.reshape(
head_k, shape=[c.shape[0], c.shape[1], self.n_head, self.d_head])
head_v = paddle.reshape(
head_v, shape=[c.shape[0], c.shape[1], self.n_head, self.d_head])
attn_score = einsum('bind,bjnd->bnij', head_q, head_k)
attn_score = attn_score * self.scale
if attn_mask is not None:
attn_score = attn_score - 1e30 * attn_mask  # large finite value; float('inf') * 0 yields NaN
attn_prob = F.softmax(attn_score, axis=-1)
attn_prob = self.attn_drop(attn_prob)
attn_vec = einsum('bnij,bjnd->bind', attn_prob, head_v)
attn_vec = paddle.reshape(
attn_vec,
shape=[
attn_vec.shape[0], attn_vec.shape[1], self.n_head * self.d_head
])
attn_out = self.o_proj(attn_vec)
attn_out = self.drop(attn_out)
if self.normalize_before:
output = h + attn_out
else:
output = self.layer_norm(h + attn_out)
return output
class RelMultiHeadAttn(nn.Layer):
def __init__(self,
n_head,
d_model,
d_head,
dropout,
attn_dropout=0,
tgt_len=None,
ext_len=None,
mem_len=None,
normalize_before=False):
super(RelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.qkv_proj = nn.Linear(
d_model,
3 * n_head * d_head,
weight_attr=paddle.nn.initializer.Normal(
mean=0.0, std=0.01),
bias_attr=False)
self.drop = nn.Dropout(dropout)
self.attn_drop = nn.Dropout(attn_dropout)
self.o_proj = nn.Linear(
n_head * d_head,
d_model,
weight_attr=paddle.nn.initializer.Normal(
mean=0.0, std=0.01),
bias_attr=False)
self.layer_norm = nn.LayerNorm(
d_model,
weight_attr=paddle.nn.initializer.Normal(
mean=1.0, std=0.01),
bias_attr=paddle.nn.initializer.Constant(0.0))
self.scale = 1 / (d_head**0.5)
self.normalize_before = normalize_before
def _rel_shift(self, x, zero_triu=False):
x_shape = x.shape
zero_pad = paddle.zeros(
[x_shape[0], x_shape[1], x_shape[2], 1], dtype=x.dtype)
x_padded = paddle.concat([zero_pad, x], axis=-1)
x_padded = paddle.reshape(
x_padded,
shape=[x_shape[0], x_shape[1], x_shape[3] + 1, x_shape[2]])
x = paddle.reshape(x_padded[:, :, 1:, :], shape=x_shape)
if zero_triu:
ones = paddle.ones([x_shape[2], x_shape[3]])
x = x * paddle.tril(
ones, diagonal=x_shape[3] - x_shape[2]).unsqueeze([2, 3])
return x
def forward(self, w, r, attn_mask=None, mems=None):
raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
self.r_proj = nn.Linear(
self.d_model,
self.n_head * self.d_head,
weight_attr=paddle.nn.initializer.Normal(
mean=0.0, std=0.01),
bias_attr=False)
def forward(self, w, r, r_w_bias, r_r_bias, attn_mask=None, mems=None):
qlen, rlen, bsz = w.shape[1], r.shape[1], w.shape[0]
if mems is not None:
cat = paddle.concat([mems, w], axis=1)
if self.normalize_before:
w_heads = self.qkv_proj(self.layer_norm(cat))
else:
w_heads = self.qkv_proj(cat)
r_head_k = self.r_proj(r)
w_head_q, w_head_k, w_head_v = paddle.chunk(
w_heads, chunks=3, axis=-1)
w_head_q = w_head_q[:, -qlen:, :]
else:
if self.normalize_before:
w_heads = self.qkv_proj(self.layer_norm(w))
else:
w_heads = self.qkv_proj(w)
r_head_k = self.r_proj(r)
w_head_q, w_head_k, w_head_v = paddle.chunk(
w_heads, chunks=3, axis=-1)
klen = w_head_k.shape[1]
w_head_q = paddle.reshape(
w_head_q, shape=[bsz, qlen, self.n_head, self.d_head])
w_head_k = paddle.reshape(
w_head_k, shape=[bsz, klen, self.n_head, self.d_head])
w_head_v = paddle.reshape(
w_head_v, shape=[bsz, klen, self.n_head, self.d_head])
r_head_k = paddle.reshape(
r_head_k, shape=[bsz, rlen, self.n_head, self.d_head])
rw_head_q = w_head_q + r_w_bias
AC = einsum('bind,bjnd->bnij', rw_head_q, w_head_k)
rr_head_q = w_head_q + r_r_bias
BD = einsum('bind,bjnd->bnij', rr_head_q, r_head_k)
BD = self._rel_shift(BD)
attn_score = AC + BD
attn_score = attn_score * self.scale
if attn_mask is not None:
attn_score = attn_score - 1e30 * attn_mask
attn_prob = F.softmax(attn_score, axis=-1)
attn_prob = self.attn_drop(attn_prob)
attn_vec = einsum('bnij,bjnd->bind', attn_prob, w_head_v)
attn_vec = paddle.reshape(
attn_vec,
shape=[
attn_vec.shape[0], attn_vec.shape[1], self.n_head * self.d_head
])
attn_out = self.o_proj(attn_vec)
attn_out = self.drop(attn_out)
if self.normalize_before:
output = w + attn_out
else:
output = self.layer_norm(w + attn_out)
return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
qlen, bsz = w.shape[1], w.shape[0]
if mems is not None:
cat = paddle.concat([mems, w], 1)
if self.normalize_before:
w_heads = self.qkv_proj(self.layer_norm(cat))
else:
w_heads = self.qkv_proj(cat)
w_head_q, w_head_k, w_head_v = paddle.chunk(
w_heads, chunks=3, axis=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.normalize_before:
w_heads = self.qkv_proj(self.layer_norm(w))
else:
w_heads = self.qkv_proj(w)
w_head_q, w_head_k, w_head_v = paddle.chunk(
w_heads, chunks=3, axis=-1)
klen = w_head_k.shape[1]
w_head_q = paddle.reshape(
w_head_q,
shape=[
w_head_q.shape[0], w_head_q.shape[1], self.n_head, self.d_head
])
w_head_k = paddle.reshape(
w_head_k,
shape=[
w_head_k.shape[0], w_head_k.shape[1], self.n_head, self.d_head
])
w_head_v = paddle.reshape(
w_head_v,
shape=[
w_head_v.shape[0], w_head_v.shape[1], self.n_head, self.d_head
])
if klen > r_emb.shape[0]:
r_emb_pad = r_emb[0:1].expand([klen - r_emb.shape[0], -1, -1])
r_emb = paddle.concat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand([klen - r_bias.shape[0], -1])
r_bias = paddle.concat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
rw_head_q = w_head_q + r_w_bias.unsqueeze([0])
AC = einsum('bind,bjnd->bnij', rw_head_q, w_head_k)
r_emb = r_emb.unsqueeze([0]).expand([bsz, -1, -1, -1])
B_ = einsum('bind,bjnd->bnij', w_head_q, r_emb)
D_ = r_bias.unsqueeze([0, 2])
BD = self._rel_shift(B_ + D_)
attn_score = AC + BD
attn_score = attn_score * self.scale
if attn_mask is not None:
attn_score = attn_score - 1e30 * attn_mask  # large finite value, as in RelPartialLearnableMultiHeadAttn
attn_prob = F.softmax(attn_score, axis=-1)
attn_prob = self.attn_drop(attn_prob)
attn_vec = einsum('bnij,bjnd->bind', attn_prob, w_head_v)
attn_vec = paddle.reshape(
attn_vec,
shape=[
attn_vec.shape[0], attn_vec.shape[1], self.n_head * self.d_head
])
attn_out = self.o_proj(attn_vec)
attn_out = self.drop(attn_out)
if self.normalize_before:
output = w + attn_out
else:
output = self.layer_norm(w + attn_out)
return output
class DecoderLayer(nn.Layer):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(DecoderLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout,
**kwargs)
self.pos_ff = PositionwiseFFN(
d_model,
d_inner,
dropout,
normalize_before=kwargs.get('normalize_before'))
def forward(self, dec_inp, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems)
output = self.pos_ff(output)
return output
class RelLearnableDecoderLayer(nn.Layer):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(RelLearnableDecoderLayer, self).__init__()
self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head,
dropout, **kwargs)
self.pos_ff = PositionwiseFFN(
d_model,
d_inner,
dropout,
normalize_before=kwargs.get('normalize_before'))
def forward(self,
dec_inp,
r_emb,
r_w_bias,
r_bias,
dec_attn_mask=None,
mems=None):
output = self.dec_attn(
dec_inp,
r_emb,
r_w_bias,
r_bias,
attn_mask=dec_attn_mask,
mems=mems)
output = self.pos_ff(output)
return output
class RelPartialLearnableDecoderLayer(nn.Layer):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(RelPartialLearnableDecoderLayer, self).__init__()
self.dec_attn = RelPartialLearnableMultiHeadAttn(
n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFFN(
d_model,
d_inner,
dropout,
normalize_before=kwargs.get('normalize_before'))
def forward(self,
dec_inp,
r,
r_w_bias,
r_r_bias,
dec_attn_mask=None,
mems=None):
output = self.dec_attn(
dec_inp, r, r_w_bias, r_r_bias, attn_mask=dec_attn_mask, mems=mems)
output = self.pos_ff(output)
return output
class AdaptiveEmbedding(nn.Layer):
def __init__(self,
n_token,
d_embed,
d_proj,
cutoffs,
div_val=1,
sample_softmax=False):
super(AdaptiveEmbedding, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = cutoffs + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.emb_scale = d_proj**0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.LayerList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(
nn.Embedding(
n_token,
d_embed,
sparse=sample_softmax > 0,
weight_attr=paddle.nn.initializer.Normal(
mean=0.0, std=0.01)))
if d_proj != d_embed:
self.emb_projs.append(
paddle.create_parameter(
shape=[d_embed, d_proj],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Normal(
mean=0.0, std=0.01)))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val**i)
self.emb_layers.append(
nn.Embedding(
r_idx - l_idx,
d_emb_i,
weight_attr=paddle.nn.initializer.Normal(
mean=0.0, std=0.01)))
self.emb_projs.append(
paddle.create_parameter(
shape=[d_emb_i, d_proj],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Normal(
mean=0.0, std=0.01)))
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
inp_flat = paddle.reshape(inp, shape=[-1])
emb_flat = paddle.zeros(
[inp_flat.shape[0], self.d_proj], dtype=global_dtype)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = paddle.nonzero(mask_i).squeeze([1])
if indices_i.numel() == 0:
continue
inp_i = paddle.gather(inp_flat, indices_i, axis=0) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
emb_flat = paddle.scatter(emb_flat, indices_i, emb_i)
embed = paddle.reshape(
    emb_flat, shape=inp.shape + [self.d_proj])  # list.append returns None, so concatenate instead
embed = embed * self.emb_scale
return embed
class MemTransformerLM(nn.Layer):
def __init__(self,
n_token,
n_layer,
n_head,
d_model,
d_head,
d_inner,
dropout,
attn_dropout,
tie_weight=True,
d_embed=None,
div_val=1,
tie_projs=[False],
normalize_before=False,
tgt_len=None,
ext_len=None,
mem_len=None,
cutoffs=[],
adapt_inp=False,
same_length=False,
attn_type=0,
clamp_len=-1,
sample_softmax=-1):
super(MemTransformerLM, self).__init__()
self.n_token = n_token
d_embed = d_model if d_embed is None else d_embed
self.d_embed = d_embed
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.word_emb = AdaptiveEmbedding(
n_token, d_embed, d_model, cutoffs, div_val=div_val)
self.drop = nn.Dropout(dropout)
self.n_layer = n_layer
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
self.max_klen = tgt_len + ext_len + mem_len
self.attn_type = attn_type
self.layers = nn.LayerList()
if attn_type == 0:
for i in range(n_layer):
self.layers.append(
RelPartialLearnableDecoderLayer(
n_head,
d_model,
d_head,
d_inner,
dropout,
tgt_len=tgt_len,
ext_len=ext_len,
mem_len=mem_len,
attn_dropout=attn_dropout,
normalize_before=normalize_before))
elif attn_type == 1:
for i in range(n_layer):
self.layers.append(
RelLearnableDecoderLayer(
n_head,
d_model,
d_head,
d_inner,
dropout,
tgt_len=tgt_len,
ext_len=ext_len,
mem_len=mem_len,
attn_dropout=attn_dropout,
normalize_before=normalize_before))
elif attn_type in [2, 3]:
for i in range(n_layer):
self.layers.append(
DecoderLayer(
n_head,
d_model,
d_head,
d_inner,
dropout,
attn_dropout=attn_dropout,
normalize_before=normalize_before))
self.sample_softmax = sample_softmax
if sample_softmax > 0:
self.out_layer = nn.Linear(
d_model,
n_token,
weight_attr=paddle.nn.initializer.Normal(
mean=0.0, std=0.01),
bias_attr=paddle.nn.initializer.Constant(0.0))
self.tie_weight = tie_weight
self.sampler = LogUniformSampler(n_token, sample_softmax)
else:
self.crit = ProjAdaptiveSoftmax(
n_token, d_embed, d_model, cutoffs, div_val=div_val)
if tie_weight:
for i in range(len(self.crit.out_layers_weight)):
self.crit.out_layers_weight[i] = self.word_emb.emb_layers[
i].weight
if tie_projs:
for i, tie_proj in enumerate(tie_projs):
if tie_proj and div_val == 1 and d_model != d_embed:
self.crit.out_projs[i] = self.word_emb.emb_projs[0]
elif tie_proj and div_val != 1:
self.crit.out_projs[i] = self.word_emb.emb_projs[i]
self.same_length = same_length
self.clamp_len = clamp_len
self._create_params()
def backward_compatible(self):
self.sample_softmax = -1
def _create_params(self):
if self.attn_type == 0:
self.pos_emb = PositionEmbedding(self.d_model)
self.r_w_bias = paddle.create_parameter(
shape=[self.n_head, self.d_head],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Normal(
mean=0.0, std=0.01))
self.r_r_bias = paddle.create_parameter(
shape=[self.n_head, self.d_head],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Normal(
mean=0.0, std=0.01))
elif self.attn_type == 1:
self.r_emb = paddle.create_parameter(
shape=[self.n_layer, self.max_klen, self.n_head, self.d_head],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Normal(
mean=0.0, std=0.01))
self.r_w_bias = paddle.create_parameter(
shape=[self.n_layer, self.n_head, self.d_head],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Normal(
mean=0.0, std=0.01))
self.r_bias = paddle.create_parameter(
shape=[self.n_layer, self.max_klen, self.n_head],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Normal(
mean=0.0, std=0.01))
elif self.attn_type == 2:
self.pos_emb = PositionEmbedding(self.d_model)
elif self.attn_type == 3:
self.r_emb = paddle.create_parameter(
shape=[self.n_layer, self.max_klen, self.n_head, self.d_head],
dtype=global_dtype,
default_initializer=paddle.nn.initializer.Normal(
mean=0.0, std=0.01))
def reset_length(self, tgt_len, ext_len, mem_len):
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
def init_mems(self, batch_size, d_model):
if self.mem_len > 0:
mems = []
for _ in range(self.n_layer + 1):
empty = paddle.empty(
shape=[batch_size, 0, d_model], dtype=global_dtype)
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, qlen, mlen):
if mems is None: return None
assert len(hids) == len(
mems), "length of hids and length of mems must be the same. "
with paddle.no_grad():
new_mems = []
end_idx = mlen + max(0, qlen - 0 - self.ext_len)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = paddle.concat([mems[i], hids[i]], axis=1)
new_mems.append(cat[:, beg_idx:end_idx].detach())
return new_mems
def _forward(self, dec_inputs, mems=None):
bsz, qlen = dec_inputs.shape
word_emb = self.word_emb(dec_inputs)
mlen = mems[0].shape[1] if mems is not None else 0
klen = mlen + qlen
if self.same_length:
all_ones = paddle.ones(shape=[qlen, klen], dtype=word_emb.dtype)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (paddle.triu(
all_ones, diagonal=1 + mlen) + paddle.tril(
all_ones, -mask_shift_len)).unsqueeze([0, 1])
else:
dec_attn_mask = paddle.ones(
shape=[qlen, klen], dtype=word_emb.dtype)
dec_attn_mask = paddle.triu(
dec_attn_mask, diagonal=1 + mlen).unsqueeze([0, 1])
hids = []
if self.attn_type == 0:
pos_seq = paddle.arange(klen - 1, -1, -1.0, dtype=word_emb.dtype)
if self.clamp_len > 0:
# TODO: clamp and clip
pos_seq = paddle.clip(pos_seq, max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq, bsz)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
core_out = layer(
core_out,
pos_emb,
self.r_w_bias,
self.r_r_bias,
dec_attn_mask=dec_attn_mask,
mems=mems_i)
hids.append(core_out)
elif self.attn_type == 1:
core_out = self.drop(word_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
if self.clamp_len > 0:
r_emb = self.r_emb[i][-self.clamp_len:]
r_bias = self.r_bias[i][-self.clamp_len:]
else:
r_emb, r_bias = self.r_emb[i], self.r_bias[i]
mems_i = None if mems is None else mems[i]
core_out = layer(
core_out,
r_emb,
self.r_w_bias[i],
r_bias,
dec_attn_mask=dec_attn_mask,
mems=mems_i)
hids.append(core_out)
elif self.attn_type == 2:
pos_seq = paddle.arange(klen - 1, -1, -1.0, dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq = paddle.clip(pos_seq, max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq, bsz)
core_out = self.drop(word_emb + pos_emb[-qlen:])
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
if mems_i is not None and i == 0:
mems_i += pos_emb[:mlen]
core_out = layer(
core_out, dec_attn_mask=dec_attn_mask, mems=mems_i)
hids.append(core_out)
elif self.attn_type == 3:
core_out = self.drop(word_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
if mems_i is not None and mlen > 0:
cur_emb = self.r_emb[i][:-qlen]
cur_size = cur_emb.shape[0]
if cur_size < mlen:
    cur_emb_pad = cur_emb[0:1].expand([mlen - cur_size, -1, -1])
    cur_emb = paddle.concat([cur_emb_pad, cur_emb], 0)
else:
    cur_emb = cur_emb[-mlen:]
mems_i += paddle.reshape(cur_emb, [mlen, 1, -1])
core_out += paddle.reshape(self.r_emb[i][-qlen:], [qlen, 1, -1])
core_out = layer(
core_out, dec_attn_mask=dec_attn_mask, mems=mems_i)
hids.append(core_out)
core_out = self.drop(core_out)
new_mems = self._update_mems(hids, mems, qlen, mlen)  # pass (qlen, mlen) in the order the signature declares
return core_out, new_mems
def forward(self, data, target, *mems):
if not mems:
batch_size = data.shape[0]
mems = self.init_mems(batch_size, self.d_model)
hidden, new_mems = self._forward(data, mems=mems)
# TODO(FrostML): use getitem.
tgt_len = target.shape[1]
pred_hid = paddle.slice(hidden, [1], [-tgt_len], [hidden.shape[1]])
if self.sample_softmax > 0 and self.training:
assert self.tie_weight, "tie_weight must be True if sample_softmax > 0"
logit = sample_logits(self.word_emb, self.out_layer.bias, target,
pred_hid, self.sampler)
loss = -F.log_softmax(logit, axis=-1)[:, :, 0]
else:
loss = self.crit(
paddle.reshape(
pred_hid, shape=[-1, pred_hid.shape[-1]]),
paddle.reshape(
target, shape=[-1]))
if new_mems is None:
return [loss.mean()]
else:
return [loss.mean()] + new_mems
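# Minimal instantiation sketch (hyperparameter values assumed purely for
# illustration):
# model = MemTransformerLM(n_token=10000, n_layer=4, n_head=8, d_model=256,
#                          d_head=32, d_inner=1024, dropout=0.1,
#                          attn_dropout=0.0, tgt_len=64, ext_len=0,
#                          mem_len=64, cutoffs=[1000, 5000])
# data, target: int64 tensors of shape [batch_size, tgt_len];
# ret = model(data, target); loss, mems = ret[0], ret[1:]
# Pass *mems back into the next call to reuse the cached hidden states.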
|
11535086
|
import unittest
import numpy as np
from data_holders import BBoxDetInst, PBoxDetInst, GroundTruthInstance
import os
import shutil
import json
import read_files
class TestReadPBoxJson(unittest.TestCase):
def setUp(self):
self.det_files_root = '/tmp/pdq_utest_read_files/'
if not os.path.isdir(self.det_files_root):
os.makedirs(self.det_files_root)
self.default_det_dict = {'bbox': [750, 750, 1250, 1250], "label_probs": [0, 1, 0]}
self.default_covar = [[1000, 0], [0, 1000]]
self.default_det_classes = ['background', 'square', 'cross']
self.default_img_names = ['square.jpg']
self.default_gt_classes = ['none', 'square', 'cross', 'diamond']
self.default_gt_class_ids_dict = {'none': 0, 'square': 1, 'cross': 2, 'diamond': 3}
def tearDown(self):
if os.path.isdir(self.det_files_root):
shutil.rmtree(self.det_files_root)
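# Sketch of the detection .json layout exercised by these tests
# (reconstructed from the fixtures; corner ordering is an assumption):
# {
#   "classes":    ["background", "square", "cross"],
#   "img_names":  ["square.jpg"],
#   "detections": [            # one inner list per image
#     [{"bbox": [x1, y1, x2, y2],
#       "label_probs": [...],  # one probability per detector class
#       "covars": [...]}]      # two 2x2 corner covariances; omit them (or
#                              # zero them, or pass override_cov=0) to get a
#                              # BBoxDetInst instead of a PBoxDetInst
#   ]
# }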
def test_single_pbox(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
def test_two_pboxes_one_img(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
det_data['detections'] = [[self.default_det_dict, self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 2)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertIsInstance(img_dets[1], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
self.assertTrue(np.allclose(img_dets[1].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[1].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[1].class_list), self.default_det_dict['label_probs'])
def test_two_pboxes_two_imgs(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
det_data['detections'] = [[self.default_det_dict], [self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name)
self.assertEqual(len(detections), 2)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
def test_empty_img(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
det_data['detections'] = [[self.default_det_dict], []]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name)
self.assertEqual(len(detections), 2)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 0)
def test_single_pbox_w_extra_gt_label(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name, self.default_gt_class_ids_dict)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
expected_label_probs = [0, 1, 0, 0]
self.assertListEqual(list(img_dets[0].class_list), expected_label_probs)
def test_single_pbox_w_rearranged_gt_label(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
gt_class_ids_dict = {'none': 0, 'cross': 1, 'square': 2}
detections = read_files.read_pbox_json(det_file_name, gt_class_ids_dict)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
expected_label_probs = [0, 0, 1]
self.assertListEqual(list(img_dets[0].class_list), expected_label_probs)
def test_single_pbox_w_fewer_gt_labels(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
gt_class_ids_dict = {'square': 0, 'cross': 1}
detections = read_files.read_pbox_json(det_file_name, gt_class_ids_dict)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
expected_label_probs = [1, 0]
self.assertListEqual(list(img_dets[0].class_list), expected_label_probs)
def test_single_pbox_w_gt_label_id_skipped(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
gt_class_ids_dict = {'square': 1, 'cross': 4}
detections = read_files.read_pbox_json(det_file_name, gt_class_ids_dict)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
expected_label_probs = [0, 1, 0, 0, 0]
self.assertListEqual(list(img_dets[0].class_list), expected_label_probs)
def test_single_pbox_w_synonym_label_id(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
self.default_det_dict.update({'label_probs': [0.3, 0.7, 0]})
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
gt_class_ids_dict = {'none': 0, 'cross': 1, 'square': 2}
detections = read_files.read_pbox_json(det_file_name, gt_class_ids_dict)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
expected_label_probs = np.array([0.3, 0.0, 0.7])
self.assertTrue(np.allclose(img_dets[0].class_list, expected_label_probs))
def test_single_pbox_w_non_gt_class(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
self.default_det_dict.update({'label_probs': [0, 0.7, 0.3]})
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
gt_class_ids_dict = {'none': 0, 'diamond': 1, 'square': 2}
detections = read_files.read_pbox_json(det_file_name, gt_class_ids_dict)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
expected_label_probs = np.array([0.0, 0.0, 0.7])
self.assertTrue(np.allclose(img_dets[0].class_list, expected_label_probs))
def test_single_bbox(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], BBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
def test_single_bbox_0_covar_in_file(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], BBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
def test_single_bbox_set_covar_0(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]})
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], BBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
def test_single_bbox_set_covar_0_base_covar_non_zero(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name, override_cov=0)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], BBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
def test_single_pbox_set_covar(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
det_data['detections'] = [[self.default_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name, override_cov=200)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [[[200, 0], [0, 200]], [[200, 0], [0, 200]]]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
def test_one_pbox_one_bbox_one_img(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
# Create one bbox detection and one pbox detection
box_det_dict = self.default_det_dict.copy()
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
det_data['detections'] = [[self.default_det_dict, box_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name)
self.assertEqual(len(detections), 1)
self.assertIsInstance(detections, read_files.BoxLoader)
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 2)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertIsInstance(img_dets[1], BBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertTrue(np.allclose(img_dets[1].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
self.assertListEqual(list(img_dets[1].class_list), self.default_det_dict['label_probs'])
def test_one_pbox_one_bbox_diff_imgs(self):
# Create detection.json file
det_data = {}
det_data['classes'] = self.default_det_classes
det_data['img_names'] = self.default_img_names
# Create one bbox detection and one pbox detection
box_det_dict = self.default_det_dict.copy()
self.default_det_dict.update({'covars': [self.default_covar, self.default_covar]})
det_data['detections'] = [[self.default_det_dict], [box_det_dict]]
det_file_name = os.path.join(self.det_files_root, 'det_file.json')
with open(det_file_name, 'w') as f:
json.dump(det_data, f)
detections = read_files.read_pbox_json(det_file_name)
self.assertEqual(len(detections), 2)
self.assertIsInstance(detections, read_files.BoxLoader)
# Image 1
img_iterator = iter(detections)
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], PBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].covs, [self.default_covar, self.default_covar]))
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
# Image 2
img_dets = next(img_iterator)
self.assertEqual(len(img_dets), 1)
self.assertIsInstance(img_dets[0], BBoxDetInst)
self.assertTrue(np.allclose(img_dets[0].box, self.default_det_dict['bbox']))
self.assertListEqual(list(img_dets[0].class_list), self.default_det_dict['label_probs'])
|
11535099
|
import numpy as np
import pandas as pd
import os
from jrieke import utils
from tqdm import tqdm_notebook
import multiprocessing
from settings import settings
import torch
from torch.utils.data import Dataset, DataLoader
from tabulate import tabulate
# Binary brain mask used to cut out the skull.
mask = utils.load_nifti(settings["binary_brain_mask"])
# ------------------------- ADNI data tables -----------------------------------
# Ritter/Haynes lab file system at BCCN Berlin.
ADNI_DIR = settings["ADNI_DIR"]
# Filepaths for 1.5 Tesla scans.
table_15T = None  # os.path.join(ADNI_DIR, settings["1.5T_table"])
image_dir_15T = None  # os.path.join(ADNI_DIR, settings["1.5T_image_dir"])
corrupt_images_15T = ['067_S_0077/Screening']
# TODO: Maybe rename to load_table or load_adni_table
def load_data_table(table, image_dir, corrupt_images=None):
"""Read data table, find corresponding images, filter out corrupt,
missing and MCI images, and return the samples as a pandas dataframe."""
# Read table into dataframe.
print('Loading dataframe for', table)
df = pd.read_csv(table)
print('Found', len(df), 'images in table')
# Add column with filepaths to images.
df['filepath'] = df.apply(lambda row: get_image_filepath(row, image_dir), axis=1)
# Filter out corrupt images (i.e. images where the preprocessing failed).
len_before = len(df)
if corrupt_images is not None:
df = df[df.apply(lambda row: '{}/{}'.format(row['PTID'], row['Visit']) not in corrupt_images, axis=1)]
print('Filtered out', len_before - len(df), 'of', len_before, 'images because of failed preprocessing')
# Filter out images where files are missing.
len_before = len(df)
    # print(df[~df['filepath'].map(os.path.exists)]['filepath'].values)
    df = df[df['filepath'].map(os.path.exists)]
print('Filtered out', len_before - len(df), 'of', len_before, 'images because of missing files')
# Filter out images with MCI.
len_before = len(df)
df = df[df['DX'] != 'MCI']
print('Filtered out', len_before - len(df), 'of', len_before, 'images that were MCI')
print('Final dataframe contains', len(df), 'images from', len(df['PTID'].unique()), 'patients')
print()
return df
def load_data_table_3T():
"""Load the data table for all 3 Tesla images."""
return load_data_table(table_3T, image_dir_3T, corrupt_images_3T)
def load_data_table_15T():
"""Load the data table for all 1.5 Tesla images."""
return load_data_table(table_15T, image_dir_15T, corrupt_images_15T)
def load_data_table_both():
"""Load the data tables for all 1.5 Tesla and 3 Tesla images and combine them."""
df_15T = load_data_table(table_15T, image_dir_15T, corrupt_images_15T)
df_3T = load_data_table(table_3T, image_dir_3T, corrupt_images_3T)
df = pd.concat([df_15T, df_3T])
return df
def get_image_filepath(df_row, root_dir=''):
"""Return the filepath of the image that is described in the row of the data table."""
# Current format for the image filepath is:
# <PTID>/<Visit (spaces removed)>/<PTID>_<Scan.Date (/ replaced by -)>_
# <Visit (spaces removed)>_<Image.ID>_<DX>_Warped.nii.gz
filedir = os.path.join(df_row['PTID'], df_row['Visit'].replace(' ', ''))
filename = '{}_{}_{}_{}_{}_Warped.nii.gz'.format(df_row['PTID'], df_row['Scan.Date'].replace('/', '-'),
df_row['Visit'].replace(' ', ''), df_row['Image.ID'], df_row['DX'])
return os.path.join(root_dir, filedir, filename)
# ------------------------ PyTorch datasets and loaders ----------------------
class ADNIDataset(Dataset):
"""
PyTorch dataset that consists of MRI images and labels.
Args:
        filenames (iterable of strings): The filenames of the MRI images.
labels (iterable): The labels for the images.
mask (array): If not None (default), images are masked by multiplying with this array.
transform: Any transformations to apply to the images.
"""
def __init__(self, filenames, labels, mask=None, transform=None):
self.filenames = filenames
self.labels = torch.LongTensor(labels)
self.mask = mask
self.transform = transform
# Required by torchsample.
self.num_inputs = 1
self.num_targets = 1
# Default values. Should be set via fit_normalization.
self.mean = 0
self.std = 1
def __len__(self):
return len(self.filenames)
def __getitem__(self, idx):
"""Return the image as a numpy array and the label."""
label = self.labels[idx]
struct_arr = utils.load_nifti(self.filenames[idx], mask=self.mask)
        # TODO: Try normalizing each image to mean 0 and std 1 here.
# struct_arr = (struct_arr - struct_arr.mean()) / (struct_arr.std() + 1e-10)
struct_arr = (struct_arr - self.mean) / (self.std + 1e-10) # prevent 0 division by adding small factor
struct_arr = struct_arr[None] # add (empty) channel dimension
struct_arr = torch.FloatTensor(struct_arr)
if self.transform is not None:
struct_arr = self.transform(struct_arr)
return struct_arr, label
def image_shape(self):
"""The shape of the MRI images."""
        return utils.load_nifti(self.filenames[0], mask=self.mask).shape
def fit_normalization(self, num_sample=None, show_progress=False):
"""
Calculate the voxel-wise mean and std across the dataset for normalization.
Args:
num_sample (int or None): If None (default), calculate the values across the complete dataset,
otherwise sample a number of images.
            show_progress (bool): Show a progress bar during the calculation.
"""
if num_sample is None:
num_sample = len(self)
image_shape = self.image_shape()
all_struct_arr = np.zeros((num_sample, image_shape[0], image_shape[1], image_shape[2]))
sampled_filenames = np.random.choice(self.filenames, num_sample, replace=False)
if show_progress:
sampled_filenames = tqdm_notebook(sampled_filenames)
for i, filename in enumerate(sampled_filenames):
            struct_arr = utils.load_nifti(filename, mask=self.mask)
all_struct_arr[i] = struct_arr
self.mean = all_struct_arr.mean(0)
self.std = all_struct_arr.std(0)
def get_raw_image(self, idx):
"""Return the raw image at index idx (i.e. not normalized, no color channel, no transform."""
return utils.load_nifti(self.filenames[idx], mask=self.mask)
def print_df_stats(df, df_train, df_val):
"""Print some statistics about the patients and images in a dataset."""
headers = ['Images', '-> AD', '-> CN', 'Patients', '-> AD', '-> CN']
def get_stats(df):
df_ad = df[df['DX'] == 'Dementia']
df_cn = df[df['DX'] == 'CN']
return [len(df), len(df_ad), len(df_cn), len(df['PTID'].unique()), len(df_ad['PTID'].unique()),
len(df_cn['PTID'].unique())]
stats = []
stats.append(['All'] + get_stats(df))
stats.append(['Train'] + get_stats(df_train))
stats.append(['Val'] + get_stats(df_val))
print(tabulate(stats, headers=headers))
print()
# TODO: Rename *_val to *_test.
def build_datasets(df, patients_train, patients_val, print_stats=True, normalize=True):
"""
Build PyTorch datasets based on a data table and a patient-wise train-test split.
Args:
df (pandas dataframe): The data table from ADNI.
patients_train (iterable of strings): The patients to include in the train set.
patients_val (iterable of strings): The patients to include in the val set.
print_stats (boolean): Whether to print some statistics about the datasets.
        normalize (boolean): Whether to calculate mean and std across the dataset for later normalization.
Returns:
The train and val dataset.
"""
# Compile train and val dfs based on patients.
df_train = df[df.apply(lambda row: row['PTID'] in patients_train, axis=1)]
df_val = df[df.apply(lambda row: row['PTID'] in patients_val, axis=1)]
if print_stats:
print_df_stats(df, df_train, df_val)
# Extract filenames and labels from dfs.
train_filenames = np.array(df_train['filepath'])
val_filenames = np.array(df_val['filepath'])
train_labels = np.array(df_train['DX'] == 'Dementia', dtype=int) # [:, None]
val_labels = np.array(df_val['DX'] == 'Dementia', dtype=int) # [:, None]
train_dataset = ADNIDataset(train_filenames, train_labels, mask=mask)
val_dataset = ADNIDataset(val_filenames, val_labels, mask=mask)
# TODO: Maybe normalize each scan first, so that they are on a common scale.
# TODO: Save these values to file together with the model.
# TODO: Sample over more images.
if normalize:
print('Calculating mean and std for normalization:')
train_dataset.fit_normalization(200, show_progress=True)
val_dataset.mean, val_dataset.std = train_dataset.mean, train_dataset.std
else:
print('Dataset is not normalized, this could dramatically decrease performance')
return train_dataset, val_dataset
def build_loaders(train_dataset, val_dataset):
"""Build PyTorch data loaders from the datasets."""
# In contrast to Korolev et al. 2017, we do not enforce one sample per class in each batch.
# TODO: Maybe change batch size to 3 or 4. Check how this affects memory and accuracy.
train_loader = DataLoader(train_dataset, batch_size=5, shuffle=True, num_workers=multiprocessing.cpu_count(),
pin_memory=torch.cuda.is_available())
val_loader = DataLoader(val_dataset, batch_size=5, shuffle=False, num_workers=multiprocessing.cpu_count(),
pin_memory=torch.cuda.is_available())
return train_loader, val_loader
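# A minimal usage sketch tying the loaders together (assumes the 1.5T table
# paths above have been configured; the random patient-wise split is an
# illustrative assumption, not the original experiment protocol):
if __name__ == '__main__':
    df = load_data_table_15T()
    patients = df['PTID'].unique()
    np.random.shuffle(patients)
    split = int(0.8 * len(patients))
    train_dataset, val_dataset = build_datasets(df, patients[:split], patients[split:])
    train_loader, val_loader = build_loaders(train_dataset, val_dataset)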
|
11535147
|
from django.urls import reverse
from django_ical.views import ICalFeed
from .models import TimePlace
class EventFeed(ICalFeed):
"""An iCal feed of all the events available to the user."""
file_name = 'events.ics'
timezone = "CET"
def get_object(self, request, *args, **kwargs):
return {
'user_can_view_private': request.user.has_perm('news.can_view_private'),
'query_kwargs': {},
}
def items(self, attrs):
items = TimePlace.objects.all()
if attrs['query_kwargs']:
items = items.filter(**attrs['query_kwargs'])
if not attrs['user_can_view_private']:
items = items.filter(event__private=False)
return items
def item_link(self, item: TimePlace):
return reverse('event_detail', kwargs={'pk': item.pk})
def item_title(self, item: TimePlace):
return item.event.title
def item_description(self, item: TimePlace):
return item.event.clickbait
def item_start_datetime(self, item: TimePlace):
return item.start_time
def item_end_datetime(self, item: TimePlace):
return item.end_time
def item_location(self, item: TimePlace):
return item.place
def product_id(self):
return "MAKE NTNU"
class SingleEventFeed(EventFeed):
"""An iCal feed of all occurences of a single event."""
def file_name(self, attrs):
title = self.items(attrs).values_list('event__title', flat=True).first()
return f'{title}.ics'
def get_object(self, request, *args, **kwargs):
attrs = super().get_object(request, *args, **kwargs)
attrs['query_kwargs']['event_id'] = int(kwargs['pk'])
return attrs
class SingleTimePlaceFeed(EventFeed):
"""An iCal feed of a single occurences of an event."""
def file_name(self, attrs):
title = self.items(attrs).values_list('event__title', flat=True).first()
return f'{title}.ics'
def get_object(self, request, *args, **kwargs):
attrs = super().get_object(request, *args, **kwargs)
attrs['query_kwargs']['id'] = int(kwargs['pk'])
return attrs
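# A minimal urls.py sketch for wiring up these feeds (the module path, URL
# patterns and names below are assumptions, not taken from the project):
#
#     from django.urls import path
#     from news.feeds import EventFeed, SingleEventFeed, SingleTimePlaceFeed
#
#     urlpatterns = [
#         path('events.ics', EventFeed(), name='event_feed'),
#         path('events/<int:pk>.ics', SingleEventFeed(), name='single_event_feed'),
#         path('timeplaces/<int:pk>.ics', SingleTimePlaceFeed(), name='single_timeplace_feed'),
#     ]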
|
11535185
|
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
import math
import numpy as np
import random
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
round_up=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
self.round_up = round_up
if self.round_up:
self.total_size = self.num_samples * self.num_replicas
else:
self.total_size = len(self.dataset)
# added to adapt PK sampling strategy
self.do_pk = hasattr(dataset, "K")
if self.do_pk:
if self.rank == 0:
print("Start using PK sampling strategy!")
self.spkr_dataset_ids = dataset.spkr_dataset_ids
self.K = dataset.K
self.P = dataset.P
self.batch_size = self.P*self.K
def __iter__(self):
if not self.do_pk:
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
if self.round_up:
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
if self.round_up:
assert len(indices) == self.num_samples
return iter(indices)
else:
lol = lambda lst, sz: [lst[i:i + sz] for i in range(0, len(lst), sz)]
items = list(self.spkr_dataset_ids.items())
# metric learning naturally needs shuffle to be True
g = torch.Generator()
g.manual_seed(self.epoch)
flattened_list = []
flattened_label = []
for spkr, ids in items:
numSeg = (len(ids) // self.K) * self.K
rp = lol(torch.randperm(len(ids), generator=g).tolist()[:numSeg], self.K)
flattened_label.extend([spkr]*len(rp))
for indices in rp:
flattened_list.append([ids[i] for i in indices])
mixid = torch.randperm(len(flattened_label), generator=g).tolist()
mixlabel = []
mixmap = []
assert self.batch_size % self.K == 0, \
"batchsize %d should be exactly divided by K %d" % (self.batch_size, self.K)
tuple_batch_size = self.batch_size // self.K
for ii in mixid:
startbatch = len(mixlabel) - len(mixlabel) % tuple_batch_size
if flattened_label[ii] not in mixlabel[startbatch:]:
mixlabel.append(flattened_label[ii])
mixmap.append(ii)
all_indices = []
for idx in mixmap:
all_indices.extend(flattened_list[idx])
round_len = (len(all_indices) // (self.num_replicas * self.batch_size)) * self.batch_size
sub_indices = all_indices[self.rank * round_len: (self.rank+1) * round_len]
# since round_len is definitely a bit smaller than the original len,
# to complement the original length, some chunks will be oversampled randomly
if self.round_up:
epoch_iter = math.ceil(self.total_size / (self.batch_size * self.num_replicas))
truncated_iter = round_len // self.batch_size
sub_indices = np.asarray(sub_indices)
split_batches = np.split(sub_indices, truncated_iter)
assert truncated_iter == len(split_batches), "%d, %d" % (truncated_iter, len(split_batches))
random_batches_selected = random.sample(split_batches, epoch_iter - truncated_iter)
split_batches.extend(random_batches_selected)
sub_indices = np.concatenate(split_batches).tolist()
return iter(sub_indices)
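# A minimal usage sketch (assumes a dataset exposing `K`, `P` and
# `spkr_dataset_ids` as expected above; all other names are placeholders):
#
#     sampler = DistributedSampler(dataset, shuffle=True, round_up=True)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=dataset.P * dataset.K,
#                                          sampler=sampler)
#     for epoch in range(num_epochs):
#         sampler.set_epoch(epoch)  # re-seeds the per-epoch shuffling
#         for batch in loader:
#             ...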
|
11535188
|
import copy
from collections import namedtuple
import numpy as np
import torch
from torchvision import transforms
from PIL import Image, ImageFilter, ImageEnhance
# An augmentation object consists of its name, the transform functions of type
# torchvision.transforms, and the resulting augmented dataset of type
# torch.utils.data.Dataset.
Augmentation = namedtuple('Augmentation', ['name', 'transforms', 'dataset'])
def copy_with_new_transform(dataset, transform):
"""A copy of @dataset with its transform set to @transform.
Will work for datasets from torchvision, e.g., MNIST, CIFAR10, etc. Probably
won't work for a generic dataset.
"""
new_dataset = copy.copy(dataset)
new_dataset.transform = transform
return new_dataset
def augment_transforms(augmentations, base_transform, add_id_transform=True):
"""Construct a new transform that stack all the augmentations.
Parameters:
augmentations: list of transforms (e.g. image rotations)
base_transform: transform to be applied after augmentation (e.g. ToTensor)
add_id_transform: whether to include the original image (i.e. identity transform) in the new transform.
Return:
a new transform that takes in a data point and applies all the
augmentations, then stack the result.
"""
if add_id_transform:
fn = lambda x: torch.stack([base_transform(x)] + [base_transform(aug(x))
for aug in augmentations])
else:
fn = lambda x: torch.stack([base_transform(aug(x)) for aug in augmentations])
return transforms.Lambda(fn)
def rotation(base_dataset, base_transform, angles=range(-15, 16, 2)):
"""Rotations, e.g. between -15 and 15 degrees
"""
rotations = [transforms.RandomRotation((angle, angle)) for angle in angles]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(rotations, base_transform))
return Augmentation('rotation', rotations, aug_dataset)
def resized_crop(base_dataset, base_transform, size=28, scale=(0.64, 1.0), n_random_samples=31):
"""Random crop (with resize)
"""
random_resized_crops = [transforms.RandomResizedCrop(size, scale=scale) for _ in range(n_random_samples)]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(random_resized_crops, base_transform))
return Augmentation('crop', random_resized_crops, aug_dataset)
def blur(base_dataset, base_transform, radii=np.linspace(0.05, 1.0, 20)):
"""Random Gaussian blur
"""
def gaussian_blur_fn(radius):
return transforms.Lambda(lambda img: img.filter(ImageFilter.GaussianBlur(radius)))
blurs = [gaussian_blur_fn(radius) for radius in radii]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(blurs, base_transform))
return Augmentation('blur', blurs, aug_dataset)
def rotation_crop_blur(base_dataset, base_transform, angles=range(-15, 16, 2),
size=28, scale=(0.64, 1.0), n_random_samples=31,
radii=np.linspace(0.05, 1.0, 20)):
"""All 3: rotations, random crops, and blurs
"""
rotations = rotation(base_dataset, base_transform, angles).transforms
random_resized_crops = resized_crop(base_dataset, base_transform, size, scale, n_random_samples).transforms
blurs = blur(base_dataset, base_transform, radii).transforms
    all_transforms = rotations + random_resized_crops + blurs
    aug_dataset = copy_with_new_transform(base_dataset,
                                          augment_transforms(all_transforms, base_transform))
    return Augmentation('rotation_crop_blur', all_transforms, aug_dataset)
def hflip(base_dataset, base_transform):
"""Horizontal flip
"""
flip = [transforms.Lambda(lambda img: img.transpose(Image.FLIP_LEFT_RIGHT))]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(flip, base_transform))
return Augmentation('hflip', flip, aug_dataset)
def hflip_vflip(base_dataset, base_transform):
"""Both horizontal and vertical flips
"""
allflips = [transforms.Lambda(lambda img: img.transpose(Image.FLIP_LEFT_RIGHT)),
transforms.Lambda(lambda img: img.transpose(Image.FLIP_TOP_BOTTOM)),
transforms.Lambda(lambda img: img.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.FLIP_TOP_BOTTOM))]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(allflips, base_transform))
return Augmentation('hflip_vflip', allflips, aug_dataset)
def brightness(base_dataset, base_transform, brightness_factors=np.linspace(1 - 0.25, 1 + 0.25, 11)):
"""Random brightness adjustment
"""
def brightness_fn(brightness_factor):
return transforms.Lambda(lambda img: ImageEnhance.Brightness(img).enhance(brightness_factor))
brightness_transforms = [brightness_fn(factor) for factor in brightness_factors]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(brightness_transforms, base_transform))
return Augmentation('brightness', brightness_transforms, aug_dataset)
def contrast(base_dataset, base_transform, contrast_factors=np.linspace(1 - 0.35, 1 + 0.35, 11)):
"""Random contrast adjustment
"""
def contrast_fn(contrast_factor):
return transforms.Lambda(lambda img: ImageEnhance.Contrast(img).enhance(contrast_factor))
contrast_transforms = [contrast_fn(factor) for factor in contrast_factors]
aug_dataset = copy_with_new_transform(base_dataset,
augment_transforms(contrast_transforms, base_transform))
return Augmentation('contrast', contrast_transforms, aug_dataset)
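# A minimal usage sketch on MNIST (the dataset choice is an illustrative
# assumption; any torchvision dataset with a `.transform` attribute works):
#
#     from torchvision import datasets
#     base_transform = transforms.ToTensor()
#     base_dataset = datasets.MNIST('data', train=True, download=True)
#     aug = rotation(base_dataset, base_transform)
#     stacked, label = aug.dataset[0]  # shape: (len(angles) + 1, 1, 28, 28)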
|
11535209
|
def test():
assert (
'spacy.load("en_core_web_md")' in __solution__
), "Are you loading the medium model correctly?"
assert "doc[1].vector" in __solution__, "Are you getting the correct vector?"
__msg__.good(
"Well done! In the next exercise, you'll be using spaCy to predict "
"similarities between documents, spans and tokens via the word vectors "
"under the hood."
)
|
11535219
|
import pickle as pk
import numpy as np
import pandas as pd
import subprocess
from corpus_utils import PatentCorpus
from plot_utils import plot_score_distr, calc_simcoef_distr, group_combis, calc_auc
def make_input_file(dir_name='/home/lea/Documents/master_thesis/patent_search/database/human_eval/patent_sheets/*'):
"""
Iterate over patents in single_pat_corpus and write into a dataframe that is then stored in a csv
"""
# load patent corpus
pat_corpus = PatentCorpus()
pat_corpus.mode = 'wmd'
# ugly hack to invoke __iter__() function :-( :
list(pat_corpus)
single_pat_corpus = pat_corpus.single_pat_corpus
# make empty data frame
df = pd.DataFrame(columns=['id', 'text'])
# go through single_pat_corpus and fill data frame
for pid, text in single_pat_corpus.items():
df = df.append({'id': pid, 'text': text}, ignore_index=True)
df.to_csv('human_eval/wmd_pat_corpus.txt', sep='\t', header=False, index=False, encoding='utf-8')
if __name__ == "__main__":
#make_input_file()
    dist_mat = pk.load(open('/home/lea/Documents/master_thesis/patent_search/wmd-master/dist_matrix.pk', 'rb'))
    vectors = pk.load(open('/home/lea/Documents/master_thesis/patent_search/wmd-master/vectors.pk', 'rb'))
id_list = list(vectors[3])
combis = np.load('human_eval/corpus_info/combis.npy')
    binary_label_pairs = np.load('human_eval/corpus_info/binary_label_pairs.npy', allow_pickle=True).item()
    human_label_pairs = np.load('human_eval/corpus_info/human_label_pairs.npy', allow_pickle=True).item()
binary_sim_combis, binary_diff_combis = group_combis(binary_label_pairs)
human_sim_combis, human_diff_combis = group_combis(human_label_pairs)
binary_scores = {}
human_scores = {}
binary_scores['cited'] = []
binary_scores['random'] = []
human_scores['relevant'] = []
human_scores['not relevant'] = []
for combi in binary_sim_combis:
i = id_list.index(combi[0])
j = id_list.index(combi[1])
binary_scores['cited'].append(dist_mat[i,j])
for combi in binary_diff_combis:
i = id_list.index(combi[0])
j = id_list.index(combi[1])
binary_scores['random'].append(dist_mat[i,j])
for combi in human_sim_combis:
i = id_list.index(combi[0])
j = id_list.index(combi[1])
human_scores['relevant'].append(dist_mat[i,j])
for combi in human_diff_combis:
i = id_list.index(combi[0])
j = id_list.index(combi[1])
human_scores['not relevant'].append(dist_mat[i,j])
binary_auc = calc_auc(binary_scores['cited'], binary_scores['random'])[2]
human_auc = calc_auc(human_scores['relevant'], human_scores['not relevant'])[2]
plot_score_distr('human_eval', 'linear', ['cited', 'random'],
{'cited': binary_scores['cited'], 'random': binary_scores['random']},
binary_auc, ['cited'], histdir='wmd', bins=50)
plot_score_distr('human_eval', 'linear', ['relevant', 'not relevant'],
{'relevant': human_scores['relevant'], 'not relevant': human_scores['not relevant']},
human_auc, ['relevant'], histdir='wmd', bins=50)
|
11535227
|
from datetime import timedelta
from os import path
from random import choice
from string import ascii_letters, digits, punctuation
from pygal import Config
from pygal.style import Style
from watch.config.menu import menu_tree
# Please create local_config.py file and set all [REQUIRED] parameters.
# #############################################################################################
# Flask debug. Setting it to True leads to unexpected behaviour: internal threads will be started twice.
DEBUG = False
# Flask secret key. Set this key to a fixed value to keep user sessions valid even if your server is restarted.
SECRET_KEY = ''.join([choice(ascii_letters + digits + punctuation) for n in range(32)])
# [REQUIRED]
# In production, set host to 0.0.0.0, choose a port and a proper server_name (must include the port, e.g. mycompany.com:8181).
# It is possible to use the default local settings, but...
# Please note:
# 1) Chrome does not send a cookie to localhost, so you may not be authenticated.
# 2) Telegram does not recognize local hyperlinks.
# That's why it is recommended to use "global" settings on a local machine.
# See the official Flask docs to learn more about these params.
HOST = '127.0.0.1' # [REQUIRED]
PORT = 5000
SERVER_NAME = None
CUSTOM_SERVER_NAME = None
# How long the client browser should keep our cookies.
PERMANENT_USER_SESSION = True
PERMANENT_SESSION_LIFETIME = timedelta(days=7)
# Limit parallel queries count for each user
MAX_DB_SESSIONS_PER_USER = 20
# How many rows can be fetched
ORA_NUM_ROWS = 100_000
# Limit parallel session count for each target
ORA_MAX_POOL_SIZE = 40
# Datetime display format
DATETIME_FORMAT = '%d.%m.%Y %H:%M:%S'
# The main menu structure. Can be imported from other source.
MENU_TREE = menu_tree
# Logger params
LOG_MAX_BYTES = 1024 * 1024
LOG_BACKUP_COUNT = 3
ERROR_LOG_NAME = 'error.log'
ACCESS_LOG_NAME = 'access.log'
ENABLE_ACCESS_LOG = False
# Background task worker tries to process all active tasks, then sleeps for this period (seconds).
# A value of 0 means the worker will not be started, so you can turn it off if you are not going to create tasks.
WORKER_FREQ_SEC = 10 # set to 0 to turn it off
# Pause task worker in case of these errors:
# ORA-12170: TNS:Connect timeout occurred
# ORA-03113: end-of-file on communication channel
# 0 means False, otherwise - sleep interval in seconds
SLEEP_ON_FAIL_SEC = 0
# Limit the maximum number of notifications to show in "Tasks notifications" view.
MAX_KEPT_NOTIFICATIONS = 100
# All active tasks can be stored to disk before the server shutdown.
# Set it to '' to disable task storing.
STORE_FILE = path.join(path.dirname(__file__), 'stored_tasks')
# Each task remembers sent warnings and doesn't repeat them.
# This option limits the maximum number of database objects which the task marks as sent.
# When the limit is exceeded the oldest object is removed (FIFO), even if its warning is still relevant.
MAX_STORED_OBJECTS = 1000
# If the task message is a list, this parameter limits its length.
MAX_MESSAGE_ITEMS = 10
# Your telegram bot name and token.
# These params must be set to use messaging (sending notifications and receiving commands)
# See the official telegram docs to know how to create your own bot.
BOT_NAME = ''
BOT_TOKEN = ''
BOT_PATH = 'https://api.telegram.org/bot'
# If you have to use a proxy put it here. For example {'https': 'https://127.0.0.1:81'}
BOT_PROXY = {}
# For proxy servers which accept original address as /path
BOT_SIMPLE_PROXY = ''
# Watch server receives chat messages via long polls.
# Set this parameter to 60 (seconds) or even more if you are going to use a chat bot.
# Please note: there is no reason to use extremely small values for this parameter.
# 0 means that the bot is turned off (no income messages).
BOT_POLLING_FREQ_SEC = 0
# User can choose one of these chats while creating a task. All the task notifications will be sent to selected chat.
# Example: {-123: 'Critical alerts', -124:'Other alerts'}.
# You must get -123 and -124 values from telegram:
# 1) Create your own bot.
# 2) Set bot params mentioned above.
# 3) Start the app.
# 4) Create a new group.
# 5) Add the bot to your new group.
# 6) Send /id command.
# 7) The bot will show you group id.
BOT_CHAT_LIST = {}
# Don't send anything; ignore the notification options for tasks. Set WORKER_FREQ_SEC to 0 to stop task processing.
MUTE_MESSAGES = False
# Do not run the task (until it is reset) if the previous message was not sent.
FAIL_TASK_ON_MSG_ERROR = True
# Do not disturb hours [from, to].
# Tasks with `default` sound mode will send silent notifications. Examples: [22, 7] or [0, 8]
DND_HOURS = []
# Pygal charts configuration. See the official Pygal docs.
CHART_CONFIG = {'style': Style(font_family='Arial'
, guide_stroke_dasharray='1,1'
, major_guide_stroke_dasharray='1,1'
, label_font_size=12
, major_label_font_size=12
, value_font_size=12
, value_label_font_size=12
, legend_font_size=12
, background='#FFFFFF'
, plot_background='#FFFFFF'
, title_font_family='Arial'
, title_font_size=12)
, 'explicit_size': True
, 'height': 400
, 'width': 1000
, 'margin': 4
, 'show_x_guides': True
, 'tooltip_border_radius': 2
, 'dots_size': 2
, 'stroke_style': {'width': 1}}
# [REQUIRED]
# Here is our targets. Each target describes an Oracle DB connection.
TARGETS = {
# 'OUR-DEV': {'host': '127.0.0.1',
# 'port': '0000',
# 'sid': 'SID',
# 'encoding': 'windows-1251',
# 'user': 'username',
# 'password': '<PASSWORD>'}
# , ...
}
# [REQUIRED]
# Add users to the system.
# key = login (str), must be in a lowercase.
# value[0] = password (str).
# value[1] = telegram account id (int).
# the user should send you their id, to be able to communicate with the bot in a private chat.
# if it is not necessary, set to None
# the user can send the /id command to your bot in a private chat to find out their id.
# value[2] = list of targets (str) allowed to user.
USERS = {
# 'admin': ['<PASSWORD>', 123, ['OUR-DEV', ...]],
# 'guest': ['psswrOrd', None, ['OUR-DEV', ...]]
}
# [REQUIRED]
# List of users, which allowed to:
# - shutdown the app server;
# - remove other users' tasks.
ADMIN_GROUP = []
ADMIN_ONLY_VIEWS = ['get_access_log', 'get_error_log', 'stop_server']
# If your custom view is specific for some target it will not be shown for other targets.
# {view_name: [target_name1, target_name2, ...], ...}
TARGET_SPECIFIC_VIEWS = {}
# Now let's try to import settings which you have set in local_config.py
try:
from watch.config.local_config import *
except ImportError:
pass
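# A minimal local_config.py sketch (every value below is a placeholder, not a
# real credential):
#
#     SERVER_NAME = 'watch.mycompany.com:8181'
#     TARGETS = {'OUR-DEV': {'host': '10.0.0.1', 'port': '1521', 'sid': 'ORCL',
#                            'encoding': 'windows-1251', 'user': 'watch', 'password': '...'}}
#     USERS = {'admin': ['change_me', None, ['OUR-DEV']]}
#     ADMIN_GROUP = ['admin']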
# Finishing touch
CHART_CONFIG['config'] = Config(js=[f'http://{SERVER_NAME or CUSTOM_SERVER_NAME or (HOST + ":" + str(PORT))}'
f'/static/pygal-tooltips.min.js'])
# That's all. Now try to start the app. Good luck!
|
11535285
|
import json
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, complex):
return {
'_meta': '_complex',
'num': [obj.real, obj.imag],
}
return json.JSONEncoder.default(self, obj)
data = {
'an_int': 42,
'a_float': 3.14159265,
'a_complex': 3 + 4j,
}
json_data = json.dumps(data, cls=ComplexEncoder)
print(json_data)
def object_hook(obj):
    try:
        if obj['_meta'] == '_complex':
            return complex(*obj['num'])
    except (KeyError, TypeError):
        pass
    # Fall through for plain dicts (or complex-tagged dicts with other metadata).
    return obj
data_out = json.loads(json_data, object_hook=object_hook)
print(data_out)
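# Expected output of the two prints above (float repr may vary slightly):
#   {"an_int": 42, "a_float": 3.14159265, "a_complex": {"_meta": "_complex", "num": [3.0, 4.0]}}
#   {'an_int': 42, 'a_float': 3.14159265, 'a_complex': (3+4j)}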
|
11535286
|
import pycomicvine
import datetime
from pycomicvine.tests.utils import *
pycomicvine.api_key = "476302e62d7e8f8f140182e36aebff2fe935514b"
class TestVolumesList(ListResourceTestCase):
def test_get_id_and_name(self):
self.get_id_and_name_test(
pycomicvine.Volumes,
pycomicvine.Volume
)
class TestVolumeAttributes(SingularResourceTestCase):
def setUp(self):
self.get_random_instance(pycomicvine.Volumes)
def test_search(self):
self.search_test(pycomicvine.Volumes, pycomicvine.Volume)
def test_get_all_attributes(self):
volume = self.get_sample(pycomicvine.Volume)
        if volume is not None:
self.assertIsInstance(
volume.aliases,
(type(None),list)
)
self.assertIsInstance(
volume.api_detail_url,
(type(None),str)
)
self.assertIsInstance(
volume.character_credits,
pycomicvine.Characters
)
self.assertIsInstance(
volume.concept_credits,
pycomicvine.Concepts
)
self.assertIsInstance(
volume.count_of_issues,
int
)
self.assertIsInstance(
volume.date_added,
datetime.datetime
)
self.assertIsInstance(
volume.date_last_updated,
datetime.datetime
)
self.assertIsInstance(
volume.deck,
(type(None),str)
)
self.assertIsInstance(
volume.description,
(type(None),str)
)
self.assertIsInstance(
volume.first_issue,
(type(None),pycomicvine.Issue)
)
self.assertIsInstance(
volume.id,
int
)
self.assertIsInstance(
volume.image,
(type(None),dict)
)
self.assertIsInstance(
volume.last_issue,
(type(None),pycomicvine.Issue)
)
self.assertIsInstance(
volume.location_credits,
pycomicvine.Locations
)
self.assertIsInstance(
volume.name,
(type(None),str)
)
self.assertIsInstance(
volume.object_credits,
pycomicvine.Objects
)
self.assertIsInstance(
volume.person_credits,
pycomicvine.People
)
self.assertIsInstance(
volume.publisher,
(type(None),pycomicvine.Publisher)
)
self.assertIsInstance(
volume.site_detail_url,
(type(None),str)
)
self.assertIsInstance(
volume.start_year,
(type(None),int)
)
with self.assertRaises(AttributeError):
volume.team_credits # not in API despite documentation
|
11535296
|
from typing import List
import tensorflow as tf
def parse_tfrecord_fn(example) -> dict:
feature_description = {
"image/height": tf.io.FixedLenFeature([], tf.int64),
"image/width": tf.io.FixedLenFeature([], tf.int64),
"image/filename": tf.io.VarLenFeature(tf.string),
"image/encoded": tf.io.FixedLenFeature([], tf.string),
"image/format": tf.io.FixedLenFeature([], tf.string),
"image/object/class/label": tf.io.VarLenFeature(tf.int64),
"image/object/class/text": tf.io.VarLenFeature(tf.string),
"image/object/bbox/xmin": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/xmax": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/ymin": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/ymax": tf.io.VarLenFeature(tf.float32),
}
example = tf.io.parse_single_example(example, feature_description)
example["image/encoded"] = tf.io.decode_png(example["image/encoded"], channels=3)
return example
def data_loader(
record_file_pattern: List[str] = [
"tfrecords/train.record-00000-of-00001",
"tfrecords/testdev.record-00000-of-00001",
]
):
dataset = tf.data.TFRecordDataset(record_file_pattern)
dataset = dataset.map(parse_tfrecord_fn)
return dataset
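# A minimal usage sketch (assumes the default record files above exist):
#
#     dataset = data_loader()
#     for example in dataset.take(1):
#         print(example["image/filename"])       # sparse tensor of filenames
#         print(example["image/encoded"].shape)  # (height, width, 3) uint8 image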
|
11535305
|
import datetime
def prep_data(data):
# type: (dict) -> dict
"""Takes a dict intended to be converted to JSON for use with Google Charts and transforms date and datetime
into date string representations as described here:
https://developers.google.com/chart/interactive/docs/datesandtimes
TODO: Implement Timeofday formatting"""
for row in data['rows']:
for val in row['c']:
if isinstance(val['v'], datetime.datetime):
val['v'] = "Date({}, {}, {})".format(val['v'].year,
val['v'].month-1, # JS Dates are 0-based
val['v'].day,
val['v'].hour,
val['v'].minute,
val['v'].second,
val['v'].microsecond)
elif isinstance(val['v'], datetime.date):
val['v'] = "Date({}, {}, {})".format(val['v'].year,
val['v'].month-1, # JS Dates are 0-based
val['v'].day)
return data
def render_data(columns, rows):
# type: (list, list) -> dict
data = {'cols': [], 'rows': []}
for column in columns:
data['cols'].append({"id": "", "label": column[1], "pattern": "", "type": column[0]})
for row in rows:
new_row = {'c': []}
for field in row:
new_row['c'].append({"v": field, "f": None})
data['rows'].append(new_row)
return prep_data(data)
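# A minimal usage sketch (column and row values are illustrative):
#
#     columns = [('date', 'Day'), ('number', 'Visits')]
#     rows = [(datetime.date(2020, 1, 1), 10),
#             (datetime.date(2020, 1, 2), 15)]
#     chart_data = render_data(columns, rows)
#     # chart_data['rows'][0]['c'][0]['v'] == 'Date(2020, 0, 1)'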
|
11535315
|
class _Getch:
"""Gets a single character from standard input. Does not echo to the
screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self): return self.impl()
class _GetchUnix:
"""Fetch and character using the termios module."""
def __init__(self):
import tty, sys
from select import select
def __call__(self):
import sys, tty, termios
from select import select
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
# [ Wait until ready for reading,
# wait until ready for writing
# wait for an "exception condition" ]
# The below line times out after 1 second
# This can be changed to a floating-point value if necessary
[i, o, e] = select([sys.stdin.fileno()], [], [], 1)
if i:
ch = sys.stdin.read(1)
else:
ch = None
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
"""Fetch a character using the Microsoft Visual C Runtime."""
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
import time
# Delay timeout to match UNIX behaviour
time.sleep(1)
# Check if there is a character waiting, otherwise this would block
if msvcrt.kbhit():
return msvcrt.getch()
else:
return
getch = _Getch()
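# A minimal usage sketch: poll once for a keypress; getch() returns the
# character, or None if nothing was typed within the ~1 second timeout.
#
#     ch = getch()
#     if ch is not None:
#         print('you pressed', repr(ch))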
|
11535318
|
from __future__ import division
from models import *
from utils.utils import *
from utils.datasets import *
import os
import sys
import time
import datetime
import argparse
from PIL import Image
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from json_tricks import dump, dumps, load, loads, strip_comments
import cv2
import numpy as np
def get_annotations(img_path, detections, current_dim, classes):
def get_clazz(detection):
_, _, _, _, _, _, pred = detection
return classes[int(pred)]
def is_person_prediction(detection):
clazz = get_clazz(detection)
return clazz == 'person'
def get_coords(detection):
x1, y1, x2, y2, _, _, cls_pred = detection
x1, y1 = x1.detach().cpu().numpy().item(), y1.detach().cpu().numpy().item()
x2, y2 = x2.detach().cpu().numpy().item(), y2.detach().cpu().numpy().item()
w = x2 - x1
h = y2 - y1
return {
'x1': x1,
'y1': y1,
'x2': x2,
'y2': y2,
'w': w,
'h': h,
'center': {
'x': w / 2.0,
'y': h / 2.0
}
}
img = np.array(Image.open(img_path))
original_shape = img.shape[:2]
detections = rescale_boxes(detections, current_dim, original_shape)
return {
'path': img_path,
'boxes': [get_coords(d) for d in detections if is_person_prediction(d)]
}
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--image_folder", type=str, default="data/samples", help="path to dataset")
parser.add_argument('--annot_folder', type=str, default='annot', help='path to save annotations')
parser.add_argument('--inspect_folder', type=str, default='inspect', help='path to annotated images')
parser.add_argument('--cut_folder', type=str, default='cut', help='path to cut images')
parser.add_argument("--model_def", type=str, default="config/yolov3.cfg", help="path to model definition file")
parser.add_argument("--weights_path", type=str, default="weights/yolov3.weights", help="path to weights file")
parser.add_argument("--class_path", type=str, default="data/coco.names", help="path to class label file")
parser.add_argument("--conf_thres", type=float, default=0.8, help="object confidence threshold")
parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression")
parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation")
parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
parser.add_argument("--checkpoint_model", type=str, help="path to checkpoint model")
return parser.parse_args()
def get_device():
return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_tensor_type():
return torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
def get_model(opt):
device = get_device()
model = Darknet(opt.model_def, img_size=opt.img_size).to(device)
if opt.weights_path.endswith(".weights"):
model.load_darknet_weights(opt.weights_path)
else:
model.load_state_dict(torch.load(opt.weights_path))
model.eval()
return model
def get_data_loader(opt):
return DataLoader(
ImageFolder(opt.image_folder, img_size=opt.img_size),
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.n_cpu,
)
def do_predictions(opt):
model = get_model(opt)
dataloader = get_data_loader(opt)
paths = []
predictions = []
print("\nPerforming object detection:")
prev_time = time.time()
tensor_type = get_tensor_type()
with torch.no_grad():
for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
input_imgs = Variable(input_imgs.type(tensor_type))
detections = model(input_imgs)
detections = non_max_suppression(detections, opt.conf_thres, opt.nms_thres)
current_time = time.time()
inference_time = datetime.timedelta(seconds=current_time - prev_time)
prev_time = current_time
print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time))
paths.extend(img_paths)
predictions.extend(detections)
return paths, predictions
def convert_predictions(paths, predictions, opt):
classes = load_classes(opt.class_path)
current_dim = opt.img_size
return [get_annotations(path, detections, current_dim, classes)
for path, detections in zip(paths, predictions) if detections is not None]
def get_output_filename(a, odir, ext, suffix=None):
path = a['path']
base_name = os.path.basename(path)
fstem, fext = os.path.splitext(base_name)
if suffix is None:
fname = f'{fstem}.{ext}'
else:
fname = f'{fstem}-{suffix}.{ext}'
opath = f'{odir}/{fname}'
return opath
def get_image(image_path):
image = Image.open(image_path).convert('RGB')
image = np.array(image)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return image
def save_images(annotations, opt):
def annotate(ipath, annots, color=[0, 0, 255], thickness=5):
image = get_image(ipath)
for box in annots['boxes']:
start, end = (int(box['x1']), int(box['y1'])), (int(box['x2']), int(box['y2']))
cv2.rectangle(image, start, end, color, thickness)
return image
os.makedirs(opt.inspect_folder, exist_ok=True)
for a in annotations:
ipath = a['path']
image = annotate(ipath, a)
opath = get_output_filename(a, opt.inspect_folder, 'jpg')
cv2.imwrite(opath, image)
print(f'saved annotated images to "{opt.inspect_folder}" directory')
def save_annotations(annotations, opt):
os.makedirs(opt.annot_folder, exist_ok=True)
for a in annotations:
fname = get_output_filename(a, opt.annot_folder, 'json')
with open(fname, 'w') as f:
dump(a, f, indent=2)
print(f'saved annotations to "{opt.annot_folder}" directory')
def save_cuts(annotations, opt):
os.makedirs(opt.cut_folder, exist_ok=True)
for a in annotations:
ipath = a['path']
im = cv2.imread(ipath)
for i, b in enumerate(a['boxes']):
x, y = int(b['x1']), int(b['y1'])
w, h = int(b['w']), int(b['h'])
cut = im[y:y+h, x:x+w]
print(f'x,y = ({x}, {y}), w,h = ({w}, {h}), im = {im.shape}, cut = {cut.shape}, path = {ipath}')
opath = get_output_filename(a, opt.cut_folder, 'jpg', i)
cv2.imwrite(opath, cut)
print(f'saved cut images to "{opt.cut_folder}" directory')
if __name__ == "__main__":
opt = parse_args(sys.argv[1:])
paths, predictions = do_predictions(opt)
annotations = convert_predictions(paths, predictions, opt)
save_annotations(annotations, opt)
save_images(annotations, opt)
save_cuts(annotations, opt)
|
11535325
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.PFTau.candidateBenchmark_cfi import *
from DQMOffline.PFTau.pfCandidateBenchmark_cfi import *
from DQMOffline.PFTau.metBenchmark_cfi import *
DQMOfflineParticleFlowSequence = cms.Sequence (
candidateBenchmark +
pfCandidateBenchmark +
metBenchmark +
matchMetBenchmark
)
|
11535331
|
import eggsample
@eggsample.hookimpl
def eggsample_add_ingredients():
spices = ["salt", "pepper"]
you_can_never_have_enough_eggs = ["egg", "egg"]
ingredients = spices + you_can_never_have_enough_eggs
return ingredients
@eggsample.hookimpl
def eggsample_prep_condiments(condiments):
condiments["mint sauce"] = 1
|
11535361
|
import random
import deepxde as dde
from baselines.data import NSdata
'''
Training deepONet using deepxde implementation.
Note that deepxde requires passing the whole dataset to Triple, which is very memory consuming.
'''
def train(config):
seed = random.randint(1, 10000)
print(f'Random seed :{seed}')
# construct dataloader
data_config = config['data']
train_set = NSdata(datapath1=data_config['datapath'],
offset=0, num=10,
nx=data_config['nx'], nt=data_config['nt'],
sub=data_config['sub'], sub_t=data_config['sub_t'],
vel=False,
t_interval=data_config['time_interval'])
val_set = NSdata(datapath1=data_config['data_val'],
offset=310, num=10,
nx=data_config['val_nx'], nt=data_config['val_nt'],
sub=data_config['val_sub'], sub_t=data_config['val_subt'],
vel=False,
t_interval=data_config['time_interval'])
# assert train_set.S == val_set.S
dim_a = train_set.S ** 2
dim_x = 3
X_train, y_train = train_set.get_operator_data()
X_val, y_val = val_set.get_operator_data()
data = dde.data.Triple(X_train=X_train, y_train=y_train, X_test=X_val, y_test=y_val)
activation = config['model']['activation']
initializer = 'Glorot normal' # He normal or Glorot normal
net = dde.maps.DeepONet([dim_a] + config['model']['layers'],
[dim_x] + config['model']['layers'],
activation,
initializer,
use_bias=True,
stacked=False)
model = dde.Model(data, net)
model.compile('adam', lr=config['train']['base_lr'])
checker = dde.callbacks.ModelCheckpoint(
'checkpoints/deeponet.ckpt', save_better_only=True, period=10,
)
model.train(epochs=config['train']['epochs'], callbacks=[checker])
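# A hypothetical config matching the keys accessed above (all values are
# placeholders, not taken from the original project):
#
#     config = {
#         'data': {'datapath': 'data/ns_train.mat', 'data_val': 'data/ns_val.mat',
#                  'nx': 64, 'nt': 64, 'sub': 1, 'sub_t': 1,
#                  'val_nx': 64, 'val_nt': 64, 'val_sub': 1, 'val_subt': 1,
#                  'time_interval': 1.0},
#         'model': {'activation': 'relu', 'layers': [128, 128, 128]},
#         'train': {'base_lr': 1e-3, 'epochs': 1000},
#     }
#     train(config)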
|
11535364
|
import glob
from moviepy.editor import ImageSequenceClip
images_dir = './results/baby30/train_latest/videos/7/'
## Change v to select the target images' name
v = 'g05_c02'
images_list = glob.glob(images_dir + "v_BabyCrawling_{}**.png".format(v))
images_list.sort()
output_file = './{}.mp4'.format(v)
fps = 1
if __name__ == '__main__':
clip = ImageSequenceClip(images_list, fps=fps)
    clip.write_videofile(output_file, fps=fps, audio=False)
|
11535440
|
import os
import sys
from lib.geometry import sparse_to_tensor, sparse_dense_matmul_batch_tile
if sys.version_info[0] == 3:
import _pickle as pkl
else:
import cPickle as pkl
body_25_reg = None
face_reg = None
def joints_body25(v):
global body_25_reg
if body_25_reg is None:
        body_25_reg = sparse_to_tensor(
            pkl.load(open(os.path.join(os.path.dirname(__file__), '../assets/J_regressor.pkl'), 'rb'),
                     encoding='iso-8859-1').T
        )
return sparse_dense_matmul_batch_tile(body_25_reg, v)
def face_landmarks(v):
global face_reg
if face_reg is None:
        face_reg = sparse_to_tensor(
            pkl.load(open(os.path.join(os.path.dirname(__file__), '../assets/face_regressor.pkl'), 'rb'),
                     encoding='iso-8859-1').T
        )
return sparse_dense_matmul_batch_tile(face_reg, v)
|
11535448
|
import os
import sys
here = sys.path[0]
sys.path.insert(0, os.path.join(here,'..'))
import logging
import logging.handlers
import threading
import time
import pytest
import binascii
import testUtils as utils
import snoopyDispatcher as snoopyDis
from coap import coap, \
coapDefines as d, \
coapResource, \
coapObjectSecurity as oscore
#============================ logging =========================================
log = logging.getLogger('conftest')
log.addHandler(utils.NullHandler())
#============================ defines =========================================
LOG_MODULES = [
'conftest',
'coap',
'coapUri',
'coapOption',
'coapMessage',
'coapResource',
'coapTransmitter',
'coapUtils',
'socketUdp',
'socketUdpReal',
'socketUdpDispatcher',
'snoopyDispatcher',
]
IPADDRESS1 = 'fc00:e968:6179::de52:7100'
IPADDRESS2 = 'fc00:db20:35b:7399::5'
RESOURCE = 'res'
DUMMYVAL = bytes([0x00,0x01,0x02])
OSCORECLIENTCONTEXT = os.path.join(here, "oscore_test_context_client.json")
OSCORESERVERCONTEXT = os.path.join(here, "oscore_test_context_server.json")
OSCOREDUMMYMASTERSECRETCONTEXT = os.path.join(here, "oscore_test_context_dummymastersecret.json")
OSCOREDUMMYSENDERIDCONTEXT = os.path.join(here, "oscore_test_context_dummysenderid.json")
#============================ fixtures ========================================
#===== logFixture
def getTestModuleName(request):
return request.module.__name__.split('.')[-1]
def getTestFunctionName(request):
return request.function.__name__.split('.')[-1]
def loggingSetup(request):
moduleName = getTestModuleName(request)
# create logHandler
logHandler = logging.handlers.RotatingFileHandler(
filename = '{0}.log'.format(moduleName),
mode = 'w',
backupCount = 5,
)
logHandler.setFormatter(
logging.Formatter(
'%(asctime)s [%(name)s:%(levelname)s] %(message)s'
)
)
# setup logging
for loggerName in [moduleName]+LOG_MODULES:
temp = logging.getLogger(loggerName)
temp.setLevel(logging.DEBUG)
temp.addHandler(logHandler)
# log
log.debug("\n\n---------- loggingSetup")
def loggingTeardown(request):
moduleName = getTestModuleName(request)
# print threads
output = []
output += ['threads:']
for t in threading.enumerate():
output += ['- {0}'.format(t.name)]
output = '\n'.join(output)
log.debug(output)
# log
log.debug("\n\n---------- loggingTeardown")
# teardown logging
for loggerName in [moduleName]+LOG_MODULES:
temp = logging.getLogger(loggerName)
temp.handler = []
@pytest.fixture(scope='module')
def logFixtureModule(request):
loggingSetup(request)
f = lambda : loggingTeardown(request)
request.addfinalizer(f)
@pytest.fixture(scope='function')
def logFixture(logFixtureModule,request):
# log
log.debug('\n\n---------- {0}'.format(getTestFunctionName(request)))
return logFixtureModule
#===== snoopyDispatcher
def snoopyTeardown(snoopy):
    snoopy.close()
@pytest.fixture(scope='module')
def snoopyDispatcher(request):
moduleName = getTestModuleName(request)
snoopy = snoopyDis.snoopyDispatcher('{0}.pcap'.format(moduleName))
    f = lambda : snoopyTeardown(snoopy)
request.addfinalizer(f)
#===== twoEndPoints
class dummyResource(coapResource.coapResource):
def __init__(self):
# initialize parent class
coapResource.coapResource.__init__(
self,
path = RESOURCE,
)
#======================== parent methods ==================================
def GET(self,options=[]):
log.debug('dummyResource GET')
respCode = d.COAP_RC_2_05_CONTENT
respOptions = []
respPayload = DUMMYVAL
time.sleep(0.500)
return (respCode,respOptions,respPayload)
def twoEndPointsTeardown(coap1,coap2):
coap1.close()
coap2.close()
time.sleep(0.500)
assert len(threading.enumerate())==1
SECURITYFIXTURE = [
False,
True,
]
@pytest.fixture(params=SECURITYFIXTURE, scope='function')
def twoEndPoints(request):
# start two coap endpoints
coap1 = coap.coap(ipAddress=IPADDRESS1, testing=True)
coap2 = coap.coap(ipAddress=IPADDRESS2, testing=True)
# create new resource
newResource = dummyResource()
if request.param == True: # if testing with security, protect the resource with security context
context = oscore.SecurityContext(OSCORESERVERCONTEXT)
# add resource - context binding with authorized methods
newResource.addSecurityBinding((context, d.METHOD_ALL))
# install resource on coap1
coap1.addResource(newResource)
f = lambda: twoEndPointsTeardown(coap1, coap2)
request.addfinalizer(f)
return (coap1, coap2, request.param)
#===== confirmableFixture
CONFIRMABLEFIXTURE = [
True,
False,
]
@pytest.fixture(params=CONFIRMABLEFIXTURE)
def confirmableFixture(request):
return request.param
|
11535465
|
MONGO_ADDRESS = "192.168.10.1:27017"
MONGO_DB_NAME = "db"
RFCLIENT_RFSERVER_CHANNEL = "rfclient<->rfserver"
RFSERVER_RFPROXY_CHANNEL = "rfserver<->rfproxy"
RFMONITOR_RFPROXY_CHANNEL = "rfmonitor<->rfproxy"
RFTABLE_NAME = "rftable"
RFCONFIG_NAME = "rfconfig"
RFISL_NAME = "rfisl"
RFISLCONF_NAME = "rfislconf"
RFSERVER_ID = "rfserver"
RFPROXY_ID = "rfproxy"
RFMONITOR_ID = "rfmonitor"
DEFAULT_RFCLIENT_INTERFACE = "eth0"
RFVS_PREFIX = 0x72667673
is_rfvs = lambda dp_id: not ((dp_id >> 32) ^ RFVS_PREFIX)
RF_ETH_PROTO = 0x0A0A # RF ethernet protocol
VLAN_HEADER_LEN = 4
ETH_HEADER_LEN = 14
ETH_CRC_LEN = 4
ETH_PAYLOAD_MAX = 1500
ETH_TOTAL_MAX = (ETH_HEADER_LEN + ETH_PAYLOAD_MAX + ETH_CRC_LEN)
RF_MAX_PACKET_SIZE = (VLAN_HEADER_LEN + ETH_TOTAL_MAX)
MATCH_L2 = True
DC_DROP_ALL = 0 # Drop all incoming packets
DC_CLEAR_FLOW_TABLE = 1 # Clear flow table
DC_VM_INFO = 2 # Flow to communicate two linked VMs
DC_RIPV2 = 3 # RIPv2 protocol
DC_OSPF = 4 # OSPF protocol
DC_BGP_PASSIVE = 5 # BGP protocol
DC_BGP_ACTIVE = 6 # BGP protocol
DC_ARP = 7 # ARP protocol
DC_ICMP = 8 # ICMP protocol
DC_LDP_PASSIVE = 9 # LDP protocol
DC_LDP_ACTIVE = 10 # LDP protocol
DC_ICMPV6 = 11 # ICMPv6 protocol
DC_ALL = 255 # Send all traffic to the controller
RMT_ADD = 0 # Add flow to datapath
RMT_DELETE = 1 # Remove flow from datapath
#RMT_MODIFY = 2 # Modify existing flow (Unimplemented)
PC_MAP = 0
PC_RESET = 1
# Format 12-digit hex ID
format_id = lambda dp_id: hex(dp_id).rstrip("L")
netmask_prefix = lambda a: sum([bin(int(x)).count("1") for x in a.split(".", 4)])
cidr_to_mask = lambda a: ((1 << a) - 1) << (32 - a)
ETHERTYPE_IP = 0x0800
ETHERTYPE_ARP = 0x0806
ETHERTYPE_IPV6 = 0x86DD
IPPROTO_ICMP = 0x01
IPPROTO_TCP = 0x06
IPPROTO_UDP = 0x11
IPPROTO_OSPF = 0x59
IPPROTO_ICMPV6 = 0x3A
IPADDR_RIPv2 = '172.16.17.32'
IPV4_MASK_EXACT = '255.255.255.255'
IPV6_MASK_EXACT = 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b'
TPORT_BGP = 0x00B3
TPORT_LDP = 0x286
OFPP_CONTROLLER = 0xFFFFFFFD
PRIORITY_BAND = 0xA
PRIORITY_LOWEST = 0x0000
PRIORITY_LOW = 0x4010
PRIORITY_HIGH = 0x8020
PRIORITY_HIGHEST = 0xC030
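# Quick sanity checks for the helper lambdas above:
#   netmask_prefix('255.255.255.0') == 24
#   cidr_to_mask(24) == 0xFFFFFF00
#   is_rfvs(0x7266767300000001) is True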
|
11535468
|
import io
from pathlib import Path
from PIL import Image
from starwhale.api.dataset import BuildExecutor
class PennFudanPedSlicer(BuildExecutor):
def iter_data_slice(self, path: str):
img = Image.open(path).convert("RGB")
img_byte_arr = io.BytesIO()
img.save(img_byte_arr, format='PNG')
img_byte_arr = img_byte_arr.getvalue()
return [img_byte_arr]
def iter_label_slice(self, path: str):
img = Image.open(path)
img_byte_arr = io.BytesIO()
img.save(img_byte_arr, format='PNG')
return [img_byte_arr.getvalue()]
if __name__ == "__main__":
executor = PennFudanPedSlicer(
data_dir=Path("../data"),
data_filter="PNGImages/*6.png", label_filter="PedMasks/*6_mask.png",
batch=1,
alignment_bytes_size=4 * 1024,
volume_bytes_size=4 * 1024 * 1024,
)
executor.make_swds()
|
11535478
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("test")
process.ecalCompactTrigPrimProducerTest = cms.EDAnalyzer("EcalCompactTrigPrimProducerTest",
#tpDigiColl = cms.InputTag("simEcalTriggerPrimitiveDigis"),
tpDigiColl = cms.InputTag("ecalDigis:EcalTriggerPrimitives"),
tpRecColl = cms.InputTag("ecalCompactTrigPrim"),
tpSkimColl = cms.InputTag("ecalTPSkim")
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:in.root')
)
#if TP rec hits not in input data sample:
#process.load("RecoLocalCalo.EcalRecProducers.ecalCompactTrigPrim_cfi")
# #process.ecalCompactTrigPrim.inColl = cms.InputTag("simEcalTriggerPrimitiveDigis")
#process.p = cms.Path(process.ecalCompactTrigPrim*process.ecalCompactTrigPrimProducerTest)
#else
process.p = cms.Path(process.ecalCompactTrigPrimProducerTest)
|
11535493
|
import orjson
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.files.images import ImageFile
from django.db.models import TextField, ForeignKey, FileField, ImageField, Field
from django.db.models.fields.files import FileDescriptor, FieldFile
from django.db.models.query_utils import DeferredAttribute
from vstutils.utils import raise_context_decorator_with_default
"""
These model fields are used in :class:`vstutils.api.serializers.VstSerializer`
to form the model_field_class: serializer_field_class mapping.
"""
class MultipleFieldFile(FieldFile):
"""
Subclasses :class:`django.db.models.fields.files.FieldFile`. Provides :meth:`MultipleFieldFile.save`
and :meth:`MultipleFieldFile.delete` to manipulate the underlying file, as well as update the
associated model instance.
"""
def __init__(self, instance, field, name):
super(MultipleFieldFile, self).__init__(instance, field, name)
self._old_name = self.name
def _set_attr_value(self):
"""
Set new value of file to object attr.
"""
setattr(
self.instance,
self.field.attname,
[
self.name if file.name == self._old_name else file
for file in getattr(self.instance, self.field.attname)
]
)
def _clear_attr_value(self):
"""
Pop None values from file list.
"""
setattr(
self.instance,
self.field.attname,
[
file
for file in getattr(self.instance, self.field.attname) if file.name != self.name
]
)
def save(self, name, content, save=True):
"""
Save changes in file to storage and to object attr.
"""
self._old_name = self.name
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content, max_length=self.field.max_length)
self._set_attr_value()
self._committed = True
if save:
self.instance.save()
def delete(self, save=True):
"""
Delete file from storage and from object attr.
"""
if not self:
return # nocv
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self._clear_attr_value()
self._committed = False
if save:
self.instance.save()
class MultipleFileDescriptor(FileDescriptor):
"""
Subclasses :class:`django.db.models.fields.files.FileDescriptor` to handle list of files.
Return a list of :class:`MultipleFieldFile` when accessed so you can write code like:
.. sourcecode:: python
from myapp.models import MyModel
instance = MyModel.objects.get(pk=1)
instance.files[0].size
"""
def get_file(self, file, instance):
"""
        Always return a valid attr_class object. For details on the logic, see
        :meth:`django.db.models.fields.files.FileDescriptor.__get__`.
"""
if isinstance(file, str) or file is None:
attr = self.field.attr_class(instance, self.field, file)
file = attr
elif isinstance(file, SimpleUploadedFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False # pylint: disable=W0212 protected-access
file = file_copy
elif isinstance(file, MultipleFieldFile) and instance != file.instance: # nocv
file.instance = instance
file.field = self.field
file.storage = self.field.storage
return file
def __get__(self, instance, cls=None):
"""
Return list of MultipleFieldFile at all times.
"""
if instance is None:
return self # nocv
instance.__dict__[self.field.attname] = [
self.get_file(file, instance)
for file in DeferredAttribute.__get__(self, instance, cls) or []
]
return instance.__dict__[self.field.attname]
class MultipleFileMixin:
"""
Mixin suited to use with :class:`django.db.models.fields.files.FieldFile` to transform it to
a Field with list of files.
"""
def __init__(self, **kwargs):
kwargs['max_length'] = None
super().__init__(**kwargs)
def pre_save(self, model_instance, add):
"""
Call .save() method on every file in list
"""
files = getattr(model_instance, self.attname)
for file in files:
if file and not file._committed: # pylint: disable=W0212 protected-access
file.save(file.name, file.file, save=False)
return files
def get_prep_value(self, value):
"""
Prepare value for database insertion
"""
value = Field.get_prep_value(self, value)
if value is None:
return value
return orjson.dumps(list(map(str, value))).decode('utf-8')
@raise_context_decorator_with_default(default=[])
def from_db_value(self, value, expression, connection):
"""
Transform db value to an internal value
"""
if value:
return orjson.loads(value)
return value # nocv
def get_internal_type(self):
return "TextField"
class MultipleFileField(MultipleFileMixin, FileField):
"""
Subclasses :class:`django.db.models.fields.files.FileField`.
Field for storing a list of Storage-kept files. All args passed to FileField.
"""
attr_class = MultipleFieldFile
descriptor_class = MultipleFileDescriptor
description = "List of Files"
class MultipleImageFieldFile(ImageFile, MultipleFieldFile):
"""
Subclasses :class:`MultipleFieldFile` and :class:`ImageFile mixin`,
handles deleting _dimensions_cache when file is deleted.
"""
def delete(self, save=True):
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super().delete(save)
class MultipleImageField(MultipleFileMixin, ImageField):
"""
Field for storing a list of storage-kept images. All args are passed to
:class:`django.db.models.fields.files.ImageField`, except height_field and width_field,
they are not currently implemented.
"""
attr_class = MultipleImageFieldFile
descriptor_class = MultipleFileDescriptor
description = "List of Images"
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
pass
class NamedBinaryFileInJSONField(TextField):
"""
Extends :class:`django.db.models.TextField`. Use this field in :class:`vstutils.models.BModel` to get
`vstutils.api.NamedBinaryFileInJSONField` in serializer.
"""
class NamedBinaryImageInJSONField(NamedBinaryFileInJSONField):
"""
Extends :class:`django.db.models.TextField`. Use this field in :class:`vstutils.models.BModel` to get
`vstutils.api.NamedBinaryImageInJSONField` in serializer.
"""
class MultipleNamedBinaryFileInJSONField(TextField):
"""
Extends :class:`django.db.models.TextField`. Use this field in :class:`vstutils.models.BModel` to get
`vstutils.api.MultipleNamedBinaryFileInJSONField` in serializer.
"""
class MultipleNamedBinaryImageInJSONField(MultipleNamedBinaryFileInJSONField):
"""
Extends :class:`django.db.models.TextField`. Use this field in :class:`vstutils.models.BModel` to get
    `vstutils.api.MultipleNamedBinaryImageInJSONField` in serializer.
"""
class FkModelField(ForeignKey):
"""
Extends :class:`django.db.models.ForeignKey`. Use this field in :class:`vstutils.models.BModel` to get
    `vstutils.api.FkModelField` in serializer. To set the Foreign Key relation, set the `to` argument
    to a string path to the model or to a Model class, as in :class:`django.db.models.ForeignKey`.
"""
|
11535499
|
import os
from setuptools import setup
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(THIS_DIR, "requirements.in")) as f:
    install_requires = [line.strip() for line in f if line.strip() and line[0] not in "#-"]
with open(os.path.join(THIS_DIR, "test-requirements.in")) as f:
    test_requires = [line.strip() for line in f if line.strip() and line[0] not in "#-"]
with open(os.path.join(THIS_DIR, "README.md")) as f:
long_description = f.read()
setup(
name="geomeppy",
packages=["geomeppy", "geomeppy.geom", "geomeppy.io", "tests"],
version="0.11.8",
description="Geometry editing for E+ idf files",
long_description=long_description,
long_description_content_type="text/markdown",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/jamiebull1/geomeppy",
download_url="https://github.com/jamiebull1/geomeppy/tarball/v0.11.8",
license="MIT License",
keywords=["EnergyPlus", "geometry", "building performance simulation"],
platforms="any",
install_requires=install_requires,
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Development Status :: 3 - Alpha",
"Natural Language :: English",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
],
    extras_require={
        # mypy for static type checking, black for code formatting
        ':python_version>="3.7"': ["mypy", "black"],
        "testing": test_requires,
    },
include_package_data=True,
)
|
11535520
|
import functools
import inspect
import six
from doubles.call_count_accumulator import CallCountAccumulator
from doubles.exceptions import MockExpectationError, VerifyingBuiltinDoubleArgumentError
import doubles.lifecycle
from doubles.verification import verify_arguments
_any = object()
def _get_future():
try:
from concurrent.futures import Future
except ImportError:
try:
from tornado.concurrent import Future
except ImportError:
raise ImportError(
'Error Importing Future, Could not find concurrent.futures or tornado.concurrent',
)
return Future()
def verify_count_is_non_negative(func):
@functools.wraps(func)
def inner(self, arg):
if arg < 0:
            raise TypeError(func.__name__ + ' requires one non-negative integer argument')
return func(self, arg)
return inner
def check_func_takes_args(func):
if six.PY3:
arg_spec = inspect.getfullargspec(func)
return any([arg_spec.args, arg_spec.varargs, arg_spec.varkw, arg_spec.defaults])
else:
arg_spec = inspect.getargspec(func)
return any([arg_spec.args, arg_spec.varargs, arg_spec.keywords, arg_spec.defaults])
def build_argument_repr_string(args, kwargs):
args = [repr(x) for x in args]
kwargs = ['{}={!r}'.format(k, v) for k, v in kwargs.items()]
return '({})'.format(', '.join(args + kwargs))
class Allowance(object):
"""An individual method allowance (stub)."""
def __init__(self, target, method_name, caller):
"""
:param Target target: The object owning the method to stub.
:param str method_name: The name of the method to stub.
"""
self._target = target
self._method_name = method_name
self._caller = caller
self.args = _any
self.kwargs = _any
self._custom_matcher = None
self._is_satisfied = True
self._call_counter = CallCountAccumulator()
self._return_value = lambda *args, **kwargs: None
def and_raise(self, exception, *args, **kwargs):
"""Causes the double to raise the provided exception when called.
If provided, additional arguments (positional and keyword) passed to
`and_raise` are used in the exception instantiation.
:param Exception exception: The exception to raise.
"""
def proxy_exception(*proxy_args, **proxy_kwargs):
raise exception
self._return_value = proxy_exception
return self
def and_raise_future(self, exception):
"""Similar to `and_raise` but the doubled method returns a future.
:param Exception exception: The exception to raise.
"""
future = _get_future()
future.set_exception(exception)
return self.and_return(future)
def and_return_future(self, *return_values):
"""Similar to `and_return` but the doubled method returns a future.
        :param object return_values: The values the double will return when called.
"""
futures = []
for value in return_values:
future = _get_future()
future.set_result(value)
futures.append(future)
return self.and_return(*futures)
def and_return(self, *return_values):
"""Set a return value for an allowance
Causes the double to return the provided values in order. If multiple
values are provided, they are returned one at a time in sequence as the double is called.
If the double is called more times than there are return values, it should continue to
return the last value in the list.
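        A minimal sketch (``subject`` is a hypothetical doubled object)::
            allow(subject).method.and_return(1, 2)
            subject.method()  # => 1
            subject.method()  # => 2
            subject.method()  # => 2 (the last value keeps repeating)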
        :param object return_values: The values the double will return when called.
"""
if not return_values:
raise TypeError('and_return() expected at least 1 return value')
return_values = list(return_values)
final_value = return_values.pop()
self.and_return_result_of(
lambda: return_values.pop(0) if return_values else final_value
)
return self
def and_return_result_of(self, return_value):
""" Causes the double to return the result of calling the provided value.
:param return_value: A callable that will be invoked to determine the double's return value.
:type return_value: any callable object
"""
if not check_func_takes_args(return_value):
self._return_value = lambda *args, **kwargs: return_value()
else:
self._return_value = return_value
return self
def is_satisfied(self):
"""Returns a boolean indicating whether or not the double has been satisfied.
Stubs are always satisfied, but mocks are only satisfied if they've been
called as was declared.
:return: Whether or not the double is satisfied.
:rtype: bool
"""
return self._is_satisfied
def with_args(self, *args, **kwargs):
"""Declares that the double can only be called with the provided arguments.
:param args: Any positional arguments required for invocation.
:param kwargs: Any keyword arguments required for invocation.
"""
self.args = args
self.kwargs = kwargs
self.verify_arguments()
return self
def with_args_validator(self, matching_function):
"""Define a custom function for testing arguments
:param func matching_function: The function used to test arguments passed to the stub.
"""
self.args = None
self.kwargs = None
self._custom_matcher = matching_function
return self
def __call__(self, *args, **kwargs):
"""A short hand syntax for with_args
Allows callers to do:
allow(module).foo.with_args(1, 2)
With:
allow(module).foo(1, 2)
:param args: Any positional arguments required for invocation.
:param kwargs: Any keyword arguments required for invocation.
"""
return self.with_args(*args, **kwargs)
def with_no_args(self):
"""Declares that the double can only be called with no arguments."""
self.args = ()
self.kwargs = {}
self.verify_arguments()
return self
def satisfy_any_args_match(self):
"""Returns a boolean indicating whether or not the stub will accept arbitrary arguments.
This will be true unless the user has specified otherwise using ``with_args`` or
``with_no_args``.
:return: Whether or not the stub accepts arbitrary arguments.
:rtype: bool
"""
return self.args is _any and self.kwargs is _any
def satisfy_exact_match(self, args, kwargs):
"""Returns a boolean indicating whether or not the stub will accept the provided arguments.
:return: Whether or not the stub accepts the provided arguments.
:rtype: bool
"""
if self.args is None and self.kwargs is None:
return False
elif self.args is _any and self.kwargs is _any:
return True
elif args == self.args and kwargs == self.kwargs:
return True
elif len(args) != len(self.args) or len(kwargs) != len(self.kwargs):
return False
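        # Compare in both directions so custom matchers that override __eq__
        # are honored whether they appear on the expected or the actual side.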
if not all(x == y or y == x for x, y in zip(args, self.args)):
return False
for key, value in self.kwargs.items():
if key not in kwargs:
return False
elif not (kwargs[key] == value or value == kwargs[key]):
return False
return True
def satisfy_custom_matcher(self, args, kwargs):
"""Return a boolean indicating if the args satisfy the stub
:return: Whether or not the stub accepts the provided arguments.
:rtype: bool
"""
if not self._custom_matcher:
return False
try:
return self._custom_matcher(*args, **kwargs)
except Exception:
return False
def return_value(self, *args, **kwargs):
"""Extracts the real value to be returned from the wrapping callable.
:return: The value the double should return when called.
"""
self._called()
return self._return_value(*args, **kwargs)
def verify_arguments(self, args=None, kwargs=None):
"""Ensures that the arguments specified match the signature of the real method.
:raise: ``VerifyingDoubleError`` if the arguments do not match.
"""
args = self.args if args is None else args
kwargs = self.kwargs if kwargs is None else kwargs
try:
verify_arguments(self._target, self._method_name, args, kwargs)
except VerifyingBuiltinDoubleArgumentError:
            if not doubles.lifecycle.ignore_builtin_verification():
                raise
@verify_count_is_non_negative
def exactly(self, n):
"""Set an exact call count allowance
:param integer n:
"""
self._call_counter.set_exact(n)
return self
@verify_count_is_non_negative
def at_least(self, n):
"""Set a minimum call count allowance
:param integer n:
"""
self._call_counter.set_minimum(n)
return self
@verify_count_is_non_negative
def at_most(self, n):
"""Set a maximum call count allowance
:param integer n:
"""
self._call_counter.set_maximum(n)
return self
def never(self):
"""Set an expected call count allowance of 0"""
self.exactly(0)
return self
def once(self):
"""Set an expected call count allowance of 1"""
self.exactly(1)
return self
def twice(self):
"""Set an expected call count allowance of 2"""
self.exactly(2)
return self
@property
def times(self):
return self
time = times
def _called(self):
"""Indicate that the allowance was called
        :raise: ``MockExpectationError`` if the allowance has been called too many times.
"""
if self._call_counter.called().has_too_many_calls():
self.raise_failure_exception()
def raise_failure_exception(self, expect_or_allow='Allowed'):
"""Raises a ``MockExpectationError`` with a useful message.
:raise: ``MockExpectationError``
"""
raise MockExpectationError(
"{} '{}' to be called {}on {!r} with {}, but was not. ({}:{})".format(
expect_or_allow,
self._method_name,
self._call_counter.error_string(),
self._target.obj,
self._expected_argument_string(),
self._caller.filename,
self._caller.lineno,
)
)
def _expected_argument_string(self):
"""Generates a string describing what arguments the double expected.
:return: A string describing expected arguments.
:rtype: str
"""
if self.args is _any and self.kwargs is _any:
return 'any args'
elif self._custom_matcher:
return "custom matcher: '{}'".format(self._custom_matcher.__name__)
else:
return build_argument_repr_string(self.args, self.kwargs)
|
11535524
|
import os
from os.path import join, abspath, dirname, splitext
import yaml
from library.api.parse import TParse
from library.api.security import Validation
current_path = dirname(abspath(__file__))
yml_json = {}
try:
validate_yml_path = join(current_path, 'validations')
for fi in os.listdir(validate_yml_path):
if splitext(fi)[-1] != '.yml':
continue
with open(join(validate_yml_path, fi), 'rb') as f:
yml_json.update(yaml.safe_load(f.read()))
except yaml.YAMLError as e:
print(e)
v = Validation(yml_json)
validation = v.validation
p = TParse(yml_json)
parse_list_args = p.parse_list_args
parse_list_args2 = p.parse_list_args2
parse_json_form = p.parse_json_form
parse_pwd = p.parse_pwd
|
11535561
|
from utility import *
import glob
import pandas as pd
import os
def graph_construct(outputdir):
path0 = os.path.join(os.getcwd(), outputdir)
#' import processed data
files1 = glob.glob(path0 + "/count_data/*.csv")
files1.sort()
count_list = []
for df in files1:
print(df)
count_list.append(pd.read_csv(df, index_col=0))
files2 = glob.glob(path0 + "/norm_data/*.csv")
files2.sort()
norm_list = []
for df in files2:
print(df)
norm_list.append(pd.read_csv(df, index_col=0))
files3 = glob.glob(path0 + "/scale_data/*.csv")
files3.sort()
scale_list = []
for df in files3:
print(df)
scale_list.append(pd.read_csv(df, index_col=0))
files4 = glob.glob(path0 + "/label_data/*.csv")
files4.sort()
label_list = []
for df in files4:
print(df)
label_list.append(pd.read_csv(df, index_col=0))
fpath = os.path.join(path0, 'sel_features.csv')
features = pd.read_csv(fpath, index_col=False).values.flatten()
#' graph construction
import itertools
N = len(count_list)
    if N == 1:
combine = pd.Series([(0, 0)])
else:
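        # enumerate all ordered dataset pairs, then keep only the unordered
        # pairs (i, j) with i < j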
        combin = list(itertools.product(range(N), range(N)))
        index = [i for i, pair in enumerate(combin) if pair[0] < pair[1]]
combine = pd.Series(combin)[index]
pairss1 = generate_graph(count_list=count_list,
norm_list=norm_list,
scale_list=scale_list,
features=features,
combine=combine, k_neighbor=10)
count_list2 = [count_list[1], count_list[1]]
norm_list2 = [norm_list[1], norm_list[1]]
scale_list2 = [scale_list[1], scale_list[1]]
pairss2 = generate_graph(count_list=count_list2,
norm_list=norm_list2,
scale_list=scale_list2,
features=features,
combine=combine,k_neighbor=10)
#'@param graph1: inter-dataset graph
graph1 = pairss1[0].iloc[:, 0:2].reset_index()
graph1.to_csv('./input/inter_graph.csv')
#'@param graph2: intra-dataset graph
graph2 = pairss2[0].iloc[:, 0:2].reset_index()
graph2.to_csv('./input/intra_graph.csv')
label1 = label_list[0]
label1.to_csv('./input/Label1.csv', index=False)
label2 = label_list[1]
label2.to_csv('./input/Label2.csv', index=False)
|
11535564
|
import argparse
import json
import os
import sys
import abc
class ArgParser(object):
def __init__(self, model_name):
self.model_name = model_name
parser = argparse.ArgumentParser()
# parser from main args or load defaults
self.parser = self._build_parser(parser)
self.args, self.unknown_args = self.parser.parse_known_args()
# check if hyper_params pre defined from file
args, args_path = self.load_from_file(self.args.base_train_dir, model_name)
if args is None:
print("Pre defined hyper params not found. Use main args and defaults")
else:
print(f"Loaded pre defined hyper params from: {args_path}")
self.args = args
self.unknown_args = []
@abc.abstractmethod
def _add_model_arguments(self, parser):
pass
def _build_parser(self, parser):
# dataset related arguments
parser.add_argument("--base_train_dir", type=str, default=".",
help="Directory to write checkpoints and training history."
"default will save to current (working) directory")
parser.add_argument("--data_dir", type=str, default=None,
help="Dataset dir")
parser.add_argument("--dataset_name", type=str, default=None,
help="Dataset name to work with. E.g: CUB,SUN,AWA1,AWA2")
parser.add_argument("--transfer_task", type=str, default="DRAGON",
help="One of [ZSL,GZSL,GFSL,DRAGON]")
parser.add_argument("--train_dist", type=str,
default="dragon",
help='Distribution function for train set - dragon or fewshot')
parser.add_argument("--train_max_fs_samples", type=int,
default=1,
help='Number of samples for few shot classes (for train_dist=fewshot only)')
parser.add_argument("--test_mode", action='store_true',
help="If test mode, will train on train+val and test on testset")
parser.add_argument("--use_xian_normed_class_description", type=int, default=0,
help="Use Xian (CVPR 2017) class description. This is a "
"L2 normalized version of the mean attribute values"
"that are provided with the datasets. "
"This can **not** be used with LAGO.")
parser.add_argument("--sort_attr_by_names", type=int, default=0,
help="If this flag is set, then we sort attributes by "
"names. The underlying assumtion is that the naming"
" convention is 'group_name::attribute_name'. "
"Therefore enabling this sort will cluster together"
"attributes from the same group. This is needed"
"because LAGO with Semantic groups requires that "
"kind of name clustering.")
# training related arguments
parser.add_argument("--initial_learning_rate", type=float,
default=3e-4,
help='Initial learning rate')
parser.add_argument("--batch_size", type=int, default=64,
help='Batch size')
parser.add_argument("--max_epochs", type=int, default=100,
help='Max number of epochs to train')
parser.add_argument("--patience", type=int, default=50,
help="Early stopping: number of epochs with no improvement after which training "
"will be stopped.")
parser.add_argument("--min_delta", type=float, default=1e-7,
help='minimum change in the monitored quantity to qualify as an improvement')
parser.add_argument("--verbose", type=int, default=1, help='Verbose')
# default lago model
parser.add_argument("--att_model_name", type=str, default='LAGO',
help="Attributes model name. \in {'LAGO', 'ESZSL'}.")
parser.add_argument("--att_model_variant", type=str, default="LAGO_SemanticSoft",
help="The model variant \in { 'LAGO_SemanticSoft', "
"'Singletons', 'LAGO_KSoft', None }. "
"For LAGO-SemanticHARD choose LAGO_SemanticSoft"
"and set --SG_trainable=0")
parser.add_argument("--LG_norm_groups_to_1", type=int, default=1,
help="Normalize the semantic description in each "
"semantic group to sum to 1, in order to comply "
"with the mutual-exclusion approximation. "
"This is crucial for the LAGO_Semantic* variants."
"See IMPLEMENTATION AND TRAINING DETAILS on LAGO paper.")
# model related arguments
parser = self._add_model_arguments(parser)
return parser
@staticmethod
def load_from_file(dir_path, model_name):
path = os.path.join(dir_path, f"{model_name}_hyper_params.json")
try:
with open(os.path.join(path), "r") as hp_f:
return argparse.Namespace(**json.load(fp=hp_f)), path
except:
return None, path
@staticmethod
def save_to_file(args, dir_path, model_name):
with open(os.path.join(dir_path, f"{model_name}_hyper_params.json"), "w") as hp_f:
json.dump(vars(args), fp=hp_f)
class VisualExpertParser(ArgParser):
def __init__(self, model_name):
super().__init__(model_name)
def _add_model_arguments(self, parser):
parser.add_argument("--l2", type=float, default=0.0001,
help='L2 regularization value to use on visual expert')
return parser
class LagoExpertParser(ArgParser):
def __init__(self, model_name):
super().__init__(model_name)
def _test_args(self, args):
if args.SG_psi > 0:
# Allow only LAGO_SemanticSoft for semantic prior
assert (args.att_model_variant == 'LAGO_SemanticSoft')
if args.LG_norm_groups_to_1:
assert ('Semantic' in args.att_model_variant)
if 'LAGO' in args.att_model_name:
assert (args.use_xian_normed_class_description == 0)
# Default computed values
if args.SG_seed is None:
args.SG_seed = args.repeat + 1000
vars(args)['inference_noise_seed'] = args.repeat + 1001
def _add_model_arguments(self, parser):
# Loss Regularizations related arguments
parser.add_argument("--LG_beta", type=float, default=0,
help="hyper-param: beta")
parser.add_argument("--LG_lambda", type=float, default=1e-7,
help="hyper-param: gamma")
parser.add_argument("--SG_psi", type=float, default=0,
help="hyper-param: Psi, the regularization coefficient "
"for Semantic prior on Gamma.")
parser.add_argument("--attributes_weight", type=float, default=0,
help="Attributes weight in loss function.")
parser.add_argument("--LG_uniformPa", type=int, default=1,
help="LAGO: Use a uniform Prior for Pa")
parser.add_argument("--LG_true_compl_x", type=int, default=1,
help="LAGO: Set P(complementary attrib.|x)=1")
parser.add_argument("--orth_init_gain", type=float, default=0.1,
help="Gain for keras initializers.Orthogonal: "
"We didn't tune this hyper param. Except once, on "
"a very preliminary experiment.")
# train related arguments
parser.add_argument("--SG_trainable", type=int, default=0,
help="Set SoftGroup weights to be trainable.")
# more params
parser.add_argument("--SG_gain", type=float, default=3,
help="hyper-param: Softmax kernel gain with SoftGroups")
parser.add_argument("--SG_num_K", type=int, default=-1,
help="hyper-param: Number of groups for LAGO_KSoft")
parser.add_argument("--SG_seed", type=int, default=None,
help="Random seed for Gamma matrix when using LAGO_KSoft.")
return parser
class FusionModuleParser(ArgParser):
def __init__(self, model_name, *expert_names):
super().__init__(model_name)
# load hyper params of experts
combined_args = {}
for expert_name in expert_names:
            if not self.args.test_mode:
curr_path = os.path.join(self.args.base_train_dir, expert_name)
else:
curr_path = os.path.join(self.args.base_train_dir, expert_name, "test")
expert_hp, hp_path = self.load_from_file(curr_path, expert_name)
if expert_hp is None:
print(f"Could not find pretrained expert hyper params in {hp_path}")
                sys.exit(1)
print(f"Loaded pre defined expert hyper params from {hp_path}")
combined_args = {**combined_args, **vars(expert_hp)}
# merge params to the combined args
self.args = argparse.Namespace(**{**combined_args, **vars(self.args)})
def _add_model_arguments(self, parser):
parser.add_argument("--topk", type=int,
default=-1,
help='K value when applying topK on experts outputs. -1 means take '
'experts output without any change')
parser.add_argument("--sort_preds", type=int,
default=0, # False
help='Should experts outputs be sorted')
parser.add_argument("--freeze_experts", type=int, default=0, # False
help='Should freeze experts model')
parser.add_argument("--nparams", type=int, default=4,
help='Params number to learn')
return parser
UserArgs = None
def visual_args():
global UserArgs
UserArgs = VisualExpertParser("Visual").args
def lago_args():
global UserArgs
UserArgs = LagoExpertParser("LAGO").args
def fusion_args():
global UserArgs
UserArgs = FusionModuleParser("Fusion", "Visual", "LAGO").args
|
11535591
|
from flasgger import swag_from
from flask import jsonify
from app.doc.notice.rule import RULE_LIST_GET, RULE_GET
from app.model import RuleModel
from app.view.base_resource import NoticeResource
class RuleList(NoticeResource):
@swag_from(RULE_LIST_GET)
def get(self):
return jsonify(RuleModel.get_rule_list())
class Rule(NoticeResource):
@swag_from(RULE_GET)
def get(self, rule_id):
return jsonify(RuleModel.get_rule(rule_id))
|
11535655
|
import numpy as np
from typing import List, Optional
from itertools import product
def binary_data_ids(data: np.ndarray) -> np.ndarray:
"""
Given some binary data, compute the id of each sample.
This is done by converting the binary features into a decimal number.
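    For example, a sample ``[1, 0, 1]`` is assigned id 5 (binary ``101``).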
:param data: The data.
:return: The id of each data sample.
"""
return np.dot(data, 1 << np.arange(data.shape[-1] - 1, -1, -1)).astype(np.int64)
def compute_mpe_ids(complete_mpe_data: np.ndarray, complete_lls: np.ndarray) -> List[int]:
"""
Compute the maximum at posterior samples ids.
:param complete_mpe_data: The complete posterior data.
:param complete_lls: The log-likelihoods of each data sample.
:return: A list of maximum at posterior sample ids.
"""
sample_ids = binary_data_ids(complete_mpe_data)
sample_lls = complete_lls[sample_ids]
mpe_idx = np.argmax(sample_lls, axis=1)
mpe_sample_ids = sample_ids[np.arange(len(sample_ids)), mpe_idx]
return mpe_sample_ids.tolist()
def resample_data(data: np.ndarray, n_samples: int, random_state: Optional[np.random.RandomState] = None) -> np.ndarray:
"""
Resample data with replacement.
:param data: The original data.
:param n_samples: The number of samples to extract.
:param random_state: The random state.
:return: The resampled data.
"""
if random_state is None:
random_state = np.random.RandomState(42)
return data[random_state.choice(len(data), size=n_samples, replace=True)]
def marginalize_data(data: np.ndarray, mar_features: List[int]) -> np.ndarray:
"""
Marginalize data, given a list of features to marginalize.
:param data: The original data.
:param mar_features: A list of features to marginalize.
    :return: The marginalized data (using NaNs).
"""
data = data.astype(np.float32, copy=True)
data[:, mar_features] = np.nan
return data
def random_marginalize_data(data: np.ndarray, p: float, random_state: Optional[np.random.RandomState] = None) -> np.ndarray:
"""
Marginalize data sample-wise randomly.
:param data: The original data.
    :param p: The probability of marginalizing a feature value of a single sample.
:param random_state: The random state.
:return: The marginalized data (using NaNs).
"""
if random_state is None:
random_state = np.random.RandomState(42)
data = data.astype(np.float32, copy=True)
data[random_state.rand(*data.shape) <= p] = np.nan
return data
def complete_binary_data(n_features: int) -> np.ndarray:
"""
Build a data set with complete assignments of binary features.
:param n_features: The number of features.
:return: A data array with shape (2 ** n_features, n_features).
"""
return np.array([list(i) for i in product([0, 1], repeat=n_features)], dtype=np.float32)
def complete_marginalized_binary_data(n_features: int, mar_features: List[int]) -> np.ndarray:
"""
Build a data set with complete assignments of binary features, with marginalized features.
:param n_features: The number of features.
:param mar_features: A list of features to marginalize.
:return: A marginalized data array with shape (2 ** n_features, n_features).
"""
evi_features = [i for i in range(n_features) if i not in mar_features]
data = np.empty(shape=(2 ** len(evi_features), n_features), dtype=np.float32)
data[:, evi_features] = complete_binary_data(len(evi_features))
data[:, mar_features] = np.nan
return data
def complete_posterior_binary_data(n_features: int, mar_features: List[int]) -> np.ndarray:
"""
Build a data set with complete assignments of binary features, having another dimension
for all the possible assignments of marginalized features.
:param n_features: The number of features.
:param mar_features: A list of features for which consider all the possible assignment combinations.
:return: A data array with shape (2 ** (n_features - n_mar_features), 2 ** n_mar_features, n_features).
"""
evi_features = [i for i in range(n_features) if i not in mar_features]
data = np.empty(shape=(2 ** len(evi_features), 2 ** len(mar_features), n_features), dtype=np.float32)
data[:, :, evi_features] = np.expand_dims(complete_binary_data(len(evi_features)), axis=1)
data[:, :, mar_features] = complete_binary_data(len(mar_features))
return data
|
11535663
|
import xadmin
from readings.models import Site
from readings.models import Reading
class SiteAdmin(object):
"""外部站点"""
list_display = ["name", "url"]
class ReadingAdmin(object):
"""文章"""
list_display = ["name", "source", "add_time"]
xadmin.site.register(Site, SiteAdmin)
xadmin.site.register(Reading, ReadingAdmin)
|
11535681
|
import os
import re
import json
import requests
import urllib3
from requests_http_signature import HTTPSignatureAuth
import logging
import datetime
from uuid import uuid4
from functools import lru_cache
import jmespath
from jmespath.exceptions import JMESPathError
from dcplib.aws.sqs import SQSMessenger, get_queue_url
from dss import Config, Replica, datetime_to_version_format
from dss.subscriptions_v2 import SubscriptionData
from dss.storage.identifiers import UUID_PATTERN, VERSION_PATTERN, TOMBSTONE_SUFFIX, DSS_BUNDLE_KEY_REGEX
logger = logging.getLogger(__name__)
notification_queue_name = "dss-notify-v2-" + os.environ['DSS_DEPLOYMENT_STAGE']
_attachment_size_limit = 128 * 1024
_versioned_tombstone_key_regex = re.compile(f"^(bundles)/({UUID_PATTERN})\\.({VERSION_PATTERN})\\.{TOMBSTONE_SUFFIX}$")
_unversioned_tombstone_key_regex = re.compile(f"^(bundles)/({UUID_PATTERN})\\.{TOMBSTONE_SUFFIX}$")
_bundle_key_regex = DSS_BUNDLE_KEY_REGEX
def should_notify(replica: Replica, subscription: dict, metadata_document: dict, key: str) -> bool:
"""
Check if a notification should be attempted for subscription and key
"""
jmespath_query = subscription.get(SubscriptionData.JMESPATH_QUERY)
if not jmespath_query:
return True
else:
try:
if jmespath.search(jmespath_query, metadata_document):
return True
else:
return False
except JMESPathError:
logger.error("jmespath query failed for owner={} replica={} uuid={} jmespath_query='{}' key={}".format(
subscription[SubscriptionData.OWNER],
subscription[SubscriptionData.REPLICA],
subscription[SubscriptionData.UUID],
subscription[SubscriptionData.JMESPATH_QUERY],
key
))
return False
def notify_or_queue(replica: Replica, subscription: dict, metadata_document: dict, key: str):
"""
Notify or queue for later processing:
1) For normal bundle: attempt notification, queue on failure
2) For delete: attempt notification, queue on failure
3) For versioned tombstone: attempt notification, queue on failure
4) For unversioned tombstone: Queue one notification per affected bundle version. Notifications are
not attempted for previously tombstoned versions. Since the number of versions is
unbounded, inline delivery is not attempted.
"""
event_type = metadata_document['event_type']
with SQSMessenger(get_queue_url(notification_queue_name)) as sqsm:
if _unversioned_tombstone_key_regex.match(key):
tombstones = set()
bundles = set()
key_prefix = key.rsplit(".", 1)[0] # chop off the tombstone suffix
for key in _list_prefix(replica, key_prefix):
if _versioned_tombstone_key_regex.match(key):
bundle_key = key.rsplit(".", 1)[0]
tombstones.add(bundle_key)
elif _bundle_key_regex.match(key):
bundles.add(key)
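            # queue one notification per bundle version that was not already
            # tombstoned individually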
for key in bundles - tombstones:
sqsm.send(_format_sqs_message(replica, subscription, event_type, key), delay_seconds=0)
else:
if not notify(subscription, metadata_document, key):
sqsm.send(_format_sqs_message(replica, subscription, event_type, key), delay_seconds=15 * 60)
def notify(subscription: dict, metadata_document: dict, key: str) -> bool:
"""
Attempt notification delivery. Return True for success, False for failure
"""
fqid = key.split("/")[1]
bundle_uuid, bundle_version = fqid.split(".", 1)
sfx = f".{TOMBSTONE_SUFFIX}"
if bundle_version.endswith(sfx):
bundle_version = bundle_version[:-len(sfx)]
api_domain_name = f'https://{os.environ.get("API_DOMAIN_NAME")}'
payload = {
'bundle_url': api_domain_name + f'/v1/bundles/{bundle_uuid}?version={bundle_version}',
'dss_api': api_domain_name,
'subscription_id': subscription[SubscriptionData.UUID],
'event_timestamp': datetime_to_version_format(datetime.datetime.utcnow()),
'event_type': metadata_document['event_type'],
'match': {
'bundle_uuid': bundle_uuid,
'bundle_version': bundle_version,
},
'transaction_id': str(uuid4())
}
jmespath_query = subscription.get(SubscriptionData.JMESPATH_QUERY)
if jmespath_query is not None:
payload[SubscriptionData.JMESPATH_QUERY] = jmespath_query
if "CREATE" == metadata_document['event_type']:
attachments_defs = subscription.get(SubscriptionData.ATTACHMENTS)
if attachments_defs is not None:
errors = dict()
attachments = dict()
for name, attachment in attachments_defs.items():
if 'jmespath' == attachment['type']:
try:
value = jmespath.search(attachment['expression'], metadata_document)
except BaseException as e:
errors[name] = str(e)
else:
attachments[name] = value
if errors:
attachments['_errors'] = errors
size = len(json.dumps(attachments).encode('utf-8'))
if size > _attachment_size_limit:
attachments = {'_errors': f"Attachments too large ({size} > {_attachment_size_limit})"}
payload['attachments'] = attachments
request = {
'method': subscription.get(SubscriptionData.METHOD, "POST"),
'url': subscription[SubscriptionData.CALLBACK_URL],
'headers': dict(),
'allow_redirects': False,
'timeout': None,
}
hmac_key = subscription.get('hmac_secret_key')
if hmac_key:
hmac_key_id = subscription.get('hmac_key_id', "hca-dss:" + subscription['uuid'])
request['auth'] = HTTPSignatureAuth(key=hmac_key.encode(), key_id=hmac_key_id)
# get rid of this so it doesn't appear in delivery log messages
del subscription['hmac_secret_key']
else:
request['auth'] = None
encoding = subscription.get(SubscriptionData.ENCODING, "application/json")
if encoding == "application/json":
request['json'] = payload
elif encoding == 'multipart/form-data':
body = subscription[SubscriptionData.FORM_FIELDS].copy()
body[subscription[SubscriptionData.PAYLOAD_FORM_FIELD]] = json.dumps(payload)
data, content_type = urllib3.encode_multipart_formdata(body)
request['data'] = data
request['headers']['Content-Type'] = content_type
else:
raise ValueError(f"Encoding {encoding} is not supported")
try:
response = requests.request(**request)
except BaseException as e:
logger.warning("Exception raised while delivering notification: %s, subscription: %s",
str(payload), str(subscription), exc_info=e)
return False
if 200 <= response.status_code < 300:
logger.info("Successfully delivered %s: HTTP status %i, subscription: %s",
str(payload), response.status_code, str(subscription))
return True
else:
logger.warning("Failed delivering %s: HTTP status %i, subscription: %s",
str(payload), response.status_code, str(subscription))
return False
def _format_sqs_message(replica: Replica, subscription: dict, event_type: str, key: str):
return json.dumps({
SubscriptionData.REPLICA: replica.name,
SubscriptionData.OWNER: subscription['owner'],
SubscriptionData.UUID: subscription['uuid'],
'event_type': event_type,
'key': key
})
@lru_cache()
def _list_prefix(replica: Replica, prefix: str):
handle = Config.get_blobstore_handle(replica)
return [object_key for object_key in handle.list(replica.bucket, prefix)]
|
11535693
|
from flask import render_template
from app.misc import misc
@misc.route('/beta')
def beta():
return render_template(
'misc/beta.html',
title="LudoLatin - We're building!",
)
@misc.route('/terms')
def terms():
return render_template(
'misc/terms.html',
title="LudoLatin - Terms of Use",
)
@misc.route('/privacy')
def privacy():
return render_template(
'misc/privacypolicy.html',
title="LudoLatin - Privacy Policy",
)
@misc.route('/contact')
def contact():
return render_template(
'misc/contact.html',
title="LudoLatin - Contact",
)
@misc.route('/robots.txt')
def robots():
return render_template(
'misc/robots.txt',
)
@misc.route('/googleb362926a2c87791b.html')
def google_site_verification():
return render_template(
'misc/googleb362926a2c87791b.html',
)
|
11535739
|
import os
import signal
import argparse
import subprocess
import aff3ct_help_reader as ahr
parser = argparse.ArgumentParser(prog='aff3ct-command-conversion', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--build', action='store', dest='buildPath', type=str, default="build/", help='Build path to aff3ct.')
parser.add_argument('--dest', action='store', dest='destPath', default="doc/source/simulation/parameters", type=str, help='Destination path.')
args = parser.parse_args()
indent = " "
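def toLatex(text):
    # NOTE: `toLatex` is referenced below (to format ":Range:" values) but was
    # not defined anywhere in this script. This minimal assumed stand-in
    # renders the range string as an RST inline literal; the original helper
    # may have emitted :math: markup instead.
    return "``" + text + "``"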
def read_help(sim, code, prec = "32"):
PathOrigin = os.getcwd()
os.chdir(args.buildPath)
# run the tested simulator
listArgsAFFECT = ["./bin/aff3ct", "-H", "-C", code, "--sim-type", sim, "-p", prec];
try:
processAFFECT = subprocess.Popen(listArgsAFFECT, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdoutAFFECT, stderrAFFECT) = processAFFECT.communicate()
except KeyboardInterrupt:
os.kill(processAFFECT.pid, signal.SIGINT)
(stdoutAFFECT, stderrAFFECT) = processAFFECT.communicate()
returnCode = processAFFECT.returncode
errOutput = stderrAFFECT.decode(encoding='UTF-8')
stdOutput = stdoutAFFECT.decode(encoding='UTF-8').split("\n")
os.chdir(PathOrigin)
return stdOutput;
def getLongestTag(tags):
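    # e.g. "-h, --help" -> "--help" (return the longer of the two aliases)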
    tmp = [t.strip() for t in tags.split(',')]
tag = tmp[0]
if len(tmp) > 1:
if len(tmp[0]) < len(tmp[1]):
tag = tmp[1]
return tag
def getArgReference(refHeader, refBody):
return ".. _" + refHeader + "-" + getLongestTag(refBody).strip('-') + ":\n\n"
def makeTableLine(col1_len, col2_len, horiz = "-", vert = "+"):
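    # e.g. makeTableLine(3, 4) -> "+---+----+\n" (one RST grid-table border line)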
text = vert
for i in range(col1_len):
text += horiz
text += vert
for i in range(col2_len):
text += horiz
text += vert + "\n"
return text
def addSpaces(text, totalLength):
if len(text) >= totalLength:
return text
for i in range(totalLength - len(text)):
text += " "
return text
def bubbleSort(aTagList):
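    # in-place bubble sort of [tag, data] pairs: case-insensitive, ignoring
    # the leading dashes of each tag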
for passnum in range(len(aTagList)-1,0,-1):
for i in range(passnum):
if aTagList[i][0].strip('-').lower() > aTagList[i+1][0].strip('-').lower():
temp = aTagList[i]
aTagList[i] = aTagList[i+1]
aTagList[i+1] = temp
def sortTags(moduleMap):
reqList = []
for tag in moduleMap:
if tag == "name":
continue
if moduleMap[tag]["group"] == "required":
reqList.append([tag, moduleMap[tag]])
stdList = []
for tag in moduleMap:
if tag == "name":
continue
if moduleMap[tag]["group"] == "":
stdList.append([tag, moduleMap[tag]])
advList = []
for tag in moduleMap:
if tag == "name":
continue
if moduleMap[tag]["group"] == "advanced":
advList.append([tag, moduleMap[tag]])
bubbleSort(reqList)
bubbleSort(stdList)
bubbleSort(advList)
tagList = reqList + stdList + advList
return tagList
def write_module(moduleMap, path, reftag):
file = open(path, 'w')
text = ".. _" + reftag + "-" + moduleMap["name"].replace(' ', '-').lower() + ":\n\n"
text += moduleMap["name"] + "\n"
for x in range(len(moduleMap["name"])):
text += "-"
text += "\n\n"
file.write(text)
for Arg in sortTags(moduleMap):
tag = Arg[0]
group = Arg[1]["group" ]
argtype = Arg[1]["argtype" ]
limits = Arg[1]["limits" ]
# required = Arg[1]["required"]
# default = Arg[1]["default" ]
# needs = Arg[1]["needs" ]
# excludes = Arg[1]["excludes"]
info = Arg[1]["info" ]
text = getArgReference(reftag, tag)
title = "``" + tag + "``"
if group == "required" :
title += " |image_required_argument|"
elif group == "advanced" :
title += " |image_advanced_argument|"
text += title + "\n"
for t in range(len(title)):
text += '"'
text +="\n\n"
if argtype != "FLAG":
value = argtype
text += indent + ":Type: " + argtype + "\n"
allowed_values_table = []
if limits != "":
__limits = ""
pos = limits.find("{");
if pos != -1:
__limits = ":Allowed values:";
allowed_values_table = limits[pos+1:-1].split('|')
for i in range(len(allowed_values_table)):
allowed_values_table[i] = allowed_values_table[i].strip()
for t in allowed_values_table:
__limits += " ``" + t + "``"
elif argtype == "folder" or argtype == "path" or argtype == "file":
__limits = ":Rights: " + limits[1:-1]
else:
if limits == "positive":
limits = "[0;+inf["
elif limits == "positive, non-zero":
limits = "]0;+inf["
if __limits != "":
__limits = ":Range: " + toLatex(limits)
if __limits != "":
text += indent + __limits + "\n"
# if default != "":
# text += indent + ":Default: " + default + "\n"
# if len(needs):
# text += indent + ":Needs: "
# for n in needs:
# text += "``" + n + "`` "
# text += "\n"
# if len(excludes):
# text += indent + ":Excludes: "
# for e in excludes:
# text += "``" + e + "`` "
# text += "\n"
if argtype != "FLAG":
exampleValue = "TODO"
# if default == "":
if len(allowed_values_table):
exampleValue = allowed_values_table[0]
elif argtype == "text":
exampleValue = '"TODO CHECK VALUE"'
elif argtype == "integer":
exampleValue = "1"
elif argtype == "real number":
exampleValue = "1.0"
elif argtype == "folder" or argtype == "path":
exampleValue = "example/path/to/the/right/place/"
elif argtype == "file":
exampleValue = "example/path/to/the/right/file"
text += indent + ":Examples: ``" + getLongestTag(tag) + " " + exampleValue + "``\n"
text += "\n"
info = info[0].upper() + info[1:]
info = info.strip(" ").strip(".") + "."
text += info.replace("--", "\\\\-\\\\-") + "\n\n"
if len(allowed_values_table):
text += "Description of the allowed values:\n\n"
valueHead = " Value "
descrHead = " Description "
longestValue = len(valueHead)
for v in allowed_values_table:
l = len(v) + 6
if l > longestValue :
longestValue = l
descrSubstitution = getLongestTag(tag).strip('-') + "_descr_"
longestDescription = len(descrHead)
for v in allowed_values_table:
l = len(v) + len(descrSubstitution) + 4
if l > longestDescription :
longestDescription = l
text += makeTableLine(longestValue, longestDescription, "-", "+")
text +=( "|" + addSpaces(valueHead, longestValue) +
"|" + addSpaces(descrHead, longestDescription) +
"|\n")
text += makeTableLine(longestValue, longestDescription, "=", "+")
for v in allowed_values_table:
text +=("|" + addSpaces(" ``" + v + "`` ", longestValue) +
"|" + addSpaces(" |" + descrSubstitution + v.lower() + "| ", longestDescription) +
"|\n")
text += makeTableLine(longestValue, longestDescription, "-", "+")
text += "\n"
for v in allowed_values_table:
text += ".. |" + descrSubstitution + v.lower() + "| replace:: TODO VALUE " + v + "\n"
text += "\n\n"
file.write(text)
file.close()
def write_codec_file(codecPath, codeName, hasPct):
file = open(codecPath, 'w')
text = ".. _codec-" + codeName.lower() + ":\n\n"
title = "Codec " + codeName
text += title + "\n"
for t in range(len(title)):
text += '*'
text += "\n\n"
text += ".. toctree::\n"
text += indent + ":maxdepth: 2\n"
text += indent + ":caption: Contents\n\n"
text += indent + "enc.rst\n"
text += indent + "dec.rst\n"
if hasPct:
text += indent + "pct.rst\n"
file.write(text)
if __name__ == "__main__":
    ######## BFER-I simulation (used to build the module-level help pages)
stdOutput = read_help("BFERI", "LDPC", "16");
helpMap = ahr.help_to_map(stdOutput)
####### write modules
destPath = args.destPath
if destPath[-1] != "/":
destPath += "/"
if not os.path.exists(destPath):
os.makedirs(destPath)
write_module(helpMap["Other" ], destPath + "global/global.rst", "global");
write_module(helpMap["Simulation" ], destPath + "sim/sim.rst" , "sim");
write_module(helpMap["Source" ], destPath + "src/src.rst" , "src");
write_module(helpMap["CRC" ], destPath + "crc/crc.rst" , "crc");
write_module(helpMap["Modem" ], destPath + "mdm/mdm.rst" , "mdm");
write_module(helpMap["Channel" ], destPath + "chn/chn.rst" , "chn");
write_module(helpMap["Monitor" ], destPath + "mnt/mnt.rst" , "mnt");
write_module(helpMap["Terminal" ], destPath + "ter/ter.rst" , "ter");
write_module(helpMap["Interleaver"], destPath + "itl/itl.rst" , "itl");
write_module(helpMap["Quantizer" ], destPath + "qnt/qnt.rst" , "qnt");
codesList = ["BCH", "LDPC", "POLAR", "RA", "REP", "RS", "RSC", "RSC_DB", "TURBO", "TURBO_DB", "TURBO_PROD", "UNCODED"]
for c in codesList:
stdOutput = read_help("BFER", c, "32");
helpMap = ahr.help_to_map(stdOutput)
codecPath = destPath + "cdc/cdc_" + c.lower() + "/"
if not os.path.exists(codecPath):
os.makedirs(codecPath)
write_module(helpMap["Encoder"], codecPath + "enc.rst", "enc-" + c.lower());
write_module(helpMap["Decoder"], codecPath + "dec.rst", "dec-" + c.lower());
hasPct = False
try:
write_module(helpMap["Puncturer"], codecPath + "pct.rst", "pct-" + c.lower());
hasPct = True
except KeyError:
pass
write_codec_file(codecPath + "cdc.rst", c, hasPct)
os.chdir(destPath)
|
11535766
|
from __future__ import unicode_literals
import datetime
from httplib import OK, FORBIDDEN
import json
from django.core.urlresolvers import reverse
from django.test import TestCase
from pycon.pycon_api.models import APIAuth
from pycon.pycon_api.tests import RawDataClientMixin
from pycon.schedule.models import SessionRole
from pycon.schedule.tests.factories import SessionFactory, SessionRoleFactory
from pycon.tests.factories import UserFactory
from symposion.schedule.tests.factories import SlotFactory
class SessionStaffJSONTest(RawDataClientMixin, TestCase):
def setUp(self):
super(SessionStaffJSONTest, self).setUp()
self.auth_key = APIAuth.objects.create(name="test")
def get_api_result(self):
url = reverse("session_staff_json")
sig = self.get_signature(uri=url, method='GET', body='')
rsp = self.client.get(url, **sig)
self.assertEqual(OK, rsp.status_code)
response_data = json.loads(rsp.content.decode('utf-8'))
self.assertEqual(OK, response_data['code'])
return response_data['data']
def test_no_api_key(self):
rsp = self.client.get(reverse("session_staff_json"))
self.assertEqual(FORBIDDEN, rsp.status_code)
def test_bad_api_key(self):
self.auth_key.auth_key = 'who are you fooling?'
url = reverse("session_staff_json")
sig = self.get_signature(uri=url, method='GET', body='')
rsp = self.client.get(url, **sig)
self.assertEqual(FORBIDDEN, rsp.status_code)
def test_bad_api_secret(self):
self.auth_key.secret = 'who are you fooling?'
url = reverse("session_staff_json")
sig = self.get_signature(uri=url, method='GET', body='')
rsp = self.client.get(url, **sig)
self.assertEqual(FORBIDDEN, rsp.status_code)
def test_no_data(self):
data = self.get_api_result()
self.assertEqual(0, len(data))
def test_empty_session(self):
self.session = SessionFactory()
data = self.get_api_result()
self.assertEqual(0, len(data))
def test_slot_no_roles(self):
session = SessionFactory()
slot = SlotFactory()
session.slots.add(slot)
data = self.get_api_result()
self.assertEqual(
[{
'conf_key': slot.pk,
'chair_email': '',
'chair_name': '',
'runner_email': '',
'runner_name': ''
}],
data
)
def test_slot_chair_no_runner(self):
user = UserFactory(first_name="Melanie", last_name="Pickle", email='<EMAIL>')
session = SessionFactory()
slot = SlotFactory()
session.slots.add(slot)
SessionRoleFactory(session=session, user=user, role=SessionRole.SESSION_ROLE_CHAIR)
data = self.get_api_result()
self.assertEqual(
[{
'conf_key': slot.pk,
'chair_email': '<EMAIL>',
'chair_name': '<NAME>',
'runner_email': '',
'runner_name': ''
}],
data
)
def test_slot_runner_no_chair(self):
user = UserFactory(first_name="Melanie", last_name="Pickle", email='<EMAIL>')
session = SessionFactory()
slot = SlotFactory()
session.slots.add(slot)
SessionRoleFactory(session=session, user=user, role=SessionRole.SESSION_ROLE_RUNNER)
data = self.get_api_result()
self.assertEqual(
[{
'conf_key': slot.pk,
'chair_email': '',
'chair_name': '',
'runner_email': '<EMAIL>',
'runner_name': '<NAME>',
}],
data
)
def test_slot_chair_and_runner(self):
user1 = UserFactory(first_name="Melanie", last_name="Pickle", email='<EMAIL>')
user2 = UserFactory(first_name="Ichabod", last_name="Crane", email='<EMAIL>')
session = SessionFactory()
slot = SlotFactory()
session.slots.add(slot)
SessionRoleFactory(session=session, user=user1, role=SessionRole.SESSION_ROLE_CHAIR)
SessionRoleFactory(session=session, user=user2, role=SessionRole.SESSION_ROLE_RUNNER)
data = self.get_api_result()
self.assertEqual(
[{
'conf_key': slot.pk,
'chair_email': '<EMAIL>',
'chair_name': '<NAME>',
'runner_email': '<EMAIL>',
'runner_name': '<NAME>',
}],
data
)
def test_two_chairs_two_runners_one_declined(self):
user1 = UserFactory(first_name="Melanie", last_name="Pickle", email='<EMAIL>')
user2 = UserFactory(first_name="Ichabod", last_name="Crane", email='<EMAIL>')
session = SessionFactory()
slot = SlotFactory()
session.slots.add(slot)
SessionRoleFactory(session=session, user=user1, role=SessionRole.SESSION_ROLE_CHAIR)
SessionRoleFactory(session=session, user=user2, role=SessionRole.SESSION_ROLE_CHAIR,
status=False)
user3 = UserFactory(first_name="Mike", last_name="Hammer", email='<EMAIL>')
user4 = UserFactory(first_name="Sam", last_name="Spade", email='<EMAIL>')
SessionRoleFactory(session=session, user=user3, role=SessionRole.SESSION_ROLE_RUNNER)
SessionRoleFactory(session=session, user=user4, role=SessionRole.SESSION_ROLE_RUNNER,
status=False)
data = self.get_api_result()
self.assertEqual(
[{
'conf_key': slot.pk,
'chair_email': '<EMAIL>',
'chair_name': '<NAME>',
'runner_email': '<EMAIL>',
'runner_name': '<NAME>'
}],
data
)
def test_two_slots_one_session(self):
# If there are two slots, we return data for each of them, in order of the slots
# start time
user1 = UserFactory(first_name="Melanie", last_name="Pickle", email='<EMAIL>')
user2 = UserFactory(first_name="Ichabod", last_name="Crane", email='<EMAIL>')
session = SessionFactory()
slot1 = SlotFactory(start=datetime.time(13))
slot2 = SlotFactory(start=datetime.time(8))
session.slots.add(slot1)
session.slots.add(slot2)
SessionRoleFactory(session=session, user=user1, role=SessionRole.SESSION_ROLE_CHAIR)
SessionRoleFactory(session=session, user=user2, role=SessionRole.SESSION_ROLE_CHAIR,
status=False)
data = self.get_api_result()
self.assertEqual(
[{
'conf_key': slot2.pk,
'chair_email': '<EMAIL>',
'chair_name': '<NAME>',
'runner_email': '',
'runner_name': ''
}, {
'conf_key': slot1.pk,
'chair_email': '<EMAIL>',
'chair_name': '<NAME>',
'runner_email': '',
'runner_name': ''
}],
data
)
def test_two_slots_two_sessions(self):
# If there are two slots, we return data for each of them, in order of the slots
# start time
user1 = UserFactory(first_name="Melanie", last_name="Pickle", email='<EMAIL>')
user2 = UserFactory(first_name="Ichabod", last_name="Crane", email='<EMAIL>')
session1 = SessionFactory()
session2 = SessionFactory()
slot1 = SlotFactory(start=datetime.time(13))
slot2 = SlotFactory(start=datetime.time(8))
session1.slots.add(slot1)
session2.slots.add(slot2)
SessionRoleFactory(session=session1, user=user1, role=SessionRole.SESSION_ROLE_CHAIR)
SessionRoleFactory(session=session1, user=user2, role=SessionRole.SESSION_ROLE_RUNNER)
user3 = UserFactory(first_name="Mike", last_name="Hammer", email='<EMAIL>')
user4 = UserFactory(first_name="Sam", last_name="Spade", email='<EMAIL>')
SessionRoleFactory(session=session2, user=user3, role=SessionRole.SESSION_ROLE_CHAIR)
SessionRoleFactory(session=session2, user=user4, role=SessionRole.SESSION_ROLE_RUNNER)
data = self.get_api_result()
self.assertEqual(
[{
'conf_key': slot2.pk,
'chair_email': '<EMAIL>',
'chair_name': '<NAME>',
'runner_email': '<EMAIL>',
'runner_name': '<NAME>'
}, {
'conf_key': slot1.pk,
'chair_email': '<EMAIL>',
'chair_name': '<NAME>',
'runner_email': '<EMAIL>',
'runner_name': '<NAME>'
}],
data
)
|
11535768
|
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['SfeNet', 'Bottleneck']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SfeNet(nn.Module):
def __init__(self, block=Bottleneck, layers=[3, 4, 6, 3]):
super(SfeNet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
self.inplanes = 512
self.branch2_layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.branch2_layer4 = self._make_layer(block, 512, layers[3], stride=1)
self.inplanes = 1024
self.branch3_layer4 = self._make_layer(block, 512, layers[3], stride=1)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, p2, p3):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
# branch 1
b13 = self.layer3(x)
b1 = self.layer4(b13)
b1 = self.avgpool(b1)
# branch 2
b2 = self.height_shuffle(x, p2)
b2 = self.branch2_layer3(b2)
b2 = self.branch2_layer4(b2)
b2 = self.recover_shuffle(b2, p2)
index_pair_list_b2 = self.get_index_pair_list(b2, p2)
part_feature_list_b2 = [self.avgpool(b2[:, :, pair[0]:pair[1], :]).squeeze() for pair in index_pair_list_b2]
# branch 3
b3 = self.height_shuffle(b13, p3)
b3 = self.branch3_layer4(b3)
b3 = self.recover_shuffle(b3, p3)
index_pair_list_b3 = self.get_index_pair_list(b3, p3)
part_feature_list_b3 = [self.avgpool(b3[:, :, pair[0]:pair[1], :]).squeeze() for pair in index_pair_list_b3]
# #x = x.view(x.size(0), -1)
# #x = self.fc(x)
#
# return x, feature_map_v
return b1, part_feature_list_b2, part_feature_list_b3
def get_index_pair_list(self, x, permu):
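        # (start, end) row indices of each horizontal slice; the last slice
        # absorbs the remainder when height is not divisible by len(permu)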
batchsize, num_channels, height, width = x.data.size()
number_slice = len(permu)
height_per_slice = height // number_slice
index_pair_list = [(height_per_slice*i, height_per_slice*(i+1)) for i in range(number_slice-1)]
index_pair_list.append((height_per_slice*(number_slice-1), height))
return index_pair_list
def height_shuffle(self, x, permu):
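        # Split the feature map into len(permu) horizontal slices and reorder
        # them along the height axis according to the permutation,
        # e.g. permu=[1, 0] swaps the top and bottom halves.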
batchsize, num_channels, height, width = x.data.size()
        result = torch.zeros(batchsize, num_channels, height, width, device=x.device)
number_slice = len(permu)
height_per_slice = height // number_slice
index_pair_list = [(height_per_slice*i, height_per_slice*(i+1)) for i in range(number_slice-1)]
index_pair_list.append((height_per_slice*(number_slice-1), height))
index_pair_list_shuffled = []
for i in range(number_slice):
index_pair_list_shuffled.append(index_pair_list[permu[i]])
start = 0
for i in range(len(index_pair_list_shuffled)):
index_pair = index_pair_list_shuffled[i]
length = index_pair[1] - index_pair[0]
result[:, :, start:(start+length), :] = x[:, :, index_pair[0]:index_pair[1], :]
start = start + length
return result
def recover_shuffle(self, x, permu):
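        # Build the inverse permutation, so applying height_shuffle with it
        # undoes a previous shuffle performed with `permu`.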
dic = {}
recover_permu = []
for i in range(len(permu)):
dic[permu[i]] = i
all_key = list(dic.keys())
all_key.sort()
for i in range(len(all_key)):
recover_permu.append(dic[all_key[i]])
return self.height_shuffle(x, recover_permu)
|
11535778
|
import unittest
from os.path import join
import numpy as np
from rastervision.core import Box
from rastervision.core.data import (IdentityCRSTransformer,
RasterizedSourceConfig, RasterizerConfig,
GeoJSONVectorSourceConfig, ClassConfig)
from rastervision.pipeline.file_system import json_to_file
from rastervision.pipeline.config import ConfigError
from rastervision.pipeline import rv_config
class TestRasterizedSource(unittest.TestCase):
def setUp(self):
self.crs_transformer = IdentityCRSTransformer()
self.extent = Box.make_square(0, 0, 10)
self.tmp_dir_obj = rv_config.get_tmp_dir()
self.tmp_dir = self.tmp_dir_obj.name
self.class_id = 0
self.background_class_id = 1
self.line_buffer = 1
self.class_config = ClassConfig(names=['a'])
self.uri = join(self.tmp_dir, 'tmp.json')
def tearDown(self):
self.tmp_dir_obj.cleanup()
def build_source(self, geojson, all_touched=False):
json_to_file(geojson, self.uri)
config = RasterizedSourceConfig(
vector_source=GeoJSONVectorSourceConfig(
uri=self.uri, default_class_id=None),
rasterizer_config=RasterizerConfig(
background_class_id=self.background_class_id,
all_touched=all_touched))
source = config.build(self.class_config, self.crs_transformer,
self.extent)
return source
def test_get_chip(self):
geojson = {
'type':
'FeatureCollection',
'features': [{
'type': 'Feature',
'geometry': {
'type':
'Polygon',
'coordinates': [[[0., 0.], [0., 5.], [5., 5.], [5., 0.],
[0., 0.]]]
},
'properties': {
'class_id': self.class_id,
}
}, {
'type': 'Feature',
'geometry': {
'type': 'LineString',
'coordinates': [[7., 0.], [7., 9.]]
},
'properties': {
'class_id': self.class_id
}
}]
}
source = self.build_source(geojson)
with source.activate():
self.assertEqual(source.get_extent(), self.extent)
chip = source.get_image_array()
self.assertEqual(chip.shape, (10, 10, 1))
expected_chip = self.background_class_id * np.ones((10, 10, 1))
expected_chip[0:5, 0:5, 0] = self.class_id
expected_chip[0:10, 6:8] = self.class_id
np.testing.assert_array_equal(chip, expected_chip)
def test_get_chip_no_polygons(self):
geojson = {'type': 'FeatureCollection', 'features': []}
source = self.build_source(geojson)
with source.activate():
# Get chip that partially overlaps extent. Expect that chip has zeros
# outside of extent, and background_class_id otherwise.
self.assertEqual(source.get_extent(), self.extent)
chip = source.get_chip(Box.make_square(5, 5, 10))
self.assertEqual(chip.shape, (10, 10, 1))
expected_chip = np.full((10, 10, 1), self.background_class_id)
np.testing.assert_array_equal(chip, expected_chip)
def test_get_chip_all_touched(self):
geojson = {
'type':
'FeatureCollection',
'features': [{
'type': 'Feature',
'geometry': {
'type':
'Polygon',
'coordinates': [[[0., 0.], [0., 0.4], [0.4, 0.4],
[0.4, 0.], [0., 0.]]]
},
'properties': {
'class_id': self.class_id,
}
}]
}
false_source = self.build_source(geojson, all_touched=False)
true_source = self.build_source(geojson, all_touched=True)
with false_source.activate():
chip = false_source.get_image_array()
expected_chip = self.background_class_id * np.ones((10, 10, 1))
np.testing.assert_array_equal(chip, expected_chip)
with true_source.activate():
chip = true_source.get_image_array()
expected_chip = self.background_class_id * np.ones((10, 10, 1))
expected_chip[0:1, 0:1, 0] = self.class_id
np.testing.assert_array_equal(chip, expected_chip)
def test_using_null_class_bufs(self):
vs = GeoJSONVectorSourceConfig(
uri=self.uri, default_class_id=None, line_bufs={0: None})
with self.assertRaises(ConfigError):
config = RasterizedSourceConfig(
vector_source=vs,
rasterizer_config=RasterizerConfig(
background_class_id=self.background_class_id))
config.validate_config()
if __name__ == '__main__':
unittest.main()
|
11535787
|
from django.conf.urls import url
import staff.views
urlpatterns = (
url(r'^$', staff.views.staff_page, name="staff.views.staff_page"),
url(r'^ajax_change_status$', staff.views.ajax_change_status,
name="staff.views.ajax_change_status"),
url(r'^ajax_save_ids', staff.views.ajax_save_ids,
name="staff.views.ajax_save_ids"),
    url(r'^gradebook/$', staff.views.get_gradebook,
        name="staff.views.get_gradebook"),
    url(r'^gradebook/(?P<statuses>\w+)$', staff.views.gradebook_page,
        name="staff.views.gradebook_page"),
    url(r'^gradebook_page', staff.views.gradebook_page,
        name="staff.views.gradebook_page")
)
|
11535788
|
from dataclasses import dataclass, field
@dataclass
class MirobotAngleValues:
""" A dataclass to hold Mirobot's angular values. """
a: float = 0.0
""" Angle of axis 1 """
b: float = 0.0
""" Angle of axis 2 """
c: float = 0.0
""" Angle of axis 3 """
x: float = 0.0
""" Angle of axis 4 """
y: float = 0.0
""" Angle of axis 5 """
z: float = 0.0
""" Angle of axis 6 """
d: float = 0.0
""" Location of rail or stepper module """
@dataclass
class MirobotCartesianValues:
""" A dataclass to hold Mirobot's cartesian values and roll/pitch/yaw angles. """
x: float = 0.0
""" Position on X-axis """
y: float = 0.0
""" Position of Y-axis """
z: float = 0.0
""" Position of Z-axis """
a: float = 0.0
""" Position of Roll angle """
b: float = 0.0
""" Position of Pitch angle """
c: float = 0.0
""" Position of Yaw angle """
@dataclass
class MirobotStatus:
""" A composite dataclass to hold all of Mirobot's trackable quantities. """
state: str = ''
""" The brief descriptor string for Mirobot's state. """
    angle: MirobotAngleValues = field(default_factory=MirobotAngleValues)
    """ Dataclass that holds Mirobot's angular values including the rail position value. """
    cartesian: MirobotCartesianValues = field(default_factory=MirobotCartesianValues)
    """ Dataclass that holds the cartesian values and roll/pitch/yaw angles. """
    pump_pwm: int = 0
    """ The current PWM of the pneumatic pump module. """
    valve_pwm: int = 0
    """ The current PWM of the valve module (e.g. gripper). """
motion_mode: bool = False
""" Whether Mirobot is currently in coordinate mode or joint-motion mode """
|
11535811
|
from unittest import TestCase
from aq.errors import QueryParsingError
from aq.parsers import SelectParser, TableId
class TestSelectParser(TestCase):
parser = SelectParser({})
def test_parse_query_simplest(self):
query, meta = self.parser.parse_query('select * from foo')
self.assertEqual(query, 'SELECT * FROM foo')
table = TableId(None, 'foo', None)
self.assertEqual(meta.tables, [table])
def test_parse_query_db(self):
query, meta = self.parser.parse_query('select * from foo.bar')
self.assertEqual(query, 'SELECT * FROM foo . bar')
table = TableId('foo', 'bar', None)
self.assertEqual(meta.tables, [table])
def test_parse_query_table_alias(self):
query, meta = self.parser.parse_query('select * from foo.bar a')
self.assertEqual(query, 'SELECT * FROM foo . bar a')
table = TableId('foo', 'bar', 'a')
self.assertEqual(meta.tables, [table])
def test_parse_query_table_as_alias(self):
query, meta = self.parser.parse_query('select * from foo.bar as a')
self.assertEqual(query, 'SELECT * FROM foo . bar AS a')
table = TableId('foo', 'bar', 'a')
self.assertEqual(meta.tables, [table])
def test_parse_query_join_simple(self):
query, meta = self.parser.parse_query('select * from foo, bar')
self.assertEqual(query, 'SELECT * FROM foo , bar')
table1 = TableId(None, 'foo', None)
table2 = TableId(None, 'bar', None)
self.assertEqual(meta.tables, [table1, table2])
def test_parse_query_join_expr(self):
query, meta = self.parser.parse_query('select * from foo join bar on foo.a = bar.b')
self.assertEqual(query, 'SELECT * FROM foo JOIN bar ON foo.a = bar.b')
table1 = TableId(None, 'foo', None)
table2 = TableId(None, 'bar', None)
self.assertEqual(meta.tables, [table1, table2])
def test_parse_query_join_table_with_using(self):
query, meta = self.parser.parse_query('select * from foo join foo.bar using (name)')
self.assertEqual(query, 'SELECT * FROM foo JOIN foo . bar USING ( name )')
table1 = TableId(None, 'foo', None)
table2 = TableId('foo', 'bar', None)
self.assertEqual(meta.tables, [table1, table2])
def test_parse_query_sub_select(self):
query, meta = self.parser.parse_query('select * from (select * from foo)')
self.assertEqual(query, 'SELECT * FROM ( SELECT * FROM foo )')
table = TableId(None, 'foo', None)
self.assertEqual(meta.tables, [table])
def test_parse_query_sub_select_and_join(self):
query, meta = self.parser.parse_query('select * from (select * from foo.bar) left join blah')
self.assertEqual(query, 'SELECT * FROM ( SELECT * FROM foo . bar ) LEFT JOIN blah')
table1 = TableId('foo', 'bar', None)
table2 = TableId(None, 'blah', None)
self.assertEqual(meta.tables, [table1, table2])
    def test_parse_query_invalid(self):
        with self.assertRaises(QueryParsingError):
            self.parser.parse_query('foo')
def test_parse_query_expand_json_get(self):
query, _ = self.parser.parse_query("select foo->1")
self.assertEqual(query, 'SELECT json_get(foo, 1)')
query, _ = self.parser.parse_query("select foo.bar -> 'blah'")
self.assertEqual(query, "SELECT json_get(foo.bar, 'blah')")
query, _ = self.parser.parse_query("select foo->bar->blah")
self.assertEqual(query, 'SELECT json_get(json_get(foo, bar), blah)')
def test_parse_query_expand_not_json_get(self):
query, _ = self.parser.parse_query("select * from foo where x = 'bar -> 1'")
self.assertEqual(query, "SELECT * FROM foo WHERE x = 'bar -> 1'")
query, _ = self.parser.parse_query("select * from foo where x -> 'bar -> 1'")
self.assertEqual(query, "SELECT * FROM foo WHERE json_get(x, 'bar -> 1')")
def test_parse_query_with_and(self):
query, _ = self.parser.parse_query("select * from foo where x = 'foo' and y = 'bar'")
self.assertEqual(query, "SELECT * FROM foo WHERE x = 'foo' AND y = 'bar'")
def test_parse_query_with_or(self):
query, _ = self.parser.parse_query("select * from foo where x = 'foo' or y = 'bar'")
self.assertEqual(query, "SELECT * FROM foo WHERE x = 'foo' OR y = 'bar'")
|
11535829
|
import re
from collections import namedtuple
from pathlib import Path
import gemmi
import numpy
from gemmi.cif import Loop, Document, Style
Limit = namedtuple('Limit', 'h_max, h_min, k_max, k_min, l_max, l_min')
class HKL():
"""
loop_
_refln_index_h
_refln_index_k
_refln_index_l
_refln_F_squared_meas
_refln_F_squared_sigma
_refln_scale_group_code
"""
def __init__(self, hkl_file: str, block_name: str, hklf_type: int = 4):
self._hkl_file = hkl_file
self.hklf_type = hklf_type
self._doc: Document = gemmi.cif.Document()
self._doc.add_new_block(block_name)
self.block = self._doc.sole_block()
self._get_hkl_as_block()
@property
def hkl_as_cif(self) -> str:
return self._doc.as_string(style=Style.Simple)
def _get_hkl_as_block(self):
loop_header = ['index_h',
'index_k',
'index_l',
'F_squared_meas' if self.hklf_type != 3 else 'F_meas',
'F_squared_sigma' if self.hklf_type != 3 else 'F_sigma',
'scale_group_code']
loop: Loop = self.block.init_loop('_refln_', self._trim_header_to_hkl_width(loop_header))
zero_reflection_pattern = re.compile(r'^\s+0\s+0\s+0\s+0.*')
for line in self._hkl_file.splitlines(keepends=False):
splitline = line.split()
if not splitline:
continue
# Do not use data after the 0 0 0 reflection
if zero_reflection_pattern.match(line):
loop.add_row(splitline)
break
try:
loop.add_row(splitline[:len(loop_header)])
            # RuntimeError is raised by gemmi.cif.add_row:
except (IndexError, RuntimeError):
continue
def __repr__(self) -> str:
return self.hkl_as_cif[:250]
    def _trim_header_to_hkl_width(self, loop_header):
        hkl_width = self._get_hkl_width()
        trimmed_header = loop_header[:hkl_width]
        return trimmed_header
    def _get_hkl_width(self) -> int:
first_lines = self._hkl_file[:150].strip().splitlines(keepends=False)
if len(first_lines) > 1:
return len(first_lines[1].split())
return len(first_lines[0].split())
def get_hkl_min_max(self) -> Limit:
hkl: gemmi.ReflnBlock = gemmi.hkl_cif_as_refln_block(self.block)
miller = hkl.make_miller_array()
h_max, k_max, l_max = numpy.max(miller, axis=0)
h_min, k_min, l_min = numpy.min(miller, axis=0)
return Limit(h_max=h_max, h_min=h_min, k_max=k_max, k_min=k_min, l_max=l_max, l_min=l_min)
if __name__ == '__main__':
h = HKL(Path('tests/examples/work/test_hkl_file.txt').read_text(), '123234')
# print(h.hkl_as_cif[:250])
m = h.get_hkl_min_max()
print(m)
|
11535860
|
import yaml
import abc
from ...utils import pickle_data
class ReferenceGenerator:
def __init__(self, verbose=True):
self.generated_references = []
self.verbose = verbose
self.type = None
self.valid_target_instances = None
self.valid_anchor_instances = None
self.targets_must_be_multiple = None
self.too_hard = None
def generate(self, all_scans, valid_target_instances, valid_anchor_instances, targets_must_be_multiple, too_hard):
self.valid_target_instances = valid_target_instances
self.valid_anchor_instances = valid_anchor_instances
self.targets_must_be_multiple = targets_must_be_multiple
self.too_hard = too_hard
for scan in all_scans:
scan_references = self.generate_for_single_scan(scan)
self.generated_references.extend(scan_references)
if self.verbose:
print('{}:'.format(self.type), scan.scan_id, 'resulted in', len(scan_references), '\ttotal till now',
len(self.generated_references))
return self.generated_references
@abc.abstractmethod
def generate_for_single_scan(self, scan):
pass
def save_references(self, save_path):
references_dict_list = []
for reference in self.generated_references:
references_dict_list.append(reference.serialize())
        with open(save_path, 'wb') as fout:  # pickle needs a binary file handle
pickle_data(fout, references_dict_list)
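# A minimal subclass sketch (hypothetical generator, for illustration only):
# only generate_for_single_scan has to be overridden; generate() drives the
# per-scan loop and accumulates the results.
class NoOpReferenceGenerator(ReferenceGenerator):
    def generate_for_single_scan(self, scan):
        # Hypothetical: a real generator would build reference objects here.
        return []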
|
11535880
|
from .baseline import *
class SIMPLE_LAYER(MessagePassing):
def __init__(self, feat_in, feat_out,spread=1,bias=False):
super(SIMPLE_LAYER, self).__init__(aggr='mean',flow='target_to_source')
self.L_flag = torch.zeros(1,1).cuda()
self.bias = bias
self.updater = Linear(feat_in,feat_out,bias=self.bias)
self.p_leader = Linear(feat_out, 2,bias=self.bias)
self.layer_weight = Linear(2*feat_out,1,bias=self.bias)
self.aggregator = PROPAGATION_OUT()
def forward(self,x,edge_index, T):
# PRELIMINARY CALCULATIONS
        self.L_flag = torch.zeros(1, 1).cuda()
updated_x = F.leaky_relu(self.updater(x))
sum_Neigh_x = self.aggregator(updated_x, edge_index)
# SELECTION <==============================================================
random_prob = F.relu(self.p_leader(sum_Neigh_x))
random_prob = F.softmax(random_prob,dim=-1)
self.prob_i = random_prob[:,1].unsqueeze(1)
hot_prob = torch.where(random_prob[:,1]>T,torch.tensor(1).cuda(),torch.tensor(0).cuda())
SEL_v = hot_prob.view(-1,1)
self.L_flag = self.L_flag.float().view(-1,1) + SEL_v
# SUMMATION + CONCAT <========================================================
sum_SEL_x = self.aggregator(SEL_v*updated_x,edge_index)
concat_sums = torch.cat([sum_SEL_x,sum_Neigh_x],dim=-1)
# WEIGHT <========================================================
weight_SEL_v = torch.sigmoid(self.layer_weight(concat_sums))
A_x = F.relu(self.aggregator(weight_SEL_v* SEL_v* updated_x,edge_index))
out = updated_x + A_x
return out
class COMPLEX_LAYER(MessagePassing):
def __init__(self, feat_in, feat_out, spread=2, bias=False):
super(COMPLEX_LAYER, self).__init__(aggr='mean',flow='target_to_source')
self.spread = spread
self.bias = bias
self.L_flag = torch.zeros(1,1).cuda()
self.onehot_layer = torch.eye(spread).cuda()
self.updater = Linear(feat_in,feat_out,bias=self.bias)
self.p_leader = Linear(feat_out, 2,bias=self.bias)
self.layer_weight = Linear(spread+feat_out,1, bias=self.bias)
self.aggr_weight = Linear(feat_out*2,1, bias=self.bias)
self.alpha = None
self.aggregator = PROPAGATION_OUT()
def find_next_lead(self,src_ct, dst, prev_leader,coverage_tensor):
next_lead = torch.zeros_like(prev_leader).cuda()
expand_src_id = prev_leader.repeat_interleave(src_ct)
coverage_tensor = coverage_tensor + expand_src_id.float()
dst_nodes = torch.unique(dst*expand_src_id)
next_lead[dst_nodes] = 1
return next_lead, coverage_tensor
def forward(self,x,edge_index,T):
# INITIALIZATION
layer = 0
coverage = torch.zeros(1).cuda()
src, dst = edge_index
self.L_flag = self.L_flag*0
self.weights_1 = torch.tensor([]).cuda()
# PRELIMINARY CALCULATIONS
R,src_counts = torch.unique(src,return_counts=True)
if src_counts.size(0) != x.size(0):
temp_ct = torch.zeros(x.size(0),dtype=torch.long).cuda()
temp_ct[R] = src_counts
src_counts = temp_ct
x = F.leaky_relu(self.updater(x))
updated_x = x
geo_x = updated_x[:]
local_x = self.aggregator(updated_x, edge_index)
random_prob = torch.sigmoid(self.p_leader(updated_x))
random_prob = F.softmax(random_prob,dim=-1)
self.prob_i = random_prob[:,1]
hot_prob = torch.where(random_prob>T,torch.tensor(1).cuda(),torch.tensor(0).cuda())
kept_prob = torch.where(random_prob>T,random_prob,torch.tensor(0.0).cuda())
leader_hot = hot_prob[:,1].unsqueeze(dim=1)
leader_prob = kept_prob[:,1].unsqueeze(dim=1)
self.L_flag = self.L_flag.float() + leader_hot.float()
while layer < self.spread:
layer_i = self.onehot_layer[layer].float().unsqueeze(0)
layer_i = layer_i.repeat_interleave(geo_x.size(0),dim=0)
layer_i = torch.cat([layer_i,local_x],dim=-1)
layer_i = layer_i * leader_hot.float()
weight_l = torch.sigmoid(self.layer_weight(layer_i))
geopass = weight_l*geo_x
temp_agg = self.aggregator(geopass*leader_hot.float(),edge_index)
geo_x = geo_x + temp_agg
leader_hot, _ = self.find_next_lead(src_counts, dst, leader_hot,coverage)
leader_prob = kept_prob[:,1].unsqueeze(dim=1)*leader_hot
layer += 1
some_X = torch.cat([geo_x,updated_x],dim=-1)
some_weight = torch.sigmoid(self.aggr_weight(some_X))
out = (1-some_weight)*updated_x+(some_weight*geo_x)
self.alpha = some_weight
return out
class PROPAGATION_OUT(MessagePassing):
def __init__(self):
super(PROPAGATION_OUT, self).__init__(aggr='add', flow="target_to_source")
def forward(self, x, edge_index) : return self.propagate(edge_index,x=x)
def message(self,x_j) : return x_j
def update(self,aggr_out) : return aggr_out
class NSGNN(torch.nn.Module):
def __init__(self, feat_in, feat_out, learners = 1, p2L=0.48, neg_slope=0.0, spread_L=1, unifying_weight=True):
super(NSGNN, self).__init__()
self.N_learners = learners
self.dim_out = feat_out
self.p2L = p2L
self.spread = spread_L
self.neg_slope = neg_slope
self.W_layers = nn.ModuleList([SIMPLE_LAYER(feat_in,feat_out) for i in range(learners)])
        # *** N.B. uncomment the line below when using smaller datasets
        # self.W_layers = nn.ModuleList([COMPLEX_LAYER(feat_in, feat_out, spread=self.spread) for i in range(learners)])
self.fusing_weight = unifying_weight
if unifying_weight : self.prob_weighter = Linear(self.N_learners,feat_out, bias=True)
self.leader_info = None
self.L_containers = None
def forward(self,data):
# INITIALIZATION
x, edge_index = data.x, data.edge_index
L_containers = [None]*self.N_learners
filter_output = [None]*self.N_learners
filter_vstar = np.zeros(self.N_learners)
X_out = torch.zeros(x.size(0),self.dim_out).cuda()
# CONVOLUTION-PER-LEARNER
for i in range(self.N_learners):
x_temp = self.W_layers[i](x,edge_index,self.p2L)
x_temp = F.leaky_relu(x_temp,self.neg_slope)
x_temp = F.dropout(x_temp,p=0.4, training=self.training)
X_out = X_out + x_temp
filter_output[i] = F.softmax(x_temp.unsqueeze(0),dim=-1)
filter_vstar[i] = (self.W_layers[i].L_flag.sum()/x.size(0)).item()
L_containers[i] = self.W_layers[i].L_flag
# RE-ARRANGING INFORMATION
self.outputs = torch.cat(filter_output,dim=0)
        self.leader_info = filter_vstar  # np.round_(filter_vstar, 2)
L_containers = torch.cat(L_containers,dim=-1)
self.L_containers = L_containers.sum(dim=-1).float()/self.N_learners
# FEATURE-SUMMATION
if self.fusing_weight:
global_weight = self.prob_weighter(L_containers)
global_weight = F.relu(global_weight)
else:
global_weight = 0.0
X_out = X_out + global_weight
# FEATURE-SUMMATION
y = X_out
y = F.log_softmax(y,dim=-1)
y = y.squeeze()
return y
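# Minimal smoke test (a sketch: assumes torch_geometric is installed and a
# CUDA device is available, since the layers allocate tensors with .cuda()).
if __name__ == '__main__':
    from torch_geometric.data import Data
    x = torch.randn(6, 16).cuda()
    edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
                               [1, 2, 3, 4, 5, 0]]).cuda()
    model = NSGNN(feat_in=16, feat_out=4, learners=2).cuda()
    out = model(Data(x=x, edge_index=edge_index))
    print(out.shape)  # expected: torch.Size([6, 4]) of log-softmax scores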
|
11535889
|
import os
import shutil
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import argparse
# Argument parsing
parser = argparse.ArgumentParser(description='Script to plot differences between actual and predicted lepidopteran measurements.')
parser.add_argument('-a', '--actual',
type=str,
help='File name with actual measurements',
required=True)
parser.add_argument('-n', '--name',
type=str,
help='Name of column containing file name of each measured image',
required=True)
parser.add_argument('-l', '--left',
type=str,
help='Name of column containing left wing measurements in actual measurements file',
required=True)
parser.add_argument('-r', '--right',
type=str,
help='Name of column containing right wing measurements in actual measurements file',
required=True)
parser.add_argument('-p', '--predicted',
type=str,
help='File name with predicted measurement results (assumed to be a csv produced by our pipeline)',
default='results.csv')
parser.add_argument('-c', '--comparison',
action='store_true',
                    help='Produce a "comparison.csv" file containing all measurements and the differences')
parser.add_argument('-o', '--outliers',
action='store_true',
help='Produce an "outliers.csv" file containing only measurements that are deemed outliers')
parser.add_argument('-sd', '--sd',
type=float,
help="The number of SD's that are used as a theshold for classifying an outlier",
default=2)
parser.add_argument('-co', '--copy_outliers',
type=str,
help='Create a folder "outliers" and copy images from the specified path that correspond to outliers')
args = parser.parse_args()
# Reading in both actual measurements file, and select desired columns
actual_file, actual_ext = os.path.splitext(args.actual)
actual = pd.DataFrame()
if actual_ext.lower() == ".xlsx":
actual = pd.read_excel(args.actual)
elif actual_ext.lower() == ".csv":
actual = pd.read_csv(args.actual)
actual = actual[[args.name, args.left, args.right]]
actual.rename(columns={args.name: "image_id", args.left: "actual_left", args.right: "actual_right"}, inplace=True)
# Reading in predicted results from csv
predicted = pd.read_csv(args.predicted)
predicted.rename(columns={"left_wing (mm)": "predicted_left", "right_wing (mm)": "predicted_right"}, inplace=True)
# Merging them together and creating new columns for the difference
both = pd.merge(actual, predicted, on="image_id", how='inner')
both['left_diff'] = both['predicted_left'] - both['actual_left']
both['right_diff'] = both['predicted_right'] - both['actual_right']
all_diffs = pd.concat([both['right_diff'], both['left_diff']])  # Series.append was removed in pandas 2.0
mean = np.mean(all_diffs)
sd = np.std(all_diffs)
both['left_SD'] = (both['left_diff'] - mean)/sd
both['right_SD'] = (both['right_diff'] - mean)/sd
both['is_outlier'] = (abs(both['left_SD'])>args.sd) | (abs(both['right_SD'])>args.sd)
# Print statistics about differences
lower = mean - args.sd * sd
upper = mean + args.sd * sd
num_outlier_measurements = len(all_diffs[(all_diffs < lower) | (all_diffs > upper)])
num_outlier_images = np.count_nonzero(both['is_outlier'])
print("DIFFERENCE STATISTICS")
print(f" Mean Differences: {mean}")
print(f" Differences SD: {sd}.")
print(f" Lower Bound (-{args.sd} SD) of Differences: {lower}")
print(f" Upper Bound (+{args.sd} SD) of Differences: {upper}")
print(f" Number of outlying measurements: {num_outlier_measurements}")
print(f" Number of images with outlying measurements: {num_outlier_images}")
print("")
# Plot histogram of differences
all_diffs_nonoutlier = all_diffs[(all_diffs >= lower) & (all_diffs <= upper)]
fig, ax = plt.subplots(figsize=(10, 5))
ax = all_diffs_nonoutlier.hist(bins='auto')
# Saving the plot
filename = 'result_plot.png'
output_path = os.path.normpath(filename)
plt.xlabel('Difference between (predicted - actual) in mm')
start, end = ax.get_xlim()
plt.ylabel('Number of images')
plt.title('Error in predicted measurements')
plt.savefig(output_path)
plt.close()
print(f"Saved plot of differences to {filename}")
# Printing either full comparison csv or outliers csv
both['SD_sum'] = abs(both['left_SD']) + abs(both['right_SD'])
both.sort_values('SD_sum', ascending=False, inplace=True)
both.drop('SD_sum', axis=1, inplace=True)
both.sort_values('is_outlier', ascending=False, inplace=True, kind='mergesort')
if args.comparison:
comparison_filename = 'comparison.csv'
outlier_col_str = both["is_outlier"].replace({True:"TRUE", False:""})
both_outlier_col_str = both.copy()
both_outlier_col_str["is_outlier"] = outlier_col_str
both_outlier_col_str.to_csv(comparison_filename)
print(f"Saved all differences to {comparison_filename}")
if args.outliers:
outliers_filename = 'outliers.csv'
both_outliers_only = both[both['is_outlier']].copy()
both_outliers_only.drop('is_outlier', axis=1, inplace=True)
both_outliers_only.to_csv(outliers_filename)
print(f'Saved {num_outlier_images} rows to {outliers_filename}')
# Fetching outlier images
if args.copy_outliers:
outliers_folder = 'outliers/'
if os.path.exists(outliers_folder):
oldList = os.listdir(outliers_folder)
for oldFile in oldList:
os.remove(os.path.join(outliers_folder, oldFile))
else:
os.mkdir(outliers_folder)
image_list = both[both['is_outlier']]['image_id']
print(f'Copying {len(image_list)} outlier images to {outliers_folder} ...', end="")
for image_name in image_list:
image_path = os.path.join(args.copy_outliers, image_name)
shutil.copy(image_path, outliers_folder)
print("done")
|
11535902
|
import next.utils as utils
class SimpleTargetManager(object):
def __init__(self,db):
self.bucket_id = 'targets'
self.db = db
def set_targetset(self, exp_uid, targetset):
"""
Update the default target docs in the DB if a user uploads a target set.
"""
for i,target in enumerate(targetset):
target['target_id'] = i
target['exp_uid'] = exp_uid
try:
self.db.set_doc(self.bucket_id, None, target)
except Exception as e:
raise Exception("Failed to create_target_mapping: " + str(e))
def get_targetset(self, exp_uid):
"""
Gets the entire targetset for a given experiment as a list of dictionaries.
"""
targetset = self.db.get_docs_with_filter(self.bucket_id, {'exp_uid': exp_uid})
        if targetset is None:
            raise Exception("Target set for experiment {} is empty".format(exp_uid))
# targetset = mongotized_target_blob.pop(0)
return targetset
def get_target_item(self, exp_uid, target_id):
"""
        Get a single target from the targetset.
"""
# Get an individual target form the DB given exp_uid and index
try:
got_target = self.db.get_docs_with_filter(self.bucket_id,
{'exp_uid': exp_uid,
'target_id': target_id})
except Exception as e:
raise Exception("Failed to get_target_item: " + str(e))
try:
# targets are something else
target = got_target.pop(0)
        except IndexError:
# targets are numbers
target = {'target_id':target_id,
'primary_description':str(target_id),
'primary_type':'text',
'alt_description':str(target_id),
'alt_type':'text'}
# This line might fail; only tested under the except: statement above
#del target['exp_uid']
return target
def get_target_mapping(self, exp_uid):
# Get all docs for specified exp_uid
mongotized_target_blob = self.db.get_docs_with_filter(self.bucket_id, {'exp_uid': exp_uid})
        # If no docs with exp_uid can be retrieved, throw an error
        if mongotized_target_blob is None:
            raise Exception("No documents with exp_uid {} could be retrieved".format(exp_uid))
# Pop target_blob_dict out of list
for i in range(len(mongotized_target_blob)):
if 'targetless' in mongotized_target_blob[i].keys():
mongotized_target_blob.pop(i)
break
try:
mongotized_target_blob = sorted(mongotized_target_blob,key = lambda x: x.get('target_id',0))
        except Exception:
pass
return mongotized_target_blob
|
11535905
|
import datetime
from typing import List
from pyhafas.profile import ProfileInterface
from pyhafas.profile.interfaces.helper.parse_leg import ParseLegHelperInterface
from pyhafas.types.fptf import Leg, Mode, Stopover
class BaseParseLegHelper(ParseLegHelperInterface):
def parse_leg(
self: ProfileInterface,
journey: dict,
common: dict,
departure: dict,
arrival: dict,
date: datetime.date,
jny_type: str = "JNY",
gis=None) -> Leg:
"""
Parses Leg HaFAS returns into Leg object
        Different types of HaFAS responses can be parsed into a Leg object via these parameters
:param journey: Journey object given back by HaFAS (Data of the Leg to parse)
:param common: Common object given back by HaFAS
:param departure: Departure object given back by HaFAS
:param arrival: Arrival object given back by HaFAS
:param date: Parsed date of Journey (Departing date)
:param jny_type: HaFAS Journey type
:param gis: GIS object given back by HaFAS. Currently only used by "WALK" journey type.
:return: Parsed Leg object
"""
leg_origin = self.parse_lid_to_station(
common['locL'][departure['locX']]['lid'])
leg_destination = self.parse_lid_to_station(
common['locL'][arrival['locX']]['lid'])
if jny_type == "WALK":
return Leg(
id=gis['ctx'],
origin=leg_origin,
destination=leg_destination,
departure=self.parse_datetime(departure['dTimeS'], date),
arrival=self.parse_datetime(arrival['aTimeS'], date),
mode=Mode.WALKING,
name=None,
distance=gis['dist'] if gis is not None else None
)
else:
leg_stopovers: List[Stopover] = []
if 'stopL' in journey:
for stopover in journey['stopL']:
leg_stopovers.append(
Stopover(
stop=self.parse_lid_to_station(
common['locL'][stopover['locX']]['lid']
),
cancelled=bool(
stopover.get(
'dCncl',
stopover.get(
'aCncl',
False
))),
departure=self.parse_datetime(
stopover.get('dTimeS'),
date) if stopover.get('dTimeS') is not None else None,
departure_delay=self.parse_datetime(
stopover['dTimeR'],
date) - self.parse_datetime(
stopover['dTimeS'],
date) if stopover.get('dTimeR') is not None else None,
departure_platform=stopover.get(
'dPlatfR',
stopover.get('dPlatfS', stopover.get('dPltfR', stopover.get('dPltfS', {})).get('txt'))),
arrival=self.parse_datetime(
stopover['aTimeS'],
date) if stopover.get('aTimeS') is not None else None,
arrival_delay=self.parse_datetime(
stopover['aTimeR'],
date) - self.parse_datetime(
stopover['aTimeS'],
date) if stopover.get('aTimeR') is not None else None,
arrival_platform=stopover.get(
'aPlatfR',
stopover.get('aPlatfS', stopover.get('aPltfR', stopover.get('aPltfS', {})).get('txt'))),
))
return Leg(
id=journey['jid'],
name=common['prodL'][journey['prodX']]['name'],
origin=leg_origin,
destination=leg_destination,
cancelled=bool(arrival.get('aCncl', False)),
departure=self.parse_datetime(
departure['dTimeS'],
date),
departure_delay=self.parse_datetime(
departure['dTimeR'],
date) - self.parse_datetime(
departure['dTimeS'],
date) if departure.get('dTimeR') is not None else None,
departure_platform=departure.get(
'dPlatfR',
departure.get('dPlatfS', departure.get('dPltfR', departure.get('dPltfS', {})).get('txt'))),
arrival=self.parse_datetime(
arrival['aTimeS'],
date),
arrival_delay=self.parse_datetime(
arrival['aTimeR'],
date) - self.parse_datetime(
arrival['aTimeS'],
date) if arrival.get('aTimeR') is not None else None,
arrival_platform=arrival.get(
'aPlatfR',
arrival.get('aPlatfS', arrival.get('aPltfR', arrival.get('aPltfS', {})).get('txt'))),
stopovers=leg_stopovers)
def parse_legs(
self: ProfileInterface,
jny: dict,
common: dict,
date: datetime.date) -> List[Leg]:
"""
Parses Legs (when multiple available)
:param jny: Journeys object returned by HaFAS (contains secL list)
:param common: Common object returned by HaFAS
:param date: Parsed date of Journey (Departing date)
:return: Parsed List of Leg objects
"""
legs: List[Leg] = []
for leg in jny['secL']:
legs.append(
self.parse_leg(
leg.get(
'jny',
None),
common,
leg['dep'],
leg['arr'],
date,
leg['type'],
leg.get('gis')))
return legs
|
11535943
|
import debug_toolbar
from django.urls import re_path, include
from django.http import HttpResponse
def empty_page(request):
return HttpResponse('<body></body>')
urlpatterns = [
re_path(r'^$', empty_page),
re_path(r'^__debug__/', include(debug_toolbar.urls)),
]
|
11535982
|
import numpy as np
import cv2
import os
INPUT_PATH = 'F:/Scan-flood-Fill/data/toy_examples/input_200'
OUTPUT_PATH_FILL = 'F:/Scan-flood-Fill/data/toy_examples/input_toy/'
colorThreshold = 30
def dilation(inputPath, savePath, boundaryColor, d):
#print(inputPath)
img = cv2.imread(inputPath, 0)
height, width = img.shape[:2]
result = np.zeros([height * d, width * d])
for x in range(height):
for y in range(width):
            if abs(int(img[x, y]) - boundaryColor) < colorThreshold:  # cast avoids uint8 wraparound
#fill the rectangle
for i in range(d*x, d*(x+1)):
for j in range(d*y, d*(y+1)):
result[i,j] = boundaryColor
cv2.imwrite(savePath, result, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
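# An equivalent vectorized version (a sketch under the same assumptions:
# grayscale input, the module-level colorThreshold): np.repeat expands every
# pixel matching the boundary colour into a d x d block, avoiding the loops.
def dilation_vectorized(inputPath, savePath, boundaryColor, d):
    img = cv2.imread(inputPath, 0)
    # Cast to a signed type first so the uint8 subtraction cannot wrap around.
    mask = np.abs(img.astype(np.int16) - boundaryColor) < colorThreshold
    result = np.repeat(np.repeat(mask, d, axis=0), d, axis=1)
    cv2.imwrite(savePath, result.astype(np.uint8) * boundaryColor,
                [int(cv2.IMWRITE_JPEG_QUALITY), 100])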
def dilation_main():
boundaryColor = 255
for d, n in enumerate(range(200, 2200, 200)):
output_folder_name = 'input_' + str(n)
input_folder_name = 'input_' + str(200)
input_path = os.path.join(INPUT_PATH, input_folder_name)
output_path_fill = os.path.join(OUTPUT_PATH_FILL, output_folder_name)
for root, dirs, files in os.walk(input_path):
for f in files:
dilation(os.path.join(input_path, f), os.path.join(output_path_fill, f), boundaryColor, d+1)
if __name__ == '__main__':
dilation_main()
|
11536058
|
from __future__ import absolute_import
import copy
import functools
import itertools
import inspect
import json
import re
from huskar_sdk_v2.consts import OVERALL
from more_itertools import peekable, first
from dogpile.cache.util import function_key_generator
from gevent import sleep
from huskar_api import settings
from huskar_api.models.const import MAGIC_CONFIG_KEYS
from huskar_api.models.cache.region import zdumps, zloads
from huskar_api.extras.raven import capture_exception
__all__ = [
'make_cache_decorator',
'take_slice',
'check_znode_path',
'dedupleft',
'merge_instance_list',
]
def retry(exceptions, interval, max_retry):
"""A decorator with arguments to make view functions could be retried.
:param exceptions: The tuple of exception set.
:param interval: The sleep interval in seconds.
:param max_retry: The max retry times.
"""
def decorator(wrapped):
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
for i in xrange(max_retry):
try:
return wrapped(*args, **kwargs)
except exceptions:
if i == max_retry - 1:
raise
sleep(interval)
return wrapper
return decorator
def make_cache_decorator(redis_client):
"""Creates a decorator to apply cache on arguments."""
def cache_on_arguments(expiration_time):
def decorator(fn):
fn_generate_key = function_key_generator(
'cache_on_arguments:v1', fn, to_str=unicode)
fn_args = inspect.getargspec(fn)
fn_has_self = fn_args[0] and fn_args[0][0] in ('self', 'cls')
@functools.wraps(fn)
def wrapper(*args, **kwargs):
key = fn_generate_key(*args, **kwargs)
val = redis_client.get(key)
if val is None:
val = fn(*args, **kwargs)
redis_client.set(key, zdumps(val), expiration_time)
return val
else:
return zloads(val)
def generate_key(*args, **kwargs):
args = ((None,) + args) if fn_has_self else args
return fn_generate_key(*args, **kwargs)
def flush(*args, **kwargs):
return redis_client.delete(generate_key(*args, **kwargs))
wrapper.generate_key = generate_key
wrapper.flush = flush
wrapper.original = fn
return wrapper
return decorator
return cache_on_arguments
class LazySlice(object):
"""Takes the slice of iterable object and passes it into a factory
function.
Example::
@classmethod
def list_foo(cls):
ids = cls.get_foo_ids()
return take_slice(cls.mget, ids)
assert Spam.list_foo()[:10] == Spam.mget(Spam.get_foo_ids()[:10])
"""
def __init__(self, factory, iterable):
self.factory = factory
self.iterable = iterable
def __iter__(self):
return iter(self.factory(list(self.iterable)))
def __getitem__(self, s):
assert isinstance(s, slice)
iterable = itertools.islice(self.iterable, s.start, s.stop, s.step)
return self.factory(list(iterable))
take_slice = LazySlice
# Reference: https://git.io/zookeeper-3.5.3-validate-path
re_znode_path = re.compile(
ur'^(?!^\.+$)([^\u0000-\u001F\u007F-\u009F\ud800-\uF8FF\uFFF0-\uFFFF]+)$'
)
def check_znode_path(*components):
for comp in components:
if (not comp or comp.strip() != comp or
any(c in comp for c in '/\n\r\t') or
re_znode_path.search(comp) is None):
raise ValueError(
'Illegal characters in path({!r})'.format(components))
def normalize_cluster_name(cluster_name):
"""Normalizes the cluster name to avoid from duplicated E-Zone prefix."""
fragments = cluster_name.split(u'-')
prefix = first(fragments)
if prefix and prefix in settings.ROUTE_EZONE_LIST:
return u'-'.join(dedupleft(fragments, prefix))
return unicode(cluster_name)
def dedupleft(iterable, marker):
"""Deduplicates the marker on the left of an iterable object."""
iterator = peekable(iterable)
for x in iterator:
if iterator.peek(None) != marker:
break
return itertools.chain([marker], iterator)
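# For illustration (hypothetical e-zone fragments): a run of leading markers
# collapses to exactly one.
#   >>> list(dedupleft([u'ezone1', u'ezone1', u'main'], u'ezone1'))
#   [u'ezone1', u'main']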
def merge_instance_list(
application_name, overall_instance_list, current_instance_list,
cluster_name):
new_instance_list = []
instance_key_index_map = {}
for index, instance in enumerate(overall_instance_list):
if instance['cluster'] != OVERALL:
continue
key = (application_name, cluster_name, instance['key'])
instance_key_index_map[key] = index
new_instance = copy.copy(instance)
new_instance['cluster'] = cluster_name
new_instance_list.append(new_instance)
for instance in current_instance_list:
key = (application_name, cluster_name, instance['key'])
if key in instance_key_index_map:
new_instance_list[instance_key_index_map[key]] = instance
else:
instance_key_index_map[key] = len(new_instance_list)
new_instance_list.append(instance)
new_instance_list = _process_instance_list(
application_name, cluster_name, new_instance_list,
instance_key_index_map)
return new_instance_list
def _process_instance_list(
application_name, cluster_name, new_instance_list,
instance_key_index_map):
# Find inclusive keys
inclusive_keys = frozenset()
_key = MAGIC_CONFIG_KEYS['batch_config.inclusive_keys']
key = (application_name, cluster_name, _key)
if key in instance_key_index_map:
instance = new_instance_list[instance_key_index_map[key]]
try:
inclusive_keys = frozenset(json.loads(instance['value']))
except (KeyError, ValueError, TypeError):
capture_exception()
# Process instance list
if not inclusive_keys:
return new_instance_list
return [i for i in new_instance_list if i['key'] in inclusive_keys]
|
11536062
|
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Union, Optional, Sequence
import numpy as np
import warnings
from lhotse.features.base import FeatureExtractor, register_extractor
from lhotse.utils import is_module_available, Seconds, compute_num_frames
@dataclass
class OpenSmileConfig:
"""
    OpenSmile configs are stored in separate text files in OpenSmile's own format.
    You can select a predefined config by setting the ``feature_set`` and
    ``feature_level`` class attributes with either:
    (1) the ``FeatureSet`` and ``FeatureLevel`` classes predefined in
    https://github.com/audeering/opensmile-python/blob/master/opensmile/core/define.py
    OR
    (2) strings referring to the enum members.
    In opensmile-python you can also create your own config file and pass its path and
    the corresponding feature level as documented at
    https://audeering.github.io/opensmile-python/usage.html#custom-config.
    For now, custom configs are not supported in this extractor.
"""
feature_set: Union[str, Any] = "ComParE_2016" # default feature set or
# string with set name
feature_level: Union[str, Any] = "lld" # default feature level or level name
options: Optional[dict] = None # dictionary with optional script parameters
loglevel: int = 2 # log level (0-5), the higher the number the more log
# messages are given
logfile: Optional[str] = None # if not ``None`` log messages will be
# stored to this file
sampling_rate: Optional[int] = None # If ``None`` it will call ``process_func``
# with the actual sampling rate of the signal.
channels: Union[int, Sequence[int]] = 0
mixdown: bool = False # apply mono mix-down on selection
resample: bool = False # if ``True`` enforces given sampling rate by resampling
num_workers: Optional[int] = 1 # number of parallel jobs or 1 for sequential
# processing. If ``None`` will be set to the number of processors
verbose: bool = False # show debug messages
def to_dict(self) -> Dict[str, Any]:
return asdict(self)
@staticmethod
def from_dict(data: Dict[str, Any]) -> "OpenSmileConfig":
return OpenSmileConfig(**data)
@staticmethod
def featuresets_names():
"""
Returns list of strings with names of pretrained FeatureSets available in opensmile.
"""
assert is_module_available(
"opensmile"
), 'To use opensmile extractors, please "pip install opensmile" first.'
import opensmile
return list(opensmile.FeatureSet.__members__)
@register_extractor
class OpenSmileExtractor(FeatureExtractor):
"""Wrapper for extraction of features implemented in OpenSmile."""
name = "opensmile-extractor"
config_type = OpenSmileConfig
def __init__(self, config: Optional[Any] = None):
super().__init__(config=config)
assert is_module_available(
"opensmile"
), 'To use opensmile extractors, please "pip install opensmile" first.'
import opensmile
if isinstance(self.config.feature_set, str):
self.feature_set = opensmile.FeatureSet[self.config.feature_set]
else:
self.feature_set = self.config.feature_set
self.feature_level = opensmile.FeatureLevel(self.config.feature_level)
self.smileExtractor = opensmile.Smile(
feature_set=self.feature_set,
feature_level=self.feature_level,
sampling_rate=self.config.sampling_rate,
options=self.config.options,
loglevel=self.config.loglevel,
logfile=self.config.logfile,
channels=self.config.channels,
mixdown=self.config.mixdown,
resample=self.config.resample,
num_workers=self.config.num_workers,
verbose=self.config.verbose,
)
@property
def feature_names(self) -> List[str]:
return self.smileExtractor.feature_names
def is_lld_or_lld_de(self) -> bool:
from opensmile import FeatureLevel
return (
self.feature_level is FeatureLevel.LowLevelDescriptors
or self.feature_level is FeatureLevel.LowLevelDescriptors_Deltas
)
@property
def frame_shift(self) -> Seconds:
import opensmile
if (
self.is_lld_or_lld_de()
and self.feature_set in opensmile.FeatureSet.__members__.values()
):
            # For all default opensmile configs the frame shift is equal to 10 ms
return 0.01
else:
raise NotImplementedError(
f"frame_shift is not defined for Functionals feature level or for non default feature set. Defined featureset: {self.config.feature_set}"
)
    def feature_dim(self, sampling_rate: int) -> int:
        return len(self.feature_names)
def extract(self, samples: np.ndarray, sampling_rate: int) -> np.ndarray:
if (
self.config.sampling_rate is not None
and self.config.sampling_rate != sampling_rate
):
raise ValueError(
f"Given sampling rate ({sampling_rate}) mismatched with the value set in OpenSmileConfig ({self.config.sampling_rate})."
)
import opensmile
feats = self.smileExtractor.process_signal(
samples, sampling_rate=sampling_rate
).to_numpy()
if self.is_lld_or_lld_de():
feats = self._pad_frames(samples, feats, sampling_rate)
return feats.copy()
def _pad_frames(
self, samples: np.ndarray, feats: np.ndarray, sampling_rate: int
) -> np.ndarray:
"""Adds last diff frames to the end of feats matrix to fit lhotse.utils.compute_num_frames."""
duration = np.shape(samples)[1] / sampling_rate
diff = (
compute_num_frames(duration, self.frame_shift, sampling_rate)
- np.shape(feats)[0]
)
if abs(diff) >= 6:
warnings.warn(f"Unusual difference in number of frames: {diff}")
if diff > 0:
feats = np.append(feats, feats[-diff:, :], axis=0)
elif diff < 0:
            feats = feats[:diff, :]  # diff < 0: drop the |diff| surplus frames at the end
return feats
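# A minimal usage sketch (assumes opensmile is installed; the feature set and
# sampling rate below are arbitrary choices for illustration).
if __name__ == "__main__":
    config = OpenSmileConfig(feature_set="eGeMAPSv02", feature_level="lld")
    extractor = OpenSmileExtractor(config)
    samples = np.zeros((1, 16000), dtype=np.float32)  # one second of silence
    feats = extractor.extract(samples, sampling_rate=16000)
    print(feats.shape)  # (num_frames, len(extractor.feature_names))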
|
11536090
|
import random
class PrepopulatedModelFactory:
"""Most of the models in user_accounts are loaded from fixtures
This class creates a factory from an existing populated table.
"""
@classmethod
def get_queryset(cls):
raise NotImplementedError("override get_queryset in subclasses")
@classmethod
def get_table_count(cls):
count = cls.get_queryset().count()
if not count:
raise Exception(
"`{}` table is not yet populated.".format(
cls.get_queryset().model.__name__))
return count
@classmethod
def choice(cls):
cls.get_table_count()
        # random.choice needs a sequence; a set is not subscriptable
        return random.choice(list(cls.get_queryset()))
@classmethod
def sample(cls, count=None, zero_is_okay=False):
row_count = cls.get_table_count()
if not count:
lower_limit = 0 if zero_is_okay else 1
count = random.randint(lower_limit, row_count)
        # random.sample no longer accepts sets as of Python 3.11
        return random.sample(list(cls.get_queryset()), count)
|
11536096
|
import re
# converts text to lowercase
def to_lower(text):
return text.lower()
# remove all characters except alphabetical ones
# (strips special characters and numeric characters)
def remove_non_alpha(text):
return re.sub(r'[^a-zA-Z]', ' ', text)
# Removes all blank lines
def remove_extra_blank_lines(content):
    return re.sub(r'\n\s*\n', '\n', content)
# splits string by whitespace
def split_string(text):
return text.split()
# add space before and after the punctuation
def add_space_punc(text):
return re.sub("([^a-zA-Z0-9])", r' \1 ', text)
# removes multiple spaces with single space
def remove_multi_space(text):
return re.sub(r' +', ' ', text)
# removes all spaces
# replaces " " with ""
def remove_all_space(text):
return re.sub(r' +', '', text)
# check if substring present in text
def text_contains(text, substr):
    return 1 if substr in text else 0
# remove pattern from string
def strip_http_s(text, pattern_str):
return text.replace(pattern_str,"")
# extract the sequence after the last occurrence
def extract_line_af(line, symbol_seq):
return line.rsplit(symbol_seq)[-1]
# extract the sequence after the last occurrence, for multiple lines
def extract_line_af_all(content, symbol_seq):
for line in content:
line_ex = extract_line_af(line, symbol_seq)
print(line_ex)
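# Quick illustration (hypothetical input) of chaining the helpers:
#   >>> remove_multi_space(add_space_punc(to_lower("Hello,World!")))
#   'hello , world ! '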
|
11536097
|
import glob
import argparse
import statistics
import os
import time
import pickle
import copy
import numpy as np
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.dummy import DummyClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from metric_learn import LMNN
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split, cross_val_score
import brainflow
from brainflow.board_shim import BoardShim, BrainFlowInputParams, LogLevels, BoardIds
from brainflow.data_filter import DataFilter, FilterTypes, AggOperations, WindowFunctions, DetrendOperations
from brainflow.ml_model import BrainFlowMetrics, BrainFlowClassifiers, MLModel, BrainFlowModelParams
from svm_classifier import train_brainflow_search_svm, train_brainflow_svm
from store_model import write_model, write_knn_model
def prepare_data ():
    # use several window sizes; it's a kind of data augmentation
window_sizes = [4.0, 6.0, 8.0, 10.0]
overlaps = [0.5, 0.45, 0.4, 0.35] # percentage of window_size
dataset_x = list ()
dataset_y = list ()
for data_type in ('relaxed', 'focused'):
for file in glob.glob (os.path.join ('data', data_type, '*', '*.csv')):
print (file)
board_id = os.path.basename (os.path.dirname (file))
try:
board_id = int (board_id)
data = DataFilter.read_file (file)
sampling_rate = BoardShim.get_sampling_rate (board_id)
eeg_channels = get_eeg_channels (board_id)
for num, window_size in enumerate (window_sizes):
if data_type == 'focused':
cur_pos = sampling_rate * 10 # skip a little more for focus
else:
cur_pos = sampling_rate * 3
while cur_pos + int (window_size * sampling_rate) < data.shape[1]:
data_in_window = data[:, cur_pos:cur_pos + int (window_size * sampling_rate)]
bands = DataFilter.get_avg_band_powers (data_in_window, eeg_channels, sampling_rate, True)
feature_vector = np.concatenate ((bands[0], bands[1]))
dataset_x.append (feature_vector)
if data_type == 'relaxed':
dataset_y.append (0)
else:
dataset_y.append (1)
cur_pos = cur_pos + int (window_size * overlaps[num] * sampling_rate)
except Exception as e:
print (str (e))
print ('Class 1: %d Class 0: %d' % (len ([x for x in dataset_y if x == 1]), len ([x for x in dataset_y if x == 0])))
with open ('dataset_x.pickle', 'wb') as f:
pickle.dump (dataset_x, f, protocol = 3)
with open ('dataset_y.pickle', 'wb') as f:
pickle.dump (dataset_y, f, protocol = 3)
return dataset_x, dataset_y
def get_eeg_channels (board_id):
eeg_channels = BoardShim.get_eeg_channels (board_id)
    # optional: filter out channels we don't want to consider
try:
eeg_names = BoardShim.get_eeg_names (board_id)
selected_channels = list ()
# blacklisted_channels = {'O1', 'O2'}
blacklisted_channels = set ()
for i, channel in enumerate (eeg_names):
if not channel in blacklisted_channels:
selected_channels.append (eeg_channels[i])
eeg_channels = selected_channels
except Exception as e:
print (str (e))
print ('channels to use: %s' % str (eeg_channels))
return eeg_channels
def train_lda (data):
model = LinearDiscriminantAnalysis ()
print ('#### Linear Discriminant Analysis ####')
scores = cross_val_score (model, data[0], data[1], cv = 5, scoring = 'f1_macro', n_jobs = 8)
print ('f1 macro %s' % str (scores))
scores = cross_val_score (model, data[0], data[1], cv = 5, scoring = 'precision_macro', n_jobs = 8)
print ('precision macro %s' % str (scores))
scores = cross_val_score (model, data[0], data[1], cv = 5, scoring = 'recall_macro', n_jobs = 8)
print ('recall macro %s' % str (scores))
model.fit (data[0], data[1])
write_model (model.intercept_, model.coef_, 'lda')
def train_regression (data):
model = LogisticRegression (class_weight = 'balanced', solver = 'liblinear',
max_iter = 4000, penalty = 'l2', random_state = 1)
print('#### Logistic Regression ####')
scores = cross_val_score (model, data[0], data[1], cv = 5, scoring = 'f1_macro', n_jobs = 8)
print ('f1 macro %s' % str (scores))
scores = cross_val_score (model, data[0], data[1], cv = 5, scoring = 'precision_macro', n_jobs = 8)
print ('precision macro %s' % str (scores))
scores = cross_val_score (model, data[0], data[1], cv = 5, scoring = 'recall_macro', n_jobs = 8)
print ('recall macro %s' % str (scores))
model.fit (data[0], data[1])
write_model (model.intercept_, model.coef_, 'regression')
def train_knn (data):
model = KNeighborsClassifier (n_neighbors = 5)
print ('#### KNN ####')
data_x = copy.deepcopy (data[0])
for i, x in enumerate (data_x):
for j in range (5, 10):
            data_x[i][j] = data_x[i][j] / 5  # scale stddev features down so they matter less than the averages; 5 is an arbitrary divisor
scores = cross_val_score (model, data_x, data[1], cv = 5, scoring = 'f1_macro', n_jobs = 8)
print ('f1 macro %s' % str (scores))
scores = cross_val_score (model, data_x, data[1], cv = 5, scoring = 'precision_macro', n_jobs = 8)
print ('precision macro %s' % str (scores))
scores = cross_val_score (model, data_x, data[1], cv = 5, scoring = 'recall_macro', n_jobs = 8)
print ('recall macro %s' % str (scores))
write_knn_model (data)
def test_brainflow_lr (data):
print ('Test BrainFlow LR')
params = BrainFlowModelParams (BrainFlowMetrics.CONCENTRATION.value, BrainFlowClassifiers.REGRESSION.value)
model = MLModel (params)
start_time = time.time ()
model.prepare ()
predicted = [model.predict (x) > 0.5 for x in data[0]]
model.release ()
stop_time = time.time ()
print ('Total time %f' % (stop_time - start_time))
print (metrics.classification_report (data[1], predicted))
def test_brainflow_knn (data):
print ('Test BrainFlow KNN')
params = BrainFlowModelParams (BrainFlowMetrics.CONCENTRATION.value, BrainFlowClassifiers.KNN.value)
model = MLModel (params)
start_time = time.time ()
model.prepare ()
predicted = [model.predict (x) >= 0.5 for x in data[0]]
model.release ()
stop_time = time.time ()
print ('Total time %f' % (stop_time - start_time))
print (metrics.classification_report (data[1], predicted))
def test_brainflow_lda (data):
print ('Test BrainFlow LDA')
params = BrainFlowModelParams (BrainFlowMetrics.CONCENTRATION.value, BrainFlowClassifiers.LDA.value)
model = MLModel (params)
start_time = time.time ()
model.prepare ()
predicted = [model.predict (x) >= 0.5 for x in data[0]]
model.release ()
stop_time = time.time ()
print ('Total time %f' % (stop_time - start_time))
print (metrics.classification_report (data[1], predicted))
def test_brainflow_svm (data):
print ('Test BrainFlow SVM')
params = BrainFlowModelParams (BrainFlowMetrics.CONCENTRATION.value, BrainFlowClassifiers.SVM.value)
model = MLModel (params)
start_time = time.time ()
model.prepare ()
predicted = [model.predict (x) >= 0.5 for x in data[0]]
model.release ()
stop_time = time.time ()
print ('Total time %f' % (stop_time - start_time))
print (metrics.classification_report (data[1], predicted))
def main ():
parser = argparse.ArgumentParser ()
parser.add_argument ('--test', action = 'store_true')
parser.add_argument('--reuse-dataset', action='store_true')
parser.add_argument('--grid-search',action='store_true')
args = parser.parse_args ()
if args.reuse_dataset:
with open ('dataset_x.pickle', 'rb') as f:
dataset_x = pickle.load (f)
with open ('dataset_y.pickle', 'rb') as f:
dataset_y = pickle.load (f)
data = dataset_x, dataset_y
else:
data = prepare_data ()
if args.test:
# since we port models from python to c++ we need to test it as well
test_brainflow_knn (data)
test_brainflow_lr (data)
test_brainflow_svm (data)
test_brainflow_lda (data)
else:
train_regression (data)
# Don't use grid search method unless you have to as it takes a while to complete
train_brainflow_search_svm (data) if args.grid_search else train_brainflow_svm (data)
train_lda (data)
train_knn (data)
if __name__ == '__main__':
main ()
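# Example invocations (hypothetical script name):
#   python train_classifiers.py                         # rebuild dataset, train all models
#   python train_classifiers.py --reuse-dataset         # reuse the pickled dataset
#   python train_classifiers.py --reuse-dataset --test  # evaluate the ported BrainFlow models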
|
11536104
|
import logging
from django.core.management import BaseCommand
from bot.app.repository.launches_repository import LaunchRepository
logger = logging.getLogger(__name__)
TAG = 'Digest Server'
class Command(BaseCommand):
help = 'Run Get Launcher Configs manually.'
def handle(self, *args, **options):
logger.info('Get Launcher Config')
repository = LaunchRepository()
repository.get_launcher_configs()
|
11536118
|
from burp import IBurpExtender
import jarray
import os
import sys
#Adding directory to the path where Python searches for modules
module_folder = os.path.dirname('/home/arvind/Documents/Me/My_Projects/Git/WebAppsec/BurpExtensions/modules/')
sys.path.insert(0, module_folder)
import webcommon
unique_list_of_urls=[]
class BurpExtender(IBurpExtender):
def registerExtenderCallbacks(self,callbacks):
list_of_urls=[]
# Get a reference to the Burp helpers object
self._helpers = callbacks.getHelpers()
# set our extension name
callbacks.setExtensionName("Get all URLs")
# Get proxy history
proxyhistory=callbacks.getProxyHistory()
#Read each request in proxy history
for request in proxyhistory:
request_byte_array=request.getRequest()
requestInfo = self._helpers.analyzeRequest(request_byte_array)
BurpExtender.get_urls(self,callbacks,request_byte_array,requestInfo)
for url in unique_list_of_urls:
url_without_query_string=url.split('?')
if url_without_query_string[0].endswith(".php"):
print url_without_query_string[0]
def get_urls(self,callbacks,request_byte_array,requestInfo):
if requestInfo:
request_headers=requestInfo.getHeaders()
t0=request_headers[0].split(' ')
t1=request_headers[1].split(': ')
#Extract directories from every single request in proxy history
url=webcommon.extract_urls(self,callbacks,t0[1])
if url not in unique_list_of_urls:
unique_list_of_urls.append(url)
|
11536140
|
from dataclasses import dataclass
from typing import Callable, List, Optional
from hooqu.analyzers.analyzer import (AggDefinition, DoubledValuedState,
StandardScanShareableAnalyzer)
from hooqu.analyzers.preconditions import has_column, is_numeric
from hooqu.dataframe import DataFrameLike
@dataclass
class SumState(DoubledValuedState["SumState"]):
sum_value: float
def sum(self, other: "SumState") -> "SumState":
return SumState(self.sum_value + other.sum_value)
def metric_value(self):
return self.sum_value
class Sum(StandardScanShareableAnalyzer[SumState]):
def __init__(self, column: str, where: Optional[str] = None):
super().__init__("Sum", column, where=where)
def from_aggregation_result(
self, result: DataFrameLike, offset: int = 0
) -> Optional[SumState]:
value = 0
if len(result): # otherwise an empty dataframe
value = result.loc["sum"][self.instance]
return SumState(value)
def _aggregation_functions(self, where: Optional[str] = None) -> AggDefinition:
return {self.instance: {"sum", }}
def additional_preconditions(self) -> List[Callable[[DataFrameLike], None]]:
return [has_column(self.instance), is_numeric(self.instance)]
|
11536141
|
import math
import cmath
import tkinter as tk
from tkinter import colorchooser
from tkinter import ttk
import framework
from supershapes import *
class PaintApplication(framework.Framework):
start_x, start_y = 0, 0
end_x, end_y = 0, 0
current_item = None
fill = "red"
outline = "red"
width = 2.0
number_of_spokes = 5
arrow = None
dash = None
background = 'white'
foreground = 'red'
selected_super_shape = "shape A"
    tool_bar_functions = (
        "draw_line", "draw_oval", "draw_rectangle", "draw_arc",
        "draw_triangle", "draw_star", "draw_irregular_line",
        "draw_super_shape", "draw_text", "delete_item", "fill_item",
        "duplicate_item", "move_to_top", "drag_item",
        "enlarge_item_size", "reduce_item_size"
    )
selected_tool_bar_function = tool_bar_functions[0]
def draw_text(self):
pass
def draw_text_options(self):
tk.Label(self.top_bar, text='Text:').pack(side="left")
self.text_entry_widget = tk.Entry(self.top_bar, width=20)
self.text_entry_widget.pack(side="left")
tk.Label(self.top_bar, text='Font size:').pack(side="left")
self.font_size_spinbox = tk.Spinbox(
self.top_bar, from_=14, to=100, width=3)
self.font_size_spinbox.pack(side="left")
self.create_fill_options_combobox()
self.create_text_button = tk.Button(
self.top_bar, text="Go", command=self.on_create_text_button_clicked)
self.create_text_button.pack(side="left", padx=5)
def on_create_text_button_clicked(self):
entered_text = self.text_entry_widget.get()
center_x = self.canvas.winfo_width() / 2
center_y = self.canvas.winfo_height() / 2
font_size = self.font_size_spinbox.get()
self.canvas.create_text(
center_x, center_y, font=("", font_size), text=entered_text, fill=self.fill)
def delete_item(self):
self.current_item = None
self.canvas.delete("current")
def fill_item(self):
try:
self.canvas.itemconfig(
"current", fill=self.fill, outline=self.outline)
        except tk.TclError:
self.canvas.itemconfig("current", fill=self.fill)
def fill_item_options(self):
self.create_fill_options_combobox()
self.create_outline_options_combobox()
def duplicate_item(self):
try:
function_name = "create_" + self.canvas.type("current")
except TypeError:
return
coordinates = tuple(
map(lambda i: i + 10, self.canvas.coords("current")))
configurations = self.get_all_configurations_for_item()
self.canvas_function_wrapper(
function_name, coordinates, configurations)
def get_all_configurations_for_item(self):
configuration_dict = {}
for key, value in self.canvas.itemconfig("current").items():
if value[-1] and value[-1] not in ["0", "0.0", "0,0", "current"]:
configuration_dict[key] = value[-1]
return configuration_dict
def canvas_function_wrapper(self, function_name, *arg, **kwargs):
func = getattr(self.canvas, function_name)
func(*arg, **kwargs)
def move_to_top(self):
self.current_item = None
self.canvas.tag_raise("current")
def drag_item(self):
self.canvas.move(
"current", self.end_x - self.start_x, self.end_y - self.start_y)
self.canvas.bind("<B1-Motion>", self.drag_item_update_x_y)
def drag_item_update_x_y(self, event):
self.start_x, self.start_y = self.end_x, self.end_y
self.end_x, self.end_y = event.x, event.y
self.drag_item()
def enlarge_item_size(self):
self.current_item = None
if self.canvas.find_withtag("current"):
self.canvas.scale("current", self.end_x, self.end_y, 1.2, 1.2)
self.canvas.config(scrollregion=self.canvas.bbox(tk.ALL))
def reduce_item_size(self):
self.current_item = None
if self.canvas.find_withtag("current"):
self.canvas.scale("current", self.end_x, self.end_y, .8, .8)
self.canvas.config(scrollregion=self.canvas.bbox(tk.ALL))
def draw_irregular_line(self):
self.current_item = self.canvas.create_line(
self.start_x, self.start_y, self.end_x, self.end_y, fill=self.fill, width=self.width)
self.canvas.bind("<B1-Motion>", self.draw_irregular_line_update_x_y)
def draw_irregular_line_update_x_y(self, event=None):
self.start_x, self.start_y = self.end_x, self.end_y
self.end_x, self.end_y = event.x, event.y
self.draw_irregular_line()
def draw_irregular_line_options(self):
self.create_fill_options_combobox()
self.create_width_options_combobox()
def on_tool_bar_button_clicked(self, button_index):
self.selected_tool_bar_function = self.tool_bar_functions[button_index]
self.remove_options_from_top_bar()
self.display_options_in_the_top_bar()
self.bind_mouse()
def draw_super_shape(self):
points = self.get_super_shape_points(
*super_shapes[self.selected_super_shape])
self.current_item = self.canvas.create_polygon(points, outline=self.outline,
fill=self.fill, width=self.width)
def draw_super_shape_options(self):
self.create_super_shapes_options_combobox()
self.create_fill_options_combobox()
self.create_outline_options_combobox()
self.create_width_options_combobox()
def create_super_shapes_options_combobox(self):
tk.Label(self.top_bar, text='Select shape:').pack(side="left")
self.super_shape_combobox = ttk.Combobox(
self.top_bar, state='readonly', width=8)
self.super_shape_combobox.pack(side="left")
self.super_shape_combobox['values'] = tuple(
shape for shape in super_shapes.keys())
self.super_shape_combobox.bind(
'<<ComboboxSelected>>', self.set_selected_super_shape)
self.super_shape_combobox.set(self.selected_super_shape)
def set_selected_super_shape(self, event=None):
self.selected_super_shape = self.super_shape_combobox.get()
def get_super_shape_points(self, a, b, m, n1, n2, n3):
# https://en.wikipedia.org/wiki/Superformula
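        # superformula: r(phi) = (|cos(m*phi/4)/a|**n2 + |sin(m*phi/4)/b|**n3) ** (-1/n1)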
points = []
for i in self.float_range(0, 2 * math.pi, 0.01):
raux = (abs(1 / a * abs(math.cos(m * i / 4))) ** n2 +
abs(1 / b * abs(math.sin(m * i / 4))) ** n3)
r = abs(raux) ** (-1 / n1)
x = self.end_x + r * math.cos(i)
y = self.end_y + r * math.sin(i)
points.extend((x, y))
return points
def float_range(self, x, y, step):
while x < y:
yield x
x += step
def set_foreground_color(self, event=None):
self.foreground = self.get_color_from_chooser(
self.foreground, "foreground")
self.color_palette.itemconfig(
self.foreground_palette, width=0, fill=self.foreground)
def set_background_color(self, event=None):
self.background = self.get_color_from_chooser(
self.background, "background")
self.color_palette.itemconfig(
self.background_palette, width=0, fill=self.background)
def get_color_from_chooser(self, initial_color, color_type="a"):
color = colorchooser.askcolor(
color=initial_color,
title="select {} color".format(color_type)
)[-1]
if color:
return color
# dialog has been cancelled
else:
return initial_color
def try_to_set_fill_after_palette_change(self):
try:
self.set_fill()
        except Exception:
pass
def try_to_set_outline_after_palette_change(self):
try:
self.set_outline()
        except Exception:
pass
def display_options_in_the_top_bar(self):
self.show_selected_tool_icon_in_top_bar(
self.selected_tool_bar_function)
options_function_name = "{}_options".format(
self.selected_tool_bar_function)
func = getattr(self, options_function_name, self.function_not_defined)
func()
def draw_line_options(self):
self.create_fill_options_combobox()
self.create_width_options_combobox()
self.create_arrow_options_combobox()
self.create_dash_options_combobox()
def draw_oval_options(self):
self.create_fill_options_combobox()
self.create_outline_options_combobox()
self.create_width_options_combobox()
def draw_rectangle_options(self):
self.create_fill_options_combobox()
self.create_outline_options_combobox()
self.create_width_options_combobox()
def draw_arc_options(self):
self.create_fill_options_combobox()
self.create_outline_options_combobox()
self.create_width_options_combobox()
def draw_triangle_options(self):
self.create_fill_options_combobox()
self.create_outline_options_combobox()
self.create_width_options_combobox()
def draw_star_options(self):
self.create_number_of_spokes_options_combobox()
self.create_fill_options_combobox()
self.create_outline_options_combobox()
self.create_width_options_combobox()
def create_fill_options_combobox(self):
tk.Label(self.top_bar, text='Fill:').pack(side="left")
self.fill_combobox = ttk.Combobox(
self.top_bar, state='readonly', width=5)
self.fill_combobox.pack(side="left")
self.fill_combobox['values'] = ('none', 'fg', 'bg', 'black', 'white')
self.fill_combobox.bind('<<ComboboxSelected>>', self.set_fill)
self.fill_combobox.set(self.fill)
def create_number_of_spokes_options_combobox(self):
tk.Label(self.top_bar, text='Number of Edges:').pack(side="left")
self.number_of_spokes_combobox = ttk.Combobox(
self.top_bar, state='readonly', width=3)
self.number_of_spokes_combobox.pack(side="left")
self.number_of_spokes_combobox[
'values'] = tuple(i for i in range(5, 50))
self.number_of_spokes_combobox.bind(
'<<ComboboxSelected>>', self.set_number_of_spokes)
self.number_of_spokes_combobox.set(self.number_of_spokes)
def create_outline_options_combobox(self):
tk.Label(self.top_bar, text='Outline:').pack(side="left")
self.outline_combobox = ttk.Combobox(
self.top_bar, state='readonly', width=5)
self.outline_combobox.pack(side="left")
self.outline_combobox['values'] = (
'none', 'fg', 'bg', 'black', 'white')
self.outline_combobox.bind('<<ComboboxSelected>>', self.set_outline)
self.outline_combobox.set(self.outline)
def create_width_options_combobox(self):
tk.Label(self.top_bar, text='Width:').pack(side="left")
self.width_combobox = ttk.Combobox(
self.top_bar, state='readonly', width=3)
self.width_combobox.pack(side="left")
self.width_combobox['values'] = (
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0)
self.width_combobox.bind('<<ComboboxSelected>>', self.set_width)
self.width_combobox.set(self.width)
def create_dash_options_combobox(self):
tk.Label(self.top_bar, text='Dash:').pack(side="left")
self.dash_combobox = ttk.Combobox(
self.top_bar, state='readonly', width=5)
self.dash_combobox.pack(side="left")
self.dash_combobox['values'] = ('none', 'small', 'medium', 'large')
self.dash_combobox.bind('<<ComboboxSelected>>', self.set_dash)
self.dash_combobox.current(0)
def create_arrow_options_combobox(self):
tk.Label(self.top_bar, text='Arrow:').pack(side="left")
self.arrow_combobox = ttk.Combobox(
self.top_bar, state='readonly', width=5)
self.arrow_combobox.pack(side="left")
self.arrow_combobox['values'] = ('none', 'first', 'last', 'both')
self.arrow_combobox.bind('<<ComboboxSelected>>', self.set_arrow)
self.arrow_combobox.current(0)
def set_fill(self, event=None):
fill_color = self.fill_combobox.get()
if fill_color == 'none':
self.fill = '' # transparent
elif fill_color == 'fg':
self.fill = self.foreground
elif fill_color == 'bg':
self.fill = self.background
else:
self.fill = fill_color
def set_outline(self, event=None):
outline_color = self.outline_combobox.get()
if outline_color == 'none':
self.outline = '' # transparent
elif outline_color == 'fg':
self.outline = self.foreground
elif outline_color == 'bg':
self.outline = self.background
else:
self.outline = outline_color
def set_width(self, event):
self.width = float(self.width_combobox.get())
def set_number_of_spokes(self, event):
self.number_of_spokes = int(self.number_of_spokes_combobox.get())
def set_arrow(self, event):
self.arrow = self.arrow_combobox.get()
def set_dash(self, event):
'''Dash takes value from 1 to 255'''
dash_size = self.dash_combobox.get()
if dash_size == 'none':
self.dash = None
elif dash_size == 'small':
self.dash = 1
elif dash_size == 'medium':
self.dash = 15
elif dash_size == 'large':
self.dash = 100
def create_color_palette(self):
self.color_palette = tk.Canvas(self.tool_bar, height=55, width=55)
self.color_palette.grid(row=10, column=1, columnspan=2, pady=5, padx=3)
self.background_palette = self.color_palette.create_rectangle(
15, 15, 48, 48, outline=self.background, fill=self.background)
self.foreground_palette = self.color_palette.create_rectangle(
1, 1, 33, 33, outline=self.foreground, fill=self.foreground)
self.bind_color_palette()
def bind_color_palette(self):
self.color_palette.tag_bind(
self.background_palette, "<Button-1>", self.set_background_color)
self.color_palette.tag_bind(
self.foreground_palette, "<Button-1>", self.set_foreground_color)
def create_current_coordinate_label(self):
self.current_coordinate_label = tk.Label(
self.tool_bar, text='x:0\ny: 0 ')
self.current_coordinate_label.grid(
row=13, column=1, columnspan=2, pady=5, padx=1, sticky='w')
def show_current_coordinates(self, event=None):
x_coordinate = event.x
y_coordinate = event.y
coordinate_string = "x:{0}\ny:{1}".format(x_coordinate, y_coordinate)
self.current_coordinate_label.config(text=coordinate_string)
def function_not_defined(self):
pass
def execute_selected_method(self):
self.current_item = None
func = getattr(
self, self.selected_tool_bar_function, self.function_not_defined)
func()
def draw_line(self):
self.current_item = self.canvas.create_line(
self.start_x, self.start_y, self.end_x, self.end_y, fill=self.fill, width=self.width, arrow=self.arrow, dash=self.dash)
def draw_oval(self):
self.current_item = self.canvas.create_oval(
self.start_x, self.start_y, self.end_x, self.end_y, outline=self.outline, fill=self.fill, width=self.width)
def draw_rectangle(self):
self.current_item = self.canvas.create_rectangle(
self.start_x, self.start_y, self.end_x, self.end_y, outline=self.outline, fill=self.fill, width=self.width)
def draw_arc(self):
self.current_item = self.canvas.create_arc(
self.start_x, self.start_y, self.end_x, self.end_y, outline=self.outline, fill=self.fill, width=self.width)
def draw_triangle(self):
dx = self.end_x - self.start_x
dy = self.end_y - self.start_y
z = complex(dx, dy)
radius, angle0 = cmath.polar(z)
edges = 3
points = list()
for edge in range(edges):
angle = angle0 + edge * (2 * math.pi) / edges
points.append(self.start_x + radius * math.cos(angle))
points.append(self.start_y + radius * math.sin(angle))
self.current_item = self.canvas.create_polygon(
points, outline=self.outline, fill=self.fill,
width=self.width)
def draw_star(self):
dx = self.end_x - self.start_x
dy = self.end_y - self.start_y
z = complex(dx, dy)
radius_out, angle0 = cmath.polar(z)
radius_in = radius_out / 2
points = list()
for edge in range(self.number_of_spokes):
angle = angle0 + edge * (2 * math.pi) / self.number_of_spokes
points.append(self.start_x + radius_out * math.cos(angle))
points.append(self.start_y + radius_out * math.sin(angle))
angle += math.pi / self.number_of_spokes
points.append(self.start_x + radius_in * math.cos(angle))
points.append(self.start_y + radius_in * math.sin(angle))
self.current_item = self.canvas.create_polygon(
points, outline=self.outline, fill=self.fill,
width=self.width)
def create_tool_bar_buttons(self):
for index, name in enumerate(self.tool_bar_functions):
icon = tk.PhotoImage(file='icons/' + name + '.gif')
self.button = tk.Button(
self.tool_bar, image=icon, command=lambda index=index: self.on_tool_bar_button_clicked(index))
self.button.grid(
row=index // 2, column=1 + index % 2, sticky='nsew')
self.button.image = icon
def remove_options_from_top_bar(self):
for child in self.top_bar.winfo_children():
child.destroy()
def show_selected_tool_icon_in_top_bar(self, function_name):
display_name = function_name.replace("_", " ").capitalize() + ":"
tk.Label(self.top_bar, text=display_name).pack(side="left")
photo = tk.PhotoImage(
file='icons/' + function_name + '.gif')
label = tk.Label(self.top_bar, image=photo)
label.image = photo
label.pack(side="left")
def bind_mouse(self):
self.canvas.bind("<Button-1>", self.on_mouse_button_pressed)
self.canvas.bind(
"<Button1-Motion>", self.on_mouse_button_pressed_motion)
self.canvas.bind(
"<Button1-ButtonRelease>", self.on_mouse_button_released)
self.canvas.bind("<Motion>", self.on_mouse_unpressed_motion)
def on_mouse_button_pressed(self, event):
self.start_x = self.end_x = self.canvas.canvasx(event.x)
self.start_y = self.end_y = self.canvas.canvasy(event.y)
self.execute_selected_method()
def on_mouse_button_pressed_motion(self, event):
self.end_x = self.canvas.canvasx(event.x)
self.end_y = self.canvas.canvasy(event.y)
self.canvas.delete(self.current_item)
self.execute_selected_method()
def on_mouse_button_released(self, event):
self.end_x = self.canvas.canvasx(event.x)
self.end_y = self.canvas.canvasy(event.y)
def on_mouse_unpressed_motion(self, event):
self.show_current_coordinates(event)
def __init__(self, root):
super().__init__(root)
self.create_gui()
self.bind_mouse()
def create_gui(self):
self.create_menu()
self.create_top_bar()
self.create_tool_bar()
self.create_tool_bar_buttons()
self.create_drawing_canvas()
self.create_color_palette()
self.create_current_coordinate_label()
        self.bind_menu_accelerator_keys()
self.show_selected_tool_icon_in_top_bar("draw_line")
self.draw_line_options()
def create_menu(self):
self.menubar = tk.Menu(self.root)
menu_definitions = (
'File- &New/Ctrl+N/self.on_new_file_menu_clicked, Save/Ctrl+S/self.on_save_menu_clicked, SaveAs/ /self.on_save_as_menu_clicked, sep, Exit/Alt+F4/self.on_close_menu_clicked',
'Edit- Undo/Ctrl+Z/self.on_undo_menu_clicked, sep',
'View- Zoom in//self.on_canvas_zoom_in_menu_clicked,Zoom Out//self.on_canvas_zoom_out_menu_clicked',
'About- About/F1/self.on_about_menu_clicked'
)
self.build_menu(menu_definitions)
def create_top_bar(self):
self.top_bar = tk.Frame(self.root, height=25, relief="raised")
self.top_bar.pack(fill="x", side="top", pady=2)
def create_tool_bar(self):
self.tool_bar = tk.Frame(self.root, relief="raised", width=50)
self.tool_bar.pack(fill="y", side="left", pady=3)
def create_drawing_canvas(self):
self.canvas_frame = tk.Frame(self.root, width=900, height=900)
self.canvas_frame.pack(side="right", expand="yes", fill="both")
self.canvas = tk.Canvas(self.canvas_frame, background="white",
width=500, height=500, scrollregion=(0, 0, 800, 800))
self.create_scroll_bar()
self.canvas.pack(side=tk.RIGHT, expand=tk.YES, fill=tk.BOTH)
def create_scroll_bar(self):
x_scroll = tk.Scrollbar(self.canvas_frame, orient="horizontal")
x_scroll.pack(side="bottom", fill="x")
x_scroll.config(command=self.canvas.xview)
y_scroll = tk.Scrollbar(self.canvas_frame, orient="vertical")
y_scroll.pack(side="right", fill="y")
y_scroll.config(command=self.canvas.yview)
self.canvas.config(
xscrollcommand=x_scroll.set, yscrollcommand=y_scroll.set)
    def bind_menu_accelerator_keys(self):
self.root.bind('<KeyPress-F1>', self.on_about_menu_clicked)
self.root.bind('<Control-N>', self.on_new_file_menu_clicked)
self.root.bind('<Control-n>', self.on_new_file_menu_clicked)
self.root.bind('<Control-s>', self.on_save_menu_clicked)
self.root.bind('<Control-S>', self.on_save_menu_clicked)
self.root.bind('<Control-z>', self.on_undo_menu_clicked)
self.root.bind('<Control-Z>', self.on_undo_menu_clicked)
def on_new_file_menu_clicked(self, event=None):
pass
def on_save_menu_clicked(self, event=None):
pass
def on_save_as_menu_clicked(self):
pass
def on_canvas_zoom_out_menu_clicked(self):
pass
def on_canvas_zoom_in_menu_clicked(self):
pass
def on_close_menu_clicked(self):
pass
def on_undo_menu_clicked(self, event=None):
pass
def on_about_menu_clicked(self, event=None):
pass
if __name__ == '__main__':
root = tk.Tk()
app = PaintApplication(root)
root.mainloop()
|
11536158
|
import json
import os
import pickle
import numpy as np
import scipy.io
def ensuredir(path):
"""
    Creates a folder if it doesn't exist.
:param path: path to the folder to create
"""
if len(path) == 0:
return
if not os.path.exists(path):
os.makedirs(path)
def load(path, pkl_py2_comp=False):
"""
Loads the content of a file. It is mainly a convenience function to
avoid adding the ``open()`` contexts. File type detection is based on extensions.
Can handle the following types:
    - .pkl: pickles
    - .npy: numpy arrays
    - .txt: text files, result is a list of strings with trailing newline characters removed
    - .mat: Matlab files, loaded with ``scipy.io.loadmat``
    - .json: JSON files
:param path: path to the file
:param pkl_py2_comp: if True, when loading a pickle adds Python 2 compatibility
"""
if path.endswith('.pkl'):
with open(path, 'rb') as f:
if pkl_py2_comp:
return pickle.load(f, encoding='latin1')
else:
return pickle.load(f)
elif path.endswith('.npy'):
return np.load(path)
elif path.endswith('.txt'):
with open(path, 'r') as f:
return [x.rstrip('\n\r') for x in list(f)]
elif path.endswith('.mat'):
return scipy.io.loadmat(path)
elif path.endswith('.json'):
with open(path, 'r') as f:
return json.load(f)
else:
raise NotImplementedError("Unknown extension: " + os.path.splitext(path)[1])
def save(path, var, varname=None):
"""
Saves the variable ``var`` to the given path. The file format depends on the file extension.
List of supported file types:
    - .pkl: pickle
    - .npy: numpy
    - .mat: matlab, needs ``varname`` keyword argument defined
    - .json: JSON, pretty-printed with sorted keys
    - .txt: text, writes the string directly or one item per line for other iterables
"""
ensuredir(os.path.dirname(path))
if path.endswith(".pkl"):
with open(path, 'wb') as f:
pickle.dump(var, f, 2)
elif path.endswith(".mat"):
assert varname is not None, "when using matlab format the variable name must be defined"
scipy.io.savemat(path, {varname: var})
elif path.endswith(".npy"):
np.save(path, var)
elif path.endswith('.json'):
with open(path, 'w') as f:
json.dump(var, f, indent=2, sort_keys=True)
elif path.endswith(".txt"):
with open(path, 'w') as f:
if isinstance(var, str):
f.write(var)
else:
for i in var:
f.write(i)
f.write('\n')
else:
raise NotImplementedError("Unknown extension: " + os.path.splitext(path)[1])
def assert_shape(data, shape):
"""
Asserts a numpy array's shape. The shape is a tuple, describing a pattern of shape:
- An integer means the dimension must be the exact same size at that position
- None means any value is matched
    - * means any number of dimensions are matched; corresponds to '...' in indexing
:param data: a numpy array
:param shape: a tuple or list
"""
star_pos = len(shape)
for i, j in enumerate(shape):
if j == "*":
if star_pos < len(shape):
raise Exception("Only one asterisk (*) character allowed")
star_pos = i
assert len(data.shape) >= (len(shape) if star_pos == len(shape) else len(shape) - 1), "Unexpected shape: " + str(data.shape)
for i in range(0, star_pos):
if shape[i] is not None:
assert data.shape[i] == shape[i], "Unexpected shape: " + str(data.shape)
for i in range(star_pos + 1, len(shape)):
ind = i - len(shape)
if shape[ind] is not None:
assert data.shape[ind] == shape[ind], "Unexpected shape: " + str(data.shape)
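# A minimal usage sketch of ``assert_shape`` (the array below is made up for
# illustration):
#
#   batch = np.zeros((16, 3, 224, 224))
#   assert_shape(batch, (None, 3, 224, 224))  # any batch size, fixed remaining dims
#   assert_shape(batch, (16, "*"))            # fixed batch size, any trailing dims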
|
11536163
|
import datetime
import pytest
from tools.nasa_api import download_image, get_info, os
"""
Tests for get_info()
"""
def test_get_info_specific_date():
# Define the answer for 2019-12-23
correct_info = {
'date': '2019-12-23',
'explanation': "Where is the best place to collect a surface sample from asteroid Bennu? Launched in 2016, NASA sent the robotic Origins, Spectral Interpretation, Resource Identification, Security, Regolith Explorer (OSIRIS-REx) to investigate the 500-meter-across asteroid 101955 Bennu. After mapping the near-Earth asteroid's dark surface, OSIRIS-REx will next touch Bennu's surface in 2020 August to collect a surface sample. The featured 23-second time-lapse video shows four candidate locations for the touch, from which NASA chose just one earlier this month. NASA chose the Nightingale near Bennu's northern hemisphere as the primary touch-down spot because of its relative flatness, lack of boulders, and apparent abundance of fine-grained sand. Location Osprey is the backup. NASA plans to return soil samples from Bennu to Earth in 2023 for a detailed analysis. Free Presentation: APOD Editor to show best astronomy images of 2019 -- and the decade -- in NYC on January 3",
'media_type': 'video',
'service_version': 'v1',
'title': 'Places for OSIRIS-REx to Touch Asteroid Bennu',
'url': 'https://www.youtube.com/embed/pvKEG141GmU?rel=0'
}
# Obtain given answer
DATE = datetime.datetime.strptime("2019-12-23", "%Y-%m-%d")
obtained_info = get_info(DATE)
assert obtained_info == correct_info
"""
Tests for download_image()
"""
@pytest.fixture
def mock_os_path_exists(monkeypatch):
def mock_path_exists(*args, **kwargs):
return True
monkeypatch.setattr(os.path, 'exists', mock_path_exists)
def test_img_path(DATE_IMG):
expected = '2019-12-24_A-Northern-Winter-Sky-Panorama.jpg'
img_path = download_image(date=DATE_IMG)
assert expected in img_path
assert os.path.exists(img_path)
def test_img_cached(DATE_IMG, mock_os_path_exists, capsys):
img_path = download_image(date=DATE_IMG)
captured = capsys.readouterr()
assert "Today's image has already been downloaded and is now being set as background." in captured[0]
|
11536176
|
from ..factory import Method
class recoverPassword(Method):
recovery_code = None # type: "string"
|
11536184
|
colors_list = ['purple', 'brown', 'orange', 'blue', 'pink', 'fuchsia']
print(colors_list)
colors_list.remove('brown')
print(colors_list)
colors_list.append('yellow')
print(colors_list)
print(colors_list[1] + " and " + colors_list[2])
colors_list.insert(1, 'red')
print("NOTE: Inserting a new item will change the indexes.")
# this is the same command but the output has changed
print(colors_list[1] + " and " + colors_list[2])
print("NOTE: sort() alters the list.")
colors_list.sort()
print(colors_list)
colors_list.reverse()
print(colors_list)
|
11536185
|
import os
from unittest import TestCase
from micrograph_cleaner_em.tests.testConfig import TEST_DATA_ROOT_DIR
class TestFixJumpInBorders(TestCase):
def test_fixJumps(self):
from micrograph_cleaner_em.predictMask import fixJumpInBorders
from micrograph_cleaner_em.filesManager import loadMic
micsDir=os.path.join(TEST_DATA_ROOT_DIR,"rawPreds")
fnamesDict={
"20190628_Her2_2mgml_nanoprobe_0003_aligned_mic_DW.mrc":False,
"20190628_Her2_2mgml_nanoprobe_0094_aligned_mic_DW.mrc":False,
"20190628_Her2_2mgml_nanoprobe_0225_aligned_mic_DW.mrc":False,
"20190628_Her2_2mgml_nanoprobe_0330_aligned_mic_DW.mrc":True,
"stack_0002_2x_SumCorr.mrc":True,
"stack_0007_2x_SumCorr.mrc": True
}
for basename in fnamesDict:
fname= os.path.join(micsDir, basename)
micro_out= loadMic(fname)
axis=0
stride=128
micro_fix, wasFixed= fixJumpInBorders(micro_out.copy(), axis, stride)
print("wasFixed", wasFixed)
self.assertTrue( wasFixed == fnamesDict[basename])
# import matplotlib.pyplot as plt
# fig=plt.figure(); fig.suptitle(fname); fig.add_subplot(121); plt.imshow(micro_out, cmap="gray"); fig.add_subplot(122); plt.imshow(micro_fix, cmap="gray"); plt.show()
|
11536194
|
import errno
from time import sleep
import json
import logging
import os
import re
import requests
from bs4 import BeautifulSoup
from banned_exception import BannedException
from constants import AMAZON_BASE_URL
OUTPUT_DIR = 'comments'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
def get_reviews_filename(product_id):
filename = os.path.join(OUTPUT_DIR, '{}.json'.format(product_id))
exist = os.path.isfile(filename)
return filename, exist
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def persist_comment_to_disk(reviews):
if len(reviews) == 0:
return False
product_id_set = set([r['product_id'] for r in reviews])
assert len(product_id_set) == 1, 'all product ids should be the same in the reviews list.'
product_id = next(iter(product_id_set))
output_filename, exist = get_reviews_filename(product_id)
if exist:
return False
mkdir_p(OUTPUT_DIR)
# https://stackoverflow.com/questions/18337407/saving-utf-8-texts-in-json-dumps-as-utf8-not-as-u-escape-sequence/18337754
with open(output_filename, 'w', encoding='utf-8') as fp:
json.dump(reviews, fp, sort_keys=True, indent=4, ensure_ascii=False)
return True
def extract_product_id(link_from_main_page):
# e.g. B01H8A7Q42
    p_id = ''  # empty so the regex match below simply fails if no tag is found
tags = ['/dp/', '/gp/product/']
for tag in tags:
try:
p_id = link_from_main_page[link_from_main_page.index(tag) + len(tag):].split('/')[0]
        except ValueError:  # tag not present in this link
pass
m = re.match('[A-Z0-9]{10}', p_id)
if m:
return m.group()
else:
return None
def get_soup(url):
if AMAZON_BASE_URL not in url:
url = AMAZON_BASE_URL + url
nap_time_sec = 1
logging.debug('Script is going to sleep for {} (Amazon throttling). ZZZzzzZZZzz.'.format(nap_time_sec))
sleep(nap_time_sec)
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36'
}
logging.debug('-> to Amazon : {}'.format(url))
out = requests.get(url, headers=header)
assert out.status_code == 200
soup = BeautifulSoup(out.content, 'lxml')
if 'captcha' in str(soup):
raise BannedException('Your bot has been detected. Please wait a while.')
return soup
|
11536219
|
from footmark.regioninfo import RegionInfo
class VPCRegionInfo(RegionInfo):
"""
    Represents a VPC Region
"""
def __init__(self, connection=None, name=None, id=None,
connection_cls=None):
from footmark.vpc.connection import VPCConnection
super(VPCRegionInfo, self).__init__(connection, name, id,
VPCConnection)
|
11536221
|
import pandas as pd
import numpy as np
import cv2
df = pd.read_csv('data/unlabeled_csv/thinking.csv',index_col=0)
pts = df.to_numpy().astype(int)
black_img = np.zeros((720,1280), dtype=np.uint8)
n = len(pts)
counter = 0
while True:
img = np.copy(black_img)
img[pts[:counter].T[1],pts[:counter].T[0]]=255
img = cv2.flip(img, 1)
cv2.imshow('frame', img)
key = cv2.waitKey(1)
if key == ord('q'):
break
if key == ord('b'):
counter = max(counter-1,0)
print(counter-1)
if key == ord('n'):
counter = min(counter+1,n)
print(counter-1)
if key == ord('z'):
counter = 0
print(counter-1)
cv2.destroyAllWindows()
|
11536321
|
import numpy as np
import math
from sklearn.datasets import make_moons
from scipy.stats import norm
# Create a simple dataset
def create_twomoon_dataset(n, p):
relevant, y = make_moons(n_samples=n, shuffle=True, noise=0.1, random_state=None)
print(y.shape)
noise_vector = norm.rvs(loc=0, scale=1, size=[n,p-2])
data = np.concatenate([relevant, noise_vector], axis=1)
print(data.shape)
return data, y
def create_sin_dataset(n, p):
'''This dataset was added to provide an example of L1 norm reg failure for presentation.
'''
assert p == 2
x1 = np.random.uniform(-math.pi, math.pi, n).reshape(n ,1)
x2 = np.random.uniform(-math.pi, math.pi, n).reshape(n, 1)
y = np.sin(x1)
data = np.concatenate([x1, x2], axis=1)
print("data.shape: {}".format(data.shape))
return data, y
|
11536331
|
USED_MODELS = ['Baseline', 'SDBN', 'UBM', 'UBM-IA', 'EB_UBM', 'EB_UBM-IA', 'DCM', 'DCM-IA', 'DBN', 'DBN-IA']
EXTENDED_LOG_FORMAT = False
TRAIN_FOR_METRIC = False
PRINT_EBU_STATS = True
MAX_ITERATIONS = 30
DEBUG = False
PRETTY_LOG = True
MIN_DOCS_PER_QUERY = 10
MAX_DOCS_PER_QUERY = 10
SERP_SIZE = 10
TRANSFORM_LOG = False
QUERY_INDEPENDENT_PAGER = False
DEFAULT_REL = 0.5
MAX_VERTICAL_ID = 20
DEFAULT_SAT_CLICK = 0.5
DEFAULT_SAT_EXAM = 0.5
ALPHA_PRIOR = [1.0, 2.0]
BETA_PRIOR = [1.0, 2.0]
S_C_PRIOR = [1.0, 5.0]
S_E_PRIOR = [1.0, 5.0]
VPT_EPSILON = 1
|
11536346
|
from os.path import join
import zipfile
import logging
from rastervision.pipeline import rv_config
from rastervision.pipeline.config import (build_config, upgrade_config)
from rastervision.pipeline.file_system.utils import (download_if_needed,
make_dir, file_to_json)
from rastervision.core.data.raster_source import ChannelOrderError
from rastervision.core.analyzer import StatsAnalyzerConfig
log = logging.getLogger(__name__)
class Predictor():
"""Class for making predictions based off of a model bundle."""
def __init__(self,
model_bundle_uri,
tmp_dir,
update_stats=False,
channel_order=None):
"""Creates a new Predictor.
Args:
model_bundle_uri: URI of the model bundle to use. Can be any
type of URI that Raster Vision can read.
tmp_dir: Temporary directory in which to store files that are used
by the Predictor. This directory is not cleaned up by this
class.
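            update_stats: if True, an analysis pass recomputes the raster
                stats for the new imagery before prediction is run.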
channel_order: Option for a new channel order to use for the
imagery being predicted against. If not present, the
channel_order from the original configuration in the predict
package will be used.
"""
self.tmp_dir = tmp_dir
self.update_stats = update_stats
self.model_loaded = False
bundle_path = download_if_needed(model_bundle_uri, tmp_dir)
bundle_dir = join(tmp_dir, 'bundle')
make_dir(bundle_dir)
with zipfile.ZipFile(bundle_path, 'r') as bundle_zip:
bundle_zip.extractall(path=bundle_dir)
config_path = join(bundle_dir, 'pipeline-config.json')
config_dict = file_to_json(config_path)
rv_config.set_everett_config(
config_overrides=config_dict.get('rv_config'))
config_dict = upgrade_config(config_dict)
self.config = build_config(config_dict)
self.scene = self.config.dataset.validation_scenes[0]
if not hasattr(self.scene.raster_source, 'uris'):
raise Exception(
'raster_source in model bundle must have uris as field')
if not hasattr(self.scene.label_store, 'uri'):
raise Exception(
'label_store in model bundle must have uri as field')
for t in self.scene.raster_source.transformers:
t.update_root(bundle_dir)
if self.update_stats:
stats_analyzer = StatsAnalyzerConfig(
output_uri=join(bundle_dir, 'stats.json'))
self.config.analyzers = [stats_analyzer]
self.scene.label_source = None
self.scene.aoi_uris = None
self.scene.aoi_geometries = None
self.scene.raster_source.extent_crop = None
self.config.dataset.train_scenes = [self.scene]
self.config.dataset.validation_scenes = [self.scene]
self.config.dataset.test_scenes = []
self.config.train_uri = bundle_dir
if channel_order is not None:
self.scene.raster_source.channel_order = channel_order
self.pipeline = None
def predict(self, image_uris, label_uri):
"""Generate predictions for the given image.
Args:
image_uris: URIs of the images to make predictions against.
This can be any type of URI readable by Raster Vision
FileSystems.
label_uri: URI to save labels off into
"""
if self.pipeline is None:
self.scene.raster_source.uris = image_uris
self.pipeline = self.config.build(self.tmp_dir)
if not hasattr(self.pipeline, 'predict'):
raise Exception(
'pipeline in model bundle must have predict method')
try:
self.scene.raster_source.uris = image_uris
self.scene.label_store.uri = label_uri
if self.update_stats:
self.pipeline.analyze()
self.pipeline.predict()
except ChannelOrderError:
raise ValueError(
'The predict package is using a channel_order '
'with channels unavailable in the imagery.\nTo set a new '
'channel_order that only uses channels available in the '
'imagery, use the --channel-order option.')
|
11536418
|
from __future__ import print_function
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# All of these examples are really, really outdated but offer some insights
# into using the python code, if you want to check it out
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import numpy as np
import pypose as pose
import pypose.mpii as ds # Use this to swap which dataset you want to use
from six.moves import xrange
# Sample dataset generation
if False:
    # Everything is pretty self explanatory here (outdated and not functional
    # anymore; the command line interface is better)
filename = 'sample'
numsamples = 100
is_train = 1
augmentation = 1
pose.data.generateset(ds, filename, numsamples, is_train, chg=augmentation)
# Sample report
# (compares performance based on whether the person is facing forward or backward)
if False:
# Get predictions
preds = np.load(pose.eval.get_path(ds.name, 'nyu_pred'))
# Get prediction error
dists = pose.eval.getdists(preds)
# To create our filters for the report: 307-DR,304-hg-D
# Load up ground truth annotations
gt_idx = pose.eval.gt_idx[ds.name]
# Compare shoulder annotations
sho_diff = np.array([ds.partinfo(gt_idx[i,0],gt_idx[i,1],'lsho')[0][0] -
ds.partinfo(gt_idx[i,0],gt_idx[i,1],'rsho')[0][0]
                         for i in xrange(len(gt_idx))], float)
# Normalize difference by sample scale size
sho_diff /= gt_idx[:,2]
# Define the filters, numpy generates boolean arrays out of these comparisons
filtnames = ['Forward', 'Back', 'Profile', 'Total']
thresh = .3
filts = [sho_diff > thresh,
sho_diff < -thresh,
(sho_diff < thresh) * (sho_diff > -thresh),
None]
# Prepare the document
title='Performance Comparison - Facing Forward or Backward'
pdf = pose.report.PdfPages(pose.ref.posedir+'/img/reports/fwd_back_sample.pdf')
# Add whatever pages you want
print("Doing overall comparison...")
pose.report.filtercomparison(ds.name, dists, filts, filtnames=filtnames, title=title, pdf=pdf)
for i,filt in enumerate(filts[:-1]):
print("Generating images for - %s..." % filtnames[i])
pose.report.sampleimages(ds, preds, dists=dists, pdf=pdf, title=filtnames[i], filt=filt)
pose.report.sampleimages(ds, preds, dists=dists, pdf=pdf, title=filtnames[i], filt=filt, get_worst=True)
# Save the pdf
pdf.close()
if True:
# Get predictions
preds = np.load(pose.eval.get_path(ds.name, 'nyu_pred'))
# Get prediction error
dists = pose.eval.getdists(preds)
# To create our filters for the report:
# Load up ground truth annotations
gt_idx = pose.eval.gt_idx[ds.name]
# Calculate torso angles (note this only works for mpii)
torso_angles = np.array([abs(ds.torsoangle(gt_idx[i,0], gt_idx[i,1])) for i in xrange(len(gt_idx))])
# Define filters
filtnames = ['< 20 degrees','20 < 40','40 < 120', '> 120', 'Total']
filts = [torso_angles <= 20,
(20 < torso_angles) * (torso_angles < 40),
(40 < torso_angles) * (torso_angles < 120),
(120 < torso_angles),
None]
# Prepare the document
title='Performance Comparison - Torso Deviation from Vertical'
pdf = pose.report.PdfPages(pose.ref.posedir+'/img/reports/torso_angle_sample.pdf')
print("Doing overall comparison...")
pose.report.filtercomparison(ds.name, dists, filts, filtnames=filtnames, title=title, pdf=pdf)
for i in xrange(7):
# This loop will only generate poor performing images for the first filter (people who are upright)
print("Generating images for page - %d..." % i)
pose.report.sampleimages(ds, preds, dists=dists, pdf=pdf, title=filtnames[0], filt=filts[0],
get_worst=True, page_num=i+1)
# Save the pdf
pdf.close()
"""
overall performance - taken out of report.py not adjusted to work here
def make(dataset, preds, partnames=None):
pdf = PdfPages(ref.posedir+'/img/test.pdf')
num_pages = 10
dists = eval.getdists(preds)
for i in xrange(num_pages):
print "Page %d..." % i
page_choice = i + 1
if i < num_pages / 2:
get_worst = False
else:
page_choice -= num_pages / 2
get_worst = True
sampleimages(dataset, preds, dists=dists, pdf=pdf, get_worst=get_worst,
partnames=partnames, title='Overall Performance', page_num=page_choice)
pdf.close()
"""
|
11536451
|
from __future__ import print_function
from setuptools import setup
from unflattener import __version__ as VERSION
REQUIRES = [
'numpy',
'pillow >= 2.2.1'
]
setup(
name='Unflattener',
version=VERSION,
description='Make normal maps for 2D art.',
url='http://github.com/dbohdan/unflattener',
author='dbohdan',
author_email='<EMAIL>',
license='BSD',
packages=['unflattener'],
    package_dir={'': '.'},
data_files=[('', ['LICENSE', 'README.md'])],
test_suite='unflattener.tests.suite',
zip_safe=False,
install_requires=REQUIRES,
entry_points = {
'console_scripts': [
'unflatten = unflattener.unflatten:main',
],
}
)
|
11536500
|
from .adapters.consul import SyncConsulDiscovery
from .adapters.router import SyncRouterDiscovery
from .adapters.static import SyncStaticDiscovery
from .base import SyncAbstractServiceDiscovery
__all__ = [
"SyncAbstractServiceDiscovery",
"SyncConsulDiscovery",
"SyncRouterDiscovery",
"SyncStaticDiscovery",
]
|
11536539
|
from cipher_description import CipherDescription
size = 512
salsa = CipherDescription(size)
n = 32
s = ['s{}'.format(i) for i in range(size)]
t = ['t{}'.format(i) for i in range(size)]
v = []
for i in range(16):
v.append(s[i*n:(i+1)*n])
def R(a,b,c,k):
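    # computes b ^= (a + c) <<< k on the bit-vector word representation;
    # the (i-k) % n index below implements the k-bit rotation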
salsa.add_mod(a,c,t[0:n],n,size)
for i in range(n):
salsa.apply_xor(b[i],t[(i-k)%n],b[i])
for i in range(4):
R(v[0+i],v[4+i],v[12+i],7)
R(v[0+i],v[8+i],v[4+i],9)
R(v[8+i],v[12+i],v[4+i],13)
R(v[8+i],v[0+i],v[12+i],18)
shuffle = [0, 4, 8, 12,
1, 5, 9, 13,
2, 6, 10, 14,
3, 7, 11, 15]
salsa.shufflewords(shuffle,n,1)
|
11536598
|
from array_deque import ArrayDeque
def test_circle_deque_features():
deque = ArrayDeque()
assert len(deque) == 0
assert deque.is_empty() == True
deque.add_last(5)
assert deque._array == [5]
deque.add_first(3)
assert deque._array == [3, 5]
deque.add_first(7)
assert deque._array == [7, 3, 5]
assert deque.first() == 7
deque.del_last()
assert len(deque) == 2
assert deque._array == [7, 3]
deque.del_first()
assert deque._array == [3]
assert deque.is_empty() == False
deque2 = ArrayDeque([1, 2, 3])
assert deque2._array == [1, 2, 3]
|
11536605
|
from asynctnt import Response
from asynctnt.exceptions import TarantoolSchemaError
from tests import BaseTarantoolTestCase
class DeleteTestCase(BaseTarantoolTestCase):
async def _fill_data(self):
data = [
[0, 'a', 1, 2, 'hello my darling'],
[1, 'b', 3, 4, 'hello my darling, again'],
]
for t in data:
await self.conn.insert(self.TESTER_SPACE_ID, t)
return data
async def test__delete_one(self):
data = await self._fill_data()
res = await self.conn.delete(self.TESTER_SPACE_ID, [data[0][0]])
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, [data[0]], 'Body ok')
res = await self.conn.select(self.TESTER_SPACE_ID, [0])
self.assertResponseEqual(res, [], 'Body ok')
async def test__delete_by_name(self):
data = await self._fill_data()
res = await self.conn.delete(self.TESTER_SPACE_NAME, [data[0][0]])
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, [data[0]], 'Body ok')
res = await self.conn.select(self.TESTER_SPACE_ID, [0])
self.assertResponseEqual(res, [], 'Body ok')
async def test__delete_by_index_id(self):
index_name = 'temp_idx'
res = self.tnt.command(
'make_third_index("{}")'.format(index_name)
)
index_id = res[0][0]
try:
await self.tnt_reconnect()
data = await self._fill_data()
res = await self.conn.delete(self.TESTER_SPACE_NAME, [data[1][2]],
index=index_id)
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, [data[1]], 'Body ok')
res = await self.conn.select(self.TESTER_SPACE_ID, [data[1][2]],
index=index_id)
self.assertResponseEqual(res, [], 'Body ok')
finally:
self.tnt.command(
'box.space.{}.index.{}:drop()'.format(
self.TESTER_SPACE_NAME, index_name)
)
async def test__delete_by_index_name(self):
index_name = 'temp_idx'
res = self.tnt.command(
'make_third_index("{}")'.format(index_name)
)
index_id = res[0][0]
try:
await self.tnt_reconnect()
data = await self._fill_data()
res = await self.conn.delete(self.TESTER_SPACE_NAME, [data[1][2]],
index=index_name)
self.assertIsInstance(res, Response, 'Got response')
self.assertEqual(res.code, 0, 'success')
self.assertGreater(res.sync, 0, 'sync > 0')
self.assertResponseEqual(res, [data[1]], 'Body ok')
res = await self.conn.select(self.TESTER_SPACE_ID, [data[1][2]],
index=index_id)
self.assertResponseEqual(res, [], 'Body ok')
finally:
self.tnt.command(
'box.space.{}.index.{}:drop()'.format(
self.TESTER_SPACE_NAME, index_name)
)
async def test__delete_by_name_no_schema(self):
await self.tnt_reconnect(fetch_schema=False)
with self.assertRaises(TarantoolSchemaError):
await self.conn.delete(self.TESTER_SPACE_NAME, [0])
async def test__delete_by_index_name_no_schema(self):
await self.tnt_reconnect(fetch_schema=False)
with self.assertRaises(TarantoolSchemaError):
await self.conn.delete(self.TESTER_SPACE_ID, [0],
index='primary')
async def test__delete_invalid_types(self):
with self.assertRaisesRegex(
TypeError,
"missing 2 required positional arguments: 'space' and 'key'"):
await self.conn.delete()
async def test__delete_key_tuple(self):
try:
await self.conn.delete(self.TESTER_SPACE_ID, (1,))
except Exception as e:
self.fail(e)
async def test__delete_dict_key(self):
data = await self._fill_data()
res = await self.conn.delete(self.TESTER_SPACE_ID, {
'f1': 0
})
self.assertResponseEqual(res, [data[0]], 'Body ok')
async def test__delete_dict_resp(self):
data = [0, 'hello', 0, 1, 'wow']
await self.conn.insert(self.TESTER_SPACE_ID, data)
res = await self.conn.delete(self.TESTER_SPACE_ID, [0])
self.assertResponseEqualKV(res, [{
'f1': 0,
'f2': 'hello',
'f3': 0,
'f4': 1,
'f5': 'wow'
}])
|
11536622
|
print("asdf".rpartition('g'))
print("asdf".rpartition('a'))
print("asdf".rpartition('s'))
print("asdf".rpartition('f'))
print("asdf".rpartition('d'))
print("asdf".rpartition('asd'))
print("asdf".rpartition('sdf'))
print("asdf".rpartition('as'))
print("asdf".rpartition('df'))
print("asdf".rpartition('asdf'))
print("asdf".rpartition('asdfa'))
print("asdf".rpartition('fasdf'))
print("asdf".rpartition('fasdfa'))
print("abba".rpartition('a'))
print("abba".rpartition('b'))
try:
print("asdf".rpartition(1))
except TypeError:
print("Raised TypeError")
else:
print("Did not raise TypeError")
try:
print("asdf".rpartition(''))
except ValueError:
print("Raised ValueError")
else:
print("Did not raise ValueError")
|
11536642
|
import numpy as np
import matplotlib.pyplot as plt
from gen_forward_op_parser import gen_forward_op_parser
def check_bounds(pt, pt0, pt1):
"""Checks if the pt is within range of segment (pt0,pt1)"""
return np.logical_and(
np.logical_and(pt[:,0]>=min(pt0[0], pt1[0]), pt[:,0]<=max(pt0[0], pt1[0])),
np.logical_and(pt[:,1]>=min(pt0[1], pt1[1]), pt[:,1]<=max(pt0[1], pt1[1])))
def get_line_params(end_pts):
"""Given a 2(npts) x 2(dim) of array of end_pts return line params
I will use the cross product trick here
"""
homogenized_pts = np.append(end_pts, np.ones((2,1)), axis=1)
line_params = np.cross(homogenized_pts[0], homogenized_pts[1])
line_params /= line_params[-1]
# cross gives ax+by+c = 0, further code assumes ax+by=c
# hence, the next line
line_params[-1] *= -1
return line_params
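# e.g. get_line_params(np.array([[0., 1.], [1., 2.]])) gives [1., -1., -1.],
# i.e. the line x - y = -1 (equivalently y = x + 1) through the two points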
def get_li(im, end_pts, grid_size):
"""Gets the intersection of the line defined by
line parameters with the cartesian grid defined
using grid size. origin is assumed to be the bottom-
left of the grid
params:
im (2d array): takes in gray scale image
        end_pts (ndarray): a 2 (npts) x 2 (dim) array of end points
grid_size (int): a cartesian grid of the given grid_size
is created with x=i and y=i lines with $i \in [grid_size]$
returns:
all intersection points with the grid
"""
line_params = get_line_params(end_pts)
grid_size = int(grid_size)
a,b,c = line_params
# first make the grid
x = np.arange(grid_size)
y = np.arange(grid_size)
# calc interesections
x_ = np.stack((x, (c - a*x)/b), axis=1)
y_ = np.stack(((c - b*y)/a, y), axis=1)
int_pts = np.concatenate((x_,y_), axis=0)
# clean the pts
idx_to_keep = check_bounds(int_pts, end_pts[0], end_pts[1])
new_int_points = int_pts[idx_to_keep]
new_int_points = np.unique(np.append(new_int_points, end_pts, axis=0), axis=0)
# python's pixel coordinate frame
    # Python's pixel centers have integer coordinates (e.g. pixel (10,10) occupies
    # the Cartesian square [9.5,10.5]x[9.5,10.5]). So the grid that we calculated
# our intersections with needs to be shifted by (0.5, 0.5) to get it in the required
# frame for estimating which pixels intersect
# sort the pts acc to x-coordinate
ind = np.argsort(new_int_points[:,0])
sorted_int_pts = new_int_points[ind] + np.array([[0.5,0.5]])
# calculate line_integral
rs = []
cs = []
n = len(sorted_int_pts) - 1
line_integral = np.zeros(n)
# Now, for calculating the pixel location that straddles any two consecutive points
# in the sorted points array, I use the midpoint. The midpoint of the two points,
# will always be inside required pixel. So if I cast it as int, I should have the pixel
    # coordinate. However, since the pixel center is at integer coordinates, I add
    # an additional 0.5 before the cast.
for i in range(n):
dist = np.linalg.norm(sorted_int_pts[i+1]-sorted_int_pts[i])
mp = (sorted_int_pts[i+1]+sorted_int_pts[i])/2.0
r = int(mp[1]+0.5) # python transposes images, hence 1 here and 0 for column
c = int(mp[0]+0.5)
rs.append(r)
cs.append(c)
line_integral[i] = im[r,c]*dist
return line_integral, sorted_int_pts, (rs,cs)
def test_get_li():
# ## Testing for `get_li` module
# here, I test a two sensor setup on an image with just ones
# this test only checks if I have picked up the correct pixels
# and have calculated the correct intersection points.
# to check, look at the plot and see if the 'x' are on the pixel
# edges and all the pixels where the dashed blue line crosses the
# image should have some random color in them.
end_pts = np.array([[23.45, 34.56],[100.97, 85.56]])
im = np.ones((128,128))
li, pts, x_ids = get_li(im, end_pts, 128)
for i in range(len(x_ids[0])):
im[x_ids[0][i],x_ids[1][i]] = np.random.rand()+1.1
plt.figure(figsize=(10,10))
plt.imshow(im)
plt.plot(end_pts[:,0]+0.5,end_pts[:,1]+0.5,'--')
plt.scatter(pts[:,0], pts[:,1], marker='x', c='r')
plt.show()
# ## Scale to random sensor grid
def setup_grid(nsensors, grid_size):
"""setup a random grid of sensors on the image"""
np.random.seed(0)
c = np.array([grid_size/2.0, grid_size/2.0])
r = grid_size/2.0
sensor_locs = np.zeros((nsensors, 2))
# pt = np.zeros(2)
for i in range(nsensors):
pt = np.zeros(2)
while np.linalg.norm(pt-c)>r:
pt = np.random.uniform(low=0.0, high=1.0, size=(2,))*grid_size
sensor_locs[i]=pt
return sensor_locs
def plot_sg(sensor_locs):
# norms = np.linalg.norm(sensor_locs-np.array([[64.0,64.0]]), axis=-1)
# print(norms)
# if np.all(norms<=64):
# print('Grid ok!')
plt.figure(figsize=(5,5))
plt.scatter(sensor_locs[:,0], sensor_locs[:,1])
plt.xlim((0,128))
plt.ylim((0,128))
plt.show()
from tqdm import tqdm
def get_forward_op(sensor_locs, grid_size):
"""sets up forward op"""
nsensors = len(sensor_locs)
n_measurements = int(nsensors*(nsensors-1)/2)
grid_size = int(grid_size)
print("Getting %d measurements from %d sensors!"%(n_measurements, nsensors))
F = np.zeros((n_measurements, grid_size**2))
end_pts = np.zeros((2,2))
ct = 0
for i in tqdm(range(nsensors)):
for j in range(i+1, nsensors):
end_pts[0] = sensor_locs[i]
end_pts[1] = sensor_locs[j]
im = np.ones((grid_size,grid_size))
li, _, x_ids = get_li(im, end_pts, grid_size)
for ii in range(len(x_ids[0])):
r,c = x_ids[0][ii],x_ids[1][ii]
F[ct,r*grid_size+c] = li[ii]
ct+=1
return F
def apply_F(F, Finv, im):
"""Projects `im` in range of F"""
return (Finv@(F@im.reshape(-1))).reshape(128,128)
def store_mats(F, nsensors):
"""takes F, calculates its pseudoinverse and saves both
as npy arrays"""
Finv = np.linalg.pinv(F)
np.save('../' + str(nsensors) + '_forward.npy', F)
np.save('../' + str(nsensors) + '_pinverse.npy', Finv)
print('Operators stored successfully!')
return
def gen_mask(points, grid_size):
"""Gets the convex hull of sensor locations"""
from matplotlib.path import Path
from scipy.spatial import ConvexHull
hull = ConvexHull(points)
hull_path = Path( points[hull.vertices] )
grid = np.zeros((grid_size, grid_size))
for x in range(grid_size):
for y in range(grid_size):
grid[x,y] = hull_path.contains_point((x,y))
grid = np.rot90(grid)
grid = grid[::-1,:]
grid = grid.flatten()
np.save('../' + str(points.shape[0]) + '_mask.npy', grid)
return grid
def main():
args = gen_forward_op_parser()
nsensors = args.n
grid_size = args.g
sensor_locs = setup_grid(nsensors, grid_size)
gen_mask(sensor_locs, grid_size)
F = get_forward_op(sensor_locs, grid_size)
store_mats(F, nsensors)
return None
###############################################################################
if __name__ == "__main__":
main()
|
11536657
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def getMinimumDifference(self, root: TreeNode) -> int:
def helper( root: TreeNode):
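            # iterative in-order traversal: a node re-enters the stack labeled 'c'
            # ("current") so values are yielded in sorted order, and consecutive
            # differences are streamed straight into min()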
traversal_queue = [(root, 'init')]
min_diff, prev_node_value = float('inf'), -2**31
while traversal_queue:
node, label = traversal_queue.pop()
                if label != 'c':
if node.right:
traversal_queue.append( (node.right, 'r') )
traversal_queue.append( (node, 'c') )
if node.left:
traversal_queue.append( (node.left, 'l') )
else:
yield (node.val - prev_node_value)
prev_node_value = node.val
return min( helper( root ) )
# n : the number of nodes in binary search tree
## Time Complexity: O( n )
#
# The overhead in time is the cost of in-order traversal, which is of O( n )
## Space Complexity: O( n )
#
# The overhead in space is the storage for traversal_queue, which is of O( n )
def test_bench():
## Test case_#1
root_1 = TreeNode(1)
root_1.right = TreeNode(3)
root_1.right.left = TreeNode(2)
# expected output:
'''
1
'''
print( Solution().getMinimumDifference(root_1) )
## Test case_#2
root_2 = TreeNode(5)
root_2.left = TreeNode(1)
root_2.right = TreeNode(10)
root_2.right.left = TreeNode(8)
root_2.right.right = TreeNode(13)
# expected output:
'''
2
'''
print( Solution().getMinimumDifference(root_2) )
if __name__ == '__main__':
test_bench()
|
11536713
|
import abc
import ast
import os
from typing import List, Optional
from good_smell import SmellWarning
class LintSmell(abc.ABC):
"""Abstract Base class to represent the sniffing instructions for the linter"""
def __init__(
self,
transform: bool,
path: Optional[str] = None,
tree: Optional[ast.AST] = None,
):
self.tree = tree
self.path = path
self.transform = transform
@classmethod
def from_source(
cls,
source_code: str,
transform: bool = True,
start_line: Optional[int] = 0,
end_line: Optional[int] = None,
path: Optional[str] = None,
) -> "LintSmell":
end_line = end_line or len(source_code.splitlines())
source_code = os.linesep.join(source_code.splitlines()[start_line:end_line])
return cls(transform=transform, path=path, tree=ast.parse(source_code))
@abc.abstractmethod
def check_for_smell(self) -> List[SmellWarning]:
"""Return a list of all occuring smells of this smell class"""
@abc.abstractmethod
def fix_smell(self) -> str:
"""Return a fixed version of the code without the code smell"""
@property
@abc.abstractmethod
def symbol(self) -> str:
"""The symbolic name for the smell"""
@property
@abc.abstractmethod
def warning_message(self) -> str:
"""The symbolic name for the smell"""
|
11536751
|
import markdown
from django.db import models
from mdeditor.fields import MDTextField
class Category(models.Model):
name = models.CharField(max_length=128, verbose_name='分类名', unique=True, db_index=True)
class Meta:
verbose_name = '分类'
verbose_name_plural = '分类管理'
def __str__(self):
return self.name
class Article(models.Model):
title = models.CharField(max_length=128, verbose_name='标题', unique=True, db_index=True)
content_raw = MDTextField(verbose_name='原始内容')
content_render = models.TextField(verbose_name='呈现内容', null=True, blank=True)
category = models.ForeignKey(Category, on_delete=models.SET_NULL, blank=False, null=True, verbose_name='分类',
db_index=True)
create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True)
def save(self, *args, **kwargs):
        # Render the raw Markdown content to HTML for display on the page
self.content_render = markdown.markdown(self.content_raw, extensions=[
'markdown.extensions.extra',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
])
super(Article, self).save(*args, **kwargs)
class Meta:
verbose_name = '文章'
verbose_name_plural = '文章管理'
def __str__(self):
return self.title
|
11536752
|
import os
os.system("netsh wlan show profile")
os.system("netsh wlan export profile folder=C:\ key=clear")
|
11536757
|
def generate_airflow_cmd(dag_id, task_id, execution_date, is_root_task=False):
return "airflow {sub_command} {dag_id}{task_id} {start_date} {end_date}".format(
sub_command="backfill" if is_root_task else "test",
dag_id=dag_id,
task_id=" %s" % task_id if not is_root_task else "",
start_date="-s %s" % execution_date,
end_date="-e %s" % execution_date,
)
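# e.g. generate_airflow_cmd("my_dag", "my_task", "2020-01-01") returns
# "airflow test my_dag my_task -s 2020-01-01 -e 2020-01-01"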
def generate_airflow_func_call(
dag_id,
schedule_interval,
execution_date,
task_type=None,
task_id=None,
task_retries=None,
task_command=None,
is_root_task=False,
):
if is_root_task:
return (
"DAG(dag_id='{dag_id}', default_args=args, schedule_interval='{schedule_interval}')"
".run(start_date={start_date}, end_date={end_date}, execution_date={execution_date})".format(
dag_id=dag_id,
schedule_interval=schedule_interval,
start_date=execution_date,
end_date=execution_date,
execution_date=execution_date,
)
)
elif task_id is not None:
return "{operator}(task_id='{task_id}', retries='{retries}', {command}))".format(
operator=task_type,
task_id=task_id,
retries=task_retries,
command=task_command,
)
|
11536784
|
from typing import Optional, Union, List
from banditpylib.bandits import ThresholdingBandit
from banditpylib.learners import SinglePlayerLearner
class ThresholdingBanditLearner(SinglePlayerLearner):
"""Abstract class for learners playing with thresholding bandit
:param int arm_num: number of arms
:param Optional[str] name: alias name
"""
def __init__(self, arm_num: int, name: Optional[str]):
super().__init__(name)
if arm_num < 2:
raise ValueError('Number of arms is expected at least 2. Got %d.' %
arm_num)
self.__arm_num = arm_num
@property
def running_environment(self) -> Union[type, List[type]]:
return ThresholdingBandit
@property
def arm_num(self) -> int:
"""Number of arms"""
return self.__arm_num
|
11536798
|
import time
import RPi.GPIO as GPIO
from adafruit_servokit import ServoKit
'''GPIO.setmode(GPIO.BCM)
GPIO.setup(11,GPIO.OUT)
servo1=GPIO.PWM(11,50)
servo1.start(2)'''
h = ServoKit(channels=16)
#servo1.ChangeDutyCycle(12)
#kit.servo[0].angle
init = [0,90,20,0,180,160,170,180,60,0,0,150]
limitLo = [0,0,20,0,0,40,0,0,60,0,0,30]
limitHi = [35,180,180,180,180,160,170,180,180,180,180,150]
cur = init
def changeDeg(pin,newDegree):
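    # sweep all listed servos toward their target angles in lockstep,
    # 5 degrees per tick (assumes current and target angles are multiples of 5)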
maxChange = 0
pinSize = len(pin)
for i in range(0,pinSize):
maxChange = max(abs(cur[pin[i]]-newDegree[i]),maxChange)
for deg in range(0,maxChange,5):
for i in range(0,pinSize):
if cur[pin[i]]<newDegree[i]:
cur[pin[i]] += 5
elif cur[pin[i]]>newDegree[i]:
cur[pin[i]] -= 5
for i in range(0,pinSize):
h.servo[pin[i]].angle = cur[pin[i]]
time.sleep(0.05)
#function closed
for i in range(0,12):
h.servo[i].angle=init[i]
for i in range(0,26,5):
h.servo[0].angle=i
time.sleep(0.05)
times=10
while times>0:
times-=1
changeDeg([3,4,1],[90,180,60])
changeDeg([3,4,1],[0,90,120])
changeDeg([3,4,1],[init[3],init[4],init[1]])
|
11536874
|
from django.http import HttpResponse, HttpRequest, Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.views.decorators.cache import cache_page
from recipe_db.analytics.spotlight.style import StyleAnalysis
from recipe_db.models import Style
from web_app.charts.style import StyleChartFactory
from web_app.charts.utils import NoDataException
from web_app.meta import StyleOverviewMeta, StyleMeta
from web_app.views.utils import render_chart, FORMAT_PNG, render_recipes_list
def overview(request: HttpRequest) -> HttpResponse:
categories = Style.objects.filter(parent_style=None).order_by('-recipes_count')
most_popular = Style.objects.exclude(parent_style=None).order_by('-recipes_count')[:5]
meta = StyleOverviewMeta().get_meta()
context = {'categories': categories, 'most_popular': most_popular, 'meta': meta}
return render(request, 'style/overview.html', context)
def category(request: HttpRequest, category_slug: str) -> HttpResponse:
style = get_object_or_404(Style, slug=category_slug)
if not style.is_category:
return redirect('style_detail', category_slug=style.category.slug, slug=style.slug)
return display_style(request, style)
def detail(request: HttpRequest, slug: str, category_slug: str) -> HttpResponse:
style = get_object_or_404(Style, slug=slug)
if style.is_category:
return redirect('style_category', category_slug=style.category.slug)
if category_slug != style.category.slug:
return redirect('style_detail', category_slug=style.category.slug, slug=style.slug)
return display_style(request, style)
def display_style(request: HttpRequest, style: Style) -> HttpResponse:
meta = StyleMeta(style).get_meta()
if style.recipes_count > 100:
meta.image = reverse('style_chart', kwargs=dict(
category_slug=style.category.slug,
slug=style.slug,
chart_type='og',
format=FORMAT_PNG,
))
context = {"style": style, 'meta': meta}
return render(request, 'style/detail.html', context)
def category_chart(request: HttpRequest, category_slug: str, chart_type: str, format: str) -> HttpResponse:
style = get_object_or_404(Style, slug=category_slug)
if not style.is_category:
return redirect('style_chart', category_slug=style.category.slug, slug=style.slug, chart_type=chart_type, format=format)
return display_chart(request, style, chart_type, format)
def chart(request: HttpRequest, slug: str, category_slug: str, chart_type: str, format: str) -> HttpResponse:
style = get_object_or_404(Style, slug=slug)
if style.is_category:
return redirect('style_category_chart', category_slug=style.category.slug, chart_type=chart_type, format=format)
if category_slug != style.category.slug:
return redirect('style_chart', category_slug=style.category.slug, slug=style.slug, chart_type=chart_type, format=format)
return display_chart(request, style, chart_type, format)
@cache_page(0)
def category_recipes(request: HttpRequest, category_slug: str) -> HttpResponse:
style = get_object_or_404(Style, slug=category_slug)
if not style.is_category:
return redirect('style_category_recipes', category_slug=style.category.slug)
recipes_list = StyleAnalysis(style).random_recipes(24)
return render_recipes_list(recipes_list)
@cache_page(0)
def recipes(request: HttpRequest, slug: str, category_slug: str) -> HttpResponse:
style = get_object_or_404(Style, slug=slug)
if style.is_category:
return redirect('style_category_recipes', category_slug=style.category.slug)
if category_slug != style.category.slug:
return redirect('style_recipes', category_slug=style.category.slug, slug=style.slug)
recipes_list = StyleAnalysis(style).random_recipes(24)
return render_recipes_list(recipes_list)
def display_chart(request: HttpRequest, style: Style, chart_type: str, format: str) -> HttpResponse:
    filter_param = request.GET.get('filter')
if StyleChartFactory.is_supported_chart(chart_type):
try:
chart = StyleChartFactory.plot_chart(style, chart_type, filter_param)
except NoDataException:
return HttpResponse(status=204)
else:
raise Http404('Unknown chart type %s.' % chart_type)
return render_chart(chart, format)
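
# A hypothetical urls.py sketch wired to the view names these handlers redirect
# to; the route shapes are assumptions inferred from the redirect kwargs above.
# urlpatterns = [
#     path('styles/', overview, name='style_overview'),
#     path('styles/<str:category_slug>/', category, name='style_category'),
#     path('styles/<str:category_slug>/<str:slug>/', detail, name='style_detail'),
#     path('styles/<str:category_slug>/charts/<str:chart_type>.<str:format>', category_chart, name='style_category_chart'),
#     path('styles/<str:category_slug>/<str:slug>/charts/<str:chart_type>.<str:format>', chart, name='style_chart'),
# ]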
|
11536880
|
graph = { "a" : ["b", "c", "d", "e", "g"],
"b" : ["c", "a"],
"c" : ["a", "b", "d"],
"d" : ["a", "c", "e"],
"e" : ["a", "d"],
"f" : ["g"],
"g" : ["a", "f"]
}
def EdgesList(graph):
    """Return the undirected edges of an adjacency-list graph, skipping any
    edge already recorded in the opposite direction."""
    edges = []
    for vertex in graph:
        for neighbour in graph[vertex]:
            if (neighbour, vertex) not in edges:
                edges.append((vertex, neighbour))
    return edges
print(EdgesList(graph))
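# Expected output (pairs follow the dict's insertion order):
# [('a', 'b'), ('a', 'c'), ('a', 'd'), ('a', 'e'), ('a', 'g'),
#  ('b', 'c'), ('c', 'd'), ('d', 'e'), ('f', 'g')]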
|
11536902
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from depth.models.builder import HEADS
from .decode_head import DepthBaseDecodeHead
class UpSample(nn.Sequential):
'''Fusion module
From Adabins
'''
def __init__(self, skip_input, output_features, conv_cfg=None, norm_cfg=None, act_cfg=None):
super(UpSample, self).__init__()
self.convA = ConvModule(skip_input, output_features, kernel_size=3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
self.convB = ConvModule(output_features, output_features, kernel_size=3, stride=1, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
def forward(self, x, concat_with):
up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)], mode='bilinear', align_corners=True)
return self.convB(self.convA(torch.cat([up_x, concat_with], dim=1)))
@HEADS.register_module()
class DenseDepthHead(DepthBaseDecodeHead):
"""DenseDepthHead.
    This head is an implementation of `DenseDepth <https://arxiv.org/abs/1812.11941>`_.
Args:
up_sample_channels (List): Out channels of decoder layers.
fpn (bool): Whether apply FPN head.
Default: False
conv_dim (int): Default channel of features in FPN head.
Default: 256.
"""
def __init__(self,
up_sample_channels,
fpn=False,
conv_dim=256,
**kwargs):
super(DenseDepthHead, self).__init__(**kwargs)
self.up_sample_channels = up_sample_channels[::-1]
self.in_channels = self.in_channels[::-1]
self.conv_list = nn.ModuleList()
up_channel_temp = 0
self.fpn = fpn
if self.fpn:
self.num_fpn_levels = len(self.in_channels)
# construct the FPN
self.lateral_convs = nn.ModuleList()
self.output_convs = nn.ModuleList()
for idx, in_channel in enumerate(self.in_channels[:self.num_fpn_levels]):
lateral_conv = ConvModule(
in_channel, conv_dim, kernel_size=1, norm_cfg=self.norm_cfg
)
output_conv = ConvModule(
conv_dim,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
)
self.lateral_convs.append(lateral_conv)
self.output_convs.append(output_conv)
else:
for index, (in_channel, up_channel) in enumerate(
zip(self.in_channels, self.up_sample_channels)):
if index == 0:
self.conv_list.append(
ConvModule(
in_channels=in_channel,
out_channels=up_channel,
kernel_size=1,
stride=1,
padding=0,
act_cfg=None
))
else:
self.conv_list.append(
UpSample(skip_input=in_channel + up_channel_temp,
output_features=up_channel,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg))
# save earlier fusion target
up_channel_temp = up_channel
def forward(self, inputs, img_metas):
"""Forward function."""
temp_feat_list = []
if self.fpn:
for index, feat in enumerate(inputs[::-1]):
x = feat
lateral_conv = self.lateral_convs[index]
output_conv = self.output_convs[index]
cur_fpn = lateral_conv(x)
# Following FPN implementation, we use nearest upsampling here. Change align corners to True.
if index != 0:
y = cur_fpn + F.interpolate(temp_feat_list[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=True)
else:
y = cur_fpn
y = output_conv(y)
temp_feat_list.append(y)
else:
temp_feat_list = []
for index, feat in enumerate(inputs[::-1]):
if index == 0:
temp_feat = self.conv_list[index](feat)
temp_feat_list.append(temp_feat)
else:
skip_feat = feat
up_feat = temp_feat_list[index-1]
temp_feat = self.conv_list[index](up_feat, skip_feat)
temp_feat_list.append(temp_feat)
output = self.depth_pred(temp_feat_list[-1])
return output
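
# A hypothetical mm-style config sketch for registering this head (the channel
# values are illustrative and must match the backbone's output channels):
# decode_head = dict(
#     type='DenseDepthHead',
#     in_channels=[64, 128, 256, 512],
#     up_sample_channels=[64, 128, 256, 512],
#     fpn=False,
# )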
|
11536958
|
from pydantic import BaseSettings
class Settings(BaseSettings):
port: int = 50051
host: str = '0.0.0.0'
max_workers: int = 10
environment: str = ""
class Config:
env_prefix = "GSK_"
|
11536996
|
from loguru import logger
import pytest
@pytest.fixture(scope='function')
def example_fixture():
logger.info("Setting Up Example Fixture...")
yield
logger.info("Tearing Down Example Fixture...")
|
11536999
|
from google.cloud import storage
import gcsfs
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
import joblib  # sklearn.externals.joblib was removed in modern scikit-learn
import pandas as pd
from time import time
import re
cleanup_re = re.compile('[^a-z]+')
def cleanup(sentence):
sentence = sentence.lower()
sentence = cleanup_re.sub(' ', sentence).strip()
return sentence
def download_blob(blob, download_path):
blob.download_to_filename(download_path)
print('Blob {} downloaded to {}.'.format(
blob.name,
download_path))
def upload_blob(bucket_name, blob, upload_path):
blob.upload_from_filename(upload_path)
print('File {} uploaded to {}.'.format(
blob.name,
bucket_name))
def function_handler(request):
request_json = request.get_json(silent=True)
dataset_bucket = request_json['dataset_bucket']
dataset_blob_name = request_json['dataset_blob_name']
model_bucket = request_json['model_bucket']
model_blob_name = request_json['model_blob_name']
fs = gcsfs.GCSFileSystem(project='Serverless-faas-workbench')
with fs.open(dataset_bucket+'/'+dataset_blob_name) as f:
df = pd.read_csv(f)
start = time()
df['train'] = df['Text'].apply(cleanup)
tfidf_vect = TfidfVectorizer(min_df=100).fit(df['train'])
train = tfidf_vect.transform(df['train'])
model = LogisticRegression()
model.fit(train, df['Score'])
latency = time() - start
print(latency)
model_file_path = "/tmp/" + model_blob_name
joblib.dump(model, model_file_path)
storage_client = storage.Client()
m_bucket = storage_client.get_bucket(model_bucket)
m_blob = m_bucket.blob(model_blob_name)
upload_blob(model_bucket, m_blob, model_file_path)
return "latency : " + str(latency)
|
11537091
|
import re
from urllib.parse import urljoin
import typesystem
from apistar.document import Document, Field, Link, Section
from apistar.schemas.jsonschema import JSON_SCHEMA
SCHEMA_REF = typesystem.Object(
    properties={"$ref": typesystem.String(pattern="^#/definitions/")}
)
RESPONSE_REF = typesystem.Object(
properties={"$ref": typesystem.String(pattern="^#/responses/")}
)
definitions = typesystem.SchemaDefinitions()
SWAGGER = typesystem.Object(
title="Swagger",
properties={
"swagger": typesystem.String(),
"info": typesystem.Reference("Info", definitions=definitions),
"paths": typesystem.Reference("Paths", definitions=definitions),
"host": typesystem.String(),
"basePath": typesystem.String(pattern="^/"),
"schemes": typesystem.Array(items=typesystem.Choice(choices=["http", "https", "ws", "wss"])),
"consumes": typesystem.Array(items=typesystem.String()),
"produces": typesystem.Array(items=typesystem.String()),
"definitions": typesystem.Object(additional_properties=typesystem.Any()),
"parameters": typesystem.Object(
additional_properties=typesystem.Reference(
"Parameters", definitions=definitions
)
),
"responses": typesystem.Object(
additional_properties=typesystem.Reference(
"Responses", definitions=definitions
)
),
"securityDefinitions": typesystem.Object(
additional_properties=typesystem.Reference(
"SecurityScheme", definitions=definitions
)
),
"security": typesystem.Array(
items=typesystem.Reference("SecurityRequirement", definitions=definitions)
),
"tags": typesystem.Array(
items=typesystem.Reference("Tag", definitions=definitions)
),
"externalDocs": typesystem.Reference(
"ExternalDocumentation", definitions=definitions
),
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
required=["swagger", "info", "paths"],
)
definitions["Info"] = typesystem.Object(
properties={
"title": typesystem.String(allow_blank=True),
"description": typesystem.Text(allow_blank=True),
"termsOfService": typesystem.String(format="url"),
"contact": typesystem.Reference("Contact", definitions=definitions),
"license": typesystem.Reference("License", definitions=definitions),
"version": typesystem.String(allow_blank=True),
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
required=["title", "version"],
)
definitions["Contact"] = typesystem.Object(
properties={
"name": typesystem.String(allow_blank=True),
"url": typesystem.String(format="url"),
"email": typesystem.String(format="email"),
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
)
definitions["License"] = typesystem.Object(
properties={"name": typesystem.String(), "url": typesystem.String(format="url")},
required=["name"],
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
)
definitions["Paths"] = typesystem.Object(
pattern_properties={
"^/": typesystem.Reference("Path", definitions=definitions),
"^x-": typesystem.Any(),
},
additional_properties=False,
)
definitions["Path"] = typesystem.Object(
properties={
"summary": typesystem.String(allow_blank=True),
"description": typesystem.Text(allow_blank=True),
"get": typesystem.Reference("Operation", definitions=definitions),
"put": typesystem.Reference("Operation", definitions=definitions),
"post": typesystem.Reference("Operation", definitions=definitions),
"delete": typesystem.Reference("Operation", definitions=definitions),
"options": typesystem.Reference("Operation", definitions=definitions),
"head": typesystem.Reference("Operation", definitions=definitions),
"patch": typesystem.Reference("Operation", definitions=definitions),
"trace": typesystem.Reference("Operation", definitions=definitions),
"parameters": typesystem.Array(
items=typesystem.Reference("Parameter", definitions=definitions)
), # TODO: | ReferenceObject
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
)
definitions["Operation"] = typesystem.Object(
properties={
"tags": typesystem.Array(items=typesystem.String()),
"summary": typesystem.String(allow_blank=True),
"description": typesystem.Text(allow_blank=True),
"externalDocs": typesystem.Reference(
"ExternalDocumentation", definitions=definitions
),
"operationId": typesystem.String(),
"consumes": typesystem.Array(items=typesystem.String()),
"produces": typesystem.Array(items=typesystem.String()),
"parameters": typesystem.Array(
items=typesystem.Reference("Parameter", definitions=definitions)
), # TODO: | ReferenceObject
"responses": typesystem.Reference("Responses", definitions=definitions),
"schemes": typesystem.Array(items=typesystem.Choice(choices=["http", "https", "ws", "wss"])),
"deprecated": typesystem.Boolean(),
"security": typesystem.Array(
typesystem.Reference("SecurityRequirement", definitions=definitions)
),
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
)
definitions["ExternalDocumentation"] = typesystem.Object(
properties={
"description": typesystem.Text(),
"url": typesystem.String(format="url"),
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
required=["url"],
)
definitions["Parameter"] = typesystem.Object(
properties={
"name": typesystem.String(),
"in": typesystem.Choice(
choices=["query", "header", "path", "formData", "body"]
),
"description": typesystem.Text(),
"required": typesystem.Boolean(),
# in: "body"
"schema": JSON_SCHEMA | SCHEMA_REF,
# in: "query"|"header"|"path"|"formData"
"type": typesystem.Choice(choices=["string", "number", "integer", "boolean", "array", "file"]),
"format": typesystem.String(allow_blank=True),
"allowEmptyValue": typesystem.Boolean(),
"items": JSON_SCHEMA, # TODO: Should actually be a restricted subset
"collectionFormat": typesystem.Choice(
choices=["csv", "ssv", "tsv", "pipes", "multi"]
),
"default": typesystem.Any(),
"maximum": typesystem.Number(),
"exclusiveMaximum": typesystem.Boolean(),
"minimum": typesystem.Number(),
"exclusiveMinimum": typesystem.Boolean(),
"maxLength": typesystem.Integer(),
"minLength": typesystem.Integer(),
"pattern": typesystem.String(allow_blank=True),
"maxItems": typesystem.Integer(),
"minItems": typesystem.Integer(),
"uniqueItems": typesystem.Boolean(),
"enum": typesystem.Array(items=typesystem.Any()),
"multipleOf": typesystem.Integer(),
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
required=["name", "in"],
)
definitions["RequestBody"] = typesystem.Object(
properties={
"description": typesystem.String(allow_blank=True),
"content": typesystem.Object(
additional_properties=typesystem.Reference(
"MediaType", definitions=definitions
)
),
"required": typesystem.Boolean(),
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
)
definitions["Responses"] = typesystem.Object(
properties={
"default": typesystem.Reference("Response", definitions=definitions)
| RESPONSE_REF
},
pattern_properties={
"^([1-5][0-9][0-9]|[1-5]XX)$": typesystem.Reference(
"Response", definitions=definitions
)
| RESPONSE_REF,
"^x-": typesystem.Any(),
},
additional_properties=False,
)
definitions["Response"] = typesystem.Object(
properties={
"description": typesystem.String(allow_blank=True),
"content": typesystem.Object(
additional_properties=typesystem.Reference(
"MediaType", definitions=definitions
)
),
"headers": typesystem.Object(
additional_properties=typesystem.Reference(
"Header", definitions=definitions
)
),
# TODO: Header | ReferenceObject
# TODO: links
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
)
definitions["MediaType"] = typesystem.Object(
properties={
"schema": JSON_SCHEMA | SCHEMA_REF,
"example": typesystem.Any(),
# TODO 'examples', 'encoding'
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
)
definitions["Header"] = typesystem.Object(
properties={
"description": typesystem.Text(),
"type": typesystem.Choice(choices=["string", "number", "integer", "boolean", "array", "file"]),
"format": typesystem.String(allow_blank=True),
"items": JSON_SCHEMA, # TODO: Should actually be a restricted subset
"collectionFormat": typesystem.Choice(
choices=["csv", "ssv", "tsv", "pipes", "multi"]
),
"default": typesystem.Any(),
"maximum": typesystem.Number(),
"exclusiveMaximum": typesystem.Boolean(),
"minimum": typesystem.Number(),
"exclusiveMinimum": typesystem.Boolean(),
"maxLength": typesystem.Integer(),
"minLength": typesystem.Integer(),
"pattern": typesystem.String(allow_blank=True),
"maxItems": typesystem.Integer(),
"minItems": typesystem.Integer(),
"uniqueItems": typesystem.Boolean(),
"enum": typesystem.Array(items=typesystem.Any()),
"multipleOf": typesystem.Integer(),
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
)
definitions["Tag"] = typesystem.Object(
properties={
"name": typesystem.String(),
"description": typesystem.Text(allow_blank=True),
"externalDocs": typesystem.Reference(
"ExternalDocumentation", definitions=definitions
),
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
required=["name"],
)
definitions["SecurityRequirement"] = typesystem.Object(
additional_properties=typesystem.Array(items=typesystem.String())
)
definitions["SecurityScheme"] = typesystem.Object(
properties={
"type": typesystem.Choice(choices=["basic", "apiKey", "oauth2"]),
"description": typesystem.Text(allow_blank=True),
"name": typesystem.String(),
"in": typesystem.Choice(choices=["query", "header"]),
"flow": typesystem.Choice(
choices=["implicit", "password", "application", "accessCode"]
),
"authorizationUrl": typesystem.String(format="url"),
"tokenUrl": typesystem.String(format="url"),
"scopes": typesystem.Reference("Scopes", definitions=definitions),
},
pattern_properties={"^x-": typesystem.Any()},
additional_properties=False,
required=["type"],
)
definitions["Scopes"] = typesystem.Object(
pattern_properties={"^x-": typesystem.Any()},
additional_properties=typesystem.String(),
)
METHODS = ["get", "put", "post", "delete", "options", "head", "patch", "trace"]
def lookup(value, keys, default=None):
for key in keys:
try:
value = value[key]
except (KeyError, IndexError, TypeError):
return default
return value
def _simple_slugify(text):
if text is None:
return None
text = text.lower()
text = re.sub(r"[^a-z0-9]+", "_", text)
text = re.sub(r"[_]+", "_", text)
return text.strip("_")
class Swagger:
def load(self, data):
title = lookup(data, ["info", "title"])
description = lookup(data, ["info", "description"])
version = lookup(data, ["info", "version"])
host = lookup(data, ["host"])
path = lookup(data, ["basePath"], "/")
scheme = lookup(data, ["schemes", 0], "https")
base_url = None
if host:
base_url = "%s://%s%s" % (scheme, host, path)
schema_definitions = self.get_schema_definitions(data)
content = self.get_content(data, base_url, schema_definitions)
return Document(
title=title,
description=description,
version=version,
url=base_url,
content=content,
)
    def get_schema_definitions(self, data):
        # Swagger 2.0 keeps reusable schemas under the top-level "definitions"
        # key (the "components.schemas" path belongs to OpenAPI 3), matching
        # the "^#/definitions/" ref pattern declared above.
        definitions = typesystem.SchemaDefinitions()
        schemas = lookup(data, ["definitions"], {})
        for key, value in schemas.items():
            ref = f"#/definitions/{key}"
            definitions[ref] = typesystem.from_json_schema(
                value, definitions=definitions
            )
        return definitions
def get_content(self, data, base_url, schema_definitions):
"""
        Return all the links in the document, laid out by tag and operationId.
"""
links_by_tag = {}
links = []
for path, path_info in data.get("paths", {}).items():
operations = {key: path_info[key] for key in path_info if key in METHODS}
for operation, operation_info in operations.items():
tag = lookup(operation_info, ["tags", 0])
link = self.get_link(
base_url,
path,
path_info,
operation,
operation_info,
schema_definitions,
)
if link is None:
continue
if tag is None:
links.append(link)
elif tag not in links_by_tag:
links_by_tag[tag] = [link]
else:
links_by_tag[tag].append(link)
sections = [
Section(name=_simple_slugify(tag), title=tag.title(), content=links)
for tag, links in links_by_tag.items()
]
return links + sections
def get_link(
self, base_url, path, path_info, operation, operation_info, schema_definitions
):
"""
Return a single link in the document.
"""
name = operation_info.get("operationId")
title = operation_info.get("summary")
description = operation_info.get("description")
if name is None:
name = _simple_slugify(title)
if not name:
return None
# Parameters are taken both from the path info, and from the operation.
parameters = path_info.get("parameters", [])
parameters += operation_info.get("parameters", [])
fields = [
self.get_field(parameter, schema_definitions) for parameter in parameters
]
default_encoding = None
if any([field.location == "body" for field in fields]):
default_encoding = "application/json"
elif any([field.location == "formData" for field in fields]):
default_encoding = "application/x-www-form-urlencoded"
form_fields = [field for field in fields if field.location == "formData"]
body_field = Field(
name="body",
location="body",
schema=typesystem.Object(
properties={
field.name: typesystem.Any()
if field.schema is None
else field.schema
for field in form_fields
},
required=[field.name for field in form_fields if field.required],
),
)
fields = [field for field in fields if field.location != "formData"]
fields.append(body_field)
encoding = lookup(operation_info, ["consumes", 0], default_encoding)
return Link(
name=name,
url=urljoin(base_url, path),
method=operation,
title=title,
description=description,
fields=fields,
encoding=encoding,
)
def get_field(self, parameter, schema_definitions):
"""
Return a single field in a link.
"""
name = parameter.get("name")
location = parameter.get("in")
description = parameter.get("description")
required = parameter.get("required", False)
schema = parameter.get("schema")
example = parameter.get("example")
if schema is not None:
if "$ref" in schema:
ref = schema["$ref"]
schema = schema_definitions.get(ref)
else:
schema = typesystem.from_json_schema(
schema, definitions=schema_definitions
)
return Field(
name=name,
location=location,
description=description,
required=required,
schema=schema,
example=example,
)
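
# A minimal usage sketch (the document below is an illustrative Swagger 2.0 stub):
# doc = Swagger().load({
#     "swagger": "2.0",
#     "info": {"title": "Example API", "version": "1.0"},
#     "host": "api.example.com",
#     "paths": {},
# })
# print(doc.title, doc.url)  # -> Example API https://api.example.com/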
|
11537106
|
from django.apps import AppConfig
class WebHooksConfig(AppConfig):
name = 'web_hooks'
verbose_name = "Web Hooks"
    def ready(self):
        # register the signals; the import is needed only for its side effects
        from . import receivers  # noqa: F401
|
11537120
|
import pytest
from shrubberies.factories import ShrubberyFactory, UserFactory
from shrubberies.models import Shrubbery
from .rules import Is, Relation
@pytest.mark.django_db
def test_relation_to_user():
u1 = UserFactory()
u2 = UserFactory()
s1 = ShrubberyFactory(branch=u1.profile.branch)
s2 = ShrubberyFactory(branch=u2.profile.branch)
belongs_to_branch = Relation("branch", Is(lambda u: u.profile.branch))
assert belongs_to_branch.check(u1, s1)
assert belongs_to_branch.check(u2, s2)
assert not belongs_to_branch.check(u1, s2)
assert not belongs_to_branch.check(u2, s1)
qs1 = belongs_to_branch.filter(u1, Shrubbery.objects.all())
qs2 = belongs_to_branch.filter(u2, Shrubbery.objects.all())
assert qs1.count() == 1
assert s1 in qs1
assert s2 not in qs1
assert qs2.count() == 1
assert s2 in qs2
assert s1 not in qs2
@pytest.mark.django_db
def test_relation_never_global():
user = UserFactory()
belongs_to_branch = Relation("branch", Is(lambda u: u.profile.branch))
assert not belongs_to_branch.check(user)
|
11537147
|
import errno
import platform
import subprocess
class Base:
def __init__(self):
self.flags = set()
try:
tmp = self.get_flags() or []
self.flags = set(tmp)
except IOError as e:
if e.errno == errno.ENOENT:
return
raise
def __contains__(self, name):
return name in self.flags
class Linux(Base):
def get_flags(self):
with open('/proc/cpuinfo', 'r') as f:
for line in f:
if line.startswith('flags'):
return line.split()[2:]
class ARM(Base):
def get_flags(self):
with open('/proc/cpuinfo', 'r') as f:
for line in f:
if line.startswith('Features'):
return line.split()[2:]
class MacOS(Base):
    def get_flags(self):
        out = subprocess.Popen(
            "sysctl -n machdep.cpu".split(),
            stdout=subprocess.PIPE,
        ).communicate()[0]
        return out.decode("utf-8").lower().split()
if platform.system() == 'Darwin':
CPUFlags = MacOS
else:
if platform.machine().startswith('arm'):
CPUFlags = ARM
else:
CPUFlags = Linux
def main():
import sys
flags = CPUFlags()
if len(sys.argv) == 2:
if sys.argv[1] in flags:
print ("present")
if __name__ == '__main__':
main()
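
# Example invocation (the flag name depends on the host CPU):
#   $ python cpu_flags.py sse2
#   present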
|
11537170
|
from django.conf.urls.defaults import *
urlpatterns = patterns('issues.views',
(r'^manage/?$', 'manage_index'),
(r'^manage/admin_report$', 'admin_report'),
(r'^manage/new/SF/?$', 'manage_new_specialfee'),
(r'^manage/new/(?P<issue_kind>[\w\d-]+)/?$', 'manage_new'),
(r'^manage/create/?$', 'create'),
(r'^$', 'index'), # added index to the regex
(r'^(?P<show>[\w\d-]+)$', 'index'),
(r'^issue/(?P<issue_slug>[\w\d-]+)/?$', 'detail'),
(r'^issue/(?P<issue_slug>[\w\d-]+)/edit$', 'manage_edit'),
)
|