11590597
|
from fractions import Fraction
class Solution:
def isRationalEqual(self, S: str, T: str) -> bool:
def convert(s):
if '.' not in s:
return Fraction(int(s), 1)
i = s.index('.')
result = Fraction(int(s[:i]), 1)
s = s[i + 1:]
if '(' not in s:
if s:
result += Fraction(int(s), 10 ** len(s))
return result
i = s.index('(')
if i > 0:
result += Fraction(int(s[:i]), 10 ** i)
s = s[i + 1 : -1]
result += Fraction(int(s), 10 ** i * (10 ** len(s) - 1))
return result
return convert(S) == convert(T)
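if __name__ == "__main__":
    # Minimal sanity check (not part of the original solution): both strings in
    # each pair represent the same rational number (52/99 and 1/6 respectively).
    sol = Solution()
    assert sol.isRationalEqual("0.(52)", "0.5(25)")
    assert sol.isRationalEqual("0.1666(6)", "0.166(66)")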
|
11590602
|
import maya.cmds as mc
import glTools.utils.attribute
class Remap(object):
'''
Object wrapper for remapValue node in Maya.
'''
# CONSTANTS
_REMAPSUFFIX = 'remap'
def __init__( self,
remapName,
inputValue = None,
inputMin = None,
inputMax = None,
outputMin = None,
outputMax = None ):
'''
        Remap object initialization
@param remapName: RemapValue node name
@type remapName: str
@param inputValue: RemapValue input value or source plug. If None, leave at default
@type inputValue: float or str or None
@param inputMin: RemapValue node input minimum value. If None, leave at default
@type inputMin: float or None
@param inputMax: RemapValue node input maximum value. If None, leave at default
@type inputMax: float or None
@param outputMin: RemapValue node output minimum value. If None, leave at default
@type outputMin: float or None
@param outputMax: RemapValue node output maximum value. If None, leave at default
@type outputMax: float or None
'''
# Checks Existing Node
if mc.objExists('%s_%s' % (remapName,self._REMAPSUFFIX) ):
# Use Existing Node
self._name = '%s_%s' % (remapName,self._REMAPSUFFIX)
else:
# Create New Node
self.create(remapName)
# Set Input
        if inputValue is not None: self.setInput(inputValue)
# Set Range
self.setRange(inputMin,inputMax,outputMin,outputMax)
# Initialize Index
self.setIndex(0)
def create( self,name ):
'''
Create new remapValue node with the specified name
@param name: New node name.
@type name: str
'''
self._name = mc.createNode('remapValue', name='%s_%s' % (name, self._REMAPSUFFIX))
return self._name
#==================
# get
#==================
def getName(self):
return self._name
def getIndex(self):
return self._index
#==================
# set
#==================
def setAttribute(self,attr,value):
'''
Set remapValue node value or source plug.
        @param attr: RemapValue attribute name to set value or source plug for.
        @type attr: str
@param value: RemapValue attribute value or source plug.
@type value: int or float or str or None
'''
# Check None
        if value is None: return
# Check Numeric Input
        if isinstance(value, (int, float)):
# Set Numeric Attribute Value
try: mc.setAttr(attr,value)
except: raise Exception('Error setting remapValue attribute "'+attr+'" value!')
return
# Check String Input
        elif isinstance(value, str):
# Connect External Plug
if glTools.utils.attribute.isAttr(value):
if not mc.isConnected(value,attr):
try: mc.connectAttr(value,attr,f=True)
except: raise Exception('Error connecting remapValue attribute ("'+value+'" >> "'+attr+'")!')
return
else:
                    print('RemapValue node attribute "'+attr+'" already connected to source plug "'+value+'"! Skipping...')
return
else:
raise Exception('Source plug value is not a valid attribute! ("'+value+'")')
        # Invalid Type
raise Exception('Invalid value type specified for remapValue attribute "'+attr+'"! ('+str(type(value))+')!')
def setInput(self, inputValue):
'''
Set remapValue node inputValue.
@param inputValue: RemapValue node input value or source plug.
@type inputValue: float or str
'''
attr = self._name+'.inputValue'
self.setAttribute(attr,inputValue)
def setInputMin(self,inputMin):
'''
Set remapValue node inputMin attribute value
@param inputMin: RemapValue node input minimum value or source plug.
@type inputMin: float or None
'''
attr = self._name+'.inputMin'
self.setAttribute(attr,inputMin)
def setInputMax(self,inputMax):
'''
Set remapValue node inputMax attribute value
@param inputMax: Attribute Value to set.
@type inputMax: float or None
'''
attr = self._name+'.inputMax'
self.setAttribute(attr,inputMax)
def setOutputMin(self,outputMin):
'''
Set remapValue node outputMin attribute value
@param outputMin: Attribute Value to set.
@type outputMin: float or None
'''
attr = self._name+'.outputMin'
self.setAttribute(attr,outputMin)
def setOutputMax(self,outputMax):
'''
Set remapValue node outputMax attribute value
@param outputMax: Attribute Value to set.
@type outputMax: float or None
'''
attr = self._name+'.outputMax'
self.setAttribute(attr,outputMax)
def setInputRange( self,
inputMin = None,
inputMax = None ):
'''
Set remapValue node inputMin and inputMax attribute value
@param inputMin: Attribute value to set for inputMin.
@type inputMin: float or None
@param inputMax: Attribute value to set for inputMax.
@type inputMax: float or None
'''
        if inputMin is not None: self.setInputMin(inputMin)
        if inputMax is not None: self.setInputMax(inputMax)
def setOutputRange( self,
outputMin = None,
outputMax = None ):
'''
Set remapValue node outputMin and outputMax attribute value
@param outputMin: Attribute value to set for outputMin.
@type outputMin: float or None
@param outputMax: Attribute value to set for outputMax.
@type outputMax: float or None
'''
        if outputMin is not None: self.setOutputMin(outputMin)
        if outputMax is not None: self.setOutputMax(outputMax)
def setRange( self,
inputMin = None,
inputMax = None,
outputMin = None,
outputMax = None ):
'''
        Set remapValue node inputMin, inputMax, outputMin and outputMax attribute values.
        @param inputMin: Attribute value to set for inputMin.
        @type inputMin: float or None
        @param inputMax: Attribute value to set for inputMax.
        @type inputMax: float or None
        @param outputMin: Attribute value to set for outputMin.
        @type outputMin: float or None
        @param outputMax: Attribute value to set for outputMax.
        @type outputMax: float or None
'''
self.setInputRange(inputMin,inputMax)
self.setOutputRange(outputMin,outputMax)
def setPoint( self,
index,
position = None,
value = None,
interpolation = None):
'''
Set remap point on remapValue node.
@param index: Remap point index.
@type index: int or str
@param position: Remap point position.
@type position: float or str
@param value: Remap point value.
@type value: float or str
@param interpolation: Remap point interpolation.
@type interpolation: int or str
'''
# Set Index
self.setIndex(index)
# Set Position
self.setPosition(position)
# Set Value
self.setValue(value)
# Set Interpolation
self.setInterpolation(interpolation)
def setIndex(self,index):
'''
Set remapValue point index.
@param index: RemapValue point index.
@type index: int
'''
self._index = index
self._indexedName = '%s.value[%s]' % (self._name, index)
def setPosition(self,position):
'''
Set remapValue point position value.
@param position: RemapValue point float position or source plug.
@type position: float or str
'''
attr = self._indexedName+'.value_Position'
self.setAttribute(attr,position)
def setValue(self,value):
'''
Set remapValue point float value.
@param value: RemapValue point float value or source plug.
@type value: float or str
'''
attr = self._indexedName+'.value_FloatValue'
self.setAttribute(attr,value)
def setInterpolation(self,interpolation):
'''
Set remapValue point interpolation value.
@param interpolation: RemapValue point interpolation value or source plug.
@type interpolation: int or str
'''
attr = self._indexedName+'.value_Interp'
self.setAttribute(attr,interpolation)
def connectInput(self, objectAttrName):
        '''
        Connect a source plug to the remapValue node inputValue attribute.
        @param objectAttrName: Source plug to connect to the inputValue attribute.
        @type objectAttrName: str
        '''
if not mc.isConnected(objectAttrName, '%s.inputValue' % self._name):
mc.connectAttr(objectAttrName, '%s.inputValue' % self._name, force=True)
def connectOutput(self,dstAttr):
'''
Connect remapValue node output to destination plug.
@param dstAttr: Destination plug for remapValue node output.
@type dstAttr: str
'''
# Checks
if not glTools.utils.attribute.isAttr(dstAttr):
raise Exception('Destination attribute "'+dstAttr+'" is not a valid attribute! Unable to establish output connection...')
# Connect Output
outAttr = self._name+'.outValue'
if not mc.isConnected(outAttr,dstAttr):
try: mc.connectAttr(outAttr,dstAttr,f=True)
except: raise Exception('Error connecting remapValue output ("'+outAttr+'" >> "'+dstAttr+'")!')
else:
print('RemapValue node output "'+outAttr+'" already connected to destination plug "'+dstAttr+'"! Skipping...')
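# Usage sketch (hedged: requires a running Maya session; the node and plug
# names below are illustrative):
# remap = Remap('squash', inputValue='pCube1.translateY',
#               inputMin=0.0, inputMax=10.0, outputMin=0.0, outputMax=1.0)
# remap.setPoint(0, position=0.0, value=0.0, interpolation=1)
# remap.setPoint(1, position=1.0, value=1.0, interpolation=1)
# remap.connectOutput('pCube1.scaleX')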
|
11590604
|
from easydict import EasyDict
cartpole_dqfd_config = dict(
exp_name='cartpole_dqfd',
env=dict(
manager=dict(shared_memory=True, force_reproducibility=True),
collector_env_num=8,
evaluator_env_num=5,
n_evaluator_episode=5,
stop_value=195,
),
policy=dict(
cuda=True,
priority=True,
model=dict(
obs_shape=4,
action_shape=2,
encoder_hidden_size_list=[128, 128, 64],
dueling=True,
),
nstep=3,
discount_factor=0.97,
learn=dict(
batch_size=64,
learning_rate=0.001,
            lambda1=1,
            lambda2=3.0,
            lambda3=0,  # with lambda3=0 (no L2 loss), expert_replay_buffer_size=0 and lambda1=0, this recovers one-step PDD-DQN
            per_train_iter_k=10,
            expert_replay_buffer_size=10000,  # size of the expert replay buffer
),
# Users should add their own path here (path should lead to a well-trained model)
        collect=dict(n_sample=8, demonstration_info_path='path'),
        # note: evaluation is run every `eval_freq` training iterations
eval=dict(evaluator=dict(eval_freq=50, )),
other=dict(
eps=dict(
type='exp',
start=0.95,
end=0.1,
decay=10000,
),
replay_buffer=dict(replay_buffer_size=20000, ),
),
),
)
cartpole_dqfd_config = EasyDict(cartpole_dqfd_config)
main_config = cartpole_dqfd_config
cartpole_dqfd_create_config = dict(
env=dict(
type='cartpole',
import_names=['dizoo.classic_control.cartpole.envs.cartpole_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(type='dqfd'),
)
cartpole_dqfd_create_config = EasyDict(cartpole_dqfd_create_config)
create_config = cartpole_dqfd_create_config
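# Entry-point sketch (hedged: assumes DI-engine's DQfD serial pipeline; the
# exact import path and signature may differ between versions):
# from ding.entry import serial_pipeline_dqfd
# serial_pipeline_dqfd((main_config, create_config), expert_config, seed=0)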
|
11590622
|
import unittest
from mock import patch
from rfid import RFIDClient, comma_format_to_ten_digit, ten_digit_to_comma_format
TEST_CONTROLLER_IP = "192.168.1.20"
TEST_CONTROLLER_SERIAL = 123106461
TEST_BADGE = 3126402
open_door_resp = (
b" A\xb9\x8c\x05\x00\x00\x00\x9dtV\x07\x00\x00\x00\x00\x01\x05\x02\x00\x01"
b"\x00\x00\x00"
)
add_badge_resp_1 = (
b" \x11\xd0\xfc(\x00\x00\x00\x9dtV\x07\x00\x00\x00\x00\x01\x05\x02\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\xadN\x00\x00\x00\x00\x00\x00\x00\x90L\x00B"
b"\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00"
b"\x04\x00\x00\x00\x00A\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x16\x10\x00\x00@\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x16\x10\x00\x00?\x00\x00\x00\x01\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x16\x10\x00\x00>\x00\x00\x00\x01\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16\x10\x00\x00=\x00\x00\x00\x01"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16\x10\x00\x00<\x00\x00"
b"\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16\x10\x00\x00;"
b"\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12\x00\x00"
b"\x00:\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x12"
b"\x83\x00\x009\x00\x00\x00\x02\x01\xa8\xc0\x00\x00\x00\x00\x1a\x1d\xf1"
b"\xba\x16\x10\x00\x008\x00\x00\x00X\xd6,\x00\x00\x00\x00\x00\x1a\x1dp\xac"
b"\x10\x90\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xff\xff\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x00\x00\x00"
b"\xff\x8f\xff_\x13\x86\xff\xfb\xff?\xff\x0f\xff\x1d\x00.\x00\x00\x00\x00"
b"\x00\x00\x00\x00\x00\x00\x00\x00"
)
add_badge_resp_2 = b"#!\xee\xb1)\x00\x00\x00\x9dtV\x07\x00\x00\x00\x00\x01\x05\x02\x00"
remove_user_resp = b'#!\xa9\x86"\x00\x00\x00\x9dtV\x07\x00\x00\x00\x00\x01\x05\x02\x00'
class TestTenDigitToCommaFormat(unittest.TestCase):
def test_happy_path(self):
result = ten_digit_to_comma_format(2058018)
self.assertEqual(result, TEST_BADGE)
class TestCommaFormatToTenDigit(unittest.TestCase):
def test_happy_path(self):
result = comma_format_to_ten_digit(TEST_BADGE)
expected_result = 2058018
self.assertEqual(result, expected_result)
class mock_socket(object):
def __init__(self, responses):
self.responses = responses
def recv(self, size):
return self.responses.pop()
def send(self, msg):
return len(msg)
def close(self):
return True
class TestRFIDClient(unittest.TestCase):
def test_invalid_ip(self):
with self.assertRaises(TypeError):
RFIDClient("blah")
@patch("rfid.RFIDClient.connect")
@patch("rfid.RFIDClient.check_valid_ipv4_address")
    def test_controller_serial(self, mock_check_valid_ipv4_address, mock_connect):
rfid_client = RFIDClient(TEST_CONTROLLER_IP, TEST_CONTROLLER_SERIAL)
expected_controller_serial_hex = "9d745607"
self.assertEqual(rfid_client.controller_serial, expected_controller_serial_hex)
def test_crc_16_ibm(self):
test_controller_serial_hex = "9d745607"
test_data = (
"2010"
+ RFIDClient.source_port
+ "2800000000000000"
+ test_controller_serial_hex
+ "00000200ffffffff"
)
result = RFIDClient.crc_16_ibm(test_data)
expected_result = (
b" \x10f\xf2(\x00\x00\x00\x00\x00\x00\x00\x9dtV\x07\x00\x00\x02"
b"\x00\xff\xff\xff\xff"
)
self.assertEqual(result, expected_result)
@patch(
"rfid.RFIDClient.connect",
return_value=mock_socket([add_badge_resp_2, add_badge_resp_1]),
)
@patch("rfid.RFIDClient.check_valid_ipv4_address")
    def test_add_user(self, mock_check_valid_ipv4_address, mock_connect):
rfid_client = RFIDClient(TEST_CONTROLLER_IP, TEST_CONTROLLER_SERIAL)
test_doors = [1, 2]
rfid_client.add_user(TEST_BADGE, test_doors)
@patch("rfid.RFIDClient.connect", return_value=mock_socket([remove_user_resp]))
@patch("rfid.RFIDClient.check_valid_ipv4_address")
    def test_remove_user(self, mock_check_valid_ipv4_address, mock_connect):
rfid_client = RFIDClient(TEST_CONTROLLER_IP, TEST_CONTROLLER_SERIAL)
rfid_client.remove_user(TEST_BADGE)
@patch("rfid.RFIDClient.connect", return_value=mock_socket([open_door_resp]))
@patch("rfid.RFIDClient.check_valid_ipv4_address")
    def test_open_door(self, mock_check_valid_ipv4_address, mock_connect):
rfid_client = RFIDClient(TEST_CONTROLLER_IP, TEST_CONTROLLER_SERIAL)
rfid_client.open_door(1)
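# Entry point so the suite can be run directly (pytest/unittest discovery
# works without it):
if __name__ == "__main__":
    unittest.main()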
|
11590644
|
import numpy as np
import re
import matplotlib.pyplot as plt
import matplotlib as mpl
plt.rcParams["font.family"] = "Times New Roman"
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
fontsize = 9
mpl.rcParams['axes.labelsize'] = fontsize
mpl.rcParams['xtick.labelsize'] = fontsize
mpl.rcParams['ytick.labelsize'] = fontsize
mpl.rcParams['legend.fontsize'] = fontsize
#mpl.rcParams['title.fontsize'] = fontsize
mpl.rcParams['font.size'] = fontsize
mpl.rcParams['axes.titlepad'] = 7
mpl.rcParams['savefig.dpi'] = 300
plt.rcParams["figure.figsize"] = [4, 3]
take_ln = True
moving_avg = True
save = True
save_val = True
window_size = 2500
dataset_num = 8
mean_from_last = 20000
remove_repeats = True #Caused by starting from the same counter multiple times
scale = 1
ratio = 1.618
width = scale * 3.3
height = width / ratio
num_data_to_use = 20000
num_hist_bins = 200
mse_x_to = 0.012
labels_sets = [["1/17.9", "1/27.3", "1/38.2", "1/50.0", "1/60.5", "1/73.7", "1/87.0"]]
sets = [[74, 71, 75, 72, 77, 73, 76]]
f = plt.figure()
ax = f.add_subplot(111)
losses_sets = []
iters_sets = []
for i, (data_nums, labels) in enumerate(zip(sets, labels_sets)):
#ax._frameon = False
#ax = f.add_subplot(1, 2, i+1)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.tick_params(labeltop=False, labelright=False)
ax.minorticks_on()
for j, dataset_num in enumerate(data_nums):
if not i:
hist_loc = ("Z:/Jeffrey-Ede/models/stem-random-walk-nin-20-"+str(dataset_num)+"/")
hist_file = hist_loc+"mses.npy"
else:
hist_file = f"Z:/Jeffrey-Ede/models/stem-random-walk-nin-20-68/mses-{dataset_num}.npy"
        mses = np.load(hist_file) / 2
#for x in mses: print(x)
#print(np.mean(mses), np.std(mses))
#print(len([x for x in mses if x >= 10]))
mses = [np.sqrt(x) for x in mses if x < 0.05]
bins, edges = np.histogram(mses, 100)
edges = 0.5*(edges[:-1] + edges[1:])
ax.plot(edges, bins, label=labels[j], linewidth=1)
ax.set_ylabel('Frequency')
ax.set_xlabel('Root Mean Squared Error')
#ax.set_ylim(-100, 2175)
plt.legend(loc='upper right', frameon=False, fontsize=8)
plt.minorticks_on()
#f.subplots_adjust(wspace=0.22, hspace=0.26)
#f.subplots_adjust(left=.00, bottom=.00, right=1., top=1.)
#f.set_size_inches(width, height)
save_loc = "Z:/Jeffrey-Ede/models/stem-random-walk-nin-figures/partial_hist.png"
plt.savefig( save_loc, bbox_inches='tight', )
#plt.gcf().clear()
|
11590659
|
import numpy as np
from mmseg.core.evaluation import (eval_metrics, mean_dice, mean_fscore,
mean_iou)
from mmseg.core.evaluation.metrics import f_score
def get_confusion_matrix(pred_label, label, num_classes, ignore_index):
"""Intersection over Union
Args:
pred_label (np.ndarray): 2D predict map
label (np.ndarray): label 2D label map
num_classes (int): number of categories
ignore_index (int): index ignore in evaluation
"""
mask = (label != ignore_index)
pred_label = pred_label[mask]
label = label[mask]
n = num_classes
inds = n * label + pred_label
mat = np.bincount(inds, minlength=n**2).reshape(n, n)
return mat
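# Worked example: with num_classes=2, pred_label [0, 1, 1] vs label [0, 0, 1]
# gives inds = [0, 1, 3], so the matrix (rows = label, cols = prediction) is
# [[1, 1], [0, 1]]: one true 0, one 0 predicted as 1, and one true 1.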
# This func is deprecated since it's not memory efficient
def legacy_mean_iou(results, gt_seg_maps, num_classes, ignore_index):
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs
    total_mat = np.zeros((num_classes, num_classes), dtype=np.float64)
for i in range(num_imgs):
mat = get_confusion_matrix(
results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
total_mat += mat
all_acc = np.diag(total_mat).sum() / total_mat.sum()
acc = np.diag(total_mat) / total_mat.sum(axis=1)
iou = np.diag(total_mat) / (
total_mat.sum(axis=1) + total_mat.sum(axis=0) - np.diag(total_mat))
return all_acc, acc, iou
# This func is deprecated since it's not memory efficient
def legacy_mean_dice(results, gt_seg_maps, num_classes, ignore_index):
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs
    total_mat = np.zeros((num_classes, num_classes), dtype=np.float64)
for i in range(num_imgs):
mat = get_confusion_matrix(
results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
total_mat += mat
all_acc = np.diag(total_mat).sum() / total_mat.sum()
acc = np.diag(total_mat) / total_mat.sum(axis=1)
dice = 2 * np.diag(total_mat) / (
total_mat.sum(axis=1) + total_mat.sum(axis=0))
return all_acc, acc, dice
# This func is deprecated since it's not memory efficient
def legacy_mean_fscore(results,
gt_seg_maps,
num_classes,
ignore_index,
beta=1):
num_imgs = len(results)
assert len(gt_seg_maps) == num_imgs
    total_mat = np.zeros((num_classes, num_classes), dtype=np.float64)
for i in range(num_imgs):
mat = get_confusion_matrix(
results[i], gt_seg_maps[i], num_classes, ignore_index=ignore_index)
total_mat += mat
all_acc = np.diag(total_mat).sum() / total_mat.sum()
recall = np.diag(total_mat) / total_mat.sum(axis=1)
precision = np.diag(total_mat) / total_mat.sum(axis=0)
fv = np.vectorize(f_score)
fscore = fv(precision, recall, beta=beta)
return all_acc, recall, precision, fscore
def test_metrics():
pred_size = (10, 30, 30)
num_classes = 19
ignore_index = 255
results = np.random.randint(0, num_classes, size=pred_size)
label = np.random.randint(0, num_classes, size=pred_size)
# Test the availability of arg: ignore_index.
label[:, 2, 5:10] = ignore_index
# Test the correctness of the implementation of mIoU calculation.
ret_metrics = eval_metrics(
results, label, num_classes, ignore_index, metrics='mIoU')
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'IoU']
all_acc_l, acc_l, iou_l = legacy_mean_iou(results, label, num_classes,
ignore_index)
assert all_acc == all_acc_l
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)
# Test the correctness of the implementation of mDice calculation.
ret_metrics = eval_metrics(
results, label, num_classes, ignore_index, metrics='mDice')
all_acc, acc, dice = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'Dice']
all_acc_l, acc_l, dice_l = legacy_mean_dice(results, label, num_classes,
ignore_index)
assert all_acc == all_acc_l
assert np.allclose(acc, acc_l)
assert np.allclose(dice, dice_l)
    # Test the correctness of the implementation of mFscore calculation.
ret_metrics = eval_metrics(
results, label, num_classes, ignore_index, metrics='mFscore')
all_acc, recall, precision, fscore = ret_metrics['aAcc'], ret_metrics[
'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
results, label, num_classes, ignore_index)
assert all_acc == all_acc_l
assert np.allclose(recall, recall_l)
assert np.allclose(precision, precision_l)
assert np.allclose(fscore, fscore_l)
# Test the correctness of the implementation of joint calculation.
ret_metrics = eval_metrics(
results,
label,
num_classes,
ignore_index,
metrics=['mIoU', 'mDice', 'mFscore'])
all_acc, acc, iou, dice, precision, recall, fscore = ret_metrics[
'aAcc'], ret_metrics['Acc'], ret_metrics['IoU'], ret_metrics[
'Dice'], ret_metrics['Precision'], ret_metrics[
'Recall'], ret_metrics['Fscore']
assert all_acc == all_acc_l
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)
assert np.allclose(dice, dice_l)
assert np.allclose(precision, precision_l)
assert np.allclose(recall, recall_l)
assert np.allclose(fscore, fscore_l)
# Test the correctness of calculation when arg: num_classes is larger
# than the maximum value of input maps.
results = np.random.randint(0, 5, size=pred_size)
label = np.random.randint(0, 4, size=pred_size)
ret_metrics = eval_metrics(
results,
label,
num_classes,
ignore_index=255,
metrics='mIoU',
nan_to_num=-1)
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'IoU']
assert acc[-1] == -1
assert iou[-1] == -1
ret_metrics = eval_metrics(
results,
label,
num_classes,
ignore_index=255,
metrics='mDice',
nan_to_num=-1)
all_acc, acc, dice = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'Dice']
assert acc[-1] == -1
assert dice[-1] == -1
ret_metrics = eval_metrics(
results,
label,
num_classes,
ignore_index=255,
metrics='mFscore',
nan_to_num=-1)
all_acc, precision, recall, fscore = ret_metrics['aAcc'], ret_metrics[
'Precision'], ret_metrics['Recall'], ret_metrics['Fscore']
assert precision[-1] == -1
assert recall[-1] == -1
assert fscore[-1] == -1
ret_metrics = eval_metrics(
results,
label,
num_classes,
ignore_index=255,
metrics=['mDice', 'mIoU', 'mFscore'],
nan_to_num=-1)
all_acc, acc, iou, dice, precision, recall, fscore = ret_metrics[
'aAcc'], ret_metrics['Acc'], ret_metrics['IoU'], ret_metrics[
'Dice'], ret_metrics['Precision'], ret_metrics[
'Recall'], ret_metrics['Fscore']
assert acc[-1] == -1
assert dice[-1] == -1
assert iou[-1] == -1
assert precision[-1] == -1
assert recall[-1] == -1
assert fscore[-1] == -1
# Test the bug which is caused by torch.histc.
# torch.histc: https://pytorch.org/docs/stable/generated/torch.histc.html
# When the arg:bins is set to be same as arg:max,
# some channels of mIoU may be nan.
results = np.array([np.repeat(31, 59)])
label = np.array([np.arange(59)])
num_classes = 59
ret_metrics = eval_metrics(
results, label, num_classes, ignore_index=255, metrics='mIoU')
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'IoU']
assert not np.any(np.isnan(iou))
def test_mean_iou():
pred_size = (10, 30, 30)
num_classes = 19
ignore_index = 255
results = np.random.randint(0, num_classes, size=pred_size)
label = np.random.randint(0, num_classes, size=pred_size)
label[:, 2, 5:10] = ignore_index
ret_metrics = mean_iou(results, label, num_classes, ignore_index)
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'IoU']
all_acc_l, acc_l, iou_l = legacy_mean_iou(results, label, num_classes,
ignore_index)
assert all_acc == all_acc_l
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)
results = np.random.randint(0, 5, size=pred_size)
label = np.random.randint(0, 4, size=pred_size)
ret_metrics = mean_iou(
results, label, num_classes, ignore_index=255, nan_to_num=-1)
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'IoU']
    assert acc[-1] == -1
    assert iou[-1] == -1
def test_mean_dice():
pred_size = (10, 30, 30)
num_classes = 19
ignore_index = 255
results = np.random.randint(0, num_classes, size=pred_size)
label = np.random.randint(0, num_classes, size=pred_size)
label[:, 2, 5:10] = ignore_index
ret_metrics = mean_dice(results, label, num_classes, ignore_index)
    all_acc, acc, dice = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
        'Dice']
    all_acc_l, acc_l, dice_l = legacy_mean_dice(results, label, num_classes,
                                                ignore_index)
    assert all_acc == all_acc_l
    assert np.allclose(acc, acc_l)
    assert np.allclose(dice, dice_l)
results = np.random.randint(0, 5, size=pred_size)
label = np.random.randint(0, 4, size=pred_size)
ret_metrics = mean_dice(
results, label, num_classes, ignore_index=255, nan_to_num=-1)
all_acc, acc, dice = ret_metrics['aAcc'], ret_metrics['Acc'], ret_metrics[
'Dice']
assert acc[-1] == -1
assert dice[-1] == -1
def test_mean_fscore():
pred_size = (10, 30, 30)
num_classes = 19
ignore_index = 255
results = np.random.randint(0, num_classes, size=pred_size)
label = np.random.randint(0, num_classes, size=pred_size)
label[:, 2, 5:10] = ignore_index
ret_metrics = mean_fscore(results, label, num_classes, ignore_index)
all_acc, recall, precision, fscore = ret_metrics['aAcc'], ret_metrics[
'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
results, label, num_classes, ignore_index)
assert all_acc == all_acc_l
assert np.allclose(recall, recall_l)
assert np.allclose(precision, precision_l)
assert np.allclose(fscore, fscore_l)
ret_metrics = mean_fscore(
results, label, num_classes, ignore_index, beta=2)
all_acc, recall, precision, fscore = ret_metrics['aAcc'], ret_metrics[
'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
all_acc_l, recall_l, precision_l, fscore_l = legacy_mean_fscore(
results, label, num_classes, ignore_index, beta=2)
assert all_acc == all_acc_l
assert np.allclose(recall, recall_l)
assert np.allclose(precision, precision_l)
assert np.allclose(fscore, fscore_l)
results = np.random.randint(0, 5, size=pred_size)
label = np.random.randint(0, 4, size=pred_size)
ret_metrics = mean_fscore(
results, label, num_classes, ignore_index=255, nan_to_num=-1)
all_acc, recall, precision, fscore = ret_metrics['aAcc'], ret_metrics[
'Recall'], ret_metrics['Precision'], ret_metrics['Fscore']
assert recall[-1] == -1
assert precision[-1] == -1
assert fscore[-1] == -1
def test_filename_inputs():
import cv2
import tempfile
def save_arr(input_arrays: list, title: str, is_image: bool, dir: str):
filenames = []
SUFFIX = '.png' if is_image else '.npy'
for idx, arr in enumerate(input_arrays):
filename = '{}/{}-{}{}'.format(dir, title, idx, SUFFIX)
if is_image:
cv2.imwrite(filename, arr)
else:
np.save(filename, arr)
filenames.append(filename)
return filenames
pred_size = (10, 30, 30)
num_classes = 19
ignore_index = 255
results = np.random.randint(0, num_classes, size=pred_size)
labels = np.random.randint(0, num_classes, size=pred_size)
labels[:, 2, 5:10] = ignore_index
with tempfile.TemporaryDirectory() as temp_dir:
result_files = save_arr(results, 'pred', False, temp_dir)
label_files = save_arr(labels, 'label', True, temp_dir)
ret_metrics = eval_metrics(
result_files,
label_files,
num_classes,
ignore_index,
metrics='mIoU')
all_acc, acc, iou = ret_metrics['aAcc'], ret_metrics[
'Acc'], ret_metrics['IoU']
all_acc_l, acc_l, iou_l = legacy_mean_iou(results, labels, num_classes,
ignore_index)
assert all_acc == all_acc_l
assert np.allclose(acc, acc_l)
assert np.allclose(iou, iou_l)
|
11590691
|
import numpy
from ReID_net.Log import log
def shift_im(im, offset):
return shift(im, offset, "reflect", None)
def shift_lab(lab, offset, void_label):
return shift(lab, offset, "constant", void_label)
def generate_video(im, lab, n_frames, max_speed, void_label):
speed = (numpy.random.rand() * 2 - 1.0) * max_speed
step_offset = numpy.random.rand(2)
step_offset /= numpy.linalg.norm(step_offset, 2)
step_offset *= speed
video_ims = [im]
video_labs = [lab]
total_offset = numpy.array([0.0, 0.0])
for frame in range(n_frames - 1):
total_offset += step_offset
im_frame = shift_im(im, total_offset)
lab_frame = shift_lab(lab, total_offset, void_label)
video_ims.append(im_frame)
video_labs.append(lab_frame)
return video_ims, video_labs
def shift(im, offset, mode, value=None):
assert mode in ("reflect", "constant")
if mode == "reflect":
assert value is None
else:
assert value is not None
offset = numpy.round(offset).astype("int32")
start = numpy.maximum(-offset, 0)
size = im.shape[:2] - numpy.abs(offset)
# Extract the image region that is defined by the offset
im = im[start[0]:start[0] + size[0], start[1]:start[1] + size[1]]
# Pad the image on the opposite side
padding = numpy.array([
[max(0, offset[0]), max(0, -offset[0])],
[max(0, offset[1]), max(0, -offset[1])],
[0, 0]
])
if mode == "reflect":
im = numpy.pad(im, padding, mode)
else:
im = numpy.pad(im, padding, mode, constant_values=value)
return im
def make_chunks(fns, size):
res = []
for seq_fns in fns:
l = len(seq_fns)
if l < size:
print("warning, sequence", seq_fns[0], "too short for chunk size", size, file=log.v1)
        for i in range(l // size):
chunk = seq_fns[size * i: size * (i + 1)]
res.append(chunk)
return res
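# Usage sketch (hedged: synthetic shapes for illustration; `lab` must be 3D so
# the constant padding in shift() applies):
# im = numpy.random.rand(64, 64, 3)
# lab = numpy.zeros((64, 64, 1), dtype="int32")
# ims, labs = generate_video(im, lab, n_frames=5, max_speed=10.0, void_label=255)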
|
11590736
|
from torch.autograd import Variable
import torch
import torch.optim
import copy
import numpy as np
from scipy.linalg import hadamard
from .helpers import *
dtype = torch.cuda.FloatTensor
#dtype = torch.FloatTensor
from data import transforms as transform
def exp_lr_scheduler(optimizer, epoch, init_lr=0.001, lr_decay_epoch=500):
"""Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs."""
lr = init_lr * (0.65**(epoch // lr_decay_epoch))
if epoch % lr_decay_epoch == 0:
print('LR is set to {}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
def sqnorm(a):
return np.sum( a*a )
def get_distances(initial_maps,final_maps):
results = []
for a,b in zip(initial_maps,final_maps):
res = sqnorm(a-b)/(sqnorm(a) + sqnorm(b))
results += [res]
return(results)
def get_weights(net):
weights = []
for m in net.modules():
if isinstance(m, nn.Conv2d):
weights += [m.weight.data.cpu().numpy()]
return weights
def channels2imgs(out):
sh = out.shape
chs = int(sh[0]/2)
imgs = np.zeros( (chs,sh[1],sh[2]) )
for i in range(chs):
imgs[i] = np.sqrt( out[2*i]**2 + out[2*i+1]**2 )
return imgs
def fit(net,
img_noisy_var,
num_channels,
img_clean_var,
num_iter = 5000,
LR = 0.01,
OPTIMIZER='adam',
opt_input = False,
reg_noise_std = 0,
reg_noise_decayevery = 100000,
mask_var = None,
apply_f = None,
lr_decay_epoch = 0,
net_input = None,
net_input_gen = "random",
find_best=False,
weight_decay=0,
upsample_mode = "bilinear",
totalupsample = 1,
loss_type="MSE",
output_gradients=False,
output_weights=False,
show_images=False,
plot_after=None,
in_size=None,
MRI_multicoil_reference=None,
):
if net_input is not None:
print("input provided")
else:
if upsample_mode=="bilinear":
# feed uniform noise into the network
totalupsample = 2**len(num_channels)
width = int(img_clean_var.data.shape[2]/totalupsample)
height = int(img_clean_var.data.shape[3]/totalupsample)
elif upsample_mode=="deconv":
# feed uniform noise into the network
totalupsample = 2**(len(num_channels)-1)
width = int(img_clean_var.data.shape[2]/totalupsample)
height = int(img_clean_var.data.shape[3]/totalupsample)
elif upsample_mode=="free":
width,height = in_size
shape = [1,num_channels[0], width, height]
print("input shape: ", shape)
net_input = Variable(torch.zeros(shape)).type(dtype)
net_input.data.uniform_()
net_input.data *= 1./10
net_input = net_input.type(dtype)
net_input_saved = net_input.data.clone()
noise = net_input.data.clone()
p = [x for x in net.parameters() ]
    if opt_input:  # optimize over the input as well
net_input.requires_grad = True
p += [net_input]
mse_wrt_noisy = np.zeros(num_iter)
mse_wrt_truth = np.zeros(num_iter)
print( "init norm: ", np.linalg.norm( net( net_input.type(dtype) ).data.cpu().numpy()[0] ) )
print( "orig img norm: ", np.linalg.norm( img_clean_var.data.cpu().numpy() ))
if OPTIMIZER == 'SGD':
print("optimize with SGD", LR)
optimizer = torch.optim.SGD(p, lr=LR,momentum=0.9,weight_decay=weight_decay)
elif OPTIMIZER == 'adam':
print("optimize with adam", LR)
optimizer = torch.optim.Adam(p, lr=LR,weight_decay=weight_decay)
elif OPTIMIZER == 'LBFGS':
print("optimize with LBFGS", LR)
optimizer = torch.optim.LBFGS(p, lr=LR)
if loss_type=="MSE":
mse = torch.nn.MSELoss() #.type(dtype)
if loss_type=="L1":
mse = nn.L1Loss()
if find_best:
best_net = copy.deepcopy(net)
best_mse = 1000000.0
nconvnets = 0
for p in list(filter(lambda p: len(p.data.shape)>2, net.parameters())):
nconvnets += 1
out_grads = np.zeros((nconvnets,num_iter))
init_weights = get_weights(net)
out_weights = np.zeros(( len(init_weights) ,num_iter))
out_imgs = np.zeros((1,1))
if plot_after is not None:
out_img_np = net( net_input_saved.type(dtype) ).data.cpu().numpy()[0]
out_imgs = np.zeros( (len(plot_after),) + out_img_np.shape )
for i in range(num_iter):
        if lr_decay_epoch != 0:
optimizer = exp_lr_scheduler(optimizer, i, init_lr=LR, lr_decay_epoch=lr_decay_epoch)
if reg_noise_std > 0:
if i % reg_noise_decayevery == 0:
reg_noise_std *= 0.7
net_input = Variable(net_input_saved + (noise.normal_() * reg_noise_std))
def closure():
optimizer.zero_grad()
out = net(net_input.type(dtype))
# training loss
if mask_var is not None:
loss = mse( out * mask_var , img_noisy_var * mask_var )
elif apply_f:
loss = mse( apply_f(out) , img_noisy_var )
else:
loss = mse(out, img_noisy_var)
loss.backward()
mse_wrt_noisy[i] = loss.data.cpu().numpy()
# the actual loss
true_loss = mse( Variable(out.data, requires_grad=False).type(dtype), img_clean_var.type(dtype) )
mse_wrt_truth[i] = true_loss.data.cpu().numpy()
if MRI_multicoil_reference is not None:
out_chs = net( net_input.type(dtype) ).data.cpu().numpy()[0]
out_imgs = channels2imgs(out_chs)
out_img_np = transform.root_sum_of_squares( torch.tensor(out_imgs) , dim=0).numpy()
mse_wrt_truth[i] = np.linalg.norm(MRI_multicoil_reference - out_img_np)
if output_gradients:
for ind,p in enumerate(list(filter(lambda p: p.grad is not None and len(p.data.shape)>2, net.parameters()))):
out_grads[ind,i] = p.grad.data.norm(2).item()
#print(p.grad.data.norm(2).item())
#su += p.grad.data.norm(2).item()
#mse_wrt_noisy[i] = su
if i % 10 == 0:
out2 = net(Variable(net_input_saved).type(dtype))
loss2 = mse(out2, img_clean_var)
print ('Iteration %05d Train loss %f Actual loss %f Actual loss orig %f' % (i, loss.data,mse_wrt_truth[i],loss2.data), '\r', end='')
if show_images:
if i % 50 == 0:
print(i)
                out_img_np = net(net_input.type(dtype)).data.cpu().numpy()[0]
myimgshow(plt,out_img_np)
plt.show()
if plot_after is not None:
if i in plot_after:
out_imgs[ plot_after.index(i) ,:] = net( net_input_saved.type(dtype) ).data.cpu().numpy()[0]
if output_weights:
out_weights[:,i] = np.array( get_distances( init_weights, get_weights(net) ) )
return loss
loss = optimizer.step(closure)
if find_best:
# if training loss improves by at least one percent, we found a new best net
if best_mse > 1.005*loss.data:
best_mse = loss.data
best_net = copy.deepcopy(net)
if find_best:
net = best_net
if output_gradients and output_weights:
return mse_wrt_noisy, mse_wrt_truth,net_input_saved, net, out_grads
elif output_gradients:
return mse_wrt_noisy, mse_wrt_truth,net_input_saved, net, out_grads
elif output_weights:
return mse_wrt_noisy, mse_wrt_truth,net_input_saved, net, out_weights
elif plot_after is not None:
return mse_wrt_noisy, mse_wrt_truth,net_input_saved, net, out_imgs
else:
return mse_wrt_noisy, mse_wrt_truth,net_input_saved, net
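# Call sketch (hedged: `net`, `img_noisy_var` and `img_clean_var` are assumed
# to be built elsewhere, e.g. a decoder network and CUDA image tensors of
# shape [1, C, H, W]):
# mse_noisy, mse_true, ni, net = fit(net, img_noisy_var, num_channels=[64] * 4,
#                                    img_clean_var=img_clean_var,
#                                    num_iter=2000, LR=0.01)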
|
11590758
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, args):
super().__init__()
for k, v in args.__dict__.items():
self.__setattr__(k, v)
self.emb = nn.Embedding(self.dict_size, self.emb_dim)
self.first_gru = nn.GRU(input_size=self.emb_dim,
hidden_size=self.first_rnn_hsz,
num_layers=1,
batch_first=True)
self.transform_A = nn.Linear(
self.first_rnn_hsz, self.first_rnn_hsz, bias=False)
self.cnn = nn.Conv2d(in_channels=2,
out_channels=self.fillters,
kernel_size=self.kernel_size)
self.match_vec = nn.Linear(16 * 16 * 8, self.match_vec_dim)
self.second_gru = nn.GRU(input_size=self.match_vec_dim,
hidden_size=self.second_rnn_hsz,
num_layers=1)
self.pred = nn.Linear(self.match_vec_dim, 2)
self._reset_parameters()
def _reset_parameters(self):
stdv = 1. / math.sqrt(self.emb_dim)
self.emb.weight.data.uniform_(-stdv, stdv)
self.transform_A.weight.data.uniform_(-stdv, stdv)
self.match_vec.weight.data.uniform_(-stdv, stdv)
self.match_vec.bias.data.fill_(0)
self.pred.weight.data.uniform_(-stdv, stdv)
self.pred.bias.data.fill_(0)
def forward(self, utterances, responses):
bsz = utterances.size(0)
resps_emb = self.emb(responses)
resps_gru, _ = self.first_gru(resps_emb)
resps_gru = F.dropout(resps_gru, p=self.dropout)
resps_emb_t = resps_emb.transpose(1, 2)
resps_gru_t = resps_gru.transpose(1, 2)
uttes_t = utterances.transpose(0, 1)
match_vecs = []
for utte in uttes_t:
utte_emb = self.emb(utte)
mat_1 = torch.matmul(utte_emb, resps_emb_t)
utte_gru, _ = self.first_gru(utte_emb)
utte_gru = F.dropout(utte_gru, p=self.dropout)
mat_2 = torch.matmul(self.transform_A(utte_gru), resps_gru_t)
M = torch.stack([mat_1, mat_2], 1)
cnn_layer = F.relu(self.cnn(M))
pool_layer = F.max_pool2d(cnn_layer,
self.kernel_size,
stride=self.kernel_size)
pool_layer = pool_layer.view(bsz, -1)
match_vec = F.relu(self.match_vec(pool_layer))
match_vecs.append(match_vec)
match_vecs = torch.stack(match_vecs, 0)
match_vecs = F.dropout(match_vecs, p=self.dropout)
_, hidden = self.second_gru(match_vecs)
hidden = F.dropout(hidden[-1], p=self.dropout)
props = F.log_softmax(self.pred(hidden), dim=-1)
return props
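if __name__ == "__main__":
    # Smoke test (hedged: the argument names are inferred from the attribute
    # reads in __init__; 50-token sequences with kernel_size=3 match the
    # hard-coded 16 * 16 * 8 input size of self.match_vec).
    from types import SimpleNamespace
    args = SimpleNamespace(dict_size=1000, emb_dim=64, first_rnn_hsz=64,
                           fillters=8, kernel_size=3, match_vec_dim=50,
                           second_rnn_hsz=50, dropout=0.1)
    model = Model(args)
    utterances = torch.randint(0, 1000, (2, 4, 50))
    responses = torch.randint(0, 1000, (2, 50))
    print(model(utterances, responses).shape)  # torch.Size([2, 2])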
|
11590763
|
from math import log
def next_text(modeler, dataset, subset_size=50):
# Limit our search to the least seen examples
# TODO it's a little hacky to use the dataframe underlying the dataset...
min_seen = dataset.df["seen"].min()
least_seen_examples = dataset.df[dataset.df["seen"]==min_seen]
    if (len(least_seen_examples) == 1) or (len(modeler.get_lfs()) == 0):
# We have no labelling functions, or only one example hasn't been seen:
res_idx = least_seen_examples.sample(1).index[0]
else:
modeler.fit(dataset)
# Sample at most subset_size examples
subset_size = min(subset_size, len(least_seen_examples))
subset = least_seen_examples.sample(subset_size)
probs = modeler.predict(subset)
        entropies = [entropy(x) for x in probs]
        max_entropy = max(entropies)
        subset = subset[[e == max_entropy for e in entropies]]
        res_idx = subset.sample(1).index[0]
dataset.df.at[res_idx, "seen"] += 1
return {"text": dataset.df.at[res_idx, "text"], "id": int(res_idx)}
def entropy(prob_dist):
#return(-(L_row_i==-1).sum())
return(-sum([x*log(x) for x in prob_dist]))
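if __name__ == "__main__":
    # Sanity check: a uniform two-class distribution has entropy log(2).
    assert abs(entropy([0.5, 0.5]) - log(2)) < 1e-12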
|
11590765
|
import logging
import time
import pytest
import salt.defaults.exitcodes
from saltfactories.utils import random_string
from tests.support.helpers import PRE_PYTEST_SKIP_REASON
pytestmark = [
pytest.mark.slow_test,
pytest.mark.windows_whitelisted,
]
log = logging.getLogger(__name__)
@pytest.fixture
def master_id():
return random_string("master-")
@pytest.mark.skip_on_windows(reason=PRE_PYTEST_SKIP_REASON)
def test_exit_status_correct_usage(salt_factories, master_id):
factory = salt_factories.salt_master_daemon(master_id)
factory.start()
assert factory.is_running()
time.sleep(0.5)
ret = factory.terminate()
assert ret.exitcode == salt.defaults.exitcodes.EX_OK, ret
|
11590842
|
import click
from testplan.cli.commands import single_reader_commands
from testplan.cli.commands import writer_commands
from testplan.cli.utils.actions import ProcessResultAction, ParseSingleAction
@click.group(name="convert", chain=True)
def convert():
"""
Convert a single input file to testplan format.
Once converted, then can dump to a target destination or display it through a local webui.
The parameters forms a pipeline which starts with a source command (from*) then any write (to*)
or a display command.
use convert COMMAND --help to get more details of the subcommands.
"""
pass
@convert.resultcallback()
def run_actions(actions):
parse, *processors = actions
if not (
isinstance(parse, ParseSingleAction)
and all((isinstance(p, ProcessResultAction) for p in processors))
):
raise click.UsageError(
"convert need a single parser like from* and can have many processor or targets like to* or display"
)
result = parse()
for process in processors:
result = process(result)
single_reader_commands.register_to(convert)
writer_commands.register_to(convert)
|
11590855
|
import pytest
import numpy as np
from scipy import sparse
from sklearn.datasets import load_iris
keras = pytest.importorskip("keras")
from keras.models import Sequential # noqa: E402
from keras.layers import Dense # noqa: E402
from keras.utils import to_categorical # noqa: E402
from imblearn.datasets import make_imbalance # noqa: E402
from imblearn.under_sampling import ClusterCentroids # noqa: E402
from imblearn.under_sampling import NearMiss # noqa: E402
from imblearn.over_sampling import RandomOverSampler # noqa: E402
from imblearn.keras import BalancedBatchGenerator # noqa: E402
from imblearn.keras import balanced_batch_generator # noqa: E402
@pytest.fixture
def data():
iris = load_iris()
X, y = make_imbalance(iris.data, iris.target, {0: 30, 1: 50, 2: 40})
y = to_categorical(y, 3)
return X, y
def _build_keras_model(n_classes, n_features):
model = Sequential()
model.add(Dense(n_classes, input_dim=n_features, activation="softmax"))
model.compile(
optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"]
)
return model
def test_balanced_batch_generator_class_no_return_indices(data):
with pytest.raises(ValueError, match="needs to have an attribute"):
BalancedBatchGenerator(*data, sampler=ClusterCentroids(), batch_size=10)
@pytest.mark.filterwarnings("ignore:`wait_time` is not used") # keras 2.2.4
@pytest.mark.parametrize(
"sampler, sample_weight",
[
(None, None),
(RandomOverSampler(), None),
(NearMiss(), None),
(None, np.random.uniform(size=120)),
],
)
def test_balanced_batch_generator_class(data, sampler, sample_weight):
X, y = data
model = _build_keras_model(y.shape[1], X.shape[1])
training_generator = BalancedBatchGenerator(
X,
y,
sample_weight=sample_weight,
sampler=sampler,
batch_size=10,
random_state=42,
)
model.fit_generator(generator=training_generator, epochs=10)
@pytest.mark.parametrize("keep_sparse", [True, False])
def test_balanced_batch_generator_class_sparse(data, keep_sparse):
X, y = data
training_generator = BalancedBatchGenerator(
sparse.csr_matrix(X),
y,
batch_size=10,
keep_sparse=keep_sparse,
random_state=42,
)
for idx in range(len(training_generator)):
X_batch, _ = training_generator.__getitem__(idx)
if keep_sparse:
assert sparse.issparse(X_batch)
else:
assert not sparse.issparse(X_batch)
def test_balanced_batch_generator_function_no_return_indices(data):
with pytest.raises(ValueError, match="needs to have an attribute"):
balanced_batch_generator(
*data, sampler=ClusterCentroids(), batch_size=10, random_state=42
)
@pytest.mark.filterwarnings("ignore:`wait_time` is not used") # keras 2.2.4
@pytest.mark.parametrize(
"sampler, sample_weight",
[
(None, None),
(RandomOverSampler(), None),
(NearMiss(), None),
(None, np.random.uniform(size=120)),
],
)
def test_balanced_batch_generator_function(data, sampler, sample_weight):
X, y = data
model = _build_keras_model(y.shape[1], X.shape[1])
training_generator, steps_per_epoch = balanced_batch_generator(
X,
y,
sample_weight=sample_weight,
sampler=sampler,
batch_size=10,
random_state=42,
)
model.fit_generator(
generator=training_generator,
steps_per_epoch=steps_per_epoch,
epochs=10,
)
@pytest.mark.parametrize("keep_sparse", [True, False])
def test_balanced_batch_generator_function_sparse(data, keep_sparse):
X, y = data
training_generator, steps_per_epoch = balanced_batch_generator(
sparse.csr_matrix(X),
y,
keep_sparse=keep_sparse,
batch_size=10,
random_state=42,
)
for _ in range(steps_per_epoch):
X_batch, _ = next(training_generator)
if keep_sparse:
assert sparse.issparse(X_batch)
else:
assert not sparse.issparse(X_batch)
|
11590875
|
import torch.nn as nn
import torch
class LR(nn.Module):
def __init__(self, config):
super().__init__()
self.model = nn.Linear(in_features=config.hidden_size, out_features=1)
def forward(self, *inputs, **kwargs):
feature = kwargs.pop("features")
if feature.dim() == 3:
feature = torch.mean(feature, dim=1)
output = self.model(feature)
return output
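if __name__ == "__main__":
    # Shape sketch (hedged: `config` only needs a `hidden_size` attribute
    # here; sizes are illustrative).
    from types import SimpleNamespace
    lr = LR(SimpleNamespace(hidden_size=768))
    out = lr(features=torch.randn(4, 10, 768))
    print(out.shape)  # 3D features are mean-pooled over dim 1 -> torch.Size([4, 1])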
|
11590882
|
import datetime
import itertools
import logging
import logging.config
import urllib.parse
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db.models import Q
import pytz
from opencivicdata.legislative.models import BillDocumentLink, BillVersionLink, \
EventDocumentLink, EventRelatedEntity
for configuration in ['AWS_KEY','AWS_SECRET']:
if not hasattr(settings, configuration):
raise ImproperlyConfigured(
'Please define {0} in settings_deployment.py'.format(configuration))
logging.config.dictConfig(settings.LOGGING)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    help = 'Refreshes the document cache by deleting documents that need to be newly created'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.local_now = pytz.timezone(settings.TIME_ZONE)\
.localize(datetime.datetime.now())
self.bills_on_upcoming_agendas = EventRelatedEntity.objects.filter(
bill__isnull=False,
agenda_item__event__start_date__gte=self.local_now
).values_list('bill__id')
def handle(self, *args, **options):
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
s3_conn = S3Connection(settings.AWS_KEY, settings.AWS_SECRET)
document_urls = self._get_urls()
aws_keys = self._create_keys(document_urls)
bucket = s3_conn.get_bucket('councilmatic-document-cache')
bucket.delete_keys(aws_keys)
success_message = 'Removed {} document(s) from the councilmatic-document-cache'.format(len(aws_keys))
logger.info(success_message)
def _get_bill_versions(self, window_start):
'''
Retrieve URLs of updated and upcoming versions, i.e., the bills
themselves.
'''
recently_updated = Q(version__bill__updated_at__gte=window_start)
upcoming = Q(version__bill__id__in=self.bills_on_upcoming_agendas)
return BillVersionLink.objects.filter(
recently_updated | upcoming
).values_list('url', flat=True)
def _get_bill_documents(self, window_start):
'''
Retrieve URLs of updated and upcoming documents, i.e., attachments
to bills (versions).
'''
has_versions = Q(document__bill__versions__isnull=False)
recently_updated = Q(document__bill__updated_at__gte=window_start)
upcoming = Q(document__bill__id__in=self.bills_on_upcoming_agendas)
return BillDocumentLink.objects.filter(
has_versions & (recently_updated | upcoming)
).values_list('url', flat=True)
def _get_event_documents(self, window_start):
'''
Retrieve URLs of updated and upcoming event documents, i.e., agendas.
'''
recently_updated = Q(document__event__updated_at__gte=window_start)
upcoming = Q(document__event__start_date__gte=self.local_now)
return EventDocumentLink.objects.filter(
recently_updated | upcoming
).values_list('url', flat=True)
def _get_urls(self):
'''
Get the URLs of bill and event documents if the related bill or event
        has been updated in the past hour, or if they are related to an event
that is scheduled for a future date, as these are the documents that are
most likely to change.
        This is a workaround for a known issue where making changes to data in
        Legistar (DataMade's source data system) does not always toggle the
        updated timestamps that tell us to rescrape entities in our database.
'''
one_hour_ago = self.local_now - datetime.timedelta(hours=1)
return itertools.chain(
self._get_bill_versions(one_hour_ago),
self._get_bill_documents(one_hour_ago),
self._get_event_documents(one_hour_ago)
)
def _create_keys(self, document_urls):
return [urllib.parse.quote_plus(url) for url in document_urls]
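# Example: urllib.parse.quote_plus('https://example.com/doc.pdf') returns
# 'https%3A%2F%2Fexample.com%2Fdoc.pdf', which is presumably the key scheme
# used when the documents were first cached.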
|
11590893
|
from conans import ConanFile, CMake, tools
import os
required_conan_version = ">=1.33.0"
class SrtConan(ConanFile):
name = "srt"
homepage = "https://github.com/Haivision/srt"
description = "Secure Reliable Transport (SRT) is an open source transport technology that optimizes streaming performance across unpredictable networks, such as the Internet."
topics = ("conan", "srt", "ip", "transport")
url = "https://github.com/conan-io/conan-center-index"
license = "MPL-2.0"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
short_paths = True
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = "cmake", "cmake_find_package"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
@property
def _has_stdcxx_sync(self):
return tools.Version(self.version) >= "1.4.2"
@property
def _has_posix_threads(self):
return not (self.settings.os == "Windows" and (self.settings.compiler == "Visual Studio" or \
(self.settings.compiler == "gcc" and self.settings.compiler.get_safe("threads") == "win32")))
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("openssl/1.1.1k")
if not self._has_posix_threads and not self._has_stdcxx_sync:
self.requires("pthreads4w/3.0.0")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"),
"set (CMAKE_MODULE_PATH \"${CMAKE_CURRENT_SOURCE_DIR}/scripts\")",
"list(APPEND CMAKE_MODULE_PATH \"${CMAKE_CURRENT_SOURCE_DIR}/scripts\")")
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["ENABLE_APPS"] = False
self._cmake.definitions["ENABLE_LOGGING"] = False
self._cmake.definitions["ENABLE_SHARED"] = self.options.shared
self._cmake.definitions["ENABLE_STATIC"] = not self.options.shared
if self._has_stdcxx_sync:
self._cmake.definitions["ENABLE_STDCXX_SYNC"] = True
self._cmake.definitions["ENABLE_ENCRYPTION"] = True
self._cmake.definitions["USE_OPENSSL_PC"] = False
if self.settings.compiler == "Visual Studio":
# required to avoid warnings when srt shared, even if openssl shared,
# otherwise upstream CMakeLists would add /DELAYLOAD:libeay32.dll to link flags
self._cmake.definitions["OPENSSL_USE_STATIC_LIBS"] = True
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
def package_info(self):
self.cpp_info.names["pkg_config"] = "srt"
suffix = "_static" if self.settings.compiler == "Visual Studio" and not self.options.shared else ""
self.cpp_info.libs = ["srt" + suffix]
if self.options.shared:
self.cpp_info.defines = ["SRT_DYNAMIC"]
if self.settings.os == "Linux":
self.cpp_info.system_libs = ["pthread"]
if self.settings.os == "Windows":
self.cpp_info.system_libs = ["ws2_32"]
|
11590919
|
from directory_constants.choices import COUNTRY_CHOICES
from domestic.forms import SectorPotentialForm, UKEFContactForm
def test_sector_potential_form():
sector_list = [
{'name': 'Sector One'},
{'name': 'Sector Two'},
{'name': 'Sector Three'},
{'name': 'Sector Four'},
]
form = SectorPotentialForm(sector_list)
assert form.fields['sector'].choices == [
('', 'Select your sector'), # From the form's base choices
('Sector Four', 'Sector Four'), # Alphabetically ordered
('Sector One', 'Sector One'),
('Sector Three', 'Sector Three'),
('Sector Two', 'Sector Two'),
]
def test_ukef_contact_form_validations(valid_contact_form_data):
form = UKEFContactForm(data=valid_contact_form_data)
assert form.is_valid()
assert form.cleaned_data['full_name'] == valid_contact_form_data['full_name']
assert form.cleaned_data['email'] == valid_contact_form_data['email']
def test_ukef_contact_form_api_serialization(valid_contact_form_data):
form = UKEFContactForm(data=valid_contact_form_data)
assert form.is_valid()
api_data = form.serialized_data
country_label = dict(COUNTRY_CHOICES).get(form.cleaned_data['country'])
assert api_data['country_label'] == country_label
def test_ukef_community_form_api_serialization_with_other_options(valid_contact_form_data_with_extra_options):
form = UKEFContactForm(data=valid_contact_form_data_with_extra_options)
assert form.is_valid()
assert form.cleaned_data['like_to_discuss'] == 'yes'
api_data = form.serialized_data
like_to_discuss_country = dict(COUNTRY_CHOICES).get(form.cleaned_data['like_to_discuss_other'])
assert api_data['like_to_discuss_country'] == like_to_discuss_country
|
11590963
|
from collections import OrderedDict
from SeleniumLibrary.base import LibraryComponent, keyword
class PluginWithAllArgs(LibraryComponent):
def __init__(self, ctx, arg, *varargs, **kwargs):
LibraryComponent.__init__(self, ctx)
self.arg = arg
self.varargs = varargs
self.kwargs = kwargs
@keyword
def return_all_args_as_string(self):
joined_str = "start: arg=%s," % self.arg
for arg in self.varargs:
joined_str = f"{joined_str} {arg},"
kwargs = OrderedDict(sorted(self.kwargs.items()))
for key in kwargs:
joined_str = "{} {}={},".format(joined_str, key, kwargs[key])
return joined_str[:-1]
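# Usage sketch (hedged: Robot Framework settings syntax; the plugin path and
# arguments are illustrative):
# Library    SeleniumLibrary    plugins=${CURDIR}/PluginWithAllArgs.py;arg1;varg1;kw1=1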
|
11590985
|
import sys
import time
from i2cdriver import I2CDriver, EDS
if __name__ == '__main__':
i2 = I2CDriver(sys.argv[1])
d = EDS.LED(i2)
TEAL = 0x008080
ORANGE = 0xffa500
    while True:
time.sleep(1)
d.hex(TEAL, 3)
time.sleep(1)
d.hex(ORANGE, 3)
|
11591006
|
import torch
from deepsvg.difflib.tensor import SVGTensor
from torch.distributions.categorical import Categorical
import torch.nn.functional as F
def _get_key_padding_mask(commands, seq_dim=0):
"""
Args:
commands: Shape [S, ...]
"""
with torch.no_grad():
key_padding_mask = (commands == SVGTensor.COMMANDS_SIMPLIFIED.index("EOS")).cumsum(dim=seq_dim) > 0
if seq_dim == 0:
return key_padding_mask.transpose(0, 1)
return key_padding_mask
def _get_padding_mask(commands, seq_dim=0, extended=False):
with torch.no_grad():
padding_mask = (commands == SVGTensor.COMMANDS_SIMPLIFIED.index("EOS")).cumsum(dim=seq_dim) == 0
padding_mask = padding_mask.float()
if extended:
# padding_mask doesn't include the final EOS, extend by 1 position to include it in the loss
S = commands.size(seq_dim)
torch.narrow(padding_mask, seq_dim, 3, S-3).add_(torch.narrow(padding_mask, seq_dim, 0, S-3)).clamp_(max=1)
if seq_dim == 0:
return padding_mask.unsqueeze(-1)
return padding_mask
def _get_group_mask(commands, seq_dim=0):
"""
Args:
commands: Shape [S, ...]
"""
with torch.no_grad():
group_mask = (commands == SVGTensor.COMMANDS_SIMPLIFIED.index("m")).cumsum(dim=seq_dim)
return group_mask
def _get_visibility_mask(commands, seq_dim=0):
"""
Args:
commands: Shape [S, ...]
"""
S = commands.size(seq_dim)
with torch.no_grad():
visibility_mask = (commands == SVGTensor.COMMANDS_SIMPLIFIED.index("EOS")).sum(dim=seq_dim) < S - 1
if seq_dim == 0:
return visibility_mask.unsqueeze(-1)
return visibility_mask
def _get_key_visibility_mask(commands, seq_dim=0):
S = commands.size(seq_dim)
with torch.no_grad():
key_visibility_mask = (commands == SVGTensor.COMMANDS_SIMPLIFIED.index("EOS")).sum(dim=seq_dim) >= S - 1
if seq_dim == 0:
return key_visibility_mask.transpose(0, 1)
return key_visibility_mask
def _generate_square_subsequent_mask(sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def _sample_categorical(temperature=0.0001, *args_logits):
if len(args_logits) == 1:
arg_logits, = args_logits
return Categorical(logits=arg_logits / temperature).sample()
    return tuple(Categorical(logits=arg_logits / temperature).sample() for arg_logits in args_logits)
def _threshold_sample(arg_logits, threshold=0.5, temperature=1.0):
scores = F.softmax(arg_logits / temperature, dim=-1)[..., 1]
return scores > threshold
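if __name__ == "__main__":
    # Tiny demo: a causal (subsequent) mask for a length-4 sequence; each
    # position may attend only to itself and earlier positions.
    print(_generate_square_subsequent_mask(4))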
|
11591061
|
from typing import List
class Solution:
def findLengthOfLCIS(self, nums: List[int]) -> int:
anchor, length = 0, 0
for i in range(len(nums)):
if i and nums[i - 1] >= nums[i]:
anchor = i
length = max(length, i - anchor + 1)
return length
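if __name__ == "__main__":
    # [1, 3, 5] is the longest strictly increasing run, so the answer is 3.
    assert Solution().findLengthOfLCIS([1, 3, 5, 4, 7]) == 3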
|
11591091
|
from typing import Union, Dict
from cif.atoms import atoms
def formula_str_to_dict(sumform: Union[str, bytes]) -> Dict[str, str]:
"""
    Converts an atom name like C12 to the element symbol C.
    Use this code to find the atoms while going through the character stream of a sum formula,
    e.g. C12H6O3Mn7.
    Find two-char atoms, then one-char ones, and see if numbers are in between.
"""
elements = [x.upper() for x in atoms]
atlist = {}
nums = []
try:
sumform = sumform.upper().replace(' ', '').replace('\n', '').replace('\r', '')
except AttributeError:
print('Error in formula_str_to_dict')
return atlist
def isnumber(el):
for x in el:
if x.isnumeric() or x == '.':
nums.append(x)
else:
# end of number
break
while sumform:
if sumform[0:2] in elements: # The two-character elements
isnumber(sumform[2:])
atlist[sumform[0:2].capitalize()] = "".join(nums)
sumform = sumform[2 + len(nums):]
nums.clear()
elif sumform[0] in elements:
isnumber(sumform[1:])
atlist[sumform[0]] = "".join(nums)
sumform = sumform[1 + len(nums):]
nums.clear()
else:
            raise KeyError(f'Unknown element at the start of "{sumform}"')
return atlist
def sum_formula_to_html(sumform: Dict[str, str], break_after: int = 99) -> str:
"""
Makes html formatted sum formula from dictionary.
"""
if not sumform:
return ''
l = ['<html><body>']
num = 0
for el in sumform:
        if sumform[el] == 0 or sumform[el] is None:
continue
try:
times = round(float(sumform[el]), 1)
except (TypeError, ValueError):
times = 1
if num > 3 and num % break_after == 0:
l.append("<br>")
if times == 1:
l.append('{}'.format(el))
else:
l.append("{}<sub>{:g}</sub>".format(el, times))
num += 1
l.append('</body></html>')
formula = "".join(l)
return formula
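# Worked examples (assuming `atoms` holds the standard element symbols):
#   formula_str_to_dict('C12H6O3Mn7')
#   -> {'C': '12', 'H': '6', 'O': '3', 'Mn': '7'}
#   sum_formula_to_html({'C': '12', 'H': '6'})
#   -> '<html><body>C<sub>12</sub>H<sub>6</sub></body></html>'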
|
11591162
|
DEFAULT_CONFIG_PATH = "~/.sotabench/sotabenchapi.ini"
SOTABENCH_API_URL = "https://sotabench.com/api/v0"
|
11591164
|
import csv
import pprint
import bw2io
import wurst
from bw2data.database import DatabaseChooser
from wurst import searching as ws
from . import DATA_DIR
FILEPATH_FIX_NAMES = DATA_DIR / "fix_names.csv"
FILEPATH_BIOSPHERE_FLOWS = DATA_DIR / "dict_biosphere.txt"
class DatabaseCleaner:
"""
Class that cleans the datasets contained in the inventory database for further processing.
    :ivar source_type: type of the database source. Can be 'brightway' or 'ecospold'.
:vartype source_type: str
:ivar source_db: name of the source database if `source_type` == 'brightway'
:vartype source_db: str
:ivar source_file_path: filepath of the database if `source_type` == 'ecospold'.
:vartype source_file_path: str
"""
def __init__(self, source_db, source_type, source_file_path):
if source_type == "brightway":
# Check that database exists
if len(DatabaseChooser(source_db)) == 0:
raise NameError(
"The database selected is empty. Make sure the name is correct"
)
self.db = wurst.extract_brightway2_databases(source_db)
if source_type == "ecospold":
# The ecospold data needs to be formatted
ei = bw2io.SingleOutputEcospold2Importer(source_file_path, source_db)
ei.apply_strategies()
self.db = ei.data
# Location field is added to exchanges
self.add_location_field_to_exchanges()
# Product field is added to exchanges
self.add_product_field_to_exchanges()
# Parameter field is converted from a list to a dictionary
self.transform_parameter_field()
def add_negative_CO2_flows_for_biomass_ccs(self):
"""
        Rescale the amount of all exchanges of carbon dioxide, non-fossil by a factor of -9 (0.9 / -0.1),
to account for sequestered CO2.
All CO2 capture and storage in the Carma datasets is assumed to be 90% efficient.
Thus, we can simply find out what the new CO2 emission is and then we know how much gets stored in the ground.
It's very important that we ONLY do this for biomass CCS plants, as only they will have negative emissions!
Modifies in place (does not return anything).
"""
for ds in ws.get_many(
self.db, ws.contains("name", "storage"), ws.equals("database", "Carma CCS")
):
for exc in ws.biosphere(
ds, ws.equals("name", "Carbon dioxide, non-fossil")
):
wurst.rescale_exchange(exc, (0.9 / -0.1), remove_uncertainty=True)
def change_biogenic_co2_name(self):
"""
CO2 capture through biomass growth is represented with `Carbon dioxide, in air`.
        However, such a flow does not have a CF in the IPCC method. This becomes an issue when biomass
        is used together with CCS.
        Hence, we change the flow name to `Carbon dioxide, to soil or biomass stock`, for which the IPCC
        has a CF of -1.
:return:
"""
for ds in self.db:
for exc in ws.biosphere(ds, ws.equals("name", "Carbon dioxide, in air")):
exc["name"] = "Carbon dioxide, to soil or biomass stock"
exc["categories"] = ("soil",)
for exc in ws.biosphere(
ds, ws.equals("name", "Carbon dioxide, non-fossil")
):
exc["name"] = "Carbon dioxide, from soil or biomass stock"
exc["categories"] = ("air",)
@staticmethod
def get_fix_names_dict():
"""
Loads a csv file into a dictionary. This dictionary contains a few location names
that need correction in the wurst inventory database.
:return: dictionary that contains names equivalence
:rtype: dict
"""
with open(FILEPATH_FIX_NAMES) as f:
return dict(filter(None, csv.reader(f, delimiter=";")))
def get_rev_fix_names_dict(self):
"""
Reverse the fix_names dictionary.
:return: dictionary that contains names equivalence
:rtype: dict
"""
return {v: k for k, v in self.get_fix_names_dict().items()}
@staticmethod
def get_biosphere_flow_uuid():
"""
Retrieve a dictionary with biosphere flow (name, categories, unit) --> uuid.
:returns: dictionary with biosphere flow (name, categories, unit) --> uuid
:rtype: dict
"""
if not FILEPATH_BIOSPHERE_FLOWS.is_file():
raise FileNotFoundError(
"The dictionary of biosphere flows could not be found."
)
csv_dict = {}
with open(FILEPATH_BIOSPHERE_FLOWS) as f:
input_dict = csv.reader(f, delimiter=";")
for row in input_dict:
csv_dict[(row[0], row[1], row[2], row[3])] = row[-1]
return csv_dict
@staticmethod
def get_biosphere_flow_categories():
"""
Retrieve a dictionary with biosphere flow uuids and categories.
:returns: dictionary with biosphere flow uuids as keys and categories as values
:rtype: dict
"""
if not FILEPATH_BIOSPHERE_FLOWS.is_file():
raise FileNotFoundError(
"The dictionary of biosphere flows could not be found."
)
csv_dict = {}
with open(FILEPATH_BIOSPHERE_FLOWS) as f:
input_dict = csv.reader(f, delimiter=";")
for row in input_dict:
csv_dict[row[-1]] = (
(row[1], row[2]) if row[2] != "unspecified" else (row[1],)
)
return csv_dict
@staticmethod
def remove_nones(db):
"""
Remove empty exchanges in the datasets of the wurst inventory database.
Modifies in place (does not return anything).
:param db: wurst inventory database
:type db: list
"""
exists = lambda x: {k: v for k, v in x.items() if v is not None}
for ds in db:
ds["exchanges"] = [exists(exc) for exc in ds["exchanges"]]
def find_product_given_lookup_dict(self, lookup_dict):
"""
        Return a list of product names, given the filtering conditions given in `lookup_dict`.
        It is, for example, used to return a list of product names based on the name and the unit of a dataset.
        :param lookup_dict: a dictionary with filtering conditions
        :return: a list of product names
:rtype: list
"""
return [
x["product"]
for x in wurst.searching.get_many(
self.db, *[ws.equals(k, v) for k, v in lookup_dict.items()]
)
]
def find_location_given_lookup_dict(self, lookup_dict):
"""
Return a list of location names, given the filtering conditions given in `lookup_dict`.
It is, for example, used to return a list of location names based on the name and the unit of a dataset.
:param lookup_dict: a dictionary with filtering conditions
:return: a list of location names
:rtype: list
"""
return [
x["location"]
for x in wurst.searching.get_many(
self.db, *[ws.equals(k, v) for k, v in lookup_dict.items()]
)
]
def add_location_field_to_exchanges(self):
"""Add the `location` key to the production and
technosphere exchanges in :attr:`db`.
:raises IndexError: if no corresponding activity (and reference product) can be found.
"""
d_location = {(a["database"], a["code"]): a["location"] for a in self.db}
for a in self.db:
for e in a["exchanges"]:
if e["type"] == "technosphere":
exc_input = e["input"]
e["location"] = d_location[exc_input]
def add_product_field_to_exchanges(self):
"""Add the `product` key to the production and
technosphere exchanges in :attr:`db`.
For production exchanges, use the value of the `reference_product` field.
For technosphere exchanges, search the activities in :attr:`db` and
use the reference product.
:raises IndexError: if no corresponding activity (and reference product) can be found.
"""
# Create a dictionary that contains the 'code' field as key and the 'product' field as value
d_product = {a["code"]: (a["reference product"], a["name"]) for a in self.db}
# Add a `product` field to the production exchange
for x in self.db:
for y in x["exchanges"]:
if y["type"] == "production":
if "product" not in y:
y["product"] = x["reference product"]
if y["name"] != x["name"]:
y["name"] = x["name"]
# Add a `product` field to technosphere exchanges
for x in self.db:
for y in x["exchanges"]:
if y["type"] == "technosphere":
# Check if the field 'product' is present
if "product" not in y:
y["product"] = d_product[y["input"][1]][0]
# If a 'reference product' field is present, we make sure it matches with the new 'product' field
if "reference product" in y:
try:
assert y["product"] == y["reference product"]
except AssertionError:
y["product"] = d_product[y["input"][1]][0]
# Ensure the name is correct
y["name"] = d_product[y["input"][1]][1]
def transform_parameter_field(self):
# When handling ecospold files directly, the parameter field is a list.
# It is here transformed into a dictionary
for x in self.db:
x["parameters"] = {k["name"]: k["amount"] for k in x["parameters"]}
# Functions to clean up Wurst import and additional technologies
def fix_unset_technosphere_and_production_exchange_locations(
self, matching_fields=("name", "unit")
):
"""
        Give all the production and technosphere exchanges with a missing location name the location of the dataset
they belong to.
Modifies in place (does not return anything).
:param matching_fields: filter conditions
:type matching_fields: tuple
"""
for ds in self.db:
# collect production exchanges that simply do not have a location key and set it to
# the location of the dataset
for exc in wurst.production(ds):
if "location" not in exc:
exc["location"] = ds["location"]
for exc in wurst.technosphere(ds):
if "location" not in exc:
locs = self.find_location_given_lookup_dict(
{k: exc.get(k) for k in matching_fields}
)
if len(locs) == 1:
exc["location"] = locs[0]
else:
print(
"No unique location found for exchange:\n{}\nFound: {}".format(
pprint.pformat(exc), locs
)
)
def fix_biosphere_flow_categories(self):
"""Add a `categories` for biosphere flows if missing.
This happens when importing directly from ecospold files"""
dict_bio_cat = self.get_biosphere_flow_categories()
dict_bio_uuid = self.get_biosphere_flow_uuid()
for ds in self.db:
for exc in ds["exchanges"]:
if exc["type"] == "biosphere":
if "categories" not in exc:
if "input" in exc:
# from the uuid, fetch the flow category
if exc["input"][1] in dict_bio_cat:
exc["categories"] = dict_bio_cat[exc["input"][1]]
else:
print(
f"Missing flow category for {exc['name']} with UUID {exc['input'][1]}. It will be deleted."
)
exc["delete"] = True
else:
print(
f"Missing flow category for {exc['name']}. It will be deleted."
)
exc["delete"] = True
if "input" not in exc:
if "categories" in exc:
# from the category, fetch the uuid of that biosphere flow
cat = (
exc["categories"]
if len(exc["categories"]) > 1
else (exc["categories"][0], "unspecified")
)
uuid = dict_bio_uuid[
exc["name"], cat[0], cat[1], exc["unit"]
]
exc["input"] = ("biosphere3", uuid)
ds["exchanges"] = [exc for exc in ds["exchanges"] if "delete" not in exc]
def prepare_datasets(self):
"""
Clean datasets for all databases listed in scenarios: fix location names, remove
empty exchanges, etc.
"""
# Set missing locations to ```GLO``` for datasets in ``database``
print("Set missing location of datasets to global scope.")
wurst.default_global_location(self.db)
# Set missing locations to ```GLO``` for exchanges in ``datasets``
print("Set missing location of production exchanges to scope of dataset.")
print("Correct missing location of technosphere exchanges.")
self.fix_unset_technosphere_and_production_exchange_locations()
print("Correct missing flow categories for biosphere exchanges")
self.fix_biosphere_flow_categories()
# Remove empty exchanges
print("Remove empty exchanges.")
self.remove_nones(self.db)
return self.db
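# Hedged usage sketch (the database name is a placeholder, not from this module):
#   cleaner = DatabaseCleaner("my_ecoinvent_db", "brightway", None)
#   db = cleaner.prepare_datasets()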
|
11591192
|
from django.db import models
from django.utils import timezone
import subprocess
from apps.news.models import News
class Hunt(models.Model):
id = models.AutoField(primary_key=True)
datetime = models.DateTimeField(default=timezone.now)
name = models.CharField(max_length=255)
keyword = models.CharField(max_length=255)
notice = models.BooleanField(default=False)
channel = models.CharField(max_length=255, null=True, blank=True)
enable = models.BooleanField(default=True)
newss = models.ManyToManyField(News)
def __str__(self):
return str(self.id)
def setDisable(self):
self.enable = False
self.save()
def setEnable(self):
self.enable = True
self.save()
def setNoticeTrue(self):
self.notice = True
self.save()
def setNoticeFalse(self):
self.notice = False
self.save()
def run(self):
cmd = "python scripts/hunter/news/nw_hunter.py " + str(self.id)
subprocess.Popen(cmd, shell=True)
|
11591202
|
from . import enc
from base64 import urlsafe_b64encode as b64enc
from urllib.parse import quote
def _header(video_id, channel_id) -> bytes:
S1_3 = enc.rs(1, video_id)
S1_5 = enc.rs(1, channel_id) + enc.rs(2, video_id)
S1 = enc.rs(3, S1_3) + enc.rs(5, S1_5)
S3 = enc.rs(48687757, enc.rs(1, video_id))
header_replay = enc.rs(1, S1) + enc.rs(3, S3) + enc.nm(4, 1)
return b64enc(header_replay)
def _build(video_id, seektime, topchat_only, channel_id) -> str:
chattype = 4 if topchat_only else 1
if seektime < 0:
seektime = 0
timestamp = int(seektime * 1000000)
header = enc.rs(3, _header(video_id, channel_id))
timestamp = enc.nm(5, timestamp)
s6 = enc.nm(6, 0)
s7 = enc.nm(7, 0)
s8 = enc.nm(8, 0)
s9 = enc.nm(9, 4)
s10 = enc.rs(10, enc.nm(4, 0))
    chattype_field = enc.rs(14, enc.nm(1, chattype))
    s15 = enc.nm(15, 0)
    entity = b''.join((header, timestamp, s6, s7, s8, s9, s10, chattype_field, s15))
continuation = enc.rs(156074452, entity)
return quote(b64enc(continuation).decode())
def getparam(video_id, seektime=0, topchat_only=False, channel_id='') -> str:
'''
Parameter
---------
seektime : int
unit:seconds
start position of fetching chat data.
topchat_only : bool
if True, fetch only 'top chat'
'''
return _build(video_id, seektime, topchat_only, channel_id)
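# Hedged example (the video id is a placeholder): build a continuation token
# for replay chat starting 60 seconds in, top chat only.
#   token = getparam('XXXXXXXXXXX', seektime=60, topchat_only=True)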
|
11591213
|
import unittest
from unittest.mock import Mock
from kaggle_gcp import KaggleKernelCredentials, init_ucaip
from test.support import EnvironmentVarGuard
def _make_credentials():
import google.auth.credentials
return Mock(spec=google.auth.credentials.Credentials)
class TestUcaip(unittest.TestCase):
def test_user_provided_credentials(self):
credentials = _make_credentials()
env = EnvironmentVarGuard()
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'CLOUDAI')
with env:
from google.cloud import aiplatform
init_ucaip()
aiplatform.init(credentials=credentials)
self.assertNotIsInstance(aiplatform.initializer.global_config.credentials, KaggleKernelCredentials)
self.assertIsNotNone(aiplatform.initializer.global_config.credentials)
|
11591229
|
import logging
import sys
from typing import List
from scholarly import scholarly
from ..utils import dump_papers
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__name__)
scholar_field_mapper = {
"venue": "journal",
"author": "authors",
"cites": "citations",
}
process_fields = {"year": lambda x: int(x) if x.isdigit() else -1, "citations": int}
def get_scholar_papers(
title: str,
fields: List = ["title", "authors", "year", "abstract", "journal", "citations"],
*args,
**kwargs,
):
"""
Performs Google Scholar API request of a given query and returns list of papers with
fields as desired.
Args:
        title (str): Query string entered in the Google Scholar search field.
fields (list[str]): List of strings with fields to keep in output.
Returns:
list of dicts. One dict per paper.
"""
logger.info(
"NOTE: Scholar API cannot be used with Boolean logic in keywords."
"Query should be a single string to be entered in the Scholar search field."
)
if not isinstance(title, str):
raise TypeError(f"Pass str not {type(title)}")
matches = scholarly.search_pubs(title)
processed = [
{
scholar_field_mapper.get(key, key): process_fields.get(
scholar_field_mapper.get(key, key), lambda x: x
)(value)
for key, value in paper.bib.items()
if scholar_field_mapper.get(key, key) in fields
}
for paper in matches
]
return processed
def get_and_dump_scholar_papers(
title: str,
output_filepath: str,
fields: List = ["title", "authors", "year", "abstract", "journal", "citations"],
):
"""
Combines get_scholar_papers and dump_papers.
Args:
        title (str): Query string to be entered in the Google Scholar search field.
        output_filepath (str): Path where the dump will be saved.
        fields (List, optional): List of strings with fields to keep in output.
            Defaults to ['title', 'authors', 'year', 'abstract',
            'journal', 'citations'].
"""
papers = get_scholar_papers(title, fields)
dump_papers(papers, output_filepath)
def get_citations_from_title(title: str) -> int:
"""
Args:
title (str): Title of paper to be searched on Scholar.
Raises:
TypeError: If sth else than str is passed.
Returns:
int: Number of citations of paper.
"""
if not isinstance(title, str):
raise TypeError(f"Pass str not {type(title)}")
# Search for exact match
title = '"' + title.strip() + '"'
matches = scholarly.search_pubs(title)
counts = list(map(lambda p: int(p.bib["cites"]), matches))
if len(counts) == 0:
logger.warning(f"Found no match for {title}.")
return 0
if len(counts) > 1:
logger.warning(f"Found {len(counts)} matches for {title}.")
return counts[0]
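# Hedged usage note: every helper above performs a live Google Scholar request
# through `scholarly`, so calls are slow and may be rate-limited. For example
# (the title is illustrative):
#   n = get_citations_from_title("Attention is all you need")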
|
11591250
|
from django.db import models
# Create your models here.
class Destination(models.Model):
name = models.CharField(
unique=True,
max_length=50,
null=False,
blank=False,
)
description = models.TextField(
max_length=2000,
null=False,
blank=False
)
def __str__(self):
return self.name
class Cruise(models.Model):
name = models.CharField(
unique=True,
max_length=50,
null=False,
blank=False,
)
description = models.TextField(
max_length=2000,
null=False,
blank=False
)
destinations = models.ManyToManyField(
Destination,
related_name='cruises'
)
def __str__(self):
return self.name
class InfoRequest(models.Model):
name = models.CharField(
max_length=50,
null=False,
blank=False,
)
email = models.EmailField()
notes = models.TextField(
max_length=2000,
null=False,
blank=False
)
cruise = models.ForeignKey(
Cruise,
on_delete=models.PROTECT
)
|
11591271
|
from e2e import DockerTest
class TestHelp(DockerTest):
def test_guet_command_by_itself_displays_help_message(self):
self.add_command('guet')
self.execute()
self.assert_text_in_logs(0, 'usage: guet <command>')
def test_guet_command_shows_help_message_when_dash_h_is_given(self):
self.add_command('guet -h')
self.execute()
self.assert_text_in_logs(0, 'usage: guet <command>')
def test_guet_command_shows_help_message_when_dash_dash_help_is_given(self):
self.add_command('guet --help')
self.execute()
self.assert_text_in_logs(0, 'usage: guet <command>')
|
11591273
|
import pathlib
import numpy as np
import pytest
import meshio
from . import helpers
@pytest.mark.parametrize(
"mesh",
[
helpers.empty_mesh,
helpers.tri_mesh,
helpers.triangle6_mesh,
helpers.quad_mesh,
helpers.quad8_mesh,
helpers.tri_quad_mesh,
helpers.tet_mesh,
helpers.tet10_mesh,
helpers.hex_mesh,
helpers.hex20_mesh,
],
)
def test(mesh, tmp_path):
helpers.write_read(tmp_path, meshio.abaqus.write, meshio.abaqus.read, mesh, 1.0e-15)
@pytest.mark.parametrize(
"filename, ref_sum, ref_num_cells, ref_num_cell_sets",
[
("UUea.inp", 4950.0, 50, 10),
("nle1xf3c.inp", 32.215275528, 12, 3),
("element_elset.inp", 6.0, 2, 3),
("wInclude_main.inp", 1.5, 2, 0),
],
)
def test_reference_file(filename, ref_sum, ref_num_cells, ref_num_cell_sets):
this_dir = pathlib.Path(__file__).resolve().parent
filename = this_dir / "meshes" / "abaqus" / filename
mesh = meshio.read(filename)
assert np.isclose(np.sum(mesh.points), ref_sum)
assert sum(len(cells.data) for cells in mesh.cells) == ref_num_cells
assert len(mesh.cell_sets) == ref_num_cell_sets
def test_elset(tmp_path):
points = np.array(
[[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 0.5, 0.0], [0.0, 0.5, 0.0]]
)
cells = [
("triangle", np.array([[0, 1, 2]])),
("triangle", np.array([[0, 1, 3]])),
]
cell_sets = {
"right": [np.array([0]), np.array([])],
"left": [np.array([]), np.array([1])],
}
mesh_ref = meshio.Mesh(points, cells, cell_sets=cell_sets)
filepath = tmp_path / "test.inp"
meshio.abaqus.write(filepath, mesh_ref)
mesh = meshio.abaqus.read(filepath)
assert np.allclose(mesh_ref.points, mesh.points)
assert len(mesh_ref.cells) == len(mesh.cells)
for ic, cell in enumerate(mesh_ref.cells):
assert cell.type == mesh.cells[ic].type
assert np.allclose(cell.data, mesh.cells[ic].data)
assert sorted(mesh_ref.cell_sets.keys()) == sorted(mesh.cell_sets.keys())
for k, v in mesh_ref.cell_sets.items():
for ic in range(len(mesh_ref.cells)):
assert np.allclose(v[ic], mesh.cell_sets[k][ic])
|
11591275
|
import random
import os
import os.path
NAMES = ['stefan','melanie','nick','darrel','kent','simon']
AGES = list(range(1,10)) + [None]
def get_schema_path(fname):
dname = os.path.dirname(os.path.realpath(__file__))
return os.path.join(dname, fname)
def load_schema_file(fname):
fname = get_schema_path(fname)
with open(fname) as f:
return f.read()
BASIC_SCHEMA = load_schema_file('basic_schema.avsc')
def create_basic_item(i):
return {
'name' : random.choice(NAMES) + '-' + str(i),
'number' : random.choice(AGES)
}
BASIC_ITEMS = list(map(create_basic_item, range(1, 20)))  # materialize: map() is single-use in Python 3
ADVANCED_SCHEMA = load_schema_file('adv_schema.avsc')
def create_adv_item(i):
friends = map(create_basic_item, range(1,3))
family = map(create_basic_item, range(1,3))
basic = create_basic_item(i)
basic['family'] = dict(map(lambda bi: (bi['name'],bi), family))
basic['friends'] = dict(map(lambda bi: (bi['name'],bi), friends))
return basic
ADVANCED_ITEMS = list(map(create_adv_item, range(1, 20)))  # materialize: map() is single-use in Python 3
from avro import schema
from avro.datafile import DataFileReader, DataFileWriter
from avro.io import DatumReader, DatumWriter
import json
def _write_items(base_name, schema_str, items):
    avro_schema = schema.parse(schema_str)
    avro_file = base_name + '.avro'
    # Avro containers are binary files, so open in binary mode; the context
    # manager closes the writer.
    with DataFileWriter(open(avro_file, "wb"), DatumWriter(), avro_schema) as writer:
        for i in items:
            writer.append(i)
    return avro_file
def write_basic_items(base_name):
return _write_items(base_name, BASIC_SCHEMA, BASIC_ITEMS)
def write_advanced_items(base_name):
return _write_items(base_name, ADVANCED_SCHEMA, ADVANCED_ITEMS)
def cleanup(files):
for f in files:
try:
os.remove(f)
except OSError:
pass
if __name__ == "__main__":
write_advanced_items("advanced")
|
11591276
|
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
import cv2
def psnr(im1, im2):
""" im1 and im2 value must be between 0 and 255"""
im1 = np.float64(im1)
im2 = np.float64(im2)
rmse = np.sqrt(np.mean(np.square(im1[:] - im2[:])))
psnr = 20 * np.log10(255 / rmse)
return psnr, rmse
def img_to_uint8(img):
img = np.clip(img, 0, 255)
return np.round(img).astype(np.uint8)
rgb_to_ycbcr = np.array([[65.481, 128.553, 24.966],
[-37.797, -74.203, 112.0],
[112.0, -93.786, -18.214]])
ycbcr_to_rgb = np.linalg.inv(rgb_to_ycbcr)
# ycbcr_to_rgb = np.array([[1.164, 0, 1.596],
# [1.164, -0.813, -0.392],
# [1.164, 2.017, 0]])
def rgb2ycbcr(img):
""" img value must be between 0 and 255"""
img = np.float64(img)
img = np.dot(img, rgb_to_ycbcr.T) / 255.0
img = img + np.array([16, 128, 128])
return img
def ycbcr2rgb(img):
""" img value must be between 0 and 255"""
img = np.float64(img)
img = img - np.array([16, 128, 128])
img = np.dot(img, ycbcr_to_rgb.T) * 255.0
return img
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def calculate_ssim(img1, img2):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
if img1.shape[2] == 3:
ssims = []
            for i in range(3):
                ssims.append(ssim(img1[..., i], img2[..., i]))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
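# Hedged sanity check (synthetic images, not part of the original module):
if __name__ == "__main__":
    a = np.random.randint(0, 256, (64, 64, 3)).astype(np.float64)
    b = np.clip(a + np.random.normal(0, 5, a.shape), 0, 255)
    print(psnr(a, b))            # (psnr, rmse); PSNR is high for a mildly noisy copy
    print(calculate_ssim(a, b))  # SSIM close to 1.0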
|
11591313
|
import ctypes
import windows.generated_def as gdef
from ..apiproxy import ApiProxy, NeededParameter
from ..error import fail_on_zero, succeed_on_zero
class Shell32Proxy(ApiProxy):
APIDLL = "shell32"
default_error_check = staticmethod(fail_on_zero)
@Shell32Proxy()
def ShellExecuteA(hwnd, lpOperation, lpFile, lpParameters, lpDirectory, nShowCmd):
return ShellExecuteA.ctypes_function(hwnd, lpOperation, lpFile, lpParameters, lpDirectory, nShowCmd)
@Shell32Proxy()
def ShellExecuteW(hwnd, lpOperation, lpFile, lpParameters, lpDirectory, nShowCmd):
return ShellExecuteW.ctypes_function(hwnd, lpOperation, lpFile, lpParameters, lpDirectory, nShowCmd)
@Shell32Proxy()
def SHGetPathFromIDListA(pidl, pszPath):
return SHGetPathFromIDListA.ctypes_function(pidl, pszPath)
@Shell32Proxy()
def SHGetPathFromIDListW(pidl, pszPath):
return SHGetPathFromIDListW.ctypes_function(pidl, pszPath)
@Shell32Proxy(error_check=succeed_on_zero)
def SHFileOperationA(lpFileOp):
return SHFileOperationA.ctypes_function(lpFileOp)
|
11591371
|
import imp
import subprocess
import os
from string import Template
PLUGINS = [
'interpolate',
]
BASE_FOLDER = 'torch2trt_dynamic/converters'
NINJA_TEMPLATE = Template((
"rule link\n"
" command = g++ -shared -o $$out $$in -L$torch_dir/lib -L$cuda_dir/lib64 -L$trt_lib_dir -lc10 -lc10_cuda -ltorch -lcudart -lprotobuf -lprotobuf-lite -pthread -lpthread -lnvinfer\n"
"rule protoc\n"
" command = protoc $$in --cpp_out=. --python_out=.\n"
"rule cxx\n"
" command = g++ -c -fPIC $$in -I$cuda_dir/include -I$torch_dir/include -I$torch_dir/include/torch/csrc/api/include -I. -std=c++11 -I$trt_inc_dir\n"
))
PLUGIN_TEMPLATE = Template((
"build $plugin_dir/$plugin.pb.h $plugin_dir/$plugin.pb.cc $plugin_dir/${plugin}_pb2.py: protoc $plugin_dir/$plugin.proto\n"
"build $plugin.pb.o: cxx $plugin_dir/$plugin.pb.cc\n"
"build $plugin.o: cxx $plugin_dir/$plugin.cpp\n"
))
def build(cuda_dir="/usr/local/cuda",
torch_dir=imp.find_module('torch')[1],
trt_inc_dir="/usr/include/aarch64-linux-gnu",
trt_lib_dir="/usr/lib/aarch64-linux-gnu"):
global PLUGINS, BASE_FOLDER, NINJA_TEMPLATE, PLUGIN_TEMPLATE
NINJA_STR = NINJA_TEMPLATE.substitute({
'torch_dir': torch_dir,
'cuda_dir': cuda_dir,
'trt_inc_dir': trt_inc_dir,
'trt_lib_dir': trt_lib_dir,
})
plugin_o_files = []
for plugin in PLUGINS:
NINJA_STR += \
PLUGIN_TEMPLATE.substitute({
'plugin': plugin,
'plugin_dir': os.path.join(BASE_FOLDER, plugin),
})
plugin_o_files += [plugin + '.pb.o', plugin + '.o']
NINJA_STR += Template((
"build torch2trt_dynamic/libtorch2trt_dynamic.so: link $o_files\n"
)).substitute({'o_files': ' '.join(plugin_o_files)})
with open('build.ninja', 'w') as f:
f.write(NINJA_STR)
subprocess.call(['ninja'])
if __name__ == '__main__':
build()
|
11591387
|
import os
import time
import datetime
import ref
import torch
import torch.utils.data
from opts import opts
from model.Pose3D import Pose3D
from datahelpers.dataloaders.fusedDataLoader import FusionDataset
from datahelpers.dataloaders.h36mLoader import h36m
from datahelpers.dataloaders.mpiiLoader import mpii
from datahelpers.dataloaders.myposetrackLoader import posetrack
from utils.utils import adjust_learning_rate
from utils.logger import Logger
from train import train,val
from inflateScript import *
def main():
opt = opts().parse()
now = datetime.datetime.now()
logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))
if opt.loadModel == 'none':
model = inflate(opt).cuda()
elif opt.loadModel == 'scratch':
model = Pose3D(opt.nChannels, opt.nStack, opt.nModules, opt.numReductions, opt.nRegModules, opt.nRegFrames, ref.nJoints).cuda()
else :
model = torch.load(opt.loadModel).cuda()
train_loader = torch.utils.data.DataLoader(
h36m('train',opt),
batch_size = opt.dataloaderSize,
shuffle = False,
num_workers = int(ref.nThreads)
)
optimizer = torch.optim.RMSprop(
[{'params': model.parameters(), 'lr': opt.LRhg}],
alpha = ref.alpha,
eps = ref.epsilon,
weight_decay = ref.weightDecay,
momentum = ref.momentum
)
for epoch in range(1, opt.nEpochs + 1):
loss_train, acc_train = train(epoch, opt, train_loader, model, optimizer)
logger.scalar_summary('loss_train', loss_train, epoch)
logger.scalar_summary('acc_train', acc_train, epoch)
logger.write('{:8f} {:8f} \n'.format(loss_train, acc_train))
logger.close()
if __name__ == '__main__':
main()
|
11591413
|
import argparse
import cv2
import numpy as np
def build_arg_parser():
parser = argparse.ArgumentParser(description='Reconstruct the 3D map from \
the two input stereo images. Output will be saved in \'output.ply\'')
parser.add_argument("--image-left", dest="image_left", required=True,
help="Input image captured from the left")
parser.add_argument("--image-right", dest="image_right", required=True,
help="Input image captured from the right")
parser.add_argument("--output-file", dest="output_file", required=True,
help="Output filename (without the extension) where the point cloud will be saved")
return parser
def create_output(vertices, colors, filename):
colors = colors.reshape(-1, 3)
vertices = np.hstack([vertices.reshape(-1,3), colors])
ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''
with open(filename, 'w') as f:
f.write(ply_header % dict(vert_num=len(vertices)))
np.savetxt(f, vertices, '%f %f %f %d %d %d')
if __name__ == '__main__':
args = build_arg_parser().parse_args()
image_left = cv2.imread(args.image_left)
image_right = cv2.imread(args.image_right)
output_file = args.output_file + '.ply'
if image_left.shape[0] != image_right.shape[0] or \
image_left.shape[1] != image_right.shape[1]:
raise TypeError("Input images must be of the same size")
# downscale images for faster processing
image_left = cv2.pyrDown(image_left)
image_right = cv2.pyrDown(image_right)
# disparity range is tuned for 'aloe' image pair
win_size = 1
min_disp = 16
max_disp = min_disp * 9
num_disp = max_disp - min_disp # Needs to be divisible by 16
    stereo = cv2.StereoSGBM_create(minDisparity = min_disp,
                            numDisparities = num_disp,
                            blockSize = win_size,
                            uniquenessRatio = 10,
                            speckleWindowSize = 100,
                            speckleRange = 32,
                            disp12MaxDiff = 1,
                            P1 = 8*3*win_size**2,
                            P2 = 32*3*win_size**2,
                            mode = cv2.STEREO_SGBM_MODE_HH
                            )
    print("\nComputing the disparity map ...")
    disparity_map = stereo.compute(image_left, image_right).astype(np.float32) / 16.0
    print("\nGenerating the 3D map ...")
h, w = image_left.shape[:2]
focal_length = 0.8*w
# Perspective transformation matrix
Q = np.float32([[1, 0, 0, -w/2.0],
[0,-1, 0, h/2.0],
[0, 0, 0, -focal_length],
[0, 0, 1, 0]])
points_3D = cv2.reprojectImageTo3D(disparity_map, Q)
colors = cv2.cvtColor(image_left, cv2.COLOR_BGR2RGB)
mask_map = disparity_map > disparity_map.min()
output_points = points_3D[mask_map]
output_colors = colors[mask_map]
print "\nCreating the output file ...\n"
create_output(output_points, output_colors, output_file)
#cv2.imshow('Left Image', image_left)
#cv2.imshow('Right Image', image_right)
#cv2.imshow('Disparity Map', (disparity_map - min_disp) / num_disp)
#cv2.waitKey()
#cv2.destroyAllWindows()
|
11591420
|
from typing import NamedTuple, Optional
from typing_extensions import Protocol
from sciencebeam_parser.config.config import AppConfig
from sciencebeam_parser.external.wapiti.wrapper import LazyWapitiBinaryWrapper
# using protocol to avoid delft import where we just need the typing hint
class DownloadManagerProtocol(Protocol):
def get_local_file(self, file_url: str, auto_uncompress: bool = True) -> str:
pass
def is_downloaded(self, file_url: str, auto_uncompress: bool = True) -> bool:
pass
def download(
self, file_url: str,
        local_file: Optional[str] = None,
auto_uncompress: bool = True,
skip_if_downloaded: bool = True
) -> str:
pass
def download_if_url(self, file_url_or_path: str, **kwargs) -> str:
pass
class AppContext(NamedTuple):
app_config: AppConfig
download_manager: DownloadManagerProtocol
lazy_wapiti_binary_wrapper: LazyWapitiBinaryWrapper
|
11591440
|
from test_helper import unittest, paypal, client_id, client_secret, assert_regex_matches
from paypalrestsdk.openid_connect import Tokeninfo, Userinfo, authorize_url, logout_url, endpoint
class TestTokeninfo(unittest.TestCase):
def test_create(self):
self.assertRaises(
paypal.ResourceNotFound, Tokeninfo.create, "invalid-code")
def test_userinfo(self):
self.assertRaises(paypal.UnauthorizedAccess, Tokeninfo().userinfo, {})
def test_refresh(self):
self.assertRaises(paypal.ResourceNotFound, Tokeninfo().refresh, {})
def test_create_with_refresh_token(self):
self.assertRaises(
paypal.ResourceNotFound, Tokeninfo.create_with_refresh_token, "invalid-token")
class TestUserinfo(unittest.TestCase):
def test_get(self):
self.assertRaises(paypal.UnauthorizedAccess, Userinfo.get, "invalid")
class TestUrls(unittest.TestCase):
def test_authorize_url(self):
url = authorize_url()
assert_regex_matches(self, url, 'response_type=code')
assert_regex_matches(self, url, 'scope=openid')
assert_regex_matches(self, url, 'client_id=%s' % (client_id))
assert_regex_matches(self, url, 'https://www.sandbox.paypal.com')
self.assertEqual(endpoint(), 'https://api.sandbox.paypal.com')
def test_live_mode_url(self):
try:
paypal.configure(
mode='live', client_id=client_id, client_secret=client_secret)
url = authorize_url()
assert_regex_matches(self, url, 'response_type=code')
assert_regex_matches(self, url, 'scope=openid')
assert_regex_matches(self, url, 'client_id=%s' % (client_id))
assert_regex_matches(self, url, 'https://www.paypal.com')
self.assertEqual(endpoint(), 'https://api.paypal.com')
finally:
paypal.configure(
mode='sandbox', client_id=client_id, client_secret=client_secret)
def test_authorize_url_options(self):
url = authorize_url({'scope': 'openid profile'})
        assert_regex_matches(self, url, r'scope=openid\+profile')
def test_authorize_url_using_tokeninfo(self):
url = Tokeninfo.authorize_url({'scope': 'openid profile'})
        assert_regex_matches(self, url, r'scope=openid\+profile')
def test_logout_url(self):
url = logout_url()
assert_regex_matches(self, url, 'logout=true')
def test_logout_url_options(self):
url = logout_url({'id_token': '<PASSWORD>'})
assert_regex_matches(self, url, 'id_token=<PASSWORD>')
def test_logout_url_using_tokeninfo(self):
url = Tokeninfo({'id_token': '<PASSWORD>'}).logout_url()
assert_regex_matches(self, url, 'id_token=1234')
|
11591445
|
import collections
import datetime
import itertools
import json
import math
from operator import itemgetter
import random
import re
import sys
from typing import List, Optional, Union
import pytz
from pytz import UnknownTimeZoneError
from .sql.casts import get_time_formatter
from .sql.internal_utils.joins import (
CROSS_JOIN, FULL_JOIN, INNER_JOIN, LEFT_ANTI_JOIN, LEFT_JOIN, LEFT_SEMI_JOIN, RIGHT_JOIN
)
from .sql.schema_utils import get_on_fields
from .sql.types import create_row, Row, row_from_keyed_values
from .sql.utils import IllegalArgumentException
class Tokenizer:
def __init__(self, expression: str):
self.expression = expression
def get_next(self, separator: Optional[Union[List[str], str]] = None) -> str:
if isinstance(separator, list):
separator_positions_and_lengths = [
(self.expression.find(s), s)
for s in separator if s in self.expression
]
if separator_positions_and_lengths:
sep_pos, separator = min(separator_positions_and_lengths, key=itemgetter(0))
else:
sep_pos = -1
elif separator:
sep_pos = self.expression.find(separator)
else:
sep_pos = -1
if sep_pos < 0:
value = self.expression
self.expression = ''
return value
value = self.expression[:sep_pos]
self.expression = self.expression[sep_pos + len(separator):]
return value
def parse_file_uri(expr):
t = Tokenizer(expr)
scheme = t.get_next('://')
domain = t.get_next('/')
last_slash_position = t.expression.rfind('/')
folder_path = '/' + t.expression[:last_slash_position + 1]
file_pattern = t.expression[last_slash_position + 1:]
return scheme, domain, folder_path, file_pattern
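# Worked example (illustrative path):
#   parse_file_uri('s3://bucket/data/part-*.csv')
#   -> ('s3', 'bucket', '/data/', 'part-*.csv')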
def format_file_uri(scheme, domain, *local_path_components):
return f'{scheme}://{domain}{"/".join(local_path_components)}'
def reservoir_sample_and_size(iterable, k, seed):
"""
Returns a sample of k items of iterable and its original size
    If the iterable contains fewer than k items, the sample is a list of those items
Algorithm used is reservoir sampling.
:rtype list
"""
random.seed(seed)
# Put the first k elements in the reservoir.
reservoir = list(itertools.islice(iterable, k))
# If we have consumed all the elements, return them. Otherwise do the replacement.
reservoir_size = len(reservoir)
if reservoir_size < k:
return reservoir, reservoir_size
# If input size > k, continue the sampling process.
for reservoir_size, item in enumerate(iterable, start=k + 1):
# There are k elements in the reservoir, and the l-th element has been
# consumed. It should be chosen with probability k/l. The expression
# below is a random int chosen uniformly from [0, l)
        replacementIndex = random.randrange(reservoir_size)
        if replacementIndex < k:
            reservoir[replacementIndex] = item
return reservoir, reservoir_size
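# Hedged illustration (synthetic input, not from the original module):
#   sample, size = reservoir_sample_and_size(iter(range(10)), 3, seed=42)
#   -> len(sample) == 3 and size == 10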
def compute_weighted_percentiles(weighted_values, number_of_percentiles, key=lambda x: x):
"""
Compute weighted percentiles from a list of values and weights.
number_of_percentiles evenly distributed percentiles values will be returned,
including the 0th (minimal value) and the 100th (maximal value).
A custom key function can be supplied to customize the sort order
:type weighted_values: list of tuple
:type number_of_percentiles: int
:type key: function
Examples with 0th, 50th and 100th percentiles:
>>> compute_weighted_percentiles([(2, 0.2), (1, 0.1), (3, 0.7)], 3)
[1, 3, 3]
>>> compute_weighted_percentiles([(1, 10), (2, 20), (3, 20)], 3)
[1, 2, 3]
>>> compute_weighted_percentiles([(i, 1) for i in range(1, 101)], 1)
Traceback (most recent call last):
...
ValueError: number_of_percentiles must be at least 2
>>> compute_weighted_percentiles([(i, 1) for i in range(1, 101)], 2)
[1, 100]
>>> compute_weighted_percentiles([(i, 1) for i in range(1, 101)], 3)
[1, 50, 100]
>>> compute_weighted_percentiles([(i, 1) for i in range(1, 101)], 4)
[1, 34, 67, 100]
>>> compute_weighted_percentiles([(i, 1) for i in range(1, 101)], 5)
[1, 25, 50, 75, 100]
>>> compute_weighted_percentiles([
... ((1, "b"), 10),
... ((2, "c"), 20),
... ((3, "a"), 20)
... ], 3, key=lambda row: row[1])
[(3, 'a'), (1, 'b'), (2, 'c')]
"""
if number_of_percentiles == 1:
raise ValueError("number_of_percentiles must be at least 2")
ordered_values = sorted(weighted_values, key=lambda weighted_value: key(weighted_value[0]))
total_weight = sum(weight for value, weight in ordered_values)
bounds = []
cumulative_weight = 0
for value, weight in ordered_values:
cumulative_weight += weight
while len(bounds) / (number_of_percentiles - 1) <= cumulative_weight / total_weight:
bounds.append(value)
return bounds
def get_keyfunc(cols, schema, nulls_are_smaller=False):
"""
Return a function that maps a row to a tuple of some of its columns values
"""
def key(row):
"""
Returns a tuple designed for comparisons based on a row
        Each requested column is mapped to two values:
        - The first indicates whether the column value is None or not
        - The second is the column value
        This prevents comparisons between None and non-None values
        It also allows defining how None values should be ordered
"""
values = []
for col in cols:
value = col.eval(row, schema)
values += (
(value is None) != nulls_are_smaller,
value
)
return tuple(values)
return key
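# Worked example (illustrative, not from the original module): with
# nulls_are_smaller=False, a None value maps to (True, None) and a value v
# maps to (False, v), so every non-null row sorts before the null rows and
# None is never compared directly against a concrete value.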
FULL_WIDTH_REGEX = re.compile(
"["
+ r"\u1100-\u115F"
+ r"\u2E80-\uA4CF"
+ r"\uAC00-\uD7A3"
+ r"\uF900-\uFAFF"
+ r"\uFE10-\uFE19"
+ r"\uFE30-\uFE6F"
+ r"\uFF00-\uFF60"
+ r"\uFFE0-\uFFE6"
+ "]"
)
def str_half_width(value):
"""
    Compute string length, with full-width characters counting as 2 normal ones
"""
string = format_cell(value)
if string is None:
return 0
if not isinstance(string, str):
string = str(string)
return len(string) + len(FULL_WIDTH_REGEX.findall(string))
def pad_cell(cell, truncate, col_width):
"""
    Pad the formatted value "cell" so that it fits col_width; right-justify when truncate > 0, as DataFrame.show() does
:param cell: Any
:param truncate: int
:param col_width: int
:return:
"""
cell = format_cell(cell)
cell_width = col_width - str_half_width(cell) + len(cell)
if truncate > 0:
return cell.rjust(cell_width)
return cell.ljust(cell_width)
def format_cell(value):
"""
Convert a cell value to a string using the logic needed in DataFrame.show()
"""
if value is None:
return "null"
if isinstance(value, bool):
return str(value).lower()
if isinstance(value, Row):
return f"[{', '.join(format_cell(sub_value) for sub_value in value)}]"
if isinstance(value, dict):
return "[{0}]".format(
", ".join(
f"{format_cell(key)} -> {format_cell(sub_value)}"
for key, sub_value in value.items()
)
)
return str(value)
class MonotonicallyIncreasingIDGenerator:
def __init__(self, partition_index):
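        # 8589934592 == 1 << 33: the partition index is packed into the upper
        # bits, mirroring Spark's monotonically_increasing_id layout (an
        # assumption based on Spark's documented behavior, not this module).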
self.value = partition_index * 8589934592 - 1
def __iter__(self):
return self
def __next__(self):
self.value += 1
return self.value
# pylint: disable=W0511
# todo: store random-related utils in a separated module
class XORShiftRandom:
# pylint: disable=W0511
# todo: align generated values with the ones in Spark
def __init__(self, init):
self.seed = XORShiftRandom.hashSeed(init)
self.haveNextNextGaussian = False
self.nextNextGaussian = 0
def nextValue(self, bits):
seed = self.seed
nextSeed = seed ^ (seed << 21)
nextSeed ^= (nextSeed >> 35)
nextSeed ^= (nextSeed << 4)
self.seed = nextSeed
return int(nextSeed & ((1 << bits) - 1))
def nextDouble(self):
a = self.nextValue(26)
b = self.nextValue(27)
return ((a << 27) + b) * 1.1102230246251565E-16
def nextGaussian(self):
if self.haveNextNextGaussian:
self.haveNextNextGaussian = False
return self.nextNextGaussian
v1 = 0
v2 = 0
s = 0
while not 0 < s < 1:
v1 = 2.0 * self.nextDouble() - 1
v2 = 2.0 * self.nextDouble() - 1
s = v1 * v1 + v2 * v2
multiplier = math.sqrt(-2 * math.log(s) / s)
self.nextNextGaussian = v2 * multiplier
self.haveNextNextGaussian = True
return v1 * multiplier
@staticmethod
def hashSeed(seed):
as_bytes = seed.to_bytes(8, "big")
lowBits = MurmurHash3.bytesHash(as_bytes)
highBits = MurmurHash3.bytesHash(as_bytes, lowBits)
return (highBits << 32) | (lowBits & 0xFFFFFFFF)
class MurmurHash3:
@staticmethod
def bytesHash(data, seed=0x3c074a61):
length = len(data)
h = seed
# Body
i = 0
while length >= 4:
k = data[i + 0] & 0xFF
k |= (data[i + 1] & 0xFF) << 8
k |= (data[i + 2] & 0xFF) << 16
k |= (data[i + 3] & 0xFF) << 24
h = MurmurHash3.mix(h, k)
i += 4
length -= 4
# Tail
k = 0
if length == 3:
k ^= (data[i + 2] & 0xFF) << 16
if length >= 2:
k ^= (data[i + 1] & 0xFF) << 8
if length >= 1:
k ^= (data[i + 0] & 0xFF)
h = MurmurHash3.mixLast(h, k)
# Finalization
return MurmurHash3.finalizeHash(h, len(data))
@staticmethod
def mix(h, data):
h = MurmurHash3.mixLast(h, data)
h = MurmurHash3.rotl(h, 13)
return h * 5 + 0xe6546b64
@staticmethod
def finalizeHash(h, length):
return MurmurHash3.avalanche(h ^ length)
@staticmethod
def avalanche(h):
h ^= h >> 16
h *= 0x85ebca6b
h ^= h >> 13
h *= 0xc2b2ae35
h ^= h >> 16
return h
@staticmethod
def mixLast(h, k):
k *= 0xcc9e2d51
k = MurmurHash3.rotl(k, 15)
k *= 0x1b873593
return h ^ k
@staticmethod
def rotl(i, distance):
return i << distance
def merge_rows(left, right):
return create_row(
itertools.chain(left.__fields__, right.__fields__),
left + right
)
def merge_rows_joined_on_values(left, right, left_schema, right_schema, how, on):
left_names = left_schema.names
right_names = right_schema.names
left_on_fields, right_on_fields = get_on_fields(left_schema, right_schema, on)
on_parts = [
(on_field, left[on_field] if left is not None else right[on_field])
for on_field in on
]
if left is None and how in (FULL_JOIN, RIGHT_JOIN):
left = create_row(left_names, [None for _ in left_names])
if right is None and how in (LEFT_JOIN, FULL_JOIN):
right = create_row(right_names, [None for _ in right_names])
left_parts = (
(field.name, value)
for field, value in zip(left_schema.fields, left)
if field not in left_on_fields
)
if how in (INNER_JOIN, CROSS_JOIN, LEFT_JOIN, FULL_JOIN, RIGHT_JOIN):
right_parts = (
(field.name, value)
for field, value in zip(right_schema.fields, right)
if field not in right_on_fields
)
elif how in (LEFT_SEMI_JOIN, LEFT_ANTI_JOIN):
right_parts = ()
else:
raise IllegalArgumentException(f"Argument 'how' cannot be '{how}'")
return row_from_keyed_values(itertools.chain(on_parts, left_parts, right_parts))
def strhash(string):
"""
Old python hash function as described in PEP 456, excluding prefix, suffix and mask.
:param string: string to hash
:return: hash
"""
if string == "":
return 0
x = ord(string[0]) << 7
for c in string[1:]:
        x = ((1000003 * x) ^ ord(c)) & ((1 << 32) - 1)
x = (x ^ len(string))
return x
def portable_hash(x):
"""
This function returns consistent hash code for builtin types, especially
for None and tuple with None.
The algorithm is similar to that one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if x is None:
return 0
if isinstance(x, list):
return portable_hash(tuple(x))
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
if isinstance(x, str):
return strhash(x)
if isinstance(x, datetime.datetime):
return portable_hash(x.timetuple())
return hash(x)
def parse_tz(tz):
"""
Parse a string referencing a timezone which is either supported by pytz or
in a GMT+1 or GMT+1:30 format.
Returns a datetime.tzinfo if it was able to parse the string, None otherwise
>>> parse_tz("GMT")
<StaticTzInfo 'GMT'>
>>> parse_tz("Europe/Paris")
<DstTzInfo 'Europe/Paris' LMT+0:09:00 STD>
>>> parse_tz("GMT+1")
pytz.FixedOffset(60)
>>> parse_tz("GMT+1:30")
pytz.FixedOffset(90)
>>> parse_tz("MalformedString") # returns None
"""
try:
return pytz.timezone(tz)
except UnknownTimeZoneError:
GMT_PATTERN = r'GMT(?P<sign>[+-])(?P<hours>[0-9]{1,2})(?::(?P<minutes>[0-9]{2}))?'
match = re.match(GMT_PATTERN, tz)
if match:
return parse_gmt_based_offset(match)
return None
def parse_gmt_based_offset(match):
# GMT+2 or GMT+2:30 case
sign, hours, minutes = match.groups()
sign = -1 if sign == "-" else 1
try:
hours = int(hours)
minutes = int(minutes) if minutes else 0
except ValueError:
return None
if 0 <= hours < 24 and 0 <= minutes < 60:
offset = sign * (hours * 60 + minutes)
return pytz.FixedOffset(offset)
return None
def half_up_round(value, scale):
"""
Round values using the "half up" logic
See more: https://docs.python.org/3/library/decimal.html#rounding-modes
>>> half_up_round(7.5, 0)
8.0
>>> half_up_round(6.5, 0)
7.0
>>> half_up_round(-7.5, 0)
-8.0
>>> half_up_round(-6.5, 0)
-7.0
"""
# Python2 and Python3's round behavior differs for rounding e.g. 0.5
# hence we handle the "half" case so that it is rounded up
scaled_value = (value * (10 ** scale))
removed_part = scaled_value % 1
if removed_part == 0.5:
sign = -1 if value < 0 else 1
value += 10 ** -(scale + 1) * sign
return round(value, scale)
def half_even_round(value, scale):
"""
Round values using the "half even" logic
See more: https://docs.python.org/3/library/decimal.html#rounding-modes
>>> half_even_round(7.5, 0)
8.0
>>> half_even_round(6.5, 0)
6.0
>>> half_even_round(-7.5, 0)
-8.0
>>> half_even_round(-6.5, 0)
-6.0
"""
# Python2 and Python3's round behavior differs for rounding e.g. 0.5
# hence we handle the "half" case so that it round even up and odd down
if scale > 0:
return round(value, scale)
scaled_value = (value * (10 ** scale))
removed_part = scaled_value % 1
if removed_part == 0.5:
rounded_part = int(scaled_value)
is_even = (rounded_part + max(0, scale)) % 2 == 0
sign = -1 if value < 0 else 1
if is_even:
value -= 10 ** -(scale + 1) * sign
else:
value += 10 ** -(scale + 1) * sign
return round(value, scale)
def levenshtein_distance(str1, str2):
if str1 == "":
return len(str2)
if str2 == "":
return len(str1)
return min(
levenshtein_distance(str1[1:], str2[1:]) + (str1[0] != str2[0]),
levenshtein_distance(str1[1:], str2) + 1,
levenshtein_distance(str1, str2[1:]) + 1
)
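# The recursive implementation above is exponential in the string lengths.
# A minimal memoized sketch (an alternative, not part of the original module):
import functools

@functools.lru_cache(maxsize=None)
def _levenshtein_distance_cached(str1, str2):
    if str1 == "":
        return len(str2)
    if str2 == "":
        return len(str1)
    return min(
        _levenshtein_distance_cached(str1[1:], str2[1:]) + (str1[0] != str2[0]),
        _levenshtein_distance_cached(str1[1:], str2) + 1,
        _levenshtein_distance_cached(str1, str2[1:]) + 1,
    )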
def get_json_encoder(options):
"""
    Returns a JSONEncoder subclass which converts Rows to JSON with the same
    behavior and conversion logic as PySpark.
    :param options: dict of options; "dateformat" and "timestampformat" define
        how dates and timestamps are converted to strings
    :return: type
"""
date_format = options.get("dateformat", "yyyy-MM-dd")
timestamp_format = options.get("timestampformat", "yyyy-MM-dd'T'HH:mm:ss.SSSXXX")
date_formatter = get_time_formatter(date_format)
timestamp_formatter = get_time_formatter(timestamp_format)
class CustomJSONEncoder(json.JSONEncoder):
def encode(self, o):
def encode_rows(item):
if isinstance(item, Row):
return collections.OrderedDict(
(key, encode_rows(value))
for key, value in zip(item.__fields__, item)
)
if isinstance(item, (list, tuple)):
return [encode_rows(e) for e in item]
if isinstance(item, dict):
return collections.OrderedDict(
(key, encode_rows(value))
for key, value in item.items()
)
return item
return super().encode(encode_rows(o))
# default can be overridden if passed a parameter during init
# pylint doesn't like the behavior but it is the expected one
# pylint: disable=E0202
def default(self, o):
if isinstance(o, datetime.datetime):
return timestamp_formatter(o)
if isinstance(o, datetime.date):
return date_formatter(o)
return super().default(o)
return CustomJSONEncoder
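# Hedged usage sketch (the options dict is illustrative):
#   encoder_cls = get_json_encoder({"dateformat": "yyyy-MM-dd"})
#   json.dumps(some_row, cls=encoder_cls)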
|
11591469
|
getObject = {
"certificate": "-----BEGIN CERTIFICATE----- \nMIIEJTCCAw2gAwIBAgIDCbQ0MA0GCSqGSIb3DQEBCwUAMEcxCzAJBgNVBAYTAlVT"
" -----END CERTIFICATE-----",
"certificateSigningRequest": "-----BEGIN CERTIFICATE REQUEST-----\n"
"MIIC1jCCAb4CAQAwgZAxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhh123456QMA4G\n"
"-----END CERTIFICATE REQUEST-----",
"commonName": "techbabble.xyz",
"createDate": "2016-01-20T10:56:44-06:00",
"id": 123456,
"intermediateCertificate": "",
"keySize": 258369,
"modifyDate": "2019-06-05T14:10:40-06:00",
"organizationName": "Unspecified",
"privateKey": "-----<KEY>"
"-----END RSA PRIVATE KEY-----",
"validityBegin": "2016-01-19T17:59:37-06:00",
"validityDays": 0,
"validityEnd": "2017-01-20T13:48:41-06:00"
}
createObject = {}
editObject = True
deleteObject = True
|
11591494
|
from ansiblemdgen.Config import SingleConfig
from ansiblemdgen.Utils import SingleLog
import os
from os import walk
import yaml
class WriterBase:
config = None
def __init__(self):
self.config = SingleConfig()
self.log = SingleLog()
self.log.info("Base directory: "+self.config.get_base_dir())
def makeDocsDir(self, doc_directory):
self.log.debug("(makeDocsDir) Output Directory: "+doc_directory)
if not os.path.exists(doc_directory):
os.makedirs(doc_directory)
def iterateOnFilesAndDirectories(self, directory, output_directory):
self.log.debug("(iterateOnFilesAndDirectories) directory: "+ directory)
self.log.debug("(iterateOnFilesAndDirectories) output_directory: "+ output_directory)
for (dirpath, dirnames, filenames) in walk(directory):
self.log.debug("(iterateOnFilesAndDirectories) dirpath: "+ dirpath)
relPath = dirpath.replace(directory,"")
for filename in filenames:
#ignore any existing md files and vault encrypted files
if not filename.endswith('.md') and self.isFileVaultEncrypted(dirpath, filename) is False:
self.createMDFile(dirpath, filename, output_directory+"/"+relPath)
def iterateOnCombinations(self, directory, combinations, output_directory):
for combination in combinations:
self.createMDCombinationFile(combination['filename'], directory, output_directory, combination['files_to_combine'])
def isFileVaultEncrypted(self, directory, filename):
with open(directory+"/"+filename, 'r') as stream:
data = stream.readlines()
if data[0].startswith('$ANSIBLE_VAULT;1.1;AES256'):
return True
else:
return False
|
11591538
|
import os, re
from time import sleep
from WhatsAppManifest.adb.base import WhatsAppManifest
from WhatsAppManifest.consts import _PACKAGE_NAME_
from WhatsAppManifest.adb.device import Device
from WhatsAppManifest.manifest.android import AndroidKeyEvents
from WhatsAppManifest.automator.whatsapp.database import WhatsAppDatabaseMSGStore
from WhatsAppManifest.automator.whatsapp.utils import re_open_package
from WhatsAppManifest.manifest.android.activity import Activities
from WhatsAppManifest.manifest.whatsapp.contact_picker import ContactPicker
from WhatsAppManifest.manifest.whatsapp.api_send import APISend
from WhatsAppManifest.automator.whatsapp.database.objects import Message
class Conversation(WhatsAppManifest):
_device: Device = None
_msgstore: WhatsAppDatabaseMSGStore = None
def __init__(self, device: Device):
self.build_logger(type(self).__name__)
self._device = device
self._msgstore = WhatsAppDatabaseMSGStore(device=device)
def send_message(self, jid: str, message: str, re_open: bool = True, wait_send_complete: bool = False,
interval: float = 1.5, retries: int = 3) -> Message:
"""
Responsible method for sending text
:param jid:
:type jid:
:param message:
:type message:
:param re_open:
:type re_open:
:param wait_send_complete:
:type wait_send_complete:
:return:
:rtype:
"""
if re_open:
re_open_package(device=self._device, package=_PACKAGE_NAME_)
picker = ContactPicker()
# Command constructor
command = picker.build_send_message(jid, message)
self.logger.info(f"Opening conversation with contact {jid}")
command_output = self._device.adb_device.shell(command)
self.logger.debug(f"{command_output}")
# We need to wait for the conversation to enter
self._device.wait_activity(activity=Activities.WhatsAppConversation, interval=interval, retries=retries)
self.logger.info(f"Pressing the \"ENTER\" key")
self._device.send_keyevent(AndroidKeyEvents.ENTER)
if wait_send_complete:
self.logger.info(f"Waiting confirmation")
while not self._msgstore.chat_last_message_has_sent(jid):
sleep(0.2)
self.logger.info(f"The message has not yet been sent")
return self._msgstore.last_contact_message(jid)
def create_chat(self, phone_number) -> bool:
"""
Method responsible for creating a chat (using the API) from a phone number, this is necessary for the contact to start appearing in the Contact Picker
:param phone_number:
:type phone_number:
:return: Chat created
:rtype: bool
"""
"""
Create chat via api command constructor
"""
api_send = APISend()
command = api_send.build_apo_send(phone_number)
self._device.adb_device.shell(command)
# Wait activity open
self._device.wait_activity(activity=Activities.WhatsAppConversation)
return self.chat_exists(self.phone_str_to_jid(phone_number))
def send_media(self, jid: str, file_path: str, re_open: bool = True, wait_send_complete: bool = False) -> Message:
"""
Responsible method for sending media
:param jid:
:type jid:
:param file_path:
:type file_path:
:param re_open:
:type re_open:
:param wait_send_complete:
:type wait_send_complete:
:return:
:rtype:
"""
file_name = os.path.basename(file_path)
if re_open:
re_open_package(device=self._device, package=_PACKAGE_NAME_)
self._device.adb_device.push(file_path, f"/data/local/{file_name}")
# Send media command build
picker = ContactPicker()
command = picker.build_send_media(jid, file_name)
self.logger.info(f"Sending media to contact {jid}")
command_output = self._device.adb_device.shell(command)
self.logger.debug(f"{command_output}")
# We can get the activity for selecting the contact or viewing the media
self._device.wait_activity(activity=[
Activities.WhatsAppContactPicker,
Activities.WhatsAppGalleryPickerMediaPreview
])
if self._device.is_current_activity(Activities.WhatsAppContactPicker):
self._device.send_keyevent(AndroidKeyEvents.TAB, repeats=3)
self._device.send_keyevent(AndroidKeyEvents.ENTER, repeats=1)
if self._device.is_current_activity(Activities.WhatsAppConversation):
if wait_send_complete:
self.logger.info(f"Waiting confirmation")
while not self._msgstore.chat_last_message_has_sent(jid):
sleep(0.2)
self.logger.info(f"The message has not yet been sent")
return self._msgstore.last_contact_message(jid)
elif self._device.is_current_activity(Activities.WhatsAppGalleryPickerMediaPreview):
self._device.send_keyevent(AndroidKeyEvents.TAB, repeats=6)
self._device.send_keyevent(AndroidKeyEvents.ENTER, repeats=1)
if self._device.is_current_activity(Activities.WhatsAppConversation):
if wait_send_complete:
self.logger.info(f"Waiting confirmation")
while not self._msgstore.chat_last_message_has_sent(jid):
sleep(0.2)
self.logger.info(f"The message has not yet been sent")
return self._msgstore.last_contact_message(jid)
def chat_exists(self, jid: str) -> bool:
return self._msgstore.chat_exists(jid)
def phone_str_to_jid(self, phone: str) -> str:
phone = re.sub("[^0-9]", "", phone)
return f"{<EMAIL>"
def get_jid_from_phone_number(self, phone: str) -> str:
return self._msgstore.get_jid_from_number(phone).raw_string
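# A minimal usage sketch (hedged: assumes a connected Device instance; the
# constructor argument and phone number below are illustrative, not part of
# this module's API):
#
#     device = Device("emulator-5554")
#     conversation = Conversation(device)
#     msg = conversation.send_message(
#         jid=conversation.phone_str_to_jid("+1 555 000 0000"),
#         message="hello",
#         wait_send_complete=True,
#     )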
|
11591546
|
from .base_request import BaseRequest
from .console import embed, shell_entry
from .misc import decode_text_from_webwx, enhance_connection, enhance_webwx_request, ensure_list, get_receiver, \
get_text_without_at_bot, get_user_name, handle_response, match_attributes, match_name, match_text, repr_message, \
smart_map, start_new_thread, wrap_user_name
from .puid_map import PuidMap
from .tools import detect_freq_limit, dont_raise_response_error, ensure_one, mutual_friends
|
11591564
|
from __future__ import absolute_import
from celery import Celery, platforms
from library.config.database import redis_config
platforms.C_FORCE_ROOT = True
celery_redis_conf = redis_config['session']
BROKER_URL = 'redis://:{pwd}@{host}:{port}/{db}'.format(
    pwd=celery_redis_conf['pwd'],
    host=celery_redis_conf['host'],
    port=celery_redis_conf['port'],
    db=celery_redis_conf['db'],
)
CELERY_RESULT_BACKEND = BROKER_URL
app = Celery('lykops', backend=BROKER_URL, broker=BROKER_URL, include=['lykops.tasks'])
app.conf.update(
task_serializer='json',
accept_content=['json'], # Ignore other content
result_serializer='json',
timezone='Asia/Shanghai',
enable_utc=True,
)
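# A minimal sketch of a task this app would register (assumes a lykops/tasks.py
# module matching the `include` list above; the import path and task body are
# illustrative, not part of this file):
#
#     from lykops.celery import app
#
#     @app.task
#     def add(x, y):
#         return x + y
#
# It can then be queued with add.delay(2, 3), with results stored in the Redis
# backend configured above.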
|
11591595
|
BASE_INDEX = 1
"""Index of "base" points (96% of the training set picked at random)"""
VALID_INDEX = 2
"""Index of "valid" points (2% of the training set picked at random)"""
HIDDEN_INDEX = 3
"""Index of "hidden" points (2% of the training set picked at random)"""
PROBE_INDEX = 4
"""Index of "probe" points (1/3 of the test set picked at random)"""
QUAL_INDEX = 5
"""Index of "qual" points (2/3 of the test set picked at random)"""
USER_INDEX = 0
"""Index of user ID in data point tuple for all data point arrays"""
MOVIE_INDEX = 1
"""Index of movie ID in data point tuple for all data point arrays"""
TIME_INDEX = 2
"""Index of time stamp in data point tuple for all data point arrays"""
RATING_INDEX = 3
"""Index of rating in data point tuple for all data point arrays"""
SVD_FEATURE_VALUE_INITIAL = 0.01
"""Default value for initial algorithm predictions"""
BLENDING_RATIO = 25
"""Blending ratio (K) described by funny to blend global mean and movie mean"""
|
11591599
|
import mmap
import fcntl
class MmapIo:
    def __init__(self, filepath):
        self.mmap_filepath = filepath
        self.f = None
        self.mm = None
def open(self):
self.f = open(self.mmap_filepath, "r+b")
self.mm = mmap.mmap(fileno=self.f.fileno(), length=0, flags=mmap.MAP_SHARED, prot=mmap.PROT_READ | mmap.PROT_WRITE)
def _ex_lock(self):
try:
fcntl.flock(self.f.fileno(), fcntl.LOCK_EX)
return True
except IOError:
return False
def _ex_unlock(self):
fcntl.flock(self.f.fileno(), fcntl.LOCK_UN)
    def read(self, off, size):
        return self.mm[off:off + size]
def write(self, off, data):
self.mm.seek(off)
self.mm.write(data)
    def close(self):
        if self.mm is not None:
            self.mm.close()
            self.mm = None
        if self.f is not None:
            # also release the file handle opened in open()
            self.f.close()
            self.f = None
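# A minimal usage sketch (assumes "data.bin" already exists and is non-empty,
# since mmap cannot map an empty file):
#
#     mio = MmapIo("data.bin")
#     mio.open()
#     if mio._ex_lock():
#         try:
#             mio.write(0, b"\x01\x02")
#             print(mio.read(0, 2))
#         finally:
#             mio._ex_unlock()
#     mio.close()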
|
11591651
|
import django
from schema_graph.schema import get_schema
DJANGO_LT_19 = django.VERSION < (1, 9, 0)
def test_abstract_models():
expected = {
"django.contrib.auth": ("AbstractBaseUser", "AbstractUser", "PermissionsMixin"),
"django.contrib.sessions": ("AbstractBaseSession",),
"tests.inheritance": (
"Abstract",
"AbstractBase",
"AbstractSubclass1",
"AbstractSubclass2",
),
"tests.not_installed.models": ("AbstractNotInstalled",),
}
if DJANGO_LT_19:
expected.pop("django.contrib.sessions")
assert get_schema().abstract_models == expected
def test_models():
expected = {
"django.contrib.auth": ("Group", "Permission", "User"),
"django.contrib.contenttypes": ("ContentType",),
"django.contrib.sessions": ("Session",),
"django.contrib.sites": ("Site",),
"tests.app_a": ("InterAppSubclass",),
"tests.app_b": ("InterAppForeignKey",),
"tests.app_c": ("InterAppOneToOne",),
"tests.app_d": ("InterAppManyToMany", "InterAppProxy"),
"tests.basic": (
"OutgoingForeignKey",
"OutgoingManyToMany",
"OutgoingOneToOne",
"SelfReference",
"Target",
),
"tests.generic": ("GenericFK",),
"tests.inheritance": (
"AbstractMultipleInheritance",
"Concrete",
"ConcreteBase",
"ConcreteInheritance",
"ConcreteSubclass1",
"ConcreteSubclass2",
"MixedMultipleInheritance",
"SubSubclass",
"Subclass",
),
"tests.installed": ("ConcreteInstalled",),
"tests.proxy": ("ProxyNode", "ProxyNode2", "Target"),
}
assert get_schema().models == expected
def test_foreign_key():
expected = [
(
("django.contrib.auth", "Permission"),
("django.contrib.contenttypes", "ContentType"),
),
(("tests.app_b", "InterAppForeignKey"), ("django.contrib.auth", "User")),
(("tests.basic", "OutgoingForeignKey"), ("tests.basic", "Target")),
(("tests.basic", "SelfReference"), ("tests.basic", "SelfReference")),
(
("tests.generic", "GenericFK"),
("django.contrib.contenttypes", "ContentType"),
),
]
assert get_schema().foreign_keys == expected
def test_one_to_one():
expected = [
(("tests.app_c", "InterAppOneToOne"), ("tests.app_b", "InterAppForeignKey")),
(("tests.basic", "OutgoingOneToOne"), ("tests.basic", "Target")),
(
("tests.inheritance", "ConcreteSubclass2"),
("tests.inheritance", "ConcreteBase"),
),
]
assert get_schema().one_to_ones == expected
def test_many_to_many():
expected = [
(("django.contrib.auth", "Group"), ("django.contrib.auth", "Permission")),
(("django.contrib.auth", "User"), ("django.contrib.auth", "Group")),
(("django.contrib.auth", "User"), ("django.contrib.auth", "Permission")),
(("tests.app_d", "InterAppManyToMany"), ("tests.app_b", "InterAppForeignKey")),
(("tests.basic", "OutgoingManyToMany"), ("tests.basic", "Target")),
]
assert get_schema().many_to_manys == expected
def test_inheritance():
expected = [
(
("django.contrib.auth", "AbstractUser"),
("django.contrib.auth", "AbstractBaseUser"),
),
(
("django.contrib.auth", "AbstractUser"),
("django.contrib.auth", "PermissionsMixin"),
),
(("django.contrib.auth", "User"), ("django.contrib.auth", "AbstractUser")),
(
("django.contrib.sessions", "Session"),
("django.contrib.sessions", "AbstractBaseSession"),
),
(("tests.app_a", "InterAppSubclass"), ("django.contrib.auth", "Group")),
(
("tests.inheritance", "AbstractMultipleInheritance"),
("tests.inheritance", "AbstractSubclass1"),
),
(
("tests.inheritance", "AbstractMultipleInheritance"),
("tests.inheritance", "AbstractSubclass2"),
),
(
("tests.inheritance", "AbstractSubclass1"),
("tests.inheritance", "AbstractBase"),
),
(
("tests.inheritance", "AbstractSubclass2"),
("tests.inheritance", "AbstractBase"),
),
(("tests.inheritance", "Concrete"), ("tests.inheritance", "Abstract")),
(
("tests.inheritance", "ConcreteInheritance"),
("tests.inheritance", "ConcreteSubclass1"),
),
(
("tests.inheritance", "ConcreteInheritance"),
("tests.inheritance", "ConcreteSubclass2"),
),
(
("tests.inheritance", "ConcreteSubclass1"),
("tests.inheritance", "ConcreteBase"),
),
(
("tests.inheritance", "ConcreteSubclass2"),
("tests.inheritance", "ConcreteBase"),
),
(
("tests.inheritance", "MixedMultipleInheritance"),
("tests.inheritance", "AbstractBase"),
),
(
("tests.inheritance", "MixedMultipleInheritance"),
("tests.inheritance", "ConcreteBase"),
),
(("tests.inheritance", "SubSubclass"), ("tests.inheritance", "Subclass")),
(("tests.inheritance", "Subclass"), ("tests.inheritance", "ConcreteBase")),
(
("tests.installed", "ConcreteInstalled"),
("tests.not_installed.models", "AbstractNotInstalled"),
),
]
if DJANGO_LT_19:
expected.remove(
(
("django.contrib.sessions", "Session"),
("django.contrib.sessions", "AbstractBaseSession"),
)
)
assert get_schema().inheritance == expected
def test_proxy():
expected = [
(("tests.app_d", "InterAppProxy"), ("tests.app_c", "InterAppOneToOne")),
(("tests.proxy", "ProxyNode"), ("tests.proxy", "Target")),
(("tests.proxy", "ProxyNode2"), ("tests.proxy", "Target")),
]
assert get_schema().proxies == expected
|
11591656
|
from typing import Any, Dict, List
from casymda.blocks import Sink, Source
from main.geo.geo_info import GeoInfo
from main.model.blocks.drive_tour import DriveTour
from main.model.blocks.truck import Truck
from main.model.geo_info_setup import get_geo_info
geo_info: GeoInfo = get_geo_info()
class Model:
def __init__(self, env):
self.env = env
self.model_components: Any
self.model_graph_names: Dict[str, List[str]]
#!resources+components (generated)
self.source = Source(
self.env,
"source",
xy=(79, 59),
entity_type=Truck,
max_entities=10,
inter_arrival_time=250,
ways={"drive_tour": [(97, 59), (180, 59)]},
)
self.sink = Sink(self.env, "sink", xy=(368, 59), ways={})
self.drive_tour = DriveTour(
self.env,
"drive_tour",
xy=(230, 59),
geo_info=geo_info,
start="PAT",
stops=["ZFB", "MMC", "CAV"],
ways={"sink": [(280, 59), (350, 59)]},
)
#!model (generated)
self.model_components = {
"source": self.source,
"sink": self.sink,
"drive_tour": self.drive_tour,
}
self.model_graph_names = {
"source": ["drive_tour"],
"sink": [],
"drive_tour": ["sink"],
}
# translate model_graph_names into corresponding objects
self.model_graph = {
self.model_components[name]: [
self.model_components[nameSucc]
for nameSucc in self.model_graph_names[name]
]
for name in self.model_graph_names
}
for component in self.model_graph:
component.successors = self.model_graph[component]
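# A minimal run sketch (casymda models are driven by a simpy.Environment;
# the run horizon below is illustrative):
#
#     import simpy
#
#     env = simpy.Environment()
#     model = Model(env)
#     env.run(until=10_000)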
|
11591691
|
import os
from warnings import warn
from xml.dom import minidom
import parmed as pmd
import mbuild as mb
from mbuild.compound import Compound
from mbuild.utils.io import has_foyer
def specific_ff_to_residue(
structure,
forcefield_selection=None,
residues=None,
reorder_res_in_pdb_psf=False,
boxes_for_simulation=1,
):
"""
Takes the mbuild Compound or mbuild Box structure and applies the selected
force field to the corresponding residue via foyer.
Note: a residue is defined as a molecule in this case, so it is not
designed for applying a force field to a protein.
Parameters
----------
structure: mbuild Compound object or mbuild Box object;
The mBuild Compound object or mbuild Box object, which contains the molecules
(or empty box) that will have the force field applied to them.
    forcefield_selection: str or dictionary, default=None
        Apply a force field to the output file by selecting a force field xml file with
        its path or by using the standard force field name provided by the `foyer` package.
        Example dict for FF file: {'ETH' : 'oplsaa.xml', 'OCT': 'path_to file/trappe-ua.xml'}
        Example str for FF file: 'path_to file/trappe-ua.xml'
        Example dict for standard FF names : {'ETH' : 'oplsaa', 'OCT': 'trappe-ua'}
        Example str for standard FF names: 'trappe-ua'
        Example of a mixed dict with both : {'ETH' : 'oplsaa', 'OCT': 'path_to file/trappe-ua.xml'}
    residues: list, [str, ..., str], default=None
        Labels of unique residues in the Compound. Residues are assigned by
        checking against Compound.name. Only supply residue names as 4-character
        strings, as the residue names are truncated to 4 characters to fit in the
        psf and pdb files.
reorder_res_in_pdb_psf: bool, default=False
This option provides the ability to reorder the residues/molecules from the original
structure's order. If True, the residues will be reordered as they appear in the residues
variable. If False, the order will be the same as entered in the original structure.
    boxes_for_simulation: int [1, 2], default = 1
        Gibbs (GEMC) or grand canonical (GCMC) ensembles are examples of where boxes_for_simulation would be 2.
        Canonical (NVT) or isothermal–isobaric (NPT) ensembles are examples where boxes_for_simulation equals 1.
        Note: the only valid options are 1 or 2.
Returns
-------
list, [structure, coulomb14scalar_dict, lj14_scalar_dict, residues_applied_list]
structure: parmed.Structure
parmed structure with applied force field
    coulomb14scalar_dict: dict
        a dictionary with the 1,4-coulombic scalars for each residue
        (i.e., a different force field could be applied to each residue)
    lj14_scalar_dict: dict
        a dictionary with the 1,4-LJ scalars for each residue
        (i.e., a different force field could be applied to each residue)
    residues_applied_list: list
        list of residues (i.e., a list of strings).
        These are all the residues to which the force field was actually applied.
Notes
-----
To write the NAMD/GOMC force field, pdb, psf, and force field
(.inp) files, the residues and forcefields must be provided in
a str or dictionary. If a dictionary is provided all residues must
be specified to a force field if the boxes_for_simulation is equal to 1.
Generating an empty box (i.e., pdb and psf files):
Enter residues = [], but the accompanying structure must be an empty mb.Box.
However, when doing this, the forcefield_selection must be supplied,
or it will provide an error (i.e., forcefield_selection can not be equal to None).
In this current FF/psf/pdb writer, a residue type is essentially a molecule type.
Therefore, it can only correctly write systems where every bead/atom in the molecule
has the same residue name, and the residue name is specific to that molecule type.
For example: a protein molecule with many residue names is not currently supported,
but is planned to be supported in the future.
"""
if has_foyer:
from foyer import Forcefield
from foyer.forcefields import forcefields
else:
print_error_message = (
"Package foyer is not installed. "
"Please install it using conda install -c conda-forge foyer"
)
raise ImportError(print_error_message)
if not isinstance(structure, (Compound, mb.Box)):
print_error_message = (
"ERROR: The structure expected to be of type: "
"{} or {}, received: {}".format(
type(Compound()),
type(mb.Box(lengths=[1, 1, 1])),
type(structure),
)
)
raise TypeError(print_error_message)
print("forcefield_selection = " + str(forcefield_selection))
if forcefield_selection is None:
        print_error_message = (
            "Please enter the force field selection (forcefield_selection) as a dictionary "
            "with all the residues specified to a force field "
            '-> Ex: {"Water" : "oplsaa", "OCT": "path/trappe-ua.xml"}, '
            "Note: the file path to the force field file must be specified, "
            "or use the standard force field name provided by the `foyer` package."
        )
raise TypeError(print_error_message)
elif forcefield_selection is not None and not isinstance(
forcefield_selection, dict
):
        print_error_message = (
            "The force field selection (forcefield_selection) "
            "is not a dictionary. Please enter a dictionary "
            "with all the residues specified to a force field "
            '-> Ex: {"Water" : "oplsaa", "OCT": "path/trappe-ua.xml"}, '
            "Note: the file path to the force field file must be specified, "
            "or use the standard force field name provided by the `foyer` package."
        )
raise TypeError(print_error_message)
if residues is None or not isinstance(residues, list):
print_error_message = (
"Please enter the residues in the Specific_FF_to_residue function."
)
raise TypeError(print_error_message)
if not isinstance(reorder_res_in_pdb_psf, bool):
print_error_message = (
"Please enter the reorder_res_in_pdb_psf "
"in the Specific_FF_to_residue function (i.e., True or False)."
)
raise TypeError(print_error_message)
    print_error_message_for_boxes_for_simulation = (
        "ERROR: Please enter boxes_for_simulation equal to the integer 1 or 2."
    )
    if not isinstance(boxes_for_simulation, int):
        raise TypeError(print_error_message_for_boxes_for_simulation)
    elif isinstance(boxes_for_simulation, int) and boxes_for_simulation not in [
        1,
        2,
    ]:
        raise ValueError(print_error_message_for_boxes_for_simulation)
forcefield_keys_list = []
if forcefield_selection is not None:
for res in forcefield_selection.keys():
forcefield_keys_list.append(res)
ff_data = forcefield_selection
    if forcefield_keys_list == [] and len(residues) != 0:
        print_error_message = "The forcefield_selection variable is not provided, but there are residues provided."
        raise ValueError(print_error_message)
elif forcefield_keys_list != [] and len(residues) == 0:
print_error_message = (
"The residues variable is an empty list but there are "
"forcefield_selection variables provided."
)
raise ValueError(print_error_message)
user_entered_ff_with_path_dict = (
{}
) # True means user entered the path, False is a standard foyer FF with no path
for z in range(0, len(forcefield_keys_list)):
for res_i in range(0, len(residues)):
if residues[res_i] == forcefield_keys_list[z]:
if (
os.path.splitext(ff_data[forcefield_keys_list[z]])[1]
== ".xml"
and len(residues) != 0
):
user_entered_ff_with_path_dict.update(
{residues[res_i]: True}
)
elif (
os.path.splitext(ff_data[forcefield_keys_list[z]])[1] == ""
and len(residues) != 0
):
user_entered_ff_with_path_dict.update(
{residues[res_i]: False}
)
else:
print_error_message = (
r"Please make sure you are entering the correct "
"foyer FF name and not a path to a FF file. "
"If you are entering a path to a FF file, "
"please use the forcefield_files variable with the "
"proper XML extension (.xml)."
)
raise ValueError(print_error_message)
coulomb14scalar_dict = {}
lj14_scalar_dict = {}
for j in range(0, len(forcefield_keys_list)):
residue_iteration = forcefield_keys_list[j]
if user_entered_ff_with_path_dict[residue_iteration]:
ff_for_residue_iteration = ff_data[residue_iteration]
            try:
                read_xml_iteration = minidom.parse(ff_for_residue_iteration)
            except Exception:
print_error_message = (
"Please make sure you are entering the correct foyer FF path, "
"including the FF file name.xml "
"If you are using the pre-build FF files in foyer, "
"only use the string name without any extension."
)
raise ValueError(print_error_message)
elif not user_entered_ff_with_path_dict[residue_iteration]:
ff_for_residue_iteration = ff_data[residue_iteration]
ff_names_path_iteration = (
forcefields.get_ff_path()[0]
+ "/xml/"
+ ff_for_residue_iteration
+ ".xml"
)
            try:
                read_xml_iteration = minidom.parse(ff_names_path_iteration)
            except Exception:
print_error_message = (
"Please make sure you are entering the correct foyer FF name, or the "
"correct file extension (i.e., .xml, if required)."
)
raise ValueError(print_error_message)
        lj_coul_1_4_values = read_xml_iteration.getElementsByTagName(
            "NonbondedForce"
        )
for Scalar in lj_coul_1_4_values:
coulomb14scalar_dict.update(
{
residue_iteration: float(
Scalar.getAttribute("coulomb14scale")
)
}
)
lj14_scalar_dict.update(
{residue_iteration: float(Scalar.getAttribute("lj14scale"))}
)
    # Check to see if it is an empty mbuild.Compound and set initial atoms to 0.
    # Note: an empty mbuild.Compound will read 1 atom, but there is really nothing there.
if isinstance(structure, Compound):
if len(structure.children) == 0:
# there are no real atoms in the Compound so the test fails. User should use mbuild.Box
            print_error_message = (
                "ERROR: If you are not providing an empty box, "
                "you need to specify the atoms/beads as children in the mb.Compound. "
                "If you are providing an empty box, please do so by specifying an "
                "mbuild Box ({})".format(type(mb.Box(lengths=[1, 1, 1])))
            )
raise TypeError(print_error_message)
else:
initial_no_atoms = len(structure.to_parmed().atoms)
# calculate the initial number of atoms for later comparison
if isinstance(structure, mb.Box):
lengths = structure.lengths
angles = structure.angles
structure = mb.Compound()
structure.box = mb.Box(lengths=lengths, angles=angles)
initial_no_atoms = 0
# add the FF to the residues
compound_box_infor = structure.to_parmed(residues=residues)
new_structure = pmd.Structure()
new_structure.box = compound_box_infor.box
# prepare all compound and remove nested compounds
no_layers_to_check_for_residues = 3
    print_error_message_all_res_not_specified = (
        "ERROR: All the residues are not specified, or "
        "the residues entered do not match the residues that "
        "were found and built for the structure."
    )
for j in range(0, no_layers_to_check_for_residues):
new_compound_iter = mb.Compound()
new_compound_iter.periodicity = structure.periodicity
if structure.name in residues:
if len(structure.children) == 0:
                warn(
                    "Warning: residue "
                    + str(structure.name)
                    + " is a single atom."
                )
new_compound_iter.add(mb.compound.clone(structure))
elif len(structure.children) > 0:
new_compound_iter.add(mb.compound.clone(structure))
else:
for child in structure.children:
if len(child.children) == 0:
if child.name not in residues:
raise ValueError(
print_error_message_all_res_not_specified
)
else:
new_compound_iter.add(mb.compound.clone(child))
elif len(child.children) > 0:
if child.name in residues:
new_compound_iter.add(mb.compound.clone(child))
else:
for sub_child in child.children:
if sub_child.name in residues:
new_compound_iter.add(
mb.compound.clone(sub_child)
)
else:
if len(sub_child.children) == 0 and (
child.name not in residues
):
raise ValueError(
print_error_message_all_res_not_specified
)
structure = new_compound_iter
residues_applied_list = []
residue_orig_order_list = []
for child in structure.children:
if child.name not in residue_orig_order_list:
residue_orig_order_list.append(child.name)
for res_reorder_iter in range(0, len(residues)):
if residues[res_reorder_iter] not in residue_orig_order_list:
text_to_print_1 = (
"All the residues were not used from the forcefield_selection "
"string or dictionary. There may be residues below other "
"specified residues in the mbuild.Compound hierarchy. "
"If so, all the highest listed residues pass down the force "
"fields through the hierarchy. Alternatively, residues that "
"are not in the structure may have been specified. "
)
            text_to_print_2 = (
                "Note: This warning will appear if you are using the CHARMM pdb and psf writers "
                + "with 2 boxes, and the boxes do not contain all the residues in each box."
            )
if boxes_for_simulation == 1:
warn(text_to_print_1)
raise ValueError(text_to_print_1)
if boxes_for_simulation == 2:
warn(text_to_print_1 + text_to_print_2)
if not reorder_res_in_pdb_psf:
residues = residue_orig_order_list
elif reorder_res_in_pdb_psf:
        print(
            "INFO: the output files are being reordered via the residues list's sequence."
        )
for i in range(0, len(residues)):
children_in_iteration = False
new_compound_iteration = mb.Compound()
        new_compound_iteration.periodicity = structure.periodicity
new_structure_iteration = pmd.Structure()
new_structure_iteration.box = compound_box_infor.box
for child in structure.children:
if ff_data.get(child.name) is None:
print_error_message = "ERROR: All residues are not specified in the force_field dictionary"
raise ValueError(print_error_message)
if child.name == residues[i]:
children_in_iteration = True
new_compound_iteration.add(mb.compound.clone(child))
if children_in_iteration:
if user_entered_ff_with_path_dict[residues[i]]:
ff_iteration = Forcefield(ff_data[residues[i]])
residues_applied_list.append(residues[i])
elif not user_entered_ff_with_path_dict[residues[i]]:
ff_iteration = Forcefield(name=ff_data[residues[i]])
residues_applied_list.append(residues[i])
new_structure_iteration = ff_iteration.apply(
new_compound_iteration, residues=[residues[i]]
)
new_structure = new_structure + new_structure_iteration
structure = new_structure
# calculate the final number of atoms
final_no_atoms = len(structure.atoms)
if final_no_atoms != initial_no_atoms:
print_error_message = (
"ERROR: The initial number of atoms sent to the force field analysis is "
"not the same as the final number of atoms analyzed. "
"The initial number of atoms was {} and the final number of atoms was {}. "
"Please ensure that all the residues names that are in the initial "
"Compound are listed in the residues list "
"(i.e., the residues variable).".format(
initial_no_atoms, final_no_atoms
)
)
raise ValueError(print_error_message)
return [
structure,
coulomb14scalar_dict,
lj14_scalar_dict,
residues_applied_list,
]
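# A minimal usage sketch (hypothetical residue name 'ETH'; assumes mbuild and
# foyer are installed and the Compound children are named to match `residues`):
#
#     import mbuild as mb
#
#     ethane = mb.load("CC", smiles=True)
#     ethane.name = "ETH"
#     box = mb.fill_box(ethane, n_compounds=10, box=[3, 3, 3])
#     structure, coul14, lj14, applied = specific_ff_to_residue(
#         box, forcefield_selection={"ETH": "oplsaa"}, residues=["ETH"]
#     )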
|
11591692
|
def _go_command(ctx):
output = ctx.attr.output
if ctx.attr.os == "windows":
output = output + ".exe"
output_file = ctx.actions.declare_file(ctx.attr.os + "/" + ctx.attr.arch + "/" + output)
pkg = ctx.attr.pkg
ld_flags = "-s -w"
if ctx.attr.ld:
ld_flags = ld_flags + " " + ctx.attr.ld
options = [
"go",
"build",
"-o", output_file.path,
"-compiler", "gc",
"-gcflags", '"all=-trimpath=${GOPATH}/src"',
"-asmflags", '"all=-trimpath=${GOPATH}/src"',
"-ldflags", "'%s'" % ld_flags,
"-tags", "'%s'" % ctx.attr.gotags,
pkg,
]
command = " ".join(options)
envs = [
"CGO_ENABLED=0",
"GOOS="+ctx.attr.os,
"GOARCH="+ctx.attr.arch,
#"GOROOT_FINAL=/go",
"GO111MODULE=on",
"GOCACHE=${TMPDIR}/gocache"
]
    if ctx.attr.mips:  # https://github.com/golang/go/issues/27260
        envs += ["GOMIPS=" + ctx.attr.mips]
        envs += ["GOMIPS64=" + ctx.attr.mips]
        envs += ["GOMIPSLE=" + ctx.attr.mips]
        envs += ["GOMIPS64LE=" + ctx.attr.mips]
    if ctx.attr.arm:
        envs += ["GOARM=" + ctx.attr.arm]
    switch_to_pwd = "cd ${SPWD} && "
    command = switch_to_pwd + " ".join(envs) + " " + command
ctx.actions.run_shell(
outputs = [output_file],
command = command,
use_default_shell_env = True,
)
runfiles = ctx.runfiles(files = [output_file])
return [DefaultInfo(executable = output_file, runfiles = runfiles)]
foreign_go_binary = rule(
_go_command,
attrs = {
'pkg': attr.string(),
'output': attr.string(),
'os': attr.string(mandatory=True),
'arch': attr.string(mandatory=True),
'mips': attr.string(),
'arm': attr.string(),
'ld': attr.string(),
'gotags': attr.string(),
},
executable = True,
)
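# Example BUILD usage of the rule above (a sketch; the package path, output
# name, and tags are hypothetical):
#
#     foreign_go_binary(
#         name = "server_linux_amd64",
#         pkg = "example.com/cmd/server",
#         output = "server",
#         os = "linux",
#         arch = "amd64",
#         gotags = "netgo",
#     )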
|
11591721
|
import unittest
from flask import Flask, Blueprint, request
try:
    from mock import Mock
except ImportError:
    # python3
    from unittest.mock import Mock
import flask
import flask_restful
import flask_restful.fields
#noinspection PyUnresolvedReferences
from nose.tools import assert_true, assert_false  # needed for tests written as continuations
# Add a dummy Resource to verify that the app is properly set.
class HelloWorld(flask_restful.Resource):
def get(self):
return {}
class GoodbyeWorld(flask_restful.Resource):
def __init__(self, err):
self.err = err
def get(self):
flask.abort(self.err)
class APIWithBlueprintTestCase(unittest.TestCase):
def test_api_base(self):
blueprint = Blueprint('test', __name__)
api = flask_restful.Api(blueprint)
app = Flask(__name__)
app.register_blueprint(blueprint)
self.assertEqual(api.urls, {})
self.assertEqual(api.prefix, '')
self.assertEqual(api.default_mediatype, 'application/json')
def test_api_delayed_initialization(self):
blueprint = Blueprint('test', __name__)
api = flask_restful.Api()
api.init_app(blueprint)
app = Flask(__name__)
app.register_blueprint(blueprint)
api.add_resource(HelloWorld, '/', endpoint="hello")
def test_add_resource_endpoint(self):
blueprint = Blueprint('test', __name__)
api = flask_restful.Api(blueprint)
view = Mock(**{'as_view.return_value': Mock(__name__='test_view')})
api.add_resource(view, '/foo', endpoint='bar')
app = Flask(__name__)
app.register_blueprint(blueprint)
view.as_view.assert_called_with('bar')
def test_add_resource_endpoint_after_registration(self):
blueprint = Blueprint('test', __name__)
api = flask_restful.Api(blueprint)
app = Flask(__name__)
app.register_blueprint(blueprint)
view = Mock(**{'as_view.return_value': Mock(__name__='test_view')})
api.add_resource(view, '/foo', endpoint='bar')
view.as_view.assert_called_with('bar')
def test_url_with_api_prefix(self):
blueprint = Blueprint('test', __name__)
api = flask_restful.Api(blueprint, prefix='/api')
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app = Flask(__name__)
app.register_blueprint(blueprint)
with app.test_request_context('/api/hi'):
self.assertEqual(request.endpoint, 'test.hello')
def test_url_with_blueprint_prefix(self):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = flask_restful.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app = Flask(__name__)
app.register_blueprint(blueprint)
with app.test_request_context('/bp/hi'):
self.assertEqual(request.endpoint, 'test.hello')
def test_url_with_registration_prefix(self):
blueprint = Blueprint('test', __name__)
api = flask_restful.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app = Flask(__name__)
app.register_blueprint(blueprint, url_prefix='/reg')
with app.test_request_context('/reg/hi'):
self.assertEqual(request.endpoint, 'test.hello')
def test_registration_prefix_overrides_blueprint_prefix(self):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = flask_restful.Api(blueprint)
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app = Flask(__name__)
app.register_blueprint(blueprint, url_prefix='/reg')
with app.test_request_context('/reg/hi'):
self.assertEqual(request.endpoint, 'test.hello')
def test_url_with_api_and_blueprint_prefix(self):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = flask_restful.Api(blueprint, prefix='/api')
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app = Flask(__name__)
app.register_blueprint(blueprint)
with app.test_request_context('/bp/api/hi'):
self.assertEqual(request.endpoint, 'test.hello')
def test_url_part_order_aeb(self):
blueprint = Blueprint('test', __name__, url_prefix='/bp')
api = flask_restful.Api(blueprint, prefix='/api', url_part_order='aeb')
api.add_resource(HelloWorld, '/hi', endpoint='hello')
app = Flask(__name__)
app.register_blueprint(blueprint)
with app.test_request_context('/api/hi/bp'):
self.assertEqual(request.endpoint, 'test.hello')
def test_error_routing(self):
blueprint = Blueprint('test', __name__)
api = flask_restful.Api(blueprint)
api.add_resource(HelloWorld(), '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app = Flask(__name__)
app.register_blueprint(blueprint)
with app.test_request_context('/hi', method='POST'):
assert_true(api._should_use_fr_error_handler())
assert_true(api._has_fr_route())
with app.test_request_context('/bye'):
api._should_use_fr_error_handler = Mock(return_value=False)
assert_true(api._has_fr_route())
def test_non_blueprint_rest_error_routing(self):
blueprint = Blueprint('test', __name__)
api = flask_restful.Api(blueprint)
api.add_resource(HelloWorld(), '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app = Flask(__name__)
app.register_blueprint(blueprint, url_prefix='/blueprint')
api2 = flask_restful.Api(app)
api2.add_resource(HelloWorld(), '/hi', endpoint="hello")
api2.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
with app.test_request_context('/hi', method='POST'):
assert_false(api._should_use_fr_error_handler())
assert_true(api2._should_use_fr_error_handler())
assert_false(api._has_fr_route())
assert_true(api2._has_fr_route())
with app.test_request_context('/blueprint/hi', method='POST'):
assert_true(api._should_use_fr_error_handler())
assert_false(api2._should_use_fr_error_handler())
assert_true(api._has_fr_route())
assert_false(api2._has_fr_route())
api._should_use_fr_error_handler = Mock(return_value=False)
api2._should_use_fr_error_handler = Mock(return_value=False)
with app.test_request_context('/bye'):
assert_false(api._has_fr_route())
assert_true(api2._has_fr_route())
with app.test_request_context('/blueprint/bye'):
assert_true(api._has_fr_route())
assert_false(api2._has_fr_route())
def test_non_blueprint_non_rest_error_routing(self):
blueprint = Blueprint('test', __name__)
api = flask_restful.Api(blueprint)
api.add_resource(HelloWorld(), '/hi', endpoint="hello")
api.add_resource(GoodbyeWorld(404), '/bye', endpoint="bye")
app = Flask(__name__)
app.register_blueprint(blueprint, url_prefix='/blueprint')
@app.route('/hi')
def hi():
return 'hi'
@app.route('/bye')
def bye():
flask.abort(404)
with app.test_request_context('/hi', method='POST'):
assert_false(api._should_use_fr_error_handler())
assert_false(api._has_fr_route())
with app.test_request_context('/blueprint/hi', method='POST'):
assert_true(api._should_use_fr_error_handler())
assert_true(api._has_fr_route())
api._should_use_fr_error_handler = Mock(return_value=False)
with app.test_request_context('/bye'):
assert_false(api._has_fr_route())
with app.test_request_context('/blueprint/bye'):
assert_true(api._has_fr_route())
if __name__ == '__main__':
unittest.main()
|
11591742
|
from pycap import PropertyTree, Observer, Observable, Experiment
import unittest
class ObserverObservableTestCase(unittest.TestCase):
def test_builders(self):
for AbstractClass in [Observer, Observable]:
# AbstractClass takes a PropertyTree as argument.
self.assertRaises(TypeError, AbstractClass)
# The PropertyTree must specify what concrete class derived from
# AbstractClass to instantiate.
ptree = PropertyTree()
self.assertRaises(KeyError, AbstractClass, ptree)
            # The derived concrete class must be registered in the dictionary
            # that holds the builders.
ptree.put_string('type', 'Invalid')
self.assertRaises(KeyError, AbstractClass, ptree)
# Now declare a concrete class.
class ConcreteClass(AbstractClass):
def __new__(cls, *args, **kwargs):
return object.__new__(ConcreteClass)
def __init__(*args, **kwargs):
pass
# Here is how to register a derived concrete class to the base
# abstract class.
AbstractClass._builders['ConcreteClass'] = ConcreteClass
# Now instantiation works.
ptree.put_string('type', 'ConcreteClass')
AbstractClass(ptree)
# Also can build directly from derived class.
ConcreteClass()
# Remove from the dictionary.
del AbstractClass._builders['ConcreteClass']
self.assertRaises(KeyError, AbstractClass, ptree)
def test_update_attach_detach_notify(self):
# Define an observable.
class ConcreteObservable(Observable):
def __new__(cls, *args, **kwargs):
return object.__new__(ConcreteObservable)
def __init__(self):
Observable.__init__(self)
self._greetings = 'hello world'
Observable._builders['ConcreteObservable'] = ConcreteObservable
subject = ConcreteObservable()
# Define a concrete observer.
class ConcreteObserver(Observer):
def __new__(cls, *args, **kwargs):
return object.__new__(ConcreteObserver)
Observer._builders['ConcreteObserver'] = ConcreteObserver
        # Derived Observer classes need to override the method ``update(...)``.
observer = ConcreteObserver()
self.assertRaises(RuntimeError, observer.update, subject)
class ObserverUpdate(Exception):
pass
def update(self, subject, *args, **kwargs):
print(subject._greetings)
raise ObserverUpdate
# Add the method to the definition of ConcreteObserver
ConcreteObserver.update = update
# Now it works.
observer = ConcreteObserver()
self.assertRaises(ObserverUpdate, observer.update, subject)
# Attach the observer to the observable.
subject.attach(observer)
# An observer may only be attached once.
self.assertRaises(RuntimeError, subject.attach, observer)
# Trigger update in the observers.
self.assertRaises(ObserverUpdate, subject.notify)
# Detach the observer.
subject.detach(observer)
# Only attached observer may be detached.
self.assertRaises(RuntimeError, subject.detach, observer)
# Note that the subject only stores a weak reference to its observers.
subject.attach(observer)
del observer
self.assertRaises(RuntimeError, subject.notify)
        # Similarly, attaching a temporary observer (which is immediately
        # garbage collected) makes notify() raise an exception.
subject = ConcreteObservable()
subject.notify()
subject.attach(ConcreteObserver())
self.assertRaises(RuntimeError, subject.notify)
class ExperimentTestCase(unittest.TestCase):
def test_abstract_class(self):
# Declare a concrete Experiment
class DummyExperiment(Experiment):
def __new__(cls, *args, **kwargs):
return object.__new__(DummyExperiment)
def __init__(self, ptree):
Experiment.__init__(self)
# Do not forget to register it to the builders dictionary.
Observable._builders['Dummy'] = DummyExperiment
        # Construct directly via DummyExperiment with a PropertyTree as a
        # positional argument
ptree = PropertyTree()
dummy = DummyExperiment(ptree)
# ... or directly via Experiment by specifying the ``type`` of
# Experiment.
ptree.put_string('type', 'Dummy')
dummy = Experiment(ptree)
        # The method run() must be overridden.
self.assertRaises(RuntimeError, dummy.run, None)
# Override the method run().
def run(self, device):
pass
DummyExperiment.run = run
        # Now it can be called without raising an error.
dummy.run(None)
if __name__ == '__main__':
unittest.main()
|
11591765
|
import os
import time
from eval_engines.bag.bagEvalEngine import BagEvalEngine
from bag.io import read_yaml
from bag_deep_ckt.util import *
import IPython
class TIACTLEEvaluationEngine(BagEvalEngine):
def __init__(self, design_specs_fname):
BagEvalEngine.__init__(self, design_specs_fname)
def impose_constraints(self, design_dict):
# constraints:
# seg_dict and w_dict for transistors should be integer
# nser, npar, ndum for resistors should be integer
# l, w for resistors min and max are 0.5u and 50u respectively
# imposed by the parameter vector in yaml file
for kwrd in design_dict.keys():
step_3_indicator = self.decend(kwrd, step=3)
if step_3_indicator.startswith('seg_dict') or step_3_indicator.startswith(
'w_dict') or step_3_indicator.startswith('n'):
design_dict[kwrd] = int(design_dict[kwrd])
return design_dict
def process_results(self, results):
processed_results = []
for result in results:
processed_result = {'valid': True}
cost = 0
if isinstance(result, Dict):
result = result['noise']
for spec in self.spec_range.keys():
processed_result[spec] = self.find_worst(result[spec], spec)
penalty = self.compute_penalty(processed_result[spec], spec)[0]
cost += penalty
processed_result['cost'] = cost
else:
processed_result['valid'] = False
processed_results.append(processed_result)
return processed_results
if __name__ == '__main__':
import pickle
np.random.seed(10)
random.seed(10)
fname = 'specs_design/TIA_CTLE.yaml'
evalEngine = TIACTLEEvaluationEngine(design_specs_fname=fname)
content = read_yaml(fname)
dir = content['database_dir']
start = time.time()
sample_designs = evalEngine.generate_data_set(n=150, evaluate=True)
print("time: {}".format(time.time() - start))
os.makedirs(dir, exist_ok=True)
with open(dir+"/init_data.pickle", 'wb') as f:
pickle.dump(sample_designs, f)
with open(dir+"/init_data.pickle", 'rb') as f:
data = pickle.load(f)
    costs = sorted(x.cost for x in data)
    IPython.embed()  # pause here to inspect the sorted design costs interactively
|
11591800
|
import superimport
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
import jax.numpy as jnp
from jax import vmap
class BinaryFA:
def __init__(self, input_dim, latent, max_iter, conv_tol=1e-4, compute_ll=True):
self.W = 0.1 * np.random.randn(latent, input_dim) # 2x16
self.b = 0.01 * np.random.randn(input_dim, 1) # 16x1
self.mu_prior = np.zeros((latent,1)) # 2x1
self.sigma_prior = np.eye(latent) # 2x2
self.input_dim = input_dim
self.latent = latent
        self.max_iter = max_iter
        self.conv_tol = conv_tol
        self.compute_ll = compute_ll
if compute_ll :
self.ll_hist = np.zeros((max_iter + 1, 1)) # 51x1
def variational_em(self, data):
ll_hist = np.zeros((self.max_iter + 1, 1))
i = 0
        while i < self.max_iter:
S1, S2, ll = self.estep(data)
ll_hist[i,0] = ll
self.mstep(S1, S2)
if i!=0:
delta_fval = abs(ll_hist[i] - ll_hist[i-1])
avg_fval = (abs(ll_hist[i]) + abs(ll_hist[i-1]) + np.finfo(float).eps)/2
                if (delta_fval / avg_fval) < self.conv_tol:
break
i += 1
return ll_hist[:i]
def estep(self, data):
S1 = np.zeros((self.latent + 1, self.input_dim)) # 3x16
S2 = np.zeros((self.latent + 1, self.latent + 1, self.input_dim)) # 3x3x16
W, b, mu_prior = self.W , self.b, self.mu_prior
ll = 0
for i in range(data.T.shape[1]):
mu_post, sigma_post, logZ, lambd = self.compute_latent_posterior_statistics(data.T[:,i], max_iter=3)
ll += logZ
EZZ = np.zeros((self.latent+1, self.latent+1))
EZZ[:self.latent,:self.latent] = sigma_post + np.outer(mu_post, mu_post)
EZZ[self.latent,:self.latent] = mu_post.T
EZZ[:self.latent,self.latent] = np.squeeze(np.asarray(mu_post))
EZZ[self.latent,self.latent] = 1
EZ = np.append(mu_post,np.ones((1,1)))
for j in range(self.input_dim):
S1[:,j] = S1[:,j] + (data.T[j,i] - 0.5) * EZ
S2[:,:,j] = S2[:,:,j] - 2* lambd[j] * EZZ
return S1, S2, ll
def mstep(self, S1, S2):
for i in range(self.input_dim):
what = np.linalg.lstsq(S2[:,:,i] , S1[:,i])[0]
self.W[:,i] = what[:self.latent]
self.b[i] = what[self.latent]
def compute_latent_posterior_statistics(self, y, output=[0,0,0,0], max_iter=3):
W, b = np.copy(self.W), np.copy(self.b)
y = y.reshape((-1,1))
# variational parameters
mu_prior = self.mu_prior
xi = (2 * y -1) * (W.T @ mu_prior + b)
xi[xi==0] = 0.01 * np.random.rand(np.count_nonzero(xi==0)) # 16x1
sigma_inv, iter = np.linalg.inv(self.sigma_prior), 0
for iter in range(max_iter):
lambd = (0.5 - sigmoid(xi)) / (2*xi)
tmp = W @ np.diagflat(lambd) @ W.T # 2x2
sigma_post = np.linalg.inv(sigma_inv - (2 * tmp))
tmp = y -0.5 + 2* lambd * b
tmp2 = np.sum(W @ np.diagflat(tmp), axis=1).reshape((2,1))
mu_post = sigma_post @ (sigma_inv @ mu_prior + tmp2)
tmp = np.diag(W.T @ (sigma_post + mu_post @ mu_post.T) @ W)
tmp = tmp.reshape((tmp.shape[0],1))
tmp2 = 2*(W @ np.diagflat(b)).T @ mu_post
xi = np.sqrt(tmp + tmp2 + b**2)
logZ = 0
if self.compute_ll:
lam = -lambd
A = np.diagflat(2*lam)
invA = np.diagflat(1/(2*lam))
bb = -0.5 * np.ones((y.shape[0],1))
c = -lam * xi**2 - 0.5 * xi + np.log(1+ np.exp(xi))
ytilde = invA @ (bb + y)
B = W.T
logconst1 = -0.5* np.sum(np.log(lam/np.pi))
logconst2 = 0.5 * ytilde.T @ A @ ytilde - np.sum(c)
gauss = multivariate_normal.logpdf(np.squeeze(np.asarray(ytilde)), mean=np.squeeze(np.asarray(B @ mu_prior + b)), cov=(invA + B @ sigma_post @ B.T))
logZ = logconst1 + logconst2 + gauss
output = [mu_post, sigma_post, logZ,lambd]
return output
def predict_missing(self, y):
N, T = y.shape # 150 x 16
prob_on = np.zeros(y.shape) # 150 x 16
post_pred = np.zeros((N,T,2)) # 150 x 16 x 2
L,p = self.W.shape # 16 x 3
B = np.c_[np.copy(self.b),self.W.T] # 16 x 3
for n in range(N):
mu_post, sigma_post, logZ, lambd = self.compute_latent_posterior_statistics(y[n,:].T, False)
mu1 = np.r_[np.ones((1,1)), mu_post]
sigma1 = np.zeros((L+1,L+1))
sigma1[1:,1:] = sigma_post
prob_on[n,:] = sigmoid_times_gauss(B, mu1, sigma1)
return prob_on
def infer_latent(self, y):
N, T = y.shape
W, b, mu_prior = self.W, self.b, self.mu_prior
K, T2 = self.W.shape
mu_post, loglik = np.zeros((K,N)),np.zeros((1,N))
sigma_post = np.zeros((K,K,N))
for n in range(N):
mu_p , sigma_p, loglik[0,n] , _ = self.compute_latent_posterior_statistics(y[n,:].T)
mu_post[:,n] = np.squeeze(np.asarray(mu_p))
sigma_post[:,:,n] = np.squeeze(np.asarray(sigma_p))
return mu_post, sigma_post, loglik
def sigmoid_times_gauss(X, wMAP, C):
vv = lambda x, y: jnp.vdot(x, y)
mv = vmap(vv, (None, 0), 0)
mm = vmap(mv, (0, None), 0)
vm = vmap(vv, (0, 0), 0)
    mu = X @ wMAP
    n = X.shape[1]
    if n < 1000:
        sigma2 = np.diag(X @ C @ X.T)
    else:
        sigma2 = vm(X, mm(C, X))
    kappa = 1 / np.sqrt(1 + np.pi * sigma2 / 8)
p = sigmoid(kappa * mu.reshape(kappa.shape))
return p
np.random.seed(1)
max_iter, conv_tol = 50, 1e-4
sigmoid = lambda x : 1/(1 + np.exp(-1 * x))
d, k, m = 16, 3, 50
noise_level = 0.5
proto = np.random.rand(d, k) < noise_level
src = np.concatenate((np.tile(proto[:,0], (1,m)), np.tile(proto[:,1],(1,m)), np.tile(proto[:,2],(1,m))),axis=1)
clean_data = np.concatenate((np.tile(proto[:,0], (m,1)), np.tile(proto[:,1],(m,1)), np.tile(proto[:,2],(m,1))), axis=0)
n = clean_data.shape[0]
mask, noisy_data, missing_data, = np.random.rand(n,d) < 0.05, np.copy(clean_data), np.copy(clean_data)
noisy_data[mask] = 1 - noisy_data[mask]
missing_data[mask] = np.nan
plt.figure()
ax = plt.gca()
plt.imshow(noisy_data, aspect='auto', interpolation='none',
origin='lower', cmap="gray")
plt.title('Noisy Binary Data')
plt.show()
binaryFA = BinaryFA(d, 2, 50, 1e-4, True)
binaryFA.variational_em(noisy_data)
mu_post, sigma_post, loglik = binaryFA.infer_latent(noisy_data)
symbols = ['ro', 'gs', 'k*']
plt.figure()
plt.plot(mu_post[0,:m], mu_post[1,0:m], symbols[0])
plt.plot(mu_post[0,m:2*m], mu_post[1,m:2*m], symbols[1])
plt.plot(mu_post[0,2*m:], mu_post[1,2*m:], symbols[2])
plt.title('Latent Embedding')
plt.show()
prob_on = binaryFA.predict_missing(noisy_data)
plt.figure()
plt.imshow(prob_on, aspect='auto', interpolation='none',
origin='lower', cmap="gray")
plt.title('Posterior Predictive')
plt.show()
plt.figure()
plt.imshow(prob_on>0.5, aspect='auto', interpolation='none',
origin='lower', cmap="gray")
plt.title('Reconstruction')
plt.show()
|
11591866
|
import os.path as op
from os import getenv
from uuid import uuid4
from unittest import SkipTest
from flask_admin.contrib.fileadmin import azure
from .test_fileadmin import Base
class AzureFileAdminTests(Base.FileAdminTests):
_test_storage = getenv('AZURE_STORAGE_CONNECTION_STRING')
def setUp(self):
if not azure.BlockBlobService:
raise SkipTest('AzureFileAdmin dependencies not installed')
self._container_name = 'fileadmin-tests-%s' % uuid4()
if not self._test_storage or not self._container_name:
raise SkipTest('AzureFileAdmin test credentials not set')
client = azure.BlockBlobService(connection_string=self._test_storage)
client.create_container(self._container_name)
dummy = op.join(self._test_files_root, 'dummy.txt')
client.create_blob_from_path(self._container_name, 'dummy.txt', dummy)
def tearDown(self):
client = azure.BlockBlobService(connection_string=self._test_storage)
client.delete_container(self._container_name)
def fileadmin_class(self):
return azure.AzureFileAdmin
def fileadmin_args(self):
return (self._container_name, self._test_storage), {}
|
11591896
|
import numpy as np
import mxnet as mx
from mxnet.gluon import nn, HybridBlock
from mxnet.util import use_np
from autogluon_contrib_nlp.layers import get_activation, get_norm_layer
from autogluon_contrib_nlp.models.transformer import TransformerEncoder
from .. import constants as _C
from ..config import CfgNode
@use_np
class BasicMLP(HybridBlock):
def __init__(self, in_units,
mid_units,
out_units,
num_layers=1,
normalization='layer_norm',
norm_eps=1E-5,
dropout=0.1,
data_dropout=False,
activation='leaky',
weight_initializer=None,
bias_initializer=None,
prefix=None, params=None):
"""
data -> [dropout] * (0/1) -> [Dense -> Normalization -> ACT] * N -> dropout -> Dense -> out
Parameters
----------
        in_units
            Number of input units
        mid_units
            Number of units in the intermediate layers (negative means reuse in_units)
        out_units
            Number of output units
        num_layers
            Number of intermediate layers
        normalization
            Type of the normalization layer
        norm_eps
            Epsilon of the normalization layer
        dropout
            Dropout rate
        data_dropout
            Whether to apply dropout to the input data
        activation
            Activation function
"""
super().__init__(prefix=prefix, params=params)
self.in_units = in_units
self.data_dropout = data_dropout
if mid_units < 0:
mid_units = in_units
with self.name_scope():
self.proj = nn.HybridSequential()
with self.proj.name_scope():
if num_layers > 0 and data_dropout:
self.proj.add(nn.Dropout(dropout))
for i in range(num_layers):
self.proj.add(nn.Dense(units=mid_units,
in_units=in_units,
flatten=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
use_bias=False))
self.proj.add(get_norm_layer(normalization,
axis=-1,
epsilon=norm_eps,
in_channels=mid_units))
self.proj.add(get_activation(activation))
in_units = mid_units
self.proj.add(nn.Dropout(dropout))
self.proj.add(nn.Dense(units=out_units,
in_units=in_units,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
flatten=False))
def hybrid_forward(self, F, x):
return self.proj(x)
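# A minimal usage sketch (assumes MXNet with the numpy interface enabled, as
# the @use_np decorator above requires):
#
#     from mxnet import np as mnp, npx
#     npx.set_np()
#     mlp = BasicMLP(in_units=16, mid_units=32, out_units=4, num_layers=2)
#     mlp.initialize()
#     out = mlp(mnp.random.uniform(size=(8, 16)))  # -> shape (8, 4)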
@use_np
class CategoricalFeatureNet(HybridBlock):
"""Embedding of the categories."""
def __init__(self, num_class, out_units, cfg=None, prefix=None, params=None):
super().__init__(prefix=prefix, params=params)
self.cfg = cfg = CategoricalFeatureNet.get_cfg().clone_merge(cfg)
embed_initializer = mx.init.create(*cfg.initializer.embed)
weight_initializer = mx.init.create(*cfg.initializer.weight)
bias_initializer = mx.init.create(*cfg.initializer.bias)
with self.name_scope():
self.embedding = nn.Embedding(input_dim=num_class,
output_dim=cfg.emb_units,
weight_initializer=embed_initializer)
self.proj = BasicMLP(in_units=cfg.emb_units,
mid_units=cfg.mid_units,
out_units=out_units,
num_layers=cfg.num_layers,
normalization=cfg.normalization,
norm_eps=cfg.norm_eps,
data_dropout=cfg.data_dropout,
dropout=cfg.dropout,
activation=cfg.activation,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
@staticmethod
def get_cfg(key=None):
if key is None:
cfg = CfgNode()
cfg.emb_units = 32
cfg.mid_units = 64
cfg.num_layers = 1
cfg.data_dropout = False
cfg.dropout = 0.1
cfg.activation = 'leaky'
cfg.normalization = 'layer_norm'
cfg.norm_eps = 1e-5
cfg.initializer = CfgNode()
cfg.initializer.embed = ['xavier', 'gaussian', 'in', 1.0]
cfg.initializer.weight = ['xavier', 'uniform', 'avg', 3.0]
cfg.initializer.bias = ['zeros']
return cfg
else:
raise NotImplementedError
def hybrid_forward(self, F, feature):
embed = self.embedding(feature)
return self.proj(embed)
@use_np
class NumericalFeatureNet(HybridBlock):
def __init__(self, input_shape, out_units, cfg=None, prefix=None, params=None):
super().__init__(prefix=prefix, params=params)
self.cfg = cfg = NumericalFeatureNet.get_cfg().clone_merge(cfg)
self.input_shape = input_shape
self.need_first_reshape = isinstance(input_shape, (list, tuple)) and len(input_shape) != 1
self.in_units = int(np.prod(input_shape))
weight_initializer = mx.init.create(*cfg.initializer.weight)
bias_initializer = mx.init.create(*cfg.initializer.bias)
with self.name_scope():
if self.cfg.input_centering:
self.data_bn = nn.BatchNorm(in_channels=self.in_units)
if self.cfg.gated_activation:
self.gate_proj = BasicMLP(in_units=self.in_units,
mid_units=cfg.mid_units,
out_units=out_units,
num_layers=cfg.num_layers,
normalization=cfg.normalization,
norm_eps=cfg.norm_eps,
data_dropout=cfg.data_dropout,
dropout=cfg.dropout,
activation=cfg.activation,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
else:
self.gate_proj = None
self.proj = BasicMLP(in_units=self.in_units,
mid_units=cfg.mid_units,
out_units=out_units,
num_layers=cfg.num_layers,
normalization=cfg.normalization,
norm_eps=cfg.norm_eps,
data_dropout=cfg.data_dropout,
dropout=cfg.dropout,
activation=cfg.activation,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
@staticmethod
def get_cfg(key=None):
if key is None:
cfg = CfgNode()
cfg.input_centering = False
cfg.gated_activation = False
cfg.mid_units = 128
cfg.num_layers = 1
cfg.data_dropout = False
cfg.dropout = 0.1
cfg.activation = 'leaky'
cfg.normalization = 'layer_norm'
cfg.norm_eps = 1e-5
cfg.initializer = CfgNode()
cfg.initializer.weight = ['xavier', 'uniform', 'avg', 3.0]
cfg.initializer.bias = ['zeros']
else:
raise NotImplementedError
return cfg
def hybrid_forward(self, F, features):
if self.need_first_reshape:
features = F.np.reshape(features, (-1, self.in_units))
if self.cfg.input_centering:
features = self.data_bn(features)
if self.gate_proj is not None:
return F.npx.sigmoid(self.gate_proj(features)) * self.proj(features)
else:
return self.proj(features)
@use_np
class FeatureAggregator(HybridBlock):
def __init__(self, num_fields, out_shape, in_units,
cfg=None, get_embedding=False, prefix=None, params=None):
"""
Parameters
----------
        num_fields
            The number of input feature fields
        out_shape
            Shape of the output
        in_units
            The number of input units
cfg
The configuration
get_embedding
Whether to get the embedding
prefix
The prefix
params
The parameters
"""
super().__init__(prefix=prefix, params=params)
if cfg is None:
cfg = FeatureAggregator.get_cfg()
self.cfg = cfg = FeatureAggregator.get_cfg().clone_merge(cfg)
self.num_fields = num_fields
if isinstance(out_shape, list):
out_shape = tuple(out_shape)
self.out_shape = out_shape
self.in_units = in_units
self.get_embedding = get_embedding
weight_initializer = mx.init.create(*cfg.initializer.weight)
bias_initializer = mx.init.create(*cfg.initializer.bias)
out_units = int(np.prod(out_shape))
with self.name_scope():
if num_fields > 1:
if cfg.agg_type == 'attention' or cfg.agg_type == 'attention_token':
                    if cfg.attention_net.hidden_size < 0:
                        hidden_size = 4 * cfg.attention_net.units
                    else:
                        hidden_size = cfg.attention_net.hidden_size
if cfg.attention_net.units != self.in_units:
self.attention_net_pre_proj = nn.Dense(units=cfg.attention_net.units,
in_units=in_units,
use_bias=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
flatten=False,
prefix='attention_net_pre_proj_')
else:
self.attention_net_pre_proj = None
self.attention_agg_ln = nn.LayerNorm(in_channels=in_units,
epsilon=self.cfg.norm_eps)
self.attention_agg_dropout = nn.Dropout(self.cfg.dropout)
self.attention_transformer_enc = TransformerEncoder(
num_layers=cfg.attention_net.num_layers,
num_heads=cfg.attention_net.num_heads,
units=cfg.attention_net.units,
hidden_size=hidden_size,
dropout=cfg.dropout,
activation=cfg.attention_net.activation,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
# Construct out proj
if cfg.agg_type == 'mean' or cfg.agg_type == 'max':
in_units = in_units
elif cfg.agg_type == 'concat':
in_units = in_units * num_fields
elif cfg.agg_type == 'attention' or cfg.agg_type == 'attention_token':
in_units = cfg.attention_net.units
else:
raise NotImplementedError
mid_units = in_units if cfg.mid_units < 0 else cfg.mid_units
self.out_proj = BasicMLP(in_units=in_units,
mid_units=mid_units,
out_units=out_units,
num_layers=cfg.out_proj_num_layers,
data_dropout=cfg.data_dropout,
normalization=cfg.normalization,
norm_eps=cfg.norm_eps,
dropout=cfg.dropout,
activation=cfg.activation,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer)
@staticmethod
def get_cfg(key=None):
if key is None:
cfg = CfgNode()
cfg.agg_type = 'concat'
# Attention Aggregator
cfg.attention_net = CfgNode()
cfg.attention_net.num_layers = 6
cfg.attention_net.units = 64
cfg.attention_net.num_heads = 4
cfg.attention_net.hidden_size = -1 # Size of the FFN network used in attention
cfg.attention_net.activation = 'gelu' # Activation of the attention
# Other parameters
cfg.mid_units = 128
cfg.feature_proj_num_layers = -1
cfg.out_proj_num_layers = 1
cfg.data_dropout = False
cfg.dropout = 0.1
cfg.activation = 'leaky'
cfg.normalization = 'layer_norm'
cfg.norm_eps = 1e-5
cfg.initializer = CfgNode()
cfg.initializer.weight = ['xavier', 'uniform', 'avg', 3.0]
cfg.initializer.bias = ['zeros']
else:
raise NotImplementedError
return cfg
def hybrid_forward(self, F, features, valid_length=None):
"""
Parameters
----------
        features
            List of projected features. All elements must have the same shape.
valid_length
The valid_length of the text column. If it is given it will have shape (B,)
Returns
-------
scores
Shape (batch_size,) + out_shape
"""
if len(features) == 1:
agg_features = features[0]
else:
if self.cfg.agg_type == 'attention_token':
# Features[0] will have shape (B, T, C)
other_features = F.np.stack(features[1:], axis=1)
agg_features = F.np.concatenate([other_features, features[0]], axis=1)
agg_features = self.attention_agg_ln(agg_features)
agg_features = self.attention_agg_dropout(agg_features)
if self.attention_net_pre_proj is not None:
agg_features = self.attention_net_pre_proj(agg_features)
agg_features = self.attention_transformer_enc(agg_features,
valid_length + len(features) - 1)
agg_features = agg_features[:, len(features) - 1, :]
else:
agg_features = F.np.stack(features, axis=1)
if self.cfg.agg_type == 'mean':
agg_features = F.np.mean(agg_features, axis=1)
elif self.cfg.agg_type == 'max':
agg_features = F.np.max(agg_features, axis=1)
elif self.cfg.agg_type == 'concat':
agg_features = F.npx.reshape(agg_features, (-2, -1))
elif self.cfg.agg_type == 'attention':
if self.attention_net_pre_proj is not None:
agg_features = self.attention_net_pre_proj(agg_features)
agg_features = self.attention_transformer_enc(agg_features, None)
agg_features = F.np.mean(agg_features, axis=1)
else:
raise NotImplementedError
scores = self.out_proj(agg_features)
if len(self.out_shape) != 1:
scores = F.np.reshape(scores, (-1,) + self.out_shape)
if self.get_embedding:
return scores, agg_features
else:
return scores
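# A minimal usage sketch for the default 'concat' aggregator (assumes the
# numpy interface is enabled; the shapes below are illustrative):
#
#     from mxnet import np as mnp, npx
#     npx.set_np()
#     agg = FeatureAggregator(num_fields=3, out_shape=(2,), in_units=64)
#     agg.initialize()
#     feats = [mnp.random.uniform(size=(8, 64)) for _ in range(3)]
#     scores = agg(feats)  # -> shape (8, 2)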
@use_np
class MultiModalWithPretrainedTextNN(HybridBlock):
"""The basic model for classification + regression of multimodal tabular data
with text, numerical, and categorical columns.
    It uses a pretrained model, e.g., ELECTRA, BERT, ALBERT, RoBERTa, etc., as the backbone for
handling text data.
Here, we use the backbone network to extract the contextual embeddings and use
another dense layer to map the contextual embeddings to the class scores.
Input:
TextField + EntityField --> TextNet -------> TextFeature
...
CategoricalField --> CategoricalNet --> CategoricalFeature ==> AggregateNet --> Dense --> logits/scores
...
NumericalField ----> NumericalNet ----> NumericalFeature
    We support the following aggregators:
    - mean
        Take the average of the input features
    - concat
        Concatenate the input features
    - max
        Take the maximum of the input features
    - attention
        Use a stack of transformer-encoder layers to aggregate the information.
    - attention_token
        Use a stack of transformer-encoder layers to aggregate the information.
        Instead of using one embedding to describe the text data, we keep the original
        token embeddings and fuse them jointly with the other embeddings.
"""
def __init__(self, text_backbone,
num_text_features,
num_categorical_features,
num_numerical_features,
numerical_input_units,
num_categories,
out_shape,
cfg=None,
get_embedding=False,
prefix=None,
params=None):
"""
Parameters
----------
text_backbone
Backbone network for handling the text data
num_text_features
Number of text features.
Each text feature will have (text_token_ids, valid_length)
num_categorical_features
Number of categorical features
num_numerical_features
Number of numerical features
numerical_input_units
The number of units for each numerical column
num_categories
The number of categories for each categorical column.
out_shape
Shape of the output
cfg
The configuration of the network
get_embedding
Whether to output the aggregated intermediate embedding from the network
prefix
params
"""
super().__init__(prefix=prefix, params=params)
self.cfg = cfg = MultiModalWithPretrainedTextNN.get_cfg().clone_merge(cfg)
assert self.cfg.text_net.pool_type == 'cls'
base_feature_units = self.cfg.base_feature_units
if not isinstance(out_shape, (list, tuple)):
out_shape = (out_shape,)
self.out_shape = out_shape
if base_feature_units == -1:
base_feature_units = text_backbone.units
self.get_embedding = get_embedding
self.num_text_features = num_text_features
self.num_categorical_features = num_categorical_features
self.num_numerical_features = num_numerical_features
if numerical_input_units is None:
numerical_input_units = []
elif not isinstance(numerical_input_units, (list, tuple)):
numerical_input_units = [numerical_input_units] * self.num_numerical_features
self.numerical_input_units = numerical_input_units
self.num_categories = num_categories
if self.num_categorical_features > 0:
assert len(self.num_categories) == self.num_categorical_features
weight_initializer = mx.init.create(*cfg.initializer.weight)
bias_initializer = mx.init.create(*cfg.initializer.bias)
self.agg_type = cfg.agg_net.agg_type
if self.agg_type == 'attention_token':
assert self.num_text_features == 1, \
'Only supports a single text input if use token-level attention'
with self.name_scope():
self.text_backbone = text_backbone
if base_feature_units != text_backbone.units:
self.text_proj = nn.HybridSequential()
for i in range(self.num_text_features):
with self.text_proj.name_scope():
self.text_proj.add(nn.Dense(in_units=text_backbone.units,
units=base_feature_units,
use_bias=False,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer,
flatten=False))
else:
self.text_proj = None
if self.num_categorical_features > 0:
self.categorical_networks = nn.HybridSequential()
for i in range(self.num_categorical_features):
with self.categorical_networks.name_scope():
self.categorical_networks.add(
CategoricalFeatureNet(num_class=self.num_categories[i],
out_units=base_feature_units,
cfg=cfg.categorical_net))
else:
self.categorical_networks = None
if self.cfg.aggregate_categorical and self.num_categorical_features > 1:
# Use another dense layer to aggregate the categorical features
self.categorical_agg = BasicMLP(
in_units=base_feature_units * self.num_categorical_features,
mid_units=cfg.categorical_agg.mid_units,
out_units=base_feature_units,
activation=cfg.categorical_agg.activation,
dropout=cfg.categorical_agg.dropout,
num_layers=cfg.categorical_agg.num_layers,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer
)
if self.cfg.categorical_agg.gated_activation:
self.categorical_agg_gate = BasicMLP(
in_units=base_feature_units * self.num_categorical_features,
mid_units=cfg.categorical_agg.mid_units,
out_units=base_feature_units,
activation=cfg.categorical_agg.activation,
dropout=cfg.categorical_agg.dropout,
num_layers=cfg.categorical_agg.num_layers,
weight_initializer=weight_initializer,
bias_initializer=bias_initializer
)
else:
self.categorical_agg_gate = None
else:
self.categorical_agg = None
self.categorical_agg_gate = None
if self.num_numerical_features > 0:
self.numerical_networks = nn.HybridSequential()
for i in range(self.num_numerical_features):
with self.numerical_networks.name_scope():
self.numerical_networks.add(
NumericalFeatureNet(input_shape=self.numerical_input_units[i],
out_units=base_feature_units,
cfg=cfg.numerical_net))
else:
self.numerical_networks = None
self.agg_layer = FeatureAggregator(num_fields=self.num_fields,
out_shape=out_shape,
in_units=base_feature_units,
cfg=cfg.agg_net,
get_embedding=get_embedding)
@staticmethod
def get_cfg(key=None):
if key is None:
cfg = CfgNode()
cfg.base_feature_units = -1 # -1 means not given and we will use the units of BERT
cfg.text_net = CfgNode()
cfg.text_net.use_segment_id = True
cfg.text_net.pool_type = 'cls'
cfg.aggregate_categorical = True # Whether to use one network to aggregate the categorical columns.
cfg.categorical_agg = CfgNode()
cfg.categorical_agg.activation = 'leaky'
cfg.categorical_agg.mid_units = 128
cfg.categorical_agg.num_layers = 1
cfg.categorical_agg.dropout = 0.1
cfg.categorical_agg.gated_activation = False
cfg.agg_net = FeatureAggregator.get_cfg()
cfg.categorical_net = CategoricalFeatureNet.get_cfg()
cfg.numerical_net = NumericalFeatureNet.get_cfg()
cfg.initializer = CfgNode()
cfg.initializer.weight = ['xavier', 'uniform', 'avg', 3.0]
cfg.initializer.bias = ['zeros']
return cfg
else:
raise NotImplementedError
@property
def num_fields(self):
if self.cfg.aggregate_categorical and self.num_categorical_features > 1:
return self.num_text_features + 1 + self.num_numerical_features
else:
return self.num_text_features + self.num_categorical_features + self.num_numerical_features
def initialize_with_pretrained_backbone(self, backbone_params_path, ctx=None):
self.text_backbone.load_parameters(backbone_params_path, ctx=ctx)
self.agg_layer.initialize(ctx=ctx)
if self.text_proj is not None:
self.text_proj.initialize(ctx=ctx)
if self.categorical_networks is not None:
self.categorical_networks.initialize(ctx=ctx)
if self.numerical_networks is not None:
self.numerical_networks.initialize(ctx=ctx)
if self.categorical_agg is not None:
self.categorical_agg.initialize(ctx=ctx)
if self.categorical_agg_gate is not None:
self.categorical_agg_gate.initialize(ctx=ctx)
def hybrid_forward(self, F, features):
"""
Parameters
----------
features
A list of field data
It will contain
- text_features
...
- categorical_features
...
- numerical_features
...
Returns
-------
logits_or_scores
Shape (batch_size,) + out_shape
"""
field_features = []
ptr = 0
text_valid_length = None
for i in range(self.num_text_features):
batch_token_ids, batch_valid_length, batch_segment_ids = features[i]
if self.cfg.text_net.use_segment_id:
contextual_embedding, _ = self.text_backbone(batch_token_ids,
batch_segment_ids,
batch_valid_length)
else:
_, all_hidden_states = self.text_backbone(batch_token_ids, batch_valid_length)
contextual_embedding = all_hidden_states[-1]
if self.agg_type == 'attention_token' and self.num_fields > 1:
if self.text_proj is not None:
contextual_embedding = self.text_proj[i](contextual_embedding)
text_valid_length = batch_valid_length
field_features.append(contextual_embedding)
else:
pooled_output = contextual_embedding[:, 0, :]
if self.text_proj is not None:
pooled_output = self.text_proj[i](pooled_output)
field_features.append(pooled_output)
ptr += self.num_text_features
all_cat_features = []
for i in range(ptr, ptr + self.num_categorical_features):
cat_features = self.categorical_networks[i - ptr](features[i])
if self.categorical_agg is not None:
all_cat_features.append(cat_features)
else:
field_features.append(cat_features)
if self.categorical_agg is not None:
all_cat_features = F.np.concatenate(all_cat_features, axis=-1)
if self.cfg.categorical_agg.gated_activation:
field_features.append(
F.npx.sigmoid(self.categorical_agg_gate(all_cat_features))
* self.categorical_agg(all_cat_features))
else:
field_features.append(self.categorical_agg(all_cat_features))
ptr += self.num_categorical_features
for i in range(ptr, ptr + self.num_numerical_features):
numerical_features = self.numerical_networks[i - ptr](features[i])
field_features.append(numerical_features)
if self.agg_type == 'attention_token':
return self.agg_layer(field_features, text_valid_length)
else:
return self.agg_layer(field_features)
|
11591901
|
from abc import ABC, abstractmethod
from utils import *
from transformations import *
class Augmentor(ABC):
def __init__(self, params):
self.output_type_list = params.get('output_type_list', ['single', 'multi-object'])
self.overlap_ratio = params.get('overlap_ratio', 0)
self.persp_trans = params.get('persp_trans', 0)
self.background = params.get('background', 'none')
self.background_image_list = params.get('background_image_list', None)
self.flip_prob = params.get('flip_prob', 0.5)
self.max_rotate_degree = params.get('max_rotate_degree', 30)
self.salt = params.get('salt', 0)
self.pepper = params.get('pepper', 0)
self.gauss_var = params.get('gauss_var', 0)
self.smooth_kernel_size = params.get('smooth_kernel_size', 1)
self.bboxes = params.get('bboxes', False)
self.num_classes = params.get('num_classes', 0)
self.adjust_mask = params.get('adjust_mask', True)
self.pad_mask = params.get('pad_mask', 25)
self.max_background_images = 20
"""
single - single-channel mask, shows object presence
multi-object - multi-channel mask, separate color for each object (for each plant)
multi-part - multi-channel mask, separate color for each object part (for each leaf)
semantic - multi-channel mask, separate color for each type of object (leaf, root, flower)
class - multi-channel mask, separate color for each class (plant variety)
"""
self.possible_inp2out = {
'single': ['single', 'multi-object', 'class'],
'multi-part': ['single', 'multi-object', 'multi-part', 'class'],
'semantic': ['single', 'multi-object', 'semantic', 'class']
}
self.input_type = self.get_input_type()
self._check_params()
self._call_buffer = {}
@abstractmethod
def get_input_type(self):
pass
def _check_params(self):
if self.input_type not in self.possible_inp2out:
raise UserWarning('{} input type is not supported. See {} for the details'.format(
self.input_type, self.possible_inp2out))
for output_type in self.output_type_list:
if output_type not in self.possible_inp2out[self.input_type]:
raise UserWarning('{} output type is not supported for {} input type. See {} for the details'.format(
output_type, self.input_type, self.possible_inp2out))
if 'class' in self.output_type_list:
assert self.num_classes > 0
        if not (0 <= self.overlap_ratio < 1):
            raise UserWarning('overlap_ratio must be in [0, 1)')
if self.persp_trans < 0:
raise UserWarning('persp_trans must be >= 0')
if (self.persp_trans > 0.2) and (self.bboxes):
raise UserWarning('Bounding box is not supported for strong perspective transform yet')
if self.background not in ['img', 'none']:
raise UserWarning('background type is not supported')
if (self.background == 'img') and (self.background_image_list is None):
raise UserWarning('"background" "img" expects "background_image_list" to be not None')
if (self.background == 'img') and (len(self.background_image_list) == 0):
raise UserWarning('background_image_list should be non-empty')
        if (self.background == 'img') and (len(self.background_image_list) > self.max_background_images):
            print('Too many background images ({}). Some will be ignored'.format(len(self.background_image_list)))
            self.background_image_list = self.background_image_list[:self.max_background_images]
for i, img in enumerate(self.background_image_list):
try:
check_is_image(img)
except:
try:
# print('reading image')
self.background_image_list[i] = read(img)
except:
raise UserWarning('Cannot read an image')
        if self.salt < 0:
            raise UserWarning('salt must be >= 0')
        if self.pepper < 0:
            raise UserWarning('pepper must be >= 0')
        if self.gauss_var < 0:
            raise UserWarning('gauss_var must be >= 0')
        if (self.smooth_kernel_size < 1) or (self.smooth_kernel_size % 2 == 0):
            raise UserWarning('smooth_kernel_size must be a positive odd integer')
def _check_input(self, img_list, mask_list, class_list=None):
assert len(img_list) == len(mask_list)
for i in range(len(img_list)):
check_is_image(img_list[i])
assert img_list[i].shape[:2] == mask_list[i].shape[:2]
if mask_list[i].shape[2] == 1:
mask_list[i] = single2multi(mask_list[i])
check_is_image(mask_list[i])
if 'class' in self.output_type_list:
assert class_list is not None
assert len(class_list) == len(img_list)
return img_list, mask_list, class_list
def _get_scene_size(self):
objects_height_ends = [
self._call_buffer['objects_positions'][i][0] + self._call_buffer['real_img_sizes'][i][0]
for i in range(len(self._call_buffer['objects_positions']))]
objects_width_ends = [
self._call_buffer['objects_positions'][i][1] + self._call_buffer['real_img_sizes'][i][1]
for i in range(len(self._call_buffer['objects_positions']))]
max_height = max(objects_height_ends)
max_width = max(objects_width_ends)
if self.persp_trans > 0:
added_width = int(max_width * self.persp_trans)
added_width = added_width if added_width % 2 == 0 else added_width + 1
else:
added_width = 0
self._call_buffer['added_width'] = added_width
self._call_buffer['scene_size'] = [max_height, max_width + added_width]
@staticmethod
def _transform_background(back):
return flip(back, 0.5)
def _get_scene(self):
if self.background == 'none':
self._call_buffer['scene'] = np.zeros((*self._call_buffer['scene_size'], 3))
elif self.background == 'img':
background_image = random.choice(self.background_image_list).copy()
background_image = self._transform_background(background_image)
if ((background_image.shape[0] > self._call_buffer['scene_size'][0])
and (background_image.shape[1] > self._call_buffer['scene_size'][1])):
self._call_buffer['scene'] = random_crop(background_image, self._call_buffer['scene_size'])
else:
self._call_buffer['scene'] = resize(background_image, self._call_buffer['scene_size'])
@abstractmethod
def _transform_masks(self):
pass
def _transform_pairs(self):
for i in range(len(self._call_buffer['img_list'])):
self._call_buffer['img_list'][i] = format_image(self._call_buffer['img_list'][i])
self._call_buffer['mask_list'][i] = format_image(self._call_buffer['mask_list'][i])
self._call_buffer['img_list'][i], self._call_buffer['mask_list'][i] = rotate_pair(
self._call_buffer['img_list'][i], self._call_buffer['mask_list'][i], self.max_rotate_degree
)
self._call_buffer['img_list'][i], self._call_buffer['mask_list'][i] = flip_pair(
self._call_buffer['img_list'][i], self._call_buffer['mask_list'][i], self.flip_prob
)
if self.adjust_mask:
                [(x_min, y_max), (_, y_min), (x_max, _), (_, _), (_, _)] = mask2bbox(self._call_buffer['mask_list'][i])
self._call_buffer['img_list'][i] = self._call_buffer['img_list'][i][y_min:y_max, x_min:x_max, :]
self._call_buffer['mask_list'][i] = self._call_buffer['mask_list'][i][y_min:y_max, x_min:x_max, :]
if self.pad_mask > 0:
self._call_buffer['img_list'][i] = pad(self._call_buffer['img_list'][i], self.pad_mask)
self._call_buffer['mask_list'][i] = pad(self._call_buffer['mask_list'][i], self.pad_mask)
def _add_main_masks(self):
self._call_buffer['main_masks'] = {}
for mask in self.output_type_list:
self._call_buffer['main_masks'][mask] = np.zeros_like(self._call_buffer['scene'])
def _embed_pairs(self):
for i in range(len(self._call_buffer['img_list'])):
self._call_buffer['scene'], self._call_buffer['main_masks'] = embed_pair(
self._call_buffer['img_list'][i],
self._call_buffer['mask_list'][i],
self._call_buffer['scene'],
self._call_buffer['small_masks'][i],
self._call_buffer['main_masks'],
self._call_buffer['added_width'],
self._call_buffer['objects_positions'][i]
)
def _embed_bbox(self, bbox, start):
(x_min, y_max), (_, y_min), (x_max, _), (_, _), (_, _) = bbox
h = start[0]
w = start[1]
x_min += w
x_max += w
y_min += h
y_max += h
return (x_min, y_max), (x_min, y_min), (x_max, y_min), (x_max, y_max), (x_min, y_max)
def _get_bboxes(self):
if self.bboxes:
if 'multi-object' in self.output_type_list:
mo_bboxes = []
for obj in self._call_buffer['small_masks']:
mo_bboxes.append(mask2bbox(obj['multi-object']))
self._call_buffer['bboxes']['multi-object'] = [self._embed_bbox(
mo_bboxes[i], self._call_buffer['objects_positions'][i])
for i in range(len(mo_bboxes))]
if self.persp_trans > 0:
self._call_buffer['bboxes']['multi-object'] = [bbox_perspective_transform(
np.array([bbox], np.float32),
self._call_buffer['added_width'],
[self._call_buffer['scene'].shape[0],
self._call_buffer['scene'].shape[1] + self._call_buffer['added_width']])
for bbox in self._call_buffer['bboxes']['multi-object']]
def _transform_pipeline(self, i_list, m_list, class_list=None):
img_list = [i.copy() for i in i_list]
mask_list = [m.copy() for m in m_list]
self._call_buffer = {
'bboxes': {}
}
self._call_buffer['img_list'], self._call_buffer['mask_list'], self._call_buffer[
'class_list'] = self._check_input(img_list, mask_list, class_list)
self._transform_pairs()
self._transform_masks()
self._call_buffer['real_img_sizes'] = [[img.shape[0], img.shape[1]] for img in self._call_buffer['img_list']]
self._call_buffer['shrink_img_sizes'] = [
[int(img[0] * (1 - self.overlap_ratio)), int(img[1] * (1 - self.overlap_ratio))]
for img in self._call_buffer['real_img_sizes']]
self._call_buffer['objects_positions'] = get_pack_coords(self._call_buffer['shrink_img_sizes'])
self._get_scene_size()
self._get_scene()
self._add_main_masks()
self._embed_pairs()
if self.persp_trans > 0:
self._call_buffer['scene'] = perspective_transform(
self._call_buffer['scene'], self._call_buffer['added_width'])
for mask in self._call_buffer['main_masks']:
self._call_buffer['main_masks'][mask] = perspective_transform(
self._call_buffer['main_masks'][mask], self._call_buffer['added_width'])
self._call_buffer['scene'] = add_salt(self._call_buffer['scene'], self.salt)
self._call_buffer['scene'] = add_pepper(self._call_buffer['scene'], self.pepper)
self._call_buffer['scene'] = gauss_noise(self._call_buffer['scene'], self.gauss_var)
self._call_buffer['scene'] = smooth(self._call_buffer['scene'], self.smooth_kernel_size)
self._get_bboxes()
return self._call_buffer
def transform(self, img_list, mask_list, class_list=None):
call_buffer = self._transform_pipeline(img_list, mask_list, class_list)
result = {'scene': call_buffer['scene'], 'masks': {}}
if self.bboxes:
result['bboxes'] = self._call_buffer['bboxes']
for mask in self.output_type_list:
result['masks'][mask] = call_buffer['main_masks'][mask]
return result
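# Illustrative usage sketch (not part of the original module). It assumes a
# concrete Augmentor subclass that implements get_input_type() and
# _transform_masks(); "SingleMaskAugmentor", img_list, and mask_list are
# hypothetical names:
#
#   aug = SingleMaskAugmentor({'output_type_list': ['single'], 'flip_prob': 0.5})
#   result = aug.transform(img_list, mask_list)
#   scene, single_mask = result['scene'], result['masks']['single']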
|
11591905
|
from django.http import JsonResponse, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, Http404
from django.utils.crypto import get_random_string
from django.contrib.auth.decorators import login_required
from django.dispatch import Signal
from django.dispatch import receiver
from core.signals import signal_new_comment, signal_update_comment, signal_delete_comment, signal_vote
from core.constants import Constants
from comment.models import Thread, Comment, Vote, Site, Board
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
import datetime
import re
import logging
import mimetypes
logger = logging.getLogger(__name__)
"""
================================================================================================================================================================
COMMENT API ENDPOINTS
================================================================================================================================================================
"""
def new_comments_thread(site_id, board_id, thread_id):
site = None
try:
site = Site.objects.get(display_id=site_id)
except Exception as e:
return None
board = None
try:
board = Board.objects.get(display_id=board_id, site=site)
except ObjectDoesNotExist:
board = Board.objects.get(display_name='default', site=site)
logger.info('using default board')
except Exception as e:
logger.warn(e)
thread = None
try:
logger.info(site.display_id+thread_id)
thread = Thread(owner=board.owner, display_id=site.display_id+thread_id, site_id=site.display_id, board=board)
thread.save()
    except Exception as e:
        logger.info(e)
        logger.info(site.display_id+thread_id)
    return thread
def get_comments(request, site_id, board_id, thread_id):
logger.info("api_comments")
thread = None
try:
thread = Thread.objects.get(display_id=site_id+thread_id, site_id=site_id)
except ObjectDoesNotExist:
logger.info("New Comments Thread")
thread = new_comments_thread(site_id, board_id, thread_id)
except Exception as e:
logger.warn(e)
comments = None
try:
comments = Comment.objects.filter(thread=thread, state=Constants.STATE_VISIBLE)
except Exception as e:
logger.warn(e)
commentsArray=[]
for c in comments:
        parent = c.parent
created_by_current_user = False
if request.user.is_authenticated and request.user.profile == c.creator:
created_by_current_user = True
user_has_upvoted = False
if c.up_vote_count > 0 and request.user.is_authenticated:
try:
Vote.objects.get(comment=c, voter=request.user.profile)
user_has_upvoted = True
except ObjectDoesNotExist:
user_has_upvoted = False
except Exception as e:
logger.warn(e)
modified = None
if c.modified:
modified = str(c.modified)
comment = { "id": c.comment_id, "parent": parent, "created": str(c.date_created), "modified": modified, "content": c.content, "pings": [], "creator": c.creator.display_id, "fullname": c.creator.display_name, "created_by_admin": False, "created_by_current_user": created_by_current_user, "upvote_count": c.up_vote_count, "user_has_upvoted": user_has_upvoted, "is_new": False }
if c.file_url:
comment['file_url'] = c.file_url
comment['file_mime_type'] = c.file_mime_type
commentsArray.append(comment)
return JsonResponse(commentsArray, safe=False)
@login_required
def post_comment(request, site_id, board_id, thread_id):
    data = {
        'site_id': site_id,
        'thread_id': thread_id,
        'id': get_random_string(length=32),
        'parent': request.POST.get('commentData[parent]'),
        'content': request.POST.get('commentData[content]'),
        'author': request.user,
        'created': request.POST.get('commentData[created]'),
        'modified': request.POST.get('commentData[modified]')
    }
    file_url = None
    file_mime_type = None
    try:
        file_url = re.search(r"(?P<url>https?://[^\s]+)", data['content']).group("url")
        if file_url:
            file_mime_type = mimetypes.guess_type(file_url)
            data['file_url'] = file_url
            data['file_mime_type'] = file_mime_type[0]
    except Exception as e:
        logger.warn(e)
signal_new_comment.send_robust(sender=1, data=data)
parent = None
if data["parent"]:
parent = data["parent"]
comment = { "id": data['id'], "parent": parent, "created": data["created"], "modified": data["modified"], "content": data["content"], "pings": [], "creator": request.user.profile.display_id, "fullname": request.user.profile.display_name, "created_by_admin": False, "created_by_current_user": True, "upvote_count": 0, "user_has_upvoted": False, "is_new": False }
if file_mime_type:
comment['file_mime_type'] = file_mime_type[0]
comment['file_url'] = file_url
return JsonResponse(comment)
@login_required
def put_comment(request, site_id, board_id, thread_id):
    data = {
        'site_id': site_id,
        'thread_id': thread_id,
        'id': request.POST.get('commentData[id]'),
        'parent': request.POST.get('commentData[parent]'),
        'content': request.POST.get('commentData[content]'),
        'author': request.user
    }
signal_update_comment.send_robust(sender=1, data=data)
return JsonResponse({'success': True})
@login_required
def delete_comment(request, site_id, board_id, thread_id):
data = { 'site_id':site_id, 'thread_id': thread_id, 'id': request.POST.get('commentData[id]'), 'author': request.user }
signal_delete_comment.send_robust(sender=1, data=data)
return JsonResponse({'success': True})
@login_required
def upvote_comment(request, site_id, board_id, thread_id):
data = { 'site_id':site_id, 'thread_id': thread_id, 'id': request.POST.get('commentData[id]'), 'author': request.user }
signal_vote.send_robust(sender=1, data=data)
return JsonResponse({'success': True})
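# Illustrative URL wiring for the endpoints above (a hypothetical urls.py,
# not part of this module; the route shapes and the "views" module name are
# assumptions):
#
#   from django.urls import path
#   from comment import views
#
#   urlpatterns = [
#       path('<site_id>/<board_id>/<thread_id>/', views.get_comments),
#       path('<site_id>/<board_id>/<thread_id>/new/', views.post_comment),
#   ]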
"""
================================================================================================================================================================
COMMENT API RECEIVERS
================================================================================================================================================================
"""
@receiver(signal_new_comment)
def on_new_comment(sender, data, **kwargs):
thread = None
try:
thread = Thread.objects.get(display_id=data['site_id']+data['thread_id'])
except Exception as e:
logger.warn(e)
# If this comment has a parent make sure it is a valid one
parent_id = None
if data['parent']:
try:
parent_id=data['parent']
Comment.objects.get(comment_id=parent_id)
except ObjectDoesNotExist:
parent_id = None
except Exception as e:
parent_id = None
logger.warn(e)
else:
parent_id = None
comment = Comment(comment_id=data["id"], thread=thread, parent=parent_id, content=data['content'], creator=data['author'].profile)
try:
comment.file_mime_type = data['file_mime_type']
comment.file_url = data['file_url']
except:
logger.info('saving comment without media')
try:
comment.save()
except Exception as e:
logger.warn(e)
@receiver(signal_update_comment)
def on_update_comment(sender, data, **kwargs):
logger.info("signal_update_comment")
comment = None
try:
comment = Comment.objects.get( comment_id=data['id'], creator=data['author'].profile )
except Exception as e:
logger.warn(e)
if comment:
try:
comment.content = data['content']
comment.modified = datetime.datetime.now(tz=timezone.utc)
comment.save()
except Exception as e:
logger.warn(e)
@receiver(signal_delete_comment)
def on_delete_comment(sender, data, **kwargs):
logger.info("signal_delete_comment")
comment = None
try:
comment = Comment.objects.get( comment_id=data['id'], creator=data['author'].profile )
except Exception as e:
logger.warn(e)
if comment:
try:
comment.state = Constants.STATE_DELETED
comment.save()
except Exception as e:
logger.warn(e)
@receiver(signal_vote)
def on_vote_comment(sender, data, **kwargs):
logger.info("signal_vote")
comment_id = data['id']
vote_down = False
comment = None
try:
comment = Comment.objects.get(comment_id=comment_id)
except Exception as e:
logger.warn(e)
try:
vote = Vote.objects.get(comment=comment, voter=data['author'].profile)
vote.delete()
vote_down = True
except ObjectDoesNotExist:
vote_down = False
        try:
            vote = Vote(comment=comment, voter=data['author'].profile)
            vote.save()
        except Exception as e:
            logger.warn(e)
    try:
        if vote_down:
            comment.up_vote_count = comment.up_vote_count - 1
        else:
            comment.up_vote_count = comment.up_vote_count + 1
        comment.save()
    except Exception as e:
        logger.warn(e)
|
11591912
|
import redis
POOL = redis.ConnectionPool(host='127.0.0.1',decode_responses=True,max_connections=20)
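# Illustrative usage sketch (not part of the original snippet): clients share
# the module-level pool instead of opening a new connection each time. The
# key name 'hits' is a hypothetical example.
if __name__ == '__main__':
    conn = redis.Redis(connection_pool=POOL)
    conn.incr('hits')           # atomic counter over the shared pool
    print(conn.get('hits'))     # decode_responses=True yields str, not bytes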
|
11591936
|
from __future__ import print_function, unicode_literals
import io
import re
from twisted.internet import reactor
from twisted.internet.defer import gatherResults, inlineCallbacks, returnValue
from twisted.internet.error import ConnectionRefusedError
from twisted.trial import unittest
import mock
from .. import _rendezvous, wormhole
from ..errors import (KeyFormatError, LonelyError, NoKeyError,
OnlyOneCodeError, ServerConnectionError, WormholeClosed,
WrongPasswordError)
from ..eventual import EventualQueue
from ..transit import allocate_tcp_port
from .common import ServerBase, poll_until
APPID = "appid"
# event orderings to exercise:
#
# * normal sender: set_code, send_phase1, connected, claimed, learn_msg2,
# learn_phase1
# * normal receiver (argv[2]=code): set_code, connected, learn_msg1,
# learn_phase1, send_phase1,
# * normal receiver (readline): connected, input_code
# *
# * set_code, then connected
# * connected, receive_pake, send_phase, set_code
class Delegate:
def __init__(self):
self.welcome = None
self.code = None
self.key = None
self.verifier = None
self.versions = None
self.messages = []
self.closed = None
def wormhole_got_welcome(self, welcome):
self.welcome = welcome
def wormhole_got_code(self, code):
self.code = code
def wormhole_got_unverified_key(self, key):
self.key = key
def wormhole_got_verifier(self, verifier):
self.verifier = verifier
def wormhole_got_versions(self, versions):
self.versions = versions
def wormhole_got_message(self, data):
self.messages.append(data)
def wormhole_closed(self, result):
self.closed = result
class Delegated(ServerBase, unittest.TestCase):
@inlineCallbacks
def test_delegated(self):
dg = Delegate()
w1 = wormhole.create(APPID, self.relayurl, reactor, delegate=dg)
# w1.debug_set_trace("W1")
with self.assertRaises(NoKeyError):
w1.derive_key("purpose", 12)
w1.set_code("1-abc")
self.assertEqual(dg.code, "1-abc")
w2 = wormhole.create(APPID, self.relayurl, reactor)
w2.set_code(dg.code)
yield poll_until(lambda: dg.key is not None)
yield poll_until(lambda: dg.verifier is not None)
yield poll_until(lambda: dg.versions is not None)
w1.send_message(b"ping")
got = yield w2.get_message()
self.assertEqual(got, b"ping")
w2.send_message(b"pong")
yield poll_until(lambda: dg.messages)
self.assertEqual(dg.messages[0], b"pong")
key1 = w1.derive_key("purpose", 16)
self.assertEqual(len(key1), 16)
self.assertEqual(type(key1), type(b""))
with self.assertRaises(TypeError):
w1.derive_key(b"not unicode", 16)
with self.assertRaises(TypeError):
w1.derive_key(12345, 16)
w1.close()
yield w2.close()
@inlineCallbacks
def test_allocate_code(self):
dg = Delegate()
w1 = wormhole.create(APPID, self.relayurl, reactor, delegate=dg)
w1.allocate_code()
yield poll_until(lambda: dg.code is not None)
w1.close()
@inlineCallbacks
def test_input_code(self):
dg = Delegate()
w1 = wormhole.create(APPID, self.relayurl, reactor, delegate=dg)
h = w1.input_code()
h.choose_nameplate("123")
h.choose_words("purple-elephant")
yield poll_until(lambda: dg.code is not None)
w1.close()
class Wormholes(ServerBase, unittest.TestCase):
# integration test, with a real server
@inlineCallbacks
def setUp(self):
# test_welcome wants to see [current_cli_version]
yield self._setup_relay(None, advertise_version="advertised.version")
def doBoth(self, d1, d2):
return gatherResults([d1, d2], True)
@inlineCallbacks
def test_allocate_default(self):
w1 = wormhole.create(APPID, self.relayurl, reactor)
w1.allocate_code()
code = yield w1.get_code()
mo = re.search(r"^\d+-\w+-\w+$", code)
self.assert_(mo, code)
# w.close() fails because we closed before connecting
yield self.assertFailure(w1.close(), LonelyError)
@inlineCallbacks
def test_allocate_more_words(self):
w1 = wormhole.create(APPID, self.relayurl, reactor)
w1.allocate_code(3)
code = yield w1.get_code()
mo = re.search(r"^\d+-\w+-\w+-\w+$", code)
self.assert_(mo, code)
yield self.assertFailure(w1.close(), LonelyError)
@inlineCallbacks
def test_basic(self):
w1 = wormhole.create(APPID, self.relayurl, reactor)
# w1.debug_set_trace("W1")
with self.assertRaises(NoKeyError):
w1.derive_key("purpose", 12)
w2 = wormhole.create(APPID, self.relayurl, reactor)
# w2.debug_set_trace(" W2")
w1.allocate_code()
code = yield w1.get_code()
w2.set_code(code)
yield w1.get_unverified_key()
yield w2.get_unverified_key()
key1 = w1.derive_key("purpose", 16)
self.assertEqual(len(key1), 16)
self.assertEqual(type(key1), type(b""))
with self.assertRaises(TypeError):
w1.derive_key(b"not unicode", 16)
with self.assertRaises(TypeError):
w1.derive_key(12345, 16)
verifier1 = yield w1.get_verifier()
verifier2 = yield w2.get_verifier()
self.assertEqual(verifier1, verifier2)
versions1 = yield w1.get_versions()
versions2 = yield w2.get_versions()
# app-versions are exercised properly in test_versions, this just
# tests the defaults
self.assertEqual(versions1, {})
self.assertEqual(versions2, {})
w1.send_message(b"data1")
w2.send_message(b"data2")
dataX = yield w1.get_message()
dataY = yield w2.get_message()
self.assertEqual(dataX, b"data2")
self.assertEqual(dataY, b"data1")
versions1_again = yield w1.get_versions()
self.assertEqual(versions1, versions1_again)
c1 = yield w1.close()
self.assertEqual(c1, "happy")
c2 = yield w2.close()
self.assertEqual(c2, "happy")
@inlineCallbacks
def test_get_code_early(self):
eq = EventualQueue(reactor)
w1 = wormhole.create(APPID, self.relayurl, reactor, _eventual_queue=eq)
d = w1.get_code()
w1.set_code("1-abc")
yield eq.flush()
code = self.successResultOf(d)
self.assertEqual(code, "1-abc")
yield self.assertFailure(w1.close(), LonelyError)
@inlineCallbacks
def test_get_code_late(self):
eq = EventualQueue(reactor)
w1 = wormhole.create(APPID, self.relayurl, reactor, _eventual_queue=eq)
w1.set_code("1-abc")
d = w1.get_code()
yield eq.flush()
code = self.successResultOf(d)
self.assertEqual(code, "1-abc")
yield self.assertFailure(w1.close(), LonelyError)
@inlineCallbacks
def test_same_message(self):
# the two sides use random nonces for their messages, so it's ok for
# both to try and send the same body: they'll result in distinct
# encrypted messages
w1 = wormhole.create(APPID, self.relayurl, reactor)
w2 = wormhole.create(APPID, self.relayurl, reactor)
w1.allocate_code()
code = yield w1.get_code()
w2.set_code(code)
w1.send_message(b"data")
w2.send_message(b"data")
dataX = yield w1.get_message()
dataY = yield w2.get_message()
self.assertEqual(dataX, b"data")
self.assertEqual(dataY, b"data")
yield w1.close()
yield w2.close()
@inlineCallbacks
def test_interleaved(self):
w1 = wormhole.create(APPID, self.relayurl, reactor)
w2 = wormhole.create(APPID, self.relayurl, reactor)
w1.allocate_code()
code = yield w1.get_code()
w2.set_code(code)
w1.send_message(b"data1")
dataY = yield w2.get_message()
self.assertEqual(dataY, b"data1")
d = w1.get_message()
w2.send_message(b"data2")
dataX = yield d
self.assertEqual(dataX, b"data2")
yield w1.close()
yield w2.close()
@inlineCallbacks
def test_unidirectional(self):
w1 = wormhole.create(APPID, self.relayurl, reactor)
w2 = wormhole.create(APPID, self.relayurl, reactor)
w1.allocate_code()
code = yield w1.get_code()
w2.set_code(code)
w1.send_message(b"data1")
dataY = yield w2.get_message()
self.assertEqual(dataY, b"data1")
yield w1.close()
yield w2.close()
@inlineCallbacks
def test_early(self):
w1 = wormhole.create(APPID, self.relayurl, reactor)
w1.send_message(b"data1")
w2 = wormhole.create(APPID, self.relayurl, reactor)
d = w2.get_message()
w1.set_code("123-abc-def")
w2.set_code("123-abc-def")
dataY = yield d
self.assertEqual(dataY, b"data1")
yield w1.close()
yield w2.close()
@inlineCallbacks
def test_fixed_code(self):
w1 = wormhole.create(APPID, self.relayurl, reactor)
w2 = wormhole.create(APPID, self.relayurl, reactor)
w1.set_code("123-purple-elephant")
w2.set_code("123-purple-elephant")
w1.send_message(b"data1"), w2.send_message(b"data2")
dl = yield self.doBoth(w1.get_message(), w2.get_message())
(dataX, dataY) = dl
self.assertEqual(dataX, b"data2")
self.assertEqual(dataY, b"data1")
yield w1.close()
yield w2.close()
@inlineCallbacks
def test_input_code(self):
w1 = wormhole.create(APPID, self.relayurl, reactor)
w2 = wormhole.create(APPID, self.relayurl, reactor)
w1.set_code("123-purple-elephant")
h = w2.input_code()
h.choose_nameplate("123")
# Pause to allow some messages to get delivered. Specifically we want
# to wait until w2 claims the nameplate, opens the mailbox, and
# receives the PAKE message, to exercise the PAKE-before-CODE path in
# Key.
yield poll_until(lambda: w2._boss._K._debug_pake_stashed)
h.choose_words("purple-elephant")
w1.send_message(b"data1"), w2.send_message(b"data2")
dl = yield self.doBoth(w1.get_message(), w2.get_message())
(dataX, dataY) = dl
self.assertEqual(dataX, b"data2")
self.assertEqual(dataY, b"data1")
yield w1.close()
yield w2.close()
@inlineCallbacks
def test_multiple_messages(self):
w1 = wormhole.create(APPID, self.relayurl, reactor)
w2 = wormhole.create(APPID, self.relayurl, reactor)
w1.set_code("123-purple-elephant")
w2.set_code("123-purple-elephant")
w1.send_message(b"data1"), w2.send_message(b"data2")
w1.send_message(b"data3"), w2.send_message(b"data4")
dl = yield self.doBoth(w1.get_message(), w2.get_message())
(dataX, dataY) = dl
self.assertEqual(dataX, b"data2")
self.assertEqual(dataY, b"data1")
dl = yield self.doBoth(w1.get_message(), w2.get_message())
(dataX, dataY) = dl
self.assertEqual(dataX, b"data4")
self.assertEqual(dataY, b"data3")
yield w1.close()
yield w2.close()
@inlineCallbacks
def test_closed(self):
eq = EventualQueue(reactor)
w1 = wormhole.create(APPID, self.relayurl, reactor, _eventual_queue=eq)
w2 = wormhole.create(APPID, self.relayurl, reactor, _eventual_queue=eq)
w1.set_code("123-foo")
w2.set_code("123-foo")
# let it connect and become HAPPY
yield w1.get_versions()
yield w2.get_versions()
yield w1.close()
yield w2.close()
        # once closed, all Deferred-yielding API calls get a prompt error
yield self.assertFailure(w1.get_welcome(), WormholeClosed)
e = yield self.assertFailure(w1.get_code(), WormholeClosed)
self.assertEqual(e.args[0], "happy")
yield self.assertFailure(w1.get_unverified_key(), WormholeClosed)
yield self.assertFailure(w1.get_verifier(), WormholeClosed)
yield self.assertFailure(w1.get_versions(), WormholeClosed)
yield self.assertFailure(w1.get_message(), WormholeClosed)
@inlineCallbacks
def test_closed_idle(self):
yield self._relay_server.disownServiceParent()
w1 = wormhole.create(APPID, self.relayurl, reactor)
# without a relay server, this won't ever connect
d_welcome = w1.get_welcome()
self.assertNoResult(d_welcome)
d_code = w1.get_code()
d_key = w1.get_unverified_key()
d_verifier = w1.get_verifier()
d_versions = w1.get_versions()
d_message = w1.get_message()
yield self.assertFailure(w1.close(), LonelyError)
yield self.assertFailure(d_welcome, LonelyError)
yield self.assertFailure(d_code, LonelyError)
yield self.assertFailure(d_key, LonelyError)
yield self.assertFailure(d_verifier, LonelyError)
yield self.assertFailure(d_versions, LonelyError)
yield self.assertFailure(d_message, LonelyError)
@inlineCallbacks
def test_wrong_password(self):
eq = EventualQueue(reactor)
w1 = wormhole.create(APPID, self.relayurl, reactor, _eventual_queue=eq)
w2 = wormhole.create(APPID, self.relayurl, reactor, _eventual_queue=eq)
w1.allocate_code()
code = yield w1.get_code()
w2.set_code(code + "not")
code2 = yield w2.get_code()
self.assertNotEqual(code, code2)
# That's enough to allow both sides to discover the mismatch, but
# only after the confirmation message gets through. API calls that
# don't wait will appear to work until the mismatched confirmation
# message arrives.
w1.send_message(b"should still work")
w2.send_message(b"should still work")
key2 = yield w2.get_unverified_key() # should work
# w2 has just received w1.PAKE, and is about to send w2.VERSION
key1 = yield w1.get_unverified_key() # should work
# w1 has just received w2.PAKE, and is about to send w1.VERSION, and
# then will receive w2.VERSION. When it sees w2.VERSION, it will
# learn about the WrongPasswordError.
self.assertNotEqual(key1, key2)
# API calls that wait (i.e. get) will errback. We collect all these
# Deferreds early to exercise the wait-then-fail path
d1_verified = w1.get_verifier()
d1_versions = w1.get_versions()
d1_received = w1.get_message()
d2_verified = w2.get_verifier()
d2_versions = w2.get_versions()
d2_received = w2.get_message()
# wait for each side to notice the failure
yield self.assertFailure(w1.get_verifier(), WrongPasswordError)
yield self.assertFailure(w2.get_verifier(), WrongPasswordError)
# the rest of the loops should fire within the next tick
yield eq.flush()
# now all the rest should have fired already
self.failureResultOf(d1_verified, WrongPasswordError)
self.failureResultOf(d1_versions, WrongPasswordError)
self.failureResultOf(d1_received, WrongPasswordError)
self.failureResultOf(d2_verified, WrongPasswordError)
self.failureResultOf(d2_versions, WrongPasswordError)
self.failureResultOf(d2_received, WrongPasswordError)
# and at this point, with the failure safely noticed by both sides,
# new get_unverified_key() calls should signal the failure, even
# before we close
# any new calls in the error state should immediately fail
yield self.assertFailure(w1.get_unverified_key(), WrongPasswordError)
yield self.assertFailure(w1.get_verifier(), WrongPasswordError)
yield self.assertFailure(w1.get_versions(), WrongPasswordError)
yield self.assertFailure(w1.get_message(), WrongPasswordError)
yield self.assertFailure(w2.get_unverified_key(), WrongPasswordError)
yield self.assertFailure(w2.get_verifier(), WrongPasswordError)
yield self.assertFailure(w2.get_versions(), WrongPasswordError)
yield self.assertFailure(w2.get_message(), WrongPasswordError)
yield self.assertFailure(w1.close(), WrongPasswordError)
yield self.assertFailure(w2.close(), WrongPasswordError)
# API calls should still get the error, not WormholeClosed
yield self.assertFailure(w1.get_unverified_key(), WrongPasswordError)
yield self.assertFailure(w1.get_verifier(), WrongPasswordError)
yield self.assertFailure(w1.get_versions(), WrongPasswordError)
yield self.assertFailure(w1.get_message(), WrongPasswordError)
yield self.assertFailure(w2.get_unverified_key(), WrongPasswordError)
yield self.assertFailure(w2.get_verifier(), WrongPasswordError)
yield self.assertFailure(w2.get_versions(), WrongPasswordError)
yield self.assertFailure(w2.get_message(), WrongPasswordError)
@inlineCallbacks
def test_wrong_password_with_spaces(self):
w = wormhole.create(APPID, self.relayurl, reactor)
badcode = "4 oops spaces"
with self.assertRaises(KeyFormatError) as ex:
w.set_code(badcode)
expected_msg = "Code '%s' contains spaces." % (badcode, )
self.assertEqual(expected_msg, str(ex.exception))
yield self.assertFailure(w.close(), LonelyError)
@inlineCallbacks
def test_wrong_password_with_leading_space(self):
w = wormhole.create(APPID, self.relayurl, reactor)
badcode = " 4-oops-space"
with self.assertRaises(KeyFormatError) as ex:
w.set_code(badcode)
expected_msg = "Code '%s' contains spaces." % (badcode, )
self.assertEqual(expected_msg, str(ex.exception))
yield self.assertFailure(w.close(), LonelyError)
@inlineCallbacks
def test_wrong_password_with_non_numeric_nameplate(self):
w = wormhole.create(APPID, self.relayurl, reactor)
badcode = "four-oops-space"
with self.assertRaises(KeyFormatError) as ex:
w.set_code(badcode)
expected_msg = "Nameplate 'four' must be numeric, with no spaces."
self.assertEqual(expected_msg, str(ex.exception))
yield self.assertFailure(w.close(), LonelyError)
@inlineCallbacks
def test_welcome(self):
w1 = wormhole.create(APPID, self.relayurl, reactor)
wel1 = yield w1.get_welcome() # early: before connection established
wel2 = yield w1.get_welcome() # late: already received welcome
self.assertEqual(wel1, wel2)
self.assertIn("current_cli_version", wel1)
# cause an error, so a later get_welcome will return the error
w1.set_code("123-foo")
w2 = wormhole.create(APPID, self.relayurl, reactor)
w2.set_code("123-NOT")
yield self.assertFailure(w1.get_verifier(), WrongPasswordError)
yield self.assertFailure(w1.get_welcome(), WrongPasswordError) # late
yield self.assertFailure(w1.close(), WrongPasswordError)
yield self.assertFailure(w2.close(), WrongPasswordError)
@inlineCallbacks
def test_verifier(self):
eq = EventualQueue(reactor)
w1 = wormhole.create(APPID, self.relayurl, reactor, _eventual_queue=eq)
w2 = wormhole.create(APPID, self.relayurl, reactor, _eventual_queue=eq)
w1.allocate_code()
code = yield w1.get_code()
w2.set_code(code)
v1 = yield w1.get_verifier() # early
v2 = yield w2.get_verifier()
self.failUnlessEqual(type(v1), type(b""))
self.failUnlessEqual(v1, v2)
w1.send_message(b"data1")
w2.send_message(b"data2")
dataX = yield w1.get_message()
dataY = yield w2.get_message()
self.assertEqual(dataX, b"data2")
self.assertEqual(dataY, b"data1")
# calling get_verifier() this late should fire right away
d = w2.get_verifier()
yield eq.flush()
v1_late = self.successResultOf(d)
self.assertEqual(v1_late, v1)
yield w1.close()
yield w2.close()
@inlineCallbacks
def test_versions(self):
# there's no API for this yet, but make sure the internals work
w1 = wormhole.create(
APPID, self.relayurl, reactor, versions={"w1": 123})
w2 = wormhole.create(
APPID, self.relayurl, reactor, versions={"w2": 456})
w1.allocate_code()
code = yield w1.get_code()
w2.set_code(code)
w1_versions = yield w2.get_versions()
self.assertEqual(w1_versions, {"w1": 123})
w2_versions = yield w1.get_versions()
self.assertEqual(w2_versions, {"w2": 456})
yield w1.close()
yield w2.close()
@inlineCallbacks
def test_rx_dedup(self):
# Future clients will handle losing/reestablishing the Rendezvous
# Server connection by retransmitting messages, which will sometimes
# cause duplicate messages. Make sure this client can tolerate them.
# The first place this would fail was when the second copy of the
# incoming PAKE message was received, which would cause
# SPAKE2.finish() to be called a second time, which throws an error
# (which, being somewhat unexpected, caused a hang rather than a
# clear exception). The Mailbox object is responsible for
# deduplication, so we must patch the RendezvousConnector to simulate
# duplicated messages.
with mock.patch("wormhole._boss.RendezvousConnector", MessageDoubler):
w1 = wormhole.create(APPID, self.relayurl, reactor)
w2 = wormhole.create(APPID, self.relayurl, reactor)
w1.set_code("123-purple-elephant")
w2.set_code("123-purple-elephant")
w1.send_message(b"data1"), w2.send_message(b"data2")
dl = yield self.doBoth(w1.get_message(), w2.get_message())
(dataX, dataY) = dl
self.assertEqual(dataX, b"data2")
self.assertEqual(dataY, b"data1")
yield w1.close()
yield w2.close()
class MessageDoubler(_rendezvous.RendezvousConnector):
# we could double messages on the sending side, but a future server will
# strip those duplicates, so to really exercise the receiver, we must
# double them on the inbound side instead
# def _msg_send(self, phase, body):
# wormhole._Wormhole._msg_send(self, phase, body)
# self._ws_send_command("add", phase=phase, body=bytes_to_hexstr(body))
def _response_handle_message(self, msg):
_rendezvous.RendezvousConnector._response_handle_message(self, msg)
_rendezvous.RendezvousConnector._response_handle_message(self, msg)
class Errors(ServerBase, unittest.TestCase):
@inlineCallbacks
def test_derive_key_early(self):
w = wormhole.create(APPID, self.relayurl, reactor)
# definitely too early
with self.assertRaises(NoKeyError):
w.derive_key("purpose", 12)
yield self.assertFailure(w.close(), LonelyError)
@inlineCallbacks
def test_multiple_set_code(self):
w = wormhole.create(APPID, self.relayurl, reactor)
w.set_code("123-purple-elephant")
# code can only be set once
with self.assertRaises(OnlyOneCodeError):
w.set_code("123-nope")
yield self.assertFailure(w.close(), LonelyError)
@inlineCallbacks
def test_allocate_and_set_code(self):
w = wormhole.create(APPID, self.relayurl, reactor)
w.allocate_code()
yield w.get_code()
with self.assertRaises(OnlyOneCodeError):
w.set_code("123-nope")
yield self.assertFailure(w.close(), LonelyError)
class Reconnection(ServerBase, unittest.TestCase):
@inlineCallbacks
def test_basic(self):
w1 = wormhole.create(APPID, self.relayurl, reactor)
w1_in = []
w1._boss._RC._debug_record_inbound_f = w1_in.append
# w1.debug_set_trace("W1")
w1.allocate_code()
code = yield w1.get_code()
w1.send_message(b"data1") # queued until wormhole is established
# now wait until we've deposited all our messages on the server
def seen_our_pake():
for m in w1_in:
if m["type"] == "message" and m["phase"] == "pake":
return True
return False
yield poll_until(seen_our_pake)
w1_in[:] = []
# drop the connection
w1._boss._RC._ws.transport.loseConnection()
# wait for it to reconnect and redeliver all the messages. The server
# sends mtype=message messages in random order, but we've only sent
# one of them, so it's safe to wait for just the PAKE phase.
yield poll_until(seen_our_pake)
# now let the second side proceed. this simulates the most common
# case: the server is bounced while the sender is waiting, before the
# receiver has started
w2 = wormhole.create(APPID, self.relayurl, reactor)
# w2.debug_set_trace(" W2")
w2.set_code(code)
dataY = yield w2.get_message()
self.assertEqual(dataY, b"data1")
w2.send_message(b"data2")
dataX = yield w1.get_message()
self.assertEqual(dataX, b"data2")
c1 = yield w1.close()
self.assertEqual(c1, "happy")
c2 = yield w2.close()
self.assertEqual(c2, "happy")
class InitialFailure(unittest.TestCase):
@inlineCallbacks
def assertSCEFailure(self, eq, d, innerType):
yield eq.flush()
f = self.failureResultOf(d, ServerConnectionError)
inner = f.value.reason
self.assertIsInstance(inner, innerType)
returnValue(inner)
@inlineCallbacks
def test_bad_dns(self):
eq = EventualQueue(reactor)
# point at a URL that will never connect
w = wormhole.create(
APPID, "ws://%%%.example.org:4000/v1", reactor, _eventual_queue=eq)
# that should have already received an error, when it tried to
# resolve the bogus DNS name. All API calls will return an error.
e = yield self.assertSCEFailure(eq, w.get_unverified_key(), ValueError)
self.assertIsInstance(e, ValueError)
self.assertEqual(str(e), "invalid hostname: %%%.example.org")
yield self.assertSCEFailure(eq, w.get_code(), ValueError)
yield self.assertSCEFailure(eq, w.get_verifier(), ValueError)
yield self.assertSCEFailure(eq, w.get_versions(), ValueError)
yield self.assertSCEFailure(eq, w.get_message(), ValueError)
@inlineCallbacks
def assertSCE(self, d, innerType):
e = yield self.assertFailure(d, ServerConnectionError)
inner = e.reason
self.assertIsInstance(inner, innerType)
returnValue(inner)
@inlineCallbacks
def test_no_connection(self):
# point at a URL that will never connect
port = allocate_tcp_port()
w = wormhole.create(APPID, "ws://127.0.0.1:%d/v1" % port, reactor)
# nothing is listening, but it will take a turn to discover that
d1 = w.get_code()
d2 = w.get_unverified_key()
d3 = w.get_verifier()
d4 = w.get_versions()
d5 = w.get_message()
yield self.assertSCE(d1, ConnectionRefusedError)
yield self.assertSCE(d2, ConnectionRefusedError)
yield self.assertSCE(d3, ConnectionRefusedError)
yield self.assertSCE(d4, ConnectionRefusedError)
yield self.assertSCE(d5, ConnectionRefusedError)
@inlineCallbacks
def test_all_deferreds(self):
# point at a URL that will never connect
port = allocate_tcp_port()
w = wormhole.create(APPID, "ws://127.0.0.1:%d/v1" % port, reactor)
# nothing is listening, but it will take a turn to discover that
w.allocate_code()
d1 = w.get_code()
d2 = w.get_unverified_key()
d3 = w.get_verifier()
d4 = w.get_versions()
d5 = w.get_message()
yield self.assertSCE(d1, ConnectionRefusedError)
yield self.assertSCE(d2, ConnectionRefusedError)
yield self.assertSCE(d3, ConnectionRefusedError)
yield self.assertSCE(d4, ConnectionRefusedError)
yield self.assertSCE(d5, ConnectionRefusedError)
class Trace(unittest.TestCase):
def test_basic(self):
w1 = wormhole.create(APPID, "ws://localhost:1", reactor)
stderr = io.StringIO()
w1.debug_set_trace("W1", file=stderr)
# if Automat doesn't have the tracing API, then we won't actually
# exercise the tracing function, so exercise the RendezvousConnector
# function manually (it isn't a state machine, so it will always wire
# up the tracer)
w1._boss._RC._debug("what")
stderr = io.StringIO()
out = w1._boss._print_trace("OLD", "IN", "NEW", "C1", "M1", stderr)
self.assertEqual(stderr.getvalue().splitlines(),
["C1.M1[OLD].IN -> [NEW]"])
out("OUT1")
self.assertEqual(stderr.getvalue().splitlines(),
["C1.M1[OLD].IN -> [NEW]", " C1.M1.OUT1()"])
w1._boss._print_trace("", "R.connected", "", "C1", "RC1", stderr)
self.assertEqual(
stderr.getvalue().splitlines(),
["C1.M1[OLD].IN -> [NEW]", " C1.M1.OUT1()", "C1.RC1.R.connected"])
def test_delegated(self):
dg = Delegate()
w1 = wormhole.create(APPID, "ws://localhost:1", reactor, delegate=dg)
stderr = io.StringIO()
w1.debug_set_trace("W1", file=stderr)
w1._boss._RC._debug("what")
|
11591966
|
def sum(array, initial, to):
total = 0
for i in range(initial, to + 1):
total += array[i]
return total
def partition(array, N, K):
if K == 1:
return sum(array, 0, N - 1)
if N == 1:
return array[0]
best = 100000000
for i in range(1, N + 1):
best = min(best,
max(partition(array, i, K - 1),
sum(array, i, N - 1)))
return best
N, K, T = map(int, input("Enter array size, number of painters and time: ").split())
x = list(map(int, input("Enter board sizes:").split()))
res = partition(x, N, K)
print("Minimum time required to paint all the boards: " + str(res * T))
"""
Example 1:
Sample Input:
Enter array size, number of painters and time: 2 2 5
Enter board sizes: 1 10
Minimum time required to paint all the boards: 50
Time complexity: exponential for the naive recursion above; memoizing the
(N, K) states reduces it to O(K * N^3).
"""
|
11591994
|
import argparse
import sys
from sourced.ml.cmd.args import add_repo2_args
from sourced.ml.cmd import ArgumentDefaultsHelpFormatterNoNone
from cmd.code2vec_extract_features import code2vec_extract_features
def get_parser() -> argparse.ArgumentParser:
"""
Creates the cmdline argument parser.
"""
parser = argparse.ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatterNoNone)
# sourced.engine args
subparsers = parser.add_subparsers(help="Commands", dest="command")
extract_parser = subparsers.add_parser("extract",
help="Extract features from input repositories",
formatter_class=ArgumentDefaultsHelpFormatterNoNone)
extract_parser.set_defaults(handler=code2vec_extract_features)
add_repo2_args(extract_parser)
# code2vec specific args
extract_parser.add_argument('--max-length', type=int, default=5, help="Max path length.",
required=False)
extract_parser.add_argument('--max-width', type=int, default=2, help="Max path width.",
required=False)
extract_parser.add_argument('-o', '--output', type=str,
help="Output path for the Code2VecFeatures model", required=True)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
try:
handler = args.handler
except AttributeError:
def print_usage(_):
parser.print_usage()
handler = print_usage
return handler(args)
if __name__ == "__main__":
sys.exit(main())
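# Illustrative invocation (hypothetical entry-point and output names; the
# repository input flags come from add_repo2_args):
#
#   python entry.py extract --max-length 5 --max-width 2 -o code2vec_features.asdf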
|
11592000
|
import numpy as np
import pandas as pd
from ..exceptions import ArgumentsError
from ..utils import num_of_samples
class Split:
"""Class for splitting the dataset into train and test sets"""
def __init__(self):
self.df = None
self.train_df = None
self.target_label = None
self.train_y = None
self.test_df = None
self.test_y = None
self.test_size = None
self.train_size = None
self.random_state = None
self.shuffle = False
def __repr__(self):
return f"Split(test_size={self.test_size}, train_size={self.train_size}, random_state={self.random_state})"
def __validate_input(self):
"""Function to validate inputs received by train_test_split
Parameters
----------
X : pandas.core.frames.DataFrame
Input dataframe, may or may not consist of the target label.
y : pandas.core.series.Series
Target label series. If None then X consists target label
test_size : float or int
Size of test set after splitting. Can take values from 0 - 1 for float point values,
0 - Number of samples for integer values. Is complementary to train size.
train_size : float or int
Size of train set after splitting. Can take values from 0 - 1 for float point values,
0 - Number of samples for integer values. Is complementary to test size.
shuffle : bool, default = False
Decides whether the data should be shuffled before splitting
random_state : int
Seeding to be provided for shuffling before splitting.
Returns
-------
train_size: float or int
Returns default value of 0.7 if not provided any value.
test_size: float or int
Returns default value of 0.3 if not provided any value.
"""
if self.train_df is None:
raise ValueError("Feature dataframe should not be None")
if not isinstance(self.train_df, pd.core.frame.DataFrame):
raise TypeError(
"Feature dataframe is not a valid dataframe.\nExpected object"
" type: pandas.core.frame.DataFrame"
)
n_samples = num_of_samples(self.train_df)
if self.train_y is not None:
if n_samples != self.train_y.shape[0]:
                raise ValueError(
                    "Number of samples of target label and feature dataframe"
                    " unequal.\nSamples in feature dataframe:"
                    f" {self.train_df.shape[0]}\nSamples in target label:"
                    f" {self.train_y.shape[0]}"
                )
if not isinstance(self.train_y, pd.core.series.Series):
raise TypeError(
"Target label is not a valid dataframe.\nExpected object"
" type: pandas.core.series.Series"
)
        if self.test_size and self.train_size:
            if not isinstance(self.test_size, (int, float)):
                raise TypeError("test_size must be of type int or float")
            if not isinstance(self.train_size, (int, float)):
                raise TypeError("train_size must be of type int or float")
            if type(self.test_size) is not type(self.train_size):
                raise TypeError(
                    "Data types of test_size and train_size do not"
                    f" match.\ntest_size: {type(self.test_size)}.\ntrain_size:"
                    f" {type(self.train_size)}"
                )
if (
isinstance(self.test_size, float)
and self.test_size + self.train_size != 1
):
raise ValueError("test_size + train_size should be equal to 1")
elif (
isinstance(self.test_size, int)
and self.test_size + self.train_size != n_samples
):
raise ValueError(
"test_size + train_size not equal to number of samples"
)
elif self.test_size:
if isinstance(self.test_size, float) and (
self.test_size < 0 or self.test_size > 1
):
raise ValueError("test_size should be between 0 and 1")
if isinstance(self.test_size, int) and (
self.test_size < 0 or self.test_size > n_samples
):
raise ValueError(
f"test_size should be between 0 and {n_samples}"
)
self.train_size = (
1 - self.test_size
if isinstance(self.test_size, float)
else n_samples - self.test_size
)
elif self.train_size:
if isinstance(self.train_size, float) and (
self.train_size < 0 or self.train_size > 1
):
raise ValueError("train_size should be between 0 and 1")
if isinstance(self.train_size, int) and (
self.train_size < 0 or self.train_size > n_samples
):
raise ValueError(
f"train_size should be between 0 and {n_samples}"
)
self.test_size = (
1 - self.train_size
if isinstance(self.train_size, float)
else n_samples - self.train_size
)
else:
if self.train_y is None:
self.test_size = 0.2
self.train_size = 0.8
else:
features = len(self.train_df.columns)
self.test_size = float(1 / np.sqrt(features))
self.train_size = 1 - self.test_size
if not isinstance(self.shuffle, bool):
raise TypeError(
f"shuffle should be of type bool. Received {self.shuffle} of type {type(self.shuffle)}."
)
if self.random_state and not isinstance(self.random_state, int):
raise TypeError(
f"random_state should be of type int. Received {self.random_state} of type {type(self.random_state)}."
)
if self.random_state and not self.shuffle:
raise ArgumentsError(
f"random_state should be None when shuffle is set to False. Received {self.random_state} as random_state."
)
def train_test_split(self, params):
"""Performs train test split on the input data
:param train_df: Input dataframe, may or may not consist of the target label.
Should not be ``None``
:type train_df: pandas.core.frames.DataFrame
:param test_df: Input dataframe, may or may not consist of the target label.
:type test_df: pandas.core.frames.DataFrame
:param target_label: Name of the Target Column.
:type target_label: str
:param test_size: Size of test set after splitting. Can take values from
0 - 1 for float point values, 0 - Number of samples for
integer values. Is complementary to train size.
:type test_size: float, int
:param train_size: Size of train set after splitting. Can take values from
0 - 1 for float point values, 0 - Number of samples for
integer values. Is complementary to test size.
:type train_size: float, int
:param shuffle: Decides whether to shuffle data before splitting.
:type shuffle: bool, default = False
:param random_state: Seeding to be provided for shuffling before splitting.
:type random_state: int
The functions inserts the following into ``params`` -
If target label is provided
- **X_train** : pandas.core.frames.DataFrame
- **y_train** : pandas.core.series.Series
- **X_test** : pandas.core.frames.DataFrame
- **y_test** : pandas.core.series.Series
Else
            - **X_train**: pandas.core.frames.DataFrame
            - **X_test**: pandas.core.frames.DataFrame
:raises ValueError: If the target column does not have a ``name`` property
``ValueError`` is raised.
"""
if "train_df" in params.keys():
self.train_df = params["train_df"]
if "test_df" in params.keys():
self.test_df = params["test_df"]
if "target_label" in params.keys():
self.target_label = params["target_label"]
if "test_size" in params.keys():
self.test_size = params["test_size"]
if "train_size" in params.keys():
self.train_size = params["train_size"]
if "shuffle" in params.keys():
self.shuffle = params["shuffle"]
if "random_state" in params.keys():
self.random_state = params["random_state"]
        if self.target_label and self.test_df is not None:
            self.train_y = self.train_df[self.target_label]
            self.test_y = self.test_df[self.target_label]
        elif self.target_label:
            # single-dataframe mode: separate the target so it is split too
            self.train_y = self.train_df[self.target_label]
            self.train_df = self.train_df.drop([self.target_label], axis=1)
self.__validate_input()
if self.test_df is not None and self.test_y is not None:
params["X_train"] = self.train_df.drop([self.target_label], axis=1)
params["X_test"] = self.test_df.drop([self.target_label], axis=1)
params["y_train"] = self.train_y
params["y_test"] = self.test_y
elif self.test_df is not None:
params["X_train"] = self.train_df
params["X_test"] = self.test_df
else:
if self.shuffle and self.random_state:
np.random.seed(self.random_state)
if self.train_y is not None:
self.df = pd.concat([self.train_df, self.train_y], axis=1)
else:
self.df = self.train_df
if self.shuffle:
self.df = self.df.iloc[
np.random.permutation(len(self.df))
].reset_index(drop=True)
if isinstance(self.test_size, float):
index = int(self.test_size * len(self.df))
train = self.df.iloc[index:]
test = self.df.iloc[:index]
else:
train = self.df.iloc[self.test_size :]
test = self.df.iloc[: self.test_size]
if self.train_y is not None:
if not self.train_y.name:
raise ValueError(
f"Target column needs to have a name. ${self.train_y.name} was provided."
)
y_train = train[self.train_y.name]
X_train = train.drop([self.train_y.name], axis=1)
y_test = test[self.train_y.name]
X_test = test.drop([self.train_y.name], axis=1)
params["X_train"] = X_train
params["X_test"] = X_test
params["y_train"] = y_train
params["y_test"] = y_test
else:
params["X_train"] = train
params["X_test"] = test
|
11592010
|
from bs4 import BeautifulSoup
import requests
import csv
URL = "https://www.indiatoday.in/"
def writeToCSV(topTenNews, category):
with open("topTen" + category + "News.csv", "w") as file:
writer = csv.writer(file)
writer.writerow(["Date", "Link", "Headline"])
for news in topTenNews:
writer.writerow(
[news[2], "https://www.indiatoday.in/" + news[1], news[0]])
def getTopTenFromDivTag(category):
topTenNews = []
count = 0
category_url = URL + category
page = requests.get(category_url)
soup = BeautifulSoup(page.text, "html.parser")
all_div_tags = soup.find_all(class_="detail")
for div in all_div_tags:
count += 1
if count > 10:
break
headline = div.find("h2").text
link = div.find("a").attrs["href"]
date = div.find("a").attrs["href"][-10:]
topTenNews.append([headline, link, date])
return topTenNews
def getTopTenFromLiTag(category):
topTenNews = []
count = 0
category_url = URL + category
page = requests.get(category_url)
soup = BeautifulSoup(page.text, "html.parser")
ul_tag = soup.find_all(class_="itg-listing")
ul_tag = str(ul_tag)[25:-6]
li_tags = ul_tag.split("</li>")
for li in li_tags:
count += 1
if count > 10:
break
ele = li.split(">")
link = ele[1].split("=")[1][2:-1]
headline = ele[2][:-3]
date = link[-10:]
topTenNews.append([headline, link, date])
return topTenNews
def main():
categories = ["india", "world", "cities", "business", "health", "technology", "sports",
"education", "lifestyle"]
print("Please Choose a Category from the following list")
for index, category in enumerate(categories):
print(str(index + 1) + ". " + category.capitalize())
print("Example: Enter 'world' for top 10 world news")
print()
category = input()
category = category.lower()
if category not in categories:
print("\nPlease choose a valid category!")
exit()
if category in categories[:5]:
topTenNews = getTopTenFromDivTag(category)
else:
topTenNews = getTopTenFromLiTag(category)
writeToCSV(topTenNews, category)
print("Created CSV File Successfully!")
if __name__ == "__main__":
    main()
|
11592040
|
import unittest
import math
import pandas as pd
from hummingbot.core.clock import (
Clock,
ClockMode
)
from hummingbot.core.py_time_iterator import PyTimeIterator
NaN = float("nan")
class MockPyTimeIterator(PyTimeIterator):
def __init__(self):
super().__init__()
self._mock_variable = None
@property
def mock_variable(self):
return self._mock_variable
def tick(self, timestamp: float):
self._mock_variable = timestamp
class PyTimeIteratorUnitTest(unittest.TestCase):
start_timestamp: float = pd.Timestamp("2021-01-01", tz="UTC").timestamp()
end_timestamp: float = pd.Timestamp("2022-01-01 01:00:00", tz="UTC").timestamp()
tick_size: int = 10
def setUp(self):
self.py_time_iterator = MockPyTimeIterator()
self.clock = Clock(ClockMode.BACKTEST, self.tick_size, self.start_timestamp, self.end_timestamp)
self.clock.add_iterator(self.py_time_iterator)
def test_current_timestamp(self):
# On initialization, current_timestamp should be NaN
self.assertTrue(math.isnan(self.py_time_iterator.current_timestamp))
self.py_time_iterator.start(self.clock)
self.clock.backtest_til(self.start_timestamp)
self.assertEqual(self.start_timestamp, self.py_time_iterator.current_timestamp)
def test_clock(self):
# On initialization, clock should be None
self.assertTrue(self.py_time_iterator.clock is None)
self.py_time_iterator.start(self.clock)
self.assertEqual(self.clock, self.py_time_iterator.clock)
def test_start(self):
self.py_time_iterator.start(self.clock)
self.assertEqual(self.clock, self.py_time_iterator.clock)
self.assertEqual(self.start_timestamp, self.py_time_iterator.current_timestamp)
def test_stop(self):
self.py_time_iterator.start(self.clock)
self.assertEqual(self.clock, self.py_time_iterator.clock)
self.assertEqual(self.start_timestamp, self.py_time_iterator.current_timestamp)
self.py_time_iterator.stop(self.clock)
self.assertTrue(math.isnan(self.py_time_iterator.current_timestamp))
self.assertTrue(self.py_time_iterator.clock is None)
def test_tick(self):
self.py_time_iterator.start(self.clock)
self.assertEqual(self.start_timestamp, self.py_time_iterator.current_timestamp)
# c_tick is called within Clock
self.clock.backtest_til(self.start_timestamp + self.tick_size)
self.assertEqual(self.start_timestamp + self.tick_size, self.py_time_iterator.current_timestamp)
self.assertEqual(self.start_timestamp + self.tick_size, self.py_time_iterator.mock_variable)
|
11592079
|
from __future__ import print_function
import argparse
import logging
import odil
def add_subparser(subparsers):
parser = subparsers.add_parser(
"print", help="Print the contents of data sets",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("inputs", nargs="+", metavar="FILE", help="Input files")
parser.add_argument(
"--print-header", "-H", action="store_true",
help="Print the header as well as the data set")
parser.add_argument(
"--decode-uids", "-u", action="store_true",
help="Print human-friendly name of known UIDs")
parser.set_defaults(function=print_)
return parser
def print_(inputs, print_header, decode_uids):
for input in inputs:
logging.info("Printing {}".format(input))
with odil.open(input) as stream:
header, data_set = odil.Reader.read_file(stream)
max_length = find_max_name_length(data_set)
        if print_header:
            max_length = max(max_length, find_max_name_length(header))
            print_data_set(
                header, decode_uids, "", max_length, odil.Value.Strings())
            print()
print_data_set(
data_set, decode_uids, "", max_length, odil.Value.Strings())
def print_data_set(
data_set, decode_uids, padding, max_length, specific_character_set):
for tag, element in data_set.items():
name = "{:04x},{:04x}".format(tag.group, tag.element)
if tag in odil.registry.public_dictionary:
entry = odil.registry.public_dictionary[tag]
name = entry.name
if tag == odil.registry.SpecificCharacterSet:
specific_character_set = element.as_string()
if element.is_data_set():
value = "(sequence, {} item{})".format(
len(element), "s" if len(element)>1 else "")
elif element.is_binary():
lengths = [len(x) for x in element.as_binary()]
value = "(binary, {} item{}, {} byte{})".format(
len(element), "s" if len(element)>1 else "",
"+".join(str(x) for x in lengths),
"s" if sum(lengths)>1 else "")
else:
getter = None
if element.empty():
getter = lambda: []
elif element.is_int():
getter = element.as_int
elif element.is_real():
getter = element.as_real
elif element.is_string():
getter = lambda: [
odil.as_unicode(x, specific_character_set)
for x in element.as_string()]
value = [x for x in getter()]
if decode_uids and element.vr == odil.VR.UI:
value = [
odil.registry.uids_dictionary[uid].name
if uid in odil.registry.uids_dictionary else uid
for uid in value
]
print("{}{}{} {:04x},{:04x} {} {}".format(
padding,
name, (max_length-len(name)-len(padding))*" ",
tag.group, tag.element, element.vr,
value))
if element.is_data_set():
sequence = element.as_data_set()
if sequence:
for item in sequence[:-1]:
print_data_set(
item, decode_uids, padding+" ", max_length,
specific_character_set)
print()
print_data_set(
sequence[-1], decode_uids, padding+" ", max_length,
specific_character_set)
def find_max_name_length(data_set, max_length=0, padding_length=0):
for tag, element in data_set.items():
if tag in odil.registry.public_dictionary:
entry = odil.registry.public_dictionary[tag]
length = len(entry.name)
else:
length = 9 # xxxx,yyyy
max_length = max(max_length, padding_length+length)
if element.is_data_set():
sequence = element.as_data_set()
for item in sequence:
max_length = max(
max_length,
find_max_name_length(item, max_length, 2+padding_length))
return max_length
|
11592112
|
from abc import ABC
from abc import abstractmethod
from typing import Any
from typing import Optional
class Step(ABC):
def __init__(self):
pass
# Concrete implementations should raise a StepException upon encountering a fatal error to signal that the
# pipeline should exit early.
@abstractmethod
def process(self, data: Any, context: dict) -> Optional[Any]:
pass
class StepException(Exception):
pass
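# Illustrative sketch (not from the original source): a minimal concrete Step
# showing the intended contract: transform `data`, optionally record state in
# `context`, and raise StepException on a fatal error so the pipeline exits
# early. The class and key names here are hypothetical.
class UppercaseStep(Step):
    """Hypothetical example step that upper-cases string payloads."""
    def process(self, data: Any, context: dict) -> Optional[Any]:
        if not isinstance(data, str):
            # fatal for this step: signal the pipeline to stop early
            raise StepException(f"expected str, got {type(data).__name__}")
        context["last_step"] = "uppercase"  # steps may share state via context
        return data.upper()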
|
11592123
|
import os
import numpy as np
from deephyper.benchmark.benchmark_functions_wrappers import linear_
np.random.seed(2018)
def load_data(dim=10):
"""
Generate data for linear function -sum(x_i).
Return:
Tuple of Numpy arrays: ``(train_X, train_y), (valid_X, valid_y)``.
"""
# size = 100000
size = 100
prop = 0.80
f, (a, b), _ = linear_()
d = b - a
x = np.array([a + np.random.random(dim) * d for i in range(size)])
y = np.array([[f(v)] for v in x])
sep_index = int(prop * size)
sep_inputs = dim//2 # we want two different inputs
tX0, tX1 = x[:sep_index, :sep_inputs], x[:sep_index, sep_inputs:]
vX0, vX1 = x[sep_index:, :sep_inputs], x[sep_index:, sep_inputs:]
ty = y[:sep_index]
vy = y[sep_index:]
print(f'tX0 shape: {np.shape(tX0)} | tX1 shape: {np.shape(tX1)}')
print(f'ty shape: {np.shape(ty)}')
print(f'vX0 shape: {np.shape(vX0)} | vX1 shape: {np.shape(vX1)}')
print(f'vy shape: {np.shape(vy)}')
return ([tX0, tX1], ty), ([vX0, vX1], vy)
if __name__ == '__main__':
load_data()
|
11592149
|
import mock
from chroma_core.models import ManagedHost
from chroma_core.models import Volume
from chroma_core.models import VolumeNode
from chroma_core.models import ForceRemoveHostJob
from chroma_core.models import StopLNetJob
from chroma_core.models import HostOfflineAlert
from chroma_core.models import Command
from chroma_core.models import StepResult
from chroma_core.models import ManagedTarget
from tests.unit.chroma_api.chroma_api_test_case import ChromaApiTestCase
from tests.unit.chroma_core.helpers import create_simple_fs, synthetic_host
from iml_common.lib.date_time import IMLDateTime
def _remove_host_resources(host_id):
"""
In real life this would be done by ResourceManager, but in order to use that
here we would have to have fully populated the StorageResourceRecords for all
VolumeNodes, which is a bit heavyweight.
"""
volume_ids = set()
for vn in VolumeNode.objects.filter(host_id=host_id):
vn.mark_deleted()
volume_ids.add(vn.volume_id)
for volume in Volume.objects.filter(pk__in=volume_ids):
if volume.volumenode_set.count() == 0:
volume.mark_deleted()
remove_host_resources_patch = mock.patch(
"chroma_core.services.plugin_runner.agent_daemon_interface.AgentDaemonRpcInterface.remove_host_resources",
new=mock.Mock(side_effect=_remove_host_resources),
create=True,
)
class TestMisc(ChromaApiTestCase):
"""API unit tests which are not specific to a particular resource"""
def test_HYD648(self):
"""Test that datetimes in the API have a timezone"""
synthetic_host("myserver")
response = self.api_client.get("/api/host/")
self.assertHttpOK(response)
host = self.deserialize(response)["objects"][0]
t = IMLDateTime.parse(host["state_modified_at"])
self.assertNotEqual(t.tzinfo, None)
@mock.patch("chroma_core.services.http_agent.HttpAgentRpc.remove_host", new=mock.Mock(), create=True)
@mock.patch("chroma_core.services.job_scheduler.agent_rpc.AgentRpc.remove", new=mock.Mock())
@remove_host_resources_patch
def test_removals(self):
"""Test that after objects are removed all GETs still work
The idea is to go through a add hosts, create FS, remove FS, remove hosts
cycle and then do a spider of the API to ensure that there aren't any
exceptions rendering things (e.g. due to trying to dereference removed
things incorrectly)"""
host = synthetic_host("myserver")
create_simple_fs()
# Create a command/job/step result referencing the host
command = Command.objects.create(message="test command", complete=True, errored=True)
job = StopLNetJob.objects.create(lnet_configuration=host.lnet_configuration, state="complete", errored=True)
command.jobs.add(job)
step_klass, args = job.get_steps()[0]
StepResult.objects.create(
job=job, backtrace="an error", step_klass=step_klass, args=args, step_index=0, step_count=1, state="failed"
)
# There will now be an CommandErroredAlert because the command above failed.
alerts = self.deserialize(self.api_client.get("/api/alert/"))["objects"]
self.assertEqual(len(alerts), 1)
self.assertEqual(alerts[0]["alert_type"], "CommandErroredAlert")
# Now create an alert/event referencing the host
HostOfflineAlert.notify(host, True)
alerts = self.deserialize(self.api_client.get("/api/alert/", data={"active": True}))["objects"]
self.assertEqual(len(alerts), 1)
self.assertEqual(alerts[0]["alert_type"], "HostOfflineAlert")
# Double check that is 2 alerts in total.
alerts = self.deserialize(self.api_client.get("/api/alert/"))["objects"]
self.assertEqual(len(alerts), 2)
# Cause JobScheduler() to delete the objects, check the objects are gone in the API
# and the API can still be spidered cleanly
job = ForceRemoveHostJob(host=host)
for step_klass, args in job.get_steps():
step_klass(job, args, None, None, None).run(args)
# Check everything is gone
self.assertEqual(ManagedTarget.objects.count(), 0)
self.assertEqual(ManagedHost.objects.count(), 0)
self.assertEqual(Volume.objects.count(), 0)
self.assertEqual(VolumeNode.objects.count(), 0)
self.assertListEqual(self.deserialize(self.api_client.get("/api/alert/?active=true"))["objects"], [])
self.assertListEqual(self.deserialize(self.api_client.get("/api/volume/"))["objects"], [])
self.assertListEqual(self.deserialize(self.api_client.get("/api/volume_node/"))["objects"], [])
self.assertListEqual(self.deserialize(self.api_client.get("/api/target/"))["objects"], [])
self.assertListEqual(self.deserialize(self.api_client.get("/api/host/"))["objects"], [])
self.assertListEqual(self.deserialize(self.api_client.get("/api/filesystem/"))["objects"], [])
# Check resources still render without exceptions
self.spider_api()
|
11592154
|
from flask import (
Blueprint,
Response,
abort,
jsonify,
render_template,
request,
send_from_directory,
)
import whiskyton.helpers.sitemap as whiskyton_sitemap
from whiskyton import app, models
from whiskyton.helpers.charts import Chart
files = Blueprint("files", __name__)
@files.route("/charts/<reference_slug>-<whisky_slug>.svg")
def create_chart(reference_slug, whisky_slug):
    # get whisky objects from the db
reference_obj = models.Whisky.query.filter_by(slug=reference_slug).first()
whisky_obj = models.Whisky.query.filter_by(slug=whisky_slug).first()
# error page if whisky doesn't exist
if reference_obj is None or whisky_obj is None:
return abort(404)
    # if the file does not exist, create it
chart = Chart(reference=reference_obj, comparison=whisky_obj)
filename = chart.cache_name(True)
if not filename.exists():
chart.cache()
# return the chart to the user
return Response(filename.read_file(), mimetype="image/svg+xml")
@files.route("/whiskyton.json")
def whisky_json():
whiskies = models.Whisky.query.all()
return jsonify(whiskies=[w.distillery for w in whiskies])
@files.route("/robots.txt")
def robots():
basedir = app.config["BASEDIR"]
return send_from_directory(basedir.child("whiskyton", "static"), "robots.txt")
@files.route("/favicon.ico")
def favicon():
basedir = app.config["BASEDIR"]
return send_from_directory(basedir.child("whiskyton", "static"), "favicon.ico")
@files.route("/sitemap.xml")
def sitemap():
whiskies = models.Whisky.query.all()
last_change = whiskyton_sitemap.most_recent_update()
return render_template(
"sitemap.xml",
whiskies=whiskies,
last_change=last_change,
url_root=request.url_root,
)
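# Illustrative note (not from the original source): this blueprint only takes
# effect once registered on the Flask app, presumably in the package's
# __init__ module. A minimal sketch, assuming `app` is the instance imported
# above:
#
#   app.register_blueprint(files)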
|
11592164
|
import tensorflow as tf
import numpy as np
from typing import Tuple
from modules.utils import PostNet, CBHGLayer, PreNet, PositionalEncoding
from modules.attention import BahdanauAttention, CrossAttentionBLK
class BasePosterior(tf.keras.layers.Layer):
"""Encode the target sequence into latent distributions"""
def __init__(self, name='Posterior', **kwargs):
super(BasePosterior, self).__init__(name=name, **kwargs)
def call(self, inputs, src_enc, src_lengths=None, target_lengths=None
) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
raise NotImplementedError
@staticmethod
def reparameterize(mu, logvar, nsamples=tf.constant(1), random=tf.constant(True)):
"""
:param mu: [batch, max_time, dim]
:param logvar: [batch, max_time, dim]
:param nsamples: int
:param random: whether sample from N(0, 1) or just use zeros
:return: samples, noises, [batch, nsamples, max_time, dim]
"""
print('tracing back at posterior reparameterize')
batch = tf.shape(mu)[0]
max_time = tf.shape(mu)[1]
dim = tf.shape(mu)[2]
std = tf.math.exp(0.5 * logvar)
if random:
eps = tf.random.normal([batch, nsamples, max_time, dim])
else:
eps = tf.zeros([batch, nsamples, max_time, dim])
samples = eps * tf.expand_dims(std, axis=1) + tf.expand_dims(mu, axis=1)
return samples, eps
@staticmethod
def log_probability(mu, logvar, z=None, eps=None, seq_lengths=None, epsilon=tf.constant(1e-8)):
"""
:param mu: [batch, max_time, dim]
:param logvar: [batch, max_time, dim]
:param z: [batch, nsamples, max_time, dim]
:param eps: [batch, nsamples, max_time, dim]
:param seq_lengths: [batch, ]
:param epsilon: small float number to avoid overflow
:return: log probabilities, [batch, nsamples]
"""
print('tracing back at posterior log-probability')
batch = tf.shape(mu)[0]
max_time = tf.shape(mu)[1]
dim = tf.shape(mu)[2]
std = tf.math.exp(0.5 * logvar)
normalized_samples = (eps if eps is not None
else (z - tf.expand_dims(mu, axis=1))
/ (tf.expand_dims(std, axis=1) + epsilon))
expanded_logvar = tf.expand_dims(logvar, axis=1)
# time_level_log_probs [batch, nsamples, max_time]
time_level_log_probs = -0.5 * (
tf.cast(dim, tf.float32) * tf.math.log(2 * np.pi)
+ tf.reduce_sum(expanded_logvar + normalized_samples ** 2.,
axis=3))
seq_mask = (tf.sequence_mask(seq_lengths, maxlen=max_time, dtype=tf.float32)
if seq_lengths is not None
else tf.ones([batch, max_time]))
seq_mask = tf.expand_dims(seq_mask, axis=1) # [batch, 1, max_time]
sample_level_log_probs = tf.reduce_sum(seq_mask * time_level_log_probs,
axis=2) # [batch, nsamples]
return sample_level_log_probs
def sample(self, inputs, src_enc, input_lengths, src_lengths,
nsamples=tf.constant(1), random=tf.constant(True)) -> Tuple[tf.Tensor, tf.Tensor]:
"""
:param inputs: [batch, tgt_max_time, in_dim]
:param src_enc: [batch, src_max_time, emb_dim]
:param input_lengths: [batch, ]
:param src_lengths: [batch, ]
:param nsamples:
:param random:
:return:
tensor1: samples from the posterior, [batch, nsamples, tgt_max_time, dim]
tensor2: log-probabilities, [batch, nsamples]
"""
raise NotImplementedError
class TransformerPosterior(BasePosterior):
def __init__(self, pre_hidden, pre_drop_rate, pre_activation,
pos_drop_rate, nblk, attention_dim, attention_heads,
temperature, ffn_hidden, latent_dim, name='TransformerPosterior'):
super(TransformerPosterior, self).__init__(name=name)
self.pos_weight = tf.Variable(1.0, trainable=True)
self.prenet = PreNet(units=pre_hidden, drop_rate=pre_drop_rate,
activation=pre_activation, name='decoder_prenet')
self.pe = PositionalEncoding('EncoderPositionEncoding')
self.pe_dropout = tf.keras.layers.Dropout(rate=pos_drop_rate)
self.attentions = []
for i in range(nblk):
attention = CrossAttentionBLK(input_dim=pre_hidden,
attention_dim=attention_dim,
attention_heads=attention_heads,
attention_temperature=temperature,
ffn_hidden=ffn_hidden)
self.attentions.append(attention)
self.mu_projection = tf.keras.layers.Dense(latent_dim,
kernel_initializer='zeros',
name='mu_projection')
self.logvar_projection = tf.keras.layers.Dense(latent_dim,
kernel_initializer='zeros',
name='logvar_projection')
def call(self, inputs, src_enc, src_lengths=None, target_lengths=None, training=None):
print('tracing back at posterior call')
prenet_outs = self.prenet(inputs)
max_time = tf.shape(prenet_outs)[1]
dim = tf.shape(prenet_outs)[2]
pos = self.pe.positional_encoding(max_time, dim)
pos_embs = prenet_outs + self.pos_weight * pos
pos_embs = self.pe_dropout(pos_embs, training=training)
att_outs = pos_embs
for att in self.attentions:
att_outs, alignments = att(
inputs=att_outs, memory=src_enc, query_lengths=target_lengths,
memory_lengths=src_lengths, training=training)
mu = self.mu_projection(att_outs)
logvar = self.logvar_projection(att_outs)
return mu, logvar, None
def sample(self, inputs, src_enc, input_lengths, src_lengths,
nsamples=tf.constant(1), random=tf.constant(True), training=None):
        mu, logvar, _ = self.call(inputs, src_enc, src_lengths=src_lengths,
                                  target_lengths=input_lengths, training=training)
        samples, eps = self.reparameterize(mu, logvar, nsamples, random)
        log_probs = self.log_probability(mu, logvar, eps=eps, seq_lengths=input_lengths)
return samples, log_probs
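# Shape sketch (illustrative, not from the original source) for the
# reparameterization above: with zero mean and zero log-variance the samples
# equal the noises, and the sample axis is inserted at dimension 1.
#
#   mu = tf.zeros([2, 5, 8])                        # [batch, max_time, dim]
#   logvar = tf.zeros([2, 5, 8])
#   samples, eps = BasePosterior.reparameterize(mu, logvar, nsamples=tf.constant(3))
#   # samples.shape == eps.shape == [2, 3, 5, 8]    # [batch, nsamples, max_time, dim]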
|
11592215
|
import unittest
import logging
import os
import re
skip_unless = os.getenv("SKIP_TEST_UNLESS", "")
skip_unless_expression = re.compile("%s" % skip_unless)
def skip_if_required():
def decorator(cls):
if skip_unless:
m = skip_unless_expression.match(cls.__name__)
if not m:
#logging.error("skipping %s" % cls.__name__)
@unittest.skip("skipping %s" % __name__)
class C(cls):
pass
return C
#raise C # unittest.SkipTest()
return cls
return decorator
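# Usage sketch (illustrative, not from the original source): with
# SKIP_TEST_UNLESS set before this module is imported, only TestCase classes
# whose names match the pattern keep running; the class names are made up.
#
#   os.environ["SKIP_TEST_UNLESS"] = "TestFast.*"
#
#   @skip_if_required()
#   class TestFastPath(unittest.TestCase):   # matches the pattern, runs
#       def test_ok(self):
#           self.assertTrue(True)
#
#   @skip_if_required()
#   class TestSlowPath(unittest.TestCase):   # no match, wrapped in unittest.skip
#       def test_skipped(self):
#           self.fail("never runs")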
|
11592234
|
import sys
from IPython.Debugger import Pdb
from IPython.Shell import IPShell
from IPython import ipapi
shell = IPShell(argv=[''])
def set_trace():
ip = ipapi.get()
def_colors = ip.options.colors
Pdb(def_colors).set_trace(sys._getframe().f_back)
|
11592268
|
import numpy as np
def resample_stats(stats, *data, n=1000):
"""
Resample data set `data` `n` times and evaluate statistic `stats`.
"""
N = data[0].shape[0]
for i in range(1, len(data)):
if data[i].shape[0] != N:
raise ValueError(
'Expected batch_size ({}) to match batch_size ({}).'
.format(data[i].shape[0], N))
out = []
for _ in range(n):
# Resample
indices = np.random.randint(0, N, size=(N,))
# Compute statistics
out.append(stats(*(d[indices] for d in data)))
return out
class ResampleStats:
def __init__(self, stats, n=1000):
self.stats = stats
self.n = n
def __call__(self, *data):
return resample_stats(self.stats, *data, n=self.n)
def __repr__(self):
return "ResampleStats(stats=%r, n=%r)" % (self.stats, self.n)
|
11592272
|
from ..util import BaseCase
import pygsti
from pygsti.protocols import vb as _vb
from pygsti.processors import CliffordCompilationRules as CCR
from pygsti.processors import QubitProcessorSpec as QPS
class TestPeriodicMirrorCircuitsDesign(BaseCase):
def test_design_construction(self):
n = 4
qs = ['Q'+str(i) for i in range(n)]
ring = [('Q'+str(i),'Q'+str(i+1)) for i in range(n-1)]
gateset1 = ['Gcphase'] + ['Gc'+str(i) for i in range(24)]
pspec1 = QPS(n, gateset1, availability={'Gcphase':ring}, qubit_labels=qs)
tmodel1 = pygsti.models.create_crosstalk_free_model(pspec1)
depths = [0, 2, 8]
q_set = ('Q0', 'Q1', 'Q2')
clifford_compilations = {'absolute': CCR.create_standard(pspec1, 'absolute', ('paulis', '1Qcliffords'), verbosity=0)}
        design1 = _vb.PeriodicMirrorCircuitDesign(pspec1, depths, 3, qubit_labels=q_set,
                    clifford_compilations=clifford_compilations, sampler='edgegrab', samplerargs=(0.25,))
        for cl, bsl in zip(design1.circuit_lists, design1.idealout_lists):
            for c, bs in zip(cl, bsl):
                self.assertAlmostEqual(c.simulate(tmodel1)[bs], 1.)
|
11592291
|
import pytest
from plenum.common.constants import LEDGER_STATUS
from plenum.common.messages.node_messages import Checkpoint, LedgerStatus
from plenum.common.startable import Mode
from plenum.server.node import Node
from plenum.server.replica import Replica
from plenum.server.replica_validator_enums import STASH_CATCH_UP
from plenum.test import waits
from plenum.test.checkpoints.helper import check_for_nodes, check_stable_checkpoint
from plenum.test.delayers import cs_delay, lsDelay, \
ppDelay, pDelay, cDelay, msg_rep_delay, cr_delay
from plenum.test.pool_transactions.helper import \
disconnect_node_and_ensure_disconnected
from plenum.test.helper import sdk_send_random_and_check, assertExp, max_3pc_batch_limits, \
check_last_ordered_3pc_on_all_replicas, check_last_ordered_3pc_on_master
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.stasher import delay_rules
from plenum.test.test_node import checkNodesConnected
from plenum.test.view_change.helper import start_stopped_node
from stp_core.loop.eventually import eventually
from plenum.test.checkpoints.conftest import chkFreqPatched, reqs_for_checkpoint
CHK_FREQ = 5
@pytest.fixture(scope="module")
def tconf(tconf):
with max_3pc_batch_limits(tconf, size=1) as tconf:
yield tconf
def test_3pc_while_catchup_with_chkpoints_only(tdir, tconf,
looper,
chkFreqPatched,
reqs_for_checkpoint,
testNodeClass,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
allPluginsPath):
'''
Check that catch-up is not started again even if a quorum of stashed checkpoints
is received during catch-up.
Assume that only checkpoints and no 3PC messages are received.
'''
# Prepare nodes
lagging_node = txnPoolNodeSet[-1]
rest_nodes = txnPoolNodeSet[:-1]
# Check that requests executed well
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 1)
# Stop one node
waitNodeDataEquality(looper, lagging_node, *rest_nodes)
disconnect_node_and_ensure_disconnected(looper,
txnPoolNodeSet,
lagging_node,
stopNode=True)
looper.removeProdable(lagging_node)
# Send more requests to active nodes
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 1)
waitNodeDataEquality(looper, *rest_nodes)
# Restart stopped node and wait for successful catch up
lagging_node = start_stopped_node(lagging_node,
looper,
tconf,
tdir,
allPluginsPath,
start=False,
)
initial_all_ledgers_caught_up = lagging_node.spylog.count(Node.allLedgersCaughtUp)
# delay all 3PC messages on the lagged node so that it
# receives only Checkpoints and catch-up messages
lagging_node.nodeIbStasher.delay(ppDelay())
lagging_node.nodeIbStasher.delay(pDelay())
lagging_node.nodeIbStasher.delay(cDelay())
with delay_rules(lagging_node.nodeIbStasher, lsDelay(), cr_delay(), msg_rep_delay(types_to_delay=[LEDGER_STATUS])):
looper.add(lagging_node)
txnPoolNodeSet[-1] = lagging_node
looper.run(checkNodesConnected(txnPoolNodeSet))
# wait till we got ledger statuses for messages missed while the node was offline,
# so that now we can order more messages, and they will not be caught up, but stashed
looper.run(
eventually(lambda: assertExp(lagging_node.nodeIbStasher.num_of_stashed(LedgerStatus) >= 3), retryWait=1,
timeout=60))
assert lagging_node.mode != Mode.participating
# make sure that more requests are being ordered while catch-up is in progress
# stash enough stable checkpoints for starting a catch-up
num_checkpoints = Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1
num_reqs = reqs_for_checkpoint * num_checkpoints + 1
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client,
num_reqs)
looper.run(
eventually(check_last_ordered_3pc_on_all_replicas, rest_nodes,
(0, num_reqs + 2))
)
# all good nodes stabilized checkpoint
looper.run(eventually(check_for_nodes, rest_nodes, check_stable_checkpoint, 10))
assert lagging_node.mode != Mode.participating
# lagging node is catching up and stashing all checkpoints
looper.run(
eventually(
lambda: assertExp(get_stashed_checkpoints(lagging_node) == num_checkpoints * len(rest_nodes)),
timeout=waits.expectedPoolCatchupTime(len(txnPoolNodeSet))
)
)
# check that last_ordered is set
looper.run(
eventually(check_last_ordered_3pc_on_master, [lagging_node],
(0, num_reqs + 2))
)
# check that the catch-up is finished
looper.run(
eventually(
lambda: assertExp(lagging_node.mode == Mode.participating), retryWait=1,
timeout=waits.expectedPoolCatchupTime(len(txnPoolNodeSet))
)
)
    # check that catch-up was not started again: the node completed a single
    # catch-up even though it saw a quorum of stashed checkpoints meanwhile
assert lagging_node.spylog.count(Node.allLedgersCaughtUp) == initial_all_ledgers_caught_up + 1
assert lagging_node.spylog.count(Node.start_catchup) == 1
waitNodeDataEquality(looper, *txnPoolNodeSet, customTimeout=5)
def get_stashed_checkpoints(node):
return sum(
1 for (stashed, sender) in node.master_replica.stasher._queues[STASH_CATCH_UP] if isinstance(stashed, Checkpoint))
|
11592295
|
from django.test import TestCase, override_settings
from tests.models import TestModel
class TestSlugification(TestCase):
def test_unicode_slugs(self):
"""
Confirm the preservation of unicode in slugification by default
"""
sample_obj = TestModel.objects.create()
# a unicode tag will be slugified for space reasons but
# unicode-ness will be kept by default
sample_obj.tags.add("あい うえお")
self.assertEqual([tag.slug for tag in sample_obj.tags.all()], ["あい-うえお"])
def test_old_slugs(self):
"""
Test that the setting that gives us the old slugification behavior
is in place
"""
with override_settings(TAGGIT_STRIP_UNICODE_WHEN_SLUGIFYING=True):
sample_obj = TestModel.objects.create()
            # with TAGGIT_STRIP_UNICODE_WHEN_SLUGIFYING enabled, the unicode
            # characters are stripped out entirely during slugification
sample_obj.tags.add("あい うえお")
self.assertEqual([tag.slug for tag in sample_obj.tags.all()], [""])
|
11592317
|
import warnings
import numpy as np
from numba import jit
"""
This code is from scipy project with following license:
SciPy license
Copyright © 2001, 2002 Enthought, Inc.
All rights reserved.
Copyright © 2003-2019 SciPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that
the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
- Neither the name of Enthought nor the names of the SciPy Developers may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
_AXIS_TO_IND = {"x": 0, "y": 1, "z": 2}
def _elementary_basis_vector(axis):
b = np.zeros(3)
b[_AXIS_TO_IND[axis]] = 1
return b
def compute_euler_from_matrix(matrix, seq, extrinsic=False):
# The algorithm assumes intrinsic frame transformations. The algorithm
# in the paper is formulated for rotation matrices which are transposition
# rotation matrices used within Rotation.
# Adapt the algorithm for our case by
# 1. Instead of transposing our representation, use the transpose of the
# O matrix as defined in the paper, and be careful to swap indices
# 2. Reversing both axis sequence and angles for extrinsic rotations
if extrinsic:
seq = seq[::-1]
if matrix.ndim == 2:
matrix = matrix[None, :, :]
num_rotations = matrix.shape[0]
# Step 0
# Algorithm assumes axes as column vectors, here we use 1D vectors
n1 = _elementary_basis_vector(seq[0])
n2 = _elementary_basis_vector(seq[1])
n3 = _elementary_basis_vector(seq[2])
# Step 2
sl = np.dot(np.cross(n1, n2), n3)
cl = np.dot(n1, n3)
# angle offset is lambda from the paper referenced in [2] from docstring of
# `as_euler` function
offset = np.arctan2(sl, cl)
c = np.vstack((n2, np.cross(n1, n2), n1))
# Step 3
rot = np.array([[1, 0, 0], [0, cl, sl], [0, -sl, cl]])
res = np.einsum("...ij,...jk->...ik", c, matrix)
matrix_transformed = np.einsum("...ij,...jk->...ik", res, c.T.dot(rot))
# Step 4
angles = np.empty((num_rotations, 3))
    # Clamp to [-1, 1] so the arccos below stays in its valid domain
positive_unity = matrix_transformed[:, 2, 2] > 1
negative_unity = matrix_transformed[:, 2, 2] < -1
matrix_transformed[positive_unity, 2, 2] = 1
matrix_transformed[negative_unity, 2, 2] = -1
angles[:, 1] = np.arccos(matrix_transformed[:, 2, 2])
# Steps 5, 6
eps = 1e-7
safe1 = np.abs(angles[:, 1]) >= eps
safe2 = np.abs(angles[:, 1] - np.pi) >= eps
# Step 4 (Completion)
angles[:, 1] += offset
# 5b
safe_mask = np.logical_and(safe1, safe2)
angles[safe_mask, 0] = np.arctan2(
matrix_transformed[safe_mask, 0, 2], -matrix_transformed[safe_mask, 1, 2]
)
angles[safe_mask, 2] = np.arctan2(
matrix_transformed[safe_mask, 2, 0], matrix_transformed[safe_mask, 2, 1]
)
if extrinsic:
# For extrinsic, set first angle to zero so that after reversal we
# ensure that third angle is zero
# 6a
angles[~safe_mask, 0] = 0
# 6b
angles[~safe1, 2] = np.arctan2(
matrix_transformed[~safe1, 1, 0] - matrix_transformed[~safe1, 0, 1],
matrix_transformed[~safe1, 0, 0] + matrix_transformed[~safe1, 1, 1],
)
# 6c
angles[~safe2, 2] = -(
np.arctan2(
matrix_transformed[~safe2, 1, 0] + matrix_transformed[~safe2, 0, 1],
matrix_transformed[~safe2, 0, 0] - matrix_transformed[~safe2, 1, 1],
)
)
else:
        # For intrinsic, set third angle to zero
# 6a
angles[~safe_mask, 2] = 0
# 6b
angles[~safe1, 0] = np.arctan2(
matrix_transformed[~safe1, 1, 0] - matrix_transformed[~safe1, 0, 1],
matrix_transformed[~safe1, 0, 0] + matrix_transformed[~safe1, 1, 1],
)
# 6c
angles[~safe2, 0] = np.arctan2(
matrix_transformed[~safe2, 1, 0] + matrix_transformed[~safe2, 0, 1],
matrix_transformed[~safe2, 0, 0] - matrix_transformed[~safe2, 1, 1],
)
# Step 7
if seq[0] == seq[2]:
# lambda = 0, so we can only ensure angle2 -> [0, pi]
adjust_mask = np.logical_or(angles[:, 1] < 0, angles[:, 1] > np.pi)
else:
# lambda = + or - pi/2, so we can ensure angle2 -> [-pi/2, pi/2]
adjust_mask = np.logical_or(angles[:, 1] < -np.pi / 2, angles[:, 1] > np.pi / 2)
# Dont adjust gimbal locked angle sequences
adjust_mask = np.logical_and(adjust_mask, safe_mask)
angles[adjust_mask, 0] += np.pi
angles[adjust_mask, 1] = 2 * offset - angles[adjust_mask, 1]
angles[adjust_mask, 2] -= np.pi
angles[angles < -np.pi] += 2 * np.pi
angles[angles > np.pi] -= 2 * np.pi
# Step 8
if not np.all(safe_mask):
warnings.warn(
"Gimbal lock detected. Setting third angle to zero since"
" it is not possible to uniquely determine all angles."
)
# Reverse role of extrinsic and intrinsic rotations, but let third angle be
# zero for gimbal locked cases
if extrinsic:
angles = angles[:, ::-1]
return angles
def compute_q_from_matrix(matrix):
is_single = False
matrix = np.asarray(matrix, dtype=float)
if matrix.ndim not in [2, 3] or matrix.shape[-2:] != (3, 3):
raise ValueError(
"Expected `matrix` to have shape (3, 3) or "
"(N, 3, 3), got {}".format(matrix.shape)
)
# If a single matrix is given, convert it to 3D 1 x 3 x 3 matrix but
# set self._single to True so that we can return appropriate objects in
# the `to_...` methods
if matrix.shape == (3, 3):
matrix = matrix.reshape((1, 3, 3))
is_single = True
num_rotations = matrix.shape[0]
decision_matrix = np.empty((num_rotations, 4))
decision_matrix[:, :3] = matrix.diagonal(axis1=1, axis2=2)
decision_matrix[:, -1] = decision_matrix[:, :3].sum(axis=1)
choices = decision_matrix.argmax(axis=1)
quat = np.empty((num_rotations, 4))
ind = np.nonzero(choices != 3)[0]
i = choices[ind]
j = (i + 1) % 3
k = (j + 1) % 3
quat[ind, i] = 1 - decision_matrix[ind, -1] + 2 * matrix[ind, i, i]
quat[ind, j] = matrix[ind, j, i] + matrix[ind, i, j]
quat[ind, k] = matrix[ind, k, i] + matrix[ind, i, k]
quat[ind, 3] = matrix[ind, k, j] - matrix[ind, j, k]
ind = np.nonzero(choices == 3)[0]
quat[ind, 0] = matrix[ind, 2, 1] - matrix[ind, 1, 2]
quat[ind, 1] = matrix[ind, 0, 2] - matrix[ind, 2, 0]
quat[ind, 2] = matrix[ind, 1, 0] - matrix[ind, 0, 1]
quat[ind, 3] = 1 + decision_matrix[ind, -1]
quat /= np.linalg.norm(quat, axis=1)[:, None]
if is_single:
return quat[0]
else:
return quat
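# Sanity-check sketch (illustrative, not from the original source): a 90 degree
# rotation about z should map to the scalar-last quaternion
# [0, 0, sin(45 deg), cos(45 deg)], i.e. approximately [0, 0, 0.7071, 0.7071].
if __name__ == "__main__":
    r_z90 = np.array([[0.0, -1.0, 0.0],
                      [1.0, 0.0, 0.0],
                      [0.0, 0.0, 1.0]])
    print(compute_q_from_matrix(r_z90))  # ~ [0, 0, 0.70710678, 0.70710678]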
|
11592343
|
from __future__ import division
import csv
import tensorflow as tf
#import params
import numpy as np
def weight_variable(name, shape):
return tf.get_variable(name, shape=shape, initializer=tf.contrib.layers.xavier_initializer())
# initial = tf.truncated_normal(shape, stddev=0.1)
# return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W, stride):
return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='VALID')
x = tf.placeholder(tf.float32, shape=[None, 66, 200, 3])
y_ = tf.placeholder(tf.float32, shape=[None, 1])
x_image = x
# first convolutional layer
W_conv1 = weight_variable("wc1", [5, 5, 3, 24])
b_conv1 = bias_variable([24])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1, 2) + b_conv1)
# second convolutional layer
W_conv2 = weight_variable("wc2", [5, 5, 24, 36])
b_conv2 = bias_variable([36])
h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2, 2) + b_conv2)
# third convolutional layer
W_conv3 = weight_variable("wc3", [5, 5, 36, 48])
b_conv3 = bias_variable([48])
h_conv3 = tf.nn.relu(conv2d(h_conv2, W_conv3, 2) + b_conv3)
# fourth convolutional layer
W_conv4 = weight_variable("wc4", [3, 3, 48, 64])
b_conv4 = bias_variable([64])
h_conv4 = tf.nn.relu(conv2d(h_conv3, W_conv4, 1) + b_conv4)
# fifth convolutional layer
W_conv5 = weight_variable("wc5", [3, 3, 64, 64])
b_conv5 = bias_variable([64])
h_conv5 = tf.nn.relu(conv2d(h_conv4, W_conv5, 1) + b_conv5)
h_conv5_flat = tf.reshape(h_conv5, [-1, 1152])
# fully connected layer 2
W_fc2 = weight_variable("fc2", [1152, 100])
b_fc2 = bias_variable([100])
h_fc2 = tf.nn.relu(tf.matmul(h_conv5_flat, W_fc2) + b_fc2)
# fully connected layer 3
W_fc3 = weight_variable("fc3", [100, 50])
b_fc3 = bias_variable([50])
h_fc3 = tf.nn.relu(tf.matmul(h_fc2, W_fc3) + b_fc3)
# fully connected layer 4
W_fc4 = weight_variable("fc4", [50, 10])
b_fc4 = bias_variable([10])
h_fc4 = tf.nn.relu(tf.matmul(h_fc3, W_fc4) + b_fc4)
# output
W_fc5 = weight_variable("fc5", [10, 1])
b_fc5 = bias_variable([1])
#y = tf.multiply(tf.sigmoid(tf.matmul(h_fc4, W_fc5) + b_fc5), 2)
y = tf.multiply(tf.atan(tf.matmul(h_fc4, W_fc5) + b_fc5), 2)
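# Training-loop sketch (illustrative, not from the original source): the graph
# above is TF1-style, so a Session plus feed_dict drives it. The L2 loss and
# Adam optimizer here are assumptions, not taken from the original.
if __name__ == "__main__":
    loss = tf.reduce_mean(tf.square(y - y_))  # assumed regression loss
    train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        xs = np.random.rand(8, 66, 200, 3).astype(np.float32)  # dummy batch
        ys = np.random.rand(8, 1).astype(np.float32)
        _, batch_loss = sess.run([train_step, loss], feed_dict={x: xs, y_: ys})
        print("one dummy step, loss:", batch_loss)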
|
11592365
|
from django.db import models
from django.urls import reverse
from target import target
from django.utils import timezone
DEFAULT_RULES = """Make words of at least four letters using the grid letters at most once.
The centre letter must be in every word.
There's one nine-letter word.
There are no plurals or proper nouns, except possibly for the nine-letter word. The nine-letter word always has a South African flavour.
Words are drawn from our dictionary which has about 100,000 words.
You can either type the letters or click on them. To delete a letter use the backspace key or click it again."""
class TargetQuerySet(models.QuerySet):
def published(self):
return self.filter(published__lte=timezone.now())
class Target(models.Model):
letters = models.CharField(max_length=9, unique=True)
words = models.TextField(blank=True)
published = models.DateTimeField(blank=True, null=True)
public_solution = models.BooleanField(default=False)
publish_solution_after = models.SmallIntegerField(
default=24, null=True,
verbose_name="solution time",
help_text="Make solution available after this many hours")
clue = models.CharField(
max_length=150, blank=True, help_text="Leave blank if no clue.")
tweet_text = models.CharField(
max_length=180,
default="Try the latest GroundUp Target.",
help_text="Blank for no tweet")
tweet_solution_text = models.CharField(
max_length=180,
default="The solution for this GroundUp Target is now available.")
tweeted = models.BooleanField(default=False, editable=False)
tweeted_solution = models.BooleanField(default=False, editable=False)
rules = models.TextField(default=DEFAULT_RULES, blank=True)
number = models.PositiveSmallIntegerField(default=0, editable=False)
created = models.DateTimeField(auto_now_add=True, editable=False)
modified = models.DateTimeField(auto_now=True, editable=False)
objects = TargetQuerySet.as_manager()
def is_published(self):
return (self.published is not None) and \
(self.published <= timezone.now())
def is_solution_public(self):
if self.public_solution or \
(self.is_published() and \
self.published + timezone.timedelta(hours=self.publish_solution_after) \
< timezone.now()):
return True
return False
def splitWords(self):
return self.words.split("\r\n")
def wordCount(self):
return len(self.splitWords())
def lettersJson(self):
letterArray = ["'" + l + "'" for l in self.letters]
return "[" + ",".join(letterArray) + "]"
def hashedWords(self):
hashed_words = [target.hashCode(w) for w in self.splitWords()]
return hashed_words
def nineLetterWord(self):
try:
return [w for w in self.splitWords() if len(w) == 9][0]
        except IndexError:
return ""
def hashedNineLetterWord(self):
return target.hashCode(self.nineLetterWord())
def wordsWithoutNineLetter(self):
return ' '.join([w for w in self.splitWords() if len(w) != 9])
def __str__(self):
return str(self.pk) + "-" + str(self.number) + ":" + self.letters
def save(self, *args, **kwargs):
if self.published and self.number == 0:
objects = Target.objects.order_by("-number")
if objects:
latest = objects[0]
number = latest.number + 1
else:
number = 1
self.number = number
super(Target, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('target:detail', args=[self.pk, ])
class Meta:
ordering = ['-number', '-modified', ]
|
11592386
|
import sys
import json
from flask import (
Flask,
Response,
request,
send_from_directory,
)
from azure.messaging.webpubsubservice import (
WebPubSubServiceClient
)
hub_name = 'chat'
app = Flask(__name__)
service = WebPubSubServiceClient.from_connection_string(sys.argv[1], hub=hub_name)
@app.route('/<path:filename>')
def index(filename):
return send_from_directory('public', filename)
@app.route('/eventhandler', methods=['POST', 'OPTIONS'])
def handle_event():
if request.method == 'OPTIONS' or request.method == 'GET':
if request.headers.get('WebHook-Request-Origin'):
res = Response()
res.headers['WebHook-Allowed-Origin'] = '*'
res.status_code = 200
return res
elif request.method == 'POST':
user_id = request.headers.get('ce-userid')
if request.headers.get('ce-type') == 'azure.webpubsub.sys.connected':
return user_id + ' connected', 200
elif request.headers.get('ce-type') == 'azure.webpubsub.user.message':
service.send_to_all(content_type="application/json", message={
'from': user_id,
'message': request.data.decode('UTF-8')
})
return Response(status=204, content_type='text/plain')
else:
return 'Not found', 404
@app.route('/negotiate')
def negotiate():
id = request.args.get('id')
if not id:
return 'missing user id', 400
token = service.get_client_access_token(user_id=id)
return {
'url': token['url']
}, 200
if __name__ == '__main__':
if len(sys.argv) != 2:
print('Usage: python server.py <connection-string>')
exit(1)
app.run(port=8080)
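# Quick-check sketch (illustrative, not from the original source): with the
# server running against a real connection string, the negotiate endpoint can
# be probed directly; the user id "alice" is made up.
#
#   python server.py "<connection-string>"
#   curl "http://localhost:8080/negotiate?id=alice"
#   # -> {"url": "wss://..."}  (client access URL for the 'chat' hub)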
|
11592430
|
import logging
import os
import uuid
import pandas as pd
import pytest
# from output import build_tag
try:
    from pybfe.client.session import GRPCSession as Session
except ImportError:
    # fall back to open-source pybatfish when pybfe is unavailable
    from pybatfish.client.session import Session
NETWORK_FIXTURES = ['arista']
BF_INIT_SNAPSHOT = "yes"
BF_NETWORK = 'arista'
BF_SNAPSHOT = 'snapshot0'
BF_SNAPSHOT_DIR = f"{os.getcwd()}/{BF_SNAPSHOT}"
BF_DASHBOARD = None
####################
# Set pandas options
####################
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 1000)
#######################
# Set pybatfish options
#######################
# bf_logger.setLevel(logging.WARN)
logging.getLogger('pybatfish').setLevel(logging.WARN)
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
RESET = '\033[39;49m'
def pytest_addoption(parser):
parser.addoption("--min-severity", action="store", default=0, type=int,
help="Minimal FindIssues severity to care about")
@pytest.fixture(scope="session")
def bf():
try:
bf = Session.get('bfe')
os.environ["SESSION_TYPE"] = 'bfe'
    except Exception:
bf = Session.get('bf')
os.environ["SESSION_TYPE"] = 'bf'
session_type = os.environ.get('SESSION_TYPE')
bf.enable_diagnostics = False
bf.set_network(BF_NETWORK)
if BF_INIT_SNAPSHOT == "yes":
bf.init_snapshot(BF_SNAPSHOT_DIR, name=BF_SNAPSHOT, overwrite=True)
else:
bf.set_snapshot(BF_SNAPSHOT)
if session_type == 'bfe':
bf.get_node_roles()
return bf
@pytest.fixture
def min_severity(request):
return request.config.getoption("--min-severity")
def pytest_report_header(config):
return [
bcolors.BOLD + bcolors.OKBLUE + "Running Intentionet CI tests" + bcolors.RESET]
def pytest_terminal_summary(terminalreporter, exitstatus, config):
if exitstatus != 0 and BF_DASHBOARD is not None:
url = "{BF_DASHBOARD}/{BF_NETWORK}/{BF_SNAPSHOT}/policies".format(
BF_DASHBOARD=BF_DASHBOARD, BF_NETWORK=BF_NETWORK, BF_SNAPSHOT=BF_SNAPSHOT)
terminalreporter.write_line(
"\n\n"
+ bcolors.BOLD + bcolors.FAIL
+ "There have been failures, explore more using Intentionet Dashboard at {}".format(
url)
+ " " # saves URL
)
def pytest_sessionstart(session):
os.environ['bf_policy_name'] = session.name
# raise ValueError(session.fspath)
p_id = uuid.uuid4().hex
def pytest_runtest_setup(item):
# Get test file name
test_file_name = os.path.basename(item.parent.name)
test_name = item.name
# if os.environ.get('bf_policy_name') is None:
# raise ValueError('pid:{}'.format(p_id))
os.environ['bf_policy_name'] = test_file_name
os.environ['bf_policy_id'] = p_id
# else:
# raise ValueError('pname: {}'.format(os.environ.get('bf_policy_name')))
os.environ['bf_test_name'] = test_name
# os.environ['bf_assert_name'] = test_name
def subdict(d, keys):
return {k: d.get(k) for k in keys}
|
11592434
|
import numpy as np
import SimpleITK as sitk
from scipy.interpolate import griddata
from platipy.imaging.label.utils import vectorised_transform_index_to_physical_point
def evaluate_distance_on_surface(
reference_volume, test_volume, abs_distance=True, reference_as_distance_map=False
):
"""
Evaluates a distance map on a surface
Input: reference_volume: binary volume SimpleITK image, or alternatively a distance map
test_volume: binary volume SimpleITK image
Output: theta, phi, values
"""
if reference_as_distance_map:
reference_distance_map = reference_volume
else:
if abs_distance:
reference_distance_map = sitk.Abs(
sitk.SignedMaurerDistanceMap(
reference_volume, squaredDistance=False, useImageSpacing=True
)
)
else:
reference_distance_map = sitk.SignedMaurerDistanceMap(
reference_volume, squaredDistance=False, useImageSpacing=True
)
test_surface = sitk.LabelContour(test_volume)
distance_image = sitk.Multiply(
reference_distance_map, sitk.Cast(test_surface, sitk.sitkFloat32)
)
distance_array = sitk.GetArrayFromImage(distance_image)
# Get centre of mass of reference volume
reference_volume_array = sitk.GetArrayFromImage(reference_volume)
reference_volume_locations = np.where(reference_volume_array == 1)
    com_index = np.array(reference_volume_locations).mean(axis=1)
com_real = vectorised_transform_index_to_physical_point(reference_volume, com_index)
# Calculate centre of mass in real coordinates
test_surface_array = sitk.GetArrayFromImage(test_surface)
test_surface_locations = np.where(test_surface_array == 1)
test_surface_locations_array = np.array(test_surface_locations)
# Calculate each point on the surface in real coordinates
pts = test_surface_locations_array.T
pts_real = vectorised_transform_index_to_physical_point(test_surface, pts)
pts_diff = pts_real - com_real
# Convert to spherical polar coordinates - base at north pole
rho = np.sqrt((pts_diff * pts_diff).sum(axis=1))
theta = np.pi / 2.0 - np.arccos(pts_diff.T[0] / rho)
phi = -1 * np.arctan2(pts_diff.T[2], -1.0 * pts_diff.T[1])
# Extract values
values = distance_array[test_surface_locations]
return theta, phi, values
def evaluate_distance_to_reference(reference_volume, test_volume, resample_factor=1):
"""
Evaluates the distance from the surface of a test volume to a reference
Input: reference_volume: binary volume SimpleITK image
test_volume: binary volume SimpleITK image
Output: values : the distance to each point on the reference volume surface
"""
# TO DO
# come up with a better resampling strategy
# e.g. resample images prior to this process?
# compute the distance map from the test volume surface
test_distance_map = sitk.Abs(
sitk.SignedMaurerDistanceMap(test_volume, squaredDistance=False, useImageSpacing=True)
)
# get the distance from the test surface to the reference surface
ref_surface = sitk.LabelContour(reference_volume)
ref_surface_pts = sitk.GetArrayFromImage(ref_surface) == 1
surface_values = sitk.GetArrayFromImage(test_distance_map)[ref_surface_pts]
# resample to keep the points to a reasonable amount
values = surface_values[::resample_factor]
return values
def regrid_spherical_data(theta, phi, values, resolution):
"""
Re-grids spherical data
Input: theta, phi, values
Options: plot a figure (plotFig), save a figure (saveFig), case identifier (figName)
Output: p_lat, p_long, grid_values (, fig)
"""
# Re-grid:
# Set up grid
d_radian = resolution * np.pi / 180
p_long, p_lat = np.mgrid[-np.pi : np.pi : d_radian, -np.pi / 2.0 : np.pi / 2.0 : d_radian]
# First pass - linear interpolation, works well but not for edges
grid_values = griddata(
list(zip(theta, phi)), values, (p_lat, p_long), method="linear", rescale=False
)
# Second pass - nearest neighbour interpolation
grid_values_nn = griddata(
list(zip(theta, phi)), values, (p_lat, p_long), method="nearest", rescale=False
)
# Third pass - wherever the linear interpolation isn't defined use nearest neighbour
# interpolation
grid_values[~np.isfinite(grid_values)] = grid_values_nn[~np.isfinite(grid_values)]
return p_lat, p_long, grid_values
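# Usage sketch (illustrative, not from the original source): two synthetic
# binary spheres offset by two voxels, pushed through
# evaluate_distance_to_reference. Unit spacing is assumed, so the returned
# values are in voxel units (mm for real, spacing-aware images).
if __name__ == "__main__":
    zz, yy, xx = np.ogrid[:64, :64, :64]
    ref_arr = ((zz - 32) ** 2 + (yy - 32) ** 2 + (xx - 32) ** 2 <= 15 ** 2).astype(np.uint8)
    test_arr = ((zz - 34) ** 2 + (yy - 32) ** 2 + (xx - 32) ** 2 <= 15 ** 2).astype(np.uint8)
    reference = sitk.GetImageFromArray(ref_arr)
    test = sitk.GetImageFromArray(test_arr)
    values = evaluate_distance_to_reference(reference, test, resample_factor=4)
    print("mean surface distance:", values.mean())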
|
11592444
|
import dectate
import morepath
from morepath.converter import Converter
from morepath.error import (
DirectiveReportError,
ConfigError,
LinkError,
TrajectError,
)
from webtest import TestApp as Client
import pytest
def test_simple_path_one_step():
class app(morepath.App):
pass
class Model:
def __init__(self):
pass
@app.path(model=Model, path="simple")
def get_model():
return Model()
@app.view(model=Model)
def default(self, request):
return "View"
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/simple")
assert response.body == b"View"
response = c.get("/simple/link")
assert response.body == b"http://localhost/simple"
def test_simple_path_two_steps():
class app(morepath.App):
pass
class Model:
def __init__(self):
pass
@app.path(model=Model, path="one/two")
def get_model():
return Model()
@app.view(model=Model)
def default(self, request):
return "View"
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/one/two")
assert response.body == b"View"
response = c.get("/one/two/link")
assert response.body == b"http://localhost/one/two"
def test_variable_path_one_step():
class app(morepath.App):
pass
class Model:
def __init__(self, name):
self.name = name
@app.path(model=Model, path="{name}")
def get_model(name):
return Model(name)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.name
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/foo")
assert response.body == b"View: foo"
response = c.get("/foo/link")
assert response.body == b"http://localhost/foo"
def test_variable_path_two_steps():
class app(morepath.App):
pass
class Model:
def __init__(self, name):
self.name = name
@app.path(model=Model, path="document/{name}")
def get_model(name):
return Model(name)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.name
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/document/foo")
assert response.body == b"View: foo"
response = c.get("/document/foo/link")
assert response.body == b"http://localhost/document/foo"
def test_variable_path_two_variables():
class app(morepath.App):
pass
class Model:
def __init__(self, name, version):
self.name = name
self.version = version
@app.path(model=Model, path="{name}-{version}")
def get_model(name, version):
return Model(name, version)
@app.view(model=Model)
def default(self, request):
return f"View: {self.name} {self.version}"
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/foo-one")
assert response.body == b"View: foo one"
response = c.get("/foo-one/link")
assert response.body == b"http://localhost/foo-one"
def test_variable_path_explicit_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
@app.path(model=Model, path="{id}", converters=dict(id=Converter(int)))
def get_model(id):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: {} ({})".format(self.id, type(self.id))
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/1")
assert response.body in (
b"View: 1 (<type 'int'>)",
b"View: 1 (<class 'int'>)",
)
response = c.get("/1/link")
assert response.body == b"http://localhost/1"
response = c.get("/broken", status=404)
def test_variable_path_implicit_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
@app.path(model=Model, path="{id}")
def get_model(id=0):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: {} ({})".format(self.id, type(self.id))
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/1")
assert response.body in (
b"View: 1 (<type 'int'>)",
b"View: 1 (<class 'int'>)",
)
response = c.get("/1/link")
assert response.body == b"http://localhost/1"
response = c.get("/broken", status=404)
def test_variable_path_explicit_trumps_implicit():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
@app.path(model=Model, path="{id}", converters=dict(id=Converter(int)))
def get_model(id="foo"):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: {} ({})".format(self.id, type(self.id))
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/1")
assert response.body in (
b"View: 1 (<type 'int'>)",
b"View: 1 (<class 'int'>)",
)
response = c.get("/1/link")
assert response.body == b"http://localhost/1"
response = c.get("/broken", status=404)
def test_url_parameter_explicit_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
@app.path(model=Model, path="/", converters=dict(id=Converter(int)))
def get_model(id):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: {} ({})".format(self.id, type(self.id))
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?id=1")
assert response.body in (
b"View: 1 (<type 'int'>)",
b"View: 1 (<class 'int'>)",
)
response = c.get("/link?id=1")
assert response.body == b"http://localhost/?id=1"
response = c.get("/?id=broken", status=400)
response = c.get("/")
assert response.body in (
b"View: None (<type 'NoneType'>)",
b"View: None (<class 'NoneType'>)",
)
def test_url_parameter_explicit_converter_get_converters():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
def get_converters():
return dict(id=Converter(int))
@app.path(model=Model, path="/", get_converters=get_converters)
def get_model(id):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: {} ({})".format(self.id, type(self.id))
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?id=1")
assert response.body in (
b"View: 1 (<type 'int'>)",
b"View: 1 (<class 'int'>)",
)
response = c.get("/link?id=1")
assert response.body == b"http://localhost/?id=1"
response = c.get("/?id=broken", status=400)
response = c.get("/")
assert response.body in (
b"View: None (<type 'NoneType'>)",
b"View: None (<class 'NoneType'>)",
)
def test_url_parameter_get_converters_overrides_converters():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
def get_converters():
return dict(id=Converter(int))
@app.path(
model=Model,
path="/",
        converters={"id": type("")},
get_converters=get_converters,
)
def get_model(id):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: {} ({})".format(self.id, type(self.id))
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?id=1")
assert response.body in (
b"View: 1 (<type 'int'>)",
b"View: 1 (<class 'int'>)",
)
response = c.get("/link?id=1")
assert response.body == b"http://localhost/?id=1"
response = c.get("/?id=broken", status=400)
response = c.get("/")
assert response.body in (
b"View: None (<type 'NoneType'>)",
b"View: None (<class 'NoneType'>)",
)
def test_url_parameter_implicit_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
@app.path(model=Model, path="/")
def get_model(id=0):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: {} ({})".format(self.id, type(self.id))
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?id=1")
assert response.body in (
b"View: 1 (<type 'int'>)",
b"View: 1 (<class 'int'>)",
)
response = c.get("/link?id=1")
assert response.body == b"http://localhost/?id=1"
response = c.get("/?id=broken", status=400)
response = c.get("/")
assert response.body in (
b"View: 0 (<type 'int'>)",
b"View: 0 (<class 'int'>)",
)
def test_multiple_url_parameters_stable_order():
class App(morepath.App):
pass
class Model:
def __init__(self, a, b):
self.a = a
self.b = b
@App.path(model=Model, path="/")
def get_model(a, b):
return Model(a, b)
@App.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(App())
response = c.get("/link?a=A&b=B")
assert response.body == b"http://localhost/?a=A&b=B"
def test_url_parameter_explicit_trumps_implicit():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
@app.path(model=Model, path="/", converters=dict(id=Converter(int)))
def get_model(id="foo"):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: {} ({})".format(self.id, type(self.id))
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?id=1")
assert response.body in (
b"View: 1 (<type 'int'>)",
b"View: 1 (<class 'int'>)",
)
response = c.get("/link?id=1")
assert response.body == b"http://localhost/?id=1"
response = c.get("/?id=broken", status=400)
response = c.get("/")
assert response.body in (
b"View: foo (<type 'str'>)",
b"View: foo (<class 'str'>)",
)
def test_decode_encode():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
def my_decode(s):
return s + "ADD"
def my_encode(s):
return s[: -len("ADD")]
@app.path(
model=Model,
path="/",
converters=dict(id=Converter(my_decode, my_encode)),
)
def get_model(id):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.id
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?id=foo")
assert response.body == b"View: fooADD"
response = c.get("/link?id=foo")
assert response.body == b"http://localhost/?id=foo"
def test_unknown_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, d):
self.d = d
class Unknown:
pass
@app.path(model=Model, path="/")
def get_model(d=Unknown()):
return Model(d)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.d
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
with pytest.raises(DirectiveReportError):
app.commit()
def test_not_all_path_variables_arguments_of_model_factory():
class App(morepath.App):
pass
class Model:
def __init__(self, foo):
self.foo = foo
class Unknown:
pass
@App.path(model=Model, path="/{foo}/{bar}")
def get_model(foo):
return Model(foo)
with pytest.raises(DirectiveReportError) as e:
App.commit()
assert str(e.value).startswith(
"Variable in path not found in function " "signature: bar"
)
def test_unknown_explicit_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, d):
self.d = d
class Unknown:
pass
@app.path(model=Model, path="/", converters={"d": Unknown})
def get_model(d):
return Model(d)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.d
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
with pytest.raises(DirectiveReportError):
app.commit()
def test_default_date_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, d):
self.d = d
from datetime import date
@app.path(model=Model, path="/")
def get_model(d=date(2011, 1, 1)):
return Model(d)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.d
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?d=20121110")
assert response.body == b"View: 2012-11-10"
response = c.get("/")
assert response.body == b"View: 2011-01-01"
response = c.get("/link?d=20121110")
assert response.body == b"http://localhost/?d=20121110"
response = c.get("/link")
assert response.body == b"http://localhost/?d=20110101"
response = c.get("/?d=broken", status=400)
def test_default_datetime_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, d):
self.d = d
from datetime import datetime
@app.path(model=Model, path="/")
def get_model(d=datetime(2011, 1, 1, 10, 30)):
return Model(d)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.d
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?d=20121110T144530")
assert response.body == b"View: 2012-11-10 14:45:30"
response = c.get("/")
assert response.body == b"View: 2011-01-01 10:30:00"
response = c.get("/link?d=20121110T144500")
assert response.body == b"http://localhost/?d=20121110T144500"
response = c.get("/link")
assert response.body == b"http://localhost/?d=20110101T103000"
c.get("/?d=broken", status=400)
def test_custom_date_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, d):
self.d = d
from datetime import date
from time import strptime, mktime
def date_decode(s):
return date.fromtimestamp(mktime(strptime(s, "%d-%m-%Y")))
def date_encode(d):
return d.strftime("%d-%m-%Y")
@app.converter(type=date)
def date_converter():
return Converter(date_decode, date_encode)
@app.path(model=Model, path="/")
def get_model(d=date(2011, 1, 1)):
return Model(d)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.d
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?d=10-11-2012")
assert response.body == b"View: 2012-11-10"
response = c.get("/")
assert response.body == b"View: 2011-01-01"
response = c.get("/link?d=10-11-2012")
assert response.body == b"http://localhost/?d=10-11-2012"
response = c.get("/link")
assert response.body == b"http://localhost/?d=01-01-2011"
response = c.get("/?d=broken", status=400)
def test_variable_path_parameter_required_no_default():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
@app.path(model=Model, path="", required=["id"])
def get_model(id):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.id
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?id=a")
assert response.body == b"View: a"
response = c.get("/", status=400)
def test_variable_path_parameter_required_with_default():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
@app.path(model=Model, path="", required=["id"])
def get_model(id="b"):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.id
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?id=a")
assert response.body == b"View: a"
response = c.get("/", status=400)
def test_type_hints_and_converters():
class app(morepath.App):
pass
class Model:
def __init__(self, d):
self.d = d
from datetime import date
@app.path(model=Model, path="", converters=dict(d=date))
def get_model(d):
return Model(d)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.d
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?d=20140120")
assert response.body == b"View: 2014-01-20"
response = c.get("/link?d=20140120")
assert response.body == b"http://localhost/?d=20140120"
def test_link_for_none_means_no_parameter():
class app(morepath.App):
pass
class Model:
def __init__(self, id):
self.id = id
@app.path(model=Model, path="")
def get_model(id):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "View: %s" % self.id
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/")
assert response.body == b"View: None"
response = c.get("/link")
assert response.body == b"http://localhost/"
def test_path_and_url_parameter_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, id, param):
self.id = id
self.param = param
from datetime import date
@app.path(model=Model, path="/{id}", converters=dict(param=date))
def get_model(id=0, param=None):
return Model(id, param)
@app.view(model=Model)
def default(self, request):
return f"View: {self.id} {self.param}"
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/1/link")
assert response.body == b"http://localhost/1"
def test_path_converter_fallback_on_view():
class app(morepath.App):
pass
class Root:
pass
class Model:
def __init__(self, id):
self.id = id
@app.path(model=Root, path="")
def get_root():
return Root()
@app.path(model=Model, path="/{id}")
def get_model(id=0):
return Model(id)
@app.view(model=Model)
def default(self, request):
return "Default view for %s" % self.id
@app.view(model=Root, name="named")
def named(self, request):
return "Named view on root"
c = Client(app())
response = c.get("/1")
assert response.body == b"Default view for 1"
response = c.get("/named")
assert response.body == b"Named view on root"
def test_root_named_link():
class app(morepath.App):
pass
@app.path(path="")
class Root:
pass
@app.view(model=Root)
def default(self, request):
return request.link(self, "foo")
c = Client(app())
response = c.get("/")
assert response.body == b"http://localhost/foo"
def test_path_class_and_model_argument():
class app(morepath.App):
pass
class Foo:
pass
@app.path(path="", model=Foo)
class Root:
pass
with pytest.raises(ConfigError):
app.commit()
def test_path_no_class_and_no_model_argument():
class app(morepath.App):
pass
@app.path(path="")
def get_foo():
return None
with pytest.raises(ConfigError):
app.commit()
def test_url_parameter_list():
class app(morepath.App):
pass
class Model:
def __init__(self, item):
self.item = item
@app.path(model=Model, path="/", converters={"item": [int]})
def get_model(item):
return Model(item)
@app.view(model=Model)
def default(self, request):
return repr(self.item)
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?item=1&item=2")
assert response.body == b"[1, 2]"
response = c.get("/link?item=1&item=2")
assert response.body == b"http://localhost/?item=1&item=2"
response = c.get("/link")
assert response.body == b"http://localhost/"
response = c.get("/?item=broken&item=1", status=400)
response = c.get("/")
assert response.body == b"[]"
def test_url_parameter_list_empty():
class app(morepath.App):
pass
class Model:
def __init__(self, item):
self.item = item
@app.path(model=Model, path="/", converters={"item": []})
def get_model(item):
return Model(item)
@app.view(model=Model)
def default(self, request):
return repr(self.item)
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?item=a&item=b")
assert response.body in (b"[u'a', u'b']", b"['a', 'b']")
response = c.get("/link?item=a&item=b")
assert response.body == b"http://localhost/?item=a&item=b"
response = c.get("/link")
assert response.body == b"http://localhost/"
response = c.get("/")
assert response.body == b"[]"
def test_url_parameter_list_explicit_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, item):
self.item = item
@app.path(model=Model, path="/", converters={"item": [Converter(int)]})
def get_model(item):
return Model(item)
@app.view(model=Model)
def default(self, request):
return repr(self.item)
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?item=1&item=2")
assert response.body == b"[1, 2]"
response = c.get("/link?item=1&item=2")
assert response.body == b"http://localhost/?item=1&item=2"
response = c.get("/link")
assert response.body == b"http://localhost/"
response = c.get("/?item=broken&item=1", status=400)
response = c.get("/")
assert response.body == b"[]"
def test_url_parameter_list_unknown_explicit_converter():
class app(morepath.App):
pass
class Model:
def __init__(self, item):
self.item = item
class Unknown:
pass
@app.path(model=Model, path="/", converters={"item": [Unknown]})
def get_model(item):
return Model(item)
with pytest.raises(DirectiveReportError):
app.commit()
def test_url_parameter_list_but_only_one_allowed():
class app(morepath.App):
pass
class Model:
def __init__(self, item):
self.item = item
@app.path(model=Model, path="/", converters={"item": int})
def get_model(item):
return Model(item)
@app.view(model=Model)
def default(self, request):
return repr(self.item)
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
c.get("/?item=1&item=2", status=400)
c.get("/link?item=1&item=2", status=400)
def test_extra_parameters():
class app(morepath.App):
pass
class Model:
def __init__(self, extra_parameters):
self.extra_parameters = extra_parameters
@app.path(model=Model, path="/")
def get_model(extra_parameters):
return Model(extra_parameters)
@app.view(model=Model)
def default(self, request):
return repr(sorted(self.extra_parameters.items()))
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?a=A&b=B")
assert response.body in (
b"[(u'a', u'A'), (u'b', u'B')]",
b"[('a', 'A'), ('b', 'B')]",
)
response = c.get("/link?a=A&b=B")
assert sorted(response.body[len("http://localhost/?") :].split(b"&")) == [
b"a=A",
b"b=B",
]
def test_extra_parameters_with_get_converters():
class app(morepath.App):
pass
class Model:
def __init__(self, extra_parameters):
self.extra_parameters = extra_parameters
def get_converters():
return {
"a": int,
"b": type(""),
}
@app.path(model=Model, path="/", get_converters=get_converters)
def get_model(extra_parameters):
return Model(extra_parameters)
@app.view(model=Model)
def default(self, request):
return repr(sorted(self.extra_parameters.items()))
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get("/?a=1&b=B")
assert response.body in (
b"[(u'a', 1), (u'b', u'B')]",
b"[('a', 1), ('b', 'B')]",
)
response = c.get("/link?a=1&b=B")
assert sorted(response.body[len("http://localhost/?") :].split(b"&")) == [
b"a=1",
b"b=B",
]
c.get("/?a=broken&b=B", status=400)
def test_script_name():
class app(morepath.App):
pass
class Model:
def __init__(self):
pass
@app.path(model=Model, path="simple")
def get_model():
return Model()
@app.view(model=Model)
def default(self, request):
return "View"
@app.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(app())
response = c.get(
"/prefix/simple", extra_environ=dict(SCRIPT_NAME="/prefix")
)
assert response.body == b"View"
response = c.get(
"/prefix/simple/link", extra_environ=dict(SCRIPT_NAME="/prefix")
)
assert response.body == b"http://localhost/prefix/simple"
def test_sub_path_different_variable():
# See discussion in https://github.com/morepath/morepath/issues/155
class App(morepath.App):
pass
class Foo:
def __init__(self, id):
self.id = id
class Bar:
def __init__(self, id, foo):
self.id = id
self.foo = foo
@App.path(model=Foo, path="{id}")
def get_foo(id):
return Foo(id)
@App.path(model=Bar, path="{foo_id}/{bar_id}")
def get_client(foo_id, bar_id):
return Bar(bar_id, Foo(foo_id))
@App.view(model=Foo)
def default_sbar(self, request):
return "M: %s" % self.id
@App.view(model=Bar)
def default_bar(self, request):
return f"S: {self.id} {self.foo.id}"
c = Client(App())
with pytest.raises(TrajectError) as ex:
response = c.get("/a")
assert response.body == b"M: a"
response = c.get("/a/b")
assert response.body == b"S: b a"
assert str(ex.value) == "step {id} and {foo_id} are in conflict"
def test_absorb_path():
class app(morepath.App):
pass
class Root:
pass
class Model:
def __init__(self, absorb):
self.absorb = absorb
@app.path(model=Root, path="")
def get_root():
return Root()
@app.path(model=Model, path="foo", absorb=True)
def get_model(absorb):
return Model(absorb)
@app.view(model=Model)
def default(self, request):
return "%s" % self.absorb
@app.view(model=Root)
def default_root(self, request):
return request.link(Model("a/b"))
c = Client(app())
response = c.get("/foo/a")
assert response.body == b"a"
response = c.get("/foo")
assert response.body == b""
response = c.get("/foo/a/b")
assert response.body == b"a/b"
# link to a/b absorb
response = c.get("/")
assert response.body == b"http://localhost/foo/a/b"
def test_absorb_path_with_variables():
class app(morepath.App):
pass
class Root:
pass
class Model:
def __init__(self, id, absorb):
self.id = id
self.absorb = absorb
@app.path(model=Root, path="")
def get_root():
return Root()
@app.path(model=Model, path="{id}", absorb=True)
def get_model(id, absorb):
return Model(id, absorb)
@app.view(model=Model)
def default(self, request):
return f"I:{self.id} A:{self.absorb}"
@app.view(model=Root)
def default_root(self, request):
return request.link(Model("foo", "a/b"))
c = Client(app())
response = c.get("/foo/a")
assert response.body == b"I:foo A:a"
response = c.get("/foo")
assert response.body == b"I:foo A:"
response = c.get("/foo/a/b")
assert response.body == b"I:foo A:a/b"
# link to a/b absorb
response = c.get("/")
assert response.body == b"http://localhost/foo/a/b"
def test_absorb_path_explicit_subpath_ignored():
class app(morepath.App):
pass
class Root:
pass
class Model:
def __init__(self, absorb):
self.absorb = absorb
class Another:
pass
@app.path(model=Root, path="")
def get_root():
return Root()
@app.path(model=Model, path="foo", absorb=True)
def get_model(absorb):
return Model(absorb)
@app.path(model=Another, path="foo/another")
def get_another():
return Another()
@app.view(model=Model)
def default(self, request):
return "%s" % self.absorb
@app.view(model=Another)
def default_another(self, request):
return "Another"
@app.view(model=Root)
def default_root(self, request):
return request.link(Another())
c = Client(app())
response = c.get("/foo/a")
assert response.body == b"a"
response = c.get("/foo/another")
assert response.body == b"another"
# link to another still works XXX is this wrong?
response = c.get("/")
assert response.body == b"http://localhost/foo/another"
def test_absorb_path_root():
class app(morepath.App):
pass
class Model:
def __init__(self, absorb):
self.absorb = absorb
@app.path(model=Model, path="", absorb=True)
def get_model(absorb):
return Model(absorb)
@app.view(model=Model)
def default(self, request):
return "A:{} L:{}".format(self.absorb, request.link(self))
c = Client(app())
response = c.get("/a")
assert response.body == b"A:a L:http://localhost/a"
response = c.get("/")
assert response.body == b"A: L:http://localhost/"
response = c.get("/a/b")
assert response.body == b"A:a/b L:http://localhost/a/b"
def test_path_explicit_variables():
class App(morepath.App):
pass
class Model:
def __init__(self, id):
self.store_id = id
@App.path(
model=Model, path="models/{id}", variables=lambda m: {"id": m.store_id}
)
def get_model(id):
return Model(id)
@App.view(model=Model)
def default(self, request):
return request.link(self)
c = Client(App())
response = c.get("/models/1")
assert response.body == b"http://localhost/models/1"
def test_path_explicit_variables_app_arg():
class App(morepath.App):
pass
class Model:
def __init__(self, id):
self.store_id = id
def my_variables(app, m):
assert isinstance(app, App)
return {"id": m.store_id}
@App.path(model=Model, path="models/{id}", variables=my_variables)
def get_model(id):
return Model(id)
@App.view(model=Model)
def default(self, request):
return request.link(self)
c = Client(App())
response = c.get("/models/1")
assert response.body == b"http://localhost/models/1"
def test_error_when_path_variable_is_none():
class App(morepath.App):
pass
class Model:
def __init__(self, id):
self.store_id = id
@App.path(model=Model, path="models/{id}", variables=lambda m: {"id": None})
def get_model(id):
return Model(id)
@App.view(model=Model)
def default(self, request):
return request.link(self)
c = Client(App())
with pytest.raises(LinkError):
c.get("/models/1")
def test_error_when_path_variable_is_missing():
class App(morepath.App):
pass
class Model:
def __init__(self, id):
self.store_id = id
@App.path(model=Model, path="models/{id}", variables=lambda m: {})
def get_model(id):
return Model(id)
@App.view(model=Model)
def default(self, request):
return request.link(self)
c = Client(App())
with pytest.raises(KeyError):
c.get("/models/1")
def test_error_when_path_variables_isnt_dict():
class App(morepath.App):
pass
class Model:
def __init__(self, id):
self.store_id = id
@App.path(model=Model, path="models/{id}", variables=lambda m: "nondict")
def get_model(id):
return Model(id)
@App.view(model=Model)
def default(self, request):
return request.link(self)
c = Client(App())
with pytest.raises(LinkError):
c.get("/models/1")
def test_resolve_path_method_on_request_same_app():
class App(morepath.App):
pass
class Model:
def __init__(self):
pass
@App.path(model=Model, path="simple")
def get_model():
return Model()
@App.view(model=Model)
def default(self, request):
return str(isinstance(request.resolve_path("simple"), Model))
@App.view(model=Model, name="extra")
def extra(self, request):
return str(request.resolve_path("nonexistent") is None)
@App.view(model=Model, name="appnone")
def appnone(self, request):
return request.resolve_path("simple", app=None)
c = Client(App())
response = c.get("/simple")
assert response.body == b"True"
response = c.get("/simple/extra")
assert response.body == b"True"
with pytest.raises(LinkError):
c.get("/simple/appnone")
def test_resolve_path_method_on_request_different_app():
class App(morepath.App):
pass
class Model:
def __init__(self):
pass
@App.path(model=Model, path="simple")
def get_model():
return Model()
@App.view(model=Model)
def default(self, request):
obj = request.resolve_path("p", app=request.app.child("sub"))
return str(isinstance(obj, SubModel))
class Sub(morepath.App):
pass
class SubModel:
pass
@Sub.path(model=SubModel, path="p")
def get_sub_model():
return SubModel()
@App.mount(path="sub", app=Sub)
def mount_sub():
return Sub()
c = Client(App())
response = c.get("/simple")
assert response.body == b"True"
def test_resolve_path_with_dots_in_url():
class app(morepath.App):
pass
class Root:
def __init__(self, absorb):
self.absorb = absorb
@app.path(model=Root, path="root", absorb=True)
def get_root(absorb):
return Root(absorb)
@app.view(model=Root)
def default(self, request):
return "%s" % self.absorb
c = Client(app())
response = c.get("/root/x/../child")
assert response.body == b"child"
response = c.get("/root/x/%2E%2E/child")
assert response.body == b"child"
response = c.get("/root/%2E%2E/%2E%2E/root")
assert response.body == b""
response = c.get("/root/%2E%2E/%2E%2E/root")
assert response.body == b""
response = c.get("/root/%2E%2E/%2E%2E/test", expect_errors=True)
assert response.status_code == 404
def test_quoting_link_generation():
class App(morepath.App):
pass
class Model:
def __init__(self):
pass
@App.path(model=Model, path="sim?ple")
def get_model():
return Model()
@App.view(model=Model)
def default(self, request):
return "View"
@App.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(App())
response = c.get("/sim%3Fple")
assert response.body == b"View"
response = c.get("/sim%3Fple/link")
assert response.body == b"http://localhost/sim%3Fple"
def test_quoting_link_generation_umlaut():
class App(morepath.App):
pass
class Model:
def __init__(self):
pass
@App.path(model=Model, path="simëple")
def get_model():
return Model()
@App.view(model=Model)
def default(self, request):
return "View"
@App.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(App())
response = c.get("/sim%C3%ABple")
assert response.body == b"View"
response = c.get("/sim%C3%ABple/link")
assert response.body == b"http://localhost/sim%C3%ABple"
def test_quoting_link_generation_tilde():
# tilde is an unreserved character according to
# https://www.ietf.org/rfc/rfc3986.txt but urllib.quote
# quotes it anyway. We test whether our workaround using
# the safe parameter works
class App(morepath.App):
pass
class Model:
def __init__(self):
pass
@App.path(model=Model, path="sim~ple")
def get_model():
return Model()
@App.view(model=Model)
def default(self, request):
return "View"
@App.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(App())
response = c.get("/sim~ple")
assert response.body == b"View"
response = c.get("/sim~ple/link")
assert response.body == b"http://localhost/sim~ple"
def test_parameter_quoting():
class App(morepath.App):
pass
class Model:
def __init__(self, s):
self.s = s
@App.path(model=Model, path="")
def get_model(s):
return Model(s)
@App.view(model=Model)
def default(self, request):
return "View: %s" % self.s
@App.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(App())
response = c.get("/?s=sim%C3%ABple")
assert response.body == "View: simëple".encode()
response = c.get("/link?s=sim%C3%ABple")
assert response.body == b"http://localhost/?s=sim%C3%ABple"
def test_parameter_quoting_tilde():
class App(morepath.App):
pass
class Model:
def __init__(self, s):
self.s = s
@App.path(model=Model, path="")
def get_model(s):
return Model(s)
@App.view(model=Model)
def default(self, request):
return "View: %s" % self.s
@App.view(model=Model, name="link")
def link(self, request):
return request.link(self)
c = Client(App())
response = c.get("/?s=sim~ple")
assert response.body == b"View: sim~ple"
response = c.get("/link?s=sim~ple")
assert response.body == b"http://localhost/?s=sim~ple"
def test_class_link_without_variables():
class App(morepath.App):
pass
class Model:
pass
@App.path(model=Model, path="/foo")
def get_model():
return Model()
@App.view(model=Model)
def link(self, request):
return request.class_link(Model)
c = Client(App())
response = c.get("/foo")
assert response.body == b"http://localhost/foo"
def test_class_link_no_app():
class App(morepath.App):
pass
class Model:
pass
@App.path(model=Model, path="/foo")
def get_model():
return Model()
@App.view(model=Model)
def link(self, request):
return request.class_link(Model, app=None)
c = Client(App())
with pytest.raises(LinkError):
c.get("/foo")
def test_class_link_with_variables():
class App(morepath.App):
pass
class Model:
pass
@App.path(model=Model, path="/foo/{x}")
def get_model(x):
return Model()
@App.view(model=Model)
def link(self, request):
return request.class_link(Model, variables={"x": "X"})
c = Client(App())
response = c.get("/foo/3")
assert response.body == b"http://localhost/foo/X"
def test_class_link_with_missing_variables():
class App(morepath.App):
pass
class Model:
pass
@App.path(model=Model, path="/foo/{x}")
def get_model(x):
return Model()
@App.view(model=Model)
def link(self, request):
return request.class_link(Model, variables={})
c = Client(App())
with pytest.raises(KeyError):
c.get("/foo/3")
def test_class_link_with_extra_variable():
class App(morepath.App):
pass
class Model:
pass
@App.path(model=Model, path="/foo/{x}")
def get_model(x):
return Model()
@App.view(model=Model)
def link(self, request):
return request.class_link(Model, variables={"x": "X", "y": "Y"})
c = Client(App())
response = c.get("/foo/3")
assert response.body == b"http://localhost/foo/X"
def test_class_link_with_url_parameter_variable():
class App(morepath.App):
pass
class Model:
pass
@App.path(model=Model, path="/foo/{x}")
def get_model(x, y):
return Model()
@App.view(model=Model)
def link(self, request):
return request.class_link(Model, variables={"x": "X", "y": "Y"})
c = Client(App())
response = c.get("/foo/3")
assert response.body == b"http://localhost/foo/X?y=Y"
def test_class_link_with_subclass():
class App(morepath.App):
pass
class Model:
pass
class Sub(Model):
pass
@App.path(model=Model, path="/foo/{x}")
def get_model(x):
return Model()
@App.view(model=Model)
def link(self, request):
return request.class_link(Sub, variables={"x": "X"})
c = Client(App())
response = c.get("/foo/3")
assert response.body == b"http://localhost/foo/X"
def test_absorb_class_path():
class App(morepath.App):
pass
class Root:
pass
class Model:
def __init__(self, absorb):
self.absorb = absorb
@App.path(model=Root, path="")
def get_root():
return Root()
@App.path(model=Model, path="foo", absorb=True)
def get_model(absorb):
return Model(absorb)
@App.view(model=Model)
def default(self, request):
return "%s" % self.absorb
@App.view(model=Root)
def default_root(self, request):
return request.class_link(Model, variables={"absorb": "a/b"})
c = Client(App())
# link to a/b absorb
response = c.get("/")
assert response.body == b"http://localhost/foo/a/b"
def test_absorb_class_path_with_variables():
class App(morepath.App):
pass
class Root:
pass
class Model:
def __init__(self, id, absorb):
self.id = id
self.absorb = absorb
@App.path(model=Root, path="")
def get_root():
return Root()
@App.path(model=Model, path="{id}", absorb=True)
def get_model(id, absorb):
return Model(id, absorb)
@App.view(model=Model)
def default(self, request):
return f"I:{self.id} A:{self.absorb}"
@App.view(model=Root)
def default_root(self, request):
return request.class_link(Model, variables=dict(id="foo", absorb="a/b"))
c = Client(App())
# link to a/b absorb
response = c.get("/")
assert response.body == b"http://localhost/foo/a/b"
def test_class_link_extra_parameters():
class App(morepath.App):
pass
class Model:
def __init__(self, extra_parameters):
self.extra_parameters = extra_parameters
@App.path(model=Model, path="/")
def get_model(extra_parameters):
return Model(extra_parameters)
@App.view(model=Model)
def default(self, request):
return repr(sorted(self.extra_parameters.items()))
@App.view(model=Model, name="link")
def link(self, request):
return request.class_link(
Model, variables={"extra_parameters": {"a": "A", "b": "B"}}
)
c = Client(App())
response = c.get("/link?a=A&b=B")
assert sorted(response.body[len("http://localhost/?") :].split(b"&")) == [
b"a=A",
b"b=B",
]
def test_path_on_model_class():
class App(morepath.App):
pass
@App.path("/")
class Model:
def __init__(self):
pass
@App.path("/login")
class Login:
pass
@App.view(model=Model)
def model_view(self, request):
return "Model"
@App.view(model=Login)
def login_view(self, request):
return "Login"
c = Client(App())
response = c.get("/")
assert response.body == b"Model"
response = c.get("/login")
assert response.body == b"Login"
def test_path_without_model():
class App(morepath.App):
pass
@App.path("/")
def get_path():
pass
with pytest.raises(dectate.DirectiveReportError):
App.commit()
def test_two_path_on_same_model_should_conflict():
class App(morepath.App):
pass
@App.path("/login")
@App.path("/")
class Login:
pass
with pytest.raises(dectate.ConflictError):
App.commit()
def test_path_on_same_model_explicit_and_class_should_conflict():
class App(morepath.App):
pass
@App.path("/")
class Login:
pass
@App.path("/login", model=Login)
def get_path():
return Login()
with pytest.raises(dectate.ConflictError):
App.commit()
def test_nonexisting_path_too_long_unconsumed():
class App(morepath.App):
pass
class Model:
def __init__(self):
pass
@App.path(model=Model, path="simple")
def get_model():
return Model()
@App.view(model=Model)
def default(self, request):
return "View"
c = Client(App())
c.get("/foo/bar/baz", status=404)
def test_collection_and_item():
class App(morepath.App):
pass
class Collection:
def __init__(self):
self.items = {}
class Item:
def __init__(self, id):
self.id = id
collection = Collection()
collection.items["a"] = Item("a")
collection.items["b"] = Item("b")
@App.path(model=Collection, path="/")
def get_collection():
return collection
@App.path(model=Item, path="/{id}")
def get_item(id):
return collection.items.get(id)
@App.view(model=Collection)
def default_collection(self, request):
return "Collection"
@App.view(model=Item)
def default(self, request):
return "View: %s" % self.id
c = Client(App())
r = c.get("/c", status=404)
    assert r.body != b"Collection"
r = c.get("/a")
assert r.body == b"View: a"
def test_view_for_missing():
class App(morepath.App):
pass
class Item:
def __init__(self, id):
self.id = id
@App.path(model=Item, path="/{id}")
def get_item(id):
if id == "found":
return Item(id)
return None
@App.view(model=Item, name="edit")
def default(self, request):
return "View: %s" % self.id
c = Client(App())
c.get("/notfound/+edit", status=404)
c.get("/notfound/edit", status=404)
def test_absorb_error():
class App(morepath.App):
pass
@App.path("/")
class Root:
pass
@App.view(model=Root)
def view_root(self, request):
return "root"
class File:
def __init__(self, absorb):
self.absorb = absorb
@App.path("/files", model=File, absorb=True)
def get_file(absorb):
if absorb == "foo":
return File("foo")
return None
@App.view(model=File)
def view_file(self, request):
return request.path
App.commit()
client = Client(App())
assert client.get("/").text == "root"
assert client.get("/files/foo").text == "/files/foo"
client.get("/files/bar", status=404)
def test_named_view_on_root():
class App(morepath.App):
pass
@App.path(path="/")
class Root:
pass
@App.view(model=Root, name="named")
def named(self, request):
return "Named view on root"
@App.view(model=Root)
def default(self, request):
return "Default view on root"
c = Client(App())
response = c.get("/named")
assert response.body == b"Named view on root"
response = c.get("/+named")
assert response.body == b"Named view on root"
response = c.get("/")
assert response.body == b"Default view on root"
|
11592446
|
import logging
import requests
from bs4 import BeautifulSoup
from http_request_randomizer.requests.parsers.UrlParser import UrlParser
from http_request_randomizer.requests.proxy.ProxyObject import ProxyObject, AnonymityLevel
logger = logging.getLogger(__name__)
__author__ = 'pgaref'
class RebroWeeblyParser(UrlParser):
def __init__(self, id, web_url, timeout=None):
self.top_proxy_path = "proxy-list.html"
self.txt_proxy_path = "txt-lists.html"
UrlParser.__init__(self, id=id, web_url=web_url, timeout=timeout)
def parse_proxyList(self, use_top15k=False):
curr_proxy_list = []
try:
response = requests.get(self.get_url() + "/" + self.top_proxy_path, timeout=self.timeout)
if not response.ok:
logger.warning("Proxy Provider url failed: {}".format(self.get_url()))
return []
content = response.content
soup = BeautifulSoup(content, "html.parser")
all_divs = soup.findAll("div", attrs={"class": "paragraph", 'style': "text-align:left;"})
# address_table = soup.find("div", attrs={"class": "paragraph", 'style': "text-align:left;"})
# .find('font', attrs={'color': '#33a27f'})
# Parse Top Proxy List page
address_list = []
country_list = []
anonymity_list = []
for div in all_divs:
address_div = div.find('font', attrs={'color': '#33a27f'})
if address_div is not None:
for row in [x for x in address_div.contents if getattr(x, 'name', None) != 'br']:
address_list.append(str(row))
curr_div = div.findAll('font', attrs={'size': '2'})
if curr_div[0] is not None:
row_data = []
# font -> strong -> font
title = curr_div[0].contents[0].contents[0].contents[0]
for row in [x for x in curr_div[-1].contents if getattr(x, 'name', None) != 'br']:
row_data.append(str(row))
if 'Country' in str(title):
country_list.extend(row_data)
if 'Status' in str(title):
anonymity_list.extend(row_data)
for address, country, anonymity in zip(address_list, country_list, anonymity_list):
# Make sure it is a Valid Proxy Address
proxy_obj = self.create_proxy_object(address, country, anonymity)
if proxy_obj is not None and UrlParser.valid_ip_port(proxy_obj.get_address()):
curr_proxy_list.append(proxy_obj)
else:
logger.debug("Proxy Invalid: {}".format(row))
# Usually these proxies are stale
if use_top15k:
# Parse 15k Nodes Text file (named *-all-*.txt)
content = requests.get(self.get_url() + "/" + self.txt_proxy_path).content
soup = BeautifulSoup(content, "html.parser")
table = soup.find("div", attrs={"class": "wsite-multicol-table-wrap"})
for link in table.findAll('a'):
current_link = link.get('href')
if current_link is not None and "all" in current_link:
self.txt_proxy_path = current_link
more_content = requests.get(self.get_url() + self.txt_proxy_path).text
for proxy_address in more_content.split():
if UrlParser.valid_ip_port(proxy_address):
                                # country/anonymity are not listed in the text file;
                                # "unknown" is assumed to map to a default AnonymityLevel
                                proxy_obj = self.create_proxy_object(proxy_address, "N/A", "unknown")
curr_proxy_list.append(proxy_obj)
except AttributeError as e:
logger.error("Provider {0} failed with Attribute error: {1}".format(self.id, e))
except KeyError as e:
logger.error("Provider {0} failed with Key error: {1}".format(self.id, e))
except Exception as e:
logger.error("Provider {0} failed with Unknown error: {1}".format(self.id, e))
finally:
return curr_proxy_list
def create_proxy_object(self, address, country, anonymity):
# Make sure it is a Valid IP
ip = address.strip().split(":")[0]
if not UrlParser.valid_ip(ip):
logger.debug("IP with Invalid format: {}".format(ip))
return None
port = address.strip().split(":")[1]
country = country.strip()
anonymity = AnonymityLevel.get(anonymity.strip())
return ProxyObject(source=self.id, ip=ip, port=port, anonymity_level=anonymity, country=country)
def __str__(self):
return "RebroWeebly Parser of '{0}' with required bandwidth: '{1}' KBs" \
.format(self.url, self.minimum_bandwidth_in_KBs)
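# Illustrative usage (a sketch: the provider URL is the one this parser targets,
# but its availability is not guaranteed, and id/timeout are arbitrary):
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    parser = RebroWeeblyParser(id="rebro-weebly", web_url="http://rebro.weebly.com", timeout=5)
    for proxy in parser.parse_proxyList()[:5]:
        print(proxy.get_address())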
|
11592458
|
online_store = {
"keychain": 0.75,
"tshirt": 8.50,
"bottle": 10.00
}
keychain = online_store['keychain']
tshirt = online_store['tshirt']
bottle = online_store['bottle']
choicekey = int(input("How many keychains will you be purchasing? If not purchasing keychains, enter 0. "))
choicetshirt = int(input("How many t-shirts will you be purchasing? If not purchasing t-shirts, enter 0. "))
choicebottle = int(input("How many water bottles will you be purchasing? If not purchasing water bottles, enter 0. "))
print("You are purchasing " + str(choicekey) + " keychains, " + str(choicetshirt) + " t-shirts, and " + str(choicebottle) + " water bottles.")
|
11592468
|
import numpy as np
from scipy.spatial.distance import cdist
def create_bins_and_dist_matrices(ns, constraints=True):
"""Get bins and distance matrix for pairwise distributions comparison using Earth Mover's
Distance (EMD).
ns requires:
bw_bonds
bw_angles
bw_constraints
bw_dihedrals
bins_constraints
bonded_max_range
ns creates:
bins_bonds
bins_angles
bins_dihedrals
bins_constraints
bins_bonds_dist_matrix
bins_angles_dist_matrix
bins_dihedrals_dist_matrix
bins_constraints_dist_matrix
"""
if constraints:
ns.bins_constraints = np.arange(0, ns.bonded_max_range + ns.bw_constraints, ns.bw_constraints)
ns.bins_bonds = np.arange(0, ns.bonded_max_range + ns.bw_bonds, ns.bw_bonds)
    # one extra bin for angles/dihedrals because a strict "less than" is used
    # later when assigning values to bins
    ns.bins_angles = np.arange(0, 180 + 2 * ns.bw_angles, ns.bw_angles)
    ns.bins_dihedrals = np.arange(-180, 180 + 2 * ns.bw_dihedrals, ns.bw_dihedrals)
# bins distance for Earth Mover's Distance (EMD) to calculate histograms similarity
if constraints:
bins_constraints_reshape = np.array(ns.bins_constraints).reshape(-1, 1)
ns.bins_constraints_dist_matrix = cdist(bins_constraints_reshape, bins_constraints_reshape)
bins_bonds_reshape = np.array(ns.bins_bonds).reshape(-1, 1)
ns.bins_bonds_dist_matrix = cdist(bins_bonds_reshape, bins_bonds_reshape)
bins_angles_reshape = np.array(ns.bins_angles).reshape(-1, 1)
ns.bins_angles_dist_matrix = cdist(bins_angles_reshape, bins_angles_reshape)
bins_dihedrals_reshape = np.array(ns.bins_dihedrals).reshape(-1, 1)
bins_dihedrals_dist_matrix = cdist(bins_dihedrals_reshape, bins_dihedrals_reshape) # 'classical' distance matrix
ns.bins_dihedrals_dist_matrix = np.where(bins_dihedrals_dist_matrix > max(bins_dihedrals_dist_matrix[0]) / 2,
max(bins_dihedrals_dist_matrix[0]) - bins_dihedrals_dist_matrix,
bins_dihedrals_dist_matrix) # periodic distance matrix
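# Illustrative check (a sketch assuming the optional `pyemd` package as the EMD
# backend; any EMD implementation accepting a ground-distance matrix would do):
if __name__ == "__main__":
    from types import SimpleNamespace
    from pyemd import emd

    ns = SimpleNamespace(bw_bonds=0.01, bw_angles=2, bw_dihedrals=2,
                         bw_constraints=0.002, bonded_max_range=2)
    create_bins_and_dist_matrices(ns)
    # one value per bin edge, so the histograms match the matrix dimensions
    h1 = np.zeros(len(ns.bins_bonds))
    h2 = np.zeros(len(ns.bins_bonds))
    h1[10] = 1.0
    h2[20] = 1.0
    # two unit spikes 10 bins apart: EMD = 10 * bw_bonds = 0.1
    print(emd(h1, h2, ns.bins_bonds_dist_matrix))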
|
11592482
|
import numpy as np
from bico.geometry.squared_euclidean import squared_euclidean_distance
from bico.nearest_neighbor.base import NearestNeighbor, NearestNeighborResult
from typing import List
class SimpleProjection(NearestNeighbor):
""" Nearest neighbor implementation by projecting points into buckets using random dot products """
def __init__(self, dimension: int, number_projections: int, threshold_filter: float):
"""
:param dimension:
Number of dimensions of input points
:param number_projections:
Number of random projections used for finding nearest neighbors.
Trade-off: More projections result in a smaller number of false positives in candidate set
:param threshold_filter:
Distance threshold for definition nearest: all points within this specific distance
"""
self.dimension = dimension
self.number_projections = number_projections
self.threshold_filter = threshold_filter
self.__create_projections()
def __create_projections(self):
self.projections = np.array(list(np.random.standard_normal(self.dimension)
for _ in range(self.number_projections)))
self.buckets = [dict() for _ in range(self.number_projections)]
def project(self, point: np.ndarray) -> np.ndarray:
return self.projections.dot(point)
    def get_candidates(self, point: np.ndarray) -> List[NearestNeighborResult]:
        proj_values = self.project(point)
        bucket_values = self.get_bucket_values(proj_values)
        # of all the buckets the query point falls into (one per projection),
        # scan only the one with the fewest candidates
        smallest_bucket = min([x[1].get(bucket_values[x[0]], [])
                               for x in enumerate(self.buckets)], key=len)
        distances = [squared_euclidean_distance(p[0], point) for p in smallest_bucket]
        # each stored entry is a (point, metadata) tuple
        res = [NearestNeighborResult(p[0][0], p[0][1], p[1]) for p in zip(smallest_bucket, distances)]
        return sorted(res, key=lambda x: x.distance)
def get_bucket_values(self, proj_values: np.ndarray) -> np.ndarray:
return (proj_values / (2 * self.threshold_filter)).astype(int)
def insert_candidate(self, point: np.ndarray, metadata):
proj_values = self.project(point)
data_point = (point, metadata)
bucket_values = self.get_bucket_values(proj_values)
for i, bucket in enumerate(self.buckets):
cand_list = bucket.get(bucket_values[i], [])
if len(cand_list) > 0:
cand_list.append(data_point)
else:
bucket[bucket_values[i]] = [data_point]
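# Illustrative usage (a sketch; dimension, projection count and threshold are
# arbitrary):
if __name__ == "__main__":
    nn = SimpleProjection(dimension=3, number_projections=4, threshold_filter=0.5)
    rng = np.random.default_rng(0)
    for i in range(100):
        nn.insert_candidate(rng.standard_normal(3), metadata=i)
    candidates = nn.get_candidates(rng.standard_normal(3))
    # candidates come back sorted by squared distance to the query point
    if candidates:
        print(candidates[0].distance)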
|
11592517
|
from __future__ import unicode_literals
from .compat import implements_to_string
from . import diagnose
from .interface import AttributeExposer
__all__ = ["MoyaException", "FatalMoyaException", "throw"]
@implements_to_string
class MoyaException(Exception, AttributeExposer):
fatal = False
__moya_exposed_attributes__ = ["type", "msg", "info", "diagnosis"]
def __init__(self, type, msg, diagnosis=None, info=None):
self.type = type
self.msg = msg
self._diagnosis = diagnosis
self.info = info or {}
@property
def diagnosis(self):
return self._diagnosis or diagnose.diagnose_moya_exception(self)
def __str__(self):
return "{}: {}".format(self.type, self.msg)
def __repr__(self):
return '<exception %s:"%s">' % (self.type, self.msg)
def __moyaconsole__(self, console):
from . import pilot
console(self.type + ": ", fg="red", bold=True)(self.msg).nl()
if self.info:
console.obj(pilot.context, self.info)
class FatalMoyaException(MoyaException):
fatal = True
def throw(type, msg, diagnosis=None, info=None):
raise MoyaException(type, msg, diagnosis=diagnosis, info=info)
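# Illustrative usage (a sketch; the exception type string, message and info
# payload are invented for the example):
if __name__ == "__main__":
    try:
        throw("db.missing-row", "no row matching the given id", info={"id": 42})
    except MoyaException as e:
        print(e)  # -> db.missing-row: no row matching the given id
        print(e.info)  # -> {'id': 42}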
|
11592522
|
VERSION = (0, 10, 1, 'final', 0)
__all__ = [
'WebSocketApplication',
'Resource',
'WebSocketServer',
'WebSocketError',
'get_version'
]
def get_version(*args, **kwargs):
from .utils import get_version
return get_version(*args, **kwargs)
try:
from .resource import WebSocketApplication, Resource
from .server import WebSocketServer
from .exceptions import WebSocketError
except ImportError:
    # Dependencies (e.g. gevent) may be unavailable at install time; allow the
    # package to be imported anyway so that get_version() keeps working.
    pass
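# Illustrative usage of the names re-exported above (a sketch; the echo
# application, bind address and port are invented for the example):
if __name__ == "__main__":
    from collections import OrderedDict

    class EchoApplication(WebSocketApplication):
        def on_message(self, message):
            if message is not None:
                self.ws.send(message)

    server = WebSocketServer(("0.0.0.0", 8000),
                             Resource(OrderedDict([("/", EchoApplication)])))
    server.serve_forever()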
|
11592524
|
from __future__ import annotations
from decimal import Decimal
from typing import Type
import pytest
from typic.constraints import (
IntContraints,
FloatContraints,
DecimalContraints,
ConstraintValueError,
ConstraintSyntaxError,
)
from typic.constraints.common import BaseConstraints
@pytest.mark.parametrize(
argnames=("val", "constraint", "expected"),
argvalues=[
(0, IntContraints(), 0),
(1, IntContraints(gt=0), 1),
(2, IntContraints(ge=2), 2),
(3, IntContraints(lt=4), 3),
(4, IntContraints(le=4), 4),
(5, IntContraints(mul=5), 5),
(0.0, FloatContraints(), 0.0),
(1.0, FloatContraints(gt=0), 1.0),
(2.0, FloatContraints(ge=2), 2.0),
(3.0, FloatContraints(lt=4), 3.0),
(4.0, FloatContraints(le=4), 4.0),
(5.0, FloatContraints(mul=5), 5.0),
(Decimal(0.0), DecimalContraints(), 0.0),
(Decimal(1.0), DecimalContraints(gt=0), 1.0),
(Decimal(2.0), DecimalContraints(ge=2), 2.0),
(Decimal(3.0), DecimalContraints(lt=4), 3.0),
(Decimal(4.0), DecimalContraints(le=4), 4.0),
(Decimal(5.0), DecimalContraints(mul=5), 5.0),
(Decimal(6.0), DecimalContraints(max_digits=2), 6.0),
(Decimal(7.0), DecimalContraints(decimal_places=2), 7.0),
(
Decimal("0.7"),
DecimalContraints(decimal_places=2, max_digits=2),
Decimal("0.7"),
),
],
)
def test_validate_values(val: str, constraint: IntContraints, expected: str):
assert constraint.validate(val) == expected
@pytest.mark.parametrize(
argnames=("val", "constraint", "expected"),
argvalues=[
(0, IntContraints(gt=0), ConstraintValueError),
(1, IntContraints(ge=2), ConstraintValueError),
(2, IntContraints(lt=2), ConstraintValueError),
(3, IntContraints(le=2), ConstraintValueError),
(0.0, FloatContraints(gt=0), ConstraintValueError),
(1.0, FloatContraints(ge=2), ConstraintValueError),
(2.0, FloatContraints(lt=2), ConstraintValueError),
(3.0, FloatContraints(le=2), ConstraintValueError),
(Decimal(1.0), DecimalContraints(gt=1), ConstraintValueError),
(Decimal(1.0), DecimalContraints(ge=2), ConstraintValueError),
(Decimal(4.0), DecimalContraints(lt=4), ConstraintValueError),
(Decimal(5.0), DecimalContraints(le=4), ConstraintValueError),
(Decimal(6.0), DecimalContraints(mul=5), ConstraintValueError),
(Decimal("60.0"), DecimalContraints(max_digits=2), ConstraintValueError),
(Decimal("7.000"), DecimalContraints(decimal_places=2), ConstraintValueError),
],
)
def test_validate_values_error(val: str, constraint: IntContraints, expected: str):
with pytest.raises(expected):
constraint.validate(val)
@pytest.mark.parametrize(
argnames=("constraint", "kwargs"),
argvalues=[
(IntContraints, dict(gt=2, ge=2)),
(IntContraints, dict(lt=2, le=2)),
(IntContraints, dict(lt=2, gt=2)),
(IntContraints, dict(lt=2, ge=2)),
(DecimalContraints, dict(max_digits=1, decimal_places=2)),
],
)
def test_constraint_syntax_error(constraint: Type[BaseConstraints], kwargs: dict):
with pytest.raises(ConstraintSyntaxError):
constraint(**kwargs)
@pytest.mark.parametrize(
argnames=("val", "constraint", "expected"),
argvalues=[
(1, IntContraints(gt=0, lt=2), 1),
(Decimal(7.0), DecimalContraints(gt=1, max_digits=3, decimal_places=2), 7.0),
],
)
def test_validate_values_complex(val: str, constraint: IntContraints, expected: str):
assert constraint.validate(val) == expected
|
11592569
|
t = int(input())
while t:
n = int(input())
if n < 1500:
HRA = n*0.10
DA = n*0.9
else:
HRA = 500
DA = n*0.98
print(n + HRA + DA)
t = t-1
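# Worked example: for n = 1000 (< 1500), HRA = 100.0 and DA = 900.0, so the
# gross salary printed is 1000 + 100.0 + 900.0 = 2000.0. For n = 2000,
# HRA = 500 and DA = 1960.0, giving 2000 + 500 + 1960.0 = 4460.0.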
|
11592580
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
PROGRESSION_STATUS = (
(0, 'Completed'),
(1, 'Almost there'),
(2, 'Under progression'),
(3, 'Initialized'),
)
class Activity(models.Model):
user = models.ForeignKey(User)
activity_category = models.CharField(max_length=200)
creation_date = models.DateTimeField(auto_now_add=True, blank=True)
details = models.CharField(max_length=500)
    # the choice keys above are integers, so an IntegerField is required
    # (a 1-character CharField would never match them during validation)
    status = models.IntegerField(choices=PROGRESSION_STATUS)
def __unicode__(self):
return self.activity_category
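# Illustrative usage from a configured Django shell (a sketch; the username and
# field values are invented for the example):
# user = User.objects.get(username="alice")
# Activity.objects.create(user=user, activity_category="Reading",
#                         details="Finish chapter 3", status=3)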
|
11592583
|
import math
import numpy as np
import matplotlib.pyplot as plt
from aeropy.geometry.airfoil import CST
from aeropy.morphing.camber_2D import *
# testing = 'structurally_consistent'
testing = 'structurally_consistent'
inverted = False
morphing_direction = 'forwards'
if testing == 'tracing':
N1 = 1.
N2 = 1.
tip_displacement = {'x': 1., 'y':.5}
other_points = {'x': [0.7], 'y':[0.25]}
A0 = -tip_displacement['x']/tip_displacement['y']
# Check if y values are smaller than tip y
for y_i in other_points['y']:
if y_i>=tip_displacement['y']:
print('Y value out of bounds!')
A = calculate_shape_coefficients_tracing(A0, other_points['y'], other_points['x'], N1, N2, chord = tip_displacement['y'], EndThickness = tip_displacement['x'])
#plotting
y = np.linspace(0, tip_displacement['y'], 100000)
x = CST(y, tip_displacement['y'], deltasz= tip_displacement['x'], Au = A, N1=N1, N2=N2)
plt.plot(x,y)
plt.scatter(other_points['x'] + [tip_displacement['x']],
other_points['y'] + [tip_displacement['y']])
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
elif testing == 'structurally_consistent':
#==============================================================================
# Inputs
#==============================================================================
# Parameter
c_P = 1. #m
deltaz = 0.*c_P #m
# Avian wing, order 5
Au_P = [0.23993240191629417, 0.34468227138908186, 0.18125405377549103,
0.35371349126072665, 0.2440815012119143, 0.25724974995738387]
Al_P = [0.18889012559339036, -0.24686758992053115, 0.077569769493868401,
-0.547827192265256, -0.0047342206759065641, -0.23994805474814629]
# NACA0012
# Au_P = [0.10887, 0.1187, 0.07843, 0.12084, 0.07919, 0.09840]
# Al_P = [0.11117, 0.1000, 0.1239, 0.06334, 0.11539, 0.10400]
# Passive shape coefficients for parent
# Au_P = [.5,.4,.3]
# Active shape coefficients for parent
# Al_P = [.5,.1,.1]
n = len(Au_P) - 1
if inverted:
temp = Au_P
Au_P = list(-np.array(Al_P))
Al_P = list(-np.array(temp))
# Shape coefficients for upper surface of cruise airfoil
# AC_u1 = 0.25 #Adimensional
# AC_u2 = 0.25 #Adimensional
# AC_u3 = 0.25 #Adimensional
# AC_u4 = 0.25 #Adimensional
# AC_u5 = 0.25
# Medium
# AC_u1 = 0.2187 #Adimensional
# AC_u2 = 0.17843 #Adimensional
# AC_u3 = 0.22084 #Adimensional
# AC_u4 = 0.17919 #Adimensional
# AC_u5 = 0.19840 #Adimensional
# Small
# AC_u1 = 0.1487 #Adimensional
# AC_u2 = 0.10843 #Adimensional
# AC_u3 = 0.15084 #Adimensional
# AC_u4 = 0.10919 #Adimensional
# AC_u5 = 0.12840 #Adimensional
# Passive shape coefficients for child
AC_u = [.25, .25, .25]
# AC_u1 = 0.34468227138908186 #Adimensional
# AC_u2 = 0.18125405377549103 #Adimensional
# AC_u3 = 0.35371349126072665 #Adimensional
# AC_u4 = 0.2440815012119143 #Adimensional
# AC_u5 = 0.25724974995738387 #Adimensional
    # Spar positions for cruise (adimensional because the chord will still be calculated)
psi_spars = [0.1, 0.2, 0.3]
#==============================================================================
# Calculate dependent coefficients
#==============================================================================
Au_C, Al_C, c_C, spar_thicknesses = calculate_dependent_shape_coefficients(
AC_u,
psi_spars, Au_P, Al_P,
deltaz, c_P, morphing=morphing_direction)
#==============================================================================
# Plot results
#==============================================================================
np.set_printoptions(precision=20)
# Print shape for children
x = np.linspace(0, c_C, 100000)
y = CST(x, c_C, deltasz= [deltaz/2., deltaz/2.], Al= Al_C, Au =Au_C)
plt.plot(x, y['u'], 'b', label = 'Children', lw=2)
plt.plot(x, y['l'], 'b', label = None, lw=2)
# Print shape for parent
x = np.linspace(0, c_P, 100000)
y = CST(x, c_P, deltasz= [deltaz/2., deltaz/2.], Al= Al_P, Au =Au_P)
plt.plot(x, y['u'], 'r--', label='Parent', lw=2)
plt.plot(x, y['l'], 'r--', label = None, lw=2)
if morphing_direction == 'forwards':
psi_flats = []
intersections_x_children = [0]
intersections_y_children = [0]
intersections_x_parent = [0]
intersections_y_parent = [0]
for j in range(len(psi_spars)):
psi_parent_j = psi_spars[j]
# Calculate psi at landing
# psi_baseline, Au_baseline, Au_goal, deltaz, c_baseline, c_goal
psi_children_j = calculate_psi_goal(psi_parent_j, Au_P, Au_C, deltaz, c_P, c_C)
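            # psi is the chordwise position normalized by the local chord, so x = psi*c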
x_children_j = psi_children_j*c_C
# Calculate xi at landing
temp = CST(x_children_j, c_C, [deltaz/2., deltaz/2.], Al= Al_C, Au =Au_C)
y_children_j = temp['u']
s = calculate_spar_direction(psi_spars[j], Au_P, Au_C, deltaz, c_C)
            # Plot spars for child
if not inverted:
plt.plot([x_children_j, x_children_j - spar_thicknesses[j]*s[0]],[y_children_j, y_children_j - spar_thicknesses[j]*s[1]], c = 'b', lw=2, label=None)
else:
plt.plot([x_children_j, x_children_j - spar_thicknesses[j]*s[0]],[-y_children_j, -y_children_j + spar_thicknesses[j]*s[1]], c = 'b', lw=2, label=None)
psi_flats.append(x_children_j - spar_thicknesses[j]*s[0])
y = CST(np.array([psi_parent_j*c_P]), c_P, deltasz=[deltaz/2., deltaz/2.], Al= Al_P, Au =Au_P)
intersections_x_children.append(x_children_j - spar_thicknesses[j]*s[0])
intersections_y_children.append(y_children_j - spar_thicknesses[j]*s[1])
            # Plot spars for parent
if not inverted:
plt.plot([psi_parent_j*c_P, psi_parent_j*c_P], [y['u'], y['u']-spar_thicknesses[j]], 'r--', lw=2, label = None)
else:
plt.plot([psi_parent_j*c_P, psi_parent_j*c_P], [-y['u'], -y['u']+spar_thicknesses[j]], 'r--', lw=2, label = None)
intersections_x_parent.append(psi_parent_j*c_P)
intersections_y_parent.append(y['u']-spar_thicknesses[j])
elif morphing_direction == 'backwards':
        # For backwards morphing, the goal shape is the parent and the deformed shape is the child
for i in range(len(psi_spars)):
psi_i = psi_spars[i]
# Calculate psi at landing
psi_goal_i = calculate_psi_goal(psi_i, Au_C, Au_P, deltaz, c_C, c_P)
x_goal_i = psi_goal_i*c_P
# Calculate xi at landing
temp = CST(x_goal_i, c_P, [deltaz/2., deltaz/2.], Al= Al_P, Au =Au_P)
y_goal_i = temp['u']
#calculate spar direction
s = calculate_spar_direction(psi_i, Au_C, Au_P, deltaz, c_P)
plt.plot([x_goal_i, x_goal_i - spar_thicknesses[i]*s[0]],[y_goal_i, y_goal_i - spar_thicknesses[i]*s[1]], 'r--')
y = CST(np.array([psi_i*c_C]), c_C, deltasz=[deltaz/2., deltaz/2.], Al= Al_C, Au =Au_C)
plt.plot([psi_i*c_C, psi_i*c_C], [y['u'], y['u']-spar_thicknesses[i]], 'b', lw=2, label = None)
    plt.xlabel(r'$\psi^p$', fontsize = 14)
plt.ylabel(r'$\zeta^p$', fontsize = 14)
plt.ylim([-0.06,0.17])
plt.grid()
plt.gca().set_aspect('equal', adjustable='box')
plt.legend(loc=1)
plt.show()
if morphing_direction == 'forwards':
print('chords', c_P, c_C)
# Calculate initial lengths
strains, av_strains = calculate_strains(Au_P, Al_P, c_P, Au_C, Al_C, c_C, deltaz, psi_spars)
intersections_x_children.append(c_C)
intersections_y_children.append(0)
intersections_x_parent.append(c_P)
intersections_y_parent.append(0)
        # Wire segment lengths and engineering strains, (L_child - L_parent)/L_parent, per segment
for i in range(len(intersections_x_children)-1):
length_parent = math.sqrt((intersections_x_parent[i]-intersections_x_parent[i+1])**2+
(intersections_y_parent[i]-intersections_y_parent[i+1])**2)
length_children = math.sqrt((intersections_x_children[i]-intersections_x_children[i+1])**2+
(intersections_y_children[i]-intersections_y_children[i+1])**2)
print((length_children-length_parent)/length_parent)
|
11592602
|
from umongo.fields import ListField, EmbeddedField
from umongo.document import DocumentImplementation
from umongo.embedded_document import EmbeddedDocumentImplementation
def map_entry(entry, fields):
"""
Retrieve the entry from the given fields and replace it if it should
have a different name within the database.
    :param entry: is one of the following:
        - invalid field name
        - command (e.g. $eq)
- valid field with no attribute name
- valid field with an attribute name to use instead
"""
field = fields.get(entry)
if isinstance(field, ListField) and isinstance(field.inner, EmbeddedField):
fields = field.inner.embedded_document_cls.schema.fields
elif isinstance(field, EmbeddedField):
fields = field.embedded_document_cls.schema.fields
return getattr(field, 'attribute', None) or entry, fields
def map_entry_with_dots(entry, fields):
"""
    The given entry may be a '.'-separated combination of single entries.
"""
mapped = []
for sub_entry in entry.split('.'):
mapped_sub_entry, fields = map_entry(sub_entry, fields)
mapped.append(mapped_sub_entry)
return '.'.join(mapped), fields
def map_query(query, fields):
"""
    Retrieve the given fields within the query and replace their names with
    the ones they should have within the database.
"""
if isinstance(query, dict):
mapped_query = {}
for entry, entry_query in query.items():
mapped_entry, entry_fields = map_entry_with_dots(entry, fields)
mapped_query[mapped_entry] = map_query(entry_query, entry_fields)
return mapped_query
if isinstance(query, (list, tuple)):
return [map_query(x, fields) for x in query]
# Passing a Document only makes sense in a Reference, let's query on ObjectId
if isinstance(query, DocumentImplementation):
return query.pk
if isinstance(query, EmbeddedDocumentImplementation):
return query.to_mongo()
return query
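# Usage sketch (hypothetical document, my illustration): for a Document whose
# field is declared as `name = fields.StrField(attribute='_name')`, calling
#   map_query({'name': {'$eq': 'foo'}}, Doc.schema.fields)
# yields {'_name': {'$eq': 'foo'}}. Dotted entries such as 'address.city' are
# resolved component by component through embedded-document schemas.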
|
11592619
|
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
import torch
def conv3x1(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=(3, 1), stride=(stride, 1),
padding=(1, 0), bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=(stride, 1), bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x1(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x1(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
#self.se = SELayer(planes)
self.downsample = downsample
self.stride = stride
self.dropout = nn.Dropout(.2)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.conv2(out)
out = self.bn2(out)
#out = self.se(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, in_channel=1, out_channel=10, mode='MSE', zero_init_residual=False):
super(ResNet, self).__init__()
self.inplanes = 8
self.conv1 = nn.Conv2d(in_channel, self.inplanes, kernel_size=(7, 1), stride=(1, 1), padding=(3, 0),
bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
#self.maxpool = nn.MaxPool2d(kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.layer1 = self._make_layer(block, 16, layers[0], 2)
self.layer2 = self._make_layer(block, 16, layers[1], 2) #16
self.layer3 = self._make_layer(block, 32, layers[2]) #32
#self.layer4 = self._make_layer(block, 128, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 14))
cat_number = 32*2*14*4
self.fc = nn.Sequential(nn.Linear(cat_number * block.expansion, 100), nn.ReLU(inplace=True))
self.dropout = nn.Dropout(0.2)
self.fc1 = nn.Sequential(nn.Linear(100, out_channel))
"""
self.fc1 = nn.Sequential(nn.Linear(cat_number * block.expansion, 100), nn.ReLU(inplace=True),
nn.Dropout(dropout_rate), nn.Linear(100, out_channel), nn.ReLU(inplace=True),)
"""
self.fc2 = nn.Sequential(nn.Linear(100, out_channel))
self.fc3 = nn.Sequential(nn.Linear(100, out_channel))
self.fc_std = nn.Sequential(nn.Linear(100, out_channel), nn.Softplus())
self.mode = mode
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
"""
for m in self.modules():
if isinstance(m, nn.Conv1d):
n = m.kernel_size[0] * m.kernel_size[0] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
m.bias.data.zero_()
"""
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
#x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
#print(x.shape)
x = self.layer3(x)
#print(x.shape)
#x = self.layer4(x)
#x = self.avgpool(x)
#x_max = self.maxpool(x)
#x = torch.cat((x_avg, x_max), 1)
x = x.view(x.size(0), -1)
#x = self.dropout(x)
if self.mode == 'MSE':
x = self.fc(x)
x = self.dropout(x)
x = self.fc1(x)
return x
elif self.mode == 'QL':
x = self.fc(x)
x = self.dropout(x)
x1 = self.fc1(x)
x2 = self.fc2(x)
x3 = self.fc3(x)
return x1, x2, x3
elif self.mode == 'GD':
x = self.fc(x)
x = self.dropout(x)
x1 = self.fc1(x)
x2 = self.fc_std(x)
return x1, x2
else:
x = self.fc1(x)
return x
def Mresnet(**kwargs):
"""Constructs a modified ResNet model.
"""
model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
return model
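# Usage sketch (my example, not part of the original code): the hard-coded
# flatten size 32*2*14*4 = 3584 matches an input of shape (N, 1, 112, 4),
# since the two stride-2 stages reduce the height 112 -> 56 -> 28 and the
# width is never downsampled: 32 channels * 28 * 4 = 3584.
if __name__ == '__main__':
    model = Mresnet(in_channel=1, out_channel=10, mode='MSE')
    out = model(torch.randn(2, 1, 112, 4))
    print(out.shape)  # expected: torch.Size([2, 10])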
|
11592724
|
import torch.nn as nn
class EMAHelper(object):
def __init__(self, mu=0.999):
self.mu = mu
self.shadow = {}
def register(self, module):
if isinstance(module, nn.DataParallel):
module = module.module
for name, param in module.named_parameters():
if param.requires_grad:
self.shadow[name] = param.data.clone()
def update(self, module):
if isinstance(module, nn.DataParallel):
module = module.module
for name, param in module.named_parameters():
if param.requires_grad:
self.shadow[name].data = (
1. - self.mu) * param.data + self.mu * self.shadow[name].data
def ema(self, module):
if isinstance(module, nn.DataParallel):
module = module.module
for name, param in module.named_parameters():
if param.requires_grad:
param.data.copy_(self.shadow[name].data)
def ema_copy(self, module):
if isinstance(module, nn.DataParallel):
inner_module = module.module
module_copy = type(inner_module)(
inner_module.config).to(inner_module.config.device)
module_copy.load_state_dict(inner_module.state_dict())
module_copy = nn.DataParallel(module_copy)
else:
module_copy = type(module)(module.config).to(module.config.device)
module_copy.load_state_dict(module.state_dict())
# module_copy = copy.deepcopy(module)
self.ema(module_copy)
return module_copy
def state_dict(self):
return self.shadow
def load_state_dict(self, state_dict):
self.shadow = state_dict
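# Usage sketch (my example, not part of the original code): maintain an EMA
# of a model's weights during training and load them into an evaluation copy.
if __name__ == '__main__':
    import copy
    import torch
    model = nn.Linear(4, 2)
    ema_helper = EMAHelper(mu=0.999)
    ema_helper.register(model)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(10):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema_helper.update(model)  # blend the new weights into the shadow copy
    eval_model = copy.deepcopy(model)
    ema_helper.ema(eval_model)  # eval_model now holds the averaged weights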
|
11592734
|
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras import initializers
from keras import layers
from keras.utils.generic_utils import get_custom_objects
# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
class EfficientNetConvInitializer(initializers.Initializer):
"""Initialization for convolutional kernels.
The main difference with tf.variance_scaling_initializer is that
tf.variance_scaling_initializer uses a truncated normal with an uncorrected
    standard deviation, whereas here we use a normal distribution. Similarly,
tf.contrib.layers.variance_scaling_initializer uses a truncated normal with
a corrected standard deviation.
# Arguments:
shape: shape of variable
dtype: dtype of variable
partition_info: unused
# Returns:
an initialization for the variable
"""
def __init__(self):
super(EfficientNetConvInitializer, self).__init__()
def __call__(self, shape, dtype=None):
dtype = dtype or K.floatx()
kernel_height, kernel_width, _, out_filters = shape
fan_out = int(kernel_height * kernel_width * out_filters)
return K.random_normal(
shape, mean=0.0, stddev=np.sqrt(2.0 / fan_out), dtype=dtype)
# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
class EfficientNetDenseInitializer(initializers.Initializer):
"""Initialization for dense kernels.
This initialization is equal to
tf.variance_scaling_initializer(scale=1.0/3.0, mode='fan_out',
distribution='uniform').
    It is written out explicitly here for clarity.
# Arguments:
shape: shape of variable
dtype: dtype of variable
partition_info: unused
# Returns:
an initialization for the variable
"""
def __init__(self):
super(EfficientNetDenseInitializer, self).__init__()
def __call__(self, shape, dtype=None):
dtype = dtype or K.floatx()
init_range = 1.0 / np.sqrt(shape[1])
return K.random_uniform(shape, -init_range, init_range, dtype=dtype)
# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
class Swish(layers.Layer):
def __init__(self, **kwargs):
super(Swish, self).__init__(**kwargs)
self.supports_masking = True
def call(self, inputs, training=None):
return tf.nn.swish(inputs)
# Obtained from https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
class DropConnect(layers.Layer):
def __init__(self, drop_connect_rate=0., **kwargs):
super(DropConnect, self).__init__(**kwargs)
self.drop_connect_rate = float(drop_connect_rate)
def call(self, inputs, training=None):
def drop_connect():
keep_prob = 1.0 - self.drop_connect_rate
# Compute drop_connect tensor
batch_size = tf.shape(inputs)[0]
random_tensor = keep_prob
random_tensor += K.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
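            # floor(keep_prob + U[0,1)) is 1 with probability keep_prob and 0
            # otherwise, i.e. a per-example Bernoulli(keep_prob) keep mask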
binary_tensor = tf.floor(random_tensor)
output = (inputs / keep_prob) * binary_tensor
return output
return K.in_train_phase(drop_connect, inputs, training=training)
def get_config(self):
config = {
'drop_connect_rate': self.drop_connect_rate,
}
base_config = super(DropConnect, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
get_custom_objects().update({
'EfficientNetConvInitializer': EfficientNetConvInitializer,
'EfficientNetDenseInitializer': EfficientNetDenseInitializer,
'DropConnect': DropConnect,
'Swish': Swish,
})
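# Usage sketch (my example, assuming the legacy standalone Keras that this
# file imports): wiring the custom initializers and layers into a tiny model.
if __name__ == '__main__':
    from keras.models import Model
    inp = layers.Input(shape=(32, 32, 3))
    y = layers.Conv2D(8, 3, padding='same',
                      kernel_initializer=EfficientNetConvInitializer())(inp)
    y = Swish()(y)
    y = DropConnect(drop_connect_rate=0.2)(y)
    Model(inp, y).summary()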
|
11592794
|
from ipykernel.kernelbase import Kernel
from tableauhyperapi import HyperProcess, Connection, Telemetry, HyperException
from tabulate import tabulate
import time
import json
from datetime import datetime
import shlex
class HyperKernel(Kernel):
implementation = 'Hyper'
implementation_version = '0.0'
language = 'sql'
language_version = '0.0'
language_info = {
'name': 'sql',
'mimetype': 'text/sql',
'file_extension': '.sql',
}
banner = "Hyper 🚀 - Your friendly neighborhood SQL database.\n" +\
"Type '\\?' for help."
def __init__(self, *args, **kwargs):
super(HyperKernel, self).__init__(*args, **kwargs)
self._hyper_process = HyperProcess(Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU, 'jupyter_sql_kernel')
self._connection = Connection(self._hyper_process.endpoint)
self._output_func = self._display_output
def do_shutdown(self, restart):
self._connection.close()
self._hyper_process.close()
return {'status': 'ok', 'restart': restart}
def _success_response(self, payloads=[]):
return {
'status': 'ok',
# The base class increments the execution count for us already
'execution_count': self.execution_count,
'payload': payloads,
'user_expressions': {},
}
def _error_response(self, ename, evalue, traceback):
# Format & send the error message
error_response = {
'ename': ename,
'evalue': evalue,
'traceback': traceback
}
self.send_response(self.iopub_socket, 'error', error_response)
error_response['status'] = 'error'
error_response['execution_count'] = self.execution_count
return error_response
def _send_text(self, txt):
self.send_response(self.iopub_socket, 'display_data', {'data': {'text/plain': txt}, 'metadata': {}})
def _format_hyper_error(self, e):
formatted = f"Error:\n{e.main_message}"
        if e.hint:
            formatted += f"\nHINT: {e.hint}"
return formatted
def _display_output(self, sql_result, silent):
if not silent:
column_names = [c.name for c in sql_result.schema.columns]
result = list(sql_result)
if column_names or result:
response_data = {
'text/plain': tabulate(result, headers=column_names),
'text/html': tabulate(result, headers=column_names, tablefmt='html'),
}
# Integration with the "@tableau/query-graphs-jupyterlab-extension" extension for plan rendering in JupyterLab
if column_names == ["plan"]:
try:
response_data['application/vnd.tableau.hyper-queryplan'] = json.loads("".join(row[0] for row in result))
                    except json.JSONDecodeError:
pass
                # Support for "Vega output" from Hyper.
                # If the user is skilled enough to write a SQL query that outputs a Vega visualization, go ahead and display it in JupyterLab.
if len(column_names) == 1 and len(result) == 1 and isinstance(result[0][0], str):
try:
parsed = json.loads(result[0][0])
if isinstance(parsed, dict):
if parsed.get("$schema", "").startswith('https://vega.github.io/schema/vega/'):
response_data['application/vnd.vega.v5+json'] = parsed
del response_data['text/html']
if parsed.get("$schema", "").startswith('https://vega.github.io/schema/vega-lite/'):
response_data['application/vnd.vegalite.v3+json'] = parsed
del response_data['text/html']
                    except json.JSONDecodeError:
pass
self.send_response(self.iopub_socket, 'display_data', {'source': 'sql', 'data': response_data, 'metadata': {}})
def _create_file_output_func(self, filename):
def _file_output(self, sql_result, silent):
with open(filename, "a") as f:
column_names = [c.name for c in sql_result.schema.columns]
result = list(sql_result)
f.write(tabulate(result, headers=column_names))
f.write("\n")
return _file_output.__get__(self, HyperKernel)
def _discard_output(self, sql_result, silent):
if sql_result is not None and sql_result.schema is not None:
# We still want to fetch the whole result (to not screw up timing measurements)
for i in sql_result:
pass
def execute_sql(self, code, silent):
"Execute a SQL query and display the results to the user"
start_time = time.perf_counter()
try:
with self._connection.execute_query(code) as sql_result:
self._output_func(sql_result, silent)
except HyperException as e:
# Format & send the error message
return self._error_response(str("HyperException"), str(e.args[0]), [self._format_hyper_error(e)])
end_time = time.perf_counter()
elapsed = end_time - start_time
self._send_text('{:.3f}s elapsed'.format(elapsed))
return self._success_response()
def _command_input_sql(self, args):
"""
Read SQL query from a file and execute it
"""
if len(args) != 1:
return self._error_response("InvalidClientCommandArguments", repr(args), ["Unexpected number of arguments"])
filename = args[0]
try:
with open(filename) as f:
file_content = f.read()
except:
return self._error_response("IOError", repr(args), [f"Unable to read file '{filename}'"])
        return self.execute_sql(file_content, silent=False)
def _command_redirect_output(self, args):
"""
Redirect output into a file
"""
if len(args) > 1:
return self._error_response("InvalidClientCommandArguments", repr(args), ["Unexpected number of arguments"])
if len(args) == 0:
self._output_func = self._display_output
elif args[0] == "-":
self._output_func = self._discard_output
else:
filename = args[0]
# Truncate the file & create if it does not exist
try:
with open(filename, "w"):
pass
except:
return self._error_response("IOError", repr(args), [f"Unable to read file '{filename}'"])
self._output_func = self._create_file_output_func(filename)
def _command_attach(self, args):
"""
Open a Hyper file
"""
if len(args) != 2:
return self._error_response("InvalidClientCommandArguments", repr(args), ["Unexpected number of arguments"])
database_path = args[0]
alias = args[1]
try:
self._connection.catalog.attach_database(database_path, alias)
except HyperException as e:
# Format & send the error message
return self._error_response(str("HyperException"), str(e.args[0]), [self._format_hyper_error(e)])
def _command_detach(self, args):
"""
Close a Hyper file
"""
if len(args) != 1:
return self._error_response("InvalidClientCommandArguments", repr(args), ["Unexpected number of arguments"])
alias = args[0]
try:
self._connection.catalog.detach_database(alias)
except HyperException as e:
# Format & send the error message
return self._error_response(str("HyperException"), str(e.args[0]), [self._format_hyper_error(e)])
def _process_client_command(self, code, silent):
"Execute a client command"
commands = {
"i": self._command_input_sql,
"o": self._command_redirect_output,
"attach": self._command_attach,
"detach": self._command_detach,
}
# Tokenize command line
code = code.lstrip()
assert code[0] == '\\'
code = code[1:]
args = list(shlex.split(code, posix=True))
cmd = args.pop(0)
if cmd == "?" or cmd == "help":
help_text = 'SQL command reference: https://help.tableau.com/current/api/hyper_api/en-us/reference/sql/sql-commands.html\n'
help_text += 'Additional client-side commands:\n'
help_text += tabulate((["\\" + c[0], c[1].__doc__] for c in commands.items()), tablefmt='plain')
help_text += '\n'
help_text += 'Parameters are parsed in POSIX shell manner.\n'
self._send_text(help_text)
return self._success_response()
if cmd not in commands:
return self._error_response("UnknownClientCommand", cmd, [f"Unknown client command \{cmd}"])
response = commands[cmd](args)
return response if response is not None else self._success_response()
def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):
        if code.lstrip().startswith('\\'):
return self._process_client_command(code, silent)
else:
return self.execute_sql(code, silent)
|