id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
117934
|
from config import Config
class inputConfig:
    """Static configuration describing the land-feature segmentation inputs."""

    # Class id -> human readable label (background is excluded here).
    CLASS_DICT = {1: 'waterways', 2: 'fieldborders', 3: 'terraces', 4: 'wsb'}
    NUM_CLASSES = 4
    # Convenience views over the mapping: labels and ids, in dict order.
    CATEGORIES = list(CLASS_DICT.values())
    CATEGORIES_VALUES = list(CLASS_DICT.keys())
    # Training schedule and input geometry.
    NUM_EPOCHES = 1
    # TRAIN_LAYERS = 'all'
    # SAVE_TRAIN = 'logs'
    IMAGE_HEIGHT = 224
    IMAGE_WIDTH = 224
    JPG_NAME = 'jpg4'
    # Output locations for metrics and trained model logs.
    OUTPUT_DIR = '/home/tinzha/Projects/LandOLakes/posthack/metrics'
    MODEL_DIR = '/home/tinzha/Projects/LandOLakes/logs'
class modelConfig(Config):
    """Configuration for training on MS COCO.
    Derives from the base Config class and overrides values specific
    to the COCO dataset.
    """
    # NOTE(review): NAME says "coco" but NUM_CLASSES comes from inputConfig's
    # land-feature classes — presumably a Mask R-CNN-style Config; confirm.
    # Give the configuration a recognizable name
    NAME = "coco"
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 1
    # Use small images for faster training. Set the limits of the small side
    # the large side, and that determines the image shape.
    IMAGE_MIN_DIM = 128
    IMAGE_MAX_DIM = 256
    # Number of classes (including background)
    NUM_CLASSES = 1 + inputConfig.NUM_CLASSES
class inferenceConfig(modelConfig):
    """Inference-time variant of modelConfig: one image at a time on one GPU."""
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
|
117950
|
import pytest
def test_create_meshed_flow(api):
    """Build a 32-device fully meshed flow configuration and print it."""
    config = api.config()
    for i in range(1, 33):
        # One port and one device per index; the device gets a single
        # ethernet with a single IPv4 address.
        config.ports.port(name='Port %s' % i, location='localhost/%s' % i)
        device = config.devices.device(name='Device %s' % i)[-1]
        device.ethernets.ethernet()
        eth = device.ethernets[-1]
        eth.port_name = 'Port %s' % i
        eth.name = 'Eth %s' % i
        eth.mac = '00:00:00:00:00:{:02x}'.format(i)
        eth.ipv4_addresses.ipv4()
        ip = eth.ipv4_addresses[-1]
        ip.name = 'Ipv4 %s' % i
        ip.gateway = '10.1.1.%s' % i
        ip.address = '10.1.2.%s' % i
    # Every device transmits to every other device (mesh mode).
    flow = config.flows.flow(name='Fully Meshed Flow')[0]
    flow.tx_rx.device.tx_names = [tx.name for tx in config.devices]
    flow.tx_rx.device.rx_names = [rx.name for rx in config.devices]
    flow.tx_rx.device.mode = flow.tx_rx.device.MESH
    flow.size.fixed = 128
    flow.rate.pps = 1000
    flow.duration.fixed_packets.packets = 10000
    flow.packet.ethernet().vlan().ipv4().tcp()
    print(config)
if __name__ == '__main__':
    pytest.main(['-vv', '-s', __file__])
|
117971
|
import torch
from torch import nn
from torch.nn import functional as F
from torch.distributions.uniform import Uniform
from networks.layers.non_linear import NonLinear, NonLinearType
from networks.layers.conv_bn import ConvBN
class DropConnect(nn.Module):
    def __init__(self, survival_prob):
        """
        A module that implements drop connect (stochastic depth): during
        training, whole samples in the batch are randomly zeroed and the
        survivors rescaled so the expected activation is unchanged.
        :param survival_prob: the probability of connection survival
        """
        super(DropConnect, self).__init__()
        self.survival_prob = survival_prob
        self.u = Uniform(0, 1)

    def forward(self, x):
        """
        The forward function of the DropConnect module
        :param x: Input tensor x (a 4-D NCHW batch)
        :return: x unchanged in eval mode; in train mode, x with whole
                 samples dropped and survivors scaled by 1/survival_prob
        """
        if not self.training:
            return x
        # One Bernoulli(survival_prob) draw per sample via floor(U(0,1) + p).
        # Fix: sample on x's device instead of hard-coding .cuda(), so the
        # module also works for CPU inputs (identical behavior on CUDA).
        random_tensor = self.u.sample([x.shape[0], 1, 1, 1]).to(x.device)
        random_tensor += self.survival_prob
        binary_tensor = torch.floor(random_tensor)
        return x * binary_tensor / self.survival_prob
class GlobalAvgPool2d(nn.Module):
    """Average each feature map down to a single value (output is N x C x 1 x 1)."""

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, x):
        """
        Apply global average pooling over the full spatial extent of *x*.
        :param x: Input tensor x (NCHW)
        :return: A tensor of shape (N, C, 1, 1)
        """
        _, _, height, width = x.shape
        return F.avg_pool2d(x, (height, width))
class SEBlock(nn.Module):
    """Squeeze-and-Excitation block: channel-wise reweighting of the input."""

    def __init__(self, nc, in_channels, reduce_channels):
        """
        :param nc: Input network controller
        :param in_channels: the number of input channels
        :param reduce_channels: the number of channels after reduction
        """
        super(SEBlock, self).__init__()
        self.gap = GlobalAvgPool2d()
        # Squeeze: 1x1 conv down to reduce_channels followed by swish.
        squeeze = [ConvBN(nc, in_channels, reduce_channels, 1, disable_bn=True),
                   NonLinear(nc, reduce_channels, NonLinearType.SWISH)]
        # Excite: 1x1 conv back up, sigmoid gives per-channel gates in (0, 1).
        excite = [ConvBN(nc, reduce_channels, in_channels, 1, disable_bn=True),
                  NonLinear(nc, in_channels, NonLinearType.SIGMOID)]
        self.conv_reduce = nn.Sequential(*squeeze)
        self.conv_expand = nn.Sequential(*excite)

    def forward(self, x):
        """
        :param x: Input tensor x
        :return: x scaled channel-wise by the learned excitation gates
        """
        pooled = self.gap(x)
        gates = self.conv_expand(self.conv_reduce(pooled))
        return x * gates
class ConvBNNonLinear(nn.Sequential):
    def __init__(self, nc, in_planes, out_planes, kernel_size=3, stride=1, groups=1, nl_type=NonLinearType.RELU6,
                 batch_norm_epsilon=1e-5, batch_norm_momentum=0.1, tf_padding=False):
        """
        Conv2d + batch-norm + non-linearity, with HMQ quantization of both the
        convolution weights and the activation.
        :param nc: The network quantization controller
        :param in_planes: The number of input channels
        :param out_planes: The number of output channels
        :param kernel_size: The kernel size
        :param stride: The convolution stride
        :param groups: The convolution group size
        :param nl_type: enum that states the non-linear type
        :param batch_norm_epsilon: The batch normalization epsilon
        :param batch_norm_momentum: The batch normalization momentum
        :param tf_padding: Use TensorFlow padding (for EfficientNet)
        """
        # TF-style "SAME" padding differs from the usual symmetric padding.
        if tf_padding:
            padding = kernel_size - stride
        else:
            padding = (kernel_size - 1) // 2
        conv_bn = ConvBN(nc, in_planes, out_planes, kernel_size, stride, padding, group=groups,
                         batch_norm_epsilon=batch_norm_epsilon, batch_norm_momentum=batch_norm_momentum,
                         tf_padding=tf_padding)
        activation = NonLinear(nc, out_planes, nl_type)
        super(ConvBNNonLinear, self).__init__(conv_bn, activation)
class InvertedResidual(nn.Module):
    def __init__(self, nc, inp, oup, stride, expand_ratio, kernel_size=3, nl_type=NonLinearType.RELU6, se_ratio=0,
                 survival_prob=0, batch_norm_epsilon=1e-5, batch_norm_momentum=0.1, tf_padding=False):
        """
        The inverted-residual (MBConv) block used in EfficientNet.
        :param nc: The network quantization controller
        :param inp: The number of input channels
        :param oup: The number of output channels
        :param stride: The depth-wise convolution stride
        :param expand_ratio: The block expand ratio for the depth-wise convolution
        :param kernel_size: The kernel size
        :param nl_type: enum that states the non-linear type
        :param se_ratio: ratio between input channels and SE-block mid channels
        :param survival_prob: the probability of connection survival
        :param batch_norm_epsilon: The batch normalization epsilon
        :param batch_norm_momentum: The batch normalization momentum
        :param tf_padding: Use TensorFlow padding (for EfficientNet)
        """
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        self.kernel_size = kernel_size
        # Residual shortcut only when geometry and channel count are preserved.
        self.use_res_connect = self.stride == 1 and inp == oup
        hidden_dim = int(round(inp * expand_ratio))
        stages = []
        if expand_ratio != 1:
            # Point-wise expansion to hidden_dim channels.
            stages.append(ConvBNNonLinear(nc, inp, hidden_dim, kernel_size=1, nl_type=nl_type,
                                          batch_norm_epsilon=batch_norm_epsilon,
                                          batch_norm_momentum=batch_norm_momentum))
        # Depth-wise convolution (groups == channels).
        stages.append(
            ConvBNNonLinear(nc, hidden_dim, hidden_dim, kernel_size=kernel_size, stride=stride, groups=hidden_dim,
                            nl_type=nl_type, batch_norm_epsilon=batch_norm_epsilon,
                            batch_norm_momentum=batch_norm_momentum, tf_padding=tf_padding))
        if se_ratio != 0:
            stages.append(SEBlock(nc, hidden_dim, int(inp * se_ratio)))
        # Point-wise linear projection back down to oup channels.
        stages.append(ConvBNNonLinear(nc, hidden_dim, oup, kernel_size=1, stride=1, nl_type=NonLinearType.IDENTITY,
                                      batch_norm_epsilon=batch_norm_epsilon,
                                      batch_norm_momentum=batch_norm_momentum))
        if survival_prob != 0 and self.use_res_connect:
            stages.append(DropConnect(survival_prob))
        self.conv = nn.Sequential(*stages)
        self.output_q = NonLinear(nc, oup, nl_type=NonLinearType.IDENTITY)

    def forward(self, x):
        """
        :param x: Input tensor x
        :return: the block output (with residual add when applicable),
                 passed through the output quantizer
        """
        out = self.conv(x)
        if self.use_res_connect:
            out = x + out
        return self.output_q(out)
class RepeatedInvertedResidual(nn.Module):
    def __init__(self, nc, n_repeat, in_channels, out_channels, stride_first, expand_ratio, kernel_size=3,
                 nl_type=NonLinearType.RELU6,
                 se_ratio=0,
                 survival_prob_start=0, drop_rate=0, batch_norm_epsilon=1e-5, batch_norm_momentum=0.1,
                 tf_padding=False):
        """
        A stage of n_repeat InvertedResidual blocks. Only the first block may
        change channel count or stride; subsequent blocks are stride-1 with
        out_channels in and out.
        :param nc: The network quantization controller
        :param n_repeat: number of InvertedResidual blocks in the stage
        :param in_channels: The number of input channels
        :param out_channels: The number of output channels
        :param stride_first: The depth-wise convolution stride in the first block
        :param expand_ratio: The block expand ratio for depth-wise convolution
        :param kernel_size: The kernel size
        :param nl_type: enum that states the non-linear type
        :param se_ratio: ratio between input channels and SE-block mid channels
        :param survival_prob_start: connection survival probability in the first block
        :param drop_rate: per-block linear decay applied to the survival probability
        :param batch_norm_epsilon: The batch normalization epsilon
        :param batch_norm_momentum: The batch normalization momentum
        :param tf_padding: Use TensorFlow padding (for EfficientNet)
        """
        super(RepeatedInvertedResidual, self).__init__()
        decay_enabled = survival_prob_start > 0 and drop_rate > 0
        stages = []
        for index in range(n_repeat):
            # Survival probability decays linearly with depth when enabled.
            survival = survival_prob_start - drop_rate * float(index) if decay_enabled else 0
            stages.append(InvertedResidual(nc,
                                           in_channels if index == 0 else out_channels,
                                           out_channels,
                                           stride_first if index == 0 else 1,
                                           expand_ratio, kernel_size=kernel_size,
                                           nl_type=nl_type, se_ratio=se_ratio, survival_prob=survival,
                                           batch_norm_epsilon=batch_norm_epsilon,
                                           batch_norm_momentum=batch_norm_momentum,
                                           tf_padding=tf_padding))
        self.blocks = nn.Sequential(*stages)

    def forward(self, x):
        """
        :param x: Input tensor x
        :return: the output of the chained InvertedResidual blocks
        """
        return self.blocks(x)
|
117978
|
import os
import json
import numpy as np
import scipy.sparse as sp
from src.model.linear_svm import LinearSVM
from src.model.random_forest import RandomForest
from src.metric.uar import get_UAR, get_post_probability, get_late_fusion_UAR
from src.utils.io import load_proc_baseline_feature, save_UAR_results
from src.utils.io import save_post_probability, load_post_probability
from src.utils.io import save_cv_results
from src.utils.preprocess import upsample
from src.utils.preprocess import k_fold_cv
'''
BASELINE CLASSIFICATION (py) PROVIDED BY AVEC2018
features | computed level
-------- | --------------
MFCCs | frame level
eGeMAPS | turn level
DeepSpectrum | activations in ALEXNET
BoAW | window size (2s)
FAUs | session level
BoVW | window size (11s)
'''
class BaseLine():
    """
    Baseline system in BD classification, based on SVM/RF using LLDs and fusion
    ---
    Attributes
    -----------
    model_name: str
        model for BaseLine() instance, SVM / RF / RF_cv
    feature_name: str
        feature for BaseLine() instance, MFCC/eGeMAPS/Deep/BoAW/FAU/BoVW
    test: bool
        whether to test BaseLine() or not
    ----------------------------------------------------------------------
    Functions
    -----------
    run(): public
        main function
    run_[MFCC,eGeMAPS,DeepSpectrum,BoAW,AU,BoVW](): public
        run classifier on specified feature (single modality)
    run_fusion(): public
        run late fusion on a pair of specified features
    """
    def __init__(self, model_name, feature_name, test=False):
        # para model_name: determines the model in the baseline system
        # para feature_name: determines the feature in the baseline system
        self.model_name = model_name
        self.feature_name = feature_name
        self.test = test
        print("\nbaseline system initialized, model %s feature %s" % (self.model_name, self.feature_name))

    def run(self):
        """main function of BaseLine() instance: dispatch on feature_name
        """
        if self.feature_name == 'FUSE':
            # NOTE(review): the fusion pair is still a placeholder — fill in
            # real feature names before using 'FUSE' mode.
            feature_name_1 = ''
            feature_name_2 = ''
            self.run_fusion(feature_name_1, feature_name_2)
            return
        dispatch = {
            'MFCC': self.run_MFCC,
            'eGeMAPS': self.run_eGeMAPS,
            'Deep': self.run_DeepSpectrum,
            'BoAW': self.run_BoAW,
            'AU': self.run_AU,
            'BoVW': self.run_BoVW,
        }
        runner = dispatch.get(self.feature_name)
        if runner is not None:
            runner()

    def _run_single_modality(self, feature_key, banner, ravel_y=False, cv_inst=False,
                             use_inst=True, post_prob=True, session_prob=False):
        """Shared pipeline for every single-modality feature.

        The six run_* methods were near-identical copies; their few real
        differences are captured by the keyword flags.

        :param feature_key: key passed to load_proc_baseline_feature
        :param banner: progress message printed before loading
        :param ravel_y: flatten label arrays before cross-validation (MFCC/BoAW)
        :param cv_inst: track per-fold instance ids during CV (MFCC only)
        :param use_inst: feature has frame-level instance ids (False for AU)
        :param post_prob: save posterior probabilities for later fusion
        :param session_prob: query the fitted model for session probabilities (AU)
        """
        print(banner)
        X_train, y_train, train_inst, X_dev, y_dev, dev_inst = load_proc_baseline_feature(feature_key, verbose=True)
        if not use_inst:
            # session-level features carry no frame->session instance mapping
            train_inst, dev_inst = np.array([]), np.array([])
        y_pred_train, y_pred_dev = None, None
        if self.model_name == 'RF_cv':
            # Cross-validated random forest over train+dev pooled together.
            if ravel_y:
                y_train, y_dev = np.ravel(y_train), np.ravel(y_dev)
            if cv_inst:
                train_inst, dev_inst = np.ravel(train_inst), np.ravel(dev_inst)
            X = np.vstack((X_train, X_dev))
            y = np.hstack((y_train, y_dev))
            assert len(X) == len(y)
            inst = None
            if cv_inst:
                inst = np.hstack((train_inst, dev_inst))
                assert len(X) == len(inst)
            cv_res = []
            for (ids_train, ids_dev) in k_fold_cv(len(X)):
                X_train, y_train = X[ids_train], y[ids_train]
                X_dev, y_dev = X[ids_dev], y[ids_dev]
                if cv_inst:
                    dev_inst = inst[ids_dev]
                forest = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev,
                                      baseline=True, test=self.test)
                forest.run()
                y_pred_train, y_pred_dev = forest.evaluate()
                _, session_res = get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name,
                                         self.feature_name, 'baseline', baseline=True, test=True)
                cv_res.append(session_res)
            save_cv_results(cv_res, self.model_name, self.feature_name, 'baseline')
        print("\nupsampling training data to address class imbalance")
        X_train, y_train, train_inst = upsample(X_train, y_train, train_inst)
        print("\nobtaining sparse matrix for better classification")
        X_train, X_dev = sp.csr_matrix(X_train), sp.csr_matrix(X_dev)
        model = None
        if self.model_name == 'SVM':
            model = LinearSVM(self.feature_name, X_train, y_train, X_dev, y_dev,
                              baseline=True, test=self.test)
        elif self.model_name == 'RF':
            model = RandomForest(self.feature_name, X_train, y_train, X_dev, y_dev,
                                 baseline=True, test=self.test)
        if model is not None:
            model.run()
            y_pred_train, y_pred_dev = model.evaluate()
            if session_prob:
                # kept for parity with the original AU flow; result unused here
                model.get_session_probability()
        if not use_inst:
            # AU reports session-level UAR with empty instance arrays
            train_inst, dev_inst = np.array([]), np.array([])
        get_UAR(y_pred_train, y_train, train_inst, self.model_name, self.feature_name,
                'baseline', baseline=True, train_set=True, test=self.test)
        get_UAR(y_pred_dev, y_dev, dev_inst, self.model_name, self.feature_name,
                'baseline', baseline=True, test=self.test)
        if post_prob and not self.test:
            get_post_probability(y_pred_dev, y_dev, dev_inst, np.array([]),
                                 self.model_name, self.feature_name)

    def run_MFCC(self):
        """run classifier on MFCC feature (single modality)
        """
        self._run_single_modality(
            'MFCC', "\nbuilding a classifier on MFCC features (both frame-level and session-level)",
            ravel_y=True, cv_inst=True)

    def run_eGeMAPS(self):
        """run classifier on eGeMAPS feature (single modality)
        """
        self._run_single_modality(
            'eGeMAPS', "\nbuilding a classifier on eGeMAPS features (both frame-level and session-level)")

    def run_DeepSpectrum(self):
        """run classifier on DeepSpectrum feature (single modality)
        """
        self._run_single_modality(
            'Deep', "\nbuilding a classifier on Deep features (both frame-level and session-level)")

    def run_BoAW(self):
        """run classifier on BoAW feature (single modality)
        """
        self._run_single_modality(
            'BoAW', "\nbuilding a classifier on BoAW features (both frame-level and session-level)",
            ravel_y=True)

    def run_AU(self):
        """run classifier on AU feature (single modality)
        """
        self._run_single_modality(
            'AU', "\nbuilding a classifier on AU features (already session-level)",
            use_inst=False, post_prob=False, session_prob=True)

    def run_BoVW(self):
        """run classifier on BoVW feature (single modality)
        """
        self._run_single_modality(
            'BoVW', "\nbuilding a classifier on BoVW features (both frame-level and session-level)")

    def run_fusion(self, feature_name_1, feature_name_2):
        """run late fusion on a pair of specified features
        """
        get_late_fusion_UAR(self.model_name, feature_name_1, feature_name_2, baseline=True)
|
117984
|
import ctds
from .base import TestExternalDatabase
class TestCursor(TestExternalDatabase):
    """Class-level tests for the ctds Cursor type."""

    def test___doc__(self):
        # The literal below must match ctds.Cursor.__doc__ byte-for-byte,
        # including leading whitespace — do not re-indent it.
        self.assertEqual(
            ctds.Cursor.__doc__,
            '''\
A database cursor used to manage the context of a fetch operation.
:pep:`0249#cursor-objects`
'''
        )

    def test_typeerror(self):
        # Cursors come from Connection.cursor(); direct construction raises.
        self.assertRaises(TypeError, ctds.Cursor)
|
118000
|
import sys
import os
import torch
import unittest
import numpy as np
from TorchProteinLibrary import FullAtomModel
class TestCoords2TypedCoordsBackward(unittest.TestCase):
    """Checks Coords2TypedCoords gradients against central-free finite differences.

    The scalar loss is the mean radius of gyration (Rg) of the typed,
    mean-centered coordinates; its autograd gradient w.r.t. the input angles
    is compared component-wise to a forward-difference estimate.
    """
    def setUp(self):
        self.a2c = FullAtomModel.Angles2Coords()
        self.c2tc = FullAtomModel.Coords2TypedCoords()
        self.c2cc = FullAtomModel.CoordsTransform.Coords2CenteredCoords(rotate=True, translate=True)
        self.error = 0.0  # accumulated |analytic - numeric| gradient difference
        self.N = 0        # number of gradient components compared
    def runTest(self):
        sequence = ['GGMLGWAHFGY']
        # x0: angle tensor, 7 angles per residue — presumably backbone +
        # side-chain dihedrals; confirm against Angles2Coords docs.
        x0 = torch.zeros(1, 7, len(sequence[0]), dtype=torch.double).requires_grad_()
        x1 = torch.zeros(1, 7, len(sequence[0]), dtype=torch.double)
        x0.data[0,0,:] = -1.047
        x0.data[0,1,:] = -0.698
        x0.data[0,2:,:] = 110.4*np.pi/180.0
        y0, res, at, num_atoms = self.a2c(x0, sequence)
        y0 = self.c2cc(y0, num_atoms)
        coords, num_atoms_of_type, offsets = self.c2tc(y0, res, at, num_atoms)
        # NOTE(review): Tensor.resize is deprecated in modern PyTorch;
        # reshape/view would be the current equivalent.
        coords = coords.resize(1, int(coords.size(1)/3), 3)
        center_mass = coords.mean(dim=1).unsqueeze(dim=1)
        coords = coords - center_mass
        Rg = torch.mean(torch.sqrt((coords*coords).sum(dim=2)))
        Rg.backward()
        # Snapshot the autograd gradient before perturbing inputs.
        back_grad_x0 = torch.zeros(x0.grad.size(), dtype=torch.double).copy_(x0.grad.data)
        x1.data.copy_(x0.data)
        for i in range(0,7):
            grads = []
            for j in range(0,x0.size(2)):
                # Forward finite difference: perturb one angle by dx and
                # recompute Rg through the full pipeline.
                dx = 0.0001
                x1.data.copy_(x0.data)
                x1.data[0,i,j]+=dx
                y1, res, at, num_atoms = self.a2c(x1, sequence)
                coords, num_atoms_of_type, offsets = self.c2tc(y1, res, at, num_atoms)
                coords = coords.resize(1, int(coords.size(1)/3), 3)
                center_mass = coords.mean(dim=1).unsqueeze(dim=1)
                coords = coords - center_mass
                Rg_1 = torch.mean(torch.sqrt((coords*coords).sum(dim=2)))
                dy_dx = (Rg_1.data-Rg.data)/(dx)
                grads.append(dy_dx)
                self.error += torch.abs(dy_dx - back_grad_x0[0,i,j])
                self.N+=1
        # Mean absolute deviation must stay below the tolerance.
        self.error /= float(self.N)
        print('Error = ', self.error)
        self.assertLess(self.error, 0.01)
if __name__=='__main__':
    unittest.main()
|
118080
|
from attr import attrs, attrib
from aioalice.types import AliceObject, BaseSession, Response
from aioalice.utils import ensure_cls
@attrs
class AliceResponse(AliceObject):
    """AliceResponse is a response to Alice API"""
    # Main payload; converted to a Response instance on assignment.
    response = attrib(converter=ensure_cls(Response))
    # Session metadata echoed back to the API.
    session = attrib(converter=ensure_cls(BaseSession))
    # State dictionaries passed through as-is.
    session_state = attrib(type=dict)
    user_state_update = attrib(type=dict)
    application_state = attrib(type=dict)
    version = attrib(type=str)
|
118117
|
import datetime
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
try:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
except ImportError:
print("Requests library not found - installing...")
TBUtility.install_package("cryptography")
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
class SSLGenerator:
    """Generates a self-signed RSA key/certificate pair for *hostname*."""

    def __init__(self, hostname):
        self.hostname: str = hostname

    def generate_certificate(self):
        """Write 'domain_srv.key' and 'domain_srv.crt' to the working directory."""
        # 2048-bit RSA private key, stored unencrypted in PEM format.
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=default_backend(),
        )
        key_pem = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption(),
        )
        with open("domain_srv.key", "wb") as key_file:
            key_file.write(key_pem)
        # Self-signed: subject and issuer are the same distinguished name.
        name = x509.Name([
            x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"),
            x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"CA"),
            x509.NameAttribute(NameOID.LOCALITY_NAME, u"locality"),
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"A place"),
            x509.NameAttribute(NameOID.COMMON_NAME, self.hostname),
        ])
        alt_names = x509.SubjectAlternativeName([
            x509.DNSName(u"localhost"),
            x509.DNSName(self.hostname),
            x509.DNSName(u"127.0.0.1"),
        ])
        builder = x509.CertificateBuilder()
        builder = builder.subject_name(name)
        builder = builder.issuer_name(name)
        builder = builder.public_key(private_key.public_key())
        builder = builder.serial_number(x509.random_serial_number())
        # Valid from now for one year.
        builder = builder.not_valid_before(datetime.datetime.utcnow())
        builder = builder.not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=365))
        builder = builder.add_extension(alt_names, critical=False)
        certificate = builder.sign(private_key, hashes.SHA256(), default_backend())
        with open("domain_srv.crt", "wb") as cert_file:
            cert_file.write(certificate.public_bytes(serialization.Encoding.PEM))
|
118123
|
from __future__ import absolute_import
from __future__ import print_function
import argparse
import os
import sys
import string
import subprocess, logging
from threading import Thread
import time
import socket
import commands
def get_mpi_env(envs):
    """Build the mpirun argument string that forwards *envs* to launched ranks.

    Supports MS-MPI on Windows ('-env K V'), Open MPI ('-x K=V') and
    MPICH ('-env K V'); raises RuntimeError for anything else.
    """
    # windows hack: we will use msmpi
    if sys.platform == 'win32':
        parts = [' -env %s %s' % (key, str(val)) for key, val in envs.items()]
        return ''.join(parts)
    # decide MPI version from `mpirun --version` output.
    (out, err) = subprocess.Popen(['mpirun', '--version'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE).communicate()
    cmd = ''
    if b'Open MPI' in out:
        for key, val in envs.items():
            if key == 'LS_COLORS':
                # huge and irrelevant to the job; skip it
                continue
            cmd += ' -x %s=%s' % (key, str(val))
    elif b'mpich' in err:
        for key, val in envs.items():
            cmd += ' -env %s %s' % (key, str(val))
    else:
        raise RuntimeError('Unknown MPI Version')
    return cmd
def mpi_submit(role, nworker, nserver, args, envs):
    """Internal closure for job submission."""
    # NOTE(review): this function is Python-2 era (str payloads over sockets,
    # `commands` module used below) — confirm the target runtime.
    def run(prog):
        """run the program"""
        subprocess.check_call(prog, shell=True)
    def notify_ready():
        # Contact the tracker (run_tracker, port 12345) to learn whether this
        # node is the Master; the Master additionally receives the two mpirun
        # command lines (parameter servers, then workers).
        master_ip = envs['DMLC_PS_ROOT_URI']
        master_port = 12345
        s = socket.socket()
        s.connect((master_ip, master_port))
        role = s.recv(128)
        print(role, file=sys.stderr)
        # close the connection
        if role == 'Master':
            progPs = s.recv(2048)
            progWk = s.recv(2048)
            print(progPs, file=sys.stderr)
            print(progWk, file=sys.stderr)
        else:
            progPs = ''
            progWk = ''
        s.close()
        return role, progPs, progWk
    # Host list comes from the UAI platform environment; bail out otherwise.
    if 'UAI_HOSTS' in envs:
        hosts = envs['UAI_HOSTS']
    else:
        sys.exit(1)
    print(hosts, file=sys.stderr)
    cmd =' '.join(args.command)
    if hosts is not None:
        host_list = string.split(hosts, ',')
        cmd += ' --uai-hosts %s ' % (hosts)
    else:
        sys.exit(1)
    if role == 'server':
        # Server containers just park on port 12346 until contacted once.
        print('Bind to role=server', file=sys.stderr)
        port = 12346
        s = socket.socket()
        s.bind(('', port))
        s.listen(5)
        c, addr = s.accept()
        c.close()
    else: # role is worker
        # Workers start an sshd on port 24 (so mpirun can reach them) and
        # poll `ps` until it is up.
        print(os.path.isdir('/data/data/'), file=sys.stderr)
        print('change /root/.ssh/config', file=sys.stderr)
        ssh_cmd = "sed -i 's/port 23/port 24/' /root/.ssh/config"
        subprocess.check_call(ssh_cmd, shell=True)
        prog = '/usr/sbin/sshd -D -p 24'
        print(prog, file=sys.stderr)
        thread = Thread(target=run, args=(prog,))
        thread.setDaemon(True)
        thread.start()
        time.sleep(1)
        while True:
            output = commands.getoutput('ps -A')
            print(output, file=sys.stderr)
            if 'sshd' in output:
                break
            time.sleep(1)
        # Register with the tracker; the elected Master launches both mpirun
        # commands (servers and workers) with the job command appended.
        role, progPs, progWk = notify_ready()
        if role == 'Master':
            print('Start servers by mpirun', file=sys.stderr)
            progPs = '%s %s' % (progPs, cmd)
            print(progPs, file=sys.stderr)
            thread_ps_run = Thread(target=run, args=(progPs,))
            thread_ps_run.setDaemon(True)
            thread_ps_run.start()
            print('Start workers by mpirun', file=sys.stderr)
            progWk = '%s %s' % (progWk, cmd)
            print(progWk, file=sys.stderr)
            thread_wk_run = Thread(target=run, args=(progWk,))
            thread_wk_run.setDaemon(True)
            thread_wk_run.start()
            # Wait for the worker mpirun, then the server mpirun, to finish.
            while thread_wk_run.isAlive():
                thread_wk_run.join(120)
            thread_ps_run.join(120)
            print('Master worker exit', file=sys.stderr)
            sys.exit(0)
        else:
            # Slaves only keep their sshd alive for the Master's mpirun.
            while thread.isAlive():
                thread.join(120)
            sys.exit(0)
def run_tracker(args, env):
    """Scheduler-side launcher (Python 2 code).

    Starts the DMLC scheduler (the user command) in a daemon thread, waits for
    every worker container to connect on port 12345, elects the container on
    the DMLC_PS_ROOT_URI host as "Master", and sends that master the mpirun
    command lines for launching servers and workers.

    :param args: parsed CLI namespace; ``args.command`` is the user program
    :param env: environment dict (copy of ``os.environ``) for the job
    """
    def run(prog):
        """run the program"""
        subprocess.check_call(prog, shell=True)
    # Launch the scheduler process in the background so this function can
    # keep handling the coordination socket.
    print('Start scheduler', file=sys.stderr)
    thread = Thread(target=(lambda: subprocess.check_call(' '.join(args.command), env=env, shell=True)), args=())
    thread.setDaemon(True)
    thread.start()
    env['DMLC_JOB_CLUSTER'] = 'mpi'
    # UAI_HOSTS: comma-separated host list injected by the platform; abort if absent.
    if 'UAI_HOSTS' in env:
        hosts = env['UAI_HOSTS']
    else:
        sys.exit(1)
    print(hosts, file=sys.stderr)
    cmd =' '.join(args.command)
    if hosts is not None:
        host_list = string.split(hosts, ',')  # Python 2 string-module API
        cmd += ' --uai-hosts %s ' % (hosts)
    else:
        sys.exit(1)
    # NOTE(review): the nested `run` helper and `cmd` are built but never used
    # below in this function — confirm whether this is dead code.
    host_cnt = len(host_list)
    hosts = ','.join(sorted(host_list, reverse=True))
    print(host_list, file=sys.stderr)
    print(hosts, file=sys.stderr)
    # start socket
    s = socket.socket()
    master_ip = env['DMLC_PS_ROOT_URI']
    port = 12345
    s.bind(('', port))
    print('socket bind %s' %(port), file=sys.stderr)
    s.listen(5)
    # Now check for worker docker start
    for x in xrange(host_cnt):
        c, addr = s.accept()
        print(addr, file=sys.stderr)
        if addr[0] == master_ip:
            # Container on the scheduler host becomes the MPI launch master;
            # keep its connection open to send it the mpirun command lines.
            c.send('Master')
            master_c = c
        else:
            c.send('Slave')
            c.close()
    env['OMP_NUM_THREADS'] = 4  # NOTE(review): int value in an env-style dict — confirm get_mpi_env stringifies it
    #env['MXNET_CPU_WORKER_NTHREADS'] = 8
    #env['MXNET_GPU_WORKER_NTHREADS'] = 4
    # First mpirun line launches the parameter servers on every host...
    env['DMLC_ROLE'] = 'server'
    prog = 'mpirun --allow-run-as-root --mca btl_tcp_if_include eth1 --map-by socket -n %d -host %s %s' % (host_cnt, hosts, get_mpi_env(env))
    master_c.send(prog)
    # ...second mpirun line launches the workers.
    env['DMLC_ROLE'] = 'worker'
    prog = 'mpirun --allow-run-as-root --mca btl_tcp_if_include eth1 --map-by socket -n %d -host %s %s' % (host_cnt, hosts, get_mpi_env(env))
    master_c.send(prog)
    master_c.close()
    # Keep the scheduler thread alive until the job finishes.
    while thread.isAlive():  # Python 2 Thread API (is_alive in Python 3)
        thread.join(100)
def main():
    """Entry point: parse the job command and dispatch by DMLC role.

    The ``scheduler`` role runs the tracker; every other role is handed to
    ``mpi_submit``.
    """
    arg_parser = argparse.ArgumentParser(description='Launch a distributed job')
    arg_parser.add_argument('--command', nargs='+',
                            help='command for launching the program')
    parsed, leftover = arg_parser.parse_known_args()
    # Anything argparse did not recognise still belongs to the user command.
    parsed.command += leftover
    environment = os.environ.copy()
    role = environment['DMLC_ROLE']
    num_workers = environment['DMLC_NUM_WORKER']
    num_servers = environment['DMLC_NUM_SERVER']
    print(parsed.command)
    if role == 'scheduler':
        run_tracker(parsed, environment)
    else:
        mpi_submit(role, num_workers, num_servers, parsed, environment)
# Script entry point: dispatch to the role-specific launcher.
if __name__ == "__main__":
    main()
|
118156
|
import requests
import json
from tokens.settings import BLOCKCYPHER_API_KEY
def register_new_token(email, new_token, first=None, last=None):
    """Register *new_token* with the BlockCypher tokens API.

    Fix: previously the ``email``/``first``/``last`` arguments were ignored and
    hard-coded test values ("MichaelFlaxman"/"TestingOkToToss") were POSTed
    instead; the payload is now built from the caller's arguments.

    :param email: contact email to associate with the token (required)
    :param new_token: token string to register (required)
    :param first: optional first name
    :param last: optional last name
    :return: the registered token string
    :raises AssertionError: if required args are missing or the API reports an error
    """
    assert new_token and email
    post_params = {
        "email": email,
        "token": new_token,
    }
    # Only include the optional name fields when the caller supplied them.
    if first:
        post_params["first"] = first
    if last:
        post_params["last"] = last
    url = 'https://api.blockcypher.com/v1/tokens'
    get_params = {'token': BLOCKCYPHER_API_KEY}
    r = requests.post(url, data=json.dumps(post_params), params=get_params,
                      verify=True, timeout=20)
    # BlockCypher signals failure inside the JSON body, not via status code alone.
    assert 'error' not in json.loads(r.text)
    return new_token
|
118165
|
import datetime
import io
from openpyxl import load_workbook
from ftc.management.commands._base_scraper import HTMLScraper
from ftc.models import Organisation, OrganisationLocation
class Command(HTMLScraper):
    """
    Spider for scraping details of Registered Social Landlords in England
    """

    name = "rsl"
    allowed_domains = ["gov.uk", "githubusercontent.com"]
    start_urls = [
        "https://www.gov.uk/government/publications/current-registered-providers-of-social-housing",
    ]
    # org-id prefix for the Regulator of Social Housing register.
    org_id_prefix = "GB-SHPE"
    # Spreadsheet column used as the unique identifier for each provider.
    id_field = "registration number"
    # Dataset metadata recorded against every scraped record.
    source = {
        "title": "Current registered providers of social housing",
        "description": "Current registered providers of social housing and new registrations and deregistrations. Covers England",
        "identifier": "rsl",
        "license": "http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/",
        "license_name": "Open Government Licence v3.0",
        "issued": "",
        "modified": "",
        "publisher": {
            "name": "Regulator of Social Housing",
            "website": "https://www.gov.uk/government/organisations/regulator-of-social-housing",
        },
        "distribution": [
            {
                "downloadURL": "",
                "accessURL": "",
                "title": "Current registered providers of social housing",
            }
        ],
    }
    orgtypes = ["Registered Provider of Social Housing"]

    def parse_file(self, response, source_url):
        """Find the .xlsx download on the publication page, fetch it and feed
        every data row of the listing sheet(s) to parse_row."""
        # The first .xlsx link on the page is assumed to be the register download.
        link = [link for link in response.html.links if link.endswith(".xlsx")][0]
        self.set_download_url(link)
        r = self.session.get(link)
        r.raise_for_status()
        # read_only mode streams the workbook instead of loading it fully.
        wb = load_workbook(io.BytesIO(r.content), read_only=True)
        # Only sheets whose name looks like the provider listing are parsed.
        sheets = [
            sheetname
            for sheetname in wb.sheetnames
            if "listing" in sheetname.lower() or "find view" in sheetname.lower()
        ]
        for sheetname in sheets:
            ws = wb[sheetname]
            # self.source["issued"] = wb.properties.modified.isoformat()[0:10]
            headers = None
            for k, row in enumerate(ws.rows):
                if not headers:
                    # First row is treated as the header row.
                    # NOTE(review): c.value.lower() raises if a header cell is
                    # blank — confirm the sheets never have empty header cells.
                    headers = [c.value.lower() for c in row]
                else:
                    record = dict(zip(headers, [c.value for c in row]))
                    self.parse_row(record)

    def parse_row(self, record):
        """Convert one spreadsheet row into an Organisation record, plus an
        area-of-operation location record for local authorities."""
        record = self.clean_fields(record)
        # Skip blank or partial rows.
        if not record.get("organisation name") or not record.get("registration number"):
            return
        org_types = [
            self.add_org_type("Registered Provider of Social Housing"),
        ]
        # Derive additional organisation types from the corporate form and
        # designation columns.
        if record.get("corporate form"):
            if record["corporate form"] == "Company":
                org_types.append(self.add_org_type("Registered Company"))
                org_types.append(
                    self.add_org_type(
                        "{} {}".format(record["designation"], record["corporate form"])
                    )
                )
            elif record["corporate form"] == "CIO-Charitable Incorporated Organisation":
                org_types.append(
                    self.add_org_type("Charitable Incorporated Organisation")
                )
                org_types.append(self.add_org_type("Registered Charity"))
            elif record["corporate form"] == "Charitable Company":
                org_types.append(self.add_org_type("Registered Company"))
                org_types.append(self.add_org_type("Incorporated Charity"))
                org_types.append(self.add_org_type("Registered Charity"))
            elif record["corporate form"] == "Unincorporated Charity":
                org_types.append(self.add_org_type("Registered Charity"))
            else:
                org_types.append(self.add_org_type(record["corporate form"]))
        elif record.get("designation"):
            org_types.append(self.add_org_type(record["designation"]))
        org_ids = [self.get_org_id(record)]
        # Local authorities get a GB-LAE org-id and a location record, looked
        # up via the module-level LA_LOOKUP table defined below.
        if record.get("designation") == "Local Authority":
            la_codes = LA_LOOKUP.get(record.get(self.id_field))
            if la_codes:
                org_ids.append("GB-LAE-{}".format(la_codes.get("register-code")))
                self.add_location_record(
                    {
                        "org_id": self.get_org_id(record),
                        "name": la_codes.get("name"),
                        "geoCode": la_codes.get("GSS"),
                        "geoCodeType": OrganisationLocation.GeoCodeTypes.ONS_CODE,
                        "locationType": OrganisationLocation.LocationTypes.AREA_OF_OPERATION,
                        "spider": self.name,
                        "scrape": self.scrape,
                        "source": self.source,
                    }
                )
        self.add_org_record(
            Organisation(
                **{
                    "org_id": self.get_org_id(record),
                    "name": record.get("organisation name"),
                    "charityNumber": None,
                    "companyNumber": None,
                    "streetAddress": None,
                    "addressLocality": None,
                    "addressRegion": None,
                    "addressCountry": "England",
                    "postalCode": None,
                    "telephone": None,
                    "alternateName": [],
                    "email": None,
                    "description": None,
                    "organisationType": [o.slug for o in org_types],
                    "organisationTypePrimary": org_types[0],
                    "url": None,
                    # "location": locations,
                    "latestIncome": None,
                    "dateModified": datetime.datetime.now(),
                    "dateRegistered": record.get("registration date"),
                    "dateRemoved": None,
                    "active": True,
                    "parent": None,
                    "orgIDs": org_ids,
                    "scrape": self.scrape,
                    "source": self.source,
                    "spider": self.name,
                    "org_id_scheme": self.orgid_scheme,
                }
            )
        )
# Lookup from the regulator's local-authority registration number to the
# ONS GSS code, local-authority register code and name. Used by
# Command.parse_row to attach GB-LAE org-ids and location records.
LA_LOOKUP = {
    "45UB": {"GSS": "E07000223", "register-code": "ADU", "name": "Adur"},
    "45UC": {"GSS": "E07000224", "register-code": "ARU", "name": "Arun"},
    "37UB": {"GSS": "E07000170", "register-code": "ASH", "name": "Ashfield"},
    "29UB": {"GSS": "E07000105", "register-code": "ASF", "name": "Ashford"},
    "42UB": {"GSS": "E07000200", "register-code": "BAB", "name": "Babergh"},
    "00CC": {"GSS": "E08000016", "register-code": "BNS", "name": "Barnsley"},
    "16UC": {"GSS": "E07000027", "register-code": "BAR", "name": "Barrow-in-Furness"},
    "22UB": {"GSS": "E07000066", "register-code": "BAI", "name": "Basildon"},
    "37UC": {"GSS": "E07000171", "register-code": "BAE", "name": "Bassetlaw"},
    "00CN": {"GSS": "E08000025", "register-code": "BIR", "name": "Birmingham"},
    "00EY": {"GSS": "E06000009", "register-code": "BPL", "name": "Blackpool"},
    "17UC": {"GSS": "E07000033", "register-code": "BOS", "name": "Bolsover"},
    "00BL": {"GSS": "E08000001", "register-code": "BOL", "name": "Bolton"},
    "00HP": {"GSS": "E06000029", "register-code": "POL", "name": "Poole"},
    "00HN": {"GSS": "E06000028", "register-code": "BMH", "name": "Bournemouth"},
    "5069": {
        "GSS": "E06000058",
        "register-code": "BPC",
        "name": "Bournemouth, Christchurch and Poole",
    },
    "00MA": {"GSS": "E06000036", "register-code": "BRC", "name": "<NAME>"},
    "22UD": {"GSS": "E07000068", "register-code": "BRW", "name": "Brentwood"},
    "00ML": {"GSS": "E06000043", "register-code": "BNH", "name": "<NAME>"},
    "00HB": {"GSS": "E06000023", "register-code": "BST", "name": "Bristol, City of"},
    "37UD": {"GSS": "E07000172", "register-code": "BRT", "name": "Broxtowe"},
    "00BM": {"GSS": "E08000002", "register-code": "BUR", "name": "Bury"},
    "12UB": {"GSS": "E07000008", "register-code": "CAB", "name": "Cambridge"},
    "41UB": {"GSS": "E07000192", "register-code": "CAN", "name": "<NAME>"},
    "29UC": {"GSS": "E07000106", "register-code": "CAT", "name": "Canterbury"},
    "22UE": {"GSS": "E07000069", "register-code": "CAS", "name": "<NAME>"},
    "00KC": {
        "GSS": "E06000056",
        "register-code": "CBF",
        "name": "Central Bedfordshire",
    },
    "31UC": {"GSS": "E07000130", "register-code": "CHA", "name": "Charnwood"},
    "23UB": {"GSS": "E07000078", "register-code": "CHT", "name": "Cheltenham"},
    "38UB": {"GSS": "E07000177", "register-code": "CHR", "name": "Cherwell"},
    "00EW": {
        "GSS": "E06000050",
        "register-code": "CHW",
        "name": "Cheshire West and Chester",
    },
    "17UD": {"GSS": "E07000034", "register-code": "CHS", "name": "Chesterfield"},
    "30UE": {"GSS": "E07000118", "register-code": "CHO", "name": "Chorley"},
    "00CX": {"GSS": "E08000032", "register-code": "BRD", "name": "Bradford"},
    "32UD": {"GSS": "E07000138", "register-code": "LIC", "name": "Lincoln"},
    "00AA": {"GSS": "E09000001", "register-code": "LND", "name": "City of London"},
    "00DB": {"GSS": "E08000036", "register-code": "WKF", "name": "Wakefield"},
    "00BK": {"GSS": "E09000033", "register-code": "WSM", "name": "Westminster"},
    "00FF": {"GSS": "E06000014", "register-code": "YOR", "name": "York"},
    "22UG": {"GSS": "E07000071", "register-code": "COL", "name": "Colchester"},
    "34UB": {"GSS": "E07000150", "register-code": "COR", "name": "Corby"},
    "00HE": {"GSS": "E06000052", "register-code": "CON", "name": "Cornwall"},
    "00HF": {"GSS": "E06000053", "register-code": "IOS", "name": "<NAME>"},
    "36UB": {"GSS": "E07000163", "register-code": "CRA", "name": "Craven"},
    "45UE": {"GSS": "E07000226", "register-code": "CRW", "name": "Crawley"},
    "26UC": {"GSS": "E07000096", "register-code": "DAC", "name": "Dacorum"},
    "00EH": {"GSS": "E06000005", "register-code": "DAL", "name": "Darlington"},
    "29UD": {"GSS": "E07000107", "register-code": "DAR", "name": "Dartford"},
    "5076": {"GSS": "E07000151", "register-code": "DAV", "name": "Daventry"},
    "00FK": {"GSS": "E06000015", "register-code": "DER", "name": "Derby"},
    "00CE": {"GSS": "E08000017", "register-code": "DNC", "name": "Doncaster"},
    "29UE": {"GSS": "E07000108", "register-code": "DOV", "name": "Dover"},
    "00CR": {"GSS": "E08000027", "register-code": "DUD", "name": "Dudley"},
    "00EJ": {"GSS": "E06000047", "register-code": "DUR", "name": "<NAME>"},
    "18UB": {"GSS": "E07000040", "register-code": "EDE", "name": "<NAME>"},
    "5070": {"GSS": "E07000244", "register-code": "ESK", "name": "East Suffolk"},
    "26UD": {"GSS": "E07000097", "register-code": "EHE", "name": "East Hertfordshire"},
    "00FB": {
        "GSS": "E06000011",
        "register-code": "ERY",
        "name": "East Riding of Yorkshire",
    },
    "21UC": {"GSS": "E07000061", "register-code": "EAS", "name": "Eastbourne"},
    "22UH": {"GSS": "E07000072", "register-code": "EPP", "name": "Epping Forest"},
    "18UC": {"GSS": "E07000041", "register-code": "EXE", "name": "Exeter"},
    "24UE": {"GSS": "E07000087", "register-code": "FAR", "name": "Fareham"},
    "42UC": {"GSS": "E07000201", "register-code": "FOR", "name": "Forest Heath"},
    "00CH": {"GSS": "E08000037", "register-code": "GAT", "name": "Gateshead"},
    "23UE": {"GSS": "E07000081", "register-code": "GLO", "name": "Gloucester"},
    "24UF": {"GSS": "E07000088", "register-code": "GOS", "name": "Gosport"},
    "29UG": {"GSS": "E07000109", "register-code": "GRA", "name": "Gravesham"},
    "33UD": {"GSS": "E07000145", "register-code": "GRY", "name": "Great Yarmouth"},
    "43UD": {"GSS": "E07000209", "register-code": "GRT", "name": "Guildford"},
    "00AP": {"GSS": "E09000014", "register-code": "HRY", "name": "Haringey"},
    "22UJ": {"GSS": "E07000073", "register-code": "HAR", "name": "Harlow"},
    "36UD": {"GSS": "E07000165", "register-code": "HAG", "name": "Harrogate"},
    "00EB": {"GSS": "E06000001", "register-code": "HPL", "name": "Hartlepool"},
    "17UH": {"GSS": "E07000037", "register-code": "HIG", "name": "<NAME>"},
    "31UE": {
        "GSS": "E07000132",
        "register-code": "HIN",
        "name": "<NAME>",
    },
    "42UD": {"GSS": "E07000202", "register-code": "IPS", "name": "Ipswich"},
    "34UE": {"GSS": "E07000153", "register-code": "KET", "name": "Kettering"},
    "00FA": {
        "GSS": "E06000010",
        "register-code": "KHL",
        "name": "<NAME>, City of",
    },
    "00CZ": {"GSS": "E08000034", "register-code": "KIR", "name": "Kirklees"},
    "30UH": {"GSS": "E07000121", "register-code": "LAC", "name": "Lancaster"},
    "00DA": {"GSS": "E08000035", "register-code": "LDS", "name": "Leeds"},
    "00FN": {"GSS": "E06000016", "register-code": "LCE", "name": "Leicester"},
    "21UF": {"GSS": "E07000063", "register-code": "LEE", "name": "Lewes"},
    "00AB": {
        "GSS": "E09000002",
        "register-code": "BDG",
        "name": "<NAME>",
    },
    "00AC": {"GSS": "E09000003", "register-code": "BNE", "name": "Barnet"},
    "00AD": {"GSS": "E09000004", "register-code": "BEX", "name": "Bexley"},
    "00AE": {"GSS": "E09000005", "register-code": "BEN", "name": "Brent"},
    "00AG": {"GSS": "E09000007", "register-code": "CMD", "name": "Camden"},
    "00AH": {"GSS": "E09000008", "register-code": "CRY", "name": "Croydon"},
    "00AJ": {"GSS": "E09000009", "register-code": "EAL", "name": "Ealing"},
    "00AK": {"GSS": "E09000010", "register-code": "ENF", "name": "Enfield"},
    "00AL": {"GSS": "E09000011", "register-code": "GRE", "name": "Greenwich"},
    "00AM": {"GSS": "E09000012", "register-code": "HCK", "name": "Hackney"},
    "00AN": {
        "GSS": "E09000013",
        "register-code": "HMF",
        "name": "<NAME>",
    },
    "00AQ": {"GSS": "E09000015", "register-code": "HRW", "name": "Harrow"},
    "00AR": {"GSS": "E09000016", "register-code": "HAV", "name": "Havering"},
    "00AS": {"GSS": "E09000017", "register-code": "HIL", "name": "Hillingdon"},
    "00AT": {"GSS": "E09000018", "register-code": "HNS", "name": "Hounslow"},
    "00AU": {"GSS": "E09000019", "register-code": "ISL", "name": "Islington"},
    "00AY": {"GSS": "E09000022", "register-code": "LBH", "name": "Lambeth"},
    "00AZ": {"GSS": "E09000023", "register-code": "LEW", "name": "Lewisham"},
    "00BA": {"GSS": "E09000024", "register-code": "MRT", "name": "Merton"},
    "00BB": {"GSS": "E09000025", "register-code": "NWM", "name": "Newham"},
    "00BC": {"GSS": "E09000026", "register-code": "RDB", "name": "Redbridge"},
    "00BF": {"GSS": "E09000029", "register-code": "STN", "name": "Sutton"},
    "00BG": {"GSS": "E09000030", "register-code": "TWH", "name": "<NAME>"},
    "00BH": {"GSS": "E09000031", "register-code": "WFT", "name": "<NAME>"},
    "00BJ": {"GSS": "E09000032", "register-code": "WND", "name": "Wandsworth"},
    "00KA": {"GSS": "E06000032", "register-code": "LUT", "name": "Luton"},
    "5074": {"GSS": "E08000012", "register-code": "LIV", "name": "Liverpool"},
    "29UH": {"GSS": "E07000110", "register-code": "MAI", "name": "Maidstone"},
    "00BN": {"GSS": "E08000003", "register-code": "MAN", "name": "Manchester"},
    "37UF": {"GSS": "E07000174", "register-code": "MAS", "name": "Mansfield"},
    "00LC": {"GSS": "E06000035", "register-code": "MDW", "name": "Medway"},
    "31UG": {"GSS": "E07000133", "register-code": "MEL", "name": "Melton"},
    "18UD": {"GSS": "E07000042", "register-code": "MDE", "name": "<NAME>"},
    "42UE": {"GSS": "E07000203", "register-code": "MSU", "name": "Mid Suffolk"},
    "00EC": {"GSS": "E06000002", "register-code": "MDB", "name": "Middlesbrough"},
    "00MG": {"GSS": "E06000042", "register-code": "MIK", "name": "<NAME>"},
    "43UE": {"GSS": "E07000210", "register-code": "MOL", "name": "<NAME>"},
    "24UJ": {"GSS": "E07000091", "register-code": "NEW", "name": "New Forest"},
    "37UG": {"GSS": "E07000175", "register-code": "NEA", "name": "Newark and Sherwood"},
    "00CJ": {"GSS": "E08000021", "register-code": "NET", "name": "Newcastle upon Tyne"},
    "17UJ": {
        "GSS": "E07000038",
        "register-code": "NED",
        "name": "North East Derbyshire",
    },
    "32UE": {"GSS": "E07000139", "register-code": "NKE", "name": "North Kesteven"},
    "00HC": {"GSS": "E06000024", "register-code": "NSM", "name": "North Somerset"},
    "00CK": {"GSS": "E08000022", "register-code": "NTY", "name": "North Tyneside"},
    "44UB": {"GSS": "E07000218", "register-code": "NWA", "name": "North Warwickshire"},
    "31UH": {
        "GSS": "E07000134",
        "register-code": "NWL",
        "name": "North West Leicestershire",
    },
    "34UF": {"GSS": "E07000154", "register-code": "NOR", "name": "Northampton"},
    "00EM": {"GSS": "E06000057", "register-code": "NBL", "name": "Northumberland"},
    "33UG": {"GSS": "E07000148", "register-code": "NOW", "name": "Norwich"},
    "00FY": {"GSS": "E06000018", "register-code": "NGM", "name": "Nottingham"},
    "44UC": {
        "GSS": "E07000219",
        "register-code": "NUN",
        "name": "<NAME>",
    },
    "31UJ": {"GSS": "E07000135", "register-code": "OAD", "name": "<NAME>"},
    "00BP": {"GSS": "E08000004", "register-code": "OLD", "name": "Oldham"},
    "38UC": {"GSS": "E07000178", "register-code": "OXO", "name": "Oxford"},
    "00MR": {"GSS": "E06000044", "register-code": "POR", "name": "Portsmouth"},
    "00MC": {"GSS": "E06000038", "register-code": "RDG", "name": "Reading"},
    "47UD": {"GSS": "E07000236", "register-code": "RED", "name": "Redditch"},
    "30UL": {"GSS": "E07000124", "register-code": "RIB", "name": "<NAME>"},
    "36UE": {"GSS": "E07000166", "register-code": "RIH", "name": "Richmondshire"},
    "00BQ": {"GSS": "E08000005", "register-code": "RCH", "name": "Rochdale"},
    "30UM": {"GSS": "E07000125", "register-code": "ROS", "name": "Rossendale"},
    "00CF": {"GSS": "E08000018", "register-code": "ROT", "name": "Rotherham"},
    "00AW": {
        "GSS": "E09000020",
        "register-code": "KEC",
        "name": "<NAME>",
    },
    "00AX": {
        "GSS": "E09000021",
        "register-code": "KTT",
        "name": "<NAME>",
    },
    "44UD": {"GSS": "E07000220", "register-code": "RUG", "name": "Rugby"},
    "43UG": {"GSS": "E07000212", "register-code": "RUN", "name": "Runnymede"},
    "36UF": {"GSS": "E07000167", "register-code": "RYE", "name": "Ryedale"},
    "00BR": {"GSS": "E08000006", "register-code": "SLF", "name": "Salford"},
    "00CS": {"GSS": "E08000028", "register-code": "SAW", "name": "Sandwell"},
    "40UC": {"GSS": "E07000188", "register-code": "SEG", "name": "Sedgemoor"},
    "36UH": {"GSS": "E07000169", "register-code": "SEL", "name": "Selby"},
    "00CG": {"GSS": "E08000019", "register-code": "SHF", "name": "Sheffield"},
    "29UL": {"GSS": "E07000112", "register-code": "SHE", "name": "Shepway"},
    "00GG": {"GSS": "E06000051", "register-code": "SHR", "name": "Shropshire"},
    "00MD": {"GSS": "E06000039", "register-code": "SLG", "name": "Slough"},
    "00CT": {"GSS": "E08000029", "register-code": "SOL", "name": "Solihull"},
    "5067": {
        "GSS": "E07000246",
        "register-code": "SWT",
        "name": "<NAME>",
    },
    "12UG": {
        "GSS": "E07000012",
        "register-code": "SCA",
        "name": "South Cambridgeshire",
    },
    "17UK": {"GSS": "E07000039", "register-code": "SDE", "name": "South Derbyshire"},
    "5078": {"GSS": "E07000044", "register-code": "SHA", "name": "South Hams"},
    "32UF": {"GSS": "E07000140", "register-code": "SHO", "name": "South Holland"},
    "32UG": {"GSS": "E07000141", "register-code": "SKE", "name": "South Kesteven"},
    "16UG": {"GSS": "E07000031", "register-code": "SLA", "name": "South Lakeland"},
    "5085": {"GSS": "E07000126", "register-code": "SRI", "name": "South Ribble"},
    "00CL": {"GSS": "E08000023", "register-code": "STY", "name": "South Tyneside"},
    "00MS": {"GSS": "E06000045", "register-code": "STH", "name": "Southampton"},
    "00KF": {"GSS": "E06000033", "register-code": "SOS", "name": "Southend-on-Sea"},
    "00BE": {"GSS": "E09000028", "register-code": "SWK", "name": "Southwark"},
    "5091": {"GSS": "E07000213", "register-code": "SPE", "name": "Spelthorne"},
    "26UG": {"GSS": "E07000100", "register-code": "SAL", "name": "St Albans"},
    "26UH": {"GSS": "E07000101", "register-code": "STV", "name": "Stevenage"},
    "00BS": {"GSS": "E08000007", "register-code": "SKP", "name": "Stockport"},
    "00EF": {"GSS": "E06000004", "register-code": "STT", "name": "Stockton-on-Tees"},
    "00GL": {"GSS": "E06000021", "register-code": "STE", "name": "Stoke-on-Trent"},
    "23UF": {"GSS": "E07000082", "register-code": "STO", "name": "Stroud"},
    "5080": {"GSS": "E08000024", "register-code": "SND", "name": "Sunderland"},
    "42UG": {"GSS": "E07000205", "register-code": "SUF", "name": "Suffolk Coastal"},
    "00HX": {"GSS": "E06000030", "register-code": "SWD", "name": "Swindon"},
    "41UK": {"GSS": "E07000199", "register-code": "TAW", "name": "Tamworth"},
    "43UK": {"GSS": "E07000215", "register-code": "TAN", "name": "Tandridge"},
    "40UE": {"GSS": "E07000190", "register-code": "TAU", "name": "<NAME>"},
    "18UH": {"GSS": "E07000045", "register-code": "TEI", "name": "Teignbridge"},
    "22UN": {"GSS": "E07000076", "register-code": "TEN", "name": "Tendring"},
    "29UN": {"GSS": "E07000114", "register-code": "THA", "name": "Thanet"},
    "00KG": {"GSS": "E06000034", "register-code": "THR", "name": "Thurrock"},
    "29UP": {
        "GSS": "E07000115",
        "register-code": "TON",
        "name": "<NAME>",
    },
    "22UQ": {"GSS": "E07000077", "register-code": "UTT", "name": "Uttlesford"},
    "00EU": {"GSS": "E06000007", "register-code": "WRT", "name": "Warrington"},
    "44UF": {"GSS": "E07000222", "register-code": "WAW", "name": "Warwick"},
    "26UK": {"GSS": "E07000103", "register-code": "WAT", "name": "Watford"},
    "42UH": {"GSS": "E07000206", "register-code": "WAV", "name": "Waveney"},
    "43UL": {"GSS": "E07000216", "register-code": "WAE", "name": "Waverley"},
    "21UH": {"GSS": "E07000065", "register-code": "WEA", "name": "Wealden"},
    "26UL": {"GSS": "E07000104", "register-code": "WEW", "name": "<NAME>"},
    "00MB": {"GSS": "E06000037", "register-code": "WBK", "name": "West Berkshire"},
    "5077": {"GSS": "E07000047", "register-code": "WDE", "name": "West Devon"},
    "30UP": {"GSS": "E07000127", "register-code": "WLA", "name": "West Lancashire"},
    "5068": {"GSS": "E07000245", "register-code": "WSK", "name": "West Suffolk"},
    "00BW": {"GSS": "E08000010", "register-code": "WGN", "name": "Wigan"},
    "00HY": {"GSS": "E06000054", "register-code": "WIL", "name": "Wiltshire"},
    "24UP": {"GSS": "E07000094", "register-code": "WIN", "name": "Winchester"},
    "00CB": {"GSS": "E08000015", "register-code": "WRL", "name": "Wirral"},
    "43UM": {"GSS": "E07000217", "register-code": "WOI", "name": "Woking"},
    "00MF": {"GSS": "E06000041", "register-code": "WOK", "name": "Wokingham"},
    "00CW": {"GSS": "E08000031", "register-code": "WLV", "name": "Wolverhampton"},
    "11UF": {"GSS": "E07000007", "register-code": "WYO", "name": "Wycombe"},
}
|
118188
|
import datetime as dt
import json
import logging
import os
import shutil
from typing import Any, Dict, List, Optional, Tuple, Union
import pandas as pd
from extra_model._adjectives import adjective_info
from extra_model._aspects import generate_aspects
from extra_model._filter import filter
from extra_model._summarize import link_aspects_to_texts, link_aspects_to_topics, qa
from extra_model._topics import get_topics
from extra_model._vectorizer import Vectorizer
# Directory the pretrained embedding files are copied from during train().
CB_BASE_DIR = "/"
# Name of the pretrained GloVe embedding used for all file lookups.
EMBEDDING_TYPE = "glove.840B.300d"

logger = logging.getLogger(__name__)
class ModelBase:
    """Base class that provides file loading functionality."""

    # Path to the folder holding downloaded model files.
    models_folder: str
    # Metadata describing where/when the model artefacts were produced.
    _storage_metadata: Dict[str, str]

    def load_from_files(self):
        """Load model files."""
        # load the storage metadata info obtained when loading embeddings from model storage
        file_name = os.path.join(self.models_folder, "metadata.json")
        storage_metadata = {}
        if os.path.isfile(file_name):
            with open(file_name) as f:
                storage_metadata = json.load(f)
        # overwrite storage_metadata for select fields
        # (fields default to None when metadata.json is absent)
        self._storage_metadata["id"] = storage_metadata.get("id")
        self._storage_metadata["dag_id"] = storage_metadata.get("dag_id")
        self._storage_metadata["dag_run_id"] = storage_metadata.get("dag_run_id")
        self._storage_metadata["date_trained"] = storage_metadata.get(
            "date_trained"
        )
        self._storage_metadata["target_training_date"] = storage_metadata.get(
            "target_training_date"
        )
        self._storage_metadata["json_extras"] = storage_metadata.get("json_extras")
class ExtraModelBase:
    """Extra model class that provides an interface for training and predicting."""

    is_trained = False
    models_folder = "/embeddings"
    training_folder = ""
    # Embedding files needed by the Vectorizer; copied from CB_BASE_DIR in train().
    _filenames = {
        "embeddings": f"{EMBEDDING_TYPE}.vectors.npy",
        "prepro": EMBEDDING_TYPE,
    }
    # there is no need for this since Extra doesn't create any artifacts
    _training_artifacts: Dict[str, str] = {}

    def __init__(
        self,
        dag_id="",
        dag_run_id="",
        models_folder=models_folder,
        embedding_type=EMBEDDING_TYPE,
    ):
        """Init function for ExtraModel object.

        :param dag_id: Name of dag
        :param dag_run_id: Dag run ID
        :param models_folder: Path to folder where model files are stored
        :param embedding_type: Name of embedding file. Default is "glove.840B.300d"
        """
        self.models_folder = models_folder
        self.embedding_type = embedding_type
        # Mapping from internal column names to the public API spec names.
        self.api_spec_names = {
            "position": "Position",
            "aspect": "Aspect",
            "descriptor": "Descriptor",
            "aspect_count": "AspectCount",
            "wordnet_node": "WordnetNode",
            "sentiment_compound_aspect": "SentimentCompound",
            "sentiment_binary_aspect": "SentimentBinary",
            "adcluster": "AdCluster",
            "source_guid": "CommentId",
            "topic": "Topic",
            "importance": "TopicImportance",
            "sentiment_compound_topic": "TopicSentimentCompound",
            "sentiment_binary_topic": "TopicSentimentBinary",
            "num_occurance": "TopicCount",
        }
        self._storage_metadata = {
            "type": "text",
            "owner": "blank",
            "description": "Running ExtRA algorithm",
            "display_name": "extra-model",
            "features": {},
            "hyperparameters": {},
            "dag_id": dag_id,
            "dag_run_id": dag_run_id,
            "is_scheduled_creation": False,
            "date_trained": str(dt.date.today()),
            "target_training_date": str(dt.date.today()),
            "json_extras": {"classification_report_json": ""},
        }
        # Reserve a metadata slot for each managed model file.
        for key in self._filenames:
            self._storage_metadata[key] = {}

    def storage_metadata(self):
        """Return the storage metadata describing this model run."""
        return self._storage_metadata

    def load_from_files(self):
        """Load base model files, then build the embedding Vectorizer."""
        super().load_from_files()
        self.vectorizer = Vectorizer(
            os.path.join(self.models_folder, self.embedding_type)
        )
        self.is_trained = True

    def train(self):
        """Copy the embedding files into models_folder and mark the model trained."""
        for key, filename in self._filenames.items():
            logger.debug(f"Downloading {key}")
            shutil.copyfile(
                src=os.path.join(CB_BASE_DIR, filename),
                dst=os.path.join(self.models_folder, filename),
            )
        self.is_trained = True

    def predict(self, comments: List[Dict[str, str]]) -> List[Dict]:
        """Run the full ExtRA pipeline over the given comments.

        :param comments: list of dicts with at least a "CommentId" key
        :return: list of per-aspect records renamed per the API spec
        :raises RuntimeError: if the model has not been trained/loaded
        :raises ValueError: if no valid aspects are found in the input
        """
        if not self.is_trained:
            raise RuntimeError("Extra must be trained before you can predict!")
        dataframe_texts = pd.DataFrame(comments)
        dataframe_texts.rename(
            {"CommentId": "source_guid"}, axis="columns", inplace=True
        )
        dataframe_texts = filter(dataframe_texts)
        dataframe_aspects = generate_aspects(dataframe_texts)
        if dataframe_aspects.empty:
            raise ValueError(
                "Input dataset doesn't contain valid aspects, stopping the algorithm"
            )
        # aggregate and abstract aspects into topics
        dataframe_topics = get_topics(dataframe_aspects, self.vectorizer)
        dataframe_topics, dataframe_aspects = adjective_info(
            dataframe_topics, dataframe_aspects, self.vectorizer
        )
        dataframe_aspects = link_aspects_to_topics(dataframe_aspects, dataframe_topics)
        dataframe_aspects = link_aspects_to_texts(dataframe_aspects, dataframe_texts)
        # do some extra book-keeping if debug-level is set low enough
        if logger.isEnabledFor(20):  # 20 == logging.INFO
            qa(dataframe_texts, dataframe_aspects, dataframe_topics)
        # write output_tables, after dropping auxilliary information
        dataframe_topics.loc[:, "num_occurance"] = dataframe_topics["rawnums"].apply(
            lambda counts: sum(counts)
        )
        dataframe_topics = dataframe_topics[
            [
                "topicID",
                "topic",
                "importance",
                "sentiment_compound",
                "sentiment_binary",
                "num_occurance",
            ]
        ]
        # Drop aspects that never got linked to a topic/text.
        dataframe_aspects.dropna(axis=0, inplace=True)
        dataframe_aspects["topicID"] = dataframe_aspects["topicID"].astype(int)
        output = dataframe_aspects.merge(
            dataframe_topics, on="topicID", suffixes=("_aspect", "_topic")
        )
        return standardize_output(output, names=self.api_spec_names).to_dict("records")
# NOTE: improve typehints!
def extra_factory(bases: Optional[Union[Any, Tuple[Any]]] = None) -> Any:
"""Create for ExtraModel class types.
Will dynamically create the class when called with the provided base classes.
:param bases: Base classes to be used when creating ExtraModel class
:type bases: Class type or tuple of class types
:return: ExtraModel class
"""
if bases is None:
bases = (ModelBase,)
elif not isinstance(bases, tuple):
bases = (bases,)
bases = (ExtraModelBase,) + bases
return type("ExtraModel", bases, {})
def standardize_output(data: pd.DataFrame, names: dict) -> pd.DataFrame:
    """Restrict *data* to the spec columns and rename them.

    Guarantees that:
    - only the required columns are returned, and
    - they carry the API-spec names.

    :param data: input dataframe.
    :param names: mapping from internal column names to API-spec names.
    :return: dataframe containing only the renamed spec columns.
    """
    wanted_columns = list(names)
    subset = data[wanted_columns]
    return subset.rename(columns=names)
ExtraModel = extra_factory()
|
118222
|
import geopandas as gpd
import pandas as pd
from ..utils import csv_string_to_df, get_api_response
class MergeBoundaryStats:
    """Merge geographic boundary data with statistics fetched from the e-Stat API."""

    def __init__(
            self,
            app_id,
            stats_table_id,
            boundary_gdf,
            area,
            class_code,
            year):
        """Initializer.

        Args:
            app_id (str): e-Stat API key
            stats_table_id (str): ID of the statistical table to fetch
            boundary_gdf (gpd.GeoDataFrame): GeoDataFrame of boundary data
            area (str): standard area code
            class_code (str): class code from the statistical table metadata
            year (str): fiscal year for which to fetch the data
        """
        self.app_id = app_id
        self.stats_table_id = stats_table_id
        self.boundary_gdf = boundary_gdf
        self.area = area
        self.class_code = class_code
        # e-Stat time codes are the year followed by "100000".
        self.year = year + "100000"
        # Fix: the last query segment previously read "§ionHeaderFlg=2" —
        # the "&sect" of "&sectionHeaderFlg" had been mangled into the HTML
        # entity "§" — which dropped the sectionHeaderFlg parameter.
        self.detail_url = "http://api.e-stat.go.jp/rest/3.0/app/getSimpleStatsData" \
            f"?appId={self.app_id}" \
            f"&cdArea={self.area}" \
            f"&cdCat01={self.class_code}" \
            f"&cdTime={self.year}" \
            f"&statsDataId={self.stats_table_id}" \
            "&lang=J&metaGetFlg=N&cntGetFlg=N&explanationGetFlg=N&annotationGetFlg=N&sectionHeaderFlg=2"
        self.stats_df = self._extraction_only_year(self._create_stats_df())
        self.merged_df = self._merge_df()

    def _create_stats_df(self):
        """Fetch the statistical table from the API and return it as a DataFrame.

        Returns:
            pd.DataFrame: dataframe of the statistical table
        """
        print(f"統計表を取得します。URL={self.detail_url}")
        res = get_api_response(self.detail_url)
        row_text = res.text
        return csv_string_to_df(row_text)

    def _extraction_only_year(self, df):
        # Keep only rows whose time code matches the requested year.
        return df[df["time_code"].str.startswith(str(self.year))]

    def _merge_df(self):
        # Join boundary geometries to the statistics on the area code columns.
        return pd.merge(
            self.boundary_gdf,
            self.stats_df,
            left_on='AREA_CODE',
            right_on='area_code')
|
118264
|
from vol import Vol
from net import Net
from trainers import Trainer
# Module-level state shared by the pipeline functions below (Python 2 script).
training_data = []  # filled by load_data() with (Vol, survived-label) pairs
testing_data = []   # filled by load_data() with Vol inputs for the test CSV
network = None      # Net instance created in start()
sgd = None          # Trainer instance created in start()
N_TRAIN = 800       # first N_TRAIN examples train; the remainder validate
def load_data():
    """Parse the Kaggle Titanic train/test CSVs into Vol feature vectors.

    Fills the module-level training_data list with (Vol, survived) pairs and
    testing_data with Vol inputs. Features: Pclass, sex (0=male/1=female),
    age (0 if missing), SibSp, Parch, fare, embarkation port (C=0, Q=1, else 2).
    """
    global training_data, testing_data

    # Python 2 file() builtin; naive CSV split (assumes no quoted commas
    # beyond the Name/NameRest pair absorbing the one comma in names).
    train = [ line.split(',') for line in
              file('./data/titanic-kaggle/train.csv').read().split('\n')[1:] ]

    for ex in train:
        PassengerId,Survived,Pclass,Name,NameRest,Sex,Age,SibSp,Parch,Ticket,Fare,Cabin,Embarked = ex

        # Fixing
        sex = 0.0 if Sex == 'male' else 1.0
        age = 0 if Age == '' else float(Age)
        Embarked = Embarked.replace('\r', '')  # strip Windows line ending
        if Embarked == 'C':
            emb = 0.0
        elif Embarked == 'Q':
            emb = 1.0
        else:
            emb = 2.0
        # NOTE(review): unlike the test branch below, float(Fare) here has no
        # empty-string guard — confirm train.csv never has a blank Fare.
        vec = [ float(Pclass), sex, age, float(SibSp), float(Parch), float(Fare), emb ]
        v = Vol(vec)
        training_data.append((v, int(Survived)))

    test = [ line.split(',') for line in
             file('./data/titanic-kaggle/test.csv').read().split('\n')[1:] ]

    for ex in test:
        # Test CSV has no Survived column.
        PassengerId,Pclass,Name,NameRest,Sex,Age,SibSp,Parch,Ticket,Fare,Cabin,Embarked = ex

        # Fixing
        sex = 0.0 if Sex == 'male' else 1.0
        age = 0 if Age == '' else float(Age)
        Embarked = Embarked.replace('\r', '')
        if Embarked == 'C':
            emb = 0.0
        elif Embarked == 'Q':
            emb = 1.0
        else:
            emb = 2.0
        fare = 0 if Fare == '' else float(Fare)
        vec = [ float(Pclass), sex, age, float(SibSp), float(Parch), fare, emb ]
        testing_data.append(Vol(vec))

    print 'Data loaded...'
def start():
    """Build the network and trainer and store them in the module globals.

    Fix vs. the original: Python-2 ``print`` statements replaced with the
    ``print(...)`` call form, which behaves identically on Python 2 and 3.
    """
    global network, sgd
    # 7 inputs (Pclass, sex, age, SibSp, Parch, fare, embarked) -> 2 classes.
    # Hidden fc layers (e.g. {'type': 'fc', 'num_neurons': 30,
    # 'activation': 'relu'}) can be inserted between these two if desired.
    layers = [
        {'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': 7},
        {'type': 'softmax', 'num_classes': 2},  # svm works too
    ]
    print('Layers made...')
    network = Net(layers)
    print('Net made...')
    print(network)
    sgd = Trainer(network, {'momentum': 0.2, 'l2_decay': 0.001})
    print('Trainer made...')
    print(sgd)
def train():
    """Run one SGD pass over the first N_TRAIN training examples, printing
    per-step stats.

    Fix vs. the original: Python-2 ``print`` statements replaced with the
    ``print(...)`` call form (same space-separated output on both versions).
    """
    global training_data, sgd
    print('In training...')
    print('k', 'time\t\t ', 'loss\t ', 'training accuracy')
    print('----------------------------------------------------')
    for x, y in training_data[:N_TRAIN]:
        stats = sgd.train(x, y)
        print(stats['k'], stats['time'], stats['loss'], stats['accuracy'])
def test():
    """Report accuracy (%) on the held-out tail of the training set.

    Fix vs. the original: Python-2 ``print`` statements replaced with the
    ``print(...)`` call form.
    """
    global training_data, network
    print('In testing...')
    right = 0
    for x, y in training_data[N_TRAIN:]:
        network.forward(x)  # forward pass updates the net's prediction state
        right += network.getPrediction() == y  # bool adds as 0/1
    accuracy = float(right) / (len(training_data) - N_TRAIN) * 100
    print(accuracy)
|
118274
|
from __future__ import unicode_literals, absolute_import
from .alphatrade import AlphaTrade, TransactionType, OrderType, ProductType, LiveFeedType, Instrument
from alphatrade import exceptions
__all__ = ['AlphaTrade', 'TransactionType', 'OrderType',
'ProductType', 'LiveFeedType', 'Instrument', 'exceptions']
|
118292
|
class Solution:
    """House Robber II: houses form a circle; adjacent houses can't both be robbed."""

    def Rob(self, nums, m, n) -> int:
        """Max loot over the linear stretch nums[m:n] (classic house-robber DP).

        Requires n - m >= 2; callers guarantee this via the length checks in rob().
        """
        prev, curr = nums[m], max(nums[m], nums[m + 1])
        for i in range(m + 2, n):
            prev, curr = curr, max(prev + nums[i], curr)
        return curr

    def rob(self, nums: list) -> int:
        """Max loot on the circular street.

        Splits the circle into two linear cases — exclude the last house, or
        exclude the first — and takes the better result.

        Fix vs. the original: the annotation referenced ``typing.List``
        without importing it, raising NameError when the class body was
        executed; the builtin ``list`` is used instead.
        """
        if not nums:
            return 0
        if len(nums) <= 2:
            return max(nums)
        return max(self.Rob(nums, 0, len(nums) - 1), self.Rob(nums, 1, len(nums)))
|
118310
|
from unittest import TestCase
import numpy as np
import toolkit.metrics as metrics
class TestMotion(TestCase):
    """Checks each component returned by multilabel_tp_fp_tn_fn_scores.

    Every fixture is built so the per-label-column counts come out as
    [0, 1, 2, 3], making the expected value obvious.
    """

    def test_true_positives(self):
        truth = np.array([[1, 1, 1, 1],
                          [1, 0, 1, 1],
                          [0, 0, 0, 1]])
        pred = np.array([[0, 1, 1, 1],
                         [0, 1, 1, 1],
                         [0, 1, 0, 1]])
        scores = metrics.multilabel_tp_fp_tn_fn_scores(truth, pred)
        self.assertSequenceEqual(scores[0].tolist(), [0, 1, 2, 3])

    def test_false_positives(self):
        truth = np.array([[1, 1, 1, 0],
                          [1, 1, 0, 0],
                          [1, 0, 0, 0]])
        pred = np.array([[0, 0, 1, 1],
                         [1, 0, 1, 1],
                         [1, 1, 1, 1]])
        scores = metrics.multilabel_tp_fp_tn_fn_scores(truth, pred)
        self.assertSequenceEqual(scores[1].tolist(), [0, 1, 2, 3])

    def test_true_negatives(self):
        truth = np.array([[1, 1, 1, 0],
                          [1, 1, 0, 0],
                          [1, 0, 0, 0]])
        pred = np.array([[1, 0, 1, 0],
                         [1, 0, 0, 0],
                         [1, 0, 0, 0]])
        scores = metrics.multilabel_tp_fp_tn_fn_scores(truth, pred)
        self.assertSequenceEqual(scores[2].tolist(), [0, 1, 2, 3])

    def test_false_negatives(self):
        truth = np.array([[1, 0, 0, 1],
                          [1, 1, 1, 1],
                          [1, 0, 1, 1]])
        pred = np.array([[1, 0, 1, 0],
                         [1, 0, 0, 0],
                         [1, 0, 0, 0]])
        scores = metrics.multilabel_tp_fp_tn_fn_scores(truth, pred)
        self.assertSequenceEqual(scores[3].tolist(), [0, 1, 2, 3])
|
118315
|
import os, sys
import unittest, psutil
# Make the project root importable when the tests are run directly.
current_path = os.path.dirname(os.path.realpath(__file__))
project_root = os.path.abspath(os.path.join(current_path, '..'))
sys.path.insert(0, project_root)

from steamworks import STEAMWORKS

# NOTE(review): 'Steam.exe' is the Windows process name — presumably these
# tests are Windows-only; confirm before running elsewhere.
_steam_running = any(process.name() == 'Steam.exe' for process in psutil.process_iter())

if not _steam_running:
    raise Exception('Steam not running, but required for tests')
class TestCaseSTEAMWORKS(unittest.TestCase):
    """Smoke-test that a freshly initialized STEAMWORKS exposes every interface."""

    EXPECTED_INTERFACES = (
        'Apps', 'Friends', 'Matchmaking', 'Music', 'Screenshots',
        'Users', 'UserStats', 'Utils', 'Workshop',
    )

    def setUp(self):
        self.steam = STEAMWORKS()
        self.steam.initialize()

    def test_populated(self):
        for interface in self.EXPECTED_INTERFACES:
            self.assertTrue(
                hasattr(self.steam, interface)
                and getattr(self.steam, interface) is not None
            )
|
118349
|
from PySide2.QtWidgets import QDialog, QDialogButtonBox, QVBoxLayout, QPlainTextEdit, QShortcut, QMessageBox, QGroupBox, \
QScrollArea, QCheckBox, QLabel
class CodeGenDialog(QDialog):
    """Dialog that lets the user pick which imported modules should have
    their source code inlined in the generated output (unchecked = inline)."""

    def __init__(self, modules: dict, parent=None):
        """Build the dialog.

        :param modules: mapping with keys ``'imports'`` and ``'fromimports'``.
            For from-imports, index [2] of each entry holds the imported
            names; index [1] is later overwritten with the checkbox state
            (see get_import_selection). NOTE(review): full entry shape is
            not visible here — confirm against the caller.
        :param parent: optional parent widget.
        """
        super(CodeGenDialog, self).__init__(parent)
        self.modules = modules
        main_layout = QVBoxLayout()
        imports_group_box = QGroupBox('Imports')
        imports_group_box.setLayout(QVBoxLayout())
        # Read-only explanation shown at the top of the imports group.
        info_text_edit = QPlainTextEdit('''I found the following imports in the inspected components. Please unselect all imports whose source code you want me to include in the output. All checked modules remain imported using the import statements. Notice that import alias names (import ... as ...) of course won\'t work when including the module\'s source. Same goes for imports using 'from' (indicated in the list below). And for those, the whole (direct) source will be included if you unselect a module.''')
        info_text_edit.setReadOnly(True)
        imports_group_box.layout().addWidget(info_text_edit)
        imports_scroll_area = QScrollArea()
        imports_scroll_area.setLayout(QVBoxLayout())
        # Maps each checkbox widget back to the module name it represents.
        self.import_widget_assignment = {'imports': {}, 'fromimports': {}}
        # imports: one checkbox per plain-imported module, checked by default
        # (checked = keep as an import statement, don't inline the source).
        imports_scroll_area.layout().addWidget(QLabel('imports:'))
        for i in modules['imports'].keys():
            import_check_box = QCheckBox(i)
            import_check_box.setChecked(True)
            imports_scroll_area.layout().addWidget(import_check_box)
            self.import_widget_assignment['imports'][import_check_box] = i
        # from-imports: label each checkbox with the module plus the names
        # imported from it.
        imports_scroll_area.layout().addWidget(QLabel('\'from\'-imports:'))
        for i in modules['fromimports'].keys():
            names = modules['fromimports'][i][2]
            from_names_list = ', '.join(names)
            import_check_box = QCheckBox(i + ': ' + from_names_list)
            import_check_box.setChecked(True)
            imports_scroll_area.layout().addWidget(import_check_box)
            self.import_widget_assignment['fromimports'][import_check_box] = i
        imports_group_box.layout().addWidget(imports_scroll_area)
        main_layout.addWidget(imports_group_box)
        # Standard OK/Cancel buttons wired to the dialog's accept/reject.
        button_box = QDialogButtonBox()
        button_box.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)
        button_box.accepted.connect(self.accept)
        button_box.rejected.connect(self.reject)
        main_layout.addWidget(button_box)
        self.setLayout(main_layout)
        self.resize(500, 500)
        self.setWindowTitle('Source Code Gen Manager')

    def get_import_selection(self) -> dict:
        """Write each checkbox's state back into the modules dict and return it.

        Index [1] of every entry is set to the checkbox state. NOTE: mutates
        ``self.modules`` in place; the returned dict shares those objects.
        """
        imports = self.modules['imports']
        ia = self.import_widget_assignment['imports']
        for k in ia.keys():
            imports[ia[k]][1] = k.isChecked()
        fromimports = self.modules['fromimports']
        fia = self.import_widget_assignment['fromimports']
        for k in fia.keys():
            fromimports[fia[k]][1] = k.isChecked()
        return {'imports': imports, 'fromimports': fromimports}
|
118370
|
from typing import Mapping
import meerkat as mk
from dcbench.common import Problem, Solution
from dcbench.common.artifact import (
DataPanelArtifact,
ModelArtifact,
VisionDatasetArtifact,
)
from dcbench.common.artifact_container import ArtifactSpec
from dcbench.common.table import AttributeSpec
class SliceDiscoverySolution(Solution):
    """A slice-discovery submission: the predicted slice labels for one
    SliceDiscoveryProblem, plus the attributes needed to reproduce the run."""

    # Single artifact: predicted slice membership per example id.
    artifact_specs: Mapping[str, ArtifactSpec] = {
        "pred_slices": ArtifactSpec(
            artifact_type=DataPanelArtifact,
            description="A DataPanel of predicted slice labels with columns `id`"
            " and `pred_slices`.",
        ),
    }

    attribute_specs = {
        "problem_id": AttributeSpec(
            description="A unique identifier for this problem.",
            attribute_type=str,
        ),
        "slicer_class": AttributeSpec(
            # NOTE(review): description is truncated in the original source.
            description="The ",
            attribute_type=type,
        ),
        "slicer_config": AttributeSpec(
            description="The configuration for the slicer.",
            attribute_type=dict,
        ),
        "embedding_column": AttributeSpec(
            description="The column name of the embedding.",
            attribute_type=str,
        ),
    }

    task_id: str = "slice_discovery"

    @property
    def problem(self):
        """The SliceDiscoveryProblem this solution answers.

        Imported lazily inside the property to avoid a circular import at
        module load time.
        """
        from dcbench import tasks

        return tasks["slice_discovery"].problems[self.problem_id]

    def merge(self) -> mk.DataPanel:
        """Left-join the predicted slices with the problem's test split
        (including ground-truth slices) on `id`; every prediction row is kept."""
        return self["pred_slices"].merge(
            self.problem.merge(split="test", slices=True), on="id", how="left"
        )
class SliceDiscoveryProblem(Problem):
    """A slice-discovery benchmark instance: model predictions, activations,
    ground-truth slices, the model itself, and the underlying image dataset."""

    artifact_specs: Mapping[str, ArtifactSpec] = {
        "val_predictions": ArtifactSpec(
            artifact_type=DataPanelArtifact,
            description=(
                "A DataPanel of the model's predictions with columns `id`,"
                "`target`, and `probs.`"
            ),
        ),
        "test_predictions": ArtifactSpec(
            artifact_type=DataPanelArtifact,
            description=(
                "A DataPanel of the model's predictions with columns `id`,"
                "`target`, and `probs.`"
            ),
        ),
        "test_slices": ArtifactSpec(
            artifact_type=DataPanelArtifact,
            description="A DataPanel of the ground truth slice labels with columns "
            " `id`, `slices`.",
        ),
        "activations": ArtifactSpec(
            artifact_type=DataPanelArtifact,
            description="A DataPanel of the model's activations with columns `id`,"
            "`act`",
        ),
        "model": ArtifactSpec(
            artifact_type=ModelArtifact,
            description="A trained PyTorch model to audit.",
        ),
        "base_dataset": ArtifactSpec(
            artifact_type=VisionDatasetArtifact,
            description="A DataPanel representing the base dataset with columns `id` "
            "and `image`.",
        ),
        "clip": ArtifactSpec(
            artifact_type=DataPanelArtifact,
            description="A DataPanel of the image embeddings from OpenAI's CLIP model",
        ),
    }

    attribute_specs = {
        "n_pred_slices": AttributeSpec(
            description="The number of slice predictions that each slice discovery "
            "method can return.",
            attribute_type=int,
        ),
        "slice_category": AttributeSpec(
            description="The type of slice .", attribute_type=str
        ),
        "target_name": AttributeSpec(
            description="The name of the target column in the dataset.",
            attribute_type=str,
        ),
        "dataset": AttributeSpec(
            description="The name of the dataset being audited.",
            attribute_type=str,
        ),
        "alpha": AttributeSpec(
            description="The alpha parameter for the AUC metric.",
            attribute_type=float,
        ),
        "slice_names": AttributeSpec(
            description="The names of the slices in the dataset.",
            attribute_type=list,
        ),
    }

    task_id: str = "slice_discovery"

    def merge(self, split="val", slices: bool = False):
        """Left-join the split's predictions with the base dataset on `id`.

        The base dataset's own `split` column is dropped before joining to
        avoid a column collision. When ``slices`` is True, the split's
        ground-truth slice labels are joined in as well.
        """
        base_dataset = self["base_dataset"]
        base_dataset = base_dataset[[c for c in base_dataset.columns if c != "split"]]
        dp = self[f"{split}_predictions"].merge(
            base_dataset, on="id", how="left"
        )
        if slices:
            dp = dp.merge(self[f"{split}_slices"], on="id", how="left")
        return dp

    def solve(self, pred_slices_dp: mk.DataPanel) -> SliceDiscoverySolution:
        """Package a predicted-slices DataPanel into a SliceDiscoverySolution.

        Raises:
            ValueError: if ``pred_slices_dp`` lacks the required `id` or
                `pred_slices` columns.
        """
        if ("id" not in pred_slices_dp) or ("pred_slices" not in pred_slices_dp):
            raise ValueError(
                f"DataPanel passed to {self.__class__.__name__} must include columns "
                "`id` and `pred_slices`"
            )
        return SliceDiscoverySolution(
            artifacts={"pred_slices": pred_slices_dp},
            attributes={"problem_id": self.id},
        )

    def evaluate(self):
        # Intentionally a no-op here; evaluation is not implemented for this
        # problem class in this module.
        pass
|
118379
|
# For each test case (L, D, S, C): starting with S likes and gaining C * S
# likes per day, decide whether at least L likes are reached within D days.
#
# BUG FIX: the original guarded the growth statement with ``if S >= L`` and
# broke immediately, so the like count never grew when it started below the
# target (the loop was a no-op); the inner loop also shadowed the outer
# loop variable ``i``. Now likes grow once per remaining day, stopping
# early when the target is reached.
N = int(input("Enter the number of test cases:"))
for _ in range(0, N):
    L, D, S, C = map(int, input().split())
    for _day in range(1, D):
        if S >= L:
            break
        S += C * S
    if L <= S:
        print("ALIVE AND KICKING")
    else:
        print("DEAD AND ROTTING")
|
118381
|
from typing import Optional, List, Callable, Tuple
import torch
import random
import sys
import torch.nn.functional as F
import torch.multiprocessing as mp
import torch.nn.parallel as paralle
import unittest
import torchshard as ts
from testing import IdentityLayer
from testing import dist_worker, assertEqual, set_seed
from testing import loss_reduction_type, threshold
def torch_cross_entropy(batch_size, seq_length, vocab_size, logits_scale, seed, local_rank):
    """Reference loss/grad/logits/target computed with plain PyTorch.

    Seeds the RNG first so the companion torchshard path can reproduce the
    exact same logits and targets.
    """
    set_seed(seed)
    identity = IdentityLayer((batch_size, seq_length, vocab_size),
                             scale=logits_scale).cuda(local_rank)
    logits = identity()
    target = torch.LongTensor(
        size=(batch_size, seq_length)).random_(0, vocab_size).cuda(local_rank)

    # Flatten (batch, seq) into a single token dimension.
    flat_logits = logits.view(-1, logits.size()[-1])
    flat_target = target.view(-1)

    loss = F.cross_entropy(
        flat_logits, flat_target, reduction=loss_reduction_type).view_as(flat_target)
    if loss_reduction_type == 'none':
        loss = loss.sum()
    loss.backward()
    return loss, identity.weight.grad, flat_logits, flat_target
def torchshard_cross_entropy(batch_size, seq_length, vocab_size, logits_scale, seed, local_rank):
    """Loss/grad/logits/target via torchshard's parallel cross entropy.

    Mirrors torch_cross_entropy exactly (same seed, same tensor creation
    order) except that the vocab dimension of the logits is scattered
    across the model-parallel group before the loss.
    """
    set_seed(seed)
    identity = IdentityLayer((batch_size, seq_length, vocab_size),
                             scale=logits_scale).cuda(local_rank)
    logits = identity()
    target = torch.LongTensor(
        size=(batch_size, seq_length)).random_(0, vocab_size).cuda(local_rank)

    # Flatten (batch, seq) into a single token dimension.
    flat_logits = logits.view(-1, logits.size()[-1])
    flat_target = target.view(-1)

    # Shard the vocab dimension across the parallel group.
    logits_parallel = ts.distributed.scatter(flat_logits, dim=-1)
    loss = ts.nn.functional.parallel_cross_entropy(
        logits_parallel, flat_target, reduction=loss_reduction_type)
    if loss_reduction_type == 'none':
        loss = loss.sum()
    loss.backward()
    return loss, identity.weight.grad, flat_logits, flat_target
class TestCrossEntropy(unittest.TestCase):
    """Checks torchshard's parallel cross entropy against plain PyTorch."""

    @staticmethod
    def run_test_naive_cross_entropy(local_rank: int) -> None:
        """Body executed inside each spawned distributed worker."""
        # settings
        batch_size = 13
        seq_length = 17
        vocab_size_per_partition = 11
        logits_scale = 1000.0
        tensor_model_parallel_size = ts.distributed.get_world_size()
        # Full vocab is the per-partition size times the parallel group size.
        vocab_size = vocab_size_per_partition * tensor_model_parallel_size
        seed = 1234
        loss_torch, grad_torch, logits_torch, target_torch = \
            torch_cross_entropy(batch_size, seq_length,
                                vocab_size, logits_scale,
                                seed, local_rank)
        loss_ts, grad_ts, logits_ts, target_ts = \
            torchshard_cross_entropy(batch_size, seq_length,
                                     vocab_size, logits_scale,
                                     seed, local_rank)
        # Both paths used the same seed, so all four must agree elementwise.
        assertEqual(logits_torch, logits_ts, threshold=threshold)
        assertEqual(target_torch, target_ts, threshold=threshold)
        assertEqual(loss_torch, loss_ts, threshold=threshold)
        assertEqual(grad_torch, grad_ts, threshold=threshold)

    @unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
    def test_naive_cross_entropy(self):
        """Spawn one worker per visible GPU, then tear down the process group."""
        ngpus = torch.cuda.device_count()
        mp.spawn(
            dist_worker,
            args=(self.run_test_naive_cross_entropy, ngpus),
            nprocs=ngpus
        )
        ts.distributed.destroy_process_group()
if __name__ == '__main__':
    # Deterministic cuDNN so the torch-vs-torchshard comparisons are exact.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    unittest.main()
|
118402
|
# Vulture whitelist: each bare name below is a symbol that vulture's
# dead-code analysis flags as unused but that is intentionally kept
# (public API, dynamically accessed, etc.). The trailing comment on each
# line records the kind of finding and its location. This file is meant to
# be passed to vulture, not executed.
DynamoTable # unused import (dynamo_query/__init__.py:8)
DynamoRecord # unused variable (dynamo_query/__init__.py:12)
create # unused function (dynamo_query/data_table.py:119)
memo # unused variable (dynamo_query/data_table.py:137)
filter_keys # unused function (dynamo_query/data_table.py:299)
get_column # unused function (dynamo_query/data_table.py:472)
drop_duplicates # unused function (dynamo_query/data_table.py:734)
sanitize_key # unused function (dynamo_query/dictclasses/dictclass.py:127)
compute_key # unused function (dynamo_query/dictclasses/dictclass.py:131)
sanitize # unused function (dynamo_query/dictclasses/dictclass.py:351)
get_field_names # unused function (dynamo_query/dictclasses/dynamo_dictclass.py:32)
DynamoAutoscaler # unused class (dynamo_query/dynamo_autoscaler.py:17)
deregister_auto_scaling # unused function (dynamo_query/dynamo_autoscaler.py:47)
register_auto_scaling # unused function (dynamo_query/dynamo_autoscaler.py:76)
get_last_evaluated_key # unused function (dynamo_query/dynamo_query_main.py:648)
reset_start_key # unused function (dynamo_query/dynamo_query_main.py:677)
get_raw_responses # unused function (dynamo_query/dynamo_query_main.py:684)
DynamoTable # unused class (dynamo_query/dynamo_table.py:63)
delete_table # unused function (dynamo_query/dynamo_table.py:232)
invalidate_cache # unused function (dynamo_query/dynamo_table.py:546)
cached_batch_get # unused function (dynamo_query/dynamo_table.py:552)
batch_get_records # unused function (dynamo_query/dynamo_table.py:729)
batch_delete_records # unused function (dynamo_query/dynamo_table.py:747)
batch_upsert_records # unused function (dynamo_query/dynamo_table.py:762)
cached_get_record # unused function (dynamo_query/dynamo_table.py:829)
upsert_record # unused function (dynamo_query/dynamo_table.py:851)
delete_record # unused function (dynamo_query/dynamo_table.py:920)
clear_records # unused function (dynamo_query/dynamo_table.py:1131)
NE # unused variable (dynamo_query/enums.py:58)
IN # unused variable (dynamo_query/enums.py:59)
EXISTS # unused variable (dynamo_query/enums.py:65)
NOT_EXISTS # unused variable (dynamo_query/enums.py:66)
CONTAINS # unused variable (dynamo_query/enums.py:68)
default # unused function (dynamo_query/json_tools.py:40)
pluralize # unused function (dynamo_query/utils.py:91)
get_nested_item # unused function (dynamo_query/utils.py:112)
|
118411
|
import logging
import asyncio
import weakref
import functools
L = logging.getLogger(__name__)
class PubSub(object):
	"""Publish/subscribe hub that routes messages by message type.

	Callbacks are stored as weak references, so a subscription never keeps
	a subscriber object alive; entries whose referent has been collected
	are pruned lazily during unsubscribe and delivery.
	"""

	def __init__(self, app):
		# message_type -> list of weakref.ref / weakref.WeakMethod entries
		self.Subscribers = {}
		self.Loop = app.Loop

	def subscribe(self, message_type, callback):
		"""
		Subscribe a subscriber to the given message type.
		It can be a plain function, a method, or a coroutine variant of
		either (coroutines are delivered in a dedicated future).
		"""
		# Bound methods need WeakMethod: a plain weakref to a bound method
		# dies immediately because each attribute access creates a fresh
		# method object.
		# https://stackoverflow.com/questions/53225/how-do-you-check-whether-a-python-method-is-bound-or-not
		if hasattr(callback, '__self__'):
			callback = weakref.WeakMethod(callback)
		else:
			callback = weakref.ref(callback)

		if message_type not in self.Subscribers:
			self.Subscribers[message_type] = [callback]
		else:
			self.Subscribers[message_type].append(callback)

	def subscribe_all(self, obj):
		"""
		Find all @asab.subscribe decorated methods on the obj and subscribe them.
		"""
		for member_name in dir(obj):
			member = getattr(obj, member_name)
			message_types = getattr(member, 'asab_pubsub_subscribe_to_message_types', None)
			if message_types is not None:
				for message_type in message_types:
					self.subscribe(message_type, member)

	def unsubscribe(self, message_type, callback):
		"""Remove a subscriber of a message type from the set."""
		callback_list = self.Subscribers.get(message_type)
		if callback_list is None:
			# Fixed a stray doubled quote in the log message.
			L.warning("Message type subscription '{}' not found.".format(message_type))
			return

		remove_list = None
		for i in range(len(callback_list)):
			# Dereference the weakref entry in the callback list.
			c = callback_list[i]()

			if c is None:
				# The referent is gone; schedule the dead entry for pruning.
				if remove_list is None:
					remove_list = list()
				remove_list.append(callback_list[i])
				continue

			if c == callback:
				callback_list.pop(i)
				break
		else:
			# BUG FIX: the format arguments were swapped (message_type filled
			# the 'Subscriber' slot and vice versa) and the message had a
			# stray doubled quote.
			L.warning("Subscriber '{}' not found for the message type '{}'.".format(callback, message_type))

		if remove_list is not None:
			for callback_ref in remove_list:
				callback_list.remove(callback_ref)

		if len(callback_list) == 0:
			# BUG FIX: the original deleted the string literal 'message_type',
			# which raised KeyError instead of removing the empty entry.
			del self.Subscribers[message_type]

	def _callback_iter(self, message_type):
		"""Yield live callbacks for ``message_type``; coroutine functions are
		wrapped so they get scheduled on the loop. Dead refs are pruned."""

		def _deliver_async(loop, callback, message_type, *args, **kwargs):
			asyncio.ensure_future(callback(message_type, *args, **kwargs), loop=loop)

		callback_list = self.Subscribers.get(message_type)
		if callback_list is None:
			return

		remove_list = None
		for callback_ref in callback_list:
			callback = callback_ref()

			if callback is None:
				# The referent is gone; schedule the dead entry for pruning.
				if remove_list is None:
					remove_list = list()
				remove_list.append(callback_ref)
				continue

			if asyncio.iscoroutinefunction(callback):
				callback = functools.partial(_deliver_async, self.Loop, callback)

			yield callback

		if remove_list is not None:
			for callback_ref in remove_list:
				callback_list.remove(callback_ref)

	def publish(self, message_type, *args, **kwargs):
		"""Notify subscribers of a message type, forwarding any arguments.

		Pass ``asynchronously=True`` to defer delivery via ``loop.call_soon``.
		"""
		asynchronously = kwargs.pop('asynchronously', False)

		if asynchronously:
			for callback in self._callback_iter(message_type):
				self.Loop.call_soon(functools.partial(callback, message_type, *args, **kwargs))
		else:
			for callback in self._callback_iter(message_type):
				callback(message_type, *args, **kwargs)
###
class subscribe(object):
	'''
	Decorator that tags a method for PubSub.subscribe_all().

	Usage:

	@asab.subscribe("tick")
	def on_tick(self, message_type):
		print("Service tick")
	'''

	def __init__(self, message_type):
		self.message_type = message_type

	def __call__(self, f):
		# Append to the tag list if a previous @subscribe already created it.
		existing = getattr(f, 'asab_pubsub_subscribe_to_message_types', None)
		if existing is None:
			f.asab_pubsub_subscribe_to_message_types = [self.message_type]
		else:
			existing.append(self.message_type)
		return f
###
class Subscriber(object):
	'''
	:any:`Subscriber` consumes PubSub messages from coroutines.

	Messages from every subscribed type are queued and handed out in FIFO
	order. When ``pubsub`` is None, no initial subscription is made.

	.. code:: python

		subscriber = asab.Subscriber(
			app.PubSub,
			"Application.tick!",
			"Application.stop!"
		)
	'''

	def __init__(self, pubsub=None, *message_types):
		self._q = asyncio.Queue()
		self._subscriptions = []

		if pubsub is not None:
			for message_type in message_types:
				self.subscribe(pubsub, message_type)

	def subscribe(self, pubsub, message_type):
		'''
		Subscribe to an additional message type. May be called repeatedly,
		with different ``pubsub`` objects if desired.
		'''
		pubsub.subscribe(message_type, self)
		self._subscriptions.append((pubsub, message_type))

	def __call__(self, message_type, *args, **kwargs):
		# PubSub delivers here; enqueue for later async consumption.
		self._q.put_nowait((message_type, args, kwargs))

	def message(self):
		'''
		Await one message; resolves to a ``(message_type, args, kwargs)`` tuple.

		.. code:: python

			async def my_coroutine(app):
				subscriber = asab.Subscriber(
					app.PubSub,
					"Application.tick!",
					"Application.exit!"
				)
				while True:
					message_type, args, kwargs = await subscriber.message()
					if message_type == "Application.exit!":
						break
					print("Tick.")
		'''
		return self._q.get()

	def __aiter__(self):
		return self

	async def __anext__(self):
		return await self._q.get()
|
118426
|
from cauldron.session import projects
from cauldron.session.writing.components import bokeh_component
from cauldron.session.writing.components import definitions
from cauldron.session.writing.components import plotly_component
from cauldron.session.writing.components import project_component
from cauldron.session.writing.components.definitions import COMPONENT
from cauldron.session.writing.components.definitions import WEB_INCLUDE
def _get_components(lib_name: str, project: 'projects.Project') -> COMPONENT:
    """Resolve the writing component for a plotting library by name."""
    factories = {
        'bokeh': bokeh_component.create,
        'plotly': plotly_component.create,
    }
    factory = factories.get(lib_name)
    if factory is not None:
        return factory(project)

    # Unknown components will just return as empty components. There used
    # to be a shared component type that was removed in 1.0.0, but hadn't
    # been used for a long time before that. If that becomes interesting
    # again old code can be reviewed to see how shared components once
    # worked.
    return COMPONENT([], [])
def get(step: 'projects.ProjectStep') -> COMPONENT:
    """Merge the step's web includes with its library components."""
    library_components = [
        _get_components(name, step.project)
        for name in step.report.library_includes
    ]
    return definitions.merge_components(
        project_component.create_many(step.project, step.web_includes),
        *library_components,
    )
|
118429
|
import os
from glob import glob
import numpy as np
import dlib
import cv2
from PIL import Image
def remove_undetected(directory, detector='hog'):
    '''
    Removes images in the data folder in which no face can be detected.

    Args:
    -----------------------------------------
    directory: path to the data folder (one sub-folder per class)
    detector: type of detector ('hog' or 'cnn') to detect faces

    Returns:
    ------------------------------------------
    None. Deletes undetected images from disk as a side effect (the
    original docstring wrongly claimed it returned face coordinates).
    '''
    all_imgs = glob(f'{directory}*/*')
    for img in all_imgs:
        arr_img = np.asarray(Image.open(img))
        # Remove the image when detection itself blows up (e.g. no face
        # found raises IndexError, unreadable file). Narrowed from a bare
        # ``except:`` so KeyboardInterrupt/SystemExit still propagate.
        try:
            faces_detected = face_detector(arr_img, detector)
        except Exception:
            print(img)
            os.remove(img)
            continue
        # face_detector returns None for an invalid bounding box. (Fixed
        # ``== None`` to the idiomatic identity check; the old ``== []``
        # comparison could never be true since a list is never returned.)
        if faces_detected is None:
            print(img)
            os.remove(img)
def face_detector(img, detector='hog'):
    '''
    Detects a face in an image from the data set.

    Args:
    -----------------------------------------
    img: numpy image array
    detector: type of detector ('hog' or 'cnn') to detect faces

    Returns:
    ------------------------------------------
    (top, bottom, left, right) of the first detected face, i.e. (y1, y2, x1, x2),
    or None when any coordinate of the box is non-positive. Any other
    detector name falls through and implicitly returns None.
    '''
    kind = detector.lower()
    if kind == 'hog':
        hogFaceDetector = dlib.get_frontal_face_detector()
        box = hogFaceDetector(img, 1)[0]
        # All four edges must be strictly positive to count as a valid box.
        if min(box.top(), box.bottom(), box.left(), box.right()) > 0:
            return box.top(), box.bottom(), box.left(), box.right()
        return None
    elif kind == 'cnn':
        dnnFaceDetector = dlib.cnn_face_detection_model_v1('./database/dlib-models/mmod_human_face_detector.dat')
        box = dnnFaceDetector(img, 1)[0].rect
        if min(box.top(), box.bottom(), box.left(), box.right()) > 0:
            return box.top(), box.bottom(), box.left(), box.right()
        return None
def make_data_array(directory, paths, img_size, imgs_per_folder, total_imgs, check_detect, detector='hog'):
    '''
    Loads the data from disk into arrays to speed up training.

    Args:
    -----------------------------------------
    directory: path to data folder
    paths: paths to persons/classes in data
    img_size: size each face crop is resized to
    imgs_per_folder: number of images to take from each class
    total_imgs: total number of images in the data folder
    check_detect: bool; when True, first delete images with no detectable face
    detector: type of detector ('hog' or 'cnn') to detect faces

    Returns:
    ------------------------------------------
    (data, y, imgs_per_folder): loaded inputs, their class labels, and the
    (possibly recomputed) per-folder image count.
    '''
    # BUG FIX: ``np.int`` was removed in NumPy 1.24; plain ``int`` is the
    # documented replacement (``np.int`` was just an alias for it).
    data = np.zeros((total_imgs, img_size, img_size, 3), dtype=int)
    y = np.zeros((total_imgs))
    if check_detect:
        print("Removing undetected Images..")
        remove_undetected(directory, detector)
        # Recompute imgs_per_folder: remove_undetected may have shrunk some
        # folders, and every class must contribute the same count.
        imgs_per_folder = min(len(glob(f'{i}/*')) for i in paths)
        print("Removed undetected Images")
        print('-----------------------------------------\n')
    else:
        print("Skipping Detection Check")
        print('-----------------------------------------')
    print("Detecting Faces")
    # Store imgs_per_folder face crops from each class plus the class index
    # as the label; rows are laid out class-by-class.
    for index1, individual in enumerate(paths):
        for index2, picture in enumerate(glob(f'{individual}/*')[:imgs_per_folder]):
            img = np.asarray(Image.open(picture))
            y1, y2, x1, x2 = face_detector(img, detector)
            resized_img = cv2.resize(img[y1:y2, x1:x2], (img_size, img_size))
            data[index1 * imgs_per_folder + index2] = resized_img
            y[index1 * imgs_per_folder + index2] = index1
    print("Faces Detected and Loaded Successfully")
    return data, y, imgs_per_folder
|
118470
|
# Demonstrate str.isidentifier() on a handful of sample strings:
# a plain ASCII name, a digit-prefixed string (invalid — identifiers can't
# start with 0-9), the empty string (never valid), an underscore-prefixed
# name, and non-ASCII identifier characters allowed since PEP 3131.
for s in ('xyzABC', '0xyz', '', '_xyz', 'ꝗꞨꫳ'):
    print(f'{s} is a valid identifier = {s.isidentifier()}')

import unicodedata

# Enumerate every BMP code point that is, by itself, a valid identifier.
count = 0
for codepoint in range(2 ** 16):
    ch = chr(codepoint)
    if ch.isidentifier():
        print(u'{:04x}: {} ({})'.format(codepoint, ch, unicodedata.name(ch, 'UNNAMED')))
        count += 1
print(f'Total Number of Identifier Unicode Characters = {count}')
|
118488
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torchvision import models
from linear_attention_transformer import ImageLinearAttention
class ResnetGenerator(nn.Module):
    """UGATIT-style generator/decoder.

    Fuses four encoder feature maps via PixelShuffle upsampling heads and
    skip concatenations, applies ``n_blocks`` adaILN residual blocks whose
    gamma/beta are predicted from globally pooled features, then upsamples
    to an ``output_nc``-channel image with a final Tanh.

    Depends on ``convrelu`` and ``ILN`` defined elsewhere in this module.
    """

    def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, img_size=256):
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size

        # One PixelShuffle upsampling head per encoder level (4 = deepest).
        self.Hiera4 = self.hierarchical(4)
        self.Hiera3 = self.hierarchical(3)
        self.Hiera2 = self.hierarchical(2)
        self.Hiera1 = self.hierarchical(1)

        # 3x3 conv+relu fusers applied after each skip concatenation.
        self.conv_up3 = convrelu(512, 256, 3, 1)
        self.conv_up2 = convrelu(256, 128, 3, 1)
        self.conv_up1 = convrelu(128, 64, 3, 1)

        n_downsampling = 2
        mult = 2**n_downsampling
        # Projects the fused ngf-channel map up to ngf*mult channels.
        UpBlock0 = [nn.ReflectionPad2d(1),
                    nn.Conv2d(int(ngf * mult / 4), ngf * mult, kernel_size=3, stride=1, padding=0, bias=True),
                    ILN(ngf * mult),
                    nn.ReLU(True)]

        self.relu = nn.ReLU(True)

        # Gamma, Beta block: MLP over globally pooled features feeding the
        # two linear heads that produce the adaILN modulation parameters.
        FC = [nn.Linear(ngf * mult, ngf * mult, bias=False),
              nn.ReLU(True),
              nn.Linear(ngf * mult, ngf * mult, bias=False),
              nn.ReLU(True)]
        self.gamma = nn.Linear(ngf * mult, ngf * mult, bias=False)
        self.beta = nn.Linear(ngf * mult, ngf * mult, bias=False)

        # Up-Sampling Bottleneck: residual adaILN blocks registered by name
        # (checkpoints key on 'UpBlock1_<i>').
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i+1), ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            # Experiments show that the performance of Up-sample and Sub-pixel is similar,
            # although theoretically Sub-pixel has more parameters and less FLOPs.
            # UpBlock2 += [nn.Upsample(scale_factor=2, mode='nearest'),
            #              nn.ReflectionPad2d(1),
            #              nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0, bias=False),
            #              ILN(int(ngf * mult / 2)),
            #              nn.ReLU(True)]
            UpBlock2 += [nn.ReflectionPad2d(1),
                         nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0, bias=False),
                         ILN(int(ngf * mult / 2)),
                         nn.ReLU(True),
                         nn.Conv2d(int(ngf * mult / 2), int(ngf * mult / 2)*4, kernel_size=1, stride=1, bias=True),
                         nn.PixelShuffle(2),
                         ILN(int(ngf * mult / 2)),
                         nn.ReLU(True)
                         ]

        # Final 7x7 conv to the output channel count, squashed to [-1, 1].
        UpBlock2 += [nn.ReflectionPad2d(3),
                     nn.Conv2d(ngf, output_nc, kernel_size=7, stride=1, padding=0, bias=False),
                     nn.Tanh()]

        self.FC = nn.Sequential(*FC)
        self.UpBlock0 = nn.Sequential(*UpBlock0)
        self.UpBlock2 = nn.Sequential(*UpBlock2)

    def hierarchical(self,mult):
        """Build a x2 PixelShuffle upsampling head for encoder level ``mult``
        (halves the channel count, doubles the spatial size)."""
        ngf = 64
        mult = 2 ** (mult-1)
        Hiera = [nn.ReflectionPad2d(1),
                 nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0, bias=False),
                 ILN(int(ngf * mult / 2)),
                 nn.ReLU(True),
                 nn.Conv2d(int(ngf * mult / 2), int(ngf * mult / 2)*4, kernel_size=1, stride=1, bias=True),
                 nn.PixelShuffle(2),
                 ILN(int(ngf * mult / 2)),
                 nn.ReLU(True)
                 ]
        return nn.Sequential(*Hiera)

    def forward(self, E1, E2, E3, E4):
        """Decode encoder maps E1..E4 (E4 deepest) into an image.

        Shape comments below (H, W, C) come from the original author and
        assume the default img_size — TODO confirm for other input sizes.
        """
        E4 = self.Hiera4(E4) # 16,16,256
        E3 = torch.cat([E4,E3],dim=1) # 16,16,512
        E3 = self.conv_up3(E3) # 16,16,256
        E3 = self.Hiera3(E3) # 32,32,128
        E2 = torch.cat([E3,E2],dim=1) # 32,32,256
        E2 = self.conv_up2(E2) # 32,32,128
        E2 = self.Hiera2(E2) # 64,64,64
        E1 = torch.cat([E2,E1],dim=1) # 64,64,128
        E1 = self.conv_up1(E1) # 64,64,64
        x = self.UpBlock0(E1)
        # Global average pool -> MLP -> gamma/beta for the adaILN blocks.
        x_ = torch.nn.functional.adaptive_avg_pool2d(x, 1)
        x_ = self.FC(x_.view(x_.shape[0], -1))
        gamma, beta = self.gamma(x_), self.beta(x_)
        for i in range(self.n_blocks):
            x = getattr(self, 'UpBlock1_' + str(i+1))(x, gamma, beta)
        out = self.UpBlock2(x)
        return out
class ResnetAdaILNBlock(nn.Module):
    """Residual block normalized with adaILN, conditioned on external gamma/beta."""

    def __init__(self, dim, use_bias):
        super(ResnetAdaILNBlock, self).__init__()
        self.pad1 = nn.ReflectionPad2d(1)
        self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias)
        self.norm1 = adaILN(dim)
        self.relu1 = nn.ReLU(True)
        self.pad2 = nn.ReflectionPad2d(1)
        self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias)
        self.norm2 = adaILN(dim)

    def forward(self, x, gamma, beta):
        # pad -> conv -> adaILN(gamma, beta) -> relu, then a second
        # pad/conv/norm, with the input added back as the residual.
        branch = self.relu1(self.norm1(self.conv1(self.pad1(x)), gamma, beta))
        branch = self.norm2(self.conv2(self.pad2(branch)), gamma, beta)
        return branch + x
class adaILN(nn.Module):
    """Adaptive Instance-Layer Normalization (UGATIT).

    Blends instance-norm and layer-norm statistics (optionally batch-norm
    too when ``using_bn``) with a learned softmax-weighted mixture ``rho``,
    then applies the externally supplied ``gamma``/``beta`` modulation.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.9, using_moving_average=True, using_bn=False):
        super(adaILN, self).__init__()
        self.eps = eps
        self.momentum = momentum
        self.using_moving_average = using_moving_average
        self.using_bn = using_bn
        self.num_features = num_features
        if self.using_bn:
            # Three mixture logits per channel: IN / LN / BN, biased toward IN.
            self.rho = Parameter(torch.Tensor(1, num_features, 3))
            self.rho[:,:,0].data.fill_(3)
            self.rho[:,:,1].data.fill_(1)
            self.rho[:,:,2].data.fill_(1)
            # NOTE(review): running_var starts at zero (not one); this is
            # what the code does — confirm it is intentional.
            self.register_buffer('running_mean', torch.zeros(1, num_features, 1,1))
            self.register_buffer('running_var', torch.zeros(1, num_features, 1,1))
            self.running_mean.zero_()
            self.running_var.zero_()
        else:
            # Two mixture logits per channel: IN / LN, biased toward IN.
            self.rho = Parameter(torch.Tensor(1, num_features, 2))
            self.rho[:,:,0].data.fill_(3.2)
            self.rho[:,:,1].data.fill_(1)

    def forward(self, input, gamma, beta):
        """Normalize ``input`` (N, C, H, W) and modulate with gamma/beta.

        gamma/beta are broadcast over the spatial dims via the two
        unsqueezes below (so they are expected rank-2 — per sample and
        channel; confirm against the caller).
        """
        # Instance-norm statistics: per sample, per channel.
        in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True)
        out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
        # Layer-norm statistics: per sample, over channels and positions.
        ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True)
        out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
        # Mixture weights sum to one across the last axis of rho.
        softmax = nn.Softmax(2)
        rho = softmax(self.rho)


        if self.using_bn:
            if self.training:
                # Batch-norm statistics computed over the batch.
                bn_mean, bn_var = torch.mean(input, dim=[0, 2, 3], keepdim=True), torch.var(input, dim=[0, 2, 3], keepdim=True)
                if self.using_moving_average:
                    # Exponential moving average of batch statistics.
                    self.running_mean.mul_(self.momentum)
                    self.running_mean.add_((1 - self.momentum) * bn_mean.data)
                    self.running_var.mul_(self.momentum)
                    self.running_var.add_((1 - self.momentum) * bn_var.data)
                else:
                    # Accumulate sums instead (E[x] and E[x^2]-style terms).
                    self.running_mean.add_(bn_mean.data)
                    self.running_var.add_(bn_mean.data ** 2 + bn_var.data)
            else:
                # Eval mode: use the stored running statistics.
                bn_mean = torch.autograd.Variable(self.running_mean)
                bn_var = torch.autograd.Variable(self.running_var)
            out_bn = (input - bn_mean) / torch.sqrt(bn_var + self.eps)
            # Reshape/broadcast the per-channel mixture weights to N,C,1,1.
            rho_0 = rho[:,:,0]
            rho_1 = rho[:,:,1]
            rho_2 = rho[:,:,2]
            rho_0 = rho_0.view(1, self.num_features, 1,1)
            rho_1 = rho_1.view(1, self.num_features, 1,1)
            rho_2 = rho_2.view(1, self.num_features, 1,1)
            rho_0 = rho_0.expand(input.shape[0], -1, -1, -1)
            rho_1 = rho_1.expand(input.shape[0], -1, -1, -1)
            rho_2 = rho_2.expand(input.shape[0], -1, -1, -1)
            out = rho_0 * out_in + rho_1 * out_ln + rho_2 * out_bn
        else:
            rho_0 = rho[:,:,0]
            rho_1 = rho[:,:,1]
            rho_0 = rho_0.view(1, self.num_features, 1,1)
            rho_1 = rho_1.view(1, self.num_features, 1,1)
            rho_0 = rho_0.expand(input.shape[0], -1, -1, -1)
            rho_1 = rho_1.expand(input.shape[0], -1, -1, -1)
            out = rho_0 * out_in + rho_1 * out_ln
        # External affine modulation, broadcast over H and W.
        out = out * gamma.unsqueeze(2).unsqueeze(3) + beta.unsqueeze(2).unsqueeze(3)

        return out
class ILN(nn.Module):
    """Instance-Layer Normalization with learned affine parameters.

    Same softmax-weighted IN/LN(/BN) mixture as adaILN, but gamma/beta are
    learned module parameters instead of being supplied per call.  Note the
    init biases the mixture towards LN here (the reverse of adaILN).
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.9, using_moving_average=True, using_bn=False):
        super(ILN, self).__init__()
        self.eps = eps
        self.momentum = momentum  # decay for the BN running statistics (EMA mode)
        self.using_moving_average = using_moving_average
        self.using_bn = using_bn
        self.num_features = num_features
        if self.using_bn:
            # Mixture logits per channel, axis 2 = [IN, LN, BN]; LN/BN favoured at init.
            self.rho = Parameter(torch.Tensor(1, num_features, 3))
            self.rho[:,:,0].data.fill_(1)
            self.rho[:,:,1].data.fill_(3)
            self.rho[:,:,2].data.fill_(3)
            self.register_buffer('running_mean', torch.zeros(1, num_features, 1,1))
            # NOTE(review): running_var starts at zero (not one) — confirm intended.
            self.register_buffer('running_var', torch.zeros(1, num_features, 1,1))
            self.running_mean.zero_()
            self.running_var.zero_()
        else:
            # Two-way mixture [IN, LN], biased towards LN initially.
            self.rho = Parameter(torch.Tensor(1, num_features, 2))
            self.rho[:,:,0].data.fill_(1)
            self.rho[:,:,1].data.fill_(3.2)
        # Learned affine parameters (identity transform at init).
        self.gamma = Parameter(torch.Tensor(1, num_features, 1, 1))
        self.beta = Parameter(torch.Tensor(1, num_features, 1, 1))
        self.gamma.data.fill_(1.0)
        self.beta.data.fill_(0.0)
    def forward(self, input):
        # Instance statistics: per sample, per channel, over H and W.
        in_mean, in_var = torch.mean(input, dim=[2, 3], keepdim=True), torch.var(input, dim=[2, 3], keepdim=True)
        out_in = (input - in_mean) / torch.sqrt(in_var + self.eps)
        # Layer statistics: per sample, over C, H and W.
        ln_mean, ln_var = torch.mean(input, dim=[1, 2, 3], keepdim=True), torch.var(input, dim=[1, 2, 3], keepdim=True)
        out_ln = (input - ln_mean) / torch.sqrt(ln_var + self.eps)
        # Convex mixture weights over the last axis of rho.
        softmax = nn.Softmax(2)
        rho = softmax(self.rho)
        if self.using_bn:
            if self.training:
                # Batch statistics: per channel, over N, H and W.
                bn_mean, bn_var = torch.mean(input, dim=[0, 2, 3], keepdim=True), torch.var(input, dim=[0, 2, 3], keepdim=True)
                if self.using_moving_average:
                    self.running_mean.mul_(self.momentum)
                    self.running_mean.add_((1 - self.momentum) * bn_mean.data)
                    self.running_var.mul_(self.momentum)
                    self.running_var.add_((1 - self.momentum) * bn_var.data)
                else:
                    # Plain accumulation instead of an EMA.
                    self.running_mean.add_(bn_mean.data)
                    self.running_var.add_(bn_mean.data ** 2 + bn_var.data)
            else:
                bn_mean = torch.autograd.Variable(self.running_mean)
                bn_var = torch.autograd.Variable(self.running_var)
            out_bn = (input - bn_mean) / torch.sqrt(bn_var + self.eps)
            # Broadcast each mixture weight to (N, C, 1, 1) and blend.
            rho_0 = rho[:,:,0]
            rho_1 = rho[:,:,1]
            rho_2 = rho[:,:,2]
            rho_0 = rho_0.view(1, self.num_features, 1,1)
            rho_1 = rho_1.view(1, self.num_features, 1,1)
            rho_2 = rho_2.view(1, self.num_features, 1,1)
            rho_0 = rho_0.expand(input.shape[0], -1, -1, -1)
            rho_1 = rho_1.expand(input.shape[0], -1, -1, -1)
            rho_2 = rho_2.expand(input.shape[0], -1, -1, -1)
            out = rho_0 * out_in + rho_1 * out_ln + rho_2 * out_bn
        else:
            rho_0 = rho[:,:,0]
            rho_1 = rho[:,:,1]
            rho_0 = rho_0.view(1, self.num_features, 1,1)
            rho_1 = rho_1.view(1, self.num_features, 1,1)
            rho_0 = rho_0.expand(input.shape[0], -1, -1, -1)
            rho_1 = rho_1.expand(input.shape[0], -1, -1, -1)
            out = rho_0 * out_in + rho_1 * out_ln
        # Learned affine, broadcast over the batch.
        out = out * self.gamma.expand(input.shape[0], -1, -1, -1) + self.beta.expand(input.shape[0], -1, -1, -1)
        return out
class Discriminator(nn.Module):
    """Two-scale PatchGAN discriminator head with a CAM auxiliary branch.

    Returns two patch logit maps with different receptive fields (out0, out1),
    a CAM classification logit, and an attention heatmap.
    NOTE(review): the first conv consumes ``ndf`` channels, not ``input_nc``
    (which is unused) — this looks like a stage that expects pre-encoded
    features rather than raw images; confirm against callers.
    """
    def __init__(self, input_nc, ndf=64, n_layers=7):
        super(Discriminator, self).__init__()
        # Shared stem: reflect-pad + spectral-norm conv, ndf -> ndf*2.
        model = [nn.ReflectionPad2d(1),
                 nn.utils.spectral_norm(
                     nn.Conv2d(int(ndf), ndf * 2, kernel_size=3, stride=1, padding=0, bias=True)),
                 nn.LeakyReLU(0.2, True)]
        # Class Activation Map
        mult = 2 ** (1)
        self.fc = nn.utils.spectral_norm(nn.Linear(ndf * mult * 2, 1, bias=False))
        self.conv1x1 = nn.Conv2d(ndf * mult * 2, ndf * mult, kernel_size=1, stride=1, bias=True)
        self.leaky_relu = nn.LeakyReLU(0.2, True)
        # Learned blend between the CAM-weighted path and the identity path.
        self.lamda = nn.Parameter(torch.zeros(1))
        # Shallow branch (smaller receptive field).
        Dis0_0 = []
        for i in range(2, n_layers - 4):  # 1+3*2^0 + 3*2^1 + 3*2^2 =22
            mult = 2 ** (i - 1)
            Dis0_0 += [nn.ReflectionPad2d(1),
                       nn.utils.spectral_norm(
                           nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=2, padding=0, bias=True)),
                       nn.LeakyReLU(0.2, True)]
        mult = 2 ** (n_layers - 4 - 1)
        Dis0_1 = [nn.ReflectionPad2d(1),  # 1+3*2^0 + 3*2^1 + 3*2^2 +3*2^3 = 46
                  nn.utils.spectral_norm(
                      nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=1, padding=0, bias=True)),
                  nn.LeakyReLU(0.2, True)]
        mult = 2 ** (n_layers - 4)
        self.conv0 = nn.utils.spectral_norm(  # 1+3*2^0 + 3*2^1 + 3*2^2 +3*2^3 + 3*2^3= 70
            nn.Conv2d(ndf * mult, 1, kernel_size=4, stride=1, padding=0, bias=False))
        # Deep branch (larger receptive field), continues from the shallow trunk.
        Dis1_0 = []
        for i in range(n_layers - 4, n_layers - 2):  # 1+3*2^0 + 3*2^1 + 3*2^2 + 3*2^3=46, 1+3*2^0 + 3*2^1 + 3*2^2 +3*2^3 +3*2^4 = 94
            mult = 2 ** (i - 1)
            Dis1_0 += [nn.ReflectionPad2d(1),
                       nn.utils.spectral_norm(
                           nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=2, padding=0, bias=True)),
                       nn.LeakyReLU(0.2, True)]
        mult = 2 ** (n_layers - 2 - 1)
        Dis1_1 = [nn.ReflectionPad2d(1),  # 1+3*2^0 + 3*2^1 + 3*2^2 +3*2^3 +3*2^4 + 3*2^5= 94 + 96 = 190
                  nn.utils.spectral_norm(
                      nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=1, padding=0, bias=True)),
                  nn.LeakyReLU(0.2, True)]
        mult = 2 ** (n_layers - 2)
        self.conv1 = nn.utils.spectral_norm(  # 1+3*2^0 + 3*2^1 + 3*2^2 +3*2^3 +3*2^4 + 3*2^5 + 3*2^5 = 286
            nn.Conv2d(ndf * mult, 1, kernel_size=4, stride=1, padding=0, bias=False))
        # self.attn = Self_Attn( ndf * mult)
        self.pad = nn.ReflectionPad2d(1)
        self.model = nn.Sequential(*model)
        self.Dis0_0 = nn.Sequential(*Dis0_0)
        self.Dis0_1 = nn.Sequential(*Dis0_1)
        self.Dis1_0 = nn.Sequential(*Dis1_0)
        self.Dis1_1 = nn.Sequential(*Dis1_1)
    def forward(self, input):
        x = self.model(input)
        x_0 = x  # keep the pre-CAM features for the residual blend below
        # CAM logit from global average + max pooled features.
        gap = torch.nn.functional.adaptive_avg_pool2d(x, 1)
        gmp = torch.nn.functional.adaptive_max_pool2d(x, 1)
        # NOTE(review): this concatenates x with itself rather than
        # gap/gmp-weighted maps (as in stock U-GAT-IT) — confirm intended.
        x = torch.cat([x, x], 1)
        cam_logit = torch.cat([gap, gmp], 1)
        cam_logit = self.fc(cam_logit.view(cam_logit.shape[0], -1))
        # Re-weight channels by the CAM fc weights, project back to ndf*2.
        weight = list(self.fc.parameters())[0]
        x = x * weight.unsqueeze(2).unsqueeze(3)
        x = self.conv1x1(x)
        # Learned residual blend (lamda starts at 0 => identity at init).
        x = self.lamda*x + x_0
        # print("lamda:",self.lamda)
        x = self.leaky_relu(x)
        heatmap = torch.sum(x, dim=1, keepdim=True)
        # Two heads sharing the Dis0_0 trunk.
        x0 = self.Dis0_0(x)
        x1 = self.Dis1_0(x0)
        x0 = self.Dis0_1(x0)
        x1 = self.Dis1_1(x1)
        x0 = self.pad(x0)
        x1 = self.pad(x1)
        out0 = self.conv0(x0)
        out1 = self.conv1(x1)
        return out0, out1, cam_logit, heatmap
def convrelu(in_channels, out_channels, kernel, padding):
    """Return a Conv2d followed by an in-place ReLU, packed in a Sequential."""
    conv = nn.Conv2d(in_channels, out_channels, kernel, padding=padding)
    activation = nn.ReLU(inplace=True)
    return nn.Sequential(conv, activation)
class Residual(nn.Module):
    """Skip-connection wrapper: forward(x) = fn(x) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        inner = self.fn(x)
        return inner + x
class Rezero(nn.Module):
    """ReZero wrapper: scales fn(x) by a learned scalar g initialized to 0,
    so the wrapped branch contributes nothing at the start of training."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn
        self.g = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        branch = self.fn(x)
        return branch * self.g
# Factory for an attention + feed-forward stage over `chan` channels.
# Each sub-layer is wrapped in Rezero (zero-initialized scale) and a residual
# connection, so the stage starts as an identity mapping.
attn_and_ff = lambda chan: nn.Sequential(*[
    Residual(Rezero(ImageLinearAttention(chan, norm_queries = True))),
    Residual(Rezero(nn.Sequential(nn.Conv2d(chan, chan * 2, 1), nn.LeakyReLU(0.2, True), nn.Conv2d(chan * 2, chan, 1))))
])
class Encoder(nn.Module):
    """ResNet-18-backed UNet encoder/decoder.

    Inserts linear-attention + feed-forward stages after each backbone stage
    and returns the intermediate backbone features E1..E4 (for an external
    decoder) along with the UNet output `out` (n_class channels at input
    resolution).
    """
    def __init__(self, n_class):
        super().__init__()
        # Pretrained ResNet-18 split into its sequential stages.
        self.base_model = models.resnet18(pretrained=True)
        self.base_layers = list(self.base_model.children())
        self.layer0 = nn.Sequential(*self.base_layers[:3])  # size=(N, 64, x.H/2, x.W/2)
        self.layer0_1x1 = convrelu(64, 64, 1, 0)
        self.layer1 = nn.Sequential(*self.base_layers[3:5])  # size=(N, 64, x.H/4, x.W/4)
        self.layer1_1x1 = convrelu(64, 64, 1, 0)
        self.layer2 = self.base_layers[5]  # size=(N, 128, x.H/8, x.W/8)
        self.layer2_1x1 = convrelu(128, 128, 1, 0)
        self.layer3 = self.base_layers[6]  # size=(N, 256, x.H/16, x.W/16)
        self.layer3_1x1 = convrelu(256, 256, 1, 0)
        self.layer4 = self.base_layers[7]  # size=(N, 512, x.H/32, x.W/32)
        self.layer4_1x1 = convrelu(512, 512, 1, 0)
        # Attention/FF stages, one per backbone stage width.
        self.attn_fn1 = attn_and_ff(64)
        self.attn_fn2 = attn_and_ff(128)
        self.attn_fn3 = attn_and_ff(256)
        self.attn_fn4 = attn_and_ff(512)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # Decoder convs; input channels = upsampled channels + skip channels.
        self.conv_up3 = convrelu(256 + 512, 512, 3, 1)
        self.conv_up2 = convrelu(128 + 512, 256, 3, 1)
        self.conv_up1 = convrelu(64 + 256, 256, 3, 1)
        self.conv_up0 = convrelu(64 + 256, 128, 3, 1)
        # Full-resolution path on the raw input.
        self.conv_original_size0 = convrelu(3, 64, 3, 1)
        self.conv_original_size1 = convrelu(64, 64, 3, 1)
        self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1)
        self.conv_last = nn.Conv2d(64, n_class, 1)
    def forward(self, input):
        # Full-resolution features from the raw input (merged at the end).
        x_original = self.conv_original_size0(input)
        x_original = self.conv_original_size1(x_original)
        # Backbone stages; E1..E4 are captured BEFORE the attention stages.
        layer0 = self.layer0(input)
        layer1 = self.layer1(layer0)
        E1 = layer1
        layer1 = self.attn_fn1(layer1)
        layer2 = self.layer2(layer1)
        E2 = layer2
        layer2 = self.attn_fn2(layer2)
        layer3 = self.layer3(layer2)
        E3 = layer3
        layer3 = self.attn_fn3(layer3)
        layer4 = self.layer4(layer3)
        E4 = layer4
        layer4 = self.attn_fn4(layer4)
        # UNet decoder: upsample, concatenate the 1x1-projected skip, conv.
        layer4 = self.layer4_1x1(layer4)
        x = self.upsample(layer4)
        layer3 = self.layer3_1x1(layer3)
        x = torch.cat([x, layer3], dim=1)
        x = self.conv_up3(x)
        x = self.upsample(x)
        layer2 = self.layer2_1x1(layer2)
        x = torch.cat([x, layer2], dim=1)
        x = self.conv_up2(x)
        x = self.upsample(x)
        layer1 = self.layer1_1x1(layer1)
        x = torch.cat([x, layer1], dim=1)
        x = self.conv_up1(x)
        x = self.upsample(x)
        layer0 = self.layer0_1x1(layer0)
        x = torch.cat([x, layer0], dim=1)
        x = self.conv_up0(x)
        x = self.upsample(x)
        # Merge with the full-resolution path and predict per-pixel classes.
        x = torch.cat([x, x_original], dim=1)
        x = self.conv_original_size2(x)
        out = self.conv_last(x)
        return E1, E2, E3, E4, out
|
118511
|
import os

# Database (MySQL) configuration; values fall back to local-dev defaults
# when the corresponding environment variable is not set.
MYSQL_HOST = os.getenv('MYSQL_HOST', 'localhost')
MYSQL_PORT = os.getenv('MYSQL_PORT', '3306')
MYSQL_DATABASE = os.getenv('MYSQL_DATABASE', 'my-site')
MYSQL_USER = os.getenv('MYSQL_USER', 'admin')
MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD', '<PASSWORD>')

# Redis configuration.
REDIS_HOST = os.getenv('REDIS_HOST', '127.0.0.1')
REDIS_PORT = os.getenv('REDIS_PORT', '6379')
|
118514
|
import pandas as pd
def resample_and_merge(df_blocks: pd.DataFrame,
                       df_prices: pd.DataFrame,
                       dict_params: dict,
                       freq: str = "5T"):
    """Resample block metrics and prices to ``freq`` and merge them.

    Adds a 24h-lagged copy of ``mean_gas_price``, forward-fills the ETH price
    onto the block index, and drops the first day of rows (they contain NaNs
    introduced by the lag).

    :param df_blocks: time-indexed per-block metrics (needs ``mean_gas_price``)
    :param df_prices: time-indexed price data (needs ``eth_usd_price``)
    :param dict_params: column -> resampling op ("mean" or "last")
    :param freq: pandas offset alias, e.g. "5T" for 5 minutes
    :return: merged, resampled DataFrame
    """
    df = resample(df_blocks, freq, dict_params)
    df_prices = resample(df_prices, freq, dict_params)
    # Number of resampled periods in 24h. Previously hard-coded to 288, which
    # is only correct for the default 5-minute frequency; derive it from freq.
    lag = int(pd.Timedelta(days=1) / pd.Timedelta(pd.tseries.frequencies.to_offset(freq)))
    # 24h lagged variable of the mean gas price.
    df["mean_gas_price_24h_lagged"] = df["mean_gas_price"].shift(lag, axis=0)
    # Incorporate the eth price into our main DataFrame.
    df["eth_usd_price"] = df_prices["eth_usd_price"]
    df["eth_usd_price"] = df["eth_usd_price"].ffill()
    # Drop the first day: those rows hold NaNs because of the shift above.
    return df.iloc[lag:]
def resample(df: pd.DataFrame,
             freq: str,
             dict_params: dict) -> pd.DataFrame:
    """Resample every configured column of ``df`` at frequency ``freq``.

    :param df: time-indexed DataFrame
    :param freq: pandas offset alias, e.g. '5T' for 5 minutes
    :param dict_params: column name -> op, where op is "mean" or "last"
    :return: DataFrame of the resampled columns (right-labelled bins)
    """
    pieces = []
    for name in df.columns:
        if name not in dict_params:
            continue
        op = dict_params[name]
        resampler = df[name].resample(freq, label="right")
        if op == "mean":
            pieces.append(resampler.mean())
        elif op == "last":
            pieces.append(resampler.last())
        else:
            raise RuntimeError(f"{op} is not a valid resampling operation:"
                               f" currently supported are 'mean' or 'last'")
    return pd.concat(pieces, axis=1)
def clip_bounds(df: pd.DataFrame,
                dict_params: pd.DataFrame) -> pd.DataFrame:
    """Clip configured columns of ``df`` to fixed [min, max] bounds.

    :param df: the df to clip (modified in place and returned)
    :param dict_params: ex : {col1: {'min': 0, 'max': 30}, col2: {'min': -10, 'max': 80}}
    :return: ``df`` with the configured columns clipped
    """
    for col in df.columns:
        if col not in dict_params:
            continue
        bounds = dict_params[col]
        df[col] = df[col].clip(lower=bounds["min"], upper=bounds["max"],
                               axis=0, inplace=False)
    return df
def clip_std(df: pd.DataFrame,
             dict_params: pd.DataFrame) -> pd.DataFrame:
    """Iteratively clip configured columns to mean ± k*std until stable.

    Each pass recomputes mean/std on the already-clipped data; iteration
    stops once another clip would no longer move any value meaningfully
    (max squared change < 0.01), and that final no-op clip is discarded.

    :param df: the df to clip (modified in place and returned)
    :param dict_params: ex : {col1: 1.5, col2: 2}
    :return: ``df`` with the configured columns clipped
    """
    for col in df.columns:
        if col not in dict_params:
            continue
        k = dict_params[col]
        while True:
            mu = df[col].mean()
            sigma = df[col].std()
            candidate = df[col].clip(lower=mu - k * sigma, upper=mu + k * sigma,
                                     axis=0, inplace=False)
            if ((candidate - df[col]) ** 2).max() < 0.01:
                break
            df[col] = candidate
    return df
|
118522
|
from mango.relations import base
from mango.relations.constants import CASCADE
__all__ = [
"Collection",
]
class Collection(base.Relation):
    """A relation declared on the source model only.

    Collections allow for a relation to be created on the source model
    only. The message attribute of the target is used to connect to the
    source attribute. This means that collection relations are always
    reverse relations.
    """

    def __init__(
        self,
        cls=None,
        name=None,
        multi=True,
        hidden=False,
        persist=True,
        typed=False,
        validators=(),
        on_delete=CASCADE
    ):
        super(Collection, self).__init__(
            cls=cls,
            name=name,
            multi=multi,
            hidden=hidden,
            persist=persist,
            typed=typed,
            validators=validators,
            on_delete=on_delete,
            # The target side always connects back through its hidden
            # "message" attribute; there is no forward relation object.
            rev_name="message",
            rev_hidden=True,
            rev_relation=None,
        )
        self.rev = True
|
118534
|
import aiounittest
import copy
import pydantic
from app.github.webhook_model import Webhook
# Sample payload for a repository "renamed" webhook; the previous repository
# name travels under changes -> repository -> name -> from.
data = {
    "action": 'renamed',
    "pull_request": {
        "merged": True
    },
    "repository": {
        "full_name": "organization/project",
        "lastName": "p",
        "age": 71
    },
    "changes": {
        "repository": {
            "name": {
                "from": 'project2'
            }
        }
    }
}
# Sample payload for a repository "created" webhook (no "changes" section).
data2 = {
    "action": 'created',
    "repository": {
        "full_name": "organization/project",
        "lastName": "p",
        "age": 71
    }
}
class TestWebhookValidator(aiounittest.AsyncTestCase):
    """Tests for the Webhook model's import/delete decision logic."""

    def test_good_import(self):
        payload = copy.deepcopy(data2)
        event = Webhook(**payload)
        self.assertTrue(event.requires_import())
        self.assertEqual(False, event.requires_delete())

    def test_good_delete(self):
        payload = copy.deepcopy(data2)
        payload['action'] = 'deleted'
        event = Webhook(**payload)
        self.assertTrue(event.requires_delete())
        self.assertEqual(False, event.requires_import())

    def test_good_renamed(self):
        # A rename implies deleting the old record and importing the new one.
        event = Webhook(**copy.deepcopy(data))
        self.assertTrue(event.requires_delete())
        self.assertTrue(event.requires_import())

    def test_repo_name(self):
        # An empty full_name must be rejected by the model validators.
        payload = copy.deepcopy(data2)
        payload['repository']['full_name'] = ''
        raised = False
        try:
            event = Webhook(**payload)
        except pydantic.ValidationError as e:
            raised = True
        self.assertTrue(raised)
|
118562
|
import argparse
import os
import re
import yaml
# Maps raw "${VAR}" scalar strings to their expanded values; populated by
# Loader.path_constructor as documents are parsed.
_ENV_EXPAND = {}
def nested_set(dic, keys, value, existed=False):
    """Set ``dic[k0][k1]...[kn] = value`` along the key path ``keys``.

    With ``existed=True`` the final key must already be present; the value is
    then coerced to the type of the existing entry, and a RuntimeError is
    raised if the key is missing.
    """
    *parents, leaf = keys
    for key in parents:
        dic = dic[key]
    if existed:
        if leaf not in dic:
            raise RuntimeError('{} does not exist in the dict'.format(leaf))
        value = type(dic[leaf])(value)
    dic[leaf] = value
class LoaderMeta(type):
    """Metaclass that registers the `!include` and `!path` constructors."""
    def __new__(mcs, __name__, __bases__, __dict__):
        """Create the loader class and attach its custom YAML constructors."""
        # register the include constructor on the class
        cls = super().__new__(mcs, __name__, __bases__, __dict__)
        # `!include` inlines another file; `!path` expands ${ENV_VAR}s.
        cls.add_constructor('!include', cls.construct_include)
        cls.add_constructor('!path', cls.path_constructor)
        return cls
class Loader(yaml.Loader, metaclass=LoaderMeta):
    """YAML Loader with `!include` and `!path` constructor.
    '_default' is reserved for override.
    'xxx.yyy.zzz' is parsed for overriding.
    """
    def __init__(self, stream):
        # Remember the including file's directory so `!include` can resolve
        # relative paths; fall back to cwd for name-less streams.
        try:
            self._root = os.path.split(stream.name)[0]
        except AttributeError:
            self._root = os.path.curdir
        super().__init__(stream)
        # Any scalar containing ${VAR} is implicitly handled as `!path`.
        path_matcher = re.compile(r'.*\$\{([^}^{]+)\}.*')
        self.add_implicit_resolver('!path', path_matcher, None)
    def construct_include(self, node):
        """Include file referenced at node."""
        # Environment variables in the include path are expanded first.
        filename_related = os.path.expandvars(self.construct_scalar(node))
        filename = os.path.abspath(os.path.join(self._root, filename_related))
        extension = os.path.splitext(filename)[1].lstrip('.')
        with open(filename, 'r') as f:
            if extension in ('yaml', 'yml'):
                return yaml.load(f, Loader)
            # Non-YAML includes are inlined as raw text.
            return ''.join(f.readlines())
    def path_constructor(self, node):
        # Expand ${VAR}s in the scalar and record the mapping for inspection.
        src = node.value
        res = os.path.expandvars(src)
        _ENV_EXPAND[src] = res
        return res
    def get_single_data(self, *args, **kwargs):
        # Merge the document over its '_default' section, then apply dotted
        # overrides: a top-level key 'a.b.c' sets default['a']['b']['c'].
        res = super(Loader, self).get_single_data(*args, **kwargs)
        default = res.pop('_default', {})
        default.update(res)
        for key, val in list(default.items()):
            keys = key.split('.')
            if len(keys) != 1:
                default.pop(key)
                nested_set(default, keys, val)
        return default
class AttrDict(dict):
    """Dict whose keys are also attributes, applied recursively.

    Nested dicts become AttrDicts, and lists of dicts become lists of
    AttrDicts. ``yaml()`` converts back to plain dicts; ``__repr__`` renders
    an indented, YAML-like view.
    """
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        # Attribute access and item access share the same storage.
        self.__dict__ = self
        for key in self.__dict__:
            value = self.__dict__[key]
            if isinstance(value, dict):
                self.__dict__[key] = AttrDict(value)
            elif isinstance(value, list):
                # Bug fix: guard `value` being empty — `value[0]` raised
                # IndexError for empty lists.
                if value and isinstance(value[0], dict):
                    self.__dict__[key] = [AttrDict(item) for item in value]
                else:
                    self.__dict__[key] = value
    def yaml(self):
        """Convert object back to a plain (yaml-serializable) dict."""
        yaml_dict = {}
        for key in self.__dict__:
            value = self.__dict__[key]
            if isinstance(value, AttrDict):
                yaml_dict[key] = value.yaml()
            elif isinstance(value, list):
                # Same empty-list guard as in __init__.
                if value and isinstance(value[0], AttrDict):
                    new_l = []
                    for item in value:
                        new_l.append(item.yaml())
                    yaml_dict[key] = new_l
                else:
                    yaml_dict[key] = value
            else:
                yaml_dict[key] = value
        return yaml_dict
    def __repr__(self):
        """Render all variables as an indented, YAML-like listing."""
        ret_str = []
        for key in self.__dict__:
            value = self.__dict__[key]
            if isinstance(value, AttrDict):
                ret_str.append('{}:'.format(key))
                child_ret_str = value.__repr__().split('\n')
                for item in child_ret_str:
                    ret_str.append('    ' + item)
            elif isinstance(value, list):
                # Same empty-list guard as in __init__.
                if value and isinstance(value[0], AttrDict):
                    ret_str.append('{}:'.format(key))
                    for item in value:
                        # treat as AttrDict above
                        child_ret_str = item.__repr__().split('\n')
                        for item in child_ret_str:
                            ret_str.append('    ' + item)
                else:
                    ret_str.append('{}: {}'.format(key, value))
            else:
                ret_str.append('{}: {}'.format(key, value))
        return '\n'.join(ret_str)
class Config(AttrDict):
    """Config with yaml file.
    This class is used to config model hyper-parameters, global constants, and
    other settings with yaml file. All settings in yaml file will be
    automatically logged into file.
    Args:
        filename(str): File name.
    Examples:
        yaml file ``model.yml``::
            NAME: 'neuralgym'
            ALPHA: 1.0
            DATASET: '/mnt/data/imagenet'
        Usage in .py:
            >>> from neuralgym import Config
            >>> config = Config('model.yml')
            >>> print(config.NAME)
            neuralgym
            >>> print(config.ALPHA)
            1.0
            >>> print(config.DATASET)
            /mnt/data/imagenet
    """
    def __init__(self, filename=None, verbose=False):
        assert os.path.exists(filename), 'File {} not exist.'.format(filename)
        try:
            with open(filename, 'r') as f:
                cfg_dict = yaml.load(f, Loader)
        except EnvironmentError as e:
            # Bug fix: the message previously passed printf-style args to
            # print(), which printed the literal "%s"; format explicitly.
            print('Please check the file with name of "%s"' % filename)
            raise e
        # Record where the config came from alongside its contents.
        cfg_dict['config_path'] = filename
        super(Config, self).__init__(cfg_dict)
        if verbose:
            print(' pi.cfg '.center(80, '-'))
            print(self.__repr__())
            print(''.center(80, '-'))
def parse_args():
    """Parse ``--cfg`` plus trailing ``--a.b.c value`` overrides into a Config."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='configs/seg.yaml')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    cfg = Config(args.cfg)
    # Overrides come in key/value pairs, so an odd count is malformed.
    if len(args.opts) % 2 == 1:
        raise RuntimeError('Override params should be key/val')
    for key, val in zip(args.opts[0::2], args.opts[1::2]):
        if not key.startswith('--'):
            raise RuntimeError('Override key should start with `--`')
        # '--a.b.c' addresses cfg['a']['b']['c']; the key must already exist.
        nested_set(cfg, key[2:].split('.'), val, existed=True)
    return cfg
|
118566
|
import argparse
import os
from util import util
from ipdb import set_trace as st
# for gray scal : input_nc, output_nc, ngf, ndf, gpu_ids, batchSize, norm
class BaseOptions():
    """Command-line options for the CollaGAN BRATS experiments.

    Builds the argparse parser lazily (``initialize``), parses and persists
    the options to ``<savepath>/<name>/opt.txt`` (``parse``), and can reload
    a previously saved opt.txt (``load_opts``).
    """
    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        self.initialized = False
    def initialize(self):
        """Register every command-line option on the parser."""
        self.parser.add_argument('--G', type=str, default='UnetINDiv4_CCAM', help='choice of network for Generator')
        self.parser.add_argument('--dataroot', type=str, default='./../../Hdd_DATA/BRATS2015_mat_std_sbjnorm', help='data root')
        self.parser.add_argument('--savepath', type=str, default='./results', help='savepath')
        self.parser.add_argument('--nEpoch', type=int, default=1000, help='number of Epoch iteration')
        self.parser.add_argument('--lr', type=float, default=0.00001, help='learning rate')
        self.parser.add_argument('--lr_D', type=float, default=0.00001, help='learning rate for D')
        self.parser.add_argument('--lr_C', type=float, default=0.00001, help='learning rate for C')
        self.parser.add_argument('--disp_div_N', type=int, default=100, help=' display N per epoch')
        self.parser.add_argument('--nB', type=int, default=1, help='input batch size')
        self.parser.add_argument('--DB_small', action='store_true', help='use small DB')
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0  0,1,2, 0,2.')
        self.parser.add_argument('--name', type=str, default='demo_exp_CollaGAN_BRATS', help='name of the experiment. It decides where to store samples and models')
        self.parser.add_argument('--w_decay', type=float, default=0.01, help='weight decay for generator')
        self.parser.add_argument('--w_decay_D', type=float, default=0., help='weight decay for discriminator')
        self.parser.add_argument('--lambda_l1_cyc', type=float, default=10, help='lambda_L1_cyc, StarGAN cyc loss rec')
        self.parser.add_argument('--lambda_l2_cyc', type=float, default=0., help='lambda_L2_cyc, StarGAN cyc loss rec')
        self.parser.add_argument('--lambda_ssim_cyc', type=float, default=1., help='lambda_ssim')
        self.parser.add_argument('--lambda_l2', type=float, default=0., help='lambda_L2')
        self.parser.add_argument('--lambda_l1', type=float, default=0., help='lambda_L1')
        self.parser.add_argument('--lambda_ssim', type=float, default=0., help='lambda_ssim')
        self.parser.add_argument('--lambda_GAN', type=float, default=1., help='lambda GAN')
        self.parser.add_argument('--lambda_G_clsf', type=float, default=1., help='generator classification loss. fake to be well classified')
        self.parser.add_argument('--lambda_D_clsf', type=float, default=1., help='discriminator classification loss. fake to be well classified')
        self.parser.add_argument('--lambda_cyc', type=float, default=1, help='lambda_cyc')
        self.parser.add_argument('--nEpochDclsf', type=int, default=0, help='# of nEpoch for Discriminator pretrain')
        self.parser.add_argument('--nCh_D', type=int, default=4, help='# of ngf for Discriminator')
        self.parser.add_argument('--nCh_C', type=int, default=16, help='# of ngf for Classifier')
        self.parser.add_argument('--use_lsgan', action='store_true', help='use lsgan, if not defualt GAN')
        self.parser.add_argument('--use_1x1Conv', action='store_true', help='use 1x1Conv, if not defualt 3x3conv')
        self.parser.add_argument('--wo_norm_std', action='store_true', help='NOT use std normalization')
        self.parser.add_argument('--N_null', type=int, default=1, help='# of nulling in input images')
        self.parser.add_argument('--ngf', type=int, default=64, help=' ngf')
        self.parser.add_argument('--dropout', type=float, default=0.5, help='droptout ')
        self.parser.add_argument('--test_mode', action='store_true', help='not train. just test')
        self.parser.add_argument('--AUG', action='store_true', help='use augmentation')
        self.parser.add_argument('--nEpochD', type=int, default=2, help = 'nEpochD update while 1 G update')
        self.initialized = True
    def parse(self):
        """Parse sys.argv, echo the options, and persist them to opt.txt."""
        if not self.initialized:
            self.initialize()
        self.opt = self.parser.parse_args()
        #self.opt.isTrain = self.isTrain  # train or test
        # '0,1,2' -> [0, 1, 2]; negative ids are dropped (CPU-only marker).
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                self.opt.gpu_ids.append(id)
        # set gpu ids
        #if len(self.opt.gpu_ids) > 0:
        #    torch.cuda.set_device(self.opt.gpu_ids[0])
        args = vars(self.opt)
        print('------------ Options -------------')
        for k, v in sorted(args.items()):
            print('%s: %s' % (str(k), str(v)))
        print('-------------- End ----------------')
        # save to the disk
        expr_dir = os.path.join(self.opt.savepath, self.opt.name)
        if not os.path.exists(expr_dir):
            os.makedirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
        return self.opt
    @staticmethod
    def load_opts(opt, exp_name):
        """Overlay options saved in ``<savepath>/<exp_name>/opt.txt`` onto ``opt``."""
        #optLists = ['model','dataroot','savepath','nEpoch','lr','disp_div_N','batchSize','input_nc','gpu_ids','name','use_residual','no_flip','lambda_cost','weight_decay','use_dropout','optimizer','ri','normalize']
        exp_dir = os.path.join(opt.savepath, exp_name)
        with open(os.path.join(exp_dir, 'opt.txt'), 'r') as opt_file:
            for aLine in opt_file.readlines():
                idx = aLine.find(':')
                if idx == -1:
                    continue
                else:
                    cur_opt = aLine[:idx]
                    cur_val = aLine[idx+2:-1]
                    if cur_opt == 'model':
                        opt.model = cur_val
                    elif cur_opt == 'dataroot':
                        opt.dataroot = cur_val
                    elif cur_opt == 'savepath':
                        opt.savepath = cur_val
                    elif cur_opt == 'nEpoch':
                        # Bug fix: previously overwrote opt.savepath here.
                        opt.nEpoch = int(cur_val)
                    elif cur_opt == 'lr':
                        opt.lr = float(cur_val)
                    elif cur_opt == 'disp_div_N':
                        opt.disp_div_N = int(cur_val)
                    elif cur_opt == 'batchSize':
                        opt.batchSize = int(cur_val)
                    elif cur_opt == 'input_nc':
                        opt.input_nc = int(cur_val)
                    elif cur_opt == 'gpu_ids':
                        # Stored as '[N]'; strip the brackets and parse.
                        cur_val = cur_val[1:-1]
                        opt.gpu_ids = [int(cur_val)]
                        print('Use GPU id......')
                    elif cur_opt == 'name':
                        opt.name = cur_val
                    elif cur_opt == 'use_residual':
                        opt.use_residual = (cur_val == 'True')
                    elif cur_opt == 'no_flip':
                        # Bug fix: previously assigned opt.use_residual here.
                        opt.no_flip = (cur_val == 'True')
                    elif cur_opt == 'lambda_cost':
                        opt.lambda_cost = float(cur_val)
                    elif cur_opt == 'weight_decay':
                        opt.weight_decay = float(cur_val)
                    elif cur_opt == 'use_dropout':
                        opt.use_dropout = (cur_val == 'True')
                    elif cur_opt == 'optimizer':
                        opt.optimizer = cur_val
                    elif cur_opt == 'ri':
                        opt.ri = (cur_val == 'True')
                    elif cur_opt == 'normalize':
                        opt.normalize = (cur_val == 'True')
                    else:
                        # Unknown option in opt.txt: drop into the debugger.
                        st()
        return opt
|
118578
|
import tensorflow as tf
import numpy as np
class TFPositionalEncoding2D(tf.keras.layers.Layer):
    """Keras layer producing 2-D sinusoidal positional encodings."""
    def __init__(self, channels:int, return_format:str="pos", dtype=tf.float32):
        """
        Args:
            channels int: The last dimension of the tensor you want to apply pos emb to.
        Keyword Args:
            return_format str: Return either the position encoding "pos" or the sum
                of the inputs with the position encoding "sum". Default is "pos".
            dtype: output type of the encodings. Default is "tf.float32".
        """
        super(TFPositionalEncoding2D, self).__init__()
        if return_format not in ["pos", "sum"]:
            # Bug fix: message previously read 'unkown' and was missing the
            # closing quote after "sum".
            raise ValueError(f'"{return_format}" is an unknown return format. Value must be "pos" or "sum".')
        self.return_format = return_format
        # Round up to an even split so sin/cos pairs divide evenly per axis.
        self.channels = int(2 * np.ceil(channels/4))
        self.inv_freq = np.float32(1 / np.power(10000, np.arange(0, self.channels, 2) / np.float32(self.channels)))
    @tf.function
    def call(self, inputs):
        """
        :param tensor: A 4d tensor of size (batch_size, x, y, ch)
        :return: Positional Encoding Matrix of size (batch_size, x, y, ch)
        """
        if len(inputs.shape)!=4:
            raise RuntimeError("The input tensor has to be 4d!")
        _, x, y, org_channels = inputs.shape
        dtype = self.inv_freq.dtype
        pos_x = tf.range(x, dtype=dtype)
        pos_y = tf.range(y, dtype=dtype)
        sin_inp_x = tf.einsum("i,j->ij", pos_x, self.inv_freq)
        sin_inp_y = tf.einsum("i,j->ij", pos_y, self.inv_freq)
        # Per-axis sin/cos embeddings, tiled across the opposite axis.
        emb_x = tf.expand_dims(tf.concat((tf.sin(sin_inp_x), tf.cos(sin_inp_x)), -1),1)
        emb_y = tf.expand_dims(tf.concat((tf.sin(sin_inp_y), tf.cos(sin_inp_y)), -1),0)
        emb_x = tf.tile(emb_x, (1,y,1))
        emb_y = tf.tile(emb_y, (x,1,1))
        emb = tf.concat((emb_x, emb_y),-1)
        # Trim to the caller's channel count and broadcast over the batch.
        pos_enc = tf.repeat(emb[None, :, :, :org_channels], tf.shape(inputs)[0], axis=0)
        if self.return_format == "pos":
            return pos_enc
        elif self.return_format == "sum":
            return inputs + pos_enc
|
118663
|
import time
import pickle
import os
path = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): the computed path above is immediately overridden by this
# hard-coded, machine-specific directory — confirm the override is intentional.
path ="F:/learnning/ai/data"
def backupSave(data, fname):
    """Pickle ``data`` into trainData/ with a timestamp suffix.

    :param data: any picklable object
    :param fname: base file name (timestamp and ``.data`` are appended)
    :return: the full path of the written file
    """
    now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
    fullName = path + "/trainData/" + fname + "_" + now + ".data"
    # `with` guarantees the handle is closed even if pickling raises.
    with open(fullName, 'wb') as f:
        pickle.dump(data, f)
    return fullName
def pushHumanSave(data, fname):
    """Pickle ``data`` into selfCreateData/ under a fixed (non-timestamped) name.

    :param data: any picklable object
    :param fname: base file name (``_.data`` is appended)
    :return: the full path of the written file
    """
    fullName = path + "/selfCreateData/" + fname + "_" + ".data"
    # `with` guarantees the handle is closed even if pickling raises.
    with open(fullName, 'wb') as f:
        pickle.dump(data, f)
    return fullName
def backupLoad(fname):
    """Unpickle and return the object stored at ``fname``.

    Uses a context manager so the file is closed even if unpickling raises.
    NOTE: pickle is unsafe on untrusted files — only load files this
    program wrote itself.
    """
    with open(fname, 'rb') as f:
        return pickle.load(f)
#s = [1, 2, 3, 4, 5]
#fName = save(s,"test")
#print(fName)
#print(load(fName))
|
118684
|
import torch
from Utils import *
import torchvision
import math
import numpy as np
import faiss
from Utils import LogText
import clustering
from scipy.optimize import linear_sum_assignment
import imgaug.augmenters as iaa
import imgaug.augmentables.kps
class SuperPoint():
def __init__(self, number_of_clusters, confidence_thres_superpoint,nms_thres_superpoint,path_to_pretrained_superpoint,experiment_name,log_path,remove_superpoint_outliers_percentage,use_box=False,UseScales=False,RemoveBackgroundClusters=False):
self.path_to_pretrained_superpoint=path_to_pretrained_superpoint
self.use_box=use_box
self.confidence_thres_superpoint=confidence_thres_superpoint
self.nms_thres_superpoint=nms_thres_superpoint
self.log_path=log_path
self.remove_superpoint_outliers_percentage=remove_superpoint_outliers_percentage
self.experiment_name=experiment_name
self.number_of_clusters=number_of_clusters
self.model = Cuda(SuperPointNet())
self.UseScales=UseScales
self.RemoveBackgroundClusters=RemoveBackgroundClusters
if(self.UseScales):
self.SuperpointUndoScaleDistill1 = iaa.Affine(scale={"x": 1 / 1.3, "y": 1 / 1.3})
self.SuperpointUndoScaleDistill2 = iaa.Affine(scale={"x": 1 / 1.6, "y": 1 / 1.6})
try:
checkpoint = torch.load(path_to_pretrained_superpoint, map_location='cpu')
self.model.load_state_dict(checkpoint)
LogText(f"Superpoint Network from checkpoint {path_to_pretrained_superpoint}", self.experiment_name, self.log_path)
except:
raise Exception(f"Superpoint weights from {path_to_pretrained_superpoint} failed to load.")
self.softmax = torch.nn.Softmax(dim=1)
self.pixelSuffle = torch.nn.PixelShuffle(8)
self.model.eval()
def CreateInitialPseudoGroundtruth(self, dataloader):
LogText(f"Extraction of initial Superpoint pseudo groundtruth", self.experiment_name,self.log_path)
imagesize=256
heatmapsize=64
numberoffeatures=256
buffersize=500000
#allocation of 2 buffers for temporal storing of keypoints and descriptors.
Keypoint_buffer = torch.zeros(buffersize, 3)
Descriptor__buffer = torch.zeros(buffersize, numberoffeatures)
#arrays on which we save buffer content periodically. Corresponding files are temporal and
#will be deleted after the completion of the process
CreateFileArray(str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'keypoints'),3)
CreateFileArray(str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'descriptors'), numberoffeatures)
#intermediate variables
first_index = 0
last_index = 0
buffer_first_index = 0
buffer_last_index = 0
keypoint_indexes = {}
LogText(f"Inference of Keypoints begins", self.experiment_name, self.log_path)
for i_batch, sample in enumerate(dataloader):
input = Cuda(sample['image_gray'])
names = sample['filename']
bsize=input.size(0)
if(self.UseScales):
input=input.view(-1,1,input.shape[2],input.shape[3])
with torch.no_grad():
detectorOutput,descriptorOutput=self.GetSuperpointOutput(input)
if(self.UseScales):
detectorOutput=detectorOutput.view(bsize,-1,detectorOutput.shape[2],detectorOutput.shape[3])
input=input.view(bsize,-1,input.shape[2],input.shape[3])
descriptorOutput=descriptorOutput.view(bsize,-1,descriptorOutput.size(1),descriptorOutput.size(2),descriptorOutput.size(3))[:,0]
for i in range(0, bsize):
keypoints = self.GetPoints(detectorOutput[i].unsqueeze(0), self.confidence_thres_superpoint, self.nms_thres_superpoint)
if (self.RemoveBackgroundClusters):
bounding_box=sample['bounding_box'][i]
pointsinbox = torch.ones(len(keypoints))
pointsinbox[(keypoints[:, 0] < int(bounding_box[0]))] = -1
pointsinbox[(keypoints[:, 1] < int(bounding_box[1]))] = -1
pointsinbox[(keypoints[:, 0] > int(bounding_box[2]))] = -1
pointsinbox[(keypoints[:, 1] > int(bounding_box[3]))] = -1
elif (self.use_box):
bounding_box=sample['bounding_box'][i]
pointsinbox = torch.ones(len(keypoints))
pointsinbox[(keypoints[:, 0] < int(bounding_box[0]))] = -1
pointsinbox[(keypoints[:, 1] < int(bounding_box[1]))] = -1
pointsinbox[(keypoints[:, 0] > int(bounding_box[2]))] = -1
pointsinbox[(keypoints[:, 1] > int(bounding_box[3]))] = -1
keypoints=keypoints[pointsinbox==1]
descriptors = GetDescriptors(descriptorOutput[i], keypoints, input.shape[3], input.shape[2])
#scale image keypoints to FAN resolution
keypoints=dataloader.dataset.keypointsToFANResolution(dataloader.dataset,names[i],keypoints)
keypoints = ((heatmapsize/imagesize)*keypoints).round()
last_index += len(keypoints)
buffer_last_index += len(keypoints)
Keypoint_buffer[buffer_first_index:buffer_last_index, :2] = keypoints
Descriptor__buffer[buffer_first_index:buffer_last_index] = descriptors
if (self.RemoveBackgroundClusters):
Keypoint_buffer[buffer_first_index:buffer_last_index, 2] = pointsinbox
keypoint_indexes[names[i]] = [first_index, last_index]
first_index += len(keypoints)
buffer_first_index += len(keypoints)
#periodically we store the buffer in file
if buffer_last_index>int(buffersize*0.8):
AppendFileArray(np.array(Keypoint_buffer[:buffer_last_index]),str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'keypoints'))
AppendFileArray(np.array(Descriptor__buffer[:buffer_last_index]), str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'descriptors'))
Keypoint_buffer = torch.zeros(buffersize, 3)
Descriptor__buffer = torch.zeros(buffersize, numberoffeatures)
buffer_first_index = 0
buffer_last_index = 0
LogText(f"Inference of Keypoints completed", self.experiment_name, self.log_path)
#store any keypoints left on the buffers
AppendFileArray(np.array(Keypoint_buffer[:buffer_last_index]), str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'keypoints'))
AppendFileArray(np.array(Descriptor__buffer[:buffer_last_index]), str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'descriptors'))
#load handlers to the Keypoints and Descriptor files
Descriptors,fileHandler1=OpenreadFileArray(str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'descriptors'))
Keypoints, fileHandler2 = OpenreadFileArray( str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'keypoints'))
Keypoints = Keypoints[:, :]
LogText(f"Keypoints Detected per image {len(Keypoints)/len(keypoint_indexes)}", self.experiment_name, self.log_path)
#perform outlier detection
inliersindexes=np.ones(len(Keypoints))==1
if(self.remove_superpoint_outliers_percentage>0):
inliersindexes=self.Indexes_of_inliers(Keypoints,Descriptors,buffersize)
#extend outliers with background points for constant background datasets
if (self.RemoveBackgroundClusters):
foregroundpointindex=self.Indexes_of_BackgroundPoints(Keypoints,Descriptors,keypoint_indexes)
inliersindexes = np.logical_and(inliersindexes, foregroundpointindex)
LogText(f"Keypoints Detected per image(filtering) {sum(inliersindexes) / len(keypoint_indexes)}", self.experiment_name,self.log_path)
#we use a subset of all the descriptors for clustering based on the recomendation of the Faiss repository
numberOfPointsForClustering=500000
LogText(f"Clustering of keypoints", self.experiment_name, self.log_path)
#clustering of superpoint features
KmeansClustering = clustering.Kmeans(self.number_of_clusters, centroids=None)
descriptors = clustering.preprocess_features( Descriptors[:numberOfPointsForClustering][inliersindexes[:numberOfPointsForClustering]])
KmeansClustering.cluster(descriptors, verbose=False)
thresholds=self.GetThresholdsPerCluster( inliersindexes,Descriptors,KmeansClustering)
Image_Keypoints = {}
averagepointsperimage=0
for image in keypoint_indexes:
start,end=keypoint_indexes[image]
inliersinimage=inliersindexes[start:end]
keypoints=Keypoints[start:end,:]
inliersinimage[np.sum(keypoints[:,:2]<0 ,1)>0]=False
inliersinimage[np.sum(keypoints[:,:2]>64 ,1)>0]=False
keypoints=keypoints[inliersinimage]
image_descriptors=clustering.preprocess_features(Descriptors[start:end])
image_descriptors=image_descriptors[inliersinimage]
#calculate distance of each keypoints to each centroid
distanceMatrix, clustering_assignments = KmeansClustering.index.search(image_descriptors, self.number_of_clusters)
distanceMatrix=np.take_along_axis(distanceMatrix, np.argsort(clustering_assignments), axis=-1)
#assign keypoints to centroids using the Hungarian algorithm. This ensures that each
#image has at most one instance of each cluster
keypointIndex,clusterAssignment= linear_sum_assignment(distanceMatrix)
tempKeypoints=keypoints[keypointIndex]
clusterAssignmentDistance = distanceMatrix[keypointIndex, clusterAssignment]
clusterstokeep = np.zeros(len(clusterAssignmentDistance))
clusterstokeep = clusterstokeep == 1
# keep only points that lie in their below a cluster specific theshold
clusterstokeep[clusterAssignmentDistance < thresholds[clusterAssignment]] = True
tempKeypoints[:,2]=clusterAssignment
Image_Keypoints[image]=tempKeypoints[clusterstokeep]
averagepointsperimage+=sum(clusterstokeep)
LogText(f"Keypoints Detected per image(clusteringAssignment) {averagepointsperimage / len(Image_Keypoints)}",self.experiment_name, self.log_path)
ClosereadFileArray(fileHandler1,str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'keypoints'))
ClosereadFileArray(fileHandler2,str(GetCheckPointsPath(self.experiment_name,self.log_path) / 'descriptors'))
self.save_keypoints(Image_Keypoints,"SuperPointKeypoints.pickle")
LogText(f"Extraction of Initial pseudoGroundtruth completed", self.experiment_name, self.log_path)
return Image_Keypoints
    def Indexes_of_inliers(self,Keypoints,Descriptors,buffersize):
        """Flag descriptor-space outliers.

        Builds a Faiss IVF index over (up to) the first ``buffersize``
        descriptors and scores every keypoint by the median distance to its
        100 nearest neighbours; the ``remove_superpoint_outliers_percentage``
        fraction with the largest scores is marked as outliers.

        Returns:
            Boolean array with one entry per keypoint; True = inlier.
        """
        res = faiss.StandardGpuResources()
        nlist = 100
        quantizer = faiss.IndexFlatL2(256)
        index = faiss.IndexIVFFlat(quantizer, 256, nlist)
        gpu_index_flat = faiss.index_cpu_to_gpu(res, 0, index)
        gpu_index_flat.train(clustering.preprocess_features(Descriptors[:buffersize]))
        gpu_index_flat.add(clustering.preprocess_features(Descriptors[:buffersize]))
        #we process the descriptors in batches of 10000 vectors
        rg = np.linspace(0, len(Descriptors), math.ceil(len(Descriptors) / 10000) + 1, dtype=int)
        keypoints_outlier_score=np.zeros(len(Keypoints))
        for i in range(len(rg) - 1):
            descr = clustering.preprocess_features(Descriptors[rg[i]:rg[i + 1], :])
            distance_to_closest_points, _ = gpu_index_flat.search(descr, 100)
            # a large median neighbour distance indicates an isolated descriptor
            outlierscore = np.median(distance_to_closest_points, axis=1)
            keypoints_outlier_score[rg[i]:rg[i + 1]] = outlierscore
        inliers = keypoints_outlier_score.copy()
        inliers = np.sort(inliers)
        # score cut-off at the requested percentile
        threshold = inliers[int((1-self.remove_superpoint_outliers_percentage) * (len(inliers) - 1))]
        inliers = keypoints_outlier_score < threshold
        return inliers
    # For constant background datasets like Human3.6 we use this method to discard background keypoints inside the object's bounding box.
    # We cluster foreground and background keypoints separately. Then we remove keypoints whose descriptors are closer to background centroids.
def Indexes_of_BackgroundPoints(self,Keypoints,Descriptors,keypoint_indexes):
backgroundpoitnsIndex = Keypoints[:, 2] == -1
insideboxPoitnsIndex = Keypoints[:, 2] == 1
backgroundDescriptors = clustering.preprocess_features(
Descriptors[:500000 ][ [backgroundpoitnsIndex[:500000 ]]])
insideboxDescriptors = clustering.preprocess_features(
Descriptors[:500000][ [insideboxPoitnsIndex[:500000 ]]])
number_of_insideClusters=100
number_of_outsideClusters=250
backgroundclustering= clustering.Kmeans(number_of_outsideClusters, centroids=None)
insideboxclustering = clustering.Kmeans(number_of_insideClusters, centroids=None)
backgroundclustering.cluster(backgroundDescriptors, verbose=False)
insideboxclustering.cluster(insideboxDescriptors, verbose=False)
foregroundpointindex=np.zeros(len(Keypoints))==-1
for imagename in keypoint_indexes:
start,end=keypoint_indexes[imagename]
keypoints = Keypoints[start:end, :]
descriptors=Descriptors[start:end,:]
distanceinside, Iinside = insideboxclustering.index.search(clustering.preprocess_features(descriptors), 1)
distanceoutside, Ioutside = backgroundclustering.index.search(clustering.preprocess_features(descriptors), 1)
points_to_keep = (distanceinside < distanceoutside).reshape(-1)
points_to_keep = np.logical_and(points_to_keep,keypoints[:,2]==1)
foregroundpointindex[start:end] = points_to_keep
return foregroundpointindex
    def GetPoints(self,confidenceMap, threshold, NMSthes):
        """Extract keypoint coordinates from the detector confidence map.

        With a single-channel map this is thresholding + NMS. With three
        channels (multi-scale mode) channels 1 and 2 hold detections from the
        1.3x / 1.6x scaled inputs; those points are mapped back to the
        original resolution (via the inverse affine transforms created in
        __init__) and all scales are merged.

        Returns:
            Tensor of (x, y) coordinates, one row per keypoint.
        """
        if(confidenceMap.size(1)==1):
            points,_=self.GetPointsFromHeatmap(confidenceMap, threshold, NMSthes)
            return points
        keypoints,keypointprob = self.GetPointsFromHeatmap(confidenceMap[:,0:1], threshold, NMSthes)
        keypoints1,keypoint1prob = self.GetPointsFromHeatmap(confidenceMap[:,1:2], threshold, NMSthes)
        keypoints2,keypoint2prob = self.GetPointsFromHeatmap(confidenceMap[:,2:3], threshold, NMSthes)
        # map scale-1 (1.3x) detections back to original image coordinates
        keys = keypoints1
        imgaug_keypoints = []
        for j in range(len(keys)):
            imgaug_keypoints.append(imgaug.augmentables.kps.Keypoint(x=keys[j, 0], y=keys[j, 1]))
        kpsoi = imgaug.augmentables.kps.KeypointsOnImage(imgaug_keypoints, shape=confidenceMap.shape[2:])
        keypoitns_aug = self.SuperpointUndoScaleDistill1(keypoints=kpsoi)
        keys = keypoitns_aug.to_xy_array()
        keypoints1 = keys
        # map scale-2 (1.6x) detections back to original image coordinates
        keys = keypoints2
        imgaug_keypoints = []
        for j in range(len(keys)):
            imgaug_keypoints.append(imgaug.augmentables.kps.Keypoint(x=keys[j, 0], y=keys[j, 1]))
        kpsoi = imgaug.augmentables.kps.KeypointsOnImage(imgaug_keypoints, shape=confidenceMap.shape[2:])
        keypoitns_aug = self.SuperpointUndoScaleDistill2(keypoints=kpsoi)
        keys = keypoitns_aug.to_xy_array()
        keypoints2 = keys
        # merge detections from all scales (with scores) and suppress duplicates
        newkeypoints = Cuda(torch.from_numpy(np.row_stack((keypoints.cpu().detach().numpy(),keypoints1,keypoints2))))
        newkeypointsprob = torch.cat((keypointprob,keypoint1prob,keypoint2prob))
        newkeypoints=torch.cat((newkeypoints,newkeypointsprob.unsqueeze(1)),1)
        newkeypoints = MergeScales(newkeypoints, int(NMSthes/2))
        return newkeypoints[:,:2]
    def GetPointsFromHeatmap(self,confidenceMap, threshold, NMSthes):
        """Threshold a single-channel confidence map and apply box-NMS.

        Peaks above ``threshold`` become (x, y) points sorted by confidence;
        near-duplicates are suppressed by running torchvision NMS on square
        boxes of side ``NMSthes`` centred on each point.

        Returns:
            (points, prob): surviving (x, y) coordinates and their scores.
        """
        mask = confidenceMap > threshold
        prob = confidenceMap[mask]
        value, indices = prob.sort(descending=True)
        pred = torch.nonzero(mask)
        prob = prob[indices]
        pred = pred[indices]
        # nonzero() rows are (batch, channel, y, x); keep (y, x), flip to (x, y)
        points = pred[:, 2:4]
        points = points.flip(1)
        nmsPoints = torch.cat((points.float(), prob.unsqueeze(1)), 1).transpose(0, 1)
        thres = math.ceil(NMSthes / 2)
        # build one square box per point so torchvision's box NMS can be reused
        newpoints = torch.cat((nmsPoints[0:1, :] - thres, nmsPoints[1:2, :] - thres, nmsPoints[0:1, :] + thres,
                               nmsPoints[1:2, :] + thres, nmsPoints[2:3, :]), 0).transpose(0, 1)
        res = torchvision.ops.nms(newpoints[:, 0:4], newpoints[:, 4], 0.01)
        points = nmsPoints[:, res].transpose(0, 1)
        returnPoints = points[:, 0:2]
        prob = points[:, 2]
        return returnPoints,prob
def GetSuperpointOutput(self,input):
keypoints_volume, descriptors_volume = self.model(input)
keypoints_volume = keypoints_volume.detach()
keypoints_volume = self.softmax(keypoints_volume)
volumeNoDustbin = keypoints_volume[:, :-1, :, :]
spaceTensor = self.pixelSuffle(volumeNoDustbin)
return spaceTensor,descriptors_volume
    def GetThresholdsPerCluster(self,inliersindexes,Descriptors,deepcluster):
        """Compute a per-cluster distance threshold: mean plus one standard
        deviation of the distances of the cluster's inlier members to their
        centroid.

        Returns:
            np.ndarray of shape (number_of_clusters,); 0 for empty clusters.
        """
        # process inlier descriptors in batches of 10000
        # NOTE(review): rg spans [0, num_inliers) but is used to slice the full
        # Descriptors array below -- verify this is intended when outliers
        # have been removed.
        rg = np.linspace(0, sum(inliersindexes), math.ceil(sum(inliersindexes) / 10000) + 1, dtype=int)
        distance_to_centroid_per_cluster = list([[] for i in range(self.number_of_clusters)])
        for i in range(len(rg) - 1):
            descriptors = clustering.preprocess_features(Descriptors[rg[i]:rg[i + 1], :][inliersindexes[rg[i]:rg[i + 1]]])
            distancesFromCenter, clustering_assingments = deepcluster.index.search(descriptors, 1)
            for point in range(len(clustering_assingments)):
                distance_to_centroid_per_cluster[int(clustering_assingments[point])].append(
                    distancesFromCenter[point][0])
        thresholds = np.zeros(self.number_of_clusters)
        for i in range(self.number_of_clusters):
            if (len(distance_to_centroid_per_cluster[i]) == 0):
                thresholds[i] = 0
            else:
                thresholds[i]=np.average(np.array(distance_to_centroid_per_cluster[i]))+np.std(distance_to_centroid_per_cluster[i])
        return thresholds
def save_keypoints(self,Image_Keypoints,filename):
checkPointdir = GetCheckPointsPath(self.experiment_name,self.log_path)
checkPointFile=checkPointdir /filename
with open(checkPointFile, 'wb') as handle:
pickle.dump(Image_Keypoints, handle, protocol=pickle.HIGHEST_PROTOCOL)
# ----------------------------------------------------------------------
# https://github.com/magicleap/SuperPointPretrainedNetwork/
#
# --------------------------------------------------------------------*/
#
class SuperPointNet(torch.nn.Module):
    """ Pytorch definition of SuperPoint Network. """
    def __init__(self):
        super(SuperPointNet, self).__init__()
        self.relu = torch.nn.ReLU(inplace=True)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        self.numberOfClasses=1
        # channel widths: shared encoder (c1..c4), head width (c5), descriptor dim (d1)
        c1, c2, c3, c4, c5, d1 = 64, 64, 128, 128, 256, 256
        # Shared Encoder.
        self.conv1a = torch.nn.Conv2d(1, c1, kernel_size=3, stride=1, padding=1)
        self.conv1b = torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1)
        self.conv2a = torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1)
        self.conv2b = torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1)
        self.conv3a = torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1)
        self.conv3b = torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1)
        self.conv4a = torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1)
        self.conv4b = torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1)
        # Detector Head.
        self.convPa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1)
        self.convPb = torch.nn.Conv2d(c5, 65, kernel_size=1, stride=1, padding=0)
        # Descriptor Head.
        self.convDa = torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1)
        self.convDb = torch.nn.Conv2d(c5, d1, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        """ Forward pass that jointly computes unprocessed point and descriptor
        tensors.
        Input
          x: Image pytorch tensor shaped N x 1 x H x W.
        Output
          semi: Output point pytorch tensor shaped N x 65 x H/8 x W/8.
          desc: Output descriptor pytorch tensor shaped N x 256 x H/8 x W/8.
        """
        # Shared Encoder.
        x = self.relu(self.conv1a(x))
        x = self.relu(self.conv1b(x))
        x = self.pool(x)
        x = self.relu(self.conv2a(x))
        x = self.relu(self.conv2b(x))
        x = self.pool(x)
        x = self.relu(self.conv3a(x))
        x = self.relu(self.conv3b(x))
        x = self.pool(x)
        x = self.relu(self.conv4a(x))
        x = self.relu(self.conv4b(x))
        # Detector Head.
        cPa = self.relu(self.convPa(x))
        semi = self.convPb(cPa)
        # Descriptor Head.
        cDa = self.relu(self.convDa(x))
        desc = self.convDb(cDa)
        dn = torch.norm(desc, p=2, dim=1) # Compute the norm.
        desc = desc.div(torch.unsqueeze(dn, 1)) # Divide by norm to normalize.
        return semi, desc
|
118714
|
import json
import os
import io
import re
from collections import defaultdict
import flask
from flask import Flask
app = Flask(__name__)
#ndcg_eval_dir = "data/ndcg_eval_dir"
# filename -> original document text (filled in by parse_ndcg_data)
origs = {}
# filename -> list of top-result dicts still needing a relevance judgement
needed_judgements = defaultdict(list)
# From http://stackoverflow.com/questions/273192/how-to-check-if-a-directory-exists-and-create-it-if-necessary
def ensure_dir(f):
    """Create the parent directory of path *f* if it does not already exist.

    Safe when *f* has no directory component and when the directory is
    created concurrently between the existence check and makedirs().
    """
    d = os.path.dirname(f)
    if d and not os.path.exists(d):
        try:
            os.makedirs(d)
        except OSError:
            # raced with another creator; only re-raise if it still isn't a dir
            if not os.path.isdir(d):
                raise
def make_id_save(name):
    """Sanitize *name* into an identifier: every non-word character becomes '_'."""
    return re.sub(r'[^\w]', '_', name)
def parse_ndcg_data(ndcg_eval_dir):
    """Scan <ndcg_eval_dir>/<method>/*.json result files and fill the module
    globals: origs (document texts) and needed_judgements (top-5 results per
    document requiring a human relevance judgement, deduplicated by id).
    Python 2 script (uses print statements)."""
    methods = os.listdir(ndcg_eval_dir)
    print methods
    for method in methods:
        method_dir = ndcg_eval_dir + '/' + method
        for json_file in [f for f in os.listdir(method_dir) if f.endswith('.json')]:
            with io.open(method_dir + '/' +json_file, 'r', encoding='utf-8') as json_in_file:
                # filename without the '.json' extension
                filename_raw = json_file[:-5]
                ndcg_json = json.loads(json_in_file.read())
                top10 = ndcg_json[u'top10']
                print method,json_file,[(top["title"],top["score"]) for top in top10]
                # only the top five results of every method get judged
                top5 = top10[:5]
                print method,json_file,[(top["title"],top["score"]) for top in top5]
                if filename_raw not in origs:
                    origs[filename_raw] = ndcg_json[u'orig']
                for top in top5:
                    top[u'id'] = make_id_save(top[u'title'])
                    # deduplicate results shared across methods
                    if top[u'id'] not in [judgement[u'id'] for judgement in needed_judgements[filename_raw]]:
                        needed_judgements[filename_raw].append(top)
    #print origs
    #print needed_judgements
    judgements = 0
    for key in needed_judgements:
        print key
        judgements += len(needed_judgements[key])
        print len(needed_judgements[key])
    print 'total judgements:',judgements
@app.route("/ndcg_list/<username>")
def ndcg_list(username):
    """Render the per-user overview page: one link per document with the
    number of judgements still needed (already-saved scores subtracted)."""
    html = u'''<!doctype html><html lang=en><head><meta charset=utf-8><title>NDCG Ambient Search Eval</title>
    </head><body>'''
    html += u'<h1>Hi '+username+u'</h1>'
    html += u'<ul>'
    for key in needed_judgements:
        length = len(needed_judgements[key])
        potential_save = 'data/ndcg_save/' + make_id_save(username) + "/" + key + '.json'
        saved_judgements = {}
        # if this user already saved judgements for the document, load them
        if os.path.exists(potential_save):
            with open(potential_save) as file_in:
                json_save = json.loads(file_in.read())
                saved_judgements = json_save.keys()
        # titles not yet present in the saved scores still need judging
        needed = len(list(set([elem['title'] for elem in needed_judgements[key]]) - set(saved_judgements)))
        html += u'<li><a href="/ndcg/'+key+u'/'+username+u'">'+key+u'</a> (needed '+str(needed)+'/'+str(length)+u')</li>'
    html += u'</ul>'
    html += u'''
    </body>
    </html>'''
    return html
@app.route('/ndcg_save/<filename>/<username>', methods=['POST'])
def ndcg_save(filename,username):
    """Persist one user's judgements for a document.

    Every posted form field must hold an integer score in [0, 3]; invalid
    fields are skipped and reported back in the response page.
    """
    username_dir = 'data/ndcg_save/' + make_id_save(username) + "/"
    ensure_dir(username_dir)
    json_out = {}
    parse_errors = ''
    for key in flask.request.form:
        try:
            value = int(flask.request.form[key])
            # valid relevance scores are 0..3
            if value >= 0 and value <= 3:
                json_out[key] = value
                parse_error = False
            else:
                parse_error = True
        except:
            parse_error = True
        if parse_error:
            parse_errors += key + ' ' + (flask.request.form[key] if flask.request.form[key] != '' else '<empty string>') + '<br/>'
    json_str = json.dumps(json_out)
    print 'New json str:',json_str
    print 'Parse errors:',parse_errors
    with open(username_dir + filename + '.json','w') as filename_out:
        filename_out.write(json_str)
    html = u'''<!doctype html><html lang=en><head><meta charset=utf-8><title>NDCG Ambient Search Eval</title>
    </head><body>'''
    if parse_errors != '':
        html += u'Could not parse: <br/>' + parse_errors + '<br/>'
    html += u'Thanks. Now go back to: <a href="/ndcg_list/'+username+'">the list</a>'
    html += u'</body></html>'
    return html
@app.route("/ndcg/<filename>/<username>")
def ndcg(filename,username):
    """Render the judgement form for one document: the original text plus a
    0-3 input for every result needing a relevance score; any scores the
    user saved earlier are pre-filled."""
    if filename not in origs:
        return u'Could not find: '+filename
    # BUG FIX: json_save must exist even when the user has no saved file yet;
    # it was previously undefined in that case and crashed the form rendering
    # below with a NameError.
    json_save = {}
    #check if we already have scores, if so load them
    potential_save = 'data/ndcg_save/' + make_id_save(username) + "/" + filename + '.json'
    if os.path.exists(potential_save):
        with open(potential_save) as in_file:
            json_save = json.loads(in_file.read())
    html = u'''<!doctype html><html lang=en><head><meta charset=utf-8><title>NDCG Ambient Search Eval</title>
    </head><body>'''
    #you give a 0 score for an irrelevant result, 1 for a partially relevant, 2 for relevant, and 3 for perfect.
    html += u'<h1>Hi '+username+u'</h1>'
    html +=u'''<p>Each document is to be judged on a scale of 0-3 with 0 meaning irrelevant,
    1 partially relevant, 2 for relevant and 3 for very relevant / perfect.</p>'''
    html += u'<h2>' + filename + u' text is:</h2>'
    html += origs[filename]
    html += '<form action="/ndcg_save/'+filename+'/'+username+'" method="post">'
    for judgement in needed_judgements[filename]:
        html += u'<h3>On a scale from 0 to 3, how relevant is <a href="'+ judgement[u'url'] +'">'+ judgement[u'title'] +'</a></h3>'
        html += u'Wiki text: ' + judgement[u'text'] + u'<br/>'
        html += u'Wiki categories: ' + u' '.join(judgement[u'categories']) + u'<br/>'
        my_id = judgement[u'id']
        # pre-fill the input with the previously saved score, if any
        html += u'<input value="'+ (str(json_save[judgement[u'title']]) if judgement[u'title'] in json_save else '') +'" list="'+my_id+u'" name="'+judgement[u'title']+'"><datalist id="'+my_id+'"><option value="0">0</option><option value="1">1</option><option value="2">2</option><option value="3">3</option></datalist></input>'
    html += '<br/><input type="submit"></form>'
    html += u'</body></html>'
    return html
if __name__ == "__main__":
    # build the in-memory judgement queues before serving any request
    parse_ndcg_data("data/ndcg_eval_dir")
    #parse_ndcg_data("data/ndcg_eval_dir_unfair_druid")
    app.debug = True
    # development server, listening on all interfaces
    app.run(host='0.0.0.0')
|
118736
|
# Python 2 script: bulk-download flag icons by numeric id.
import urllib
image = urllib.URLopener()
# try ids 300..399; ids whose download fails are printed
for k in xrange(300,400):
    try:
        image.retrieve("http://olympicshub.stats.com/flags/48x48/"+str(k)+".png",str(k)+".png")
    except:
        print k
|
118739
|
from __future__ import print_function
import argparse
import torch
import torch.utils.data
from torch import nn, optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.nn import functional as F
import numpy as np
import collections
from collections import OrderedDict
import datetime
import os
import vae_conv_model_mnist
# Load a trained convolutional MNIST VAE and render a grid of decoded samples:
# row i sweeps latent dimension i while all other dimensions stay at zero.
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--dataroot', help='path to dataset')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--model', default='model.pth', help='saved model file')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
print("cuda", args.cuda, args.no_cuda, torch.cuda.is_available())
# latent dimensionality of the VAE
params = 20
model = vae_conv_model_mnist.VAE(params)
model.have_cuda = args.cuda
if args.cuda:
    model.cuda()
# map_location allows loading a GPU-trained checkpoint on a CPU-only machine
if args.cuda:
    model.load_state_dict(torch.load(args.model))
else:
    model.load_state_dict(torch.load(args.model, map_location={'cuda:0': 'cpu'}))
np.set_printoptions(threshold=500000,linewidth=1000)
print(model)
# Summarize Model
from pytorch_summary import Summary
s = Summary(model.encoder, input_size=(1, 1, 28, 28))
s = Summary(model.decoder, input_size=(1, 1024, 1, 1))
# grid layout: side_x values per latent dimension, side_y dimensions shown
side_x = 40
side_y = 20
z_input = np.full((side_x*side_y,params), 0.0)
# print(z_input.shape)
for i in range(side_y):
    for j in range(side_x):
        # sweep dimension i over [-side_x/2, side_x/2) * 0.1
        z_input[i*side_x+j][i] = (j-side_x/2.0) * 0.1
#        z_input[i*side+j][1] = (j-side/2.0) * 0.1
# for i in range(side):
#    for j in range(side):
#        z_input[i*side+j][0] = (i-side/2.0) * 0.1
#        z_input[i*side+j][1] = (j-side/2.0) * 0.1
# print(z_input)
if args.cuda:
    z_batch = torch.cuda.FloatTensor(z_input)
else:
    z_batch = torch.FloatTensor(z_input)
z_batch = Variable(z_batch)
# decode the latent grid and save it as a single image
vis_batch = model.decode(z_batch)
outf = args.outf
save_image(vis_batch.data.cpu(), outf + '/test.png', nrow=side_x)
|
118767
|
from typing import List, Tuple, Union
from io import StringIO
from nltk.corpus import stopwords
import torch
from torch import Tensor
import torch.nn.functional as F
from transformers import BertTokenizer, BertForMaskedLM
from transformers.tokenization_utils import PreTrainedTokenizer
class MaskedStego:
    """Linguistic steganography via BERT masked-language-model predictions.

    Selected cover-text words are masked; each mask's candidate predictions
    form a block whose chosen index encodes bits of the secret message.
    Decoding recovers the bits from which candidate appears in the stego text.
    """
    def __init__(self, model_name_or_path: str = 'bert-base-cased') -> None:
        self._tokenizer: PreTrainedTokenizer = BertTokenizer.from_pretrained(model_name_or_path)
        self._model = BertForMaskedLM.from_pretrained(model_name_or_path)
        self._STOPWORDS: List[str] = stopwords.words('english')
    def __call__(self, cover_text: str, message: str, mask_interval: int = 3, score_threshold: float = 0.01) -> dict:
        """Embed the bit string ``message`` ('0'/'1' chars) into ``cover_text``.

        Returns a dict with 'stego_text' and 'encoded_message' (the bits that
        were actually embedded, possibly fewer than supplied).
        """
        assert set(message) <= set('01')
        message_io = StringIO(message)
        processed = self._preprocess_text(cover_text, mask_interval)
        input_ids = processed['input_ids']
        masked_ids = processed['masked_ids']
        sorted_score, indices = processed['sorted_output']
        for i_token, token in enumerate(masked_ids):
            if token != self._tokenizer.mask_token_id:
                continue
            ids = indices[i_token]
            scores = sorted_score[i_token]
            candidates = self._pick_candidates_threshold(ids, scores, score_threshold)
            print(self._tokenizer.convert_ids_to_tokens(candidates))
            # a single candidate cannot encode any bits
            if len(candidates) < 2:
                continue
            replace_token_id = self._block_encode_single(candidates, message_io).item()
            print('replace', replace_token_id, self._tokenizer.convert_ids_to_tokens([replace_token_id]))
            input_ids[i_token] = replace_token_id
        # only the bits consumed so far were embedded
        encoded_message: str = message_io.getvalue()[:message_io.tell()]
        message_io.close()
        stego_text = self._tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        return { 'stego_text': stego_text, 'encoded_message': encoded_message }
    def decode(self, stego_text: str, mask_interval: int = 3, score_threshold: float = 0.005) -> dict:
        """Recover the embedded bits from ``stego_text``.

        Must be called with the same ``mask_interval``/``score_threshold``
        regime used for encoding. Returns {'decoded_message': bits}.
        """
        decoded_message: List[str] = []
        processed = self._preprocess_text(stego_text, mask_interval)
        input_ids = processed['input_ids']
        masked_ids = processed['masked_ids']
        sorted_score, indices = processed['sorted_output']
        for i_token, token in enumerate(masked_ids):
            if token != self._tokenizer.mask_token_id:
                continue
            ids = indices[i_token]
            scores = sorted_score[i_token]
            candidates = self._pick_candidates_threshold(ids, scores, score_threshold)
            if len(candidates) < 2:
                continue
            chosen_id: int = input_ids[i_token].item()
            decoded_message.append(self._block_decode_single(candidates, chosen_id))
        return {'decoded_message': ''.join(decoded_message)}
    def _preprocess_text(self, sentence: str, mask_interval: int) -> dict:
        """Tokenize, mask every ``mask_interval``-th substitutable token and
        run the MLM; returns original ids, masked ids and sorted predictions."""
        encoded_ids = self._tokenizer([sentence], return_tensors='pt').input_ids[0]
        masked_ids = self._mask(encoded_ids.clone(), mask_interval)
        sorted_score, indices = self._predict(masked_ids)
        return { 'input_ids': encoded_ids, 'masked_ids': masked_ids, 'sorted_output': (sorted_score, indices) }
    def _mask(self, input_ids: Union[Tensor, List[List[int]]], mask_interval: int) -> Tensor:
        """Replace every ``mask_interval``-th substitutable token in-place with
        the mask token and return the (mutated) ids."""
        length = len(input_ids)
        tokens: List[str] = self._tokenizer.convert_ids_to_tokens(input_ids)
        offset = mask_interval // 2 + 1
        mask_count = offset
        for i, token in enumerate(tokens):
            # Skip initial subword
            if i + 1 < length and self._is_subword(tokens[i + 1]): continue
            if not self._substitutable_single(token): continue
            if mask_count % mask_interval == 0:
                input_ids[i] = self._tokenizer.mask_token_id
            mask_count += 1
        return input_ids
    def _predict(self, input_ids: Union[Tensor, List[List[int]]]):
        """Run the MLM and return per-position vocabulary scores sorted
        descending (values, indices)."""
        self._model.eval()
        with torch.no_grad():
            output = self._model(input_ids.unsqueeze(0))['logits'][0]
            softmaxed_score = F.softmax(output, dim=1)  # [word_len, vocab_len]
            return softmaxed_score.sort(dim=1, descending=True)
    def _encode_topk(self, ids: List[int], message: StringIO, bits_per_token: int) -> int:
        """Encode bits by choosing among the top 2**bits_per_token substitutable
        candidates (alternative to the threshold-based candidate picking)."""
        k = 2**bits_per_token
        candidates: List[int] = []
        for id in ids:
            token = self._tokenizer.convert_ids_to_tokens(id)
            if not self._substitutable_single(token):
                continue
            candidates.append(id)
            if len(candidates) >= k:
                break
        return self._block_encode_single(candidates, message)
    def _pick_candidates_threshold(self, ids: Tensor, scores: Tensor, threshold: float) -> List[int]:
        """Keep candidate ids whose score is at least ``threshold`` and whose
        token is substitutable.
        NOTE(review): at runtime the returned elements are 0-d tensors, not
        plain ints; callers rely on Tensor equality / .item()."""
        filtered_ids: Tensor = ids[scores >= threshold]
        def filter_fun(idx: Tensor) -> bool:
            return self._substitutable_single(self._tokenizer.convert_ids_to_tokens(idx.item()))
        return list(filter(filter_fun, filtered_ids))
    def _substitutable_single(self, token: str) -> bool:
        """A token may carry message bits only if it is a whole alphabetic
        non-stopword (subwords would corrupt the surrounding word)."""
        if self._is_subword(token): return False
        if token.lower() in self._STOPWORDS: return False
        if not token.isalpha(): return False
        return True
    @staticmethod
    def _block_encode_single(ids: List[int], message: StringIO) -> int:
        """Pick the candidate whose index equals the next floor(log2(len(ids)))
        message bits; pads the message with '0's when it runs out."""
        assert len(ids) > 0
        if len(ids) == 1:
            return ids[0]
        # number of bits this block can carry
        capacity = len(ids).bit_length() - 1
        bits_str = message.read(capacity)
        if len(bits_str) < capacity:
            padding: str = '0' * (capacity - len(bits_str))
            bits_str = bits_str + padding
            message.write(padding)
        index = int(bits_str, 2)
        return ids[index]
    @staticmethod
    def _block_decode_single(ids: List[int], chosen_id: int) -> str:
        """Inverse of _block_encode_single: the chosen candidate's index,
        rendered as a fixed-width bit string."""
        if len(ids) < 2:
            return ''
        capacity = len(ids).bit_length() - 1
        index = ids.index(chosen_id)
        return format(index, '0' + str(capacity) +'b')
    @staticmethod
    def _is_subword(token: str) -> bool:
        # WordPiece marks continuation subwords with a '##' prefix
        return token.startswith('##')
|
118777
|
import torch.autograd as autograd
import torch.nn.functional as F
from torch.autograd import Variable
def linear(inputs, weight, bias, meta_step_size=0.001, meta_loss=None, stop_gradient=False):
    """Functional linear layer with optional one-step meta-gradient adaptation.

    When ``meta_loss`` is given, ``weight`` (and ``bias``, if present) are
    moved one gradient step of size ``meta_step_size`` down that loss before
    being applied. With ``stop_gradient`` the adaptation gradients are
    detached so no second-order terms are tracked.
    """
    if meta_loss is None:
        return F.linear(inputs, weight, bias)

    def _meta_grad(param):
        # gradient of the meta loss w.r.t. *param*, optionally detached
        g = autograd.grad(meta_loss, param, create_graph=True)[0]
        return Variable(g.data, requires_grad=False) if stop_gradient else g

    adapted_weight = weight - _meta_grad(weight) * meta_step_size
    adapted_bias = None if bias is None else bias - _meta_grad(bias) * meta_step_size
    return F.linear(inputs, adapted_weight, adapted_bias)
def conv2d(inputs, weight, bias, meta_step_size=0.001, stride=1, padding=0, dilation=1, groups=1, meta_loss=None,
           stop_gradient=False):
    """Functional conv2d with optional one-step meta-gradient adaptation.

    When ``meta_loss`` is given, the kernel (and bias, if any) are shifted by
    one gradient step of size ``meta_step_size`` on that loss before the
    convolution is applied. With ``stop_gradient`` the adaptation gradients
    are detached so no second-order terms flow through.
    """
    if meta_loss is None:
        return F.conv2d(inputs, weight, bias, stride, padding, dilation, groups)

    def _meta_grad(param):
        # gradient of the meta loss w.r.t. *param*, optionally detached
        g = autograd.grad(meta_loss, param, create_graph=True)[0]
        return Variable(g.data, requires_grad=False) if stop_gradient else g

    adapted_weight = weight - _meta_grad(weight) * meta_step_size
    adapted_bias = None if bias is None else bias - _meta_grad(bias) * meta_step_size
    return F.conv2d(inputs, adapted_weight, adapted_bias, stride, padding, dilation, groups)
def relu(inputs):
    """In-place ReLU: clamps negative entries of *inputs* to zero and returns it."""
    return F.relu(inputs, inplace=True)
def maxpool(inputs, kernel_size, stride=None, padding=0):
    """2-D max pooling; with stride=None the stride defaults to kernel_size."""
    return F.max_pool2d(inputs, kernel_size=kernel_size, stride=stride, padding=padding)
|
118815
|
import torch
import numpy as np
from elf.io import open_file
from elf.wrapper import RoiWrapper
from ..util import ensure_tensor_with_channels
class RawDataset(torch.utils.data.Dataset):
    """Torch dataset that samples random patches from a raw image/volume
    stored on disk (opened via elf.io).
    """
    # upper bound on rejection-sampling retries when a sampler is configured
    max_sampling_attempts = 500
    @staticmethod
    def compute_len(path, key, patch_shape, with_channels):
        """Default epoch length: product over spatial axes of (extent / patch size)."""
        with open_file(path, mode="r") as f:
            # exclude the leading channel axis from the spatial shape
            shape = f[key].shape[1:] if with_channels else f[key].shape
            n_samples = int(np.prod(
                [float(sh / csh) for sh, csh in zip(shape, patch_shape)]
            ))
        return n_samples
def __init__(
self,
raw_path,
raw_key,
patch_shape,
raw_transform=None,
transform=None,
roi=None,
dtype=torch.float32,
n_samples=None,
sampler=None,
ndim=None,
with_channels=False,
):
self.raw_path = raw_path
self.raw_key = raw_key
self.raw = open_file(raw_path, mode="r")[raw_key]
self._with_channels = with_channels
if ndim is None:
self._ndim = self.raw.ndim - 1 if with_channels else self.raw.ndim
else:
self._ndim = ndim
assert self._ndim in (2, 3), "Invalid data dimensionality: {self._ndim}. Only 2d or 3d data is supported"
if self._with_channels:
assert self.raw.ndim == self._ndim + 1, f"{self.raw.ndim}, {self._ndim}"
raw_ndim = self.raw.ndim - 1 if self._with_channels else self.raw.ndim
if roi is not None:
assert len(roi) == raw_ndim, f"{roi}, {raw_ndim}"
self.raw = RoiWrapper(self.raw, (slice(None),) + roi) if self._with_channels else RoiWrapper(self.raw, roi)
self.roi = roi
self.shape = self.raw.shape[1:] if self._with_channels else self.raw.shape
assert len(patch_shape) == raw_ndim, f"{patch_shape}, {raw_ndim}"
self.patch_shape = patch_shape
self.raw_transform = raw_transform
self.transform = transform
self.sampler = sampler
self.dtype = dtype
if n_samples is None:
self._len = n_samples
else:
self._len = self.compute_len(raw_path, raw_key, self.patch_shape, with_channels)
# TODO
self.trafo_halo = None
# self.trafo_halo = None if self.transform is None\
# else self.transform.halo(self.patch_shape)
if self.trafo_halo is None:
self.sample_shape = self.patch_shape
else:
if len(self.trafo_halo) == 2 and self._ndim == 3:
self.trafo_halo = (0,) + self.trafo_halo
assert len(self.trafo_halo) == self._ndim
self.sample_shape = tuple(sh + ha for sh, ha in zip(self.patch_shape, self.trafo_halo))
self.inner_bb = tuple(slice(ha, sh - ha) for sh, ha in zip(self.patch_shape, self.trafo_halo))
def __len__(self):
return self._len
@property
def ndim(self):
return self._ndim
def _sample_bounding_box(self):
bb_start = [
np.random.randint(0, sh - psh) if sh - psh > 0 else 0
for sh, psh in zip(self.shape, self.sample_shape)
]
return tuple(slice(start, start + psh) for start, psh in zip(bb_start, self.sample_shape))
def _get_sample(self, index):
bb = self._sample_bounding_box()
if self._with_channels:
raw = self.raw[(slice(None),) + bb]
else:
raw = self.raw[bb]
if self.sampler is not None:
sample_id = 0
while not self.sampler(raw):
bb = self._sample_bounding_box()
raw = self.raw[(slice(None),) + bb] if self._with_channels else self.raw[bb]
sample_id += 1
if sample_id > self.max_sampling_attempts:
raise RuntimeError(f"Could not sample a valid batch in {self.max_sampling_attempts} attempts")
return raw
def crop(self, tensor):
bb = self.inner_bb
if tensor.ndim > len(bb):
bb = (tensor.ndim - len(bb)) * (slice(None),) + bb
return tensor[bb]
def __getitem__(self, index):
raw = self._get_sample(index)
if self.raw_transform is not None:
raw = self.raw_transform(raw)
if self.transform is not None:
raw = self.transform(raw)
if self.trafo_halo is not None:
raw = self.crop(raw)
raw = ensure_tensor_with_channels(raw, ndim=self._ndim, dtype=self.dtype)
return raw
# need to overwrite pickle to support h5py
def __getstate__(self):
state = self.__dict__.copy()
del state["raw"]
return state
def __setstate__(self, state):
state["raw"] = open_file(state["raw_path"], mode="r")[state["raw_key"]]
self.__dict__.update(state)
|
118821
|
from b_rabbit import BRabbit
def event_listener(msg):
    """Callback invoked for every received event; prints body and properties."""
    print('Event received')
    print("Message body is: " + msg.body)
    print("Message properties are: " + str(msg.properties))
# Connect to a local RabbitMQ broker and subscribe, on a background thread,
# to events published by "publisher" under routing key "publisher.pub".
rabbit = BRabbit(host='localhost', port=5672)
subscriber = rabbit.EventSubscriber(
    b_rabbit=rabbit,
    routing_key='publisher.pub',
    publisher_name='publisher',
    external=False,
    event_listener=event_listener)
subscriber.subscribe_on_thread()
|
118834
|
from src.platform.tomcat.interfaces import AppInterface
class FPrint(AppInterface):
    """Fingerprint definition for Tomcat 3.3.

    Declares the version string and the URI to probe; the matching itself is
    presumably performed by AppInterface -- confirm against the interface.
    """
    def __init__(self):
        super(FPrint, self).__init__()
        # Tomcat version this fingerprint identifies
        self.version = "3.3"
        # Documentation path probed on the target server
        self.uri = "/doc/readme"
|
118835
|
import sqlalchemy as db
from sqlalchemy.orm import relationship
from src.db import helper
from src.db.sqlalchemy import Base
from src.model.category import Category
class Local(Base):
    """SQLAlchemy model for a shop/venue ("local") with location, contact
    data, pick-up/delivery flags and an optional category."""
    __tablename__ = 'compra_local_local'
    id = db.Column(db.Integer, helper.get_sequence(__tablename__), primary_key=True)
    name = db.Column(db.String(64), nullable=False)
    description = db.Column(db.String(256))
    postal_address = db.Column(db.String(256), nullable=False)
    latitude = db.Column(db.Float, nullable=False)
    longitude = db.Column(db.Float, nullable=False)
    website = db.Column(db.String(256))
    phone_number = db.Column(db.String(64))
    pick_up = db.Column(db.Boolean, nullable=False, default=True)
    delivery = db.Column(db.Boolean, nullable=False, default=False)
    image = db.Column(db.Text)
    category_id = db.Column(db.Integer, db.ForeignKey(f'{Category.__tablename__}.id'))
    category = relationship(Category.__name__)
    def serialize(self):
        """Return a plain dict representation (category flattened to its name)."""
        return dict(
            id=self.id,
            name=self.name,
            description=self.description,
            postal_address=self.postal_address,
            latitude=self.latitude,
            longitude=self.longitude,
            website=self.website,
            phone_number=self.phone_number,
            pick_up=self.pick_up,
            delivery=self.delivery,
            image=self.image,
            # avoid touching the relationship when no category is set
            category=None if not self.category_id else self.category.name
        )
|
118861
|
from .acting_interface import ActingInterface
class ActorWrapper(ActingInterface):
    """Wrapper for a created actor
    Allows overriding only specific actor methods while passing through the
    rest, similar to gym wrappers
    """
    def __init__(self, actor):
        # Mirror the wrapped actor's spaces on the wrapper itself.
        super().__init__(*actor.get_spaces())
        # NOTE: subclasses rely on ``self._actor`` to reach the wrapped actor.
        self._actor = actor
    def get_samples(self, min_samples):
        """Pass-through: collect at least *min_samples* samples."""
        return self._actor.get_samples(min_samples)
    def get_env_count(self):
        """Pass-through: number of environments managed by the actor."""
        return self._actor.get_env_count()
    def set_actor_policy(self, actor_policy):
        """Pass-through: install the policy used for acting."""
        return self._actor.set_actor_policy(actor_policy)
    def update_state(self, progress, policy_state=None):
        """Pass-through: propagate training progress / policy state."""
        return self._actor.update_state(progress, policy_state)
    def close(self):
        """Pass-through: release the wrapped actor's resources."""
        return self._actor.close()
|
118870
|
from pyradioconfig.parts.jumbo.calculators.calc_synth import CALC_Synth_jumbo
class CALC_Synth_nixi(CALC_Synth_jumbo):
    """Synth calculator for the nixi part; inherits the jumbo implementation unchanged."""
    pass
|
118884
|
from __future__ import absolute_import
# Minifier-generated constants (single-letter aliases used throughout below).
_F='\ufeff'  # byte-order mark; skipped when counting columns
_E='\x00'  # NUL sentinel appended to mark the end of the buffer
_D=False  # alias for False (guards the typing import)
_C='ascii'
_B='\n'
_A=None  # alias for None
import codecs
from .error import YAMLError,FileMark,StringMark,YAMLStreamError
from .compat import text_type,binary_type,PY3,UNICODE_SIZE
from .util import RegExp
if _D:from typing import Any,Dict,Optional,List,Union,Text,Tuple,Optional
__all__=['Reader','ReaderError']
class ReaderError(YAMLError):
    # Raised when the input stream contains undecodable bytes or
    # non-printable characters.  (Minified source: `A` is `self`.)
    def __init__(A,name,position,character,encoding,reason):A.name=name;A.character=character;A.position=position;A.encoding=encoding;A.reason=reason
    def __str__(A):
        # Two message shapes: undecodable raw bytes vs. unacceptable code points.
        if isinstance(A.character,binary_type):return'\'%s\' codec can\'t decode byte #x%02x: %s\n in "%s", position %d'%(A.encoding,ord(A.character),A.reason,A.name,A.position)
        else:return'unacceptable character #x%04x: %s\n in "%s", position %d'%(A.character,A.reason,A.name,A.position)
class Reader:
    # Minified ruamel.yaml stream reader: wraps a str / bytes / file-like
    # stream, exposes peek/prefix/forward over a decoded character buffer and
    # tracks index/line/column for error marks.  `A` is `self` throughout;
    # see the aliases above (_A=None, _B='\n', _E=NUL sentinel, _F=BOM).
    def __init__(A,stream,loader=_A):
        A.loader=loader
        if A.loader is not _A and getattr(A.loader,'_reader',_A)is _A:A.loader._reader=A
        A.reset_reader();A.stream=stream
    def reset_reader(A):A.name=_A;A.stream_pointer=0;A.eof=True;A.buffer='';A.pointer=0;A.raw_buffer=_A;A.raw_decode=_A;A.encoding=_A;A.index=0;A.line=0;A.column=0
    @property
    def stream(self):
        try:return self._stream
        except AttributeError:raise YAMLStreamError('input stream needs to specified')
    @stream.setter
    def stream(self,val):
        # Accepts a unicode string, a byte string (encoding sniffed from the
        # BOM) or an object with a .read() method.
        B=val;A=self
        if B is _A:return
        A._stream=_A
        if isinstance(B,text_type):A.name='<unicode string>';A.check_printable(B);A.buffer=B+_E
        elif isinstance(B,binary_type):A.name='<byte string>';A.raw_buffer=B;A.determine_encoding()
        else:
            if not hasattr(B,'read'):raise YAMLStreamError('stream argument needs to have a read() method')
            A._stream=B;A.name=getattr(A.stream,'name','<file>');A.eof=_D;A.raw_buffer=_A;A.determine_encoding()
    def peek(A,index=0):
        # Character at pointer+index without consuming it.
        B=index
        try:return A.buffer[A.pointer+B]
        except IndexError:A.update(B+1);return A.buffer[A.pointer+B]
    def prefix(A,length=1):
        # The next `length` characters without consuming them.
        B=length
        if A.pointer+B>=len(A.buffer):A.update(B)
        return A.buffer[A.pointer:A.pointer+B]
    def forward_1_1(A,length=1):
        # YAML 1.1 variant: NEL/LS/PS also count as line breaks.
        B=length
        if A.pointer+B+1>=len(A.buffer):A.update(B+1)
        while B!=0:
            C=A.buffer[A.pointer];A.pointer+=1;A.index+=1
            if C in'\n\x85\u2028\u2029'or C=='\r'and A.buffer[A.pointer]!=_B:A.line+=1;A.column=0
            elif C!=_F:A.column+=1
            B-=1
    def forward(A,length=1):
        # Consume `length` characters, maintaining line/column counters.
        B=length
        if A.pointer+B+1>=len(A.buffer):A.update(B+1)
        while B!=0:
            C=A.buffer[A.pointer];A.pointer+=1;A.index+=1
            if C==_B or C=='\r'and A.buffer[A.pointer]!=_B:A.line+=1;A.column=0
            elif C!=_F:A.column+=1
            B-=1
    def get_mark(A):
        # StringMark keeps the buffer for snippet display; FileMark does not.
        if A.stream is _A:return StringMark(A.name,A.index,A.line,A.column,A.buffer,A.pointer)
        else:return FileMark(A.name,A.index,A.line,A.column)
    def determine_encoding(A):
        # Sniff UTF-16 BOMs; default to UTF-8.
        while not A.eof and(A.raw_buffer is _A or len(A.raw_buffer)<2):A.update_raw()
        if isinstance(A.raw_buffer,binary_type):
            if A.raw_buffer.startswith(codecs.BOM_UTF16_LE):A.raw_decode=codecs.utf_16_le_decode;A.encoding='utf-16-le'
            elif A.raw_buffer.startswith(codecs.BOM_UTF16_BE):A.raw_decode=codecs.utf_16_be_decode;A.encoding='utf-16-be'
            else:A.raw_decode=codecs.utf_8_decode;A.encoding='utf-8'
        A.update(1)
    # Characters outside YAML's printable set; the pattern depends on a
    # narrow (UCS-2) vs. wide unicode build.
    if UNICODE_SIZE==2:NON_PRINTABLE=RegExp('[^\t\n\r -~\x85\xa0-\ud7ff\ue000-�]')
    else:NON_PRINTABLE=RegExp('[^\t\n\r -~\x85\xa0-\ud7ff\ue000-�𐀀-\U0010ffff]')
    _printable_ascii=('\t\n\r'+''.join(map(chr,range(32,127)))).encode(_C)
    @classmethod
    def _get_non_printable_ascii(D,data):
        # Fast path for pure-ASCII data via bytes.translate.
        A=data.encode(_C);B=A.translate(_A,D._printable_ascii)
        if not B:return _A
        C=B[:1];return A.index(C),C.decode(_C)
    @classmethod
    def _get_non_printable_regex(B,data):
        A=B.NON_PRINTABLE.search(data)
        if not bool(A):return _A
        return A.start(),A.group()
    @classmethod
    def _get_non_printable(A,data):
        # Try the ASCII fast path; fall back to the regex for non-ASCII data.
        try:return A._get_non_printable_ascii(data)
        except UnicodeEncodeError:return A._get_non_printable_regex(data)
    def check_printable(A,data):
        B=A._get_non_printable(data)
        if B is not _A:C,D=B;E=A.index+(len(A.buffer)-A.pointer)+C;raise ReaderError(A.name,E,ord(D),'unicode','special characters are not allowed')
    def update(A,length):
        # Decode more characters from raw_buffer into buffer until it holds
        # at least `length` characters past the pointer.
        if A.raw_buffer is _A:return
        A.buffer=A.buffer[A.pointer:];A.pointer=0
        while len(A.buffer)<length:
            if not A.eof:A.update_raw()
            if A.raw_decode is not _A:
                try:C,E=A.raw_decode(A.raw_buffer,'strict',A.eof)
                except UnicodeDecodeError as B:
                    if PY3:F=A.raw_buffer[B.start]
                    else:F=B.object[B.start]
                    if A.stream is not _A:D=A.stream_pointer-len(A.raw_buffer)+B.start
                    # NOTE(review): this elif repeats the condition above and is
                    # unreachable dead code (minification artifact).
                    elif A.stream is not _A:D=A.stream_pointer-len(A.raw_buffer)+B.start
                    else:D=B.start
                    raise ReaderError(A.name,D,F,B.encoding,B.reason)
            else:C=A.raw_buffer;E=len(C)
            A.check_printable(C);A.buffer+=C;A.raw_buffer=A.raw_buffer[E:]
            if A.eof:A.buffer+=_E;A.raw_buffer=_A;break
    def update_raw(A,size=_A):
        # Pull another chunk from the underlying stream into raw_buffer.
        C=size
        if C is _A:C=4096 if PY3 else 1024
        B=A.stream.read(C)
        if A.raw_buffer is _A:A.raw_buffer=B
        else:A.raw_buffer+=B
        A.stream_pointer+=len(B)
        if not B:A.eof=True
|
118904
|
import argparse
from src.utils.logger import Logger
class CustomFormatter(
    argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter
):
    """Help formatter that both shows defaults and keeps raw description text."""
    pass
class CustomArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
super().__init__(
formatter_class=lambda prog: CustomFormatter(
prog, max_help_position=40
),
*args,
**kwargs
)
# Shared base parser: common data / training / optimizer / misc options that
# concrete entry-point parsers extend (add_help is left to the children).
base_parser = argparse.ArgumentParser(description="", add_help=False)
data_args = base_parser.add_argument_group(
    "Data proportions", "Data proportions to use when " "training and validating"
)
data_args.add_argument(
    "-tdp",
    "--train_data_proportion",
    type=float,
    default=1.0,
    metavar="",
    help="Proportion of the training data to use.",
)
data_args.add_argument(
    "-ddp",
    "--dev_data_proportion",
    type=float,
    default=1.0,
    metavar="",
    help="Proportion of the validation data to use.",
)
training_args = base_parser.add_argument_group(
    "Training Hyperparameters",
    "Hyperparameters specific to "
    "the training procedure, and "
    "unrelated to the NN "
    "architecture",
)
training_args.add_argument(
    "--epochs",
    type=int,
    default=20,
    metavar="",
    help="Number of epochs for training",
)
training_args.add_argument(
    "--batch_size",
    type=int,
    default=64,
    metavar="",
    help="Size of the minibatch to use for training and" "validation",
)
# Optimizer selection and its hyperparameters.
optim_args = base_parser.add_argument_group("Optimizer parameters")
optim_args.add_argument(
    "-lr",
    "--learning_rate",
    type=float,
    default=0.1,
    metavar="",
    help="Initial learning rate",
)
optim_choices = ["sgd", "adagrad", "adadelta", "adam", "rmsprop"]
optim_args.add_argument(
    "--optim",
    type=str,
    default="sgd",
    choices=optim_choices,
    help="Optimizer to use",
)
optim_args.add_argument(
    "-gc",
    "--grad_clipping",
    type=float,
    default=5.0,
    metavar="",
    help="Gradients are clipped to this value each "
    "time step is called by the optimizer",
)
# Logging, persistence and reproducibility knobs.
misc_args = base_parser.add_argument_group("Miscellaneous")
misc_args.add_argument(
    "--write_mode",
    type=str,
    choices=Logger.WRITE_MODES,
    default="BOTH",
    help="Mode for saving hyperparameters and results",
)
misc_args.add_argument(
    "-nsm",
    "--no_save_model",
    action="store_true",
    help="Force the model not to be saved",
)
misc_args.add_argument(
    "--no_cuda",
    action="store_true",
    help="Force the use of the cpu even if a gpu is " "available",
)
misc_args.add_argument(
    "--log_interval",
    "-li",
    type=int,
    default=50,
    metavar="",
    help="Number of iterations between training loss " "loggings",
)
misc_args.add_argument(
    "--seed",
    type=int,
    default=42,
    metavar="",
    help="Random seed to be used by torch initializations",
)
|
118906
|
import string
def arcade_int_to_string(key: int, mod: int) -> str:
    """Translate an arcade key code plus modifier bitmask to a character.

    Letters honour the shift bit (bit 0 of *mod*), top-row and numpad keys
    map to '0'-'9', a subset of printable ASCII punctuation maps to itself,
    and any other code yields the empty string.
    """
    shift_held = (mod & 1) == 1
    if 97 <= key <= 122:  # a-z
        letter = chr(key)
        return letter.upper() if shift_held else letter
    if 48 <= key <= 57:  # top-row digits
        return chr(key)
    if 65456 <= key <= 65465:  # numpad digits
        return chr(key - 65456 + 48)
    if 58 <= key <= 64 or 91 <= key <= 96 or 123 <= key <= 126 or 32 <= key <= 47:
        return chr(key)  # punctuation / space ranges handled verbatim
    return ""
|
118933
|
import os
import sys
import unittest
# Make the repository root importable when running this test module directly.
relative_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if relative_path not in sys.path:
    sys.path.insert(0, relative_path)
from onesaitplatform.auth.token import Token
from onesaitplatform.auth.authclient import AuthClient
class AuthClientTest(unittest.TestCase):
    """Tests for AuthClient JSON (de)serialization, plus optional live
    login/refresh tests that are skipped unless real credentials are set."""
    def setUp(self):
        # Sample token payload as returned by the platform's oauth endpoint.
        self.example_json_token = {
            "access_token": "<KEY>",
            "token_type": "bearer",
            "refresh_token": "<KEY>",
            "expires_in": 21098,
            "scope": "openid",
            "principal": "administrator",
            "clientId": "onesaitplatform",
            "name": "administrator",
            "grantType": "password",
            "parameters": {
                "grant_type": "password",
                "vertical": None,
                "username": "administrator"
            },
            "authorities": [
                "ROLE_ADMINISTRATOR"
            ],
            "jti": "1b6862d9-98d3-4c35-a0b9-41aaebe5c24f"
        }
        # Token object mirroring the JSON payload field by field.
        self.example_token = Token()
        self.example_token.access_token = self.example_json_token['access_token']
        self.example_token.refresh_token = self.example_json_token['refresh_token']
        self.example_token.token_type = self.example_json_token['token_type']
        self.example_token.expires_in = self.example_json_token['expires_in']
        self.example_token.scope = self.example_json_token['scope']
        self.example_token.principal = self.example_json_token['principal']
        self.example_token.clientId = self.example_json_token['clientId']
        self.example_token.name = self.example_json_token['name']
        self.example_token.grantType = self.example_json_token['grantType']
        self.example_token.parameters = self.example_json_token['parameters']
        self.example_token.authorities = self.example_json_token['authorities']
        self.example_token.jti = self.example_json_token['jti']
        self.example_json_auth_client = {
            "host": "host",
            "username": "username",
            "password": "password",
            "vertical": "onesaitplatform",
            "token": self.example_json_token
        }
        self.example_auth_client = AuthClient(
            host=self.example_json_auth_client["host"],
            username=self.example_json_auth_client["username"],
            password=self.example_json_auth_client["password"])
        self.example_auth_client.token = self.example_token
        # replace with real credentials
        self.credentials = {
            "username": "<username>",
            "password": "<password>",
            "vertical": "onesaitplatform"
        }
    def test_from_json(self):
        """AuthClient.from_json restores every field, including the nested token."""
        client = AuthClient.from_json(self.example_json_auth_client)
        for key in self.example_json_auth_client.keys():
            if key == "token":
                token = Token.from_json(self.example_json_token)
                for token_key in self.example_json_auth_client[key]:
                    self.assertEqual(self.example_json_token.get(token_key), getattr(token, token_key))
            else:
                self.assertEqual(self.example_json_auth_client.get(key), getattr(client, key))
    def test_to_json(self):
        """AuthClient.to_json round-trips all attributes into the expected dict."""
        auth_json = self.example_auth_client.to_json()
        for key in self.example_json_auth_client:
            if key == "token":
                self.assertEqual(getattr(self.example_auth_client, key).to_json(), auth_json[key])
            else:
                self.assertIn(key, auth_json)
                self.assertEqual(getattr(self.example_auth_client, key), auth_json[key])
    @unittest.skip('Real credentials are necessary')
    def test_raw_login(self):
        # Live test: raw HTTP login against the lab platform.
        client = AuthClient(host="lab.onesaitplatform.com")
        client.protocol = "https"
        client.avoid_ssl_certificate = True
        res_login = client.raw_login(username=self.credentials["username"], password=self.credentials["password"])
        self.assertEqual(res_login.status_code, 200)
        self.assertIsInstance(client.token, Token)
    @unittest.skip('Real credentials are necessary')
    def test_login(self):
        # Live test: high-level login helper should yield a bearer token.
        client = AuthClient(host="lab.onesaitplatform.com")
        client.protocol = "https"
        client.avoid_ssl_certificate = True
        ok_login, res_login = client.login(username=self.credentials["username"], password=self.credentials["password"])
        self.assertTrue(ok_login)
        self.assertIsInstance(client.token, Token)
        self.assertTrue(client.token_str.startswith("bearer "))
    @unittest.skip('Real credentials are necessary')
    def test_raw_refresh(self):
        # Live test: raw token refresh after a successful login.
        client = AuthClient(host="lab.onesaitplatform.com")
        client.protocol = "https"
        client.avoid_ssl_certificate = True
        res_login = client.raw_login(username=self.credentials["username"], password=self.credentials["password"])
        if res_login.status_code == 200:
            res_refresh = client.raw_refresh()
            self.assertEqual(res_refresh.status_code, 200)
            self.assertIsInstance(client.token, Token)
    @unittest.skip('Real credentials are necessary')
    def test_refresh(self):
        # Live test: high-level refresh helper after a successful login.
        client = AuthClient(host="lab.onesaitplatform.com")
        client.protocol = "https"
        client.avoid_ssl_certificate = True
        ok_login, res_login = client.login(username=self.credentials["username"], password=self.credentials["password"])
        if ok_login:
            ok_refresh, res_refresh = client.refresh()
            self.assertTrue(ok_refresh)
            self.assertIsInstance(client.token, Token)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
118948
|
import lzma
def lzma_compress(data):
# https://svn.python.org/projects/external/xz-5.0.3/doc/lzma-file-format.txt
compressed_data = lzma.compress(
data,
format=lzma.FORMAT_ALONE,
filters=[
{
"id": lzma.FILTER_LZMA1,
"preset": 6,
"dict_size": 16 * 1024,
}
],
)
compressed_data = compressed_data[13:]
return compressed_data
def lz77_decompress(data):
    """Decompresses rwdata used to initialize variables.
    The table at address 0x0801807c has format:
        0-3 Relative offset to this elements location to the initialization function.
            Example:
                0x0801807c + (0xFFFE9617 - 0x100000000) == 0x8001693
                # 0x8001693 is the location of the bss_init_function
    The function is then passed in a pointer to the offset 4.
    The function returns a pointer to the end of the inputs it has consumed
    from the table. For example, the lz_decompress function consumes 12 bytes
    from the table. This, combined with the relative offset to the function,
    means that 16 bytes in total of the table are used.
    Header format (not included in data) are 16 bytes in a table where:
        Index
        -------------
        0-3    Relative offset of data from header
        4-7    Length of the compressed data in bytes
        8-11   Destination (in RAM) of decompressed data
    """
    index = 0
    out = bytearray()
    while index < len(data):
        opcode = data[index]
        index += 1
        # Opcode parsing: bits 0-1 literal-run length, bits 2-3 high offset
        # byte selector, bits 4-7 back-reference pattern length.
        direct_len = opcode & 0x03
        offset_256 = (opcode >> 2) & 0x03
        pattern_len = opcode >> 4
        if direct_len == 0:
            # Extended literal run: length comes from the next byte (+3).
            direct_len = data[index] + 3
            index += 1
        assert direct_len > 0
        # Encoded value is one greater than the actual number of literals.
        direct_len -= 1
        if pattern_len == 0xF:
            # Extended pattern length from the next byte.
            pattern_len += data[index]
            # pattern_len (not including the +2) can be in range [0, 270]
            index += 1
        # Direct Copy
        for _ in range(direct_len):
            out.append(data[index])
            index += 1
        # Pattern
        if pattern_len > 0:
            offset_add = data[index]
            index += 1
            if offset_256 == 0x03:
                # Selector 3 means a second byte carries offset bits 8-15.
                offset_256 = data[index]
                index += 1
            offset = offset_add + offset_256 * 256
            # offset can be in range [0, 0xffff]
            # +2 because anything shorter wouldn't be a pattern.
            for _ in range(pattern_len + 2):
                out.append(out[-offset])
    return out
|
118987
|
import sys
import os
from pathlib import Path
# parameter handling
# Moves a module that lives under pycqed/ into deprecated/, preserving its
# relative path, via `git mv`.  Usage: python <script> pycqed/some/module.py
import subprocess

if len(sys.argv) > 1:
    path = sys.argv[1]
else:
    raise RuntimeError("missing argument")

src = Path(path)
if not src.exists():
    raise RuntimeError("path does not exist")
if src.parts[0] != "pycqed":
    raise RuntimeError("path should start with 'pycqed'")

# Mirror the source's parent directory below deprecated/.
dst = Path('deprecated') / src.parent
print(f"mkdir {str(dst)}")
dst.mkdir(parents=True, exist_ok=True)

cmd = f"git mv {str(src)} {str(dst)}"
print(cmd)
# BUGFIX: use an argument list instead of a shell string (os.system) so paths
# containing spaces or shell metacharacters cannot break or inject into the
# command; the return code is ignored, as before.
subprocess.run(["git", "mv", str(src), str(dst)], check=False)
|
118990
|
from Bio import AlignIO
def get_id_from_tag(alignment, tag):
    """Return the index of the first record in *alignment* whose id equals *tag*.

    Raises LookupError when no record matches.
    """
    for position, record in enumerate(alignment):
        if record.id == tag:
            return position
    raise LookupError("invalid tag specified")
def find_gaps(alignment, tag):
    """Return (start, end) tuples, 1-based inclusive, for every run of gap
    characters '-' in the sequence whose id equals *tag*.

    Fixes over the original implementation:
    * valid Python 3 (the debug ``print`` used Python 2 statement syntax and
      has been removed);
    * a gap that runs to the end of the sequence is now reported;
    * ``is None`` identity check instead of ``== None``.

    Raises LookupError when no record with the given id exists.
    """
    sequence = None
    # As far as I can tell an alignment can only be iterated, not looked up
    # by tag directly.
    for record in alignment:
        if record.id == tag:
            sequence = record
            break
    if sequence is None:
        raise LookupError("invalid tag specified")
    gaplist = []
    gap_start = None  # 1-based position where the current gap run began
    resid = 1
    for residue in sequence:
        if residue == '-':
            if gap_start is None:  # first position of a new gap
                gap_start = resid
        elif gap_start is not None:  # gap ended at the previous residue
            gaplist.append((gap_start, resid - 1))
            gap_start = None
        resid += 1
    if gap_start is not None:  # trailing gap reaching the sequence end
        gaplist.append((gap_start, resid - 1))
    return gaplist
|
119003
|
import logging
from datetime import datetime
from django.db import models
from django.contrib.auth.models import User
from wouso.core.common import Item, CachedItem
from wouso.core.decorators import cached_method, drop_cache
from wouso.core.game import get_games
from wouso.core.game.models import Game
class Coin(CachedItem, models.Model):
    """ Different scoring categories.
    A special coin is 'points' since is used for ladder and levels.
    """
    # Cache entries are keyed by the coin's name.
    CACHE_PART = 'name'
    name = models.CharField(max_length=100, unique=True)
    # The coin owner module, or null if is a core coin
    owner = models.ForeignKey(Game, blank=True, null=True)
    title = models.CharField(max_length=100)
    # If the coin values are forced integers, else using float.
    integer = models.BooleanField(default=False, blank=True)
    def is_core(self):
        """ A coin is a core coin, if it doesn't have an owner """
        return self.owner is None
    def format_value(self, amount):
        """Round to int for integer coins; pass floats through unchanged."""
        if self.integer:
            return int(round(amount))
        return amount
    def __unicode__(self):
        # Fall back to the internal name when no display title is set.
        return self.title or self.name
class Formula(Item, models.Model):
    """ Define the way coin amounts are given to the user, based
    on keyword arguments formulas.
    A formula is owned by a game, or by the system (set owner to None)
    """
    name = models.CharField(max_length=100, unique=True)
    expression = models.CharField(max_length=1000, default='')
    owner = models.ForeignKey(Game, null=True, blank=True)
    description = models.CharField(max_length=500, default='')
    @classmethod
    def get(cls, id_string, default_string=None):
        """ Look up a formula by name; when no formula named *id_string*
        exists, fall back to the formula named *default_string*.
        """
        if not default_string:
            return super(Formula, cls).get(id_string)
        try:
            return Formula.objects.get(name=id_string)
        except cls.DoesNotExist:
            return super(Formula, cls).get(default_string)
class History(models.Model):
    """ Scoring history keeps track of scoring events per user, saving
    the details from source to amount.
    """
    # NOTE(review): datetime.now produces naive timestamps -- confirm the
    # project runs with USE_TZ disabled.
    timestamp = models.DateTimeField(default=datetime.now, blank=True)
    user = models.ForeignKey(User)
    game = models.ForeignKey(Game, blank=True, null=True, default=None)
    # this is reserved for further use/debugging
    external_id = models.IntegerField(default=0, null=True, blank=True)
    formula = models.ForeignKey(Formula, blank=True, null=True, default=None)
    coin = models.ForeignKey(Coin)
    amount = models.FloatField(default=0)
    percents = models.IntegerField(default=100)
    # group same kind of bonuses together, using the same formula
    tag = models.CharField(max_length=64, blank=True, null=True)
    @classmethod
    def add(cls, user=None, game=None, **kwargs):
        """Create a history entry and invalidate the user's cached totals."""
        ret = History.objects.create(user=user, game=game, **kwargs)
        drop_cache(cls._user_points, user=user)
        drop_cache(cls._user_coins, user=user)
        return ret
    @classmethod
    def user_coins(cls, user):
        """Cached {coin name: amount} totals for *user*."""
        return cls._user_coins(user=user)
    @classmethod
    def user_points(cls, user):
        """Cached per-game point distribution for *user*."""
        return cls._user_points(user=user)
    @staticmethod
    @cached_method
    def _user_coins(user):
        """ Returns a dictionary of coins and amounts for a specific user. """
        allcoins = Coin.objects.all()
        coins = {}
        for coin in allcoins:
            hs = History.objects.filter(user=user, coin=coin).aggregate(total=models.Sum('amount'))
            if hs['total'] is not None:
                coins[coin.name] = coin.format_value(hs['total'])
            else:
                # Core coins always appear, even with a zero balance.
                if coin.is_core():
                    coins[coin.name] = 0
        return coins
    @staticmethod
    @cached_method
    def _user_points(user):
        """ :return: a list of (game, points) - distribution of points per source """
        points = {}
        for game in get_games() + [None]:
            pp = History.user_points_from_game(user=user, game=game, zeros=False)
            if pp:
                if game:
                    points[game.get_instance().verbose_name] = pp
                else:
                    # None bucket collects core (non-game) scoring events.
                    points['wouso'] = pp
        return points
    @staticmethod
    def user_points_from_game(user, game, zeros=True):
        # FIXME: add test
        game = game.get_instance() if game else game
        hs = History.objects.filter(user=user, game=game)
        pp = {}
        if zeros:
            # Pre-seed every coin with 0 so absent coins still show up.
            for c in Coin.objects.all():
                pp[c.name] = 0
        for h in hs:
            pp[h.coin.name] = pp.get(h.coin.name, 0) + h.amount
        return pp
    def delete(self, using=None):
        # Keep the cached totals consistent when an entry is removed.
        cls = self.__class__
        drop_cache(cls._user_points, self.user)
        drop_cache(cls._user_coins, self.user)
        super(History, self).delete(using=using)
    def __unicode__(self):
        return "{user} {date}-{formula}[{ext}]: {amount}{coin}".format(user=self.user, date=self.timestamp, formula=self.formula, ext=self.external_id, amount=self.amount, coin=self.coin)
|
119004
|
from .flow.models import _META_ARCHITECTURES as _FLOW_META_ARCHITECTURES
from .stereo.models import _META_ARCHITECTURES as _STEREO_META_ARCHITECTURES
# Registry mapping meta-architecture names to model classes, merging the
# flow and stereo registries (stereo entries win on name clashes).
_META_ARCHITECTURES = {**_FLOW_META_ARCHITECTURES, **_STEREO_META_ARCHITECTURES}
def build_model(cfg):
    """Instantiate the model class registered under ``cfg.model.meta_architecture``.

    Raises KeyError for an unregistered architecture name.
    """
    meta_arch = _META_ARCHITECTURES[cfg.model.meta_architecture]
    return meta_arch(cfg)
|
119026
|
import torch
def kl_divergence(mu, sigma, mu_prior, sigma_prior):
    """Summed KL divergence KL(N(mu, sigma) || N(mu_prior, sigma_prior))
    between diagonal Gaussians, element-wise over the input tensors."""
    log_ratio = 2 * torch.log(sigma_prior / sigma)
    var_ratio = (sigma / sigma_prior).pow(2)
    mean_term = ((mu_prior - mu) / sigma_prior).pow(2)
    return 0.5 * (log_ratio - 1 + var_ratio + mean_term).sum()
def softplusinv(x):
    """Inverse of softplus: returns y with log(1 + exp(y)) == x (requires x > 0).

    Computed as x + log(1 - exp(-x)), which -- unlike the naive
    log(exp(x) - 1) -- does not overflow to inf for large x.
    """
    return x + torch.log1p(-torch.exp(-x))
|
119038
|
def create_model(opt):
    """Factory: build and initialize the GAN selected by ``opt.model``.

    Also forces the dataset mode matching the model (unaligned for cGAN,
    aligned for pGAN).  Raises NotImplementedError for unknown model names.
    """
    model = None
    print(opt.model)
    #Set dataset mode and load models based on the selected model (pGAN or cGAN)
    if opt.model == 'cGAN':
        opt.dataset_mode = 'unaligned_mat'
        from .cgan_model import cGAN
        model = cGAN()
    elif opt.model == 'pGAN':
        opt.dataset_mode = 'aligned_mat'
        from .pgan_model import pGAN
        model = pGAN()
    else:
        raise NotImplementedError('model [%s] not implemented.' % opt.model)
    #Initizlize the model based on the arguments
    model.initialize(opt)
    print("model %s was created" % (model.name()))
    return model
|
119053
|
from IPython.display import HTML
from jupyter_client import find_connection_file
from tornado.escape import url_escape
from tornado.httpclient import HTTPClient
import collections
import intrusion
import json
import ndstore
import neuroglancer
# volumes of all viewer instances
volumes = {}  # maps volume token -> volume object; shared across all Viewers
class Viewer(neuroglancer.BaseViewer):
    """Jupyter-notebook front-end for a neuroglancer viewer: renders the
    viewer in an iframe and registers volumes with the notebook server."""

    def __init__(self):
        self.layers = collections.OrderedDict()
        # Server hosting the notebook; override via set_hostname().
        self.hostname = 'localhost:8888'
        self.large = False
        super(Viewer, self).__init__()

    def set_large(self, large=True):
        """Let the viewer span the whole width of the browser window.
        This will resize your IPython column to the same width.
        """
        self.large = large

    def set_hostname(self, hostname):
        """Set the name of the server running the Jupyter Notebook.
        Defaults to "localhost:8888". Change this if you use a remote server.
        """
        self.hostname = hostname

    def show(self):
        """Show the viewer.
        """
        viewer_url = self.get_server_url() + '/neuroglancer' + '#!' + self.get_encoded_state()
        large_html = "<style>.container { width:100% !important; }</style>" if self.large else ""
        # BUGFIX: the closing tag used to be the malformed "<\iframe>"
        # (also an invalid "\i" escape); emit a proper </iframe>.
        return HTML(large_html + "<iframe src=\"" + viewer_url + "\" width=\"100%\" height=\"1024px\"></iframe>")

    def register_volume(self, volume):
        """Register *volume* globally and announce its token to the Jupyter
        server, so the neuroglancer handler can route data requests for it."""
        # globally register volume
        global volumes
        volumes[volume.token] = volume
        # globally register kernel client for this volume in the Jupyter server
        cf = url_escape(find_connection_file())
        http_client = HTTPClient()
        try:
            response = http_client.fetch(self.get_server_url() + '/register_token/' + volume.token.decode('utf8') + '/' + cf)
        except Exception as e:
            raise RuntimeError("could not register token: " + str(e))
        http_client.close()

    def get_server_url(self):
        return 'http://' + self.hostname
|
119083
|
import gym
from tf_rl.common.wrappers import CartPole_Pixel
# Smoke-test the CartPole_Pixel wrapper: run two short random-action episodes
# and print the pixel observation's shape and value range each step.
env = CartPole_Pixel(gym.make('CartPole-v0'))
for ep in range(2):
    env.reset()
    for t in range(100):
        o, r, done, _ = env.step(env.action_space.sample())
        print(o.shape, o.min(), o.max())
        if done:
            break
env.close()
|
119088
|
from __future__ import annotations
from typing import Optional, TypeVar, Union
import numpy as np
from typing_extensions import Final
from ...representation import FData
from ...representation._typing import NDArrayFloat
from .._math import cosine_similarity, cosine_similarity_matrix
from ._utils import pairwise_metric_optimization
T = TypeVar("T", bound=Union[NDArrayFloat, FData])
class AngularDistance():
    r"""
    Calculate the angular distance between two objects.
    For each pair of observations x and y the angular distance between them is
    defined as the normalized "angle" between them:
    .. math::
        d(x, y) = \frac{\arccos \left(\frac{\langle x, y \rangle}{
        \sqrt{\langle x, x \rangle \langle y, y \rangle}} \right)}{\pi}
    where :math:`\langle {}\cdot{}, {}\cdot{} \rangle` is the inner product.
    This distance is defined in the interval [0, 1].
    Args:
        e1: First object.
        e2: Second object.
    Returns:
        Numpy vector where the i-th coordinate has the angular distance between
        the i-th element of the first object and the i-th element of the second
        one.
    Examples:
        Computes the angular distances between an object containing functional
        data corresponding to the functions y = 1 and y = x defined over the
        interval [0, 1] and another ones containing data of the functions y
        = 0 and y = x/2. The result then is an array of size 2 with the
        computed l2 distance between the functions in the same position in
        both.
        >>> import skfda
        >>> import numpy as np
        >>>
        >>> x = np.linspace(0, 1, 1001)
        >>> fd = skfda.FDataGrid([np.ones(len(x)), x], x)
        >>> fd2 = skfda.FDataGrid([2*np.ones(len(x)), np.cos(x)], x)
        >>>
        >>> skfda.misc.metrics.angular_distance(fd, fd2).round(2)
        array([ 0. , 0.22])
    """
    def __call__(
        self,
        e1: T,
        e2: T,
    ) -> NDArrayFloat:
        """Compute the distance."""
        # arccos of the normalized inner product, scaled to [0, 1].
        return np.arccos(cosine_similarity(e1, e2)) / np.pi
    def __repr__(self) -> str:
        return (
            f"{type(self).__name__}()"
        )
# Singleton instance used as the default angular distance metric.
angular_distance: Final = AngularDistance()
@pairwise_metric_optimization.register
def _pairwise_metric_optimization_angular(
    metric: AngularDistance,
    elem1: Union[NDArrayFloat, FData],
    elem2: Optional[Union[NDArrayFloat, FData]],
) -> NDArrayFloat:
    # Vectorized pairwise version: one arccos over the full similarity matrix.
    return np.arccos(cosine_similarity_matrix(elem1, elem2)) / np.pi
|
119111
|
import os
import sqlite3
import json
import datetime
from shutil import copyfile
from werkzeug._compat import iteritems, to_bytes, to_unicode
from jam.third_party.filelock import FileLock
import jam
# Columns of the JAM_LANGS/SYS_LANGS tables that identify a language.
LANG_FIELDS = ['id', 'f_name', 'f_language', 'f_country', 'f_abr', 'f_rtl']
# Columns holding locale conventions (number/currency/date formatting).
LOCALE_FIELDS = [
    'f_decimal_point', 'f_mon_decimal_point',
    'f_mon_thousands_sep', 'f_currency_symbol', 'f_frac_digits', 'f_p_cs_precedes',
    'f_n_cs_precedes', 'f_p_sep_by_space', 'f_n_sep_by_space', 'f_positive_sign',
    'f_negative_sign', 'f_p_sign_posn', 'f_n_sign_posn', 'f_d_fmt', 'f_d_t_fmt'
]
# Full column list used when copying language rows between databases.
FIELDS = LANG_FIELDS + LOCALE_FIELDS
def lang_con(task):
    """Open a sqlite3 connection to the task's language DB (langs.sqlite)."""
    return sqlite3.connect(os.path.join(task.work_dir, 'langs.sqlite'))
def execute(task, sql, params=None):
    """Execute one modifying statement on langs.sqlite and commit.

    Args:
        task: object exposing ``work_dir`` (directory holding langs.sqlite).
        sql: statement to run; may contain ``?`` placeholders.
        params: optional sequence of values bound to the placeholders.
    """
    con = lang_con(task)
    try:
        cursor = con.cursor()
        if params:
            cursor.execute(sql, params)
        else:
            cursor.execute(sql)
        con.commit()
    except Exception:
        # Print the offending SQL for diagnosis, then re-raise as-is.
        # (Previously this raised a new bare Exception(e), which lost the
        # original exception type and traceback.)
        print(sql)
        raise
    finally:
        con.close()
def select(task, sql):
    """Run a read-only query on langs.sqlite; return all rows as a list."""
    result = None
    con = lang_con(task)
    try:
        cursor = con.cursor()
        cursor.execute(sql)
        result = cursor.fetchall()
        # No writes expected: roll back to release the implicit transaction.
        con.rollback()
    except Exception:
        print(sql)
        raise
    finally:
        con.close()
    return result
def copy_table(cursor, name):
    """Recreate SYS_<name> in the main DB as a copy of LANGS.JAM_<name>.

    The CREATE TABLE statement is read from the attached LANGS database's
    sqlite_master and rewritten with the SYS_ prefix, then all rows are
    copied over. Any existing SYS_<name> table is dropped first.
    """
    src = 'JAM_%s' % name
    dst = 'SYS_%s' % name
    cursor.execute('DROP TABLE IF EXISTS %s' % dst)
    cursor.execute("SELECT sql FROM LANGS.sqlite_master WHERE type='table' AND name='%s'" % src)
    create_sql = cursor.fetchone()[0]
    cursor.execute(create_sql.replace(src, dst))
    cursor.execute('INSERT INTO %s SELECT * FROM LANGS.%s' % (dst, src))
def update_langs(task):
    """Synchronise the app DB's SYS_LANGS with the JAM_LANGS shipped with jam.

    When the framework version changes (or langs.sqlite is missing), the
    bundled langs.sqlite is copied into the task's work dir; languages new
    in JAM_LANGS are inserted into SYS_LANGS and languages no longer shipped
    are deleted. Runs under the task-level '$langs' lock.
    """
    with task.lock('$langs'):
        con = task.create_connection()
        try:
            cursor = con.cursor()
            try:
                # Older schemas may lack this column; adding it is best-effort.
                cursor.execute('ALTER TABLE SYS_PARAMS ADD COLUMN F_JAM_VERSION TEXT')
            except:
                pass
            cursor.execute('SELECT F_JAM_VERSION, F_LANGUAGE FROM SYS_PARAMS')
            res = cursor.fetchall()
            version = res[0][0]
            language = res[0][1]
            langs_path = os.path.join(task.work_dir, 'langs.sqlite')
            if version != task.app.jam_version or not os.path.exists(langs_path):
                # ~ task.log.info('Version changed!')
                # Refresh the bundled language DB shipped with the jam package.
                copyfile(os.path.join(os.path.dirname(jam.__file__), 'langs.sqlite'), langs_path)
                os.chmod(os.path.join(task.work_dir, 'langs.sqlite'), 0o666)
            cursor.execute('SELECT ID, F_NAME FROM SYS_LANGS')
            langs = cursor.fetchall()
            langs_list = []
            langs_dict = {}
            # Map of currently-known languages: name -> id.
            for l in langs:
                langs_list.append(l[1])
                langs_dict[l[1]] = l[0]
            res = select(task, 'SELECT %s FROM JAM_LANGS ORDER BY ID' % ', '.join(FIELDS))
            for r in res:
                # r[1] is f_name; anything left in langs_dict afterwards is
                # a SYS_LANGS entry no longer shipped and will be deleted.
                if langs_dict.get(r[1]):
                    del langs_dict[r[1]]
                if not r[1] in langs_list:
                    # New language: copy every field except id (r[0]).
                    fields = ['DELETED']
                    values = ['?']
                    field_values = [0]
                    for i, value in enumerate(r):
                        if i > 0:
                            fields.append(FIELDS[i])
                            values.append('?')
                            field_values.append(value)
                    sql = "INSERT INTO SYS_LANGS (%s) VALUES (%s)" % (','.join(fields), ','.join(values))
                    cursor.execute(sql, (field_values))
            del_langs = list(langs_dict.values())
            if len(del_langs):
                # If the currently selected language is being removed,
                # fall back to the default language (id 1).
                if language in del_langs:
                    language = 1
                sql = "DELETE FROM SYS_LANGS WHERE ID IN (%s)" % ','.join([str(d) for d in del_langs])
                cursor.execute(sql)
            if language is None:
                language = 'NULL'
            cursor.execute("UPDATE SYS_PARAMS SET F_JAM_VERSION='%s', F_LANGUAGE=%s" % (task.app.jam_version, language))
            con.commit()
        finally:
            con.close()
def init_locale():
    """Build a LOCALE_FIELDS dict from the host OS locale settings.

    Each value is taken from ``locale.localeconv()``; anything unavailable
    falls back to jam.common.DEFAULT_LOCALE. Failures are deliberately
    swallowed so this always returns a usable dict.
    """
    import locale
    result = {}
    try:
        locale.setlocale(locale.LC_ALL, '')
        loc = locale.localeconv()
        for field in LOCALE_FIELDS:
            # Strip the 'f_' prefix to get the localeconv key name.
            setting = field[2:]
            try:
                result[field] = to_unicode(loc[setting], 'utf-8')
            except:
                result[field] = jam.common.DEFAULT_LOCALE[setting.upper()]
    except:
        # Locale unavailable: leave per-field values unset (best effort).
        pass
    try:
        # nl_langinfo is not available on all platforms (e.g. Windows).
        result['f_d_fmt'] = locale.nl_langinfo(locale.D_FMT)
    except:
        result['f_d_fmt'] = '%Y-%m-%d'
    result['f_d_t_fmt'] = '%s %s' % (result['f_d_fmt'], '%H:%M')
    return result
def get_lang_dict(task, language):
    """Return {keyword: value} for *language*.

    Blank values for the requested language fall back to the default
    language (id 1) through the SQL CASE expression.
    """
    rows = select(task, '''
        SELECT K.F_KEYWORD,
        CASE WHEN TRIM(V1.F_VALUE) <> ''
        THEN V1.F_VALUE
        ELSE V2.F_VALUE
        END
        FROM JAM_LANG_KEYS AS K
        LEFT OUTER JOIN JAM_LANG_VALUES AS V1 ON (K.ID = V1.F_KEY AND V1.F_LANG = %s)
        LEFT OUTER JOIN JAM_LANG_VALUES AS V2 ON (K.ID = V2.F_KEY AND V2.F_LANG = %s)
        ''' % (language, 1))
    return {keyword: value for keyword, value in rows}
def get_locale_dict(task, language):
    """Return the locale settings of *language* from SYS_LANGS.

    Keys are LOCALE_FIELDS names with the 'f_' prefix stripped and upper-cased
    (matching jam.common.DEFAULT_LOCALE). On any failure — including an
    unknown language id — the default locale is returned instead.
    """
    result = {}
    con = task.create_connection()
    try:
        cursor = con.cursor()
        cursor.execute('SELECT %s FROM SYS_LANGS WHERE ID=%s' % (', '.join(LOCALE_FIELDS), language))
        res = cursor.fetchall()
        if len(res):
            for i, field in enumerate(LOCALE_FIELDS):
                # 'f_decimal_point' -> 'DECIMAL_POINT', etc.
                result[field[2:].upper()] = res[0][i]
        else:
            raise Exception('Language with id %s is not found' % language)
        con.rollback()
    except:
        # Fall back to framework defaults on any error.
        result = jam.common.DEFAULT_LOCALE
    finally:
        con.close()
    return result
def get_translation(task, lang1, lang2):
    """Return rows (key id, keyword, value in lang1, value in lang2)."""
    sql = '''
        SELECT K.ID, K.F_KEYWORD, V1.F_VALUE, V2.F_VALUE
        FROM JAM_LANG_KEYS AS K
        LEFT OUTER JOIN JAM_LANG_VALUES AS V1 ON (K.ID = V1.F_KEY AND V1.F_LANG = %s)
        LEFT OUTER JOIN JAM_LANG_VALUES AS V2 ON (K.ID = V2.F_KEY AND V2.F_LANG = %s)
        ''' % (lang1, lang2)
    return select(task, sql)
def add_lang(task, lang_id, language, country, name, abr, rtl, copy_lang):
    """Create a new language row in JAM_LANGS, seeded from the OS locale.

    If *copy_lang* is set, all translation values of that language are
    duplicated for the new one. The matching SYS_LANGS record (accessed
    through task.sys_langs) is then updated with the same locale values.
    """
    con = lang_con(task)
    try:
        cursor = con.cursor()
        locale = init_locale()
        fields = []
        values = []
        field_values = []
        for key, value in iteritems(locale):
            fields.append(key)
            values.append('?')
            field_values.append(to_unicode(value, 'utf-8'))
        cursor.execute("INSERT INTO JAM_LANGS (ID, F_LANGUAGE, F_COUNTRY, F_NAME, F_ABR, F_RTL, %s) VALUES (?,?,?,?,?,?,%s)" % (','.join(fields), ','.join(values)),
            ([lang_id, language, country, name, abr, rtl] + field_values))
        if copy_lang:
            # Copy every translation value of the source language.
            cursor.execute('''
                SELECT JAM_LANG_KEYS.ID, F_VALUE
                FROM JAM_LANG_VALUES LEFT OUTER JOIN JAM_LANG_KEYS ON JAM_LANG_KEYS.ID = JAM_LANG_VALUES.F_KEY
                WHERE F_LANG = %s
                ''' % copy_lang)
            res = cursor.fetchall()
            recs = []
            for key_id, value in res:
                recs.append((key_id, lang_id, value))
            cursor.executemany("INSERT INTO JAM_LANG_VALUES(F_KEY, F_LANG, F_VALUE) VALUES (?,?,?)", recs)
        con.commit()
        # Mirror the locale values into the application's SYS_LANGS record.
        langs = task.sys_langs.copy()
        langs.set_where(id=lang_id)
        langs.open()
        if langs.record_count():
            langs.edit()
            for key, value in iteritems(locale):
                langs.field_by_name(key).value = to_unicode(value, 'utf-8')
            langs.post()
            langs.apply()
    finally:
        con.close()
def save_lang_field(task, lang_id, field_name, value):
    """Persist one language field in both JAM_LANGS and SYS_LANGS.

    NOTE(review): field_name is interpolated into the SQL text (column names
    cannot be bound) — callers are expected to pass trusted field names.
    """
    execute(task, 'UPDATE JAM_LANGS SET %s=? WHERE ID=%s' % (field_name, lang_id), (value,))
    con = task.create_connection()
    try:
        cursor = con.cursor()
        cursor.execute('UPDATE SYS_LANGS SET %s=? WHERE ID=%s' % (field_name, lang_id), (value,))
        con.commit()
    finally:
        con.close()
    # Reload translations if the currently active language was modified.
    if task.language == lang_id:
        task.update_lang(lang_id)
def save_translation(task, lang_id, key_id, value):
    """Insert or update the translation of one keyword for one language."""
    rows = select(task, 'SELECT ID FROM JAM_LANG_VALUES WHERE F_LANG=%s AND F_KEY=%s' % (lang_id, key_id))
    if rows:
        execute(task, 'UPDATE JAM_LANG_VALUES SET F_VALUE=? WHERE ID=%s' % (rows[0][0]), (value,))
    else:
        execute(task, 'INSERT INTO JAM_LANG_VALUES (F_LANG, F_KEY, F_VALUE) VALUES (?, ?, ?)', (lang_id, key_id, value))
def add_key(task, key):
    """Add a new translation keyword.

    Args:
        task: object exposing ``work_dir``.
        key: keyword text to insert.

    Returns:
        '' on success, or 'Keyword exists' if the keyword is already present.
    """
    result = ''
    con = lang_con(task)
    try:
        cursor = con.cursor()
        # Bind the keyword instead of interpolating it into the SQL text:
        # the old string-formatted query broke (and was injectable) for
        # keywords containing a single quote.
        cursor.execute("SELECT ID FROM JAM_LANG_KEYS WHERE F_KEYWORD=?", (key,))
        res = cursor.fetchall()
        if len(res):
            result = 'Keyword exists'
        else:
            cursor.execute('INSERT INTO JAM_LANG_KEYS (F_KEYWORD) VALUES (?)', (key,))
            con.commit()
    finally:
        con.close()
    return result
def del_key(task, key_id):
    """Delete a keyword and all its translation values.

    Returns:
        True once both deletes committed (exceptions propagate after the
        connection is closed).
    """
    result = False
    con = lang_con(task)
    try:
        cursor = con.cursor()
        # Bind the id rather than formatting it into the statement, matching
        # the parameterized style used elsewhere in this module.
        cursor.execute("DELETE FROM JAM_LANG_VALUES WHERE F_KEY=?", (key_id,))
        cursor.execute("DELETE FROM JAM_LANG_KEYS WHERE ID=?", (key_id,))
        con.commit()
        result = True
    finally:
        con.close()
    return result
def get_dict(task, language):
    """Return {keyword: value} with every translation of *language*."""
    rows = select(task, '''
        SELECT JAM_LANG_KEYS.F_KEYWORD, F_VALUE
        FROM JAM_LANG_VALUES LEFT OUTER JOIN JAM_LANG_KEYS ON JAM_LANG_KEYS.ID = JAM_LANG_VALUES.F_KEY
        WHERE F_LANG = %s
        ''' % language)
    return dict(rows)
def export_lang(task, lang_id, host):
    """Serialize a language and its translations for download.

    Returns a dict with a timestamped 'file_name' and JSON 'content', or
    None when the language id does not exist. The *host* parameter is
    unused but kept for interface compatibility.
    """
    names = FIELDS[1:]
    rows = select(task, 'SELECT %s FROM JAM_LANGS WHERE ID=%s' % (', '.join(names), lang_id))
    if not rows:
        return None
    language = dict(zip(names, rows[0]))
    translation = get_dict(task, lang_id)
    content = json.dumps({'language': language, 'translation': translation})
    name = language['f_name'].replace(' ', '_')
    file_name = '%s_%s.lang' % (name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    return {'file_name': file_name, 'content': content}
def import_lang(task, file_path):
    """Import a language file previously produced by export_lang.

    Updates the language row (JAM_LANGS and SYS_LANGS) if a language with
    the same f_language/f_country exists, otherwise inserts it, then
    upserts every translation value.

    NOTE(review): *error* is assigned on failure but the function never
    returns it — callers currently always receive None; confirm intent.
    """
    error = ''
    try:
        with open(file_path, 'r') as f:
            content = to_unicode(f.read(), 'utf-8')
            content = json.loads(content)
            language = content['language']
            translation = content['translation']
            con = lang_con(task)
            sys_con = task.create_connection()
            try:
                cursor = con.cursor()
                cursor.execute('SELECT ID FROM JAM_LANGS WHERE F_LANGUAGE=%s AND F_COUNTRY=%s' % (language['f_language'], language['f_country']))
                res = cursor.fetchall()
                if len(res):
                    # Existing language: update both databases in place.
                    lang_id = res[0][0]
                    fields = []
                    field_values = []
                    for key, value in iteritems(language):
                        fields.append('%s=?' % key)
                        field_values.append(value)
                    fields = ',' .join(fields)
                    cursor.execute("UPDATE JAM_LANGS SET %s WHERE ID=%s" % (fields, lang_id), field_values)
                    sys_cursor = sys_con.cursor()
                    sys_cursor.execute("UPDATE SYS_LANGS SET %s WHERE ID=%s" % (fields, lang_id), field_values)
                    sys_con.commit()
                else:
                    # New language: insert into JAM_LANGS, read back its id,
                    # then mirror the row (plus DELETED=0) into SYS_LANGS.
                    fields = []
                    values = []
                    field_values = []
                    for key, value in iteritems(language):
                        fields.append(key)
                        field_values.append(value)
                        values.append('?')
                    cursor.execute('INSERT INTO JAM_LANGS (%s) VALUES (%s)' % (','.join(fields), ','.join(values)), field_values)
                    cursor.execute('SELECT ID FROM JAM_LANGS WHERE F_LANGUAGE=%s AND F_COUNTRY=%s' % (language['f_language'], language['f_country']))
                    res = cursor.fetchall()
                    lang_id = res[0][0]
                    fields.append('DELETED')
                    values.append('?')
                    field_values.append(0)
                    sys_cursor = sys_con.cursor()
                    sys_cursor.execute('INSERT INTO SYS_LANGS (%s) VALUES (%s)' % (','.join(fields), ','.join(values)), field_values)
                    sys_con.commit()
                if lang_id:
                    # Upsert each imported translation, matching on keyword.
                    cursor.execute('SELECT ID, F_KEYWORD FROM JAM_LANG_KEYS')
                    res = cursor.fetchall()
                    keys = {}
                    for r in res:
                        keys[r[1]] = r[0]
                    recs = []  # NOTE(review): never used — dead variable.
                    for keyword, value in iteritems(translation):
                        key_id = keys.get(keyword)
                        if key_id:
                            cursor.execute('SELECT ID FROM JAM_LANG_VALUES WHERE F_LANG=%s AND F_KEY=%s' % (lang_id, key_id))
                            res = cursor.fetchall()
                            if len(res):
                                cursor.execute('UPDATE JAM_LANG_VALUES SET F_VALUE=? WHERE ID=%s' % (res[0][0]), (value,))
                            else:
                                cursor.execute('INSERT INTO JAM_LANG_VALUES (F_LANG, F_KEY, F_VALUE) VALUES (?, ?, ?)', (lang_id, key_id, value))
                    con.commit()
            finally:
                con.close()
                sys_con.close()
    except Exception as e:
        print(e)
        error = 'Can not import language'
|
119221
|
from speculator.features.RSI import RSI
import unittest
class RSITest(unittest.TestCase):
    """Unit tests for the RSI (Relative Strength Index) feature helpers."""
    def test_eval_rs(self):
        # Expected 2.746 == sum(gains) / sum(losses) = 3.68 / 1.34.
        gains = [0.07, 0.73, 0.51, 0.28, 0.34, 0.43, 0.25, 0.15, 0.68, 0.24]
        losses = [0.23, 0.53, 0.18, 0.40]
        self.assertAlmostEqual(RSI.eval_rs(gains, losses), 2.746, places=3)
    def test_eval_algorithm(self):
        # Expected 73.307 == 100 - 100 / (1 + 2.746), i.e. RSI from the RS above.
        gains = [0.07, 0.73, 0.51, 0.28, 0.34, 0.43, 0.25, 0.15, 0.68, 0.24]
        losses = [0.23, 0.53, 0.18, 0.40]
        self.assertAlmostEqual(RSI.eval_algorithm(gains, losses),
            73.307, places=3)
|
119260
|
import laurelin
def test_1_brackets():
    # Nested and sequential pairs, all closed in order.
    assert laurelin.balanced_brackets("[[]]({}[])") == True
def test_2_brackets():
    # Two square brackets are opened but never closed.
    assert laurelin.balanced_brackets("[[({}[])") == False
def test_3_brackets():
    # The empty string is trivially balanced.
    assert laurelin.balanced_brackets("") == True
def test_4_brackets():
    # Brackets mixed with arbitrary other characters.
    assert laurelin.balanced_brackets("(5 * 3) + [10 / {2}]") == True
def test_5_brackets():
    # Only closing brackets, no openers.
    assert laurelin.balanced_brackets(")]})]}") == False
def test_6_brackets():
    # Only opening brackets, no closers.
    assert laurelin.balanced_brackets("([{(((") == False
def test_7_brackets():
    # Text with no brackets at all counts as balanced.
    assert laurelin.balanced_brackets("no brackets at all") == True
def test_8_brackets():
    # Angle brackets are not tracked as brackets.
    assert laurelin.balanced_brackets(">>> (<> are not brackets) >>>") == True
def test_9_brackets():
    # Slashes and pipes inside a balanced pair are ignored.
    assert laurelin.balanced_brackets("[///\\|||]") == True
def test_10_brackets():
    # A lone '(' among punctuation noise is unbalanced.
    assert laurelin.balanced_brackets("!@#$%%^&*(;',.<>?/\|~`'") == False
|
119274
|
import torch
import numpy as np
from bc.dataset.dataset_lmdb import DatasetReader
from sim2real.augmentation import Augmentation
from sim2real.transformations import ImageTransform
# Number of image planes each channel type contributes to a stacked frame.
CHANNEL2SPAN = {'depth': 1, 'rgb': 3, 'mask': 1}
class Frames:
    """Dataset adapter that turns LMDB frame records into normalized tensors.

    Wraps a DatasetReader and, per __getitem__, stacks the requested
    channels, applies the configured augmentation (depth-only), crops to a
    fixed output size and maps pixel values into [-1, 1].
    """
    def __init__(self,
                 path,
                 channels=('depth', ),
                 limit='',
                 max_demos=None,
                 augmentation='',
                 output_size=224):
        """
        Args:
            path: path of the dataset
            channels: channels to load - rgb, depth, mask
            limit: criteria to limit first and last idx of getitem
                based on the set index of reference
            max_demos: maximum number of demos to load frames from
            augmentation: augmentation to be applied to frames
            output_size: side length (pixels) of the cropped output
        """
        assert isinstance(channels, (list, tuple))
        for channel in channels:
            assert channel in CHANNEL2SPAN.keys()
        self.channels = tuple(sorted(channels))
        self._dataset = DatasetReader(path, self.channels)
        self.infos = self._dataset.infos
        self.keys = self._dataset.keys
        self._limit = limit
        self.keys.set_query_limit(limit)
        assert isinstance(augmentation, str)
        self._augmentation = Augmentation(augmentation)
        self._im_keys = ['rgb', 'depth', 'mask']
        self._output_size = output_size
        # Masks are used to guide augmentation but are not returned as
        # image planes, so they do not count towards the channel total.
        channels_no_mask = [c for c in channels if c != 'mask']
        self._num_channels = self.sum_channels(channels_no_mask)
        if max_demos is not None:
            self.keys.set_max_demos(max_demos)
        self._seed_augmentation = None
    def __len__(self):
        """Number of frames in the underlying dataset."""
        return len(self._dataset)
    def __getitem__(self, idx):
        """Return normalized, augmented, cropped frames for *idx* (int or slice)."""
        db = self._dataset
        channels = self.channels
        num_channels = self._num_channels
        augmentation = self._augmentation
        output_size = self._output_size
        idx_min, idx_max = self._user2dbidx(idx)
        frames = db[idx_min:idx_max]
        # convert dic of observation to homogeneous array
        frames, masks = self.dict_cat(frames, channels, num_channels)
        # convert to tensor
        frames = torch.tensor(frames)
        masks = torch.tensor(masks)
        # map array from [0, 255] to [0, 1]
        frames = self.unit_range(frames)
        # augment images, works only for depth images
        # each sample_sequence generates a new augmentation sequence
        # the transformation is consistent across frames
        img_size = frames.size()[1:]
        augmentation.sample_sequence(img_size=img_size)
        if 'rgb' not in channels:
            frames = augmentation(frames, masks)
        # crop the image to fixed size
        # if name is not '', do a random crop else do a center crop
        centered_crop = augmentation.name == ''
        params_crop = ImageTransform.sample_params(
            name_transformation='cropping',
            magn=(output_size, output_size),
            img_size=img_size)
        frames = Augmentation.crop(frames, params_crop, centered_crop)
        # maps array from [0, 1] to [-1, 1]
        frames = self.normalize(frames)
        return frames
    @staticmethod
    def dict_cat(frames, channels, num_channels):
        """
        Concatenate dictionary of frames split by channels into an array
        frames: list of dictionaries containing depth, rgb, mask keys
        channels: channels to be concatenated
        num_channels: number of channels per frame
        Returns (stacked uint8 frames, stacked int masks).
        """
        channels = [c for c in channels if 'mask' not in c]
        size = frames[0][channels[0]].shape[0]
        stack_frames = np.zeros((num_channels * len(frames), size, size),
                                dtype=np.uint8)
        stack_masks = np.zeros((len(frames), size, size), dtype=int)
        idx_stack = 0
        for idx_frame, frame in enumerate(frames):
            for channel in channels:
                channel_span = CHANNEL2SPAN[channel]
                channel_im = frame[channel]
                if channel_span > 1:
                    # put the last dimension of rgb image (numpy way) to the first one (torch way)
                    channel_im = np.swapaxes(
                        np.swapaxes(channel_im, 2, 1), 1, 0)
                stack_frames[idx_stack:idx_stack + channel_span] = channel_im
                idx_stack += channel_span
            if 'mask' in frame:
                stack_masks[idx_frame] = frame['mask']
        return stack_frames, stack_masks
    def set_augmentation(self, path):
        """Replace the augmentation configuration (see Augmentation)."""
        self._augmentation.set_augmentation(path)
    @staticmethod
    def unit_range(frames):
        """
        frames: uint8 array or torch tensor in [0, 255]
        return: float array in [0, 1]
        """
        if type(frames) is np.ndarray:
            unit_frames = frames.astype(float)
        elif type(frames) is torch.Tensor:
            unit_frames = frames.float()
        # inplace operations
        unit_frames /= 255
        return unit_frames
    @staticmethod
    def normalize(frames):
        """
        frames: float array in [0, 1]
        return: float array in [-1, 1]
        """
        # inplace operations
        frames -= 0.5
        frames /= 0.5
        return frames
    @staticmethod
    def dict_to_tensor(frames,
                       channels,
                       num_channels,
                       output_size=(224, 224),
                       augmentation_str='',
                       augmentation=None):
        """
        Convert dictionary of observation to normalized tensor,
        augment the images on the way if an augmentation is passed
        frames: dictionary of observations (mime, mujoco, ...)
        return: torch tensor in [-1, 1]
        """
        frames, masks = Frames.dict_cat(frames, channels, num_channels)
        frames = torch.tensor(frames)
        masks = torch.tensor(masks)
        frames = Frames.unit_range(frames)
        if augmentation is None:
            augmentation = Augmentation(augmentation_str)
        augmentation.sample_sequence(frames.size()[1:])
        if 'rgb' not in channels:
            frames = augmentation(frames, masks)
        # crop is centered if there are not augmentation set
        centered_crop = augmentation_str == ''
        img_size = frames.size()[1:]
        params_crop = ImageTransform.sample_params(
            name_transformation='cropping',
            magn=output_size,
            img_size=img_size)
        frames = Augmentation.crop(frames, params_crop, centered_crop)
        frames = Frames.normalize(frames)
        return frames
    @staticmethod
    def adjust_shape(x, num_frames, channels):
        """
        x: torch tensor with potentially missing num_frames
        return: array where first frame is repeated to match num_frames size
        """
        assert isinstance(channels, (tuple, list))
        # NOTE(review): local table differs from CHANNEL2SPAN ('mask' -> 0
        # here) because masks carry no image planes in the stacked tensor.
        channels2num = {'depth': 1, 'rgb': 3, 'mask': 0}
        num_channels = 0
        for channel in channels:
            num_channels += channels2num[channel]
        x_chan = x.shape[0]
        assert x_chan % num_channels == 0
        if x_chan != num_frames * num_channels:
            # Pad at the front by repeating the first frame.
            missing_frames = int(num_frames - x_chan / num_channels)
            m = x[:num_channels].repeat(missing_frames, 1, 1)
            x = torch.cat((m, x), dim=0)
        return x
    @staticmethod
    def sum_channels(channels):
        """Sum of the span of channels"""
        num_channels = 0
        for channel in channels:
            num_channels += CHANNEL2SPAN[channel]
        return num_channels
    def set_camera(self, camera_idx):
        """Select which camera's frames the underlying dataset serves."""
        self._dataset.set_camera(camera_idx)
    def get_num_demos(self):
        """Number of demonstrations available under the current key limits."""
        return self.keys.get_num_demos()
    def get_demo_indices(self, demo_idx):
        """Return (t, t+T) if demo starts at timestep"""
        return self.keys.get_demo_indices(demo_idx)
    def get_mask(self, idx):
        """Return the raw mask plane of frame *idx* (requires 'mask' channel)."""
        assert 'mask' in self.channels
        assert isinstance(idx, int)
        frames = self._dataset[idx]
        return frames['mask']
    def _user2dbidx(self, idx):
        """convert user index to idx_min, idx_max within dataset range"""
        keys = self.keys
        if isinstance(idx, slice):
            start, end, step = idx.indices(len(self))
            # step bigger than 1 not handled
            assert step == 1
            # make sure all the frames come from the same demo
            idx_min, idx_max = keys.get_idx_min_max(start, end)
        elif isinstance(idx, int):
            idx_min, idx_max = idx, idx + 1
        else:
            raise TypeError('{} is an unvalid index type.'.format(type(idx)))
        return idx_min, idx_max
|
119295
|
from blesuite.pybt.gap import GAP
import blesuite.pybt.att as att
import logging
log = logging.getLogger(__name__)
# log.addHandler(logging.NullHandler())
class BTEventHandler(object):
    """
    BTEventHandler is a event handling class passed to the BLEConnectionManager in order to
    have user-controlled callbacks that are called when BLE events occur (ATT, SMP, L2CAP, Connection, scan, metadata,
    and disconnect event). This class provides the skeleton for functions called by the stack when an event
    is received. For instance, when an ATT packet is received, the stack will process the packet and other ATT hooks,
    then trigger supplied BTEventHandler instance BTEventHandler.on_att_event(connection_handle, data)

    :param connection_manager: BLEConnectionManager instance that allows the user to send packets whilst
    processing an event hook trigger.
    :type connection_manager: BLEConnectionManager
    """
    def __init__(self, connection_manager):
        self.connection_manager = connection_manager
    def __del__(self):
        # Drop the back-reference so the manager can be garbage collected.
        self.connection_manager = None
    def on_scan_event(self, address, address_type, data):
        """
        Called when a scan event is received by the stack.

        :param address: Address of the seen peer device
        :type address: str
        :param address_type: Address type of the seen peer device
        :type address_type: int
        :param data: GAP data from the peer device advertisement packet
        :type data: list of strings or a single string
        :return:
        :rtype:
        """
        log.debug("Saw %s (%s)" % (address, "public" if address_type == 0 else "random"))
        if len(data) > 0:
            try:
                gap = GAP()
                if isinstance(data, list):
                    log.debug("data was list!")
                    # Decode each advertisement chunk in order (index unused).
                    for i, j in enumerate(data):
                        gap.decode(str(data[i]))
                else:
                    gap.decode(data)
                log.debug("GAP: %s" % gap)
            except Exception as e:
                # Malformed GAP data is non-fatal: log and keep scanning.
                log.debug("Exception when reading GAP: %s" % e)
        return
    def on_metadata_event(self, status, connection_handle, meta, address, event):
        """
        Called when a metadata event is triggered by the HCI device. This represents a metadata event not
        associated with a scan or connection event.

        :param status: Status of the LE Meta Event - Sub Event
        :type status: int
        :param connection_handle: The connection handle the event was received
        :type connection_handle: int
        :param meta: The metadata
        :type meta: str
        :param address: Peer address that caused the metadata event
        :type address: str
        :param event: The sub event code
        :type event: int
        :return:
        :rtype:
        """
        log.debug("Received LE Meta packet from %s Event: %s!" % (address, event))
    def on_connect_event(self, status, connection_handle, meta, address, address_type):
        """
        Called when a metadata event is triggered by the HCI device with a Connection Compete LE sub event.

        :param status: Status of the connection
        :type status: int
        :param connection_handle: The connection handle the event was received
        :type connection_handle: int
        :param meta: The metadata
        :type meta: str
        :param address: Peer address that caused the metadata event
        :type address: str
        :param address_type: Peer address type
        :type address_type: int
        :return:
        :rtype:
        """
        log.debug("Connected to %s!" % address)
        return
    def on_disconnect_event(self, connection_handle, reason):
        """
        Called when a disconnect event is received.

        :param connection_handle: The connection handle the disconnect occurred on.
        :type connection_handle: int
        :param reason: The reason for the disconnect
        :type reason: int
        :return:
        :rtype:
        """
        log.debug("Disconnected! ConnectionHandle: %s reason: %s" % (connection_handle, reason))
        return
    def on_att_event(self, connection_handle, data):
        """
        Called when an ATT event is received (after other ATT processing and handlers have been invoked).

        :param connection_handle: Connection handle the event was received on
        :type connection_handle: int
        :param data: Packet data
        :type data: Scapy ATT packet -- scapy.layers.bluetooth -- Contains an ATT Header and an ATT body
        :return:
        :rtype:
        """
        log.debug("ATT Event Connection Handle: %s Data: %s" % (connection_handle, data))
        return
    def on_unknown_event(self, packet):
        """
        Called when an unknown event is received. Note: These are usually packet types not supported currently
        by the routing core of the stack.

        :param packet: Scapy Bluetooth packet.
        :type packet: Packet
        :return:
        :rtype:
        """
        log.debug("Unknown Event Packet: %s" % packet)
        return
class ATTSecurityHook(object):
"""
ATTSecurityHook is used by the blesuite.pybyt.gatt to hook, modify, or overwrite security decisions
made by the ATT database based on the current BLE connection security, the attribute properties, and
the attribute permissions. These hooks are called after each security evaluation step has completed and allows the
hook to view and modify the final result of the check. The hooks receive identifyin information about the target
attribute and the association permissions and properties.
"""
    def __init__(self):
        # No state is kept; the hooks below are stateless observers.
        pass
    def att_authorization_check_hook(self, att_opcode, uuid, att_property, att_read_permission, att_write_permission,
                                     connection_permission, authorization_required):
        """
        Called when an authorization check is made. This check is part of the security check workflow
        and validates that if the attribute requires authorization in order to access it, then the
        authorization procedure must succeed (implementation dependent procedure). In BLESuite, this function
        acts as the authorization procedure.

        :param att_opcode: ATT opcode of the request attempting to access the attribute
        :type att_opcode: int
        :param uuid: UUID (16-bit or 128-bit) of the target attribute
        :type uuid: blesuite.pybt.gatt.UUID object instance
        :param att_property: Attribute properties assigned to the attribute (blesuite.utils.att_utils.ATT_PROP_READ | blesuite.utils.att_utils.ATT_PROP_WRITE)
        :type att_property: int
        :param att_read_permission: Security requirements of attribute in order to read the value
        :type att_read_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
        :param att_write_permission: Security requirements of attribute in order to write to the value
        :type att_write_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
        :param connection_permission: Security Manager associated with the current BLE connection where the attribute is being accessed.
        :type connection_permission: blesuite.pybt.sm.SecurityManager
        :param authorization_required: Flag to indicate whether the attribute requires authorization
        :type authorization_required: bool
        :return: Result that indicates if the check passed or not (True = passed)
        :rtype: bool
        """
        # Default implementation authorizes every request; subclasses can
        # override to implement a real authorization procedure.
        check_passed = True
        log.debug("ATT Authorization check invoked. Operation: %d Target Attribute: %s ATT Property: %d "
                  "ATT Read Security Mode: %d ATT Read Security Level: %d "
                  "ATT Read Security Mode: %d ATT Read Security Level: %d "
                  "Connection Security Mode: %d Connection Security Level: %d "
                  "Attribute requires authorization: %d" %
                  (att_opcode, uuid, att_property, att_read_permission.security_mode, att_read_permission.security_level,
                   att_write_permission.security_mode, att_write_permission.security_level,
                   connection_permission.get_security_mode_mode(), connection_permission.get_security_mode_level(),
                   authorization_required))
        return check_passed
    def att_authentication_check_hook(self, att_authentication_check_result,
                                      att_opcode, uuid, att_property, att_read_permission,
                                      att_write_permission, connection_permission):
        """
        Called when an authentication check is made. This check is part of the security check workflow
        and validates that the connection, on which the attribute access request is being made, has been
        authenticated. (This means that the pairing method used to establish the encrypted connection must
        be authenticated if authentication is required)

        :param att_authentication_check_result: Result of the ATT server's authentication check
        :type att_authentication_check_result: bool
        :param att_opcode: ATT opcode of the request attempting to access the attribute
        :type att_opcode: int
        :param uuid: UUID (16-bit or 128-bit) of the target attribute
        :type uuid: blesuite.pybt.gatt.UUID object instance
        :param att_property: Attribute properties assigned to the attribute (blesuite.utils.att_utils.ATT_PROP_READ | blesuite.utils.att_utils.ATT_PROP_WRITE)
        :type att_property: int
        :param att_read_permission: Security requirements of attribute in order to read the value
        :type att_read_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
        :param att_write_permission: Security requirements of attribute in order to write to the value
        :type att_write_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
        :param connection_permission: Security Manager associated with the current BLE connection where the attribute is being accessed.
        :type connection_permission: blesuite.pybt.sm.SecurityManager
        :return: Result that indicates if the check passed or not (True = passed)
        :rtype: bool
        """
        # Default implementation simply forwards the server's verdict.
        check_passed = att_authentication_check_result
        # NOTE(review): adjacent literals "Result: %d" / "Operation: ..." lack
        # a separating space, so the rendered message reads "Result: 1Operation:".
        log.debug("ATT Authentication check invoked. Result: %d"
                  "Operation: %d Target Attribute: %s ATT Property: %d "
                  "ATT Read Security Mode: %d ATT Read Security Level: %d "
                  "ATT Read Security Mode: %d ATT Read Security Level: %d "
                  "Connection Security Mode: %d Connection Security Level: %d" %
                  (att_authentication_check_result,
                   att_opcode, uuid, att_property, att_read_permission.security_mode, att_read_permission.security_level,
                   att_write_permission.security_mode, att_write_permission.security_level,
                   connection_permission.get_security_mode_mode(), connection_permission.get_security_mode_level()))
        return check_passed
def att_encryption_check_hook(self, att_encryption_check_result,
att_opcode, uuid, att_property, att_read_permission,
att_write_permission, connection_permission, is_connection_encrypted):
"""
Called when an encryption check is made. This check is part of the security check workflow
and validates that the connection, on which the attribute access request is being made, is
encrypted.
:param att_encryption_check_result: Result of the ATT server's encryption check
:type att_encryption_check_result: bool
:param att_opcode: ATT opcode of the request attempting to access the attribute
:type att_opcode: int
:param uuid: UUID (16-bit or 128-bit) of the target attribute
:type uuid: blesuite.pybt.gatt.UUID object instance
:param att_property: Attribute properties assigned to the attribute (blesuite.utils.att_utils.ATT_PROP_READ | blesuite.utils.att_utils.ATT_PROP_WRITE)
:type att_property: int
:param att_read_permission: Security requirements of attribute in order to read the value
:type att_read_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param att_write_permission: Security requirements of attribute in order to write to the value
:type att_write_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param connection_permission: Security Manager associated with the current BLE connection where the attribute is being accessed.
:type connection_permission: blesuite.pybt.sm.SecurityManager
:param is_connection_encrypted: Flag to indicate whether the connection requesting access to the attribute is encrypted
:type is_connection_encrypted: bool
:return: Result that indicates if the check passed or not (True = passed)
:rtype: bool
"""
check_passed = att_encryption_check_result
log.debug("ATT Encryption check invoked. Result: %d"
"Operation: %d Target Attribute: %s ATT Property: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"Connection Security Mode: %d Connection Security Level: %d Is Connection Encrypted?: %s",
(att_encryption_check_result,
att_opcode, uuid, att_property, att_read_permission.security_mode, att_read_permission.security_level,
att_write_permission.security_mode, att_write_permission.security_level,
connection_permission.get_security_mode_mode(), connection_permission.get_security_mode_level(),
is_connection_encrypted))
return check_passed
def att_operation_supported_check_hook(self, att_operation_supported_check_result,
att_opcode, uuid, att_property):
"""
Called when an ATT operation check is made. This check is part of the security check workflow
and validates that the requested ATT operation (read, write) is supported by the target attribute.
:param att_operation_supported_check_result: Result of the ATT server's ATT operation check
:type att_operation_supported_check_result: bool
:param att_opcode: ATT opcode of the request attempting to access the attribute
:type att_opcode: int
:param uuid: UUID (16-bit or 128-bit) of the target attribute
:type uuid: blesuite.pybt.gatt.UUID object instance
:param att_property: Attribute properties assigned to the attribute (blesuite.utils.att_utils.ATT_PROP_READ | blesuite.utils.att_utils.ATT_PROP_WRITE)
:type att_property: int
:return: Result that indicates if the check passed or not (True = passed)
:rtype: bool
"""
check_passed = att_operation_supported_check_result
log.debug("ATT Operation supported check invoked. Result: %d"
"att_opcode: %d uuid: %s att_property: %d" % (
att_operation_supported_check_result, att_opcode,
uuid, att_property
))
return check_passed
def att_security_check_hook(self, att_operation_supported_check_result,
att_authorization_check_result,
att_encryption_check_result,
att_authentication_check_result,
att_opcode, uuid, att_property, att_read_permission, att_write_permission,
connection_permission, authorization_required, is_connection_encrypted):
"""
Called when a request to access an attribute has been made by a peer before the operation
is executed. This hook occurs at the end of the security check function that processes
the ATT operation, authorization requirements, encryption requirements,
and authentication requirements security checks. This hook receives all results of the security checks
and the returned result will notify the ATT server if the operation should continue or be discarded
with a particular error. (Errors will trigger based on the check that fails. The order of checks is
operation, authorization, encryption, and authentication)
:param att_operation_supported_check_result: Result of the ATT server's ATT operation check
:type att_operation_supported_check_result: bool
:param att_authorization_check_result: Result of the ATT server's authorization check
:type att_authorization_check_result: bool
:param att_encryption_check_result: Result of the ATT server's encryption check
:type att_encryption_check_result: bool
:param att_authentication_check_result: Result of the ATT server's authentication check
:type att_authentication_check_result: bool
:param att_opcode: ATT opcode of the request attempting to access the attribute
:type att_opcode: int
:param uuid: UUID (16-bit or 128-bit) of the target attribute
:type uuid: blesuite.pybt.gatt.UUID object instance
:param att_property: Attribute properties assigned to the attribute (blesuite.utils.att_utils.ATT_PROP_READ | blesuite.utils.att_utils.ATT_PROP_WRITE)
:type att_property: int
:param att_read_permission: Security requirements of attribute in order to read the value
:type att_read_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param att_write_permission: Security requirements of attribute in order to write to the value
:type att_write_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param connection_permission: Security Manager associated with the current BLE connection where the attribute is being accessed.
:param authorization_required: Flag to indicate whether the attribute requires authorization
:type authorization_required: bool
:type connection_permission: blesuite.pybt.sm.SecurityManager
:param is_connection_encrypted: Flag to indicate whether the connection requesting access to the attribute is encrypted
:type is_connection_encrypted: bool
:return: Result that indicates each check has passed (order - operation, authorization, encryption, authentication)
:rtype: tuple of bool (4 element)
"""
log.debug("ATT Security check hook invoked. "
"ATT Operation supported check result: %d "
"ATT Authorization security check result: %d "
"ATT encryption security check result: %d "
"ATT Authentication security check result: %d "
"Operation: %d Target Attribute: %s ATT Property: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"Connection Security Mode: %d Connection Security Level: %d "
"Authorization required: %d "
"Is connection encrypted?: %s" %
(att_operation_supported_check_result,
att_authorization_check_result,
att_encryption_check_result,
att_authentication_check_result,
att_opcode, uuid, att_property, att_read_permission.security_mode,
att_read_permission.security_level,
att_write_permission.security_mode, att_write_permission.security_level,
connection_permission.get_security_mode_mode(), connection_permission.get_security_mode_level(),
authorization_required,
is_connection_encrypted))
return (att_operation_supported_check_result,
att_authorization_check_result,
att_encryption_check_result,
att_authentication_check_result)
class ATTEventHook(object):
    """
    ATTEventHook is used by blesuite.pybt.att to allow the user to hook ATT operations triggered by a peer
    ATT request. These hooks allow the user to view and/or modify outgoing ATT responses, incoming write requests,
    and incoming long write requests (prepared write and execute write).

    Every hook's default implementation approves the operation unchanged; subclasses
    override individual hooks to inspect, rewrite, or veto the operation.
    """

    def __init__(self):
        pass

    def att_response_hook(self, received_packet, our_response_packet):
        """
        Called before an ATT response packet is sent to a peer device. This enables the response packet to be
        viewed in order to modify read response data, send notifications/indications based on a read
        or error operation, modify error messages, or send packets to a peer device based upon
        the received packet and/or our response.

        :param received_packet: ATT request packet received from peer
        :type received_packet: scapy.layers.bluetooth ATT packet with ATT header
        :param our_response_packet: ATT response packet to be sent to our peer
        :type our_response_packet: scapy.layers.bluetooth ATT packet with ATT header
        :return: A flag to indicate whether we should send the response packet and the packet to send.
        :rtype: bool, ATT packet body (header is appended automatically)
        """
        send_packet = True
        # Lazy logging args: formatting only happens when DEBUG is enabled.
        log.debug("ATT response hook triggered. Received packet: %s Send packet: %s packet: %s",
                  received_packet, send_packet, our_response_packet)
        return (send_packet, our_response_packet)

    def att_prepare_queued_write_hook(self, gatt_handle, offset, data):
        """
        Called when the peer device sends a Prepare Write request. This enables the attribute handle, offset,
        and data from the request to be viewed and/or modified. Additionally, this allows the user to
        deny the write from being performed.

        :param gatt_handle: ATT handle of the target attribute
        :type gatt_handle: int
        :param offset: Offset to begin the write operation to the prepared write queue
        :type offset: int
        :param data: Data to write to the prepared write queue
        :type data: str
        :return: A flag to indicate if the value should be written to the prepared write queue, the offset to begin the write, and the data to write
        :rtype: bool, int, int, str
        """
        write_value_to_queue = True
        # Fix: trailing space keeps "queue" and "for" from running together in the
        # rendered message; "pepared" typo corrected to "prepared".
        log.debug("ATT queued write hook triggered. Write value to attribute prepared write queue "
                  "for attribute: %s on offset: %d with value: %s", hex(gatt_handle), offset, data)
        return (write_value_to_queue, gatt_handle, offset, data)

    def att_execute_queued_write_hook(self, flags):
        """
        Called when the peer device sends an Execute Write request. This enables the flag
        from the request to be viewed and/or modified. Additionally, this allows the user to
        deny the write from being performed.

        :param flags: Execute write flags
        :type flags: int
        :return: Flag to indicate that the execute write should continue and the execute write flags to pass along
        :rtype: bool, int
        """
        execute = True
        log.debug("ATT execute write hook triggered. Action: %d", flags)
        return (execute, flags)

    def att_write_hook(self, gatt_handle, data):
        """
        Called when the peer device sends a write request. This enables the attribute handle and data
        from the request to be viewed and/or modified. Additionally, this allows the user to
        deny the write from being performed.

        :param gatt_handle: ATT handle of the target attribute
        :type gatt_handle: int
        :param data: Data to write to the attribute
        :type data: str
        :return: Flag to indicate that the write should continue, the target attribute handle, and the data to write
        :rtype: bool, int, str
        """
        write_value_to_attribute = True
        log.debug("ATT write hook triggered. Write value to attribute: %s value: %s", hex(gatt_handle), data)
        return (write_value_to_attribute, gatt_handle, data)
|
119302
|
# Application configuration. Only the database section is defined here; the
# default connection name must match a key under 'connections'.
config = {
    # --------------------------------------------------------------------------
    # Database Connections
    # --------------------------------------------------------------------------
    'database': {
        # Name of the connection (below) used when none is specified explicitly.
        'default': 'auth',
        'connections': {
            # SQLite (kept for reference; in-memory database, no credentials)
            # 'auth': {
            #     'driver': 'sqlite',
            #     'dialect': None,
            #     'host': None,
            #     'port': None,
            #     'database': ':memory',
            #     'username': None,
            #     'password': <PASSWORD>,
            #     'prefix': 'auth_',
            # },

            # MySQL — active connection. NOTE(review): '<PASSWORD>' is a redacted
            # placeholder; supply the real credential via a secret, not source.
            'auth': {
                'driver': 'mysql',
                'dialect': 'pymysql',
                'host': '127.0.0.1',
                'port': 3306,
                'database': 'uvicore_test',
                'username': 'root',
                'password': '<PASSWORD>',
                # Table-name prefix applied to every table on this connection.
                'prefix': 'auth_',
            },
        },
    },
}
|
119313
|
from machine.utils.collections import CaseInsensitiveDict
from machine.utils import sizeof_fmt
from tests.singletons import FakeSingleton
def test_Singleton():
    """Constructing the singleton twice must yield equal objects."""
    first = FakeSingleton()
    second = FakeSingleton()
    assert first == second
def test_CaseInsensitiveDict():
    """Membership tests must ignore key casing."""
    lookup = CaseInsensitiveDict({'foo': 'bar'})
    for key in ('foo', 'FoO'):
        assert key in lookup
def test_size_fmt():
    """Spot-check human-readable rendering across B/KB/GB magnitudes."""
    cases = [
        (500, '500.0B'),
        (1124, '1.1KB'),
        (168963795964, '157.4GB'),
    ]
    for raw_bytes, expected in cases:
        assert sizeof_fmt(raw_bytes) == expected
|
119371
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
# Build the Cython extension. Fix: extra_compile_args and include_dirs were
# previously passed to setup(), where distutils silently ignores them — they are
# per-extension options and must be set on the Extension itself.
setup(
    cmdclass={'build_ext': build_ext},
    ext_modules=[
        Extension(
            "rf_classify_parallel",
            ["rf_classify_parallel.pyx"],
            # '/openmp' is the MSVC flag; use '-fopenmp' when building with gcc/clang.
            extra_compile_args=['/openmp'],
            include_dirs=[np.get_include()],
        )
    ],
)
|
119372
|
import unittest
from mygrations.formats.mysql.file_reader.database import database as database_reader
class test_table_1215_regressions(unittest.TestCase):
    """Regression tests for MySQL error-1215 (foreign key) detection."""

    def test_foreign_key_without_index(self):
        """ Discovered that the system was not raising an error for a foreign key that didn't have an index for the table it was attached to """
        # `vendor_id` participates in a FK constraint but has no index of its own,
        # which MySQL rejects with error 1215 — the reader must report it.
        db = database_reader([
            """CREATE TABLE `vendors` (`id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`));""",
            """CREATE TABLE `payment_requests_external` (
            `id` INT(10) UNSIGNED NOT NULL AUTO_INCREMENT,
            `account_id` int(10) UNSIGNED NOT NULL,
            `vendor_id` int(10) UNSIGNED DEFAULT NULL,
            `vendor_name` VARCHAR(255) NOT NULL DEFAULT '',
            `vendor_city` VARCHAR(255) NOT NULL DEFAULT '',
            `vendor_state` VARCHAR(255) NOT NULL DEFAULT '',
            `vendor_zip` VARCHAR(255) NOT NULL DEFAULT '',
            `vendor_list_id` VARCHAR(255) NOT NULL DEFAULT '',
            `edit_sequence` VARCHAR(255) NOT NULL DEFAULT '',
            `memo` VARCHAR(255) NOT NULL DEFAULT '',
            `request_date` INT(10) UNSIGNED NOT NULL DEFAULT 0,
            `guid` VARCHAR(255) NOT NULL DEFAULT '',
            `po_number` VARCHAR(255) NOT NULL DEFAULT '',
            `description` VARCHAR(255) NOT NULL DEFAULT '',
            `property_address1` VARCHAR(255) NOT NULL DEFAULT '',
            `property_address2` VARCHAR(255) NOT NULL DEFAULT '',
            `property_city` VARCHAR(255) NOT NULL DEFAULT '',
            `property_state` VARCHAR(255) NOT NULL DEFAULT '',
            `property_zip` VARCHAR(255) NOT NULL DEFAULT '',
            `customer_name` VARCHAR(255) NOT NULL DEFAULT '',
            `customer_list_id` VARCHAR(255) NOT NULL DEFAULT '',
            `contractor_completion_date` INT(10) UNSIGNED NOT NULL,
            `po_amount` DECIMAL(20,2) NOT NULL,
            `line_items` text,
            created INT(10) UNSIGNED NOT NULL DEFAULT 0,
            updated INT(10) UNSIGNED NOT NULL DEFAULT 0,
            PRIMARY KEY (`id`),
            KEY `account_id_pr_external` (`account_id`),
            CONSTRAINT `vendor_id_pr_external_fk` FOREIGN KEY (`vendor_id`) REFERENCES `vendors` (`id`) ON DELETE SET NULL ON UPDATE CASCADE
            ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;"""
        ])
        # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(1, len(db.errors_1215))
        self.assertEqual(
            'MySQL 1215 error for foreign key `vendor_id_pr_external_fk`: missing index. `payment_requests_external`.`vendor_id` does not have an index and therefore cannot be used in a foreign key constraint',
            db.errors_1215[0]
        )
|
119382
|
from remote.remote_util import RemoteMachineShellConnection
from .tuq import QueryTests
import time
from deepdiff import DeepDiff
from membase.api.exception import CBQError
class QueryWindowClauseTests(QueryTests):
    """N1QL WINDOW-clause tests against the travel-sample bucket.

    Each positive test runs the same query twice — once using a named window
    (WINDOW ... AS (...)) and once with the definition inlined in OVER (...) —
    and asserts the result sets match. Negative tests assert the expected
    error code/message for invalid window usage.
    """

    def setUp(self):
        super(QueryWindowClauseTests, self).setUp()
        self.log.info("============== QueryWindowClauseTests setup has started ==============")
        self.function_name = self.input.param("function_name", "CUME_DIST")
        if self.load_sample:
            self.rest.load_sample("travel-sample")
            # Poll until the full sample (31591 docs) is visible, or give up
            # after 600 seconds.
            init_time = time.time()
            while True:
                next_time = time.time()
                query_response = self.run_cbq_query("SELECT COUNT(*) FROM `" + self.bucket_name + "`")
                self.log.info(f"{self.bucket_name}+ count: {query_response['results'][0]['$1']}")
                if query_response['results'][0]['$1'] == 31591:
                    break
                if next_time - init_time > 600:
                    break
                time.sleep(2)
        self.wait_for_all_indexes_online()
        self.log.info("============== QueryWindowClauseTests setup has completed ==============")
        self.log_config_info()

    def suite_setUp(self):
        super(QueryWindowClauseTests, self).suite_setUp()
        self.log.info("============== QueryWindowClauseTests suite_setup has started ==============")
        self.log.info("============== QueryWindowClauseTests suite_setup has completed ==============")

    def tearDown(self):
        self.log.info("============== QueryWindowClauseTests tearDown has started ==============")
        # Drop the sample bucket loaded in setUp (if present).
        travel_sample = self.get_bucket_from_name("travel-sample")
        if travel_sample:
            self.delete_bucket(travel_sample)
        self.log.info("============== QueryWindowClauseTests tearDown has completed ==============")
        super(QueryWindowClauseTests, self).tearDown()

    def suite_tearDown(self):
        self.log.info("============== QueryWindowClauseTests suite_tearDown has started ==============")
        self.log.info("============== QueryWindowClauseTests suite_tearDown has completed ==============")
        super(QueryWindowClauseTests, self).suite_tearDown()

    def test_window_single(self):
        # One named window referenced by a single window function.
        window_clause_query = "SELECT d.id, d.destinationairport, \
            AVG(d.distance) OVER ( window1 ) AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            WINDOW window1 AS (PARTITION BY d.destinationairport) \
            ORDER BY 1 \
            LIMIT 5"
        window_query = "SELECT d.id, d.destinationairport, \
            AVG(d.distance) OVER ( PARTITION BY d.destinationairport ) AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            ORDER BY 1 \
            LIMIT 5"
        window_clause_query_results = self.run_cbq_query(window_clause_query)
        window_query_results = self.run_cbq_query(window_query)
        self.assertEqual(window_clause_query_results['results'], window_query_results['results'])

    def test_window_unused(self):
        # A WINDOW clause that is declared but never referenced must not error.
        window_query = "SELECT d.id, d.destinationairport \
            FROM `travel-sample` AS d WHERE d.type='route' \
            WINDOW window1 AS (PARTITION BY d.destinationairport ORDER by d.distance) \
            LIMIT 5"
        self.run_cbq_query(window_query)

    def test_window_multiple(self):
        # Several window definitions, referenced with and without parentheses,
        # mixed with an inline OVER definition.
        window_clause_query = "SELECT d.id, d.destinationairport, \
            ROW_NUMBER() OVER ( w1 ) AS `row`, \
            AVG(d.distance) OVER w1 AS `avg`, \
            AVG(d.distance) OVER ( PARTITION BY d.destinationairport ORDER BY d.distance NULLS LAST ) AS `average`, \
            LEAD(r.distance, 1, 'No next distance') OVER ( w3 ) AS `next-distance` \
            FROM `travel-sample` AS d \
            WHERE d.type='route' \
            WINDOW w1 AS (PARTITION BY d.destinationairport ORDER BY d.distance NULLS LAST), \
            w2 AS (PARTITION BY d.sourceairport ORDER BY d.distance), \
            w3 AS (PARTITION BY r.airline ORDER BY r.distance NULLS LAST) \
            ORDER BY 1 \
            LIMIT 7"
        window_query = "SELECT d.id, d.destinationairport, \
            ROW_NUMBER() OVER ( PARTITION BY d.destinationairport ORDER BY d.distance NULLS LAST ) AS `row`, \
            AVG(d.distance) OVER ( PARTITION BY d.destinationairport ORDER BY d.distance NULLS LAST ) AS `avg`, \
            AVG(d.distance) OVER ( PARTITION BY d.destinationairport ORDER BY d.distance NULLS LAST ) AS `average`, \
            LEAD(r.distance, 1, 'No next distance') OVER ( PARTITION BY r.airline ORDER BY r.distance NULLS LAST ) AS `next-distance` \
            FROM `travel-sample` AS d \
            WHERE d.type='route' \
            ORDER BY 1 \
            LIMIT 7"
        window_clause_query_results = self.run_cbq_query(window_clause_query)
        window_query_results = self.run_cbq_query(window_query)
        self.assertEqual(window_clause_query_results['results'], window_query_results['results'])

    def test_window_order(self):
        # Named window that carries its own ORDER BY.
        window_clause_query = "SELECT d.id, d.destinationairport, \
            CUME_DIST() OVER ( window1 ) AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            WINDOW window1 AS (PARTITION BY d.destinationairport ORDER by d.distance) \
            ORDER BY 1 \
            LIMIT 5"
        window_query = "SELECT d.id, d.destinationairport, \
            CUME_DIST() OVER ( PARTITION BY d.destinationairport ORDER by d.distance ) AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            ORDER BY 1 \
            LIMIT 5"
        window_clause_query_results = self.run_cbq_query(window_clause_query)
        window_query_results = self.run_cbq_query(window_query)
        self.assertEqual(window_clause_query_results['results'], window_query_results['results'])

    def test_window_name_order(self):
        # ORDER BY supplied at the OVER site, on top of a named window that
        # only partitions.
        window_clause_query = "SELECT d.id, d.destinationairport, \
            CUME_DIST() OVER ( window1 ORDER by d.distance ) AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            WINDOW window1 AS (PARTITION BY d.destinationairport) \
            ORDER BY 1 \
            LIMIT 5"
        window_query = "SELECT d.id, d.destinationairport, \
            CUME_DIST() OVER ( PARTITION BY d.destinationairport ORDER by d.distance ) AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            ORDER BY 1 \
            LIMIT 5"
        window_clause_query_results = self.run_cbq_query(window_clause_query)
        window_query_results = self.run_cbq_query(window_query)
        self.assertEqual(window_clause_query_results['results'], window_query_results['results'])

    def test_window_name_frame(self):
        # Frame clause supplied at the OVER site; partition+order come from the
        # named window.
        window_clause_query = "SELECT r.sourceairport, r.destinationairport, r.distance, \
            NTH_VALUE(r.distance, 2) FROM FIRST OVER ( \
            window1 \
            ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING \
            ) AS `shortest_distance_but_1` \
            FROM `travel-sample` AS r \
            WHERE r.type='route' \
            WINDOW window1 AS ( PARTITION BY r.sourceairport ORDER BY r.distance) \
            ORDER BY 1 \
            LIMIT 7"
        window_query = "SELECT r.sourceairport, r.destinationairport, r.distance, \
            NTH_VALUE(r.distance, 2) FROM FIRST OVER ( \
            PARTITION BY r.sourceairport \
            ORDER BY r.distance \
            ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING \
            ) AS `shortest_distance_but_1` \
            FROM `travel-sample` AS r \
            WHERE r.type='route' \
            ORDER BY 1 \
            LIMIT 7"
        window_clause_query_results = self.run_cbq_query(window_clause_query)
        window_query_results = self.run_cbq_query(window_query)
        self.assertEqual(window_clause_query_results['results'], window_query_results['results'])

    def test_window_name_order_frame(self):
        # Both ORDER BY and frame clause supplied at the OVER site.
        window_clause_query = "SELECT r.sourceairport, r.destinationairport, r.distance, \
            NTH_VALUE(r.distance, 2) FROM FIRST OVER ( \
            window1 \
            ORDER BY r.distance \
            ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING \
            ) AS `shortest_distance_but_1` \
            FROM `travel-sample` AS r \
            WHERE r.type='route' \
            WINDOW window1 AS ( PARTITION BY r.sourceairport ) \
            ORDER BY 1 \
            LIMIT 7"
        window_query = "SELECT r.sourceairport, r.destinationairport, r.distance, \
            NTH_VALUE(r.distance, 2) FROM FIRST OVER ( \
            PARTITION BY r.sourceairport \
            ORDER BY r.distance \
            ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING \
            ) AS `shortest_distance_but_1` \
            FROM `travel-sample` AS r \
            WHERE r.type='route' \
            ORDER BY 1 \
            LIMIT 7"
        window_clause_query_results = self.run_cbq_query(window_clause_query)
        window_query_results = self.run_cbq_query(window_query)
        self.assertEqual(window_clause_query_results['results'], window_query_results['results'])

    def test_window_no_parenthesis(self):
        # Named window referenced without parentheses after OVER.
        window_clause_query = "SELECT d.id, d.destinationairport, \
            CUME_DIST() OVER window_name_with_no_parenthesis AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            WINDOW window_name_with_no_parenthesis AS (PARTITION BY d.destinationairport ORDER by d.distance) \
            ORDER BY 1 \
            LIMIT 5"
        window_query = "SELECT d.id, d.destinationairport, \
            CUME_DIST() OVER ( PARTITION BY d.destinationairport ORDER by d.distance ) AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            ORDER BY 1 \
            LIMIT 5"
        window_clause_query_results = self.run_cbq_query(window_clause_query)
        window_query_results = self.run_cbq_query(window_query)
        self.assertEqual(window_clause_query_results['results'], window_query_results['results'])

    def test_window_name_function(self):
        # Parameterized over the window function under test (self.function_name).
        # Each entry supplies the function arguments, projection list, window
        # definition, optional frame clause, and FROM/WHERE fragment.
        window_function = {
            'CUME_DIST': {
                'arg':'', 'select_list': 'd.id, d.destinationairport',
                'wdef': 'PARTITION BY d.destinationairport ORDER BY d.distance NULLS LAST', 'frame_clause': '',
                'from_where': '`travel-sample` AS d WHERE d.type=\'route\''
            },
            'DENSE_RANK': {
                'arg':'', 'select_list': 'a.airportname, a.geo.alt',
                'wdef': 'PARTITION BY a.country ORDER BY a.geo.alt NULLS LAST', 'frame_clause': '',
                'from_where': '`travel-sample` AS a WHERE a.type=\'airport\''
            },
            'FIRST_VALUE': {
                'arg':'r.distance', 'select_list': 'r.sourceairport, r.destinationairport, r.distance',
                'wdef': 'PARTITION BY r.sourceairport ORDER BY r.distance', 'frame_clause': '',
                'from_where': '`travel-sample` AS r WHERE r.type=\'route\''
            },
            'LAG': {
                'arg':'r.distance, 1, "No previous distance"', 'select_list': 'r.airline, r.id, r.distance',
                'wdef': 'PARTITION BY r.airline ORDER BY r.distance NULLS LAST', 'frame_clause': '',
                'from_where': '`travel-sample` AS r WHERE r.type=\'route\''
            },
            'LAST_VALUE': {
                'arg':'r.distance', 'select_list': 'r.sourceairport, r.destinationairport, r.distance',
                'wdef': 'PARTITION BY r.sourceairport ORDER BY r.distance', 'frame_clause': 'ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING',
                'from_where': '`travel-sample` AS r WHERE r.type=\'route\''
            },
            'LEAD': {
                'arg': 'r.distance, 1, "No next distance"', 'select_list': 'r.airline, r.id, r.distance',
                'wdef': 'PARTITION BY r.airline ORDER BY r.distance NULLS LAST', 'frame_clause': '',
                'from_where': '`travel-sample` AS r WHERE r.type=\'route\''
            },
            'NTH_VALUE': {
                'arg': 'r.distance, 2', 'select_list': 'r.sourceairport, r.destinationairport, r.distance',
                'wdef': 'PARTITION BY r.sourceairport ORDER BY r.distance', 'frame_clause': 'ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING',
                'from_where': '`travel-sample` AS r WHERE r.type=\'route\''
            },
            'NTILE': {
                'arg': 3, 'select_list': 'r.airline, r.distance',
                'wdef': 'PARTITION BY r.airline ORDER BY r.distance', 'frame_clause': '',
                'from_where': '`travel-sample` AS r WHERE r.type=\'route\''
            },
            'PERCENT_RANK': {
                'arg':'', 'select_list': 'd.id, d.destinationairport',
                'wdef': 'PARTITION BY d.destinationairport ORDER BY d.distance NULLS LAST', 'frame_clause': '',
                'from_where': '`travel-sample` AS d WHERE d.type=\'route\''
            },
            'RANK': {
                'arg':'', 'select_list': 'a.airportname, a.geo.alt',
                'wdef': 'PARTITION BY a.country ORDER BY a.geo.alt NULLS LAST', 'frame_clause': '',
                'from_where': '`travel-sample` AS a WHERE a.type=\'airport\''
            },
            'RATIO_TO_REPORT': {
                'arg':'d.distance', 'select_list': 'd.id, d.destinationairport',
                'wdef': 'PARTITION BY d.destinationairport', 'frame_clause': '',
                'from_where': '`travel-sample` AS d WHERE d.type=\'route\''
            },
            'ROW_NUMBER': {
                'arg':'', 'select_list': 'd.id, d.destinationairport',
                'wdef': 'PARTITION BY d.destinationairport ORDER BY d.distance NULLS LAST', 'frame_clause': '',
                'from_where': '`travel-sample` AS d WHERE d.type=\'route\''
            }
        }
        function = self.function_name
        select_list = window_function[function]['select_list']
        arg = window_function[function]['arg']
        from_where = window_function[function]['from_where']
        window_definition = window_function[function]['wdef']
        frame_clause = window_function[function]['frame_clause']
        # run query with window function
        window_clause_query = f"SELECT {select_list}, {function}({arg}) OVER (window_name {frame_clause}) AS {function}_col FROM {from_where} WINDOW window_name AS ( {window_definition} ) LIMIT 7"
        window_query = f"SELECT {select_list}, {function}({arg}) OVER ( {window_definition} {frame_clause}) AS {function}_col FROM {from_where} LIMIT 7"
        window_clause_query_results = self.run_cbq_query(window_clause_query)
        window_query_results = self.run_cbq_query(window_query)
        self.assertEqual(window_clause_query_results['results'], window_query_results['results'])

    def test_window_prepared(self):
        # Named windows must survive the prepare/execute cycle.
        prepare_window_query = "PREPARE window_query as SELECT d.id, d.destinationairport, CUME_DIST() OVER ( `Window-1` ) AS `rank` FROM `travel-sample` AS d WHERE d.type='route' WINDOW `Window-1` AS (PARTITION by d.destinationairport ORDER BY d.distance) limit 5"
        self.run_cbq_query(prepare_window_query)
        self.run_cbq_query(query="EXECUTE window_query")

    def test_neg_non_unique(self):
        # Duplicate window aliases must be rejected.
        error_code = 6500
        error_message = "Duplicate window clause alias window1."
        window_query = "SELECT d.id, d.destinationairport, \
            CUME_DIST() OVER ( window1 ) AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            WINDOW window1 AS (PARTITION BY d.destinationairport ORDER by d.distance), \
            window1 AS (PARTITION BY d.destinationairport)"
        try:
            self.run_cbq_query(window_query)
        except CBQError as ex:
            error = self.process_CBQE(ex)
            self.assertEqual(error['code'], error_code)
            self.assertEqual(error['msg'], error_message)

    def test_neg_non_exists(self):
        # Referencing an undeclared window name must be rejected.
        error_code = 6500
        error_message = "Window window2 not in the scope of window clause"
        window_query = "SELECT d.id, d.destinationairport, \
            CUME_DIST() OVER ( window2 ) AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            WINDOW window1 AS (PARTITION BY d.destinationairport ORDER by d.distance)"
        try:
            self.run_cbq_query(window_query)
        except CBQError as ex:
            error = self.process_CBQE(ex)
            self.assertEqual(error['code'], error_code)
            self.assertEqual(error['msg'], error_message)

    def test_neg_order(self):
        # An OVER-site ORDER BY is illegal when the named window already orders.
        error_code = 6500
        error_message = "Window window1 shall not have a window ordering clause"
        window_query = "SELECT d.id, d.destinationairport, \
            CUME_DIST() OVER ( window1 ORDER BY d.distance NULLS LAST) AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            WINDOW window1 AS (PARTITION BY d.destinationairport ORDER by d.distance)"
        try:
            self.run_cbq_query(window_query)
        except CBQError as ex:
            error = self.process_CBQE(ex)
            self.assertEqual(error['code'], error_code)
            self.assertEqual(error['msg'], error_message)

    def test_neg_frame(self):
        # A frame clause is illegal inside a referenced named window definition.
        error_code = 6500
        error_message = "Window window1 shall not have a window framing clause"
        window_query = "SELECT r.sourceairport, r.destinationairport, r.distance, \
            LAST_VALUE(r.distance) OVER (window1) AS `longest_distance` \
            FROM `travel-sample` AS r \
            WHERE r.type='route' \
            WINDOW window1 AS (PARTITION BY r.sourceairport ORDER BY r.distance ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)"
        try:
            self.run_cbq_query(window_query)
        except CBQError as ex:
            error = self.process_CBQE(ex)
            self.assertEqual(error['code'], error_code)
            self.assertEqual(error['msg'], error_message)

    def test_neg_partition(self):
        # An OVER-site PARTITION BY is illegal on top of a named window.
        error_code = 6500
        error_message = "Window window1 shall not have a window partitioning clause"
        window_query = "SELECT d.id, d.destinationairport, \
            CUME_DIST() OVER ( window1 PARTITION BY d.destinationairport) AS `rank` \
            FROM `travel-sample` AS d WHERE d.type='route' \
            WINDOW window1 AS (PARTITION BY d.destinationairport ORDER by d.distance)"
        try:
            self.run_cbq_query(window_query)
        except CBQError as ex:
            error = self.process_CBQE(ex)
            self.assertEqual(error['code'], error_code)
            self.assertEqual(error['msg'], error_message)
|
119449
|
from os import path as osp
import cv2
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import lstsq
from scipy.ndimage import gaussian_filter
from scipy import interpolate
import argparse
import sys
sys.path.append("..")
import Basics.params as pr
import Basics.sensorParams as psp
from Basics.Geometry import Circle
# Command-line interface: single optional argument pointing at the folder that
# contains dataPack.npz (see polyCalibration.__init__).
parser = argparse.ArgumentParser()
parser.add_argument("-data_path", nargs='?', default='../data/calib_ball/',
                    help="Path to the folder with data pack.")
args = parser.parse_args()
class PolyTable:
    """Accumulator for per-calibration-image lookup tables.

    Each list holds one N*N table per processed image for the (x, y, value)
    triple produced by polyCalibration.calibrate_single.

    Fix: the lists were previously class attributes, so every PolyTable
    instance (and every run in the same process) appended into the same shared
    lists. They are now created per instance.
    """

    def __init__(self):
        # N*N RGB value tables, one entry per calibration image.
        self.value_list = []
        # N*N pixel-x location tables, one entry per calibration image.
        self.locx_list = []
        # N*N pixel-y location tables, one entry per calibration image.
        self.locy_list = []
class Grads:
    """Holder for the fitted per-bin polynomial gradient tables.

    Each grad holds an N*N*params table for one (normal_mag, normal_dir) pair.
    Attributes are created per instance (instead of as shared class attributes)
    for consistency with PolyTable and to avoid cross-instance state.
    """

    def __init__(self):
        self.grad_r = None  # N*N*6 polynomial params for the red channel
        self.grad_g = None  # N*N*6 polynomial params for the green channel
        self.grad_b = None  # N*N*6 polynomial params for the blue channel
        self.countmap = None  # per-bin sample counts (unused in this file)
class polyCalibration:
"""
Calibrate the polynomial table from the data pack
"""
def __init__(self,fn):
self.fn = osp.join(fn, "dataPack.npz")
data_file = np.load(self.fn,allow_pickle=True)
self.f0 = data_file['f0']
self.BallRad = psp.ball_radius
self.Pixmm = psp.pixmm
self.imgs = data_file['imgs']
self.radius_record = data_file['touch_radius']
self.touchCenter_record = data_file['touch_center']
self.bg_proc = self.processInitialFrame()
self.grads = Grads()
self.poly_table = PolyTable()
self.img_data_dir = fn
def processInitialFrame(self):
# gaussian filtering with square kernel with
# filterSize : kscale*2+1
# sigma : kscale
kscale = pr.kscale
img_d = self.f0.astype('float')
convEachDim = lambda in_img : gaussian_filter(in_img, kscale)
f0 = self.f0.copy()
for ch in range(img_d.shape[2]):
f0[:,:, ch] = convEachDim(img_d[:,:,ch])
frame_ = img_d
# Checking the difference between original and filtered image
diff_threshold = pr.diffThreshold
dI = np.mean(f0-frame_, axis=2)
idx = np.nonzero(dI<diff_threshold)
# Mixing image based on the difference between original and filtered image
frame_mixing_per = pr.frameMixingPercentage
h,w,ch = f0.shape
pixcount = h*w
for ch in range(f0.shape[2]):
f0[:,:,ch][idx] = frame_mixing_per*f0[:,:,ch][idx] + (1-frame_mixing_per)*frame_[:,:,ch][idx]
return f0
def calibrate_all(self):
num_img = np.shape(self.imgs)[0]
# loop through all the data points
for idx in range(num_img):
print("# iter " + str(idx))
self.calibrate_single(idx)
# final interpolation
grad_r, grad_g, grad_b = self.lookuptable_smooth()
# save the calibrated file
out_fn_path = osp.join(self.img_data_dir, "polycalib.npz")
np.savez(out_fn_path,\
bins=psp.numBins,
grad_r = grad_r,
grad_g = grad_g,
grad_b = grad_b)
print("Saved!")
def calibrate_single(self,idx):
# keep adding items
frame = self.imgs[idx,:,:,:]
# remove background
dI = frame.astype("float") - self.bg_proc
circle = Circle(int(self.touchCenter_record[idx,0]), int(self.touchCenter_record[idx,1]),int(self.radius_record[idx]))
bins = psp.numBins
ball_radius_pix = psp.ball_radius/psp.pixmm
center = circle.center
radius = circle.radius
sizey, sizex = dI.shape[:2]
[xqq, yqq] = np.meshgrid(range(sizex), range(sizey))
xq = xqq - center[0]
yq = yqq - center[1]
rsqcoord = xq*xq + yq*yq
rad_sq = radius*radius
# get the contact area
valid_rad = min(rad_sq, int(ball_radius_pix*ball_radius_pix))
valid_mask = rsqcoord < (valid_rad)
validId = np.nonzero(valid_mask)
xvalid = xq[validId]; yvalid = yq[validId]
rvalid = np.sqrt( xvalid*xvalid + yvalid*yvalid)
# get gradients
gradxseq = np.arcsin(rvalid/ball_radius_pix)
gradyseq = np.arctan2(-yvalid, -xvalid)
binm = bins - 1
x_binr = 0.5*np.pi/binm # x [0,pi/2]
y_binr = 2*np.pi/binm # y [-pi, pi]
# discritize the gradients
idx_x = np.floor(gradxseq/x_binr).astype('int')
idx_y = np.floor((gradyseq+np.pi)/y_binr).astype('int')
# r channel
value_map = np.zeros((bins,bins,3))
loc_x_map = np.zeros((bins,bins))
loc_y_map = np.zeros((bins,bins))
valid_r = dI[:,:,0][validId]
valid_x = xqq[validId]
valid_y = yqq[validId]
value_map[idx_x, idx_y, 0] += valid_r
# g channel
valid_g = dI[:,:,1][validId]
value_map[idx_x, idx_y, 1] += valid_g
# b channel
valid_b = dI[:,:,2][validId]
value_map[idx_x, idx_y, 2] += valid_b
loc_x_map[idx_x, idx_y] += valid_x
loc_y_map[idx_x, idx_y] += valid_y
loc_x_map = self.interpolate(loc_x_map)
loc_y_map = self.interpolate(loc_y_map)
value_map[:,:,0] = self.interpolate(value_map[:,:,0])
value_map[:,:,1] = self.interpolate(value_map[:,:,1])
value_map[:,:,2] = self.interpolate(value_map[:,:,2])
self.poly_table.value_list.append(value_map)
self.poly_table.locx_list.append(loc_x_map)
self.poly_table.locy_list.append(loc_y_map)
def interpolate(self,img):
# here we assume there are some zero value holes in the image,
# and we hope to fill these holes with interpolation
x = np.arange(0, img.shape[1])
y = np.arange(0, img.shape[0])
#mask invalid values
array = np.ma.masked_where(img == 0, img)
xx, yy = np.meshgrid(x, y)
#get only the valid values
x1 = xx[~array.mask]
y1 = yy[~array.mask]
newarr = img[~array.mask]
GD1 = interpolate.griddata((x1, y1), newarr.ravel(),
(xx, yy),
method='nearest', fill_value = 0) # cubic # nearest
return GD1
def lookuptable_smooth(self):
    """Fit a 2-D quadratic surface to every lookup-table bin.

    For each (i, j) bin, fits value = a*x^2 + b*y^2 + c*x*y + d*x + e*y + f
    over the per-sample locations collected in ``self.poly_table`` and
    stores the six coefficients per colour channel in ``self.grads``.

    Returns:
        tuple: (grad_r, grad_g, grad_b), each of shape (bins, bins, 6).
    """
    # NOTE: the previous version also built an unused design matrix from
    # self.bg_proc's pixel grid (h/w/c, xx/yy, xf/yf, A were never read);
    # fitPolyParams builds its own matrix, so that dead code was removed.
    table_v = np.array(self.poly_table.value_list)
    table_x = np.array(self.poly_table.locx_list)
    table_y = np.array(self.poly_table.locy_list)
    bins = psp.numBins  # assumes a module-level settings object `psp` -- TODO confirm
    self.grads.grad_r = np.zeros((bins, bins, 6))
    self.grads.grad_g = np.zeros((bins, bins, 6))
    self.grads.grad_b = np.zeros((bins, bins, 6))
    for i in range(table_v.shape[1]):
        for j in range(table_v.shape[2]):
            # One least-squares fit per bin and per colour channel.
            self.grads.grad_r[i, j, :] = self.fitPolyParams(
                table_x[:, i, j], table_y[:, i, j], table_v[:, i, j, 0])
            self.grads.grad_g[i, j, :] = self.fitPolyParams(
                table_x[:, i, j], table_y[:, i, j], table_v[:, i, j, 1])
            self.grads.grad_b[i, j, :] = self.fitPolyParams(
                table_x[:, i, j], table_y[:, i, j], table_v[:, i, j, 2])
    return self.grads.grad_r, self.grads.grad_g, self.grads.grad_b
def fitPolyParams(self, xf, yf, b):
    """Least-squares fit of a 2-D quadratic surface to samples (xf, yf, b).

    Returns the six coefficients of a*x^2 + b*y^2 + c*x*y + d*x + e*y + f.
    """
    design = np.array([xf * xf, yf * yf, xf * yf, xf, yf, np.ones(xf.shape)]).T
    coeffs, _residues, _rank, _singular = lstsq(design, b)
    return coeffs
if __name__ == "__main__":
    # Run the full polynomial calibration pipeline on the configured data.
    calibrator = polyCalibration(args.data_path)
    calibrator.calibrate_all()
|
119453
|
from sklearn.metrics import classification_report
from metrics import conlleval
# Earlier experiment outputs, kept for reference:
# label_test_file = 'output/MSRA/crf/result.txt'
# eval_file = 'output/MSRA/crf/eval_crf.txt'
# label_test_file = 'output/ywevents/crf/result.txt'
# eval_file = 'output/ywevents/crf/eval_crf.txt'
# Tab-separated file of (token, gold label, predicted label) per line.
label_test_file = 'output/ywevents/char2vec_blstm/results/test_label.txt'
# Destination for the classification and CoNLL evaluation reports.
eval_file = 'output/ywevents/char2vec_blstm/eval.txt'
def main():
    """Score predicted NER labels against gold labels and write reports.

    Reads tab-separated (token, gold, predicted) triples from
    ``label_test_file``, then writes a sklearn classification report plus
    a CoNLL-style evaluation into ``eval_file``.
    """
    targets = ['B-PER', 'I-PER', 'B-LOC', 'I-LOC', 'B-ORG', 'I-ORG', 'O']
    gold_labels = []
    pred_labels = []
    with open(label_test_file, 'r', encoding='UTF-8') as reader:
        for row in reader:
            parts = row.strip().split('\t')
            if len(parts) == 3:
                gold_labels.append(parts[1])
                pred_labels.append(parts[2])
            else:
                # Malformed line: show it but keep going.
                print(row)
    print('Test: {}\nPred: {}'.format(len(gold_labels), len(pred_labels)))
    report = classification_report(gold_labels, pred_labels, digits=4, target_names=targets)
    with open(eval_file, 'w+', encoding='UTF-8') as fw:
        fw.write('Classification report: \n')
        fw.write(report)
    test_report = conlleval.return_report(label_test_file)
    with open(eval_file, 'a+', encoding='UTF-8') as wf:
        wf.write(''.join(test_report))
# Allow running the evaluation as a standalone script.
if __name__ == '__main__':
    main()
|
119502
|
from tgt_grease.core.Types import Command
from tgt_grease.core import ImportTool
import importlib
class Help(Command):
    """The Help Command for GREASE

    Meant to provide a rich CLI Experience to users to enable quick help
    """
    purpose = "Provide Help Information"
    help = """
    Provide help information to users of GREASE about available commands. This though
    is just a demo of what you could print. Really it could be anything I suppose!

    Args:
        None

    """
    __author__ = "<NAME>."
    __version__ = "1.0.0"
    def __init__(self):
        super(Help, self).__init__()
    def execute(self, context):
        # context: execution context passed by the command runner (unused here).
        print("")
        print("Welcome to GREASE Help")
        impTool = ImportTool(self.ioc.getLogger())
        # Walk every module on the configured import search path and print
        # metadata for each attribute that loads as a Command instance.
        for route in self.ioc.getConfig().get('Import', 'searchPath'):
            mod = importlib.import_module(route)
            for attr in dir(mod):
                cmd = impTool.load(attr)
                if cmd and isinstance(cmd, Command):
                    print("<======================>")
                    print("[{0}] Purpose: [{1}]".format(
                        cmd.__class__.__name__,
                        cmd.purpose
                    ))
                    print("Author: {0}".format(cmd.__author__))
                    print("Current Version: {0}".format(cmd.__version__))
                    if cmd.os_needed:
                        print('Needs OS: {0}'.format(cmd.os_needed))
                    print(cmd.help)
                    print("<======================>")
        return True
|
119508
|
import arcade
def test_point_in_rectangle():
    """A point strictly inside an axis-aligned square is reported inside."""
    square = [(0, 0), (0, 50), (50, 50), (50, 0)]
    inside = arcade.is_point_in_polygon(25, 25, square)
    assert inside is True
def test_point_not_in_empty_polygon():
    """An empty polygon contains no points at all."""
    assert arcade.is_point_in_polygon(25, 25, []) is False
|
119515
|
import pandas as pd
from pathlib import Path
def process_files(input_dir, output_dir, record_name):
    """Convert one dataset split from the TF layout into the YOLO layout.

    Creates images/ and labels/ under *output_dir*, copies the images,
    and writes the record file plus classes.names.
    """
    images_out = output_dir / 'images'
    labels_out = output_dir / 'labels'
    images_out.mkdir(exist_ok=True)
    labels_out.mkdir(exist_ok=True)
    copy_images(input_dir, images_out)
    copy_labels(input_dir, labels_out,
                output_dir / record_name,
                output_dir / 'classes.names')
def copy_images(input_dir, img_dir):
    """Copy every *.png from *input_dir* into *img_dir*, byte for byte."""
    for source in input_dir.glob('*png'):
        target = img_dir / source.name
        print('Writing', target)
        target.write_bytes(source.read_bytes())
def copy_labels(input_dir, labels_dir, record_path, class_path):
    """Translate labels.csv into per-image YOLO label files plus a record list."""
    df = pd.read_csv(input_dir / 'labels.csv')
    # Assign a stable integer id to each class, in first-seen order.
    class_names = df['class'].unique()
    class_ids = {name: idx for idx, name in enumerate(class_names)}
    df['class_id'] = df['class'].map(class_ids)
    # One class name per line, in id order.
    with open(class_path, 'w') as class_file:
        for class_name in class_ids:
            class_file.write(f'{class_name}\n')
    # One label file per image; the record lists every image path.
    with open(record_path, 'w') as record_file:
        print('Writing', record_path)
        for input_filename, group in df.groupby('filename'):
            labels_path = labels_dir / Path(input_filename).with_suffix('.txt')
            with open(labels_path, 'w') as labels_file:
                print('Writing', labels_path)
                for _, row in group.iterrows():
                    labels_file.write(convert_boxes(row))
            record_file.write(f'data/images/{input_filename}\n')
def convert_boxes(row):
    """Extract one bounding box from a dataframe row as a YOLO label line.

    Output: 'class_id x_center y_center width height\\n', with coordinates
    normalized by the image width/height.
    """
    cls = row['class_id']
    cx = (row['xmax'] + row['xmin']) * 0.5 / row['width']
    cy = (row['ymax'] + row['ymin']) * 0.5 / row['height']
    w = (row['xmax'] - row['xmin']) / row['width']
    h = (row['ymax'] - row['ymin']) / row['height']
    return f'{cls} {cx} {cy} {w} {h}\n'
if __name__ == '__main__':
    # Convert the train and test splits with identical settings.
    for split, record in (('train', 'train.txt'), ('test', 'test.txt')):
        process_files(
            input_dir=Path(f'tensorflow/data/{split}'),
            output_dir=Path('pytorch/data'),
            record_name=record,
        )
|
119544
|
import subprocess
import sys
import eventlet.queue
import eventlet.tpool
import eventlet.green.subprocess
from eventlet import green
from eventlet.greenpool import GreenPool
from .BaseTerminal import BaseTerminal
import logging
logger = logging.getLogger(__name__)
class SubprocessTerminal(BaseTerminal):
    """Terminal backed by a local subprocess.

    The child's stdout/stderr are pumped into an internal queue by two
    green threads; ``recv`` pops from that queue.
    """
    def __init__(self, cmd):
        # cmd may be a command string, an argv list, or an existing Popen.
        self.process = make_simple_process(cmd)
        self.queue = eventlet.queue.Queue()
        self.greenpool = self._start_consume()
    def _start_consume(self):
        # One greenlet per output stream.
        greenpool = GreenPool(5)
        greenpool.spawn_n(self._consume_stream, self.process.stdout)
        greenpool.spawn_n(self._consume_stream, self.process.stderr)
        return greenpool
    def _consume_stream(self, stream):
        # Pump the stream into the queue until EOF (empty read).
        while True:
            data = stream.read()
            if not data:
                break
            self._send_to_slave(data)
    def recv(self, count=None):
        # count is accepted for interface compatibility but ignored.
        # NOTE(review): applies master_to_slave to data coming FROM the
        # child, which looks inverted relative to send() -- confirm intent.
        return self.master_to_slave(self.queue.get())
    def _send_to_slave(self, data):
        self.queue.put(data)
    def send(self, data):
        data = self.slave_to_master(data)
        self.process.stdin.write(data)
    def slave_to_master(self, x):
        # Translation hook for subclasses; identity by default.
        return x
    def master_to_slave(self, x):
        # Translation hook for subclasses; identity by default.
        return x
    def close(self):
        self.process.kill()
class LinuxTerminal(SubprocessTerminal):
    """Linux shell wrapped in script(1) for terminal-style behaviour."""
    def __init__(self, cmd=None):
        if cmd is None:
            cmd = ['bash']
        import shlex
        # Quote the argv and run it under `script -qfc` so the child sees a
        # pseudo-terminal; the typescript log is discarded to /dev/null.
        cmd = " ".join(map(shlex.quote, cmd))
        cmd = ['script', '-qfc', cmd, '/dev/null']
        super().__init__(cmd)
class WindowsTerminal(SubprocessTerminal):
    """cmd.exe terminal with CR/LF normalization in both directions."""
    def __init__(self, cmd=None):
        if cmd is None:
            cmd = ['cmd']
        super().__init__(cmd)
    def slave_to_master(self, data):
        # Expand bare CR to CRLF.
        data = data.replace(b'\r', b'\r\n')
        # NOTE(review): re-enqueuing here means data passed to send() is also
        # echoed back into the reader queue -- confirm this is intentional.
        self._send_to_slave(data)
        return data
    def master_to_slave(self, x):
        # cmd.exe expects CRLF line endings.
        return x.replace(b'\n', b'\r\n')
class NonBlockingSimplePipe:
    """Wraps a pipe so reads/writes never block the eventlet hub.

    Blocking OS pipes are serviced through the eventlet thread pool; green
    pipes are used directly.
    """
    def __init__(self, stream):
        logger.debug("NonBlockingSimplePipe.__init__ type(stream) == {}".format(type(stream)))
        logger.debug("NonBlockingSimplePipe.__init__ type(stream).__name__ == {!r}".format(type(stream).__name__))
        self.needs_thread = not is_greenpipe(stream)
        self.stream = stream
    def read(self):
        """Read up to 2048 bytes."""
        if not self.needs_thread:
            return self._read()
        return eventlet.tpool.execute(self._read)
    def write(self, data):
        """Write *data* and flush."""
        if not self.needs_thread:
            return self._write(data)
        return eventlet.tpool.execute(self._write, data)
    def _read(self):
        return self.stream.read(2048)
    def _write(self, data):
        self.stream.write(data)
        self.stream.flush()
class NonBlockingSimpleProcess:
    """Subprocess wrapper exposing stdio as non-blocking pipes."""
    def __init__(self, cmd):
        process = make_subprocess(cmd)
        self.proc = process
        self.stdin = NonBlockingSimplePipe(process.stdin)
        self.stdout = NonBlockingSimplePipe(process.stdout)
        self.stderr = NonBlockingSimplePipe(process.stderr)
    def kill(self):
        """Terminate the underlying subprocess."""
        self.proc.kill()
def is_greenpipe(obj):
    """Return True when *obj* is an eventlet green pipe.

    GreenFileIO is not exposed and GreenPipe is not a class, so the check
    has to go by the type's name.
    """
    return "GreenFileIO" == type(obj).__name__
def os_terminal():
    """Instantiate the terminal class registered for the current platform."""
    terminal_cls = OS_TERMINALS[sys.platform]
    return terminal_cls()
def make_subprocess(obj):
    """Coerce *obj* into a green Popen.

    Accepts a command string, an argv list, or an already-created
    (green or regular) Popen, which is returned unchanged.
    """
    def green_popen(cmd):
        pipe = subprocess.PIPE
        return green.subprocess.Popen(cmd, stdin=pipe, stdout=pipe, stderr=pipe, bufsize=0)
    if isinstance(obj, str):
        return green_popen([obj])
    if isinstance(obj, list):
        return green_popen(obj)
    if isinstance(obj, (subprocess.Popen, green.subprocess.Popen)):
        return obj
    raise Exception("Invalid argument to make_subprocess: {}".format(type(obj)))
def make_simple_process(obj):
    """Return *obj* unchanged if already wrapped, else wrap a new subprocess."""
    if isinstance(obj, NonBlockingSimpleProcess):
        return obj
    return NonBlockingSimpleProcess(make_subprocess(obj))
# Maps sys.platform keys to the terminal implementation for that OS.
OS_TERMINALS = {
    'linux': LinuxTerminal,
    'win32': WindowsTerminal
}
|
119547
|
import torch.nn as nn
from Models.PathEncoder import PathEncoder
from Models.SequenceEncoder import SequenceEncoder
from Models.Transformer import Transformer
from Models.OperationMix import OperationMix
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pack_sequence
import torch
class Encoder(nn.Module):
    """Encodes a batch of AST paths plus surrounding context paths.

    Produces a mixed encoding of the focus paths, an encoding of the
    before/after context (separated by a learned split token), and an
    initial hidden state for the decoder.
    """
    def __init__(self, path_vocab_size, src_tgt_vocab_size, position_vocab_size, in_dim, h_dim, num_layers, dropout, device, ctx_mode, padding_idx=0):
        super(Encoder, self).__init__()
        self.path_encoder = PathEncoder(path_vocab_size, src_tgt_vocab_size, position_vocab_size, in_dim, h_dim, num_layers, dropout, padding_idx)
        # ctx_mode selects how the context paths are mixed: an LSTM, a
        # Transformer, or no context encoder at all.
        if ctx_mode == 'lstm':
            self.ctx_encoder = SequenceEncoder(h_dim, h_dim, num_layers, dropout)
        elif ctx_mode == 'transformer':
            self.ctx_encoder = Transformer(in_dim=h_dim, dropout=dropout, device=device)
        else:
            self.ctx_encoder = None
        # Learned separator placed between before- and after-context paths.
        self.split_token = nn.Parameter(torch.Tensor(1, h_dim))
        torch.nn.init.xavier_uniform_(self.split_token.data)
        self.operation_mix = OperationMix(h_dim, dropout)
        self.h_dim = h_dim
        self.num_layers = num_layers
        self.device = device
    def create_split_tokens(self, batch_size):
        # (1, batch_size, h_dim): one copy of the split token per example.
        return self.split_token.repeat(batch_size, 1).unsqueeze(dim=0)
    def forward(self, packed_srcs, packed_srcs_positions, packed_tgts, packed_tgts_positions, packed_paths, packed_paths_positions, focus_num_of_paths, before_ctx_num_of_paths, after_ctx_num_of_paths):
        """
        :param packed_srcs: PackedSequence of shape (src_length, batch_1)
        :param packed_srcs_positions: PackedSequence of shape (src_length, batch_1)
        :param packed_tgts: PackedSequence of shape (tgt_length, batch_1)
        :param packed_tgts_positions: PackedSequence of shape (tgt_length, batch_1)
        :param packed_paths: PackedSequence of shape (path_length, batch_1)
        :param packed_paths_positions: PackedSequence of shape (path_length, batch_1)
        :param focus_num_of_paths: per-example counts of focus paths
        :param before_ctx_num_of_paths: per-example counts of before-context paths
        :param after_ctx_num_of_paths: per-example counts of after-context paths
        :return: path_encoded, ctx: of shape (batch, h_dim)
        """
        batch_size = len(focus_num_of_paths)
        # (num_all_paths, h_dim)
        encoded_path = self.path_encoder(packed_srcs, packed_srcs_positions, packed_tgts, packed_tgts_positions,
                                         packed_paths, packed_paths_positions)
        # Paths arrive concatenated in focus/before/after order; split them
        # back into per-example chunks.
        num_of_paths = focus_num_of_paths + before_ctx_num_of_paths + after_ctx_num_of_paths
        encoded_path_list = torch.split(encoded_path, num_of_paths, dim=0)
        encoded_focus_path_list = encoded_path_list[:batch_size]
        before_ctx_encoded_path_list = encoded_path_list[batch_size: 2 * batch_size]
        after_ctx_encoded_path_list = encoded_path_list[-batch_size:]
        # h_list = list(map(lambda t: torch.mean(torch.cat(t, dim=0), dim=0).unsqueeze(dim=0).unsqueeze(dim=0), zip(before_ctx_encoded_path_list, encoded_focus_path_list, after_ctx_encoded_path_list)))
        #
        # # (num_layers, batch_size, h_dim)
        # h = torch.cat(h_list, dim=1).repeat(self.num_layers, 1, 1)
        packed_encoded_path = pack_sequence(encoded_focus_path_list, enforce_sorted=False)
        # TODO: Consider mixing the paths with Transformer before the operation_mix
        packed_mixed_encoded_path = self.operation_mix(packed_encoded_path)
        split_tokens = [self.split_token] * batch_size
        # real_batch_size * ((before_ctx_num_of_paths, h_dim),(1, h_dim),(after_ctx_num_of_paths, h_dim))
        ctx_encoded_path_list = list(map(lambda t: torch.cat(t, dim=0), zip(before_ctx_encoded_path_list, split_tokens, after_ctx_encoded_path_list)))
        ctx_encoded_path_packed = pack_sequence(ctx_encoded_path_list, enforce_sorted=False)
        packed_encoded_ctx = ctx_encoded_path_packed
        if self.ctx_encoder is not None:
            # (real_batch_size, num_of_paths, num_directions * h_dim)
            packed_encoded_ctx = self.ctx_encoder(ctx_encoded_path_packed)
        # Decoder initial state: length-normalized sum over focus + context
        # encodings, repeated across decoder layers.
        padded_encoded_path, encoded_lengths = pad_packed_sequence(packed_encoded_path, batch_first=True)
        padded_encoded_ctx_path, encoded_ctx_lengths = pad_packed_sequence(packed_encoded_ctx, batch_first=True)
        lengths = encoded_lengths + encoded_ctx_lengths
        h = (padded_encoded_path.sum(dim=1) + padded_encoded_ctx_path.sum(dim=1)) / lengths.to(self.device).view(-1, 1)
        h = h.unsqueeze(dim=0).repeat(self.num_layers, 1, 1)
        return packed_mixed_encoded_path, packed_encoded_ctx, h
|
119569
|
import os
from os.path import abspath, dirname, join
from setuptools import setup, find_packages
# Path to the package __init__.py, used to read __version__ at build time.
INIT_FILE = join(dirname(abspath(__file__)), 'pydux', '__init__.py')
def long_description():
    """Return the long description for the package.

    Reads README.txt from the current directory when present; otherwise
    falls back to a short one-line description.
    """
    if os.path.exists('README.txt'):
        # Context manager: the previous version leaked the file handle.
        with open('README.txt') as fh:
            return fh.read()
    return 'Python implementation of Redux'
def get_version():
    """Parse __version__ out of the package's __init__ file."""
    with open(INIT_FILE) as fd:
        for line in fd:
            if not line.startswith('__version__'):
                continue
            return line.split()[-1].strip('\'')
    raise AttributeError('Package does not have a __version__')
# Package metadata; the version string is read from pydux/__init__.py.
setup(
    name='pydux',
    description="Python + Redux = Pydux",
    long_description=long_description(),
    url="http://github.com/usrlocalben/pydux/",
    version=get_version(),
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['pydux'],
    install_requires=[],
    license='MIT',
)
|
119629
|
from aiohttp import web
from redbull import Manager
# Redbull's Manager wraps the aiohttp app and generates routes from the
# annotated handler signatures registered via @mg.api().
mg = Manager(web.Application())
@mg.api()
async def say_hi(name: str, please: bool):
    "Says hi if you say please"
    if please:
        return 'hi ' + name
    return 'um hmm'
# Start serving the registered API.
mg.run()
|
119648
|
from haro.plugins.alias_models import UserAliasName
from haro.slack import get_slack_id_by_name
def get_slack_id(session, user_name):
    """Return the Slack user_id for the given user name.

    If the name exists as a Slack user name, the id is fetched from the
    Slack API. Otherwise, if the name is registered as an alias in
    user_alias_name, the Slack user_id linked to that alias is returned.

    :params session: sqlalchemy.orm.Session
    :params str user_name: the user's name or registered alias
    :return: the Slack user id, or None when neither lookup matches
    """
    slack_id = get_slack_id_by_name(user_name)
    if not slack_id:
        # Fall back to the alias table when the API lookup finds nothing.
        slack_id = (session.query(UserAliasName.slack_id)
                    .filter(UserAliasName.alias_name == user_name)
                    .scalar())
    return slack_id
|
119679
|
def get_version_from_win32_pe(file):
    """Extract the version string from a win32 PE binary.

    Scans the file for the VS_VERSION_INFO resource and decodes the
    VS_FIXEDFILEINFO version words.
    http://windowssdk.msdn.microsoft.com/en-us/library/ms646997.aspx

    :param file: filesystem path of the PE file.
    :return: dotted version string, or "Unknown" when the file cannot be
        read or contains no version resource.
    """
    sig = struct.pack("32s", u"VS_VERSION_INFO".encode("utf-16-le"))
    # This pulls the whole file into memory, so not very feasible for
    # large binaries.
    try:
        # Binary mode is required: the signature is bytes, so text mode
        # made filedata.find(sig) raise TypeError on Python 3. The context
        # manager also closes the handle the old code leaked.
        with open(file, "rb") as fh:
            filedata = fh.read()
    except IOError:
        return "Unknown"
    offset = filedata.find(sig)
    if offset == -1:
        return "Unknown"
    filedata = filedata[offset + 32 : offset + 32 + (13 * 4)]
    version_struct = struct.unpack("13I", filedata)
    ver_ms, ver_ls = version_struct[4], version_struct[5]
    # NOTE(review): this word order differs from the canonical
    # HIWORD(ms).LOWORD(ms).HIWORD(ls).LOWORD(ls); kept as-is to preserve
    # existing output -- confirm against callers.
    return "%d.%d.%d.%d" % (ver_ls & 0x0000ffff, (ver_ms & 0xffff0000) >> 16,
                            ver_ms & 0x0000ffff, (ver_ls & 0xffff0000) >> 16)
|
119689
|
class NodeConfig:
    """Connection settings for a single node."""
    def __init__(self, node_name: str, ws_url: str) -> None:
        """Store the node's display name and its websocket endpoint URL."""
        self.node_name, self.ws_url = node_name, ws_url
|
119693
|
import inspect
import torch.optim.lr_scheduler as lr_scheduler
from ocpmodels.common.utils import warmup_lr_lambda
class LRScheduler:
    """Thin wrapper around torch.optim learning-rate schedulers.

    Notes:
        When no scheduler is named in the config, the default is a LambdaLR
        driven by warmup_lr_lambda (ocpmodels.common.utils) rather than no
        scheduler at all -- this keeps backward compatibility. To disable
        scheduling entirely, set scheduler: "Null" in the optim config.

    Args:
        optimizer (obj): torch optim object
        config (dict): Optim dict from the input config
    """
    def __init__(self, optimizer, config):
        self.optimizer = optimizer
        self.config = config.copy()
        if "scheduler" not in self.config:
            # Backward-compatible default: warmup LambdaLR.
            self.scheduler_type = "LambdaLR"
            self.config["lr_lambda"] = lambda x: warmup_lr_lambda(x, self.config)
        else:
            self.scheduler_type = self.config["scheduler"]
        if self.scheduler_type != "Null":
            # First bind the scheduler class so filter_kwargs can inspect
            # its signature, then instantiate it in place.
            self.scheduler = getattr(lr_scheduler, self.scheduler_type)
            self.scheduler = self.scheduler(optimizer, **self.filter_kwargs(config))
    def step(self, metrics=None, epoch=None):
        """Advance the scheduler; ReduceLROnPlateau needs a metric value."""
        if self.scheduler_type == "Null":
            return
        if self.scheduler_type != "ReduceLROnPlateau":
            self.scheduler.step()
            return
        if metrics is None:
            raise Exception(
                "Validation set required for ReduceLROnPlateau."
            )
        self.scheduler.step(metrics)
    def filter_kwargs(self, config):
        """Keep only config entries matching the scheduler's signature."""
        # adapted from https://stackoverflow.com/questions/26515595/
        sig = inspect.signature(self.scheduler)
        filter_keys = [
            param.name
            for param in sig.parameters.values()
            if param.kind == param.POSITIONAL_OR_KEYWORD
        ]
        filter_keys.remove("optimizer")
        return {key: self.config[key] for key in self.config if key in filter_keys}
    def get_lr(self):
        """Current learning rate of the first param group (None if empty)."""
        return next((group["lr"] for group in self.optimizer.param_groups), None)
|
119740
|
from __future__ import absolute_import
from typing import List, Dict, Any, Optional
from tinydb import TinyDB, Query
from tinydb.operations import add, decrement, set
import logging
import random
# TinyDB team-rating store; one document per team.
db = TinyDB('../data/list.json', indent=4)
teamdata = Query()
# Contest standings export: space-separated columns per team line.
file = '../teams/2020ICPCJinan'
f = open(file, 'r')
ranklist = f.read().split('\n')
for team in ranklist:
    data = team.split(' ')
    # print(data)
    l = len(data)
    if l > 5:
        # Columns 2 .. l-7 together form the (possibly split) team name;
        # the last columns hold the member names at fixed offsets.
        teamname = ''
        for i in range(2, l - 6):
            teamname = teamname + data[i]
        # A leading star marks unofficial teams; strip it.
        if teamname[0] == '☆':
            teamname = teamname[1:]
        # print(data[1], teamname, data[-6], data[-4], data[-2])
        nowteam = db.search((teamdata.chschool == data[1])
                            & (teamdata.chname == teamname))
        if len(nowteam) == 0:
            # New team: insert with the default initial rating.
            db.insert({
                'chname': f"{teamname}",
                'enname': f"",
                'chschool': f"{data[1]}",
                'enschool': "",
                'rating': 1500,
                'members': [
                    data[-6],
                    data[-4],
                    data[-2],
                ],
                'history': [],
                'bestrank': 65536,
            })
            # print(data[1], en, data[-4], data[-3], data[-2], data[-1])
            print(data[1], teamname, data[-6], data[-4], data[-2])
        else:
            # Existing team: refresh the member list only.
            db.update(
                set('members', [
                    data[-6],
                    data[-4],
                    data[-2],
                ]),
                (teamdata.chschool == data[1]) & (teamdata.chname == teamname),
            )
119793
|
from typing import List
import datasets
# Citation, taken from https://github.com/microsoft/CodeXGLUE
# Used whenever a child dataset does not define its own _CITATION.
_DEFAULT_CITATION = """@article{CodeXGLUE,
title={CodeXGLUE: A Benchmark Dataset and Open Challenge for Code Intelligence},
year={2020},}"""
class Child:
    """Base loader for one CodeXGLUE sub-dataset.

    Subclasses override the class attributes below and must provide a
    ``generate_urls(split)`` iterator of (key, url) pairs.
    """
    _DESCRIPTION = None
    _FEATURES = None
    _CITATION = None
    SPLITS = {"train": datasets.Split.TRAIN}
    _SUPERVISED_KEYS = None
    def __init__(self, info):
        # info: per-dataset metadata dict (description, project_url, raw_url, ...).
        self.info = info
    def homepage(self):
        return self.info["project_url"]
    def _info(self):
        # This is the description that will appear on the datasets page.
        return datasets.DatasetInfo(
            description=self.info["description"] + "\n\n" + self._DESCRIPTION,
            features=datasets.Features(self._FEATURES),
            homepage=self.homepage(),
            citation=self._CITATION or _DEFAULT_CITATION,
            supervised_keys=self._SUPERVISED_KEYS,
        )
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        SPLITS = self.SPLITS
        _URL = self.info["raw_url"]
        urls_to_download = {}
        for split in SPLITS:
            if split not in urls_to_download:
                urls_to_download[split] = {}
            for key, url in self.generate_urls(split):
                # Relative URLs resolve against the dataset's raw_url.
                if not url.startswith("http"):
                    url = _URL + "/" + url
                urls_to_download[split][key] = url
        downloaded_files = {}
        for k, v in urls_to_download.items():
            downloaded_files[k] = dl_manager.download_and_extract(v)
        return [
            datasets.SplitGenerator(
                name=SPLITS[k],
                gen_kwargs={"split_name": k, "file_paths": downloaded_files[k]},
            )
            for k in SPLITS
        ]
    def check_empty(self, entries):
        # Parallel files must be all-empty or all-non-empty for each line set.
        all_empty = all([v == "" for v in entries.values()])
        all_non_empty = all([v != "" for v in entries.values()])
        if not all_non_empty and not all_empty:
            raise RuntimeError("Parallel data files should have the same number of lines.")
        return all_empty
class TrainValidTestChild(Child):
    """Child variant exposing the standard train/valid/test splits."""
    SPLITS = {
        "train": datasets.Split.TRAIN,
        "valid": datasets.Split.VALIDATION,
        "test": datasets.Split.TEST,
    }
|
119798
|
from tkinter import Tk
from pyDEA.core.gui_modules.custom_canvas_gui import StyledCanvas
from pyDEA.core.utils.dea_utils import bg_color
def test_bg_color():
    """StyledCanvas must use the shared bg_color as its background.

    The Tk root is destroyed in a finally block so a failing assertion no
    longer leaks the window (the previous version skipped destroy() on
    failure).
    """
    parent = Tk()
    try:
        canvas = StyledCanvas(parent)
        assert canvas.cget('background') == bg_color
    finally:
        parent.destroy()
|
119812
|
from nose.tools import assert_equal, assert_raises
class TestProdThree(object):
    """Checks for Solution.max_prod_three.

    NOTE(review): ``Solution`` is not imported here; it must be defined or
    imported elsewhere before this test runs -- confirm the module setup.
    """
    def test_prod_three(self):
        solution = Solution()
        # Invalid inputs: None and fewer than three numbers raise.
        assert_raises(TypeError, solution.max_prod_three, None)
        assert_raises(ValueError, solution.max_prod_three, [1, 2])
        # Exactly three numbers: the only possible product.
        assert_equal(solution.max_prod_three([5, -2, 3]), -30)
        # Picks 5 * 3 * 4 = 60 from the longer list.
        assert_equal(solution.max_prod_three([5, -2, 3, 1, -1, 4]), 60)
        print('Success: test_prod_three')
def main():
    """Run the max-product-of-three test suite."""
    TestProdThree().test_prod_three()
if __name__ == '__main__':
    main()
|
119841
|
import ChromaPy32 as Chroma # Import the Chroma Module
from time import sleep # Import the sleep-function
Headset = Chroma.Headset()  # initialize a new Headset instance
RED = (255, 0, 0)  # color as an (R, G, B) tuple
# Light every LED on the headset red, one at a time, with a short delay
# so the fill is visible as an animation.
for x in range(0, Headset.MaxLED):  # Headset.MaxLED is the LED count
    Headset.setbyLED(x, RED)  # stage the x-th LED as red
    Headset.applyLED()  # push the staged grid to the connected headset
    sleep(0.1)  # 100 ms between LEDs
|
119890
|
import re
def soundex(name):
    """Return the upper-cased Soundex code of each space-separated word."""
    return ' '.join(process_word(part).upper() for part in name.split(' '))
def process_word(word):
    """Compute the 4-character Soundex code for a single word."""
    first_letter = word[0]
    lowered = word.lower()
    # Drop h/w everywhere except the first position.
    code = lowered[0] + re.sub('[hw]', '', lowered[1:])
    # Map consonants to their Soundex digit groups.
    code = code.translate(str.maketrans('bfpvcgjkqsxzdtlmnr', '111122222222334556'))
    # Collapse runs of the same digit into one.
    code = re.sub(r'(\d)(\1)+', lambda m: m.group()[0], code)
    # Remove vowels (and y) except in the first position.
    code = code[0] + re.sub('[aeiouy]', '', code[1:])
    # The code always starts with the word's original first letter.
    if code[0].isdigit():
        code = first_letter + code[1:]
    # Pad with zeros (or truncate) to exactly four characters.
    if len(re.findall(r'\d', code)) < 3:
        return (code + '000')[:4]
    return code[:4]
|
119908
|
from typing import List
from boa3.builtin import public
@public
def main(string: str, sep: str, maxsplit: int) -> List[str]:
    """Split *string* on *sep* at most *maxsplit* times (str.split)."""
    return string.split(sep, maxsplit)
|
119926
|
from twindb_backup.configuration import TwinDBBackupConfig
def test_gcs(config_file):
    """GCS settings are parsed out of the sample config file."""
    cfg = TwinDBBackupConfig(config_file=str(config_file))
    assert cfg.gcs.gc_credentials_file == 'XXXXX'
    assert cfg.gcs.gc_encryption_key == ''
    assert cfg.gcs.bucket == 'twindb-backups'
def test_no_gcs_section(tmpdir):
    """gcs resolves to None when the config file has no gcs section."""
    cfg_path = str(tmpdir.join('twindb-backup.cfg'))
    with open(cfg_path, 'w') as fh:
        fh.write('')
    assert TwinDBBackupConfig(config_file=cfg_path).gcs is None
|
119977
|
import os
import argparse
from sentivi import Pipeline
from sentivi.data import DataLoader, TextEncoder
from sentivi.classifier import *
from sentivi.text_processor import TextProcessor
# Classifier under test and the encodings to sweep over.
CLASSIFIER = SVMClassifier
ENCODING_TYPE = ['one-hot', 'bow', 'tf-idf', 'word2vec']
if __name__ == '__main__':
    argument_parser = argparse.ArgumentParser(description='Sentiment Analysis Experiments')
    argument_parser.add_argument('--n_grams', type=int, default=1)
    argument_parser.add_argument('--train_file', type=str, default=os.path.join('data', 'data_done.txt'))
    argument_parser.add_argument('--test_file', type=str, default=os.path.join('data', 'test_data.txt'))
    argument_parser.add_argument('--log', type=str, default=os.path.join('data', 'logs', f'{CLASSIFIER.__name__}.txt'))
    args = argument_parser.parse_args()
    text_processor = TextProcessor(methods=['word_segmentation', 'remove_punctuation'])
    # Context manager guarantees the log is flushed and closed even when a
    # pipeline run raises (the previous version leaked the handle on error).
    with open(args.log, 'w+') as file_writer:
        for encoding in ENCODING_TYPE:
            # One full train/evaluate pipeline per encoding scheme.
            train_pipeline = Pipeline(DataLoader(text_processor=text_processor, n_grams=args.n_grams),
                                      TextEncoder(encode_type=encoding, model_path='./pretrained/wiki.vi.model.bin.gz'),
                                      CLASSIFIER(num_labels=3, verbose=True))
            train_results = train_pipeline(train=args.train_file, test=args.test_file, num_epochs=10, batch_size=4)
            print(f'Experiment_{encoding}_{CLASSIFIER.__name__}:\n{train_results}')
            file_writer.write(f'Experiment_{encoding}_{CLASSIFIER.__name__}:\n{train_results}\n')
            file_writer.write('*'*15 + '\n')
|
119978
|
import abc
from typing import Union
import lunzi.nn as nn
class BasePolicy(abc.ABC):
    """Minimal policy interface: map states to actions."""
    @abc.abstractmethod
    def get_actions(self, states):
        """Return the actions chosen for a batch of states."""
        pass
BaseNNPolicy = Union[BasePolicy, nn.Module]  # should be Intersection, see PEP544
|
119998
|
import os
import pytest
@pytest.fixture
def test_data():
    """Load the bundled LASER test vectors, or None when absent."""
    import numpy as np
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'data', 'laserembeddings-test-data.npz')
    if not os.path.isfile(data_path):
        return None
    return np.load(data_path)
|
120023
|
from typing import Iterable, Optional
class Professor:
    """
    A professor is one or many people in charge of a given academical event.

    :param name: the name(s)
    :type name: str
    :param email: the email(s)
    :type email: Optional[str]
    """
    def __init__(self, name: str, email: Optional[str] = None):
        self.name = name
        self.email = email
    def __str__(self):
        has_email = self.email is not None and len(self.email) > 0
        return f"{self.name} ({self.email})" if has_email else self.name
def merge_professors(professors: Iterable[Professor]) -> Professor:
    """
    Merges multiple professors into one.

    :param professors: multiple professors (any iterable, including one-shot
        generators)
    :type professors: Iterable[Professor]
    :return: the new professor, whose name and email are ' & '-joined
    :rtype: Professor

    :Example:

    >>> p1 = Professor('<NAME>', '<EMAIL>')
    >>> p2 = Professor('<NAME>', '<EMAIL>')
    >>> p3 = merge_professors((p1, p2))
    """
    # Materialize first: the iterable is consumed twice below, so a
    # generator input would previously yield an empty second join.
    professors = list(professors)
    name = " & ".join(professor.name for professor in professors)
    email = " & ".join(professor.email for professor in professors if professor.email)
    if len(email) > 0:
        return Professor(name, email)
    return Professor(name)
|
120063
|
import itertools
import numpy as np
import matplotlib.pyplot as plt
import pdb
import ad3.factor_graph as fg
# NOTE: Python 2 script (print statements, xrange).
# Problem size and cardinality constraints on the number of zero-states.
num_nodes = 5 #30
max_num_states = 2 #5
lower_bound = 3 #5 # Minimum number of zeros.
upper_bound = 4 #10 # Maximum number of zeros.
# Create a random tree.
max_num_children = 5
parents = [-1] * num_nodes
available_nodes = range(1, num_nodes)
nodes_to_process = [0]
# Repeatedly pop a node and attach a random subset of the remaining
# nodes as its children until every node has a parent.
while len(nodes_to_process) > 0:
    i = nodes_to_process.pop()
    num_children = 1 + np.floor(np.random.uniform() * max_num_children)
    if num_children > len(available_nodes):
        num_children = len(available_nodes)
    ind_children = np.random.permutation(len(available_nodes))[0:num_children]
    children = []
    for ind in ind_children:
        children.append(available_nodes[ind])
    for j in children:
        parents[j] = i
        nodes_to_process.insert(0, j)
        available_nodes.remove(j)
#parents = range(-1, num_nodes-1)
print parents
# Design number of states for each node.
num_states_array = 1 + np.floor(np.random.uniform(size=num_nodes) * max_num_states)
num_states = [int(x) for x in num_states_array]
print num_states
# 1) Build a factor graph using DENSE factors.
pairwise_factor_graph = fg.PFactorGraph()
# One multi-valued variable per tree node, with random unary potentials.
multi_variables = []
for i in xrange(num_nodes):
    multi_variable = pairwise_factor_graph.create_multi_variable(num_states[i])
    for state in xrange(num_states[i]):
        value = np.random.normal()
        multi_variable.set_log_potential(state, value)
    multi_variables.append(multi_variable)
description = ''
num_factors = 0
# One DENSE pairwise factor per tree edge, with random edge potentials.
edge_log_potentials = []
edge_log_potentials.append([])
for i in xrange(1, num_nodes):
    p = parents[i]
    edge_log_potentials.append([])
    for k in xrange(num_states[p]):
        for j in xrange(num_states[i]):
            value = np.random.normal()
            edge_log_potentials[i].append(value)
    edge_variables = []
    edge_variables.append(multi_variables[p])
    edge_variables.append(multi_variables[i])
    pairwise_factor_graph.create_factor_dense(edge_variables, edge_log_potentials[i])
    num_factors += 1
    # Print factor to string.
    description += 'DENSE ' + str(num_states[p] + num_states[i])
    for k in xrange(num_states[p]):
        description += ' ' + str(1 + multi_variables[p].get_state(k).get_id())
    for j in xrange(num_states[i]):
        description += ' ' + str(1 + multi_variables[i].get_state(j).get_id())
    description += ' ' + str(2)
    description += ' ' + str(num_states[p])
    description += ' ' + str(num_states[i])
    t = 0
    for k in xrange(num_states[p]):
        for j in xrange(num_states[i]):
            description += ' ' + str(edge_log_potentials[i][t])
            t += 1
    description += '\n'
# BUDGET factors constrain how many nodes take state 0.
if upper_bound >= 0 or lower_bound >= 0:
    variables = []
    for i in xrange(num_nodes):
        variables.append(multi_variables[i].get_state(0))
    # Budget factor for upper bound.
    negated = [False] * num_nodes
    pairwise_factor_graph.create_factor_budget(variables, negated, upper_bound)
    num_factors += 1
    # Print factor to string.
    description += 'BUDGET ' + str(num_nodes)
    for i in xrange(num_nodes):
        description += ' ' + str(1 + multi_variables[i].get_state(0).get_id())
    description += ' ' + str(upper_bound)
    description += '\n'
    # Budget factor for lower bound.
    negated = [True] * num_nodes
    pairwise_factor_graph.create_factor_budget(variables, negated, num_nodes - lower_bound)
    num_factors += 1
    # Print factor to string.
    description += 'BUDGET ' + str(num_nodes)
    for i in xrange(num_nodes):
        description += ' ' + str(-(1 + multi_variables[i].get_state(0).get_id()))
    description += ' ' + str(num_nodes - lower_bound)
    description += '\n'
# Write factor graph to file.
f = open('example_budget.fg', 'w')
f.write(str(sum(num_states)) + '\n')
f.write(str(num_factors) + '\n')
for i in xrange(num_nodes):
    for j in xrange(num_states[i]):
        f.write(str(multi_variables[i].get_log_potential(j)) + '\n')
f.write(description)
f.close()
# Run AD3.
pairwise_factor_graph.set_eta_ad3(.1)
pairwise_factor_graph.adapt_eta_ad3(True)
pairwise_factor_graph.set_max_iterations_ad3(1000)
value, posteriors, additional_posteriors, status = pairwise_factor_graph.solve_lp_map_ad3()
# Print solution: argmax of each node's posterior slice.
t = 0
best_states = []
for i in xrange(num_nodes):
    local_posteriors = posteriors[t:(t+num_states[i])]
    j = np.argmax(local_posteriors)
    best_states.append(j)
    t += num_states[i]
print best_states
# 2) Build a factor graph using a GENERAL_TREE factor.
factor_graph = fg.PFactorGraph()
# Flatten all unary potentials into one list of binary variables.
variable_log_potentials = []
additional_log_potentials = []
num_current_states = num_states[0]
for j in xrange(num_current_states):
    value = multi_variables[0].get_log_potential(j)
    variable_log_potentials.append(value)
for i in xrange(1, num_nodes):
    p = parents[i]
    num_previous_states = num_states[p]
    num_current_states = num_states[i]
    for j in xrange(num_current_states):
        value = multi_variables[i].get_log_potential(j)
        variable_log_potentials.append(value)
    count = 0
    for k in xrange(num_previous_states):
        for j in xrange(num_current_states):
            value = edge_log_potentials[i][count]
            count += 1
            additional_log_potentials.append(value)
# Count variables: in-budget counts score 0, out-of-budget a large penalty.
if upper_bound >= 0 or lower_bound >= 0:
    for b in xrange(num_nodes+1):
        if b >= lower_bound and b <= upper_bound:
            variable_log_potentials.append(0.0)
        else:
            variable_log_potentials.append(-1000.0)
            #variable_log_potentials.append(-np.inf)
binary_variables = []
factors = []
for i in xrange(len(variable_log_potentials)):
    binary_variable = factor_graph.create_binary_variable()
    binary_variable.set_log_potential(variable_log_potentials[i])
    binary_variables.append(binary_variable)
#pdb.set_trace()
if upper_bound >= 0 or lower_bound >= 0:
    factor = fg.PFactorGeneralTreeCounts()
    # Dump the GENERAL_TREE_COUNTS factor graph to a file as well.
    f = open('example_general_tree_counts.fg', 'w')
    f.write(str(len(binary_variables)) + '\n')
    f.write(str(1) + '\n')
    for i in xrange(len(binary_variables)):
        f.write(str(variable_log_potentials[i]) + '\n')
    f.write('GENERAL_TREE_COUNTS ' + str(len(binary_variables)))
    for i in xrange(len(binary_variables)):
        f.write(' ' + str(i+1))
    f.write(' ' + str(num_nodes))
    for i in xrange(num_nodes):
        f.write(' ' + str(num_states[i]))
    for i in xrange(num_nodes):
        f.write(' ' + str(parents[i]))
    for i in xrange(len(additional_log_potentials)):
        f.write(' ' + str(additional_log_potentials[i]))
    f.write('\n')
    f.close()
else:
    factor = fg.PFactorGeneralTree()
variables = binary_variables
factor_graph.declare_factor(factor, variables, True)
factor.initialize(parents, num_states)
factor.set_additional_log_potentials(additional_log_potentials)
factors.append(factor)
# Run AD3.
factor_graph.set_eta_ad3(.1)
factor_graph.adapt_eta_ad3(True)
factor_graph.set_max_iterations_ad3(1000)
value, posteriors, additional_posteriors, status = factor_graph.solve_lp_map_ad3()
# Print solution: argmax of each node's posterior slice; should match the
# DENSE+BUDGET formulation above.
t = 0
best_states = []
for i in xrange(num_nodes):
    local_posteriors = posteriors[t:(t+num_states[i])]
    j = np.argmax(local_posteriors)
    best_states.append(j)
    t += num_states[i]
print best_states
|
120075
|
import math
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
class Vector3:
    """Minimal 3D vector with elementwise arithmetic and common products.

    Components are stored in ``self.data`` as ``[x, y, z]`` and read by
    index (``v[0]`` .. ``v[2]``). Addition/subtraction are elementwise;
    ``*`` and ``/`` take a scalar.
    """

    def __init__(self, a, b, c):
        self.data = [a, b, c]

    def __getitem__(self, key):
        return self.data[key]

    def __str__(self):
        return str(self.data)

    def __add__(self, value):
        return Vector3(self[0]+value[0], self[1]+value[1], self[2]+value[2])

    def __sub__(self, value):
        return Vector3(self[0]-value[0], self[1]-value[1], self[2]-value[2])

    def __mul__(self, value):
        # Scalar multiplication only; `value` is a number, not a vector.
        return Vector3(self[0]*value, self[1]*value, self[2]*value)
    __rmul__ = __mul__

    def __truediv__(self, value):
        # Bug fix: Python 3 dispatches `/` to __truediv__, not the Python 2
        # hook __div__ that the original defined — so `v / 2` raised
        # TypeError under Python 3, which this RLBot code targets.
        return Vector3(self[0]/value, self[1]/value, self[2]/value)
    __div__ = __truediv__  # keep the old Python 2 name for compatibility

    def magnitude(self):
        """Return the Euclidean length of the vector."""
        return math.sqrt((self[0]*self[0]) + (self[1] * self[1]) + (self[2]* self[2]))

    def normalize(self):
        """Return a unit-length copy; the zero vector normalizes to zero."""
        mag = self.magnitude()
        if mag != 0:
            return Vector3(self[0]/mag, self[1]/mag, self[2]/mag)
        else:
            return Vector3(0, 0, 0)

    def dot(self, value):
        """Return the scalar (dot) product with another indexable vector."""
        return self[0]*value[0] + self[1]*value[1] + self[2]*value[2]

    def cross(self, value):
        """Return the cross product (right-handed) with another vector."""
        return Vector3((self[1]*value[2]) - (self[2]*value[1]),
                       (self[2]*value[0]) - (self[0]*value[2]),
                       (self[0]*value[1]) - (self[1]*value[0]))

    def flatten(self):
        """Return a copy projected onto the z=0 plane (ground-plane math)."""
        return Vector3(self[0], self[1], 0)
class carobject:
    """Mutable snapshot of one car's physics state plus scoreboard info."""

    def __init__(self):
        self.loc = Vector3(0, 0, 0)
        self.vel = Vector3(0, 0, 0)
        self.rot = Vector3(0, 0, 0)
        self.Rotvel = Vector3(0, 0, 0)
        self.matrix = Matrix3D(self.rot)
        self.goals = 0
        self.saves = 0
        self.name = ""
        self.jumped = False
        self.doublejumped = False
        self.team = 0
        self.boostAmount = 0
        self.wheelcontact = False
        self.supersonic = False

    def update(self, TempVar):
        """Refresh every field from a game-packet car entry (`TempVar`)."""
        phys = TempVar.physics
        self.loc.data = [phys.location.x, phys.location.y, phys.location.z]
        self.vel.data = [phys.velocity.x, phys.velocity.y, phys.velocity.z]
        self.rot.data = [phys.rotation.pitch, phys.rotation.yaw, phys.rotation.roll]
        self.matrix = Matrix3D(self.rot)
        # Angular velocity arrives in world space; rotate it into the car's
        # local frame via the freshly rebuilt rotation matrix.
        world_spin = Vector3(phys.angular_velocity.x,
                             phys.angular_velocity.y,
                             phys.angular_velocity.z)
        self.Rotvel = self.matrix.dot(world_spin)
        self.goals = TempVar.score_info.goals
        self.saves = TempVar.score_info.saves
        self.name = TempVar.name
        self.jumped = TempVar.jumped
        self.doublejumped = TempVar.double_jumped
        self.team = TempVar.team
        self.boostAmount = TempVar.boost
        self.wheelcontact = TempVar.has_wheel_contact
        self.supersonic = TempVar.is_super_sonic
class Matrix3D:
    """3x3 rotation matrix built from (pitch, yaw, roll) Euler angles.

    NOTE(review): the rows appear to be the car's local axes expressed in
    world coordinates, making ``dot`` a world-to-local transform — confirm
    against the RLBot coordinate conventions.
    """

    def __init__(self, r):
        cos_p, sin_p = math.cos(r[0]), math.sin(r[0])
        cos_y, sin_y = math.cos(r[1]), math.sin(r[1])
        cos_r, sin_r = math.cos(r[2]), math.sin(r[2])
        row0 = Vector3(cos_p * cos_y, cos_p * sin_y, sin_p)
        row1 = Vector3(cos_y * sin_p * sin_r - cos_r * sin_y,
                       sin_y * sin_p * sin_r + cos_r * cos_y,
                       -cos_p * sin_r)
        row2 = Vector3(-cos_r * cos_y * sin_p - sin_r * sin_y,
                       -cos_r * sin_y * sin_p + sin_r * cos_y,
                       cos_p * cos_r)
        self.data = [row0, row1, row2]

    def dot(self, vector):
        """Return the matrix-vector product as a new Vector3."""
        return Vector3(self.data[0].dot(vector),
                       self.data[1].dot(vector),
                       self.data[2].dot(vector))
class ballobject:
    """Mutable snapshot of the ball's physics state."""

    def __init__(self):
        self.loc = Vector3(0, 0, 0)
        self.vel = Vector3(0, 0, 0)
        self.rot = Vector3(0, 0, 0)
        self.Rotvel = Vector3(0, 0, 0)

    def update(self, TempVar):
        """Refresh location/velocity/rotation/spin from a packet ball entry."""
        phys = TempVar.physics
        self.loc.data = [phys.location.x, phys.location.y, phys.location.z]
        self.vel.data = [phys.velocity.x, phys.velocity.y, phys.velocity.z]
        self.rot.data = [phys.rotation.pitch, phys.rotation.yaw, phys.rotation.roll]
        self.Rotvel.data = [phys.angular_velocity.x,
                            phys.angular_velocity.y,
                            phys.angular_velocity.z]
class Zoomelette(BaseAgent):
    """Simple RLBot agent: each tick, pick a behaviour and drive at a target."""

    def initialize_agent(self):
        self.controller_state = SimpleControllerState()
        self.car = carobject()
        self.ball = ballobject()

    def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
        self.preprocess(packet)
        # Aim point: on the line from the goal at y = -5150 * side(team)
        # through the ball, half the car-to-ball distance beyond the ball —
        # driving through it pushes the ball toward that goal.
        goal = Vector3(0, -5150 * side(self.team), 0)
        aim = self.ball.loc + ((self.ball.loc - goal).normalize()
                               * ((self.car.loc - self.ball.loc).magnitude() / 2))
        self.renderer.begin_rendering()
        self.renderer.draw_line_3d(self.ball.loc, aim, self.renderer.black())
        self.renderer.draw_rect_3d(Vector3(0, 0, 10) + aim, 10, 10, True,
                                   self.renderer.red())
        self.renderer.draw_line_3d(self.car.loc, aim, self.renderer.black())
        self.renderer.end_rendering()
        return self.Brain()

    def preprocess(self, gamepacket):
        """Copy this bot's car and the ball out of the raw packet."""
        self.car.update(gamepacket.game_cars[self.index])
        self.ball.update(gamepacket.game_ball)

    def Brain(self):
        """Select the current behaviour: kickoff, retreat, or shoot."""
        if self.ball.loc[1] == 0:
            print("KickOff")
            return KickOff(self)
        if (self.ball.loc - self.car.loc)[1] * side(self.team) > 0:
            print("Recovery")
            return Recovery(self)
        print("Shooting")
        return Shooting(self)
def KickOff(agent):
    """Kickoff behaviour: drive flat-out straight at the ball."""
    return Controller_output(agent, agent.ball.loc, 2300)
def Recovery(agent):
    """Retreat behaviour: drive flat-out to y = 5150 * side(team)."""
    retreat_point = Vector3(0, 5150 * side(agent.team), 0)
    return Controller_output(agent, retreat_point, 2300)
def Shooting(agent):
    """Shooting behaviour: aim behind the ball on the goal-to-ball line."""
    goal = Vector3(0, -5150 * side(agent.team), 0)
    half_gap = (agent.car.loc - agent.ball.loc).magnitude() / 2
    aim = agent.ball.loc + ((agent.ball.loc - goal).normalize() * half_gap)
    return Controller_output(agent, aim, 2300)
def Controller_output(agent, target, speed):
    """Build a controller frame driving the car toward *target* at *speed*.

    The target is transformed into the car's local frame; steering and
    throttle are derived from the resulting heading error and speed gap.
    """
    controls = SimpleControllerState()
    local_target = agent.car.matrix.dot(target - agent.car.loc)
    heading_error = math.atan2(local_target[1], local_target[0])
    controls.steer = steer(heading_error)
    controls.throttle, controls.boost = throttle(speed, velocity2D(agent.car))
    # Powerslide when the target sits more than ~115 degrees off the nose.
    controls.handbrake = abs(heading_error) > 2
    return controls
def side(x):
    """Map a team index to a field sign: -1 for x <= 0, otherwise +1."""
    return -1 if x <= 0 else 1
def cap(x, low, high):
    """Clamp *x* into the inclusive range [low, high]."""
    if x < low:
        return low
    if x > high:
        return high
    return x
def steer(angle):
    """Cubic steering curve: soft near zero, saturating at roughly
    |angle| > 0.078 rad, clamped to the controller's [-1, 1] range."""
    raw = ((35 * angle) ** 3) / 20
    return cap(raw, -1, 1)
def velocity2D(target_object):
    """Return the object's speed in the ground plane (z ignored)."""
    vx, vy = target_object.vel[0], target_object.vel[1]
    return math.sqrt(vx ** 2 + vy ** 2)
def throttle(speed, agent_speed):
    """Return (throttle, boost) to move from *agent_speed* toward *speed*.

    Boost engages when the speed deficit exceeds 100 uu/s; throttle pins
    to full when accelerating toward a fast (> 1400) target speed.
    """
    demand = (speed - agent_speed) / 100
    use_boost = demand > 1
    if demand > 0 and speed > 1400:
        demand = 1
    return cap(demand, -1, 1), use_boost
|
120109
|
import sys
from webob import Request
from pydap.responses.error import ErrorResponse
from pydap.lib import __version__
import unittest
class TestErrorResponse(unittest.TestCase):
    """Tests for pydap's ErrorResponse WSGI app wrapping a live exception."""

    def setUp(self):
        # create an exception that would happen in runtime
        try:
            1/0
        except Exception:
            # Build the error app while sys.exc_info() still holds the
            # live traceback, then capture its WSGI response.
            error = ErrorResponse(sys.exc_info())
            req = Request.blank('/')
            self.res = req.get_response(error)

    def test_status(self):
        # DAP runtime errors are reported as HTTP 500.
        self.assertEqual(self.res.status, "500 Internal Error")

    def test_content_type(self):
        self.assertEqual(self.res.content_type, "text/plain")

    def test_charset(self):
        self.assertEqual(self.res.charset, "utf-8")

    def test_headers(self):
        self.assertEqual(self.res.headers['Content-Type'],
                         'text/plain; charset=utf-8')
        self.assertEqual(self.res.headers['Content-description'], 'dods_error')
        self.assertEqual(self.res.headers['XDODS-Server'],
                         'pydap/' + __version__)

    def test_body(self):
        # The body is a DAP "Error { ... }" block embedding the Python
        # traceback; the optional regex groups accept both the Python 2 and
        # Python 3 wordings of the ZeroDivisionError message.
        # NOTE(review): leading whitespace inside this pattern was
        # reconstructed from standard traceback formatting — confirm against
        # the original file before relying on exact-indent matches.
        self.assertRegexpMatches(self.res.text, r"""Error {
    code = -1;
    message = "Traceback \(most recent call last\):
  File .*
    1/0
ZeroDivisionError:( integer)? division( or modulo)? by zero
";
}""")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.