text stringlengths 957 885k |
|---|
import sys
from PySide2 import QtWidgets, QtGui
from auto_assistant.controller import actions_controller, action_execution
from auto_assistant.view import action_list_widgit
class MainWindow(QtWidgets.QMainWindow):
    """Main application window.

    Shows the recorded action list with an add/edit button bar above it and
    start/stop execution controls below it; execution itself is delegated to
    an action_execution.Engine driving the list's model.
    """

    def __init__(self):
        super(MainWindow, self).__init__()
        self.__configure_menu_bar()
        self.__configure_layout()
        # The engine reads actions from the list's model and calls back when
        # a run finishes on its own.
        self.__engine = action_execution.Engine(
            self.__action_list_view.get_model(),
            self._engine_execution_finished_callback)
        self.resize(800, 600)

    def __configure_layout(self):
        """Build the central widget: action buttons, action list, run controls."""
        top_widgit = QtWidgets.QWidget()
        top_grid = QtWidgets.QGridLayout()

        button_bar = QtWidgets.QGridLayout()
        self.__add_action_button = QtWidgets.QPushButton('Add action')
        self.__edit_action_button = QtWidgets.QPushButton('Edit action')
        # NOTE(review): the edit button has no clicked handler wired up yet;
        # it is only toggled enabled/disabled around execution.
        self.__edit_action_button.setEnabled(False)
        button_bar.addWidget(self.__add_action_button, 0, 0)
        button_bar.addWidget(self.__edit_action_button, 0, 1)
        top_grid.addLayout(button_bar, 0, 0)

        self.__action_list_view = action_list_widgit.ActionListWidgit()
        top_grid.addWidget(self.__action_list_view, 1, 0)

        execution_bar = QtWidgets.QGridLayout()
        self.__start_execution_button = QtWidgets.QPushButton('Start')
        self.__stop_execution_button = QtWidgets.QPushButton('Stop')
        self.__stop_execution_button.setEnabled(False)
        execution_bar.addWidget(self.__start_execution_button, 0, 0)
        execution_bar.addWidget(self.__stop_execution_button, 0, 1)
        top_grid.addLayout(execution_bar, 2, 0)

        top_widgit.setLayout(top_grid)
        self.setCentralWidget(top_widgit)

        # Connect up the button functionality.
        # (Fix: removed a leftover debug print of the action list widget.)
        self.__add_action_button.clicked.connect(
            lambda: actions_controller.add_action(self.__action_list_view))
        self.__start_execution_button.clicked.connect(self.__execute_actions)
        self.__stop_execution_button.clicked.connect(self.__stop_action_execution)

    def toggle_buttons(self, is_executing: bool):
        """Enable/disable controls so only sensible actions are clickable.

        While executing only Stop is enabled; when idle, Stop is disabled and
        the add/edit/start controls are re-enabled.
        """
        if is_executing:
            self.__add_action_button.setEnabled(False)
            self.__edit_action_button.setEnabled(False)
            self.__start_execution_button.setEnabled(False)
            self.__stop_execution_button.setEnabled(True)
        else:
            self.__add_action_button.setEnabled(True)
            self.__edit_action_button.setEnabled(True)
            self.__start_execution_button.setEnabled(True)
            self.__stop_execution_button.setEnabled(False)

    def __execute_actions(self):
        # Configure the buttons for execution (only let the stop button be
        # clickable), then hand control to the engine.
        self.toggle_buttons(True)
        self.__engine.start_execution()

    def __stop_action_execution(self):
        # Stop the engine first, then restore the idle button state.
        self.__engine.stop_execution()
        self.toggle_buttons(False)

    def __configure_menu_bar(self):
        """Create the File menu with an Exit entry bound to Ctrl+Q."""
        self.my_menu_bar = self.menuBar()
        self.file_menu = self.my_menu_bar.addMenu('File')
        self.exit_action = self.file_menu.addAction('Exit')
        self.exit_action.triggered.connect(self.close)
        self.exit_action.setShortcut(QtGui.QKeySequence('Ctrl+q'))

    def closeEvent(self, event: QtGui.QCloseEvent):
        # Qt accepts the close event by default; this override only logs it.
        print(f'Closing the window due to event: {event}')

    def _engine_execution_finished_callback(self):
        # Invoked by the engine when a run completes without being stopped.
        self.toggle_buttons(False)
def main():
    """Launch the Qt application and block until its event loop exits."""
    application = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    return application.exec_()


if __name__ == '__main__':
    main()
|
<filename>src/Models/UNet/DualAttentionUNet.py
from tensorflow.python.keras.layers import Input, Dense, Convolution2D, MaxPooling2D, Dropout, Flatten, SpatialDropout2D, \
ZeroPadding2D, Activation, AveragePooling2D, UpSampling2D, BatchNormalization, ConvLSTM2D, \
TimeDistributed, Concatenate, Lambda, Reshape, UpSampling3D, Convolution3D, MaxPooling3D, SpatialDropout3D,\
Conv2DTranspose, Conv3DTranspose, add, multiply, Reshape, Softmax, AveragePooling3D, Add, Layer
from tensorflow.python.keras.models import Model
import tensorflow as tf
import numpy as np
import math
class CAM(Layer):
    """Channel Attention Module.

    Adapted from https://github.com/niecongchong/DANet-keras/

    Builds a channel-to-channel affinity from the flattened feature map and
    adds the re-weighted features back onto the input, scaled by a learned
    scalar ``gamma`` (zero-initialised, so the layer starts as an identity).
    """

    def __init__(self,
                 gamma_initializer=tf.zeros_initializer(),
                 gamma_regularizer=None,
                 gamma_constraint=None,
                 **kwargs):
        super(CAM, self).__init__(**kwargs)
        self.gamma_initializer = gamma_initializer
        self.gamma_regularizer = gamma_regularizer
        self.gamma_constraint = gamma_constraint

    def build(self, input_shape):
        # Single scalar blending weight.
        self.gamma = self.add_weight(shape=(1, ),
                                     initializer=self.gamma_initializer,
                                     name='gamma',
                                     regularizer=self.gamma_regularizer,
                                     constraint=self.gamma_constraint)
        self.built = True

    def compute_output_shape(self, input_shape):
        return input_shape

    def call(self, x):
        _, h, w, d, filters = x.get_shape().as_list()
        # Flatten space: (batch, H*W*D, C).
        flat = Reshape(target_shape=(h * w * d, filters))(x)
        flat_t = tf.transpose(flat, perm=[0, 2, 1])
        # (batch, C, C) channel affinity, softmax-normalised.
        affinity = Activation('softmax')(tf.linalg.matmul(flat_t, flat))
        attended = tf.linalg.matmul(flat, affinity)
        attended = Reshape(target_shape=(h, w, d, filters))(attended)
        return (self.gamma * attended) + x
class PAM(Layer):
    """Position Attention Module.

    Adapted from https://github.com/niecongchong/DANet-keras/

    Computes a (H*W*D) x (H*W*D) spatial affinity from query/key projections
    and adds the re-weighted value projection back onto the input, scaled by
    a learned scalar ``gamma`` (zero-initialised identity start).
    """

    def __init__(self,
                 gamma_initializer=tf.zeros_initializer(),
                 gamma_regularizer=None,
                 gamma_constraint=None,
                 **kwargs):
        super(PAM, self).__init__(**kwargs)
        self.gamma_initializer = gamma_initializer
        self.gamma_regularizer = gamma_regularizer
        self.gamma_constraint = gamma_constraint

    def build(self, input_shape):
        # Single scalar blending weight.
        self.gamma = self.add_weight(shape=(1, ),
                                     initializer=self.gamma_initializer,
                                     name='gamma',
                                     regularizer=self.gamma_regularizer,
                                     constraint=self.gamma_constraint)
        self.built = True

    def compute_output_shape(self, input_shape):
        return input_shape

    def call(self, x):
        _, h, w, d, filters = x.get_shape().as_list()
        # Query/key embeddings at reduced channel depth, value at full depth.
        query = Convolution3D(filters // 8, 1, use_bias=False)(x)
        key = Convolution3D(filters // 8, 1, use_bias=False)(x)
        value = Convolution3D(filters, 1, use_bias=False)(x)
        query = tf.transpose(Reshape(target_shape=(h * w * d, filters // 8))(query), perm=[0, 2, 1])
        key = Reshape(target_shape=(h * w * d, filters // 8))(key)
        value = Reshape(target_shape=(h * w * d, filters))(value)
        # The affinity matrix is of size (H*W*D) x (H*W*D).
        affinity = Activation(activation='softmax')(tf.linalg.matmul(key, query))
        attended = tf.linalg.matmul(affinity, value)
        attended = Reshape(target_shape=(h, w, d, filters))(attended)
        return (self.gamma * attended) + x
def convolution_block(x, nr_of_convolutions, use_bn=False, spatial_dropout=None):
    """Two 3x3x3 'same' convolutions, each optionally followed by batch
    normalisation, then ReLU and optional spatial dropout."""
    for _ in range(2):
        x = Convolution3D(nr_of_convolutions, 3, padding='same')(x)
        if use_bn:
            x = BatchNormalization()(x)
        x = Activation('relu')(x)
        if spatial_dropout:
            x = SpatialDropout3D(spatial_dropout)(x)
    return x
def encoder_block(x, nr_of_convolutions, use_bn=False, spatial_dropout=None):
    """Convolution block followed by 2x max-pooling.

    Returns (pooled, pre_pool); any spatial axis already at size <= 4 is not
    pooled further.
    """
    pre_pool = convolution_block(x, nr_of_convolutions, use_bn, spatial_dropout)
    pool = [1 if x.shape[axis] <= 4 else 2 for axis in range(1, 4)]
    x = MaxPooling3D(pool)(pre_pool)
    return x, pre_pool
def encoder_block_pyramid(x, input_ds, nr_of_convolutions, use_bn=False, spatial_dropout=None):
    """Encoder block that first fuses a downsampled copy of the network input.

    input_ds is the pyramid image at this level's resolution; it is convolved
    and concatenated onto x before the usual conv + pool. Returns
    (pooled, pre_pool); axes already at size <= 4 are not pooled further.
    """
    pyramid_conv = Convolution3D(filters=nr_of_convolutions, kernel_size=(3, 3, 3), padding='same', activation='relu')(input_ds)
    x = Concatenate(axis=-1)([pyramid_conv, x])
    pre_pool = convolution_block(x, nr_of_convolutions, use_bn, spatial_dropout)
    pool = [1 if x.shape[axis] <= 4 else 2 for axis in range(1, 4)]
    x = MaxPooling3D(pool)(pre_pool)
    return x, pre_pool
def decoder_block(x, cross_over_connection, nr_of_convolutions, use_bn=False, spatial_dropout=None):
    """Upsample x by 2 and fuse it with the matching encoder skip connection."""
    upsampled = Conv3DTranspose(nr_of_convolutions, kernel_size=3, padding='same', strides=2)(x)
    merged = Concatenate()([cross_over_connection, upsampled])
    if use_bn:
        merged = BatchNormalization()(merged)
    merged = Activation('relu')(merged)
    return convolution_block(merged, nr_of_convolutions, use_bn, spatial_dropout)
def decoder_block_guided(x, cross_over_connection, nr_of_convolutions, iteration, attention_layer, use_bn=False, spatial_dropout=None):
    """Decoder block that also injects the upsampled bottleneck attention map.

    iteration is the 1-based decoder depth; attention_layer is upsampled by
    2**iteration so it matches this level's resolution before fusion.
    """
    upsampled = Conv3DTranspose(nr_of_convolutions, kernel_size=3, padding='same', strides=2)(x)
    guide_stride = int(math.pow(2, iteration))
    guide = Conv3DTranspose(nr_of_convolutions, kernel_size=3, padding='same', strides=guide_stride)(attention_layer)
    merged = Concatenate()([guide, cross_over_connection, upsampled])
    if use_bn:
        merged = BatchNormalization()(merged)
    merged = Activation('relu')(merged)
    return convolution_block(merged, nr_of_convolutions, use_bn, spatial_dropout)
class DualAttentionUnet():
    """3D U-Net with a dual-attention (position + channel) bottleneck.

    Optional features: deep supervision (one softmax head per decoder level),
    an input pyramid (downsampled copies of the input concatenated into each
    encoder level), and attention-guided decoding (the bottleneck attention
    output upsampled into every decoder block).
    """

    def __init__(self, input_shape, nb_classes, deep_supervision=False, input_pyramid=False, attention_guiding=False):
        # input_shape: spatial dims (+ optional channel dim), i.e. 3 or 4 entries.
        if len(input_shape) != 3 and len(input_shape) != 4:
            raise ValueError('Input shape must have 3 or 4 dimensions')
        if nb_classes <= 1:
            raise ValueError('Segmentation classes must be > 1')
        self.dims = 3
        self.input_shape = input_shape
        self.nb_classes = nb_classes
        self.deep_supervision = deep_supervision
        self.input_pyramid = input_pyramid
        self.attention_guided = attention_guiding
        # Per-level filter counts; must be set via set_convolutions() before create().
        self.convolutions = None
        self.encoder_use_bn = True
        self.decoder_use_bn = True
        self.encoder_spatial_dropout = None
        self.decoder_spatial_dropout = None

    def set_convolutions(self, convolutions):
        # List of filter counts, one per resolution level (last = bottleneck).
        self.convolutions = convolutions

    def get_dice_loss(self):
        def dice_loss(target, output, epsilon=1e-10):
            # Soft Dice loss summed over classes, clipped to [0, 1 - epsilon].
            smooth = 1.
            dice = 0
            # NOTE(review): `object` shadows the builtin; the loop covers all
            # nb_classes classes but the sum is divided by nb_classes - 1
            # below — presumably the background class was meant to be
            # skipped. Confirm before relying on the absolute loss value.
            for object in range(0, self.nb_classes):
                if self.dims == 2:
                    output1 = output[:, :, :, object]
                    target1 = target[:, :, :, object]
                else:
                    output1 = output[:, :, :, :, object]
                    target1 = target[:, :, :, :, object]
                intersection1 = tf.reduce_sum(output1 * target1)
                union1 = tf.reduce_sum(output1 * output1) + tf.reduce_sum(target1 * target1)
                dice += (2. * intersection1 + smooth) / (union1 + smooth)
            dice /= (self.nb_classes - 1)
            return tf.clip_by_value(1. - dice, 0., 1. - epsilon)
        return dice_loss

    def create(self):
        """
        Create model and return it
        :return: keras model
        """
        input_layer = Input(shape=self.input_shape)
        x = input_layer
        init_size = max(self.input_shape[:-1])
        size = init_size
        convolutions = self.convolutions
        connection = []
        i = 0
        if self.input_pyramid:
            # Pre-compute progressively average-pooled copies of the input,
            # one per encoder level, for the pyramid connections.
            scaled_input = []
            scaled_input.append(x)
            for i, nbc in enumerate(self.convolutions[:-1]):
                ds_input = AveragePooling3D(pool_size=(2, 2, 2))(scaled_input[i])
                scaled_input.append(ds_input)
        # Encoder path; the first level never takes a pyramid input.
        for i, nbc in enumerate(self.convolutions[:-1]):
            if not self.input_pyramid or (i == 0):
                x, x_before_ds = encoder_block(x, nbc, use_bn=self.encoder_use_bn,
                                               spatial_dropout=self.encoder_spatial_dropout)
            else:
                x, x_before_ds = encoder_block_pyramid(x, scaled_input[i], nbc, use_bn=self.encoder_use_bn,
                                                       spatial_dropout=self.encoder_spatial_dropout)
            connection.insert(0, x_before_ds)  # Append in reverse order for easier use in the next block
        # Bottleneck convolution.
        x = convolution_block(x, self.convolutions[-1], self.encoder_use_bn, self.encoder_spatial_dropout)
        connection.insert(0, x)
        # Dual attention: position attention branch ...
        pam = PAM()(x)
        pam = Convolution3D(self.convolutions[-1], 3, padding='same')(pam)
        pam = BatchNormalization()(pam)
        pam = Activation('relu')(pam)
        pam = SpatialDropout3D(0.5)(pam)
        pam = Convolution3D(self.convolutions[-1], 3, padding='same')(pam)
        # ... and channel attention branch, fused by element-wise addition.
        cam = CAM()(x)
        cam = Convolution3D(self.convolutions[-1], 3, padding='same')(cam)
        cam = BatchNormalization()(cam)
        cam = Activation('relu')(cam)
        cam = SpatialDropout3D(0.5)(cam)
        cam = Convolution3D(self.convolutions[-1], 3, padding='same')(cam)
        x = add([pam, cam])
        x = SpatialDropout3D(0.5)(x)
        x = Convolution3D(self.convolutions[-1], 1, padding='same')(x)
        # Keep the bottleneck output for attention-guided decoding.
        x_bottom = x = BatchNormalization()(x)
        # Decoder path (filter counts reversed, bottleneck entry dropped).
        inverse_conv = self.convolutions[::-1]
        inverse_conv = inverse_conv[1:]
        decoded_layers = []
        for i, nbc in enumerate(inverse_conv):
            if not self.attention_guided:
                x = decoder_block(x, connection[i+1], nbc, use_bn=self.decoder_use_bn,
                                  spatial_dropout=self.decoder_spatial_dropout)
            else:
                x = decoder_block_guided(x, connection[i + 1], nbc, iteration=i+1, attention_layer=x_bottom,
                                         use_bn=self.decoder_use_bn, spatial_dropout=self.decoder_spatial_dropout)
            decoded_layers.append(x)
        if not self.deep_supervision:
            # Final activation layer
            x = Convolution3D(self.nb_classes, 1, activation='softmax')(x)
        else:
            # One softmax head per decoder level, deepest last after reversal.
            recons_list = []
            for i, lay in enumerate(decoded_layers):
                x = Convolution3D(self.nb_classes, 1, activation='softmax')(lay)
                recons_list.append(x)
            x = recons_list[::-1]
        return Model(inputs=input_layer, outputs=x)
|
#! /usr/bin/env python
import sys
import time
import json
import socket
import threading
try:
import weaver.client as client
except ImportError:
import client
# Shared mutable state for the logging timer thread and the event loop below.
global eventTime  # no-op at module level; kept for clarity of intent
data = []  # (second, events-in-that-second) samples
eventTime = 0  # seconds elapsed since logging started
eventCount = 0  # total events processed
eventInterval = 0  # events processed during the current one-second window
finished = False  # set True when the input stream ends; stops the timer
def addVertex(weaver, vertex, state):
    """Create node *vertex* and set its 'state' property.

    Best-effort: any failure (e.g. node already exists) is swallowed.
    Narrowed the bare `except:` so KeyboardInterrupt/SystemExit still propagate.
    """
    try:
        weaver.create_node(handle=vertex)
        weaver.set_node_property(node=vertex, key='state', value=state)
    except Exception:
        pass
def addEdge(weaver, sourceVertex, targetVertex, state):
    """Create edge "source-target" and set its 'state' property.

    Best-effort: failures are swallowed. Narrowed the bare `except:` so
    KeyboardInterrupt/SystemExit still propagate.
    """
    edgeName = sourceVertex + "-" + targetVertex
    try:
        weaver.create_edge(handle=edgeName, node1=sourceVertex, node2=targetVertex)
        weaver.set_edge_property(node=sourceVertex, edge=edgeName, key='state', value=state)
    except Exception:
        pass
def removeVertex(weaver, vertex):
    """Delete node *vertex*; failures (e.g. missing node) are swallowed.

    Narrowed the bare `except:` so KeyboardInterrupt/SystemExit still propagate.
    """
    try:
        weaver.delete_node(vertex)
    except Exception:
        pass
def removeEdge(weaver, sourceVertex, targetVertex):
    """Delete edge "source-target"; failures are swallowed.

    Narrowed the bare `except:` so KeyboardInterrupt/SystemExit still propagate.
    """
    edgeName = sourceVertex + "-" + targetVertex
    try:
        weaver.delete_edge(edgeName)
    except Exception:
        pass
def updateVertex(weaver, vertex, state):
    """Overwrite node *vertex*'s 'state' property; failures are swallowed.

    Narrowed the bare `except:` so KeyboardInterrupt/SystemExit still propagate.
    """
    try:
        weaver.set_node_property(node=vertex, key='state', value=state)
    except Exception:
        pass
def updateEdge(weaver, sourceVertex, targetVertex, state):
    """Overwrite edge "source-target"'s 'state' property; failures are swallowed.

    Narrowed the bare `except:` so KeyboardInterrupt/SystemExit still propagate.
    """
    edgeName = sourceVertex + "-" + targetVertex
    try:
        weaver.set_edge_property(node=sourceVertex, edge=edgeName, key='state', value=state)
    except Exception:
        pass
def logCount():
    """Sample the per-second event throughput.

    Re-arms itself once a second via threading.Timer until `finished` is set,
    appending (eventTime, eventInterval) to `data` and resetting the window
    counter. `data` is mutated in place, so no `global data` is needed.
    """
    global eventTime
    global eventInterval
    if not finished:
        threading.Timer(1.0, logCount).start()
    data.append((eventTime, eventInterval))
    eventInterval = 0
    eventTime += 1
if __name__ == '__main__':
    # Forward graph-mutation events from a TCP stream into a weaver graph.
    # Usage: script.py <listen_port> <aggregation>; events are batched into
    # weaver transactions of `aggregation` events each.
    weaver = client.Client('127.0.0.1', 2002)
    host = '0.0.0.0'
    port = int(sys.argv[1])
    aggregation = int(sys.argv[2])
    print('waiting for connection...')
    mySocket = socket.socket()
    mySocket.bind((host, port))
    mySocket.listen(1)
    conn, addr = mySocket.accept()
    print('Connection from: ' + str(addr))
    # Start the once-a-second throughput sampler.
    logCount()
    f = conn.makefile()
    for line in f:
        # One JSON-encoded event per line.
        event = json.loads(line)
        try:
            # Open a transaction on the first event of each batch ...
            if (eventCount % aggregation == 0):
                weaver.begin_tx()
            if (event['command'] == 'CREATE_VERTEX'):
                addVertex(weaver, str(event['targetVertex']), json.dumps(event['payload']))
            elif (event['command'] == 'CREATE_EDGE'):
                addEdge(weaver, str(event['sourceVertex']), str(event['targetVertex']), json.dumps(event['payload']))
            elif (event['command'] == 'UPDATE_EDGE'):
                updateEdge(weaver, str(event['sourceVertex']), str(event['targetVertex']), json.dumps(event['payload']))
            elif (event['command'] == 'UPDATE_VERTEX'):
                updateVertex(weaver, str(event['targetVertex']), json.dumps(event['payload']))
            elif (event['command'] == 'REMOVE_EDGE'):
                removeEdge(weaver, str(event['sourceVertex']), str(event['targetVertex']))
            elif (event['command'] == 'REMOVE_VERTEX'):
                removeVertex(weaver, str(event['targetVertex']))
            # ... and commit on the last event of the batch.
            # NOTE(review): a trailing partial batch is never committed.
            if (eventCount % aggregation == (aggregation - 1)):
                weaver.end_tx()
        except:
            # Best-effort: malformed events / weaver errors are skipped.
            pass
        eventCount += 1
        eventInterval += 1
    # Input stream ended: stop the sampler and flush the final window.
    finished = True
    data.append((eventTime, eventInterval))
    f.close()
    conn.close()
    print(str(data))
    # One "<second>\t<events>" row per sampled second.
    with open('results.txt', 'w') as outfile:
        for entry in data:
            outfile.write(str(entry[0]) + '\t' + str(entry[1]) + '\n')
|
""" PySpark ALS Recommendation
Alternate Least Squared matrix representation of Users and Items matrix,
not suitable for high ColdStart ratio of users at inference.
"""
import json
import itertools
from pathlib import Path
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS, ALSModel
from pyspark.sql import Row
import pyspark.sql.functions as F
from pyspark.sql.types import *
from pyspark import SparkContext, SparkConf, SQLContext
# Target upper bound for a single Spark partition, in bytes (~10 MiB).
MAX_PART_SIZE = 10 * (1024 ** 2)


def read_file(sc, fpath):
    """Load *fpath* as an RDD of lines, with enough partitions that each
    stays below MAX_PART_SIZE."""
    file_size = Path(fpath).stat().st_size
    return sc.textFile(fpath, file_size // MAX_PART_SIZE)
def read_json(sc, fpath):
    """Parse a file of one-JSON-object-per-line into an RDD of dicts."""
    lines = read_file(sc, fpath)
    return lines.map(lambda raw: json.loads(raw))
def create_spark():
    """Create a local 3-core Spark context for the ALS job.

    Returns
    -------
    sc : pyspark.SparkContext
    """
    conf = (SparkConf()
            .setAppName("ALS")
            .setMaster("local[3]")
            .set("spark.executor.memory", "4g")
            .set("spark.executor.cores", "4")
            .set("spark.driver.cores", "2")
            .set("spark.driver.memory", "2g"))
    return SparkContext(conf=conf)
# Build the Spark context and load the training reviews.
sc = create_spark()
spark = SQLContext(sc)
print("-"*50, '\nALS CF Recommender System')
# Data: (user_id, business_id, stars) triples from the training reviews.
lines = read_json(sc, '../../data/project/train_review.json')
parts = lines.map(lambda r: (r['user_id'], r['business_id'], r['stars']))
# Dense integer ids for users and businesses (ALS requires numeric ids).
user_map = parts.map(lambda x: x[0]).distinct().zipWithIndex().collectAsMap()
print("Found Users: ", len(user_map))
biz_map = parts.map(lambda x: x[1]).distinct().zipWithIndex().collectAsMap()
# Fix: report the business count (previously re-printed the user count).
print("Found Businesses: ", len(biz_map))
ratingsRDD = parts.map(lambda p: Row(
    userId=int(user_map[p[0]]),
    bizId=int(biz_map[p[1]]),
    rating=float(p[2])
    )
)
ratings = spark.createDataFrame(ratingsRDD)
# Hold out 10% of the ratings for validation.
(training, val) = ratings.randomSplit([0.9, 0.1])
training.show(5)
# hyper parameters (full grids kept for reference)
ranks_ = [2] #[8, 10, 12, 14, 16, 18, 20]
regs_ = [0.01] #[0.001, 0.01, 0.05, 0.1, 0.2]
niters = 1
import os

# If a previously trained model exists on disk, load and evaluate it;
# otherwise run a (currently single-point) grid search over reg and rank.
MODEL_NAME = 'weights/als_18_double_reg0.2_rank50.model'
if os.path.exists(MODEL_NAME):
    print("Loading model ....")
    model = ALSModel.load(MODEL_NAME)
    predictions = model.transform(val)
    evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating", predictionCol="prediction")
    val_rmse = evaluator.evaluate(predictions)
    print("[VAL] Root-mean-square error = " + str(val_rmse))
else:
    for (rg, r) in itertools.product(regs_, ranks_):
        # Build the recommendation model using ALS on the training data.
        # coldStartStrategy='nan' keeps unseen users/items as NaN predictions
        # (filled in later with fillna) instead of dropping the rows.
        als = ALS(maxIter=niters, rank=r, regParam=rg, userCol="userId", itemCol="bizId", ratingCol="rating", coldStartStrategy='nan')
        model = als.fit(training)
        predictions = model.transform(val)
        evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating", predictionCol="prediction")
        val_rmse = evaluator.evaluate(predictions)
        print('-'*50,'\nALS Rank:', r, 'Reg:', rg)
        print("[VAL] Root-mean-square error = " + str(val_rmse))
        # NOTE(review): models are saved under a different naming pattern
        # than MODEL_NAME above, so a rerun will not find them — confirm.
        model.save(f'als_{niters}_reg{rg}_rank{r}.model')
# print("Business")
#model.itemFactors.show()
# print("Users")
# model.userFactors.show()
# ####### TEST
# Evaluate the model by computing the RMSE on the test data
test = read_json(sc, '../../data/project/test_review_ratings.json')\
.map(lambda r: (r['user_id'], r['business_id'],r['stars']))
# Update Mappings
miss_biz = set(test.map(lambda x: x[1]).distinct().collect()) - set(biz_map)
for m in miss_biz:
biz_map.update({m: biz_map.__len__()})
miss_user = set(test.map(lambda x: x[0]).distinct().collect()) - set(user_map)
for m in miss_user:
user_map.update({m: user_map.__len__()})
testRDD = test.map(lambda p: Row(
userId=int(user_map[p[0]]),
bizId=int(biz_map[p[1]]),
rating=float(p[2])
)
)
inv_idxs = {
"user": {v:k for k,v in user_map.items()},
"biz": {v:k for k,v in biz_map.items()}
}
testDF = spark.createDataFrame(testRDD)
predictions = model.transform(testDF)
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating", predictionCol="prediction")
# Coldstart
predictions = predictions.fillna({'prediction': 2.5}).cache()
def wrj(i):
    """Append object *i* to 'ALS.preds' as one JSON line."""
    with open('ALS.preds', 'a') as out:
        out.write(json.dumps(i) + '\n')
# Write every test prediction to ALS.preds as JSON lines; .count() only
# forces evaluation of the lazy RDD for its side effect.
predictions.rdd.map(lambda r: wrj({'user_id': inv_idxs['user'][r.userId], 'business_id': inv_idxs['biz'][r.bizId], 'stars':r.prediction})).count()
rmse = evaluator.evaluate(predictions)
print("[TEST] Root-mean-square error = ", rmse)
# k-NN predictions
# Item latent factors as a dense matrix; row j holds business id j's factors.
biz_feats = model.itemFactors.orderBy('id').select('features').toPandas()\
    .features.apply(pd.Series).to_numpy()
knn_mdl = NearestNeighbors(n_neighbors=500, algorithm='brute', metric='cosine')\
    .fit(biz_feats)
# Only businesses that actually have a factor row (seen during training).
testbizs = [b_ix for b,b_ix in biz_map.items() if b_ix < biz_feats.shape[0]]
# userId -> {bizId: rating} over the training ratings.
rates_by_user = ratingsRDD.map(lambda r: (r.userId, (r.bizId, r.rating)))\
    .groupByKey().mapValues(dict).collectAsMap()
# Pre-compute (distances, neighbour ids) for every known business.
neighs_cache = knn_mdl.kneighbors(biz_feats[testbizs])
neighs_cache = {t: (neighs_cache[0][j], neighs_cache[1][j]) for j, t in enumerate(testbizs)}
kpreds = testDF.rdd.map(lambda r: Row(
    bizId=r.bizId, userId=r.userId,
    rating=r.rating, neighs=neighs_cache.get(r.bizId,([],[])),
    userRates=rates_by_user.get(r.userId, {})
    )).cache()
def w_avg(ngs, rts):
    """Weighted average of a user's ratings over a business's neighbours.

    ngs: (weights, neighbour_ids) pair of parallel sequences.
    rts: {bizId: rating} for one user.
    Returns the midpoint 2.5 when the user rated none of the neighbours
    (cold start).
    """
    numerator = 0.
    denominator = 0.
    for weight, neighbour in zip(*ngs):
        if neighbour in rts:
            numerator += (weight * rts[neighbour])
            denominator += abs(weight)
    return 2.5 if denominator == 0 else numerator / denominator
print("Inter stats", kpreds.map(lambda r: set(r.userRates).intersection(set(r.neighs[0])).__len__()).stats() )
kpredsDF = kpreds.map(lambda r: Row(userId=r.userId, bizId=r.bizId, rating=r.rating, prediction=float(w_avg(r.neighs, r.userRates)))).toDF()
print("KNN- preds")
kpredsDF.show(5)
krmse = evaluator.evaluate(kpredsDF)
print("[TEST] K-NN Root-mean-square error = ", krmse)
print("No Available resp:", kpredsDF.filter(kpredsDF.prediction == 2.5).count())
def wrj(i):
with open('kNNALS.preds', 'a') as f:
f.write(json.dumps(i)+'\n')
kpredsDF.rdd.map(lambda r: wrj({'user_id': inv_idxs['user'][r.userId], 'business_id': inv_idxs['biz'][r.bizId], 'stars':r.prediction})).count()
sc.stop() |
<reponame>itsShnik/visual-relationship-classifier
import _init_paths
import numpy as np
import json
import base64
import copy
import glob
import time
import random
import matplotlib.pyplot as plt
import pickle
import torch.nn as nn
import torch
import torch.utils.data as Data
from PIL import Image
from torchvision import transforms
from vis_rel.function.config import config, update_config
from vis_rel.modules.frcnn_classifier import Net
update_config('cfgs/vis_rel/frcnn.yaml')
"""
Load the bounding box pairs
"""
print('Loading the bb pairs dataset')
# Each entry presumably holds subj/obj/union bounding boxes, im_info and an
# image_id — see DatasetLoader.__getitem__ for the keys actually read.
bb_pairs_dataset = pickle.load(open('data/bb_pairs_dataset.pkl', 'rb'))
print('Loaded the bb pairs dataset')
"""
The dataloader for the dataset
"""
import random
class DatasetLoader(Data.Dataset):
    """Dataset of bounding-box pairs with their (transformed) source image.

    Each item is (inputs, image_id) where inputs carries the subject, object
    and union boxes, the image meta info, and a normalised random 224x224
    crop of the source image.
    """

    def __init__(self, path, bb_pairs_dataset):
        self.image_dir = path
        self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
        self.transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.ToTensor(),
            self.normalize,
        ])
        self.bb_pairs = bb_pairs_dataset
        self.data_size = len(self.bb_pairs)

    def __getitem__(self, idx):
        pair = self.bb_pairs[idx]
        # Locate the image anywhere one directory down from image_dir by its
        # numeric id suffix; take the first match.
        matches = glob.glob(self.image_dir + '/*/*' + str(pair['image_id']) + '.jpg')
        image = Image.open(matches[0]).convert('RGB')
        image = self.transform(image)
        # Convert the numpy boxes / meta info to tensors.
        inputs = {
            'subj_bbox': torch.from_numpy(pair['subj_bbox']),
            'obj_bbox': torch.from_numpy(pair['obj_bbox']),
            'union_bbox': torch.from_numpy(pair['union_bbox']),
            'im_info': torch.tensor(pair['im_info']),
            'image': image,
        }
        return inputs, torch.tensor(pair['image_id'])

    def __len__(self):
        return self.data_size
"""
The evaluator
"""
# batch_size
batch_size = 16
# Load the dataset
val_dataset = DatasetLoader('data/coco', bb_pairs_dataset)
val_dataloader = Data.DataLoader(
val_dataset,
batch_size,
shuffle = False,
num_workers = 4,
pin_memory = True,
sampler = None,
drop_last = True
)
print('Loaded the dataset')
# os env
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '4,5,6,7'
# Load the net
model = Net(config)
# define a softmax obj
soft = nn.Softmax(-1)
# Load the state dict
path = 'output/output/vis_rel/ckpt_frcnn_train+val_low_lr_epoch7.pkl'
state_dict = torch.load(path, map_location=torch.device('cpu'))['state_dict']
new_state_dict = {k.replace('module.', ''):state_dict[k] for k in state_dict}
model.load_state_dict(new_state_dict)
model.eval()
model = nn.DataParallel(model, device_ids = [0,1,2,3]).cuda()
# Load the idx to label relationship
rel_classes = json.load(open('data/relationship_classes.json'))
class_rel = {v:k for k, v in rel_classes.items()}
print(class_rel)
# define a threshold
threshold = 0.6
relationships = {}
start_time = time.time()
with torch.no_grad():
for step, (
inputs,
image_ids
) in enumerate(val_dataloader):
for k, v in inputs.items():
inputs[k] = v.to(torch.device('cuda'))
feats, pred = model(inputs)
# softmax over pred
pred = soft(pred)
for i in range(len(pred)):
pred_ind = int(torch.argmax(pred[i]))
pred_val = torch.max(pred[i])
if pred_ind < 20 and pred_val > threshold:
temp_rel = {
'predicate': str(class_rel[int(pred_ind)]),
'features': feats.cpu().detach().tolist(),
'subj_bbox': inputs['subj_bbox'][i].cpu().detach().tolist(),
'obj_bbox': inputs['obj_bbox'][i].cpu().detach().tolist()
}
if str(int(image_ids[i])) not in relationships:
relationships[str(int(image_ids[i]))] = []
relationships[str(int(image_ids[i]))].append(temp_rel)
print("\rProgress {}/{}".format(step, val_dataset.data_size/batch_size), end=' ')
for k, v in relationships.items():
print("\rImage id : {}".format(str(k)), end=' ')
pickle.dump(v, open('data/coco/vqa_relationships/' + str(k) + '.pkl', 'wb'), pickle.HIGHEST_PROTOCOL)
end_time = time.time()
print('Time taken is : ', end_time - start_time)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from django.test import TestCase
from moneyed import Money
from bazaar.goods.models import Product, CompositeProduct
from bazaar.listings.models import Listing
from bazaar.settings import bazaar_settings
from bazaar.warehouse.api import get_storage_quantity, get_storage_price, get_output_quantity, \
get_customer_quantity
from .. import factories as f
class TestProduct(TestCase):
    """Product model behaviour, with automatic listing creation forced on."""

    def setUp(self):
        # Force 1x listing auto-creation on; restored in tearDown.
        self.old_setting_value = bazaar_settings.AUTOMATIC_LISTING_CREATION_ON_PRODUCT_CREATION
        bazaar_settings.AUTOMATIC_LISTING_CREATION_ON_PRODUCT_CREATION = True

    def tearDown(self):
        bazaar_settings.AUTOMATIC_LISTING_CREATION_ON_PRODUCT_CREATION = self.old_setting_value

    def test_model(self):
        """String representation is the product name."""
        self.product = f.ProductFactory(ean="12345678")
        self.assertEqual("%s" % self.product, "a product")

    def test_product_cost_property(self):
        """Cost is the quantity-weighted average unit price across stocks."""
        self.product = f.ProductFactory(ean="12345678")
        f.StockFactory(product=self.product, unit_price=10, quantity=10)
        f.StockFactory(product=self.product, unit_price=5, quantity=30)
        # (10*10 + 5*30) / 40 = 6.25
        self.assertEqual(self.product.cost, Money(6.25, "EUR"))

    def test_product_ean_property(self):
        """The ean passed to the factory is stored on the product."""
        self.product = f.ProductFactory(ean="12345678")
        self.assertEqual(self.product.ean, "12345678")

    def test_product_code_property(self):
        """The code passed to the factory is stored on the product."""
        self.product = f.ProductFactory(code="thisisacode")
        self.assertEqual(self.product.code, "thisisacode")

    def test_ean_should_not_be_none(self):
        """Creating a product with ean=None must fail."""
        self.product = f.ProductFactory(ean="12345678")
        self.assertRaises(Exception, f.ProductFactory, ean=None)

    def test_product_photo_property(self):
        """Assigning and saving a photo keeps the file name."""
        self.product = f.ProductFactory(ean="12345678")
        self.product.photo = 'test.jpg'
        self.product.save()
        self.assertEqual(self.product.photo.name, 'test.jpg')

    def test_on_product_creation_a_listing_should_be_created(self):
        """
        Tests that on product creation a new 1x listing is created.
        """
        self.assertFalse(Listing.objects.all().exists())
        product = f.ProductFactory()
        self.assertEqual(Listing.objects.filter(product=product).count(), 1)

    def test_that_a_listing_is_created_only_during_product_creation(self):
        """
        A listing is created only when a product is saved for the first time
        (creation); subsequent saves must not create more listings.
        """
        self.assertFalse(Listing.objects.all().exists())
        product = f.ProductFactory()
        self.assertEqual(Listing.objects.filter(product=product).count(), 1)
        product.name = "test"
        product.save()
        self.assertEqual(Listing.objects.filter(product=product).count(), 1)
class TestCompositeProduct(TestCase):
    """A composite product exposes the products it is assembled from."""

    def setUp(self):
        # A composite made of one unit each of two simple products.
        self.product1 = Product.objects.create(name='Product1')
        self.product2 = Product.objects.create(name='Product2')
        self.composite = CompositeProduct.objects.create(name='Composite')
        f.ProductSetFactory(composite=self.composite, product=self.product1, quantity=1)
        f.ProductSetFactory(composite=self.composite, product=self.product2, quantity=1)

    def test_products_added_to_composite(self):
        components = self.composite.products.all()
        self.assertIn(self.product1, components)
        self.assertIn(self.product2, components)
class TestProductMovements(TestCase):
def setUp(self):
self.storage = f.StorageFactory()
self.lost_and_found = f.LostFoundFactory()
self.output = f.OutputFactory()
self.customer = f.CustomerFactory()
self.product_1 = f.ProductFactory()
self.product_2 = f.ProductFactory()
self.composite_1 = f.CompositeProductFactory()
self.composite_2 = f.CompositeProductFactory()
self.ps_1 = f.ProductSetFactory(product=self.product_1, composite=self.composite_1, quantity=1)
self.ps_2 = f.ProductSetFactory(product=self.product_1, composite=self.composite_2, quantity=2)
self.ps_3 = f.ProductSetFactory(product=self.product_2, composite=self.composite_2, quantity=3)
def test_add_one_product_creates_stock_only_for_composite_1(self):
self.product_1.move(from_location=self.lost_and_found, to_location=self.storage, quantity=1, price_multiplier=2)
self.assertEqual(get_storage_quantity(product=self.composite_1), 1)
self.assertEqual(get_storage_price(product=self.composite_1).amount, 2)
self.assertEqual(get_storage_quantity(product=self.composite_2), 0)
self.assertEqual(get_storage_quantity(product=self.product_1), 1)
def test_add_more_products_creates_correct_stocks(self):
self.product_1.move(from_location=self.lost_and_found, to_location=self.storage, quantity=1)
self.product_1.move(from_location=self.lost_and_found, to_location=self.storage, quantity=1)
self.product_2.move(from_location=self.lost_and_found, to_location=self.storage, quantity=3)
self.assertEqual(get_storage_quantity(product=self.composite_1), 2)
self.assertEqual(get_storage_quantity(product=self.composite_2), 1)
self.assertEqual(get_storage_price(product=self.composite_1).amount, 1)
self.assertEqual(get_storage_price(product=self.composite_2).amount, 5)
self.assertEqual(get_storage_quantity(product=self.product_1), 2)
self.assertEqual(get_storage_quantity(product=self.product_2), 3)
def test_add_more_products_creates_correct_stocks_with_single_movement(self):
self.product_1.move(from_location=self.lost_and_found, to_location=self.storage, quantity=2)
self.product_2.move(from_location=self.lost_and_found, to_location=self.storage, quantity=3)
self.assertEqual(get_storage_quantity(product=self.composite_1), 2)
self.assertEqual(get_storage_quantity(product=self.composite_2), 1)
self.assertEqual(get_storage_price(product=self.composite_1).amount, 1)
self.assertEqual(get_storage_price(product=self.composite_2).amount, 5)
self.assertEqual(get_storage_quantity(product=self.product_1), 2)
self.assertEqual(get_storage_quantity(product=self.product_2), 3)
def test_remove_products_update_composite_quantity_correctly(self):
self.product_1.move(from_location=self.lost_and_found, to_location=self.storage, quantity=2)
self.product_2.move(from_location=self.lost_and_found, to_location=self.storage, quantity=3)
self.assertEqual(get_storage_quantity(product=self.composite_1), 2)
self.assertEqual(get_storage_quantity(product=self.composite_2), 1)
self.product_1.move(from_location=self.storage, to_location=self.output)
self.assertEqual(get_storage_quantity(product=self.composite_1), 1)
self.assertEqual(get_storage_quantity(product=self.composite_2), 0)
def test_move_composites_from_storage_to_output(self):
self.product_1.move(from_location=self.lost_and_found, to_location=self.storage, quantity=2)
self.product_2.move(from_location=self.lost_and_found, to_location=self.storage, quantity=4)
self.assertEqual(get_storage_quantity(product=self.composite_1), 2)
self.assertEqual(get_storage_quantity(product=self.composite_2), 1)
self.composite_1.move(from_location=self.storage, to_location=self.output)
self.assertEqual(get_storage_quantity(product=self.composite_1), 1)
self.assertEqual(get_storage_quantity(product=self.composite_2), 0)
self.assertEqual(get_storage_quantity(product=self.product_1), 1)
def test_move_composites_not_negative_quantities(self):
self.product_1.move(from_location=self.lost_and_found, to_location=self.storage)
self.product_1.move(from_location=self.lost_and_found, to_location=self.storage)
self.product_2.move(from_location=self.lost_and_found, to_location=self.storage)
self.product_2.move(from_location=self.lost_and_found, to_location=self.storage)
self.product_2.move(from_location=self.lost_and_found, to_location=self.storage)
self.assertEqual(get_storage_quantity(product=self.composite_1), 2)
self.assertEqual(get_storage_quantity(product=self.composite_2), 1)
self.composite_1.move(from_location=self.storage, to_location=self.output, quantity=3)
self.assertEqual(get_storage_quantity(product=self.composite_1), 0)
self.assertEqual(get_storage_quantity(product=self.composite_2), 0)
self.assertEqual(get_storage_quantity(product=self.product_1), -1)
    def test_move_composite_with_not_allowed_locations(self):
        """Moving a composite from a location it cannot be moved from is a
        no-op: no stock changes anywhere."""
        # lost_and_found is presumably not a valid source for composites —
        # the move below leaves every quantity at zero.
        self.composite_1.move(from_location=self.lost_and_found, to_location=self.storage)
        self.assertEqual(get_storage_quantity(product=self.composite_1), 0)
        self.assertEqual(get_storage_quantity(product=self.composite_2), 0)
        self.assertEqual(get_storage_quantity(product=self.product_1), 0)
        self.assertEqual(get_storage_quantity(product=self.product_2), 0)
    def test_move_composite_right_quantities(self):
        """Selling a composite to a customer consumes exactly the component
        quantities it is built from."""
        # Provision the components needed for one composite_2.
        self.product_1.move(from_location=self.lost_and_found, to_location=self.storage, quantity=2)
        self.product_2.move(from_location=self.lost_and_found, to_location=self.storage, quantity=3)
        # Ship the composite to the customer.
        self.composite_2.move(from_location=self.storage, to_location=self.customer)
        # Storage is fully drained...
        self.assertEqual(get_storage_quantity(product=self.composite_2), 0)
        self.assertEqual(get_storage_quantity(product=self.product_1), 0)
        self.assertEqual(get_storage_quantity(product=self.product_2), 0)
        # ...and the customer now holds one composite.
        self.assertEqual(get_customer_quantity(product=self.composite_2), 1)
|
from expedition import Expedition
from fleet import Fleet
from quests import Quests
from config import Config
from fight_runner import FightRunner
from expedition_runner import ExpeditionRunner
from return_fleet_checker import ReturnFleetChecker
from resupply_runner import ResupplyRunner
from fight_checker import FightChecker
from composite_runner import CompositeRunner
from docking_runner import DockingRunner
from quest_runner import QuestRunner
from cron import Cron
from dismantling_runner import DismantlingRunner
from enable_runner import EnableRunner
from while_runner import WhileRunner
from message import Message
import random
config = None
level_up_runner = None
expedition_runner = None
docking_runner = None
questing_runner = None
dismantling_runner = None
def logged(f):
    """Decorator: print the wrapped callable's name before invoking it.

    Bug fix: the original printed ``f.__class__.__name__``, which for any
    plain function is always the literal string "function" — useless in a
    log. ``f.__name__`` is the actual name of the decorated callable.
    The print uses single-argument call syntax so it is valid in both
    Python 2 (Jython/Sikuli) and Python 3.
    """
    def wrapped(*args, **kwargs):
        print(f.__name__ + " start")
        return f(*args, **kwargs)
    return wrapped
def clickWithResetMouse(img):
    # Wait up to 30 s for the image to appear on screen, click it, then
    # park the cursor so it cannot obscure subsequent image matches.
    # wait/click are Sikuli globals; wait raises FindFailed on timeout.
    wait(img,30)
    click(img)
    reset_mouse()
def clickIfExistsWithResetMouse(img):
    # Best-effort click: only click when the image is found within 1 s,
    # so a missing target is silently skipped instead of raising FindFailed.
    if exists(img,1):
        clickWithResetMouse(img)
def reset_mouse():
    # Move the cursor to the top-left corner so it never sits on top of a
    # screen region Sikuli needs to match against.
    hover(Location(0,0))
@logged
def doAllJob(count):
    # Run every configured automation runner once, in a fixed order.
    # `count` is the loop iteration number from the caller; it is currently
    # unused here but kept for interface stability.
    # Level UP
    level_up_runner.run()
    # Docking
    docking_runner.run()
    # Fleet expedition
    expedition_runner.run()
    # Quest check
    questing_runner.run()
    # Dismantling
    dismantling_runner.run()
    reset_mouse()
@logged
def mainloopWithException():
    """Main automation loop: focus the browser, run all jobs, sleep a
    randomized interval, repeat forever.

    Fix: the sleep message used a Python 2 ``print`` *statement* while the
    rest of this function already uses ``print(...)`` call syntax — it is
    normalized to the single-argument call form, which behaves identically
    on Python 2 (Jython/Sikuli) and is valid Python 3.

    On Sikuli's FindFailed (an expected screen element was not found) the
    loop recovers by navigating back to the base screen and continues.
    """
    count = 0
    while(True):
        try:
            print(count)
            switchApp(config.browser)
            doAllJob(count)
            # Sleep the configured interval +/-10% so the automation does
            # not fire at a perfectly regular cadence.
            random_sleep_time = random.randrange(int(config.sleep_time*0.9), int(config.sleep_time*1.1))
            print("sleep " + str(random_sleep_time) + " sec...")
            sleep(random_sleep_time)
            count += 1
        except FindFailed:
            print("find failed")
            returnToBase()
@logged
def returnToBase():
    # Try the two in-game navigation buttons that lead back to the base
    # screen; either may or may not be present, so both are best-effort.
    clickIfExistsWithResetMouse("base.png")
    clickIfExistsWithResetMouse("back.png")
    sleep(3)
    # If the sortie button is visible we are on the base screen — done.
    if exists("sortie.png"):
        return
    # Can not back to base, restart kancolle
    restartKancolle()
@logged
def restartKancolle():
    """Reload the game page until the welcome screen appears, then click
    through it. The wait doubles on every failed attempt (exponential
    backoff) to give slow page loads progressively more time."""
    isOnWelcomePage = False
    wait_welcome_page_time = 20
    while not isOnWelcomePage:
        # Click the browser reload button (lowered similarity: 0.80).
        clickWithResetMouse(Pattern("reload.png").similar(0.80))
        sleep(wait_welcome_page_time)
        wait_welcome_page_time = wait_welcome_page_time *2
        isOnWelcomePage = exists(Pattern("welcome_page.png").targetOffset(209,156))
    # Click the "start" region of the welcome page and wait for the game
    # to finish loading before returning.
    clickWithResetMouse(Pattern("welcome_page.png").targetOffset(209,156))
    sleep(wait_welcome_page_time)
if __name__ == "__main__":
    # Fix: sys.argv is used below but this module never imports sys —
    # the import block at the top of the file only pulls in the project
    # runners and `random`. Imported here, scoped to script startup.
    import sys
    #config_path = sys.argv[0] + ".sikuli/../config.ini" #Executing from sikuli IDE
    config_path = sys.argv[0] + "/../config.ini" #Executing from console
    config = Config(config_path)
    expedition_msg = Message()
    return_fleet_checker = ReturnFleetChecker()
    # Level up first fleet: check returns, resupply, then fight.
    level_up_runner = CompositeRunner()
    level_up_runner.add_runner(EnableRunner(config.fight_enabled))
    level_up_runner.add_runner(return_fleet_checker)
    level_up_runner.add_runner(FightChecker())
    level_up_runner.add_runner(ResupplyRunner(config.fight_fleets, from_small_resuppy=True))
    level_up_runner.add_runner(return_fleet_checker)
    level_up_runner.add_runner(FightRunner())
    # Repair ships in the configured number of docks.
    docking_runner = CompositeRunner()
    docking_runner.add_runner(EnableRunner(config.docking_enabled))
    docking_runner.add_runner(return_fleet_checker)
    docking_runner.add_runner(DockingRunner(config.docker_num, config.fight_fleets, is_fight=config.fight_enabled))
    # Resupply the expedition fleets (re-used inside the expedition runner).
    resupply_runner = CompositeRunner()
    resupply_runner.add_runner(EnableRunner(config.expedition_enabled))
    resupply_runner.add_runner(return_fleet_checker)
    resupply_runner.add_runner(ResupplyRunner(config.expedition_fleets, enable_expedition_check=True, message=expedition_msg))
    # Send fleets out on expeditions, resupplying returned fleets first.
    expedition_runner = CompositeRunner()
    expedition_runner.add_runner(EnableRunner(config.expedition_enabled))
    expedition_runner.add_runner(resupply_runner)
    expedition_runner.add_runner(WhileRunner("return_fleet_message.png", resupply_runner))
    expedition_runner.add_runner(ExpeditionRunner(config.expedition_fleets, config.expeditions, message=expedition_msg))
    # Collect active quests every 5th cycle.
    questing_runner = CompositeRunner()
    questing_runner.add_runner(EnableRunner(config.quest_enabled))
    questing_runner.add_runner(Cron(5))
    questing_runner.add_runner(return_fleet_checker)
    questing_runner.add_runner(QuestRunner(config.quests_list))
    # Dismantle surplus ships every 30th cycle.
    dismantling_runner = CompositeRunner()
    dismantling_runner.add_runner(EnableRunner(config.dismantling_enabled))
    dismantling_runner.add_runner(Cron(30))
    dismantling_runner.add_runner(return_fleet_checker)
    dismantling_runner.add_runner(DismantlingRunner())
    mainloopWithException()
|
# Repository: nataliemcmullen/WikiMiney
urls = [
"pagecounts-20121201-000000.gz",
"pagecounts-20121201-010000.gz",
"pagecounts-20121201-020001.gz",
"pagecounts-20121201-030000.gz",
"pagecounts-20121201-040000.gz",
"pagecounts-20121201-050000.gz",
"pagecounts-20121201-060000.gz",
"pagecounts-20121201-070000.gz",
"pagecounts-20121201-080000.gz",
"pagecounts-20121201-090000.gz",
"pagecounts-20121201-100000.gz",
"pagecounts-20121201-110000.gz",
"pagecounts-20121201-120000.gz",
"pagecounts-20121201-130000.gz",
"pagecounts-20121201-140000.gz",
"pagecounts-20121201-150000.gz",
"pagecounts-20121201-160001.gz",
"pagecounts-20121201-170000.gz",
"pagecounts-20121201-180000.gz",
"pagecounts-20121201-190000.gz",
"pagecounts-20121201-200000.gz",
"pagecounts-20121201-210000.gz",
"pagecounts-20121201-220000.gz",
"pagecounts-20121201-230000.gz",
"pagecounts-20121202-000000.gz",
"pagecounts-20121202-010000.gz",
"pagecounts-20121202-020000.gz",
"pagecounts-20121202-030000.gz",
"pagecounts-20121202-040001.gz",
"pagecounts-20121202-050000.gz",
"pagecounts-20121202-060000.gz",
"pagecounts-20121202-070000.gz",
"pagecounts-20121202-080000.gz",
"pagecounts-20121202-090000.gz",
"pagecounts-20121202-100000.gz",
"pagecounts-20121202-110000.gz",
"pagecounts-20121202-120000.gz",
"pagecounts-20121202-130000.gz",
"pagecounts-20121202-140000.gz",
"pagecounts-20121202-150000.gz",
"pagecounts-20121202-160000.gz",
"pagecounts-20121202-170001.gz",
"pagecounts-20121202-180000.gz",
"pagecounts-20121202-190000.gz",
"pagecounts-20121202-200000.gz",
"pagecounts-20121202-210000.gz",
"pagecounts-20121202-220000.gz",
"pagecounts-20121202-230000.gz",
"pagecounts-20121203-000000.gz",
"pagecounts-20121203-010000.gz",
"pagecounts-20121203-020000.gz",
"pagecounts-20121203-030000.gz",
"pagecounts-20121203-040000.gz",
"pagecounts-20121203-050000.gz",
"pagecounts-20121203-060001.gz",
"pagecounts-20121203-070000.gz",
"pagecounts-20121203-080000.gz",
"pagecounts-20121203-090000.gz",
"pagecounts-20121203-100000.gz",
"pagecounts-20121203-110000.gz",
"pagecounts-20121203-120000.gz",
"pagecounts-20121203-130000.gz",
"pagecounts-20121203-140000.gz",
"pagecounts-20121203-150000.gz",
"pagecounts-20121203-160000.gz",
"pagecounts-20121203-170000.gz",
"pagecounts-20121203-180000.gz",
"pagecounts-20121203-190001.gz",
"pagecounts-20121203-200000.gz",
"pagecounts-20121203-210000.gz",
"pagecounts-20121203-220000.gz",
"pagecounts-20121203-230000.gz",
"pagecounts-20121204-000000.gz",
"pagecounts-20121204-010000.gz",
"pagecounts-20121204-020000.gz",
"pagecounts-20121204-030000.gz",
"pagecounts-20121204-040000.gz",
"pagecounts-20121204-050000.gz",
"pagecounts-20121204-060000.gz",
"pagecounts-20121204-070000.gz",
"pagecounts-20121204-080001.gz",
"pagecounts-20121204-090000.gz",
"pagecounts-20121204-100000.gz",
"pagecounts-20121204-110000.gz",
"pagecounts-20121204-120000.gz",
"pagecounts-20121204-130000.gz",
"pagecounts-20121204-140000.gz",
"pagecounts-20121204-150000.gz",
"pagecounts-20121204-160000.gz",
"pagecounts-20121204-170000.gz",
"pagecounts-20121204-180000.gz",
"pagecounts-20121204-190000.gz",
"pagecounts-20121204-200001.gz",
"pagecounts-20121204-210000.gz",
"pagecounts-20121204-220000.gz",
"pagecounts-20121204-230000.gz",
"pagecounts-20121205-000000.gz",
"pagecounts-20121205-010000.gz",
"pagecounts-20121205-020000.gz",
"pagecounts-20121205-030000.gz",
"pagecounts-20121205-040000.gz",
"pagecounts-20121205-050000.gz",
"pagecounts-20121205-060000.gz",
"pagecounts-20121205-070000.gz",
"pagecounts-20121205-080001.gz",
"pagecounts-20121205-090000.gz",
"pagecounts-20121205-100000.gz",
"pagecounts-20121205-110000.gz",
"pagecounts-20121205-120000.gz",
"pagecounts-20121205-130000.gz",
"pagecounts-20121205-140000.gz",
"pagecounts-20121205-150000.gz",
"pagecounts-20121205-160000.gz",
"pagecounts-20121205-170000.gz",
"pagecounts-20121205-180000.gz",
"pagecounts-20121205-190000.gz",
"pagecounts-20121205-200000.gz",
"pagecounts-20121205-210001.gz",
"pagecounts-20121205-220000.gz",
"pagecounts-20121205-230000.gz",
"pagecounts-20121206-000000.gz",
"pagecounts-20121206-010000.gz",
"pagecounts-20121206-020000.gz",
"pagecounts-20121206-030000.gz",
"pagecounts-20121206-040000.gz",
"pagecounts-20121206-050000.gz",
"pagecounts-20121206-060000.gz",
"pagecounts-20121206-070000.gz",
"pagecounts-20121206-080000.gz",
"pagecounts-20121206-090000.gz",
"pagecounts-20121206-100001.gz",
"pagecounts-20121206-110000.gz",
"pagecounts-20121206-120000.gz",
"pagecounts-20121206-130000.gz",
"pagecounts-20121206-140000.gz",
"pagecounts-20121206-150000.gz",
"pagecounts-20121206-160000.gz",
"pagecounts-20121206-170000.gz",
"pagecounts-20121206-180000.gz",
"pagecounts-20121206-190000.gz",
"pagecounts-20121206-200000.gz",
"pagecounts-20121206-210000.gz",
"pagecounts-20121206-220000.gz",
"pagecounts-20121206-230001.gz",
"pagecounts-20121207-000000.gz",
"pagecounts-20121207-010000.gz",
"pagecounts-20121207-020000.gz",
"pagecounts-20121207-030000.gz",
"pagecounts-20121207-040000.gz",
"pagecounts-20121207-050000.gz",
"pagecounts-20121207-060000.gz",
"pagecounts-20121207-070000.gz",
"pagecounts-20121207-080000.gz",
"pagecounts-20121207-090000.gz",
"pagecounts-20121207-100000.gz",
"pagecounts-20121207-110000.gz",
"pagecounts-20121207-120001.gz",
"pagecounts-20121207-130000.gz",
"pagecounts-20121207-140000.gz",
"pagecounts-20121207-150000.gz",
"pagecounts-20121207-160000.gz",
"pagecounts-20121207-170000.gz",
"pagecounts-20121207-180000.gz",
"pagecounts-20121207-190000.gz",
"pagecounts-20121207-200000.gz",
"pagecounts-20121207-210000.gz",
"pagecounts-20121207-220000.gz",
"pagecounts-20121207-230000.gz",
"pagecounts-20121208-000000.gz",
"pagecounts-20121208-010001.gz",
"pagecounts-20121208-020000.gz",
"pagecounts-20121208-030000.gz",
"pagecounts-20121208-040000.gz",
"pagecounts-20121208-050000.gz",
"pagecounts-20121208-060000.gz",
"pagecounts-20121208-070000.gz",
"pagecounts-20121208-080000.gz",
"pagecounts-20121208-090000.gz",
"pagecounts-20121208-100000.gz",
"pagecounts-20121208-110000.gz",
"pagecounts-20121208-120000.gz",
"pagecounts-20121208-130000.gz",
"pagecounts-20121208-140001.gz",
"pagecounts-20121208-150000.gz",
"pagecounts-20121208-160000.gz",
"pagecounts-20121208-170000.gz",
"pagecounts-20121208-180000.gz",
"pagecounts-20121208-190000.gz",
"pagecounts-20121208-200000.gz",
"pagecounts-20121208-210000.gz",
"pagecounts-20121208-220000.gz",
"pagecounts-20121208-230000.gz",
"pagecounts-20121209-000000.gz",
"pagecounts-20121209-010000.gz",
"pagecounts-20121209-020000.gz",
"pagecounts-20121209-030001.gz",
"pagecounts-20121209-040000.gz",
"pagecounts-20121209-050000.gz",
"pagecounts-20121209-060000.gz",
"pagecounts-20121209-070000.gz",
"pagecounts-20121209-080000.gz",
"pagecounts-20121209-090000.gz",
"pagecounts-20121209-100000.gz",
"pagecounts-20121209-110000.gz",
"pagecounts-20121209-120000.gz",
"pagecounts-20121209-130000.gz",
"pagecounts-20121209-140000.gz",
"pagecounts-20121209-150000.gz",
"pagecounts-20121209-160001.gz",
"pagecounts-20121209-170000.gz",
"pagecounts-20121209-180000.gz",
"pagecounts-20121209-190000.gz",
"pagecounts-20121209-200000.gz",
"pagecounts-20121209-210000.gz",
"pagecounts-20121209-220000.gz",
"pagecounts-20121209-230000.gz",
"pagecounts-20121210-000000.gz",
"pagecounts-20121210-010000.gz",
"pagecounts-20121210-020000.gz",
"pagecounts-20121210-030000.gz",
"pagecounts-20121210-040000.gz",
"pagecounts-20121210-050001.gz",
"pagecounts-20121210-060000.gz",
"pagecounts-20121210-070000.gz",
"pagecounts-20121210-080000.gz",
"pagecounts-20121210-090000.gz",
"pagecounts-20121210-100000.gz",
"pagecounts-20121210-110000.gz",
"pagecounts-20121210-120000.gz",
"pagecounts-20121210-130000.gz",
"pagecounts-20121210-140000.gz",
"pagecounts-20121210-150000.gz",
"pagecounts-20121210-160000.gz",
"pagecounts-20121210-170001.gz",
"pagecounts-20121210-180000.gz",
"pagecounts-20121210-190000.gz",
"pagecounts-20121210-200000.gz",
"pagecounts-20121210-210000.gz",
"pagecounts-20121210-220000.gz",
"pagecounts-20121210-230000.gz",
"pagecounts-20121211-000000.gz",
"pagecounts-20121211-010000.gz",
"pagecounts-20121211-020000.gz",
"pagecounts-20121211-030000.gz",
"pagecounts-20121211-040000.gz",
"pagecounts-20121211-050000.gz",
"pagecounts-20121211-060001.gz",
"pagecounts-20121211-070000.gz",
"pagecounts-20121211-080000.gz",
"pagecounts-20121211-090000.gz",
"pagecounts-20121211-100000.gz",
"pagecounts-20121211-110000.gz",
"pagecounts-20121211-120000.gz",
"pagecounts-20121211-130000.gz",
"pagecounts-20121211-140000.gz",
"pagecounts-20121211-150000.gz",
"pagecounts-20121211-160000.gz",
"pagecounts-20121211-170000.gz",
"pagecounts-20121211-180001.gz",
"pagecounts-20121211-190000.gz",
"pagecounts-20121211-200000.gz",
"pagecounts-20121211-210000.gz",
"pagecounts-20121211-220000.gz",
"pagecounts-20121211-230000.gz",
"pagecounts-20121212-000000.gz",
"pagecounts-20121212-010000.gz",
"pagecounts-20121212-020000.gz",
"pagecounts-20121212-030000.gz",
"pagecounts-20121212-040000.gz",
"pagecounts-20121212-050000.gz",
"pagecounts-20121212-060001.gz",
"pagecounts-20121212-070000.gz",
"pagecounts-20121212-080000.gz",
"pagecounts-20121212-090000.gz",
"pagecounts-20121212-100000.gz",
"pagecounts-20121212-110000.gz",
"pagecounts-20121212-120000.gz",
"pagecounts-20121212-130000.gz",
"pagecounts-20121212-140000.gz",
"pagecounts-20121212-150000.gz",
"pagecounts-20121212-160000.gz",
"pagecounts-20121212-170000.gz",
"pagecounts-20121212-180001.gz",
"pagecounts-20121212-190000.gz",
"pagecounts-20121212-200000.gz",
"pagecounts-20121212-210000.gz",
"pagecounts-20121212-220000.gz",
"pagecounts-20121212-230000.gz",
"pagecounts-20121213-000000.gz",
"pagecounts-20121213-010000.gz",
"pagecounts-20121213-020000.gz",
"pagecounts-20121213-030000.gz",
"pagecounts-20121213-040000.gz",
"pagecounts-20121213-050000.gz",
"pagecounts-20121213-060000.gz",
"pagecounts-20121213-070001.gz",
"pagecounts-20121213-080000.gz",
"pagecounts-20121213-090000.gz",
"pagecounts-20121213-100000.gz",
"pagecounts-20121213-110000.gz",
"pagecounts-20121213-120000.gz",
"pagecounts-20121213-130000.gz",
"pagecounts-20121213-140000.gz",
"pagecounts-20121213-150000.gz",
"pagecounts-20121213-160000.gz",
"pagecounts-20121213-170000.gz",
"pagecounts-20121213-180000.gz",
"pagecounts-20121213-190000.gz",
"pagecounts-20121213-200001.gz",
"pagecounts-20121213-210000.gz",
"pagecounts-20121213-220000.gz",
"pagecounts-20121213-230000.gz",
"pagecounts-20121214-000000.gz",
"pagecounts-20121214-010000.gz",
"pagecounts-20121214-020000.gz",
"pagecounts-20121214-030000.gz",
"pagecounts-20121214-040000.gz",
"pagecounts-20121214-050000.gz",
"pagecounts-20121214-060000.gz",
"pagecounts-20121214-070000.gz",
"pagecounts-20121214-080000.gz",
"pagecounts-20121214-090001.gz",
"pagecounts-20121214-100000.gz",
"pagecounts-20121214-110000.gz",
"pagecounts-20121214-120000.gz",
"pagecounts-20121214-130000.gz",
"pagecounts-20121214-140000.gz",
"pagecounts-20121214-150000.gz",
"pagecounts-20121214-160000.gz",
"pagecounts-20121214-170000.gz",
"pagecounts-20121214-180000.gz",
"pagecounts-20121214-190000.gz",
"pagecounts-20121214-200000.gz",
"pagecounts-20121214-210001.gz",
"pagecounts-20121214-220000.gz",
"pagecounts-20121214-230000.gz",
"pagecounts-20121215-000000.gz",
"pagecounts-20121215-010000.gz",
"pagecounts-20121215-020000.gz",
"pagecounts-20121215-030000.gz",
"pagecounts-20121215-040000.gz",
"pagecounts-20121215-050000.gz",
"pagecounts-20121215-060000.gz",
"pagecounts-20121215-070000.gz",
"pagecounts-20121215-080000.gz",
"pagecounts-20121215-090000.gz",
"pagecounts-20121215-100001.gz",
"pagecounts-20121215-110000.gz",
"pagecounts-20121215-120000.gz",
"pagecounts-20121215-130000.gz",
"pagecounts-20121215-140000.gz",
"pagecounts-20121215-150000.gz",
"pagecounts-20121215-160000.gz",
"pagecounts-20121215-170000.gz",
"pagecounts-20121215-180000.gz",
"pagecounts-20121215-190000.gz",
"pagecounts-20121215-200000.gz",
"pagecounts-20121215-210000.gz",
"pagecounts-20121215-220001.gz",
"pagecounts-20121215-230000.gz",
"pagecounts-20121216-000000.gz",
"pagecounts-20121216-010000.gz",
"pagecounts-20121216-020000.gz",
"pagecounts-20121216-030000.gz",
"pagecounts-20121216-040000.gz",
"pagecounts-20121216-050000.gz",
"pagecounts-20121216-060000.gz",
"pagecounts-20121216-070000.gz",
"pagecounts-20121216-080000.gz",
"pagecounts-20121216-090000.gz",
"pagecounts-20121216-100000.gz",
"pagecounts-20121216-110001.gz",
"pagecounts-20121216-120000.gz",
"pagecounts-20121216-130000.gz",
"pagecounts-20121216-140000.gz",
"pagecounts-20121216-150000.gz",
"pagecounts-20121216-160000.gz",
"pagecounts-20121216-170000.gz",
"pagecounts-20121216-180000.gz",
"pagecounts-20121216-190000.gz",
"pagecounts-20121216-200000.gz",
"pagecounts-20121216-210000.gz",
"pagecounts-20121216-220000.gz",
"pagecounts-20121216-230000.gz",
"pagecounts-20121217-000001.gz",
"pagecounts-20121217-010000.gz",
"pagecounts-20121217-020000.gz",
"pagecounts-20121217-030000.gz",
"pagecounts-20121217-040000.gz",
"pagecounts-20121217-050000.gz",
"pagecounts-20121217-060000.gz",
"pagecounts-20121217-070000.gz",
"pagecounts-20121217-080000.gz",
"pagecounts-20121217-090000.gz",
"pagecounts-20121217-100000.gz",
"pagecounts-20121217-110000.gz",
"pagecounts-20121217-120000.gz",
"pagecounts-20121217-130001.gz",
"pagecounts-20121217-140000.gz",
"pagecounts-20121217-150000.gz",
"pagecounts-20121217-160000.gz",
"pagecounts-20121217-170000.gz",
"pagecounts-20121217-180000.gz",
"pagecounts-20121217-190000.gz",
"pagecounts-20121217-200000.gz",
"pagecounts-20121217-210000.gz",
"pagecounts-20121217-220000.gz",
"pagecounts-20121217-230000.gz",
"pagecounts-20121218-000000.gz",
"pagecounts-20121218-010001.gz",
"pagecounts-20121218-020000.gz",
"pagecounts-20121218-030000.gz",
"pagecounts-20121218-040000.gz",
"pagecounts-20121218-050000.gz",
"pagecounts-20121218-060000.gz",
"pagecounts-20121218-070000.gz",
"pagecounts-20121218-080000.gz",
"pagecounts-20121218-090000.gz",
"pagecounts-20121218-100000.gz",
"pagecounts-20121218-110000.gz",
"pagecounts-20121218-120000.gz",
"pagecounts-20121218-130001.gz",
"pagecounts-20121218-140000.gz",
"pagecounts-20121218-150000.gz",
"pagecounts-20121218-160000.gz",
"pagecounts-20121218-170000.gz",
"pagecounts-20121218-180000.gz",
"pagecounts-20121218-190000.gz",
"pagecounts-20121218-200000.gz",
"pagecounts-20121218-210000.gz",
"pagecounts-20121218-220000.gz",
"pagecounts-20121218-230000.gz",
"pagecounts-20121219-000000.gz",
"pagecounts-20121219-010000.gz",
"pagecounts-20121219-020001.gz",
"pagecounts-20121219-030000.gz",
"pagecounts-20121219-040000.gz",
"pagecounts-20121219-050000.gz",
"pagecounts-20121219-060000.gz",
"pagecounts-20121219-070000.gz",
"pagecounts-20121219-080000.gz",
"pagecounts-20121219-090000.gz",
"pagecounts-20121219-100000.gz",
"pagecounts-20121219-110000.gz",
"pagecounts-20121219-120000.gz",
"pagecounts-20121219-130000.gz",
"pagecounts-20121219-140000.gz",
"pagecounts-20121219-150001.gz",
"pagecounts-20121219-160000.gz",
"pagecounts-20121219-170000.gz",
"pagecounts-20121219-180000.gz",
"pagecounts-20121219-190000.gz",
"pagecounts-20121219-200000.gz",
"pagecounts-20121219-210000.gz",
"pagecounts-20121219-220000.gz",
"pagecounts-20121219-230000.gz",
"pagecounts-20121220-000000.gz",
"pagecounts-20121220-010000.gz",
"pagecounts-20121220-020001.gz",
"pagecounts-20121220-030000.gz",
"pagecounts-20121220-040000.gz",
"pagecounts-20121220-050000.gz",
"pagecounts-20121220-060000.gz",
"pagecounts-20121220-070000.gz",
"pagecounts-20121220-080000.gz",
"pagecounts-20121220-090000.gz",
"pagecounts-20121220-100000.gz",
"pagecounts-20121220-110000.gz",
"pagecounts-20121220-120000.gz",
"pagecounts-20121220-130000.gz",
"pagecounts-20121220-140000.gz",
"pagecounts-20121220-150001.gz",
"pagecounts-20121220-160000.gz",
"pagecounts-20121220-170000.gz",
"pagecounts-20121220-180000.gz",
"pagecounts-20121220-190000.gz",
"pagecounts-20121220-200000.gz",
"pagecounts-20121220-210000.gz",
"pagecounts-20121220-220000.gz",
"pagecounts-20121220-230000.gz",
"pagecounts-20121221-000000.gz",
"pagecounts-20121221-010000.gz",
"pagecounts-20121221-020000.gz",
"pagecounts-20121221-030000.gz",
"pagecounts-20121221-040001.gz",
"pagecounts-20121221-050000.gz",
"pagecounts-20121221-060000.gz",
"pagecounts-20121221-070000.gz",
"pagecounts-20121221-080000.gz",
"pagecounts-20121221-090000.gz",
"pagecounts-20121221-100000.gz",
"pagecounts-20121221-110000.gz",
"pagecounts-20121221-120000.gz",
"pagecounts-20121221-130000.gz",
"pagecounts-20121221-140000.gz",
"pagecounts-20121221-150000.gz",
"pagecounts-20121221-160001.gz",
"pagecounts-20121221-170000.gz",
"pagecounts-20121221-180000.gz",
"pagecounts-20121221-190000.gz",
"pagecounts-20121221-200000.gz",
"pagecounts-20121221-210000.gz",
"pagecounts-20121221-220000.gz",
"pagecounts-20121221-230000.gz",
"pagecounts-20121222-000000.gz",
"pagecounts-20121222-010000.gz",
"pagecounts-20121222-020000.gz",
"pagecounts-20121222-030000.gz",
"pagecounts-20121222-040001.gz",
"pagecounts-20121222-050000.gz",
"pagecounts-20121222-060000.gz",
"pagecounts-20121222-070000.gz",
"pagecounts-20121222-080000.gz",
"pagecounts-20121222-090000.gz",
"pagecounts-20121222-100000.gz",
"pagecounts-20121222-110000.gz",
"pagecounts-20121222-120000.gz",
"pagecounts-20121222-130000.gz",
"pagecounts-20121222-140000.gz",
"pagecounts-20121222-150000.gz",
"pagecounts-20121222-160001.gz",
"pagecounts-20121222-170000.gz",
"pagecounts-20121222-180000.gz",
"pagecounts-20121222-190000.gz",
"pagecounts-20121222-200000.gz",
"pagecounts-20121222-210000.gz",
"pagecounts-20121222-220000.gz",
"pagecounts-20121222-230000.gz",
"pagecounts-20121223-000000.gz",
"pagecounts-20121223-010000.gz",
"pagecounts-20121223-020000.gz",
"pagecounts-20121223-030000.gz",
"pagecounts-20121223-040001.gz",
"pagecounts-20121223-050000.gz",
"pagecounts-20121223-060000.gz",
"pagecounts-20121223-070000.gz",
"pagecounts-20121223-080000.gz",
"pagecounts-20121223-090000.gz",
"pagecounts-20121223-100000.gz",
"pagecounts-20121223-110000.gz",
"pagecounts-20121223-120000.gz",
"pagecounts-20121223-130000.gz",
"pagecounts-20121223-140000.gz",
"pagecounts-20121223-150000.gz",
"pagecounts-20121223-160000.gz",
"pagecounts-20121223-170001.gz",
"pagecounts-20121223-180000.gz",
"pagecounts-20121223-190000.gz",
"pagecounts-20121223-200000.gz",
"pagecounts-20121223-210000.gz",
"pagecounts-20121223-220000.gz",
"pagecounts-20121223-230000.gz",
"pagecounts-20121224-000000.gz",
"pagecounts-20121224-010000.gz",
"pagecounts-20121224-020000.gz",
"pagecounts-20121224-030000.gz",
"pagecounts-20121224-040000.gz",
"pagecounts-20121224-050001.gz",
"pagecounts-20121224-060000.gz",
"pagecounts-20121224-070000.gz",
"pagecounts-20121224-080000.gz",
"pagecounts-20121224-090000.gz",
"pagecounts-20121224-100000.gz",
"pagecounts-20121224-110000.gz",
"pagecounts-20121224-120000.gz",
"pagecounts-20121224-130000.gz",
"pagecounts-20121224-140000.gz",
"pagecounts-20121224-150000.gz",
"pagecounts-20121224-160000.gz",
"pagecounts-20121224-170000.gz",
"pagecounts-20121224-180001.gz",
"pagecounts-20121224-190000.gz",
"pagecounts-20121224-200000.gz",
"pagecounts-20121224-210000.gz",
"pagecounts-20121224-220000.gz",
"pagecounts-20121224-230000.gz",
"pagecounts-20121225-000000.gz",
"pagecounts-20121225-010000.gz",
"pagecounts-20121225-020000.gz",
"pagecounts-20121225-030000.gz",
"pagecounts-20121225-040000.gz",
"pagecounts-20121225-050000.gz",
"pagecounts-20121225-060000.gz",
"pagecounts-20121225-070001.gz",
"pagecounts-20121225-080000.gz",
"pagecounts-20121225-090000.gz",
"pagecounts-20121225-100000.gz",
"pagecounts-20121225-110000.gz",
"pagecounts-20121225-120000.gz",
"pagecounts-20121225-130000.gz",
"pagecounts-20121225-140000.gz",
"pagecounts-20121225-150000.gz",
"pagecounts-20121225-160000.gz",
"pagecounts-20121225-170000.gz",
"pagecounts-20121225-180000.gz",
"pagecounts-20121225-190001.gz",
"pagecounts-20121225-200000.gz",
"pagecounts-20121225-210000.gz",
"pagecounts-20121225-220000.gz",
"pagecounts-20121225-230000.gz",
"pagecounts-20121226-000000.gz",
"pagecounts-20121226-010000.gz",
"pagecounts-20121226-020000.gz",
"pagecounts-20121226-030000.gz",
"pagecounts-20121226-040000.gz",
"pagecounts-20121226-050000.gz",
"pagecounts-20121226-060000.gz",
"pagecounts-20121226-070001.gz",
"pagecounts-20121226-080000.gz",
"pagecounts-20121226-090000.gz",
"pagecounts-20121226-100000.gz",
"pagecounts-20121226-110000.gz",
"pagecounts-20121226-120000.gz",
"pagecounts-20121226-130000.gz",
"pagecounts-20121226-140000.gz",
"pagecounts-20121226-150000.gz",
"pagecounts-20121226-160000.gz",
"pagecounts-20121226-170000.gz",
"pagecounts-20121226-180000.gz",
"pagecounts-20121226-190000.gz",
"pagecounts-20121226-200001.gz",
"pagecounts-20121226-210000.gz",
"pagecounts-20121226-220000.gz",
"pagecounts-20121226-230000.gz",
"pagecounts-20121227-000000.gz",
"pagecounts-20121227-010000.gz",
"pagecounts-20121227-020000.gz",
"pagecounts-20121227-030000.gz",
"pagecounts-20121227-040000.gz",
"pagecounts-20121227-050000.gz",
"pagecounts-20121227-060000.gz",
"pagecounts-20121227-070000.gz",
"pagecounts-20121227-080001.gz",
"pagecounts-20121227-090000.gz",
"pagecounts-20121227-100000.gz",
"pagecounts-20121227-110000.gz",
"pagecounts-20121227-120000.gz",
"pagecounts-20121227-130000.gz",
"pagecounts-20121227-140000.gz",
"pagecounts-20121227-150000.gz",
"pagecounts-20121227-160000.gz",
"pagecounts-20121227-170000.gz",
"pagecounts-20121227-180000.gz",
"pagecounts-20121227-190000.gz",
"pagecounts-20121227-200000.gz",
"pagecounts-20121227-210001.gz",
"pagecounts-20121227-220000.gz",
"pagecounts-20121227-230000.gz",
"pagecounts-20121228-000000.gz",
"pagecounts-20121228-010000.gz",
"pagecounts-20121228-020000.gz",
"pagecounts-20121228-030000.gz",
"pagecounts-20121228-040000.gz",
"pagecounts-20121228-050000.gz",
"pagecounts-20121228-060000.gz",
"pagecounts-20121228-070000.gz",
"pagecounts-20121228-080000.gz",
"pagecounts-20121228-090001.gz",
"pagecounts-20121228-100000.gz",
"pagecounts-20121228-110000.gz",
"pagecounts-20121228-120000.gz",
"pagecounts-20121228-130000.gz",
"pagecounts-20121228-140000.gz",
"pagecounts-20121228-150000.gz",
"pagecounts-20121228-160000.gz",
"pagecounts-20121228-170000.gz",
"pagecounts-20121228-180000.gz",
"pagecounts-20121228-190000.gz",
"pagecounts-20121228-200000.gz",
"pagecounts-20121228-210000.gz",
"pagecounts-20121228-220001.gz",
"pagecounts-20121228-230000.gz",
"pagecounts-20121229-000000.gz",
"pagecounts-20121229-010000.gz",
"pagecounts-20121229-020000.gz",
"pagecounts-20121229-030000.gz",
"pagecounts-20121229-040000.gz",
"pagecounts-20121229-050000.gz",
"pagecounts-20121229-060000.gz",
"pagecounts-20121229-070000.gz",
"pagecounts-20121229-080000.gz",
"pagecounts-20121229-090000.gz",
"pagecounts-20121229-100000.gz",
"pagecounts-20121229-110001.gz",
"pagecounts-20121229-120000.gz",
"pagecounts-20121229-130000.gz",
"pagecounts-20121229-140000.gz",
"pagecounts-20121229-150000.gz",
"pagecounts-20121229-160000.gz",
"pagecounts-20121229-170000.gz",
"pagecounts-20121229-180000.gz",
"pagecounts-20121229-190000.gz",
"pagecounts-20121229-200000.gz",
"pagecounts-20121229-210000.gz",
"pagecounts-20121229-220000.gz",
"pagecounts-20121229-230001.gz",
"pagecounts-20121230-000000.gz",
"pagecounts-20121230-010000.gz",
"pagecounts-20121230-020000.gz",
"pagecounts-20121230-030000.gz",
"pagecounts-20121230-040000.gz",
"pagecounts-20121230-050000.gz",
"pagecounts-20121230-060000.gz",
"pagecounts-20121230-070000.gz",
"pagecounts-20121230-080000.gz",
"pagecounts-20121230-090000.gz",
"pagecounts-20121230-100000.gz",
"pagecounts-20121230-110001.gz",
"pagecounts-20121230-120000.gz",
"pagecounts-20121230-130000.gz",
"pagecounts-20121230-140000.gz",
"pagecounts-20121230-150000.gz",
"pagecounts-20121230-160000.gz",
"pagecounts-20121230-170000.gz",
"pagecounts-20121230-180000.gz",
"pagecounts-20121230-190000.gz",
"pagecounts-20121230-200000.gz",
"pagecounts-20121230-210000.gz",
"pagecounts-20121230-220000.gz",
"pagecounts-20121230-230000.gz",
"pagecounts-20121231-000001.gz",
"pagecounts-20121231-010000.gz",
"pagecounts-20121231-020000.gz",
"pagecounts-20121231-030000.gz",
"pagecounts-20121231-040000.gz",
"pagecounts-20121231-050000.gz",
"pagecounts-20121231-060000.gz",
"pagecounts-20121231-070000.gz",
"pagecounts-20121231-080000.gz",
"pagecounts-20121231-090000.gz",
"pagecounts-20121231-100000.gz",
"pagecounts-20121231-110000.gz",
"pagecounts-20121231-120000.gz",
"pagecounts-20121231-130001.gz",
"pagecounts-20121231-140000.gz",
"pagecounts-20121231-150000.gz",
"pagecounts-20121231-160000.gz",
"pagecounts-20121231-170000.gz",
"pagecounts-20121231-180000.gz",
"pagecounts-20121231-190000.gz",
"pagecounts-20121231-200000.gz",
"pagecounts-20121231-210000.gz",
"pagecounts-20121231-220000.gz",
"pagecounts-20121231-230000.gz",
]
import os
base = "http://dumps.wikimedia.org/other/pagecounts-raw/"
tail = "2012/2012-12/"
# Download each hourly pagecounts dump that is not already present locally.
# A dump may already exist on disk under any of three names: the processed
# English-only file ("en-" prefix, no .gz), the decompressed file, or the
# raw .gz download — skip the fetch if any of them exists.
for i, url in enumerate(urls, 1):
    candidates = ("en-" + url[:-3], url[:-3], url)
    if not any(os.path.isfile(name) for name in candidates):
        # NOTE(review): the URL is interpolated into a shell command; safe
        # only because it comes from the trusted literal list above.
        os.system("curl -O %s" % (base + tail + url))
    # Fix: "completeted" typo in the progress message.
    print("%d completed of %d total. %d remaining" % (i, len(urls), len(urls) - i))
|
# gh_stars: 0
# test_unpacker.py
"""Unit tests for lta/unpacker.py."""
from unittest.mock import call, mock_open, patch
import pytest # type: ignore
from tornado.web import HTTPError # type: ignore
from lta.unpacker import Unpacker, main
from .test_util import AsyncMock
@pytest.fixture
def config():
    """Supply a stock Unpacker component configuration."""
    # All values are strings because the component parses them as if they
    # were environment variables; the URLs and tokens are fakes that are
    # never contacted by the constructor tests below.
    return {
        "COMPONENT_NAME": "testing-unpacker",
        "DEST_SITE": "WIPAC",
        "FILE_CATALOG_REST_TOKEN": "fake-file-catalog-token",
        "FILE_CATALOG_REST_URL": "http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/",
        "HEARTBEAT_PATCH_RETRIES": "3",
        "HEARTBEAT_PATCH_TIMEOUT_SECONDS": "30",
        "HEARTBEAT_SLEEP_DURATION_SECONDS": "60",
        "INPUT_STATUS": "unpacking",
        "LTA_REST_TOKEN": "fake-lta-rest-token",
        "LTA_REST_URL": "http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/",
        "OUTPUT_STATUS": "completed",
        "RUN_ONCE_AND_DIE": "False",
        "SOURCE_SITE": "NERSC",
        "UNPACKER_OUTBOX_PATH": "/tmp/lta/testing/unpacker/outbox",
        "UNPACKER_WORKBOX_PATH": "/tmp/lta/testing/unpacker/workbox",
        "WORK_RETRIES": "3",
        "WORK_SLEEP_DURATION_SECONDS": "60",
        "WORK_TIMEOUT_SECONDS": "30",
    }
def test_constructor_missing_config():
    """Fail with a TypeError if a configuration object isn't provided."""
    # No arguments at all: both required positionals are missing.
    pytest.raises(TypeError, Unpacker)
def test_constructor_missing_logging():
    """Fail with a TypeError if a logging object isn't provided."""
    bogus_config = {"PAN_GALACTIC_GARGLE_BLASTER": "Yummy"}
    # Only the config argument is supplied; the logger positional is missing.
    pytest.raises(TypeError, Unpacker, bogus_config)
def test_constructor_config_missing_values(mocker):
    """Fail with a ValueError if the configuration object is missing required configuration variables."""
    incomplete_config = {"PAN_GALACTIC_GARGLE_BLASTER": "Yummy"}
    mock_logger = mocker.MagicMock()
    # None of the required configuration keys are present in the dict.
    pytest.raises(ValueError, Unpacker, incomplete_config, mock_logger)
def test_constructor_config_poison_values(config, mocker):
    """Fail with a ValueError if a required configuration variable is set to None."""
    # Copy the stock config and poison one required value with None.
    poisoned_config = dict(config, LTA_REST_URL=None)
    mock_logger = mocker.MagicMock()
    pytest.raises(ValueError, Unpacker, poisoned_config, mock_logger)
def test_constructor_config(config, mocker):
    """Test that a Unpacker can be constructed with a configuration object and a logging object."""
    mock_logger = mocker.MagicMock()
    unpacker = Unpacker(config, mock_logger)
    # Spot-check that configuration values ended up on the instance; the
    # "60" strings compare equal to int 60, so they were parsed as integers.
    assert unpacker.name == "testing-unpacker"
    assert unpacker.heartbeat_sleep_duration_seconds == 60
    assert unpacker.work_sleep_duration_seconds == 60
    assert unpacker.lta_rest_url == "http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/"
    assert unpacker.logger == mock_logger
def test_constructor_config_sleep_type_int(config, mocker):
    """Ensure that sleep seconds can also be provided as an integer."""
    mock_logger = mocker.MagicMock()
    unpacker = Unpacker(config, mock_logger)
    assert unpacker.logger == mock_logger
    assert unpacker.name == "testing-unpacker"
    assert unpacker.lta_rest_url == "http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/"
    # The string "60" from the config compares equal to the integer 60 only
    # if it was converted, so these asserts prove the int handling.
    assert unpacker.heartbeat_sleep_duration_seconds == 60
    assert unpacker.work_sleep_duration_seconds == 60
def test_constructor_state(config, mocker):
    """Verify that the Unpacker has a reasonable state when it is first constructed."""
    unpacker = Unpacker(config, mocker.MagicMock())
    # Immediately after construction the two work timestamps are one and the
    # same object (identity, not just equality).
    assert unpacker.last_work_begin_timestamp is unpacker.last_work_end_timestamp
def test_do_status(config, mocker):
    """Verify that the Unpacker has no additional state to offer."""
    unpacker = Unpacker(config, mocker.MagicMock())
    status = unpacker._do_status()
    assert status == {}
@pytest.mark.asyncio
async def test_script_main(config, mocker, monkeypatch):
    """
    Verify Unpacker component behavior when run as a script.

    Test to make sure running the Unpacker as a script does the setup work
    that we expect and then launches the unpacker service.
    """
    # Feed the stock configuration to main() via environment variables.
    for key, value in config.items():
        monkeypatch.setenv(key, value)
    patched = {
        "event_loop": mocker.patch("asyncio.get_event_loop"),
        "root_logger": mocker.patch("logging.getLogger"),
        "status_loop": mocker.patch("lta.unpacker.status_loop"),
        "work_loop": mocker.patch("lta.unpacker.work_loop"),
    }
    main()
    # Every piece of setup/launch machinery must have been invoked.
    for mock in patched.values():
        mock.assert_called()
@pytest.mark.asyncio
async def test_unpacker_logs_configuration(mocker):
    """Test to make sure the Unpacker logs its configuration."""
    mock_logger = mocker.MagicMock()
    unpacker_config = {
        "COMPONENT_NAME": "logme-testing-unpacker",
        "DEST_SITE": "WIPAC",
        "FILE_CATALOG_REST_TOKEN": "fake-file-catalog-token",
        "FILE_CATALOG_REST_URL": "http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/",
        "HEARTBEAT_PATCH_RETRIES": "1",
        "HEARTBEAT_PATCH_TIMEOUT_SECONDS": "20",
        "HEARTBEAT_SLEEP_DURATION_SECONDS": "30",
        "INPUT_STATUS": "unpacking",
        "LTA_REST_TOKEN": "logme-fake-lta-rest-token",
        "LTA_REST_URL": "logme-http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/",
        "OUTPUT_STATUS": "completed",
        "RUN_ONCE_AND_DIE": "False",
        "SOURCE_SITE": "NERSC",
        "UNPACKER_OUTBOX_PATH": "logme/tmp/lta/testing/unpacker/outbox",
        "UNPACKER_WORKBOX_PATH": "logme/tmp/lta/testing/unpacker/workbox",
        "WORK_RETRIES": "5",
        "WORK_SLEEP_DURATION_SECONDS": "70",
        "WORK_TIMEOUT_SECONDS": "90",
    }
    Unpacker(unpacker_config, mock_logger)
    # Expected log output: a banner line followed by one "KEY = value" line
    # per configuration entry, in the dict's insertion order.
    expected_calls = [call("unpacker 'logme-testing-unpacker' is configured:")]
    expected_calls += [
        call("{} = {}".format(key, value))
        for key, value in unpacker_config.items()
    ]
    mock_logger.info.assert_has_calls(expected_calls)
@pytest.mark.asyncio
async def test_unpacker_run(config, mocker):
    """Test the Unpacker does the work the unpacker should do."""
    unpacker = Unpacker(config, mocker.MagicMock())
    # Replace the worker coroutine and confirm run() dispatches to it.
    unpacker._do_work = AsyncMock()
    await unpacker.run()
    unpacker._do_work.assert_called()
@pytest.mark.asyncio
async def test_unpacker_run_exception(config, mocker):
    """Test an error doesn't kill the Unpacker."""
    unpacker = Unpacker(config, mocker.MagicMock())
    unpacker.last_work_end_timestamp = None
    # run() is expected to swallow the exception raised by _do_work ...
    unpacker._do_work = AsyncMock()
    unpacker._do_work.side_effect = [Exception("bad thing happen!")]
    await unpacker.run()
    unpacker._do_work.assert_called()
    # ... and still stamp the end-of-work timestamp afterwards.
    assert unpacker.last_work_end_timestamp
@pytest.mark.asyncio
async def test_unpacker_do_work_pop_exception(config, mocker):
    """Test that _do_work raises when the RestClient can't pop."""
    mock_request = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
    mock_request.side_effect = HTTPError(500, "LTA DB on fire. Again.")
    unpacker = Unpacker(config, mocker.MagicMock())
    # The HTTPError from the pop request must propagate out of _do_work.
    with pytest.raises(HTTPError):
        await unpacker._do_work()
    mock_request.assert_called_with("POST", '/Bundles/actions/pop?source=NERSC&dest=WIPAC&status=unpacking', mocker.ANY)
@pytest.mark.asyncio
async def test_unpacker_do_work_no_results(config, mocker):
    """Test that _do_work goes on vacation when the LTA DB has no work."""
    mock_claim = mocker.patch("lta.unpacker.Unpacker._do_work_claim", new_callable=AsyncMock)
    mock_claim.return_value = False
    unpacker = Unpacker(config, mocker.MagicMock())
    # With nothing claimed, _do_work should simply return without raising.
    await unpacker._do_work()
@pytest.mark.asyncio
async def test_unpacker_do_work_claim_no_results(config, mocker):
    """Test that _do_work_claim returns False when the LTA DB has no work."""
    mock_request = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
    # The LTA DB responds to the pop with an empty claim.
    mock_request.return_value = {"bundle": None}
    unpacker = Unpacker(config, mocker.MagicMock())
    claimed = await unpacker._do_work_claim()
    assert not claimed
    mock_request.assert_called_with("POST", '/Bundles/actions/pop?source=NERSC&dest=WIPAC&status=unpacking', mocker.ANY)
@pytest.mark.asyncio
async def test_unpacker_do_work_yes_results(config, mocker):
    """Test that _do_work_claim processes each Bundle that it gets from the LTA DB."""
    bundle_obj = {"uuid": "f74db80e-9661-40cc-9f01-8d087af23f56"}
    mock_request = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
    mock_request.return_value = {"bundle": bundle_obj}
    mock_do_work_bundle = mocker.patch("lta.unpacker.Unpacker._do_work_bundle", new_callable=AsyncMock)
    unpacker = Unpacker(config, mocker.MagicMock())
    # A successful claim returns truthy and hands the bundle to the worker.
    assert await unpacker._do_work_claim()
    mock_request.assert_called_with("POST", '/Bundles/actions/pop?source=NERSC&dest=WIPAC&status=unpacking', mocker.ANY)
    mock_do_work_bundle.assert_called_with(mock_request, bundle_obj)
@pytest.mark.asyncio
async def test_unpacker_do_work_raise_exception(config, mocker):
    """Test that a Bundle whose processing fails gets quarantined and the error re-raised."""
    bundle_obj = {"uuid": "f74db80e-9661-40cc-9f01-8d087af23f56"}
    mock_request = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
    mock_request.return_value = {"bundle": bundle_obj}
    mock_do_work_bundle = mocker.patch("lta.unpacker.Unpacker._do_work_bundle", new_callable=AsyncMock)
    mock_do_work_bundle.side_effect = Exception("LTA DB started on fire again")
    mock_quarantine = mocker.patch("lta.unpacker.Unpacker._quarantine_bundle", new_callable=AsyncMock)
    unpacker = Unpacker(config, mocker.MagicMock())
    with pytest.raises(Exception):
        await unpacker._do_work_claim()
    mock_request.assert_called_with("POST", '/Bundles/actions/pop?source=NERSC&dest=WIPAC&status=unpacking', mocker.ANY)
    mock_do_work_bundle.assert_called_with(mock_request, bundle_obj)
    # The failing bundle is quarantined with the exception text as the reason.
    mock_quarantine.assert_called_with(mock_request, bundle_obj, "LTA DB started on fire again")
@pytest.mark.asyncio
async def test_unpacker_do_work_bundle_once_and_die(config, mocker):
    """Test _do_work with RUN_ONCE_AND_DIE enabled and no work available."""
    run_once_config = dict(config, RUN_ONCE_AND_DIE="True")
    mock_claim = mocker.patch("lta.unpacker.Unpacker._do_work_claim", new_callable=AsyncMock)
    mock_claim.return_value = False
    mock_sys_exit = mocker.patch("sys.exit")
    unpacker = Unpacker(run_once_config, mocker.MagicMock())
    assert not await unpacker._do_work()
    sys_exit_mock_was_not_used = mock_sys_exit.assert_not_called()
@pytest.mark.asyncio
async def test_unpacker_quarantine_bundle_with_reason(config, mocker):
    """Test that _quarantine_bundle PATCHes the failed Bundle in the LTA DB."""
    mock_lta_rc = mocker.patch("rest_tools.client.RestClient", new_callable=AsyncMock)
    unpacker = Unpacker(config, mocker.MagicMock())
    bundle = {"uuid": "c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003"}
    await unpacker._quarantine_bundle(mock_lta_rc, bundle, "Rucio caught fire, then we roasted marshmellows.")
    mock_lta_rc.request.assert_called_with("PATCH", "/Bundles/c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003", mocker.ANY)
@pytest.mark.asyncio
async def test_unpacker_quarantine_bundle_with_reason_raises(config, mocker):
    """Test that _quarantine_bundle survives a failing PATCH to the LTA DB."""
    mock_lta_rc = mocker.patch("rest_tools.client.RestClient", new_callable=AsyncMock)
    # Even if the PATCH itself blows up, quarantining must not raise.
    mock_lta_rc.request.side_effect = Exception("Marshmellows were poisoned")
    unpacker = Unpacker(config, mocker.MagicMock())
    bundle = {"uuid": "c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003"}
    await unpacker._quarantine_bundle(mock_lta_rc, bundle, "Rucio caught fire, then we roasted marshmellows.")
    mock_lta_rc.request.assert_called_with("PATCH", "/Bundles/c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003", mocker.ANY)
@pytest.mark.asyncio
async def test_unpacker_update_bundle_in_lta_db(config, mocker):
    """Test that _update_bundle_in_lta_db updates the status of the bundle in the LTA DB."""
    mock_lta_rc = mocker.patch("rest_tools.client.RestClient", new_callable=AsyncMock)
    unpacker = Unpacker(config, mocker.MagicMock())
    bundle = {"uuid": "c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003"}
    assert await unpacker._update_bundle_in_lta_db(mock_lta_rc, bundle)
    mock_lta_rc.request.assert_called_with("PATCH", "/Bundles/c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003", mocker.ANY)
@pytest.mark.asyncio
async def test_unpacker_add_location_to_file_catalog(config, mocker):
    """Test that _add_location_to_file_catalog adds a location in the File Catalog."""
    mock_request = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
    unpacker = Unpacker(config, mocker.MagicMock())
    logical_name = "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000066.tar.bz2"
    bundle_file = {
        "checksum": {
            "sha512": "09de7c539b724dee9543669309f978b172f6c7449d0269fecbb57d0c9cf7db51713fed3a94573c669fe0aa08fa122b41f84a0ea107c62f514b1525efbd08846b",
        },
        "file_size": 105311728,
        "logical_name": logical_name,
        "meta_modify_date": "2020-02-20 22:47:25.180303",
        "uuid": "2f0cb3c8-6cba-49b1-8eeb-13e13fed41dd",
    }
    assert await unpacker._add_location_to_file_catalog(bundle_file)
    # The new location record points the DEST_SITE at the unpacked path.
    mock_request.assert_called_with(
        "POST",
        "/api/files/2f0cb3c8-6cba-49b1-8eeb-13e13fed41dd/locations",
        {
            "locations": [
                {
                    "site": "WIPAC",
                    "path": logical_name,
                }
            ]
        },
    )
@pytest.mark.asyncio
async def test_unpacker_do_work_bundle(config, mocker):
    """Test that _do_work_bundle does the work of preparing an archive."""
    logger_mock = mocker.MagicMock()
    lta_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
    # Stub out the zipfile machinery so no real archive is opened or extracted.
    mock_zipfile_init = mocker.patch("zipfile.ZipFile.__init__")
    mock_zipfile_init.return_value = None
    mock_zipfile_write = mocker.patch("zipfile.ZipFile.extractall")
    mock_zipfile_write.return_value = None
    # Manifest metadata the unpacker reads via json.load for the bundle.
    mock_json_load = mocker.patch("json.load")
    mock_json_load.return_value = {
        "files": [
            {
                "logical_name": "/full/path/to/file/in/data/warehouse.tar.bz2",
                "file_size": 1234567890,
                "checksum": {
                    "adler32": "89d5efeb",
                    "sha512": "c919210281b72327c179e26be799b06cdaf48bf6efce56fb9d53f758c1b997099831ad05453fdb1ba65be7b35d0b4c5cebfc439efbdf83317ba0e38bf6f42570",
                },
            }
        ]
    }
    mock_shutil_move = mocker.patch("shutil.move")
    mock_shutil_move.return_value = None
    # Checksums and size reported for the unpacked file match the manifest
    # exactly, so verification is expected to succeed.
    mock_lta_checksums = mocker.patch("lta.unpacker.lta_checksums")
    mock_lta_checksums.return_value = {
        "adler32": "89d5efeb",
        "sha512": "c919210281b72327c179e26be799b06cdaf48bf6efce56fb9d53f758c1b997099831ad05453fdb1ba65be7b35d0b4c5cebfc439efbdf83317ba0e38bf6f42570",
    }
    mock_os_path_getsize = mocker.patch("os.path.getsize")
    mock_os_path_getsize.return_value = 1234567890
    mock_os_remove = mocker.patch("os.remove")
    mock_os_remove.return_value = None
    altfc_mock = mocker.patch("lta.unpacker.Unpacker._add_location_to_file_catalog", new_callable=AsyncMock)
    altfc_mock.return_value = False
    p = Unpacker(config, logger_mock)
    BUNDLE_OBJ = {
        "bundle_path": "/mnt/lfss/jade-lta/bundler_out/9a1cab0a395211eab1cbce3a3da73f88.zip",
        "uuid": "f74db80e-9661-40cc-9f01-8d087af23f56",
        "source": "NERSC",
        "dest": "WIPAC",
        "files": [{"logical_name": "/path/to/a/data/file", }],
    }
    # Patch open() so reading the metadata file yields canned data; the happy
    # path should complete without raising.
    with patch("builtins.open", mock_open(read_data="data")) as metadata_mock:
        await p._do_work_bundle(lta_rc_mock, BUNDLE_OBJ)
        metadata_mock.assert_called_with(mocker.ANY)
@pytest.mark.asyncio
async def test_unpacker_do_work_bundle_mismatch_size(config, mocker):
    """Test that _do_work_bundle raises when the unpacked file size is wrong."""
    logger_mock = mocker.MagicMock()
    lta_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
    # Stub out the zipfile machinery so no real archive is opened or extracted.
    mock_zipfile_init = mocker.patch("zipfile.ZipFile.__init__")
    mock_zipfile_init.return_value = None
    mock_zipfile_write = mocker.patch("zipfile.ZipFile.extractall")
    mock_zipfile_write.return_value = None
    # Manifest says the file should be 1234567890 bytes.
    mock_json_load = mocker.patch("json.load")
    mock_json_load.return_value = {
        "files": [
            {
                "logical_name": "/full/path/to/file/in/data/warehouse.tar.bz2",
                "file_size": 1234567890,
                "checksum": {
                    "adler32": "89d5efeb",
                    "sha512": "c919210281b72327c179e26be799b06cdaf48bf6efce56fb9d53f758c1b997099831ad05453fdb1ba65be7b35d0b4c5cebfc439efbdf83317ba0e38bf6f42570",
                },
            }
        ]
    }
    mock_shutil_move = mocker.patch("shutil.move")
    mock_shutil_move.return_value = None
    mock_lta_checksums = mocker.patch("lta.unpacker.lta_checksums")
    mock_lta_checksums.return_value = {
        "adler32": "89d5efeb",
        "sha512": "c919210281b72327c179e26be799b06cdaf48bf6efce56fb9d53f758c1b997099831ad05453fdb1ba65be7b35d0b4c5cebfc439efbdf83317ba0e38bf6f42570",
    }
    # On disk the file reports a DIFFERENT size -> size verification fails.
    mock_os_path_getsize = mocker.patch("os.path.getsize")
    mock_os_path_getsize.return_value = 234567890
    mock_os_remove = mocker.patch("os.remove")
    mock_os_remove.return_value = None
    altfc_mock = mocker.patch("lta.unpacker.Unpacker._add_location_to_file_catalog", new_callable=AsyncMock)
    altfc_mock.return_value = False
    p = Unpacker(config, logger_mock)
    BUNDLE_OBJ = {
        "bundle_path": "/mnt/lfss/jade-lta/bundler_out/9a1cab0a395211eab1cbce3a3da73f88.zip",
        "uuid": "f74db80e-9661-40cc-9f01-8d087af23f56",
        "source": "NERSC",
        "dest": "WIPAC",
        "files": [{"logical_name": "/path/to/a/data/file", }],
    }
    # The size mismatch must surface as an exception from _do_work_bundle.
    with patch("builtins.open", mock_open(read_data="data")) as metadata_mock:
        with pytest.raises(Exception):
            await p._do_work_bundle(lta_rc_mock, BUNDLE_OBJ)
        metadata_mock.assert_called_with(mocker.ANY)
@pytest.mark.asyncio
async def test_unpacker_do_work_bundle_mismatch_checksum(config, mocker):
    """Test that _do_work_bundle raises when the unpacked file checksum is wrong."""
    logger_mock = mocker.MagicMock()
    lta_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
    # Stub out the zipfile machinery so no real archive is opened or extracted.
    mock_zipfile_init = mocker.patch("zipfile.ZipFile.__init__")
    mock_zipfile_init.return_value = None
    mock_zipfile_write = mocker.patch("zipfile.ZipFile.extractall")
    mock_zipfile_write.return_value = None
    # Manifest carries the expected sha512 for the unpacked file.
    mock_json_load = mocker.patch("json.load")
    mock_json_load.return_value = {
        "files": [
            {
                "logical_name": "/full/path/to/file/in/data/warehouse.tar.bz2",
                "file_size": 1234567890,
                "checksum": {
                    "adler32": "89d5efeb",
                    "sha512": "c919210281b72327c179e26be799b06cdaf48bf6efce56fb9d53f758c1b997099831ad05453fdb1ba65be7b35d0b4c5cebfc439efbdf83317ba0e38bf6f42570",
                },
            }
        ]
    }
    mock_shutil_move = mocker.patch("shutil.move")
    mock_shutil_move.return_value = None
    # Computed sha512 differs from the manifest -> checksum verification fails.
    mock_lta_checksums = mocker.patch("lta.unpacker.lta_checksums")
    mock_lta_checksums.return_value = {
        "adler32": "89d5efeb",
        "sha512": "919210281b72327c179e26be799b06cdaf48bf6efce56fb9d53f758c1b997099831ad05453fdb1ba65be7b35d0b4c5cebfc439efbdf83317ba0e38bf6f42570c",
    }
    mock_os_path_getsize = mocker.patch("os.path.getsize")
    mock_os_path_getsize.return_value = 1234567890
    mock_os_remove = mocker.patch("os.remove")
    mock_os_remove.return_value = None
    altfc_mock = mocker.patch("lta.unpacker.Unpacker._add_location_to_file_catalog", new_callable=AsyncMock)
    altfc_mock.return_value = False
    p = Unpacker(config, logger_mock)
    BUNDLE_OBJ = {
        "bundle_path": "/mnt/lfss/jade-lta/bundler_out/9a1cab0a395211eab1cbce3a3da73f88.zip",
        "uuid": "f74db80e-9661-40cc-9f01-8d087af23f56",
        "source": "NERSC",
        "dest": "WIPAC",
        "files": [{"logical_name": "/path/to/a/data/file", }],
    }
    # The checksum mismatch must surface as an exception from _do_work_bundle.
    with patch("builtins.open", mock_open(read_data="data")) as metadata_mock:
        with pytest.raises(Exception):
            await p._do_work_bundle(lta_rc_mock, BUNDLE_OBJ)
        metadata_mock.assert_called_with(mocker.ANY)
|
import unittest
import os
import gzip
import tempfile
import target_snowflake.file_formats.csv as csv
def _mock_record_to_csv_line(record, schema, data_flattening_max_level=0):
return record
class TestCsv(unittest.TestCase):
    """Unit tests for target_snowflake.file_formats.csv."""

    def setUp(self):
        # Show full diffs for the long SQL string comparisons below.
        self.maxDiff = None
        self.config = {}

    def test_write_record_to_uncompressed_file(self):
        """write_records_to_file writes one line per record to a plain file."""
        records = {
            'pk_1': 'data1,data2,data3,data4',
            'pk_2': 'data5,data6,data7,data8'
        }
        schema = {}
        # Write uncompressed CSV file (delete=False: we reopen by name below
        # and remove it ourselves at the end).
        csv_file = tempfile.NamedTemporaryFile(delete=False)
        with open(csv_file.name, 'wb') as f:
            csv.write_records_to_file(f, records, schema, _mock_record_to_csv_line)
        # Read and validate uncompressed CSV file
        with open(csv_file.name, 'rt') as f:
            self.assertEqual(f.readlines(), ['data1,data2,data3,data4\n',
                                             'data5,data6,data7,data8\n'])
        os.remove(csv_file.name)

    def test_write_records_to_compressed_file(self):
        """write_records_to_file also works through a gzip file object."""
        records = {
            'pk_1': 'data1,data2,data3,data4',
            'pk_2': 'data5,data6,data7,data8'
        }
        schema = {}
        # Write gzip compressed CSV file
        csv_file = tempfile.NamedTemporaryFile(delete=False)
        with gzip.open(csv_file.name, 'wb') as f:
            csv.write_records_to_file(f, records, schema, _mock_record_to_csv_line)
        # Read and validate gzip compressed CSV file
        with gzip.open(csv_file.name, 'rt') as f:
            self.assertEqual(f.readlines(), ['data1,data2,data3,data4\n',
                                             'data5,data6,data7,data8\n'])
        os.remove(csv_file.name)

    def test_record_to_csv_line(self):
        """record_to_csv_line quotes every value and leaves None empty."""
        record = {
            'key1': '1',
            'key2': '2030-01-22',
            'key3': '10000-01-22 12:04:22',
            'key4': '25:01:01',
            'key5': 'I\'m good',
            'key6': None,
        }
        # Schema mixes plain types, 'anyOf' unions and format annotations to
        # exercise the different code paths in record_to_csv_line.
        schema = {
            'key1': {
                'type': ['null', 'string', 'integer'],
            },
            'key2': {
                'anyOf': [
                    {'type': ['null', 'string'], 'format': 'date'},
                    {'type': ['null', 'string']}
                ]
            },
            'key3': {
                'type': ['null', 'string'], 'format': 'date-time',
            },
            'key4': {
                'anyOf': [
                    {'type': ['null', 'string'], 'format': 'time'},
                    {'type': ['null', 'string']}
                ]
            },
            'key5': {
                'type': ['null', 'string'],
            },
            'key6': {
                'type': ['null', 'string'], 'format': 'time',
            },
        }
        # Note: key6 is None, so the line ends with a trailing comma and no value.
        self.assertEqual(csv.record_to_csv_line(record, schema),
                         '"1","2030-01-22","10000-01-22 12:04:22","25:01:01","I\'m good",')

    def test_create_copy_sql(self):
        """create_copy_sql renders a Snowflake COPY INTO statement."""
        self.assertEqual(csv.create_copy_sql(table_name='foo_table',
                                             stage_name='foo_stage',
                                             s3_key='foo_s3_key.csv',
                                             file_format={'format_name': "'foo_file_format'"},
                                             columns=[{'name': 'COL_1'},
                                                      {'name': 'COL_2'},
                                                      {'name': 'COL_3',
                                                       'trans': 'parse_json'}]),
                         "COPY INTO foo_table (COL_1, COL_2, COL_3) FROM "
                         "'@foo_stage/foo_s3_key.csv' "
                         "FILE_FORMAT = (format_name='foo_file_format')")

    def test_create_merge_sql(self):
        """create_merge_sql renders a MERGE with per-column transformations."""
        # 'trans' wraps the positional stage column ($n); empty means no wrap.
        self.assertEqual(csv.create_merge_sql(table_name='foo_table',
                                              stage_name='foo_stage',
                                              s3_key='foo_s3_key.csv',
                                              file_format_name='foo_file_format',
                                              columns=[{'name': 'COL_1', 'trans': ''},
                                                       {'name': 'COL_2', 'trans': ''},
                                                       {'name': 'COL_3', 'trans': 'parse_json'}],
                                              pk_merge_condition='s.COL_1 = t.COL_1'),
                         "MERGE INTO foo_table t USING ("
                         "SELECT ($1) COL_1, ($2) COL_2, parse_json($3) COL_3 "
                         "FROM '@foo_stage/foo_s3_key.csv' "
                         "(FILE_FORMAT => 'foo_file_format')) s "
                         "ON s.COL_1 = t.COL_1 "
                         "WHEN MATCHED THEN UPDATE SET COL_1=s.COL_1, COL_2=s.COL_2, COL_3=s.COL_3 "
                         "WHEN NOT MATCHED THEN "
                         "INSERT (COL_1, COL_2, COL_3) "
                         "VALUES (s.COL_1, s.COL_2, s.COL_3)")
|
<gh_stars>0
#!/usr/bin/env python
#
# Modified work Copyright (c) 2019 by VMware, Inc. ("VMware")
# Original work Copyright (c) 2018 by Network Device Education
# Foundation, Inc. ("NetDEF")
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND VMWARE DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL VMWARE BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
"""
Following tests are covered to test AS-Path functionality:
Setup module:
- Create topology (setup module)
- Bring up topology
- Verify BGP convergence
Test cases:
1. Test next_hop attribute and verify best path is installed as per
reachable next_hop
2. Test aspath attribute and verify best path is installed as per
shortest AS-Path
3. Test localpref attribute and verify best path is installed as per
highest local-preference
4. Test weight attribute and verify best path is installed as per
highest weight
5. Test origin attribute and verify best path is installed as per
IGP>EGP>INCOMPLETE rule
6. Test med attribute and verify best path is installed as per lowest
med value
7. Test admin distance and verify best path is installed as per lowest
admin distance
Teardown module:
- Bring down the topology
- stop routers
"""
import os
import sys
import time
import pytest
# Save the Current Working Directory to find configuration files.
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(CWD, "../"))
# pylint: disable=C0413
# Import topogen and topotest helpers
from lib.topogen import Topogen, get_topogen
# Required to instantiate the topology builder class.
from lib.common_config import (
start_topology,
write_test_header,
write_test_footer,
reset_config_on_routers,
verify_rib,
create_static_routes,
create_prefix_lists,
create_route_maps,
check_address_types,
)
from lib.topolog import logger
from lib.bgp import (
verify_bgp_convergence,
create_router_bgp,
verify_best_path_as_per_bgp_attribute,
verify_best_path_as_per_admin_distance,
)
from lib.topojson import build_config_from_json
# Mark every test in this module as exercising the bgpd and staticd daemons.
pytestmark = [pytest.mark.bgpd, pytest.mark.staticd]
# Address families to test, read from env variables; the test bodies use
# these values as "ipv4"/"ipv6" keys into their configuration dicts.
ADDR_TYPES = check_address_types()
####
def setup_module(mod):
    """
    Set up the pytest environment.

    * `mod`: module name
    """
    # NOTE(review): ADDR_TYPES is declared global here but never reassigned
    # in this function; the declaration appears to be vestigial.
    global ADDR_TYPES
    testsuite_run_time = time.asctime(time.localtime(time.time()))
    logger.info("Testsuite start time: %s", testsuite_run_time)
    logger.info("=" * 40)
    logger.info("Running setup_module to create topology")
    # This function initiates the topology build with Topogen...
    json_file = "{}/bgp_path_attributes.json".format(CWD)
    tgen = Topogen(json_file, mod.__name__)
    # The parsed JSON topology is shared with all test cases via this global.
    global topo
    topo = tgen.json_topo
    # ... and here it calls Mininet initialization functions.
    # Starting topology: create tmp files which are loaded to routers
    # to start daemons and then start routers.
    start_topology(tgen)
    # Creating configuration from JSON
    build_config_from_json(tgen, topo)
    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)
    # All BGP sessions must converge before any test case can run.
    result = verify_bgp_convergence(tgen, topo)
    assert result is True, "setup_module :Failed \n Error:" " {}".format(result)
    logger.info("Running setup_module() done")
def teardown_module():
    """Tear down the pytest environment: stop the topology and log the end time."""
    logger.info("Running teardown_module to delete topology")
    # Stop the topology and remove the temporary files it created.
    get_topogen().stop_topology()
    end_time = time.asctime(time.localtime(time.time()))
    logger.info("Testsuite end time: %s", end_time)
    logger.info("=" * 40)
#####################################################
##
## Testcases
##
#####################################################
def test_next_hop_attribute(request):
    """
    Verify BGP next-hop handling.

    Routes must NOT be installed while their next hop is unreachable; after
    enabling next-hop-self on r2/r3 toward r1, the same routes must be
    installed.
    """
    tgen = get_topogen()
    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Creating configuration from JSON
    reset_config_on_routers(tgen)
    # Api call to advertise networks from r7
    input_dict = {
        "r7": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "192.168.127.12/32"},
                                {"network": "192.168.3.11/32"},
                            ]
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "200:50:2::/128"},
                                {"network": "200:60:2::/128"},
                            ]
                        }
                    },
                }
            }
        }
    }
    result = create_router_bgp(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Verifying RIB routes
    dut = "r1"
    protocol = "bgp"
    # Verification should fail as nexthop-self is not enabled yet.
    for addr_type in ADDR_TYPES:
        result = verify_rib(
            tgen, addr_type, dut, input_dict, protocol=protocol, expected=False
        )
        # BUG FIX: the format() arguments were swapped (addr_type, tc_name)
        # relative to the placeholders, and the message described the
        # opposite condition -- this assert fires when routes ARE present.
        assert result is not True, (
            "Testcase {} : Failed \n "
            "Error: {} routes are unexpectedly present in RIB".format(
                tc_name, addr_type
            )
        )
    # Configure next-hop-self toward r1 on both r2 and r3.
    input_dict_1 = {
        "r2": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
                            }
                        }
                    },
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
                            }
                        }
                    },
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, input_dict_1)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Verifying RIB routes: with a reachable next hop they must be installed.
    dut = "r1"
    protocol = "bgp"
    for addr_type in ADDR_TYPES:
        result = verify_rib(tgen, addr_type, dut, input_dict, protocol=protocol)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )
    write_test_footer(tc_name)
def test_aspath_attribute(request):
    """Verify AS_PATH attribute functionality: the route with the shortest
    AS path is selected as best path, and prepending AS numbers via a
    route map changes the selection."""
    tgen = get_topogen()
    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)
    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)
    # Creating configuration from JSON
    reset_config_on_routers(tgen)
    # Api call to advertise networks from r7 and make next hops reachable
    # from r1 (next-hop-self on r2 and r3).
    input_dict = {
        "r7": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "192.168.127.12/32"},
                                {"network": "192.168.3.11/32"},
                            ]
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "200:50:2::/128"},
                                {"network": "200:60:2::/128"},
                            ]
                        }
                    },
                }
            }
        },
        "r2": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
                            }
                        }
                    },
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
                            }
                        }
                    },
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Verifying best path on r1, selected on the AS_PATH ("path") attribute.
    dut = "r1"
    attribute = "path"
    for addr_type in ADDR_TYPES:
        result = verify_best_path_as_per_bgp_attribute(
            tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )
    # Modify AS-Path and verify best path is changed.
    # Create prefix lists on r3 matching the advertised prefixes.
    input_dict_2 = {
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_ls_1_ipv4": [
                        {
                            "seqid": 10,
                            "network": "192.168.3.11/8",
                            "le": "32",
                            "action": "permit",
                        }
                    ]
                },
                "ipv6": {
                    "pf_ls_1_ipv6": [
                        {
                            "seqid": 10,
                            "network": "200::/8",
                            "le": "128",
                            "action": "permit",
                        }
                    ]
                },
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Create a route map that prepends AS numbers 111 222 to matched routes,
    # lengthening the AS path through r3.
    input_dict_3 = {
        "r3": {
            "route_maps": {
                "RMAP_AS_PATH": [
                    {
                        "action": "permit",
                        "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}},
                        "set": {"path": {"as_num": "111 222", "as_action": "prepend"}},
                    },
                    {
                        "action": "permit",
                        "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}},
                        "set": {"path": {"as_num": "111 222", "as_action": "prepend"}},
                    },
                ]
            }
        }
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Apply the route map inbound on r3's neighbor r5.
    input_dict_4 = {
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r5": {
                                    "dest_link": {
                                        "r3": {
                                            "route_maps": [
                                                {
                                                    "name": "RMAP_AS_PATH",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r5": {
                                    "dest_link": {
                                        "r3": {
                                            "route_maps": [
                                                {
                                                    "name": "RMAP_AS_PATH",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    },
                }
            }
        }
    }
    result = create_router_bgp(tgen, topo, input_dict_4)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    # Verifying best path again: the prepended path must change selection.
    dut = "r1"
    attribute = "path"
    for addr_type in ADDR_TYPES:
        result = verify_best_path_as_per_bgp_attribute(
            tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )
    write_test_footer(tc_name)
def test_localpref_attribute(request):
    """
    Verify LOCAL PREFERENCE attribute functionality.

    r7 advertises networks; a route-map on r2 sets local-preference 1111
    on routes learned inbound from r4, and best path on r1 is verified.
    The route-map is then modified to set local-preference 50 and best
    path is verified again.
    """
    tgen = get_topogen()

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)

    # Creating configuration from JSON
    reset_config_on_routers(tgen)

    # Api call to advertise networks from r7; r2 and r3 set next-hop-self
    # towards r1 so r1 sees reachable paths via both.
    input_dict = {
        "r7": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "192.168.127.12/32"},
                                {"network": "192.168.3.11/32"},
                            ]
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "200:50:2::/128"},
                                {"network": "200:60:2::/128"},
                            ]
                        }
                    },
                }
            }
        },
        "r2": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
                            }
                        }
                    },
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
                            }
                        }
                    },
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Create Prefix list on r2 matching the advertised v4/v6 ranges
    input_dict_2 = {
        "r2": {
            "prefix_lists": {
                "ipv4": {
                    "pf_ls_1_ipv4": [
                        {
                            "seqid": 10,
                            "network": "192.168.3.11/8",
                            "le": "32",
                            "action": "permit",
                        }
                    ]
                },
                "ipv6": {
                    "pf_ls_1_ipv6": [
                        {
                            "seqid": 10,
                            "network": "200::/8",
                            "le": "128",
                            "action": "permit",
                        }
                    ]
                },
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Create route map setting local-preference 1111 on matching prefixes
    input_dict_3 = {
        "r2": {
            "route_maps": {
                "RMAP_LOCAL_PREF": [
                    {
                        "action": "permit",
                        "seq_id": "10",
                        "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}},
                        "set": {"locPrf": 1111},
                    },
                    {
                        "action": "permit",
                        "seq_id": "20",
                        "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}},
                        "set": {"locPrf": 1111},
                    },
                ]
            }
        }
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Configure neighbor for route map (inbound from r4 on r2-link1)
    input_dict_4 = {
        "r2": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r4": {
                                    "dest_link": {
                                        "r2-link1": {
                                            "route_maps": [
                                                {
                                                    "name": "RMAP_LOCAL_PREF",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r4": {
                                    "dest_link": {
                                        "r2-link1": {
                                            "route_maps": [
                                                {
                                                    "name": "RMAP_LOCAL_PREF",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    },
                }
            }
        }
    }
    result = create_router_bgp(tgen, topo, input_dict_4)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Verifying best path on r1 as per highest local-preference
    dut = "r1"
    attribute = "locPrf"
    for addr_type in ADDR_TYPES:
        result = verify_best_path_as_per_bgp_attribute(
            tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    # Modify route map to lower local-preference to 50
    input_dict_3 = {
        "r2": {
            "route_maps": {
                "RMAP_LOCAL_PREF": [
                    {
                        "action": "permit",
                        "seq_id": "10",
                        "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}},
                        "set": {"locPrf": 50},
                    },
                    {
                        "action": "permit",
                        "seq_id": "20",
                        "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}},
                        "set": {"locPrf": 50},
                    },
                ]
            }
        }
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Verifying best path again after the local-preference change
    dut = "r1"
    attribute = "locPrf"
    for addr_type in ADDR_TYPES:
        result = verify_best_path_as_per_bgp_attribute(
            tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    write_test_footer(tc_name)
def test_weight_attribute(request):
    """
    Test configure/modify weight attribute and
    verify best path is installed as per highest weight.

    r7 advertises networks; a route-map on r1 assigns weight 500 to routes
    learned from r2 and best path is verified, then the weight is raised
    to 1000 and best path is verified again.
    """
    tgen = get_topogen()

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)

    # Creating configuration from JSON
    reset_config_on_routers(tgen)

    # Api call to advertise networks from r7; r2/r3 set next-hop-self to r1
    input_dict = {
        "r7": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "192.168.127.12/32"},
                                {"network": "192.168.3.11/32"},
                            ]
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "200:50:2::/128"},
                                {"network": "200:60:2::/128"},
                            ]
                        }
                    },
                }
            }
        },
        "r2": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
                            }
                        }
                    },
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
                            }
                        }
                    },
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Create Prefix list on r1 matching the advertised v4/v6 ranges
    input_dict_2 = {
        "r1": {
            "prefix_lists": {
                "ipv4": {
                    "pf_ls_1_ipv4": [
                        {
                            "seqid": 10,
                            "network": "192.168.3.11/8",
                            "le": "32",
                            "action": "permit",
                        }
                    ]
                },
                "ipv6": {
                    "pf_ls_1_ipv6": [
                        {
                            "seqid": 10,
                            "network": "200::/8",
                            "le": "128",
                            "action": "permit",
                        }
                    ]
                },
            }
        }
    }
    result = create_prefix_lists(tgen, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Create route map setting weight 500 on matching prefixes
    input_dict_3 = {
        "r1": {
            "route_maps": {
                "RMAP_WEIGHT": [
                    {
                        "action": "permit",
                        "seq_id": "5",
                        "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}},
                        "set": {"weight": 500},
                    },
                    {
                        "action": "permit",
                        "seq_id": "10",
                        "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}},
                        "set": {"weight": 500},
                    },
                ]
            }
        }
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Configure neighbor for route map (inbound on r1 from r2)
    input_dict_4 = {
        "r1": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r2": {
                                    "dest_link": {
                                        "r1": {
                                            "route_maps": [
                                                {
                                                    "name": "RMAP_WEIGHT",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r2": {
                                    "dest_link": {
                                        "r1": {
                                            "route_maps": [
                                                {
                                                    "name": "RMAP_WEIGHT",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                }
                            }
                        }
                    },
                }
            }
        }
    }
    result = create_router_bgp(tgen, topo, input_dict_4)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Verifying best path on r1 as per highest weight
    dut = "r1"
    attribute = "weight"
    for addr_type in ADDR_TYPES:
        result = verify_best_path_as_per_bgp_attribute(
            tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    # Modify route map to raise the weight to 1000
    input_dict_3 = {
        "r1": {
            "route_maps": {
                "RMAP_WEIGHT": [
                    {
                        "action": "permit",
                        "seq_id": "5",
                        "match": {"ipv4": {"prefix_lists": "pf_ls_1_ipv4"}},
                        "set": {"weight": 1000},
                    },
                    {
                        "action": "permit",
                        "seq_id": "10",
                        "match": {"ipv6": {"prefix_lists": "pf_ls_1_ipv6"}},
                        "set": {"weight": 1000},
                    },
                ]
            }
        }
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Verifying best path again after the weight change
    dut = "r1"
    attribute = "weight"
    for addr_type in ADDR_TYPES:
        result = verify_best_path_as_per_bgp_attribute(
            tgen, addr_type, dut, {"r7": input_dict["r7"]}, attribute
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    write_test_footer(tc_name)
def test_origin_attribute(request):
    """
    Test origin attribute and verify best path is
    installed as per IGP>EGP>INCOMPLETE rule.

    The same networks reach r1 both as BGP-advertised routes from r4 and
    as redistributed static routes from r5; r1 should prefer the r4 paths
    based on the ORIGIN attribute.
    """
    tgen = get_topogen()

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)

    # Creating configuration from JSON
    reset_config_on_routers(tgen)

    # Api call to advertise networks from r4, set next-hop-self on r2/r3
    # towards r1, and redistribute static+connected on r5.
    input_dict = {
        "r4": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "192.168.127.12/32"},
                                {"network": "192.168.3.11/32"},
                            ]
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "200:50:2::/128"},
                                {"network": "200:60:2::/128"},
                            ]
                        }
                    },
                }
            }
        },
        "r2": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}}
                            }
                        }
                    },
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}}
                            }
                        }
                    },
                }
            }
        },
        "r5": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    },
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Api call to create static routes on r5 for the same networks
    input_dict_3 = {
        "r5": {
            "static_routes": [
                {"network": "192.168.127.12/32", "next_hop": "Null0"},
                {"network": "192.168.3.11/32", "next_hop": "Null0"},
                {"network": "200:50:2::/128", "next_hop": "Null0"},
                {"network": "200:60:2::/128", "next_hop": "Null0"},
            ]
        }
    }
    result = create_static_routes(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Verifying best path on r1 as per the ORIGIN attribute
    dut = "r1"
    attribute = "origin"
    for addr_type in ADDR_TYPES:
        result = verify_best_path_as_per_bgp_attribute(
            tgen, addr_type, dut, {"r4": input_dict["r4"]}, attribute
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    write_test_footer(tc_name)
def test_med_attribute(request):
    """
    Test configure/modify MED attribute and verify best path
    is installed as per lowest med value.

    r4 and r5 advertise the same networks.  Route-maps set MED 100 on r2
    (from r4) and MED 10 on r3 (from r5), so r1 should prefer the r3 path.
    The r3 route-map is then changed to MED 200 and best path is verified
    to move to the r2 path.
    """
    tgen = get_topogen()

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)

    # Creating configuration from JSON
    reset_config_on_routers(tgen)

    # Api call to advertise the same networks from both r4 and r5
    input_dict = {
        "r4": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "192.168.127.12/32"},
                                {"network": "192.168.3.11/32"},
                            ]
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "200:50:2::/128"},
                                {"network": "200:60:2::/128"},
                            ]
                        }
                    },
                }
            }
        },
        "r5": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "192.168.127.12/32"},
                                {"network": "192.168.3.11/32"},
                            ]
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "advertise_networks": [
                                {"network": "200:50:2::/128"},
                                {"network": "200:60:2::/128"},
                            ]
                        }
                    },
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Create Prefix lists on r2 and r3 matching the advertised ranges
    input_dict_2 = {
        "r2": {
            "prefix_lists": {
                "ipv4": {
                    "pf_ls_r2_ipv4": [
                        {
                            "seqid": 10,
                            "network": "192.168.3.11/8",
                            "le": "32",
                            "action": "permit",
                        }
                    ]
                },
                "ipv6": {
                    "pf_ls_r2_ipv6": [
                        {
                            "seqid": 20,
                            "network": "200::/8",
                            "le": "128",
                            "action": "permit",
                        }
                    ]
                },
            }
        },
        "r3": {
            "prefix_lists": {
                "ipv4": {
                    "pf_ls_r3_ipv4": [
                        {
                            "seqid": 10,
                            "network": "192.168.3.11/8",
                            "le": "32",
                            "action": "permit",
                        }
                    ]
                },
                "ipv6": {
                    "pf_ls_r3_ipv6": [
                        {
                            "seqid": 20,
                            "network": "200::/8",
                            "le": "128",
                            "action": "permit",
                        }
                    ]
                },
            }
        },
    }
    result = create_prefix_lists(tgen, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Create route maps: MED 100 on r2, MED 10 on r3
    input_dict_3 = {
        "r2": {
            "route_maps": {
                "RMAP_MED_R2": [
                    {
                        "action": "permit",
                        "seq_id": "10",
                        "match": {"ipv4": {"prefix_lists": "pf_ls_r2_ipv4"}},
                        "set": {"metric": 100},
                    },
                    {
                        "action": "permit",
                        "seq_id": "20",
                        "match": {"ipv6": {"prefix_lists": "pf_ls_r2_ipv6"}},
                        "set": {"metric": 100},
                    },
                ]
            }
        },
        "r3": {
            "route_maps": {
                "RMAP_MED_R3": [
                    {
                        "action": "permit",
                        "seq_id": "10",
                        "match": {"ipv4": {"prefix_lists": "pf_ls_r3_ipv4"}},
                        "set": {"metric": 10},
                    },
                    {
                        "action": "permit",
                        "seq_id": "20",
                        "match": {"ipv6": {"prefix_lists": "pf_ls_r3_ipv6"}},
                        "set": {"metric": 10},
                    },
                ]
            }
        },
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Configure neighbors for the route maps (inbound), with next-hop-self
    # towards r1 on both r2 and r3.
    input_dict_4 = {
        "r2": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r4": {
                                    "dest_link": {
                                        "r2-link1": {
                                            "route_maps": [
                                                {
                                                    "name": "RMAP_MED_R2",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                },
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}},
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r4": {
                                    "dest_link": {
                                        "r2-link1": {
                                            "route_maps": [
                                                {
                                                    "name": "RMAP_MED_R2",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                },
                                "r1": {"dest_link": {"r2": {"next_hop_self": True}}},
                            }
                        }
                    },
                }
            }
        },
        "r3": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}},
                                "r5": {
                                    "dest_link": {
                                        "r3": {
                                            "route_maps": [
                                                {
                                                    "name": "RMAP_MED_R3",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                },
                            }
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "neighbor": {
                                "r1": {"dest_link": {"r3": {"next_hop_self": True}}},
                                "r5": {
                                    "dest_link": {
                                        "r3": {
                                            "route_maps": [
                                                {
                                                    "name": "RMAP_MED_R3",
                                                    "direction": "in",
                                                }
                                            ]
                                        }
                                    }
                                },
                            }
                        }
                    },
                }
            }
        },
    }
    result = create_router_bgp(tgen, topo, input_dict_4)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Verifying best path on r1 as per lowest MED
    dut = "r1"
    attribute = "metric"
    for addr_type in ADDR_TYPES:
        result = verify_best_path_as_per_bgp_attribute(
            tgen, addr_type, dut, input_dict, attribute
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    # Modify route-map to set med value 200 on r3 (now higher than r2's 100)
    input_dict_3 = {
        "r3": {
            "route_maps": {
                "RMAP_MED_R3": [
                    {
                        "action": "permit",
                        "seq_id": "10",
                        "match": {"ipv4": {"prefix_lists": "pf_ls_r3_ipv4"}},
                        "set": {"metric": 200},
                    },
                    {
                        "action": "permit",
                        "seq_id": "20",
                        "match": {"ipv6": {"prefix_lists": "pf_ls_r3_ipv6"}},
                        "set": {"metric": 200},
                    },
                ]
            }
        }
    }
    result = create_route_maps(tgen, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Verifying best path again after the MED change
    dut = "r1"
    attribute = "metric"
    for addr_type in ADDR_TYPES:
        result = verify_best_path_as_per_bgp_attribute(
            tgen, addr_type, dut, input_dict, attribute
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    write_test_footer(tc_name)

    # Uncomment next line for debugging
    # tgen.mininet_cli()
def test_admin_distance(request):
    """
    Verify admin distance functionality.

    Two static routes per address family are configured on r2 for the same
    network with admin distances 80 and 60; after redistribution into BGP,
    r1 must install the path with the lower admin distance (60).
    """
    tgen = get_topogen()

    # Don't run this test if we have any failure.
    if tgen.routers_have_failure():
        pytest.skip(tgen.errors)

    # test case name
    tc_name = request.node.name
    write_test_header(tc_name)

    # Creating configuration from JSON
    reset_config_on_routers(tgen)

    # Api call to create static routes: duplicate networks with differing
    # admin distances so best-path selection can be observed.
    input_dict = {
        "r2": {
            "static_routes": [
                {
                    "network": "192.168.127.12/32",
                    "admin_distance": 80,
                    "next_hop": "10.0.0.14",
                },
                {
                    "network": "192.168.127.12/32",
                    "admin_distance": 60,
                    "next_hop": "10.0.0.18",
                },
                {
                    "network": "200:50:2::/128",
                    "admin_distance": 80,
                    "next_hop": "fd00::1",
                },
                {
                    "network": "200:50:2::/128",
                    "admin_distance": 60,
                    "next_hop": "fd00::1",
                },
            ]
        }
    }
    result = create_static_routes(tgen, input_dict)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Api call to redistribute static routes into BGP on r2.
    input_dict_2 = {
        "r2": {
            "bgp": {
                "address_family": {
                    "ipv4": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    },
                    "ipv6": {
                        "unicast": {
                            "redistribute": [
                                {"redist_type": "static"},
                                {"redist_type": "connected"},
                            ]
                        }
                    },
                }
            }
        }
    }
    # Bug fix: the `topo` argument was missing here, unlike every other
    # create_router_bgp() call in this file.
    result = create_router_bgp(tgen, topo, input_dict_2)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)

    # Verifying best path: per address family, the route with the lower
    # admin distance must win.
    dut = "r1"
    attribute = "admin_distance"

    input_dict = {
        "ipv4": {
            "r2": {
                "static_routes": [
                    {
                        "network": "192.168.127.12/32",
                        "admin_distance": 80,
                        "next_hop": "10.0.0.14",
                    },
                    {
                        "network": "192.168.127.12/32",
                        "admin_distance": 60,
                        "next_hop": "10.0.0.18",
                    },
                ]
            }
        },
        "ipv6": {
            "r2": {
                "static_routes": [
                    {
                        "network": "200:50:2::/128",
                        "admin_distance": 80,
                        "next_hop": "fd00::1",
                    },
                    {
                        "network": "200:50:2::/128",
                        "admin_distance": 60,
                        "next_hop": "fd00::1",
                    },
                ]
            }
        },
    }

    for addr_type in ADDR_TYPES:
        result = verify_best_path_as_per_admin_distance(
            tgen, addr_type, dut, input_dict[addr_type], attribute
        )
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )

    write_test_footer(tc_name)
if __name__ == "__main__":
    # Run pytest on this file with output capturing disabled ("-s"),
    # forwarding any extra command-line arguments.
    sys.exit(pytest.main(["-s"] + sys.argv[1:]))
|
<gh_stars>10-100
import unittest
from nose.tools import assert_equal
from mock import Mock, patch, ANY
from tests.tools import create_mock_json
from twilio.rest.resources import Call, Calls
AUTH = ('foo', 'bar')
class CallFeedbackTest(unittest.TestCase):
    """Tests for fetching and creating feedback on a single Call resource."""

    @patch('twilio.rest.resources.base.make_twilio_request')
    def test_get_call_feedback(self, request):
        """GET on a call's feedback subresource parses score and issues."""
        resp = create_mock_json('tests/resources/call_feedback.json')
        request.return_value = resp

        mock = Mock()
        mock.uri = '/base'
        call = Call(mock, 'CA123')
        call.load_subresources()

        feedback = call.feedback.get()
        # Fix: dropped the stray third positional argument (5) that was
        # being passed to assert_equal as the failure message.
        assert_equal(5, feedback.quality_score)
        assert_equal(['imperfect-audio', 'post-dial-delay'], feedback.issues)

    @patch('twilio.rest.resources.base.make_twilio_request')
    def test_create_call_feedback(self, request):
        """Creating feedback POSTs the expected payload and parses the reply."""
        resp = create_mock_json('tests/resources/call_feedback.json')
        resp.status_code = 201
        request.return_value = resp

        mock = Mock()
        mock.uri = '/base'
        mock.auth = AUTH
        call = Call(mock, 'CA123')
        call.load_subresources()

        feedback = call.feedback.create(
            quality_score=5,
            issues=['imperfect-audio', 'post-dial-delay'],
        )

        exp_data = {
            'QualityScore': 5,
            'Issues': ['imperfect-audio', 'post-dial-delay'],
        }
        # Fix: dropped the stray third positional argument (5) that was
        # being passed to assert_equal as the failure message.
        assert_equal(5, feedback.quality_score)
        assert_equal(['imperfect-audio', 'post-dial-delay'], feedback.issues)
        request.assert_called_with(
            "POST", "/base/CA123/Feedback",
            data=exp_data, auth=AUTH,
            timeout=ANY, use_json_extension=True,
        )

    @patch('twilio.rest.resources.base.make_twilio_request')
    def test_create_call_feedback_one_request(self, request):
        """Calls.feedback() creates feedback with a single POST request."""
        resp = create_mock_json('tests/resources/call_feedback.json')
        resp.status_code = 201
        request.return_value = resp

        base_uri = 'https://api.twilio.com/2010-04-01/Accounts/AC123'
        account_sid = 'AC123'
        auth = (account_sid, "token")
        calls = Calls(base_uri, auth)

        uri = "%s/Calls/CA123/Feedback" % base_uri
        feedback = calls.feedback(
            'CA123',
            quality_score=5,
            issue=['imperfect-audio', 'post-dial-delay']
        )

        exp_data = {
            'QualityScore': 5,
            'Issue': ['imperfect-audio', 'post-dial-delay'],
        }
        assert_equal(['imperfect-audio', 'post-dial-delay'], feedback.issues)
        request.assert_called_with(
            "POST", uri,
            data=exp_data, auth=auth,
            use_json_extension=True,
        )
class CallFeedbackSummaryTest(unittest.TestCase):
    """Test fetching the aggregate feedback summary across calls."""

    @patch('twilio.rest.resources.base.make_twilio_request')
    def test_get_call_feedback_summary(self, request):
        """GET on the Calls summary endpoint parses the aggregate counts."""
        mock_response = create_mock_json('tests/resources/call_feedback_summary.json')
        request.return_value = mock_response

        account_sid = 'AC123'
        credentials = (account_sid, "token")
        base_uri = 'https://api.twilio.com/2010-04-01/Accounts/AC123'
        expected_uri = "%s/Calls/Summary" % base_uri

        summary = Calls(base_uri, credentials).summary.get()

        assert_equal(10200, summary.call_count)
        assert_equal(729, summary.call_feedback_count)
        request.assert_called_with('GET', expected_uri, params={}, auth=credentials,
                                   use_json_extension=True)
|
"""
main.py
Handles all accessible URI's for flask
"""
from flask import Flask, render_template, request, g
from darkweb.modules.base.crawlerconfig import CrawlerConfig
from darkweb.modules.irc.irc import IRC
from darkweb.modules.web.WebCrawler import WebCrawler
from darkweb.modules.parser.search import search
from darkweb.modules.parser.parser import parser
import SocketServer
import threading
import datetime
import sys
from threading import Thread
from darkweb import app
"""
before_first
Initialize parser before first request is served.
"""
@app.before_first_request
def before_first():
    """Start the parser's TCP server in a background thread before the
    first request is served."""
    print("views.py - init start")
    host, port = "0.0.0.0", 4443
    tcp_server = SocketServer.ThreadingTCPServer((host, port), parser)
    Thread(target=tcp_server.serve_forever).start()
    print("views.py - init end")
"""
addParam
Add a search term or a regular expression to the parser.
Future work: This code causes parser and website to be on same box.
Refactor to utilize connections to send terms to the parser.
"""
@app.route("/addParam", methods=["POST"])
def addParam():
# add new param
if request.form['addST']:
print("Adding searchterm")
st_terms = request.form['addST'].split(",")
# add new search term
for st in st_terms:
search().add_searchterm(st)
if request.form['addRE']:
re_terms = request.form['addRE'].split(",")
# add new regex
for re in re_terms:
search().add_searchterm(re)
print("Adding regularexpression")
result = readSearchFile()
msg = "Successfull added search parameters"
return render_template("index.html", result=result)
"""
clearParams
Remove all search terms from the parser.
Future work: This code causes parser and website to be on same box.
Refactor to utilize connections to send terms to the parser.
"""
@app.route("/clearParams", methods=["POST"])
def clearParams():
# clear all params
result = readSearchFile()
msg = "Successfull cleared search parameters"
return render_template("index.html", result=result)
"""
home (get)
Serve index page.
"""
@app.route("/", methods=["GET"])
def home():
result = readSearchFile()
return render_template("index.html", result=result)
"""
home (post)
Process a search
"""
@app.route("/", methods=["POST"])
def createCrawlerConfig():
print(str(request.form))
print("Post recieved.")
searchName = str(request.form['searchName'])
protocol = str(request.form['protocol']).lower()
print("Post parameters parsed.")
speed = str(request.form['speed'])
maxDepth = str(request.form['maxDepth'])
location = request.form['location']
options_input = str(request.form['options'])
options = makeOptionsDict(options_input)
config = CrawlerConfig(location, protocol, speed, maxDepth, searchName, options)
msg = "Search \"" + searchName + "\" started."
crawler = None
search_params = None
if searchName == "":
msg = "Search failed. Search must be given a name."
elif location == "":
msg = "Search failed. Must give a search location."
elif protocol == "irc":
crawler = IRC(config)
elif protocol == "tor" or protocol == "web":
if not speed.isdigit():
msg = "Search falied. Speed much be an integer."
elif not maxDepth.isdigit():
msg = "Search failed. Max Crawl Depth must be an integer."
crawler = WebCrawler(config)
else:
msg = "Search failed invalid protocol.\nMust be TOR, IRC, or web"
if(crawler):
run_crawl(crawler)
search_params = [("Search Name", searchName), ("Protocol", protocol), ("Speed", speed), ("Max Depth", maxDepth), ("Location", location)]
for label, val in options.iteritems():
search_params.append((label, val))
result = readSearchFile()
return render_template("index.html", msg=msg, search_params=search_params, result=result)
"""
readSearchFile
read from /tmp/searches.txt and return list of 5 lines
"""
def readSearchFile():
    """Return up to the first five lines of /tmp/searches.txt.

    Robustness fix: returns an empty list when the file does not exist yet
    (no search has been recorded) instead of raising IOError, which would
    previously break the index page before the first search.
    """
    result = []
    try:
        with open("/tmp/searches.txt", "r") as f:
            for line in f:
                if len(result) >= 5:
                    break
                result.append(line)
    except IOError:
        # No searches recorded yet -- nothing to show.
        pass
    return result
"""
writeSearchFile
Writes to file in /tmp the datetime a search was started
"""
def writeSearchFile(searchName):
    """Append a '"name": YYYY-MM-DD HH:MM:SS' record to /tmp/searches.txt."""
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    record = '"%s": %s\n' % (searchName, timestamp)
    with open("/tmp/searches.txt", "a") as f:
        f.write(record)
    return
"""
run_crawl
Run the specified cralwer
Call do crawl in new thread
"""
def run_crawl(crawler, args=None):
    """Launch crawler.doCrawl on a new thread and record the search start.

    ``args`` is currently unused: doCrawl is invoked with no arguments.
    """
    worker = threading.Thread(target=crawler.doCrawl)
    worker.start()
    writeSearchFile(crawler.config.name)
    return
"""
makeOptionsDict
format options string from form and create options dict for crawlerconfig
returns options dict
"""
def makeOptionsDict(options_input):
    """Parse a comma-separated "key:value" options string into a dict.

    All spaces are stripped from each entry; entries that do not contain
    exactly one ':' are silently ignored.
    """
    options = {}
    for entry in options_input.split(','):
        pieces = entry.replace(" ", "").split(':')
        if len(pieces) == 2:
            key, value = pieces
            options[key] = value
    return options
|
import duckdb
try:
import pyarrow as pa
import pyarrow.parquet
import numpy as np
import pandas as pd
import pytest
can_run = True
except:
can_run = False
def compare_results(query):
    """Assert that a query's result is unchanged after an Arrow roundtrip."""
    direct = duckdb.query(query).fetchall()
    roundtrip = duckdb.from_arrow(duckdb.query(query).arrow()).fetchall()
    assert direct == roundtrip
def arrow_to_pandas(query):
    """Run *query*, convert its Arrow result to pandas, and return column 'a' as a list."""
    frame = duckdb.query(query).arrow().to_pandas()
    return frame['a'].values.tolist()
class TestArrowNested(object):
def test_lists_basic(self,duckdb_cursor):
if not can_run:
return
#Test Constant List
query = duckdb.query("SELECT a from (select list_value(3,5,10) as a) as t").arrow()['a'].to_numpy()
assert query[0][0] == 3
assert query[0][1] == 5
assert query[0][2] == 10
# Empty List
query = duckdb.query("SELECT a from (select list_value() as a) as t").arrow()['a'].to_numpy()
assert len(query[0]) == 0
#Test Constant List With Null
query = duckdb.query("SELECT a from (select list_value(3,NULL) as a) as t").arrow()['a'].to_numpy()
assert query[0][0] == 3
assert np.isnan(query[0][1])
def test_list_types(self,duckdb_cursor):
if not can_run:
return
#Large Lists
data = pyarrow.array([[1],None, [2]], type=pyarrow.large_list(pyarrow.int64()))
arrow_table = pa.Table.from_arrays([data],['a'])
rel = duckdb.from_arrow(arrow_table)
res = rel.execute().fetchall()
assert res == [([1],), (None,), ([2],)]
#Fixed Size Lists
data = pyarrow.array([[1],None, [2]], type=pyarrow.list_(pyarrow.int64(),1))
arrow_table = pa.Table.from_arrays([data],['a'])
rel = duckdb.from_arrow(arrow_table)
res = rel.execute().fetchall()
assert res == [([1],), (None,), ([2],)]
#Complex nested structures with different list types
data = [pyarrow.array([[1],None, [2]], type=pyarrow.list_(pyarrow.int64(),1)),pyarrow.array([[1],None, [2]], type=pyarrow.large_list(pyarrow.int64())),pyarrow.array([[1,2,3],None, [2,1]], type=pyarrow.list_(pyarrow.int64()))]
arrow_table = pa.Table.from_arrays([data[0],data[1],data[2]],['a','b','c'])
rel = duckdb.from_arrow(arrow_table)
res = rel.project('a').execute().fetchall()
assert res == [([1],), (None,), ([2],)]
res = rel.project('b').execute().fetchall()
assert res == [([1],), (None,), ([2],)]
res = rel.project('c').execute().fetchall()
assert res == [([1,2,3],), (None,), ([2,1],)]
#Struct Holding different List Types
struct = [pa.StructArray.from_arrays( data,['fixed', 'large','normal'])]
arrow_table = pa.Table.from_arrays(struct,['a'])
rel = duckdb.from_arrow(arrow_table)
res = rel.execute().fetchall()
assert res == [({'fixed': [1], 'large': [1], 'normal': [1, 2, 3]},), ({'fixed': None, 'large': None, 'normal': None},), ({'fixed': [2], 'large': [2], 'normal': [2, 1]},)]
def test_lists_roundtrip(self,duckdb_cursor):
if not can_run:
return
# Integers
compare_results("SELECT a from (select list_value(3,5,10) as a) as t")
compare_results("SELECT a from (select list_value(3,5,NULL) as a) as t")
compare_results("SELECT a from (select list_value(NULL,NULL,NULL) as a) as t")
compare_results("SELECT a from (select list_value() as a) as t")
#Strings
compare_results("SELECT a from (select list_value('test','test_one','test_two') as a) as t")
compare_results("SELECT a from (select list_value('test','test_one',NULL) as a) as t")
#Big Lists
compare_results("SELECT a from (SELECT LIST(i) as a FROM range(10000) tbl(i)) as t")
#Multiple Lists
compare_results("SELECT a from (SELECT LIST(i) as a FROM range(10000) tbl(i) group by i%10) as t")
#Unique Constants
compare_results("SELECT a from (SELECT list_value(1) as a FROM range(10) tbl(i)) as t")
#Nested Lists
compare_results("SELECT LIST(le) FROM (SELECT LIST(i) le from range(100) tbl(i) group by i%10) as t")
#LIST[LIST[LIST[LIST[LIST[INTEGER]]]]]]
compare_results("SELECT list (lllle) llllle from (SELECT list (llle) lllle from (SELECT list(lle) llle from (SELECT LIST(le) lle FROM (SELECT LIST(i) le from range(100) tbl(i) group by i%10) as t) as t1) as t2) as t3")
compare_results('''SELECT grp,lst,cs FROM (select grp, lst, case when grp>1 then lst else list_value(null) end as cs
from (SELECT a%4 as grp, list(a) as lst FROM range(7) tbl(a) group by grp) as lst_tbl) as T;''')
#Tests for converting multiple lists to/from Arrow with NULL values and/or strings
compare_results("SELECT list(st) from (select i, case when i%10 then NULL else i::VARCHAR end as st from range(1000) tbl(i)) as t group by i%5")
def test_struct_roundtrip(self,duckdb_cursor):
if not can_run:
return
compare_results("SELECT a from (SELECT STRUCT_PACK(a := 42, b := 43) as a) as t")
compare_results("SELECT a from (SELECT STRUCT_PACK(a := NULL, b := 43) as a) as t")
compare_results("SELECT a from (SELECT STRUCT_PACK(a := NULL) as a) as t")
compare_results("SELECT a from (SELECT STRUCT_PACK(a := i, b := i) as a FROM range(10000) tbl(i)) as t")
compare_results("SELECT a from (SELECT STRUCT_PACK(a := LIST_VALUE(1,2,3), b := i) as a FROM range(10000) tbl(i)) as t")
def test_map_roundtrip(self,duckdb_cursor):
if not can_run:
return
compare_results("SELECT a from (select MAP(LIST_VALUE(1, 2, 3, 4),LIST_VALUE(10, 9, 8, 7)) as a) as t")
with pytest.raises(Exception):
compare_results("SELECT a from (select MAP(LIST_VALUE(1, 2, 3, 4,2, NULL),LIST_VALUE(10, 9, 8, 7,11,42)) as a) as t")
compare_results("SELECT a from (select MAP(LIST_VALUE(),LIST_VALUE()) as a) as t")
compare_results("SELECT a from (select MAP(LIST_VALUE('<NAME>', 'Backstreet Boys', 'Tenacious D','<NAME>' ),LIST_VALUE(10,9,10,11)) as a) as t")
with pytest.raises(Exception):
compare_results("SELECT a from (select MAP(LIST_VALUE('<NAME>', NULL, 'Tenacious D',NULL,NULL ),LIST_VALUE(10,9,10,11,13)) as a) as t")
with pytest.raises(Exception):
compare_results("SELECT a from (select MAP(LIST_VALUE(NULL, NULL, NULL,NULL,NULL ),LIST_VALUE(10,9,10,11,13)) as a) as t")
with pytest.raises(Exception):
compare_results("SELECT a from (select MAP(LIST_VALUE(NULL, NULL, NULL,NULL,NULL ),LIST_VALUE(NULL, NULL, NULL,NULL,NULL )) as a) as t")
compare_results("SELECT m from (select MAP(list_value(1), list_value(2)) from range(5) tbl(i)) tbl(m)")
compare_results("SELECT m from (select MAP(lsta,lstb) as m from (SELECT list(i) as lsta, list(i) as lstb from range(10000) tbl(i) group by i%5) as lst_tbl) as T")
def test_map_arrow_to_pandas(self,duckdb_cursor):
if not can_run:
return
assert arrow_to_pandas("SELECT a from (select MAP(LIST_VALUE(1, 2, 3, 4),LIST_VALUE(10, 9, 8, 7)) as a) as t") == [[(1, 10), (2, 9), (3, 8), (4, 7)]]
assert arrow_to_pandas("SELECT a from (select MAP(LIST_VALUE(),LIST_VALUE()) as a) as t") == [[]]
assert arrow_to_pandas("SELECT a from (select MAP(LIST_VALUE('<NAME>', 'Backstreet Boys', 'Tenacious D','<NAME>' ),LIST_VALUE(10,9,10,11)) as a) as t") == [[('<NAME>', 10), ('Backstreet Boys', 9), ('Tenacious D', 10), ('<NAME>', 11)]]
assert arrow_to_pandas("SELECT a from (select MAP(list_value(1), list_value(2)) from range(5) tbl(i)) tbl(a)") == [[(1, 2)], [(1, 2)], [(1, 2)], [(1, 2)], [(1, 2)]]
assert arrow_to_pandas("SELECT MAP(LIST_VALUE({'i':1,'j':2},{'i':3,'j':4}),LIST_VALUE({'i':1,'j':2},{'i':3,'j':4})) as a") == [[({'i': 1, 'j': 2}, {'i': 1, 'j': 2}), ({'i': 3, 'j': 4}, {'i': 3, 'j': 4})]]
def test_frankstein_nested(self, duckdb_cursor):
    """Round-trip a grab-bag of deeply nested types (lists / structs / maps in
    every combination, with NULLs sprinkled in) through Arrow."""
    if not can_run:
        return
    queries = (
        # List of structs W/ Struct that is NULL entirely
        "SELECT [{'i':1,'j':2},NULL,{'i':2,'j':NULL}]",
        # Lists of structs with lists
        "SELECT [{'i':1,'j':[2,3]},NULL]",
        # Maps embedded in a struct
        "SELECT {'i':mp,'j':mp2} FROM (SELECT MAP(LIST_VALUE(1, 2, 3, 4),LIST_VALUE(10, 9, 8, 7)) as mp, MAP(LIST_VALUE(1, 2, 3, 5),LIST_VALUE(10, 9, 8, 7)) as mp2) as t",
        # List of maps
        "SELECT [mp,mp2] FROM (SELECT MAP(LIST_VALUE(1, 2, 3, 4),LIST_VALUE(10, 9, 8, 7)) as mp, MAP(LIST_VALUE(1, 2, 3, 5),LIST_VALUE(10, 9, 8, 7)) as mp2) as t",
        # Map with list as key and/or value
        "SELECT MAP(LIST_VALUE([1,2],[3,4],[5,4]),LIST_VALUE([1,2],[3,4],[5,4]))",
        # Map with struct as key and/or value
        "SELECT MAP(LIST_VALUE({'i':1,'j':2},{'i':3,'j':4}),LIST_VALUE({'i':1,'j':2},{'i':3,'j':4}))",
        # Struct that is NULL entirely
        "SELECT * FROM (VALUES ({'i':1,'j':2}), (NULL), ({'i':1,'j':2}), (NULL)) as a",
        # Null checks on lists with structs
        "SELECT [{'i':1,'j':[2,3]},NULL,{'i':1,'j':[2,3]}]",
        # MAP that is NULL entirely
        "SELECT * FROM (VALUES (MAP(LIST_VALUE(1,2),LIST_VALUE(3,4))),(NULL), (MAP(LIST_VALUE(1,2),LIST_VALUE(3,4))), (NULL)) as a",
    )
    for query in queries:
        compare_results(query)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Compute per-channel global means and standard deviations for the dHCP Bayley
dataset by streaming every sample through My_dHCP_Data (defined below).

Created on Tue Feb 2 14:07:44 2021
@author: fa19
"""
import numpy as np
import matplotlib.pyplot as plt
# Subject table: column 0 = file stem ('sub-X_ses-Y'), last column = float label.
ds_arr = np.load('/home/fa19/Documents/Benchmarking/data/bayley/full.npy', allow_pickle=True)
import numpy as np  # NOTE(review): duplicate import, numpy already imported above
import torch
import random
from scipy.interpolate import griddata
import nibabel as nb
# Precomputed per-channel normalisation statistics for the various prediction
# targets. NOTE(review): channel meanings (4 vs 9 channels) are not visible
# here -- confirm against the dataset documentation.
means_birth_age = torch.Tensor([1.18443463, 0.0348339 , 1.02189593, 0.12738451])
stds_birth_age = torch.Tensor([0.39520042, 0.19205919, 0.37749157, 4.16265044])
means_birth_age_confounded = means_birth_age
stds_birth_age_confounded = stds_birth_age
means_scan_age = torch.Tensor([1.16332048, 0.03618059, 1.01341462, 0.09550486])
stds_scan_age = torch.Tensor([0.39418309, 0.18946538, 0.37818974, 4.04483381])
means_bayley = torch.Tensor([0.03561912, 0.1779468, 1.02368241, 1.30365072, 1.42005161, 1.80373678, 1.0485854, 1.44855442, 0.74604417])
stds_bayley = torch.Tensor([0.19094736, 4.11706815, 0.37789417, 4.61303946, 5.08495779, 4.94774891, 4.72248912, 4.22112396, 4.48455344])
# This run targets the Bayley scores.
means = means_bayley
stds = stds_bayley
# Lookup tables used by My_dHCP_Data: rotation vertex ids and the left/right
# mirroring index array.
rotation_arr = np.load('data/rotations_array.npy').astype(int)
reversing_arr = np.load('data/reversing_arr.npy')
# Equirectangular projection support (not used by My_dHCP_Data itself).
xy_points = np.load('data/equirectangular_ico_6_points.npy')
xy_points[:,0] = (xy_points[:,0] + 0.1)%1  # shift x by 0.1 and wrap into [0, 1)
grid = np.load('data/grid_170_square.npy')
grid_x, grid_y = np.meshgrid(np.linspace(0.02, 0.98, 170), np.linspace(0.02, 0.98, 170))
grid[:,0] = grid_x.flatten()
grid[:,1] = grid_y.flatten()
from scipy.interpolate import griddata  # NOTE(review): duplicate import
from torch_geometric.data import Data
class My_dHCP_Data(torch.utils.data.Dataset):
    """A full dataset for the dHCP data. Can include warps, rotations and parity flips.

    Filename style:
        in the input array: only 'sub-X_ses-Y'
        on disk:
            Left  = 'sub-X_ses-Y_L'
            Right = 'sub-X_ses-Y_R'
            if warped: 'sub-X_ses-Y_L_W1'

    Args:
        input_arr: numpy array of size Nx2 (or Nx3).
            FIRST column MUST be the filename (excluding directory AND the L/R
            suffix) of the merged nibabel files.
            LAST column must be the (float) label.
            (OPTIONAL) middle column, if size 3, is any confounding metadata
            (also float, e.g. scan age for predicting birth age).
        warped_files_directory: directory read from when number_of_warps != 0.
        unwarped_files_directory: directory read from otherwise.
        rotations (bool): whether to apply a random precomputed rotation.
        number_of_warps (int): warps are selected via the filename suffix _WX;
            the number chosen here cannot exceed the warps present on disk.
        parity_choice (IMPORTANT!): defines left and right-ness.
            'left'     -> outputs ONLY left hemisphere data.
            'both'     -> dataset doubles in length; the second half of the
                          index space yields right-hemisphere data, mirrored
                          onto the left template via reversing_arr.
            'combined' -> outputs a combined list (left first), eventually read
                          as a file with twice the number of input channels.
        sample_only (bool): if False and warps are used, the dataset length is
            multiplied by number_of_warps.
        output_as_torch (bool): return torch Tensors (usually yes).
    """

    def __init__(self, input_arr, warped_files_directory, unwarped_files_directory, rotations=False,
                 number_of_warps=0, parity_choice='left', sample_only=True, output_as_torch=True):
        self.input_arr = input_arr
        # first column = file stems, last column = float labels
        self.image_files = input_arr[:, 0]
        self.label = input_arr[:, -1]
        self.rotations = rotations
        self.number_of_warps = number_of_warps
        self.parity = parity_choice
        self.sample_only = sample_only
        self.output_as_torch = output_as_torch
        # Warped and unwarped images live in different directories.
        # (0 and None both mean "no warps".)
        if self.number_of_warps:
            self.directory = warped_files_directory
        else:
            self.directory = unwarped_files_directory

    def __len__(self):
        """Dataset length, accounting for warp multiplication and parity doubling."""
        length = len(self.input_arr)
        if self.number_of_warps != 0 and not self.sample_only:
            length *= self.number_of_warps
        if self.parity == 'both':
            length *= 2
        return length

    def __test_input_params__(self):
        """Sanity-check the constructor arguments (raises AssertionError on misuse)."""
        assert self.input_arr.shape[1] >= 2, 'check your input array is a numpy array of files and labels'
        # fixed: previously checked nonexistent attributes (number_of_rotations,
        # rotation_arr, normalisation) and compared self.rotations == bool.
        assert isinstance(self.number_of_warps, int), "number of warps must be an integer (can be 0)"
        assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both"
        assert isinstance(self.rotations, bool), 'rotations must be boolean'
        if self.rotations:
            # rotation_arr is the module-level table of rotation vertex ids
            assert rotation_arr is not None, 'Must specify a rotation file containing rotation vertex ids if rotations are non-zero'

    def __genfilename__(self, idx, right):
        """Build the on-disk filename(s) for sample idx.

        Returns a list: one name for 'left'/'both', two names (left then right)
        for 'combined'. A random warp suffix _WX is appended when warps are on.
        """
        raw_filename = self.image_files[idx]
        filename = []
        if self.parity != 'combined':
            filename.append(raw_filename + ('_R' if right else '_L'))
        else:
            filename.append(raw_filename + '_L')
            filename.append(raw_filename + '_R')
        if self.number_of_warps != 0:
            # warp '0' means "use the unwarped original", so no suffix is added
            warp_choice = str(random.randint(0, self.number_of_warps))
            if warp_choice != '0':
                filename = [stem + '_W' + warp_choice for stem in filename]
        return filename

    def __getitem__(self, idx):
        """Load the image channel arrays and the label for one sample.

        Returns a dict {'image': ..., 'label': ...}; torch Tensors when
        output_as_torch is True.
        """
        if self.parity == 'both':
            # first half of the index space is left, second half is right
            T = self.__len__() // 2
            idx, right = idx % T, idx // T
        else:
            right = False
        filename = self.__genfilename__(idx, right)
        image_gifti = [nb.load(self.directory + '/' + individual_filename + '.shape.gii').darrays
                       for individual_filename in filename]
        image = []
        # rotation_choice == 0 keeps the original vertex order
        rotation_choice = 0
        if self.rotations:
            rotation_choice = random.randint(0, len(rotation_arr) - 1)
        for file in image_gifti:
            if rotation_choice != 0:
                # re-index every channel with the chosen rotation's vertex ids
                image.extend(item.data[rotation_arr[rotation_choice]] for item in file)
            else:
                image.extend(item.data for item in file)
        if right:
            # mirror right-hemisphere data onto the left template
            image = [item[reversing_arr] for item in image]
        label = self.label[idx]
        if self.output_as_torch:
            image = torch.Tensor(image)
            label = torch.Tensor([label])
        return {'image': image, 'label': label}
#my_ds = My_dHCP_Data(ds_arr, "", '/home/fa19/Documents/dHCP_Data_merged/merged/', parity_choice='both')
# Dataset over both hemispheres; the warped directory is unused ("").
my_ds = My_dHCP_Data(ds_arr, "", '/data/rsn/merged/', parity_choice='both')
# Per-sample, per-channel mean and variance (9 channels assumed per sample --
# TODO confirm the merged gifti files really carry 9 darrays each).
mean_counts = np.zeros([len(my_ds), 9])
var_counts = np.zeros([len(my_ds), 9])
birth_ages = []  # NOTE(review): never filled below; appears unused
for i in range(len(my_ds)):
    item = my_ds[i]
    im = item['image']
    ms = []
    vs = []
    for image in im:
        # drop NaN vertices (e.g. masked regions) before computing statistics
        image = image[torch.logical_not(torch.isnan(image))]
        ms.append(torch.mean(image))
        vs.append(torch.var(image))
    mean_counts[i]= ms
    var_counts[i] = vs
# Pool per-sample statistics via the law of total variance:
# Var_total = E[Var_i] + E[(mean_i - grand_mean)^2]
global_mean = np.mean(mean_counts, axis=0)
global_diffs = mean_counts - global_mean
global_diffs_squared = np.square(global_diffs)
diffs_plus_vars = global_diffs_squared + var_counts
global_var = np.sum( diffs_plus_vars, axis=0) / len(mean_counts)
global_stds = np.sqrt(global_var)
print(global_mean)
print(global_stds)
|
print('\n' * 100 + '@ TelegramBackup is loading...')  # show a banner before the slow imports below
import os.path
from asyncio import run as asyncio_run
from getpass import getpass
from os import system as os_system
from sys import platform
from time import ctime, strftime, time
from traceback import print_exc

from reedsolo import ReedSolomonError
from telethon.errors.rpcerrorlist import (
    AuthKeyUnregisteredError, PhoneCodeInvalidError,
    PasswordHashInvalidError, PhoneNumberOccupiedError,
    PhoneNumberInvalidError, FloodWaitError, PhoneCodeEmptyError,
    FreshChangePhoneForbiddenError
)

from tgback_utils import (
    TelegramAccount, TgbackAES, restore,
    VERSION, qr_available, image_error, scanqrcode
)
# Pick the platform-appropriate terminal-clearing shell command.
if platform.startswith('win'):
    clear_command = 'cls'
elif platform.startswith('cygwin'):
    # ESC c (RIS) resets the terminal under cygwin's POSIX shells
    clear_command = 'printf "\033c"'
else:
    # try an ANSI reset first, fall back to cls/clear if printf is unavailable
    clear_command = "printf '\33c\e[3J' || cls || clear"
def clear_terminal():
    """Wipe the visible terminal and pad the scrollback with blank lines."""
    blank_padding = '\n' * 100
    os_system(clear_command)
    print(blank_padding)
async def main():
    """Interactive console UI for TelegramBackup.

    Shows the main menu (help / create backup / open backup / exit) and drives
    every workflow from it. "Back to main page" navigation is implemented by
    recursively awaiting main() again.

    Fixes applied: restored the corrupted ``c_new_password`` identifier in the
    change-password branch, restored the ``{filename}`` placeholder in the two
    "Successfully encrypted" messages, and renumbered help item "5)" -> "15)"
    to match the help menu.
    """
    try:
        async def request_confirmation_code(request_coroutine, phone: str, account: TelegramAccount=None) -> tuple:
            # Request a confirmation code (login or phone-change) and loop
            # until the user reports having received it.
            # Returns (code, code_hash); code_hash is only meaningful for
            # phone-change requests.
            request_code, code_hash = True, None
            phone = phone.replace(' ','')
            while True:
                clear_terminal()
                if request_code:
                    print('@ Requesting confirmation code...')
                    if account: # if account specified then it's request to change phone
                        if not code_hash:
                            code_hash = await request_coroutine(phone)
                            # code_hash is for request_change_phone_code
                        else:
                            await account.resend_code(phone, code_hash)
                    else:
                        code_hash = await request_coroutine()
                request_time = f'{strftime("%I:%M:%S %p")}'
                clear_terminal()
                print(f'@ Please wait for message or call with code ({phone})')
                print(f'@ Last request sended at {request_time}\n')
                print('> 1) I received the code')
                print('>> 2) I haven\'t recieved code')
                print('>>> 3) Return to main page')
                mode = input('\n@ Input: ')
                if mode == '1':
                    clear_terminal()
                    code = input('> Confirmation Code: ')
                    break
                elif mode == '2':
                    request_code = True
                elif mode == '3':
                    await main()
                else:
                    request_code = False
            clear_terminal()
            return (code, code_hash)

        # ---- main menu: loop until a valid section is chosen ----
        while True:
            clear_terminal()
            about_qr = '' if qr_available else '(not available)'
            print(
                f''' - TelegramBackup {VERSION} (bit.ly/tgback) -\n\n'''
                '''> 0) Switch to the help page\n'''
                '''>> 1) Backup Telegram account\n'''
                '''>>> 2) Open .tgback backup\n'''
                '''>>>> 3) Exit from TelegramBackup'''
                '''\n\n% Press Ctrl+C to back here'''
            )
            selected_section = input('\n@ Input: ')
            if selected_section and selected_section in '0123':
                break
        return_to_main = False
        while True:
            if return_to_main:
                await main()
            clear_terminal()
            # ---- section 0: help / FAQ ----
            if selected_section == '0':
                while True:
                    clear_terminal()
                    print(
                        '''@ Welcome You!\n\n'''
                        ''' 01) Why is it needed?\n'''
                        ''' 02) How it works?\n'''
                        ''' 03) Can tgback back up my messages?\n'''
                        ''' 04) What information do you store?\n'''
                        ''' 05) Do you transfer any of my data?\n'''
                        ''' 06) Why we are need to refresh backups?\n'''
                        ''' 07) Can i use tgback on my Android?\n'''
                        ''' 08) What encryption is used in tgback?\n'''
                        ''' 09) What version do you prefer me to use?\n'''
                        ''' 10) Passwords isn\'t visible. Why?\n'''
                        ''' 11) QR features is unavailable. Why?\n'''
                        ''' 12) Can i use tgback with TOR network?\n'''
                        ''' 13) Tell me more about backup\'s refreshing and disconnecting.\n'''
                        ''' 14) I found a bug or have an idea. How can i help?\n'''
                        ''' 15) I don\'t trust you. Any alternatives?\n\n'''
                        '''00) Back to the main page\n'''
                    )
                    mode = input('@ Input: ')
                    mode = mode if not mode else mode.zfill(2)
                    clear_terminal()
                    if mode == '00':
                        break
                    elif mode == '01':
                        print(
                            '''01) Why is it needed?\n\n'''
                            '''o Telegram is designed so that you cannot enter your account '''
                            '''without receiving a code. This code can be obtained in two ways: '''
                            '''by receiving the code in Telegram or on your phone. The first method '''
                            '''is available only if you are logged into your account from another device '''
                            '''(for example, the desktop version). If you are logged into your account on only '''
                            '''one device (or not logged at all), then if you lose access to your SIM card, you '''
                            '''will also lose access to your Telegram account forever.'''
                        )
                    elif mode == '02':
                        print(
                            '''02) How it works?\n\n'''
                            '''o Telegram has an open client API on which official clients are built. Anyone who knows '''
                            '''one of the many programming languages can create their own client. Tgback can be called a '''
                            '''very stripped-down client. This program can only log into the account and change the number. '''
                            '''When you log in to your account, Telegram sends you a special session token, which is used to manage '''
                            '''your account. This rule works on all clients, including official ones. Tgback saves this token '''
                            '''along with metadata (for example, your account's username or the time when the backup will be off) '''
                            '''and encrypts it to a file and QR. Whenever you need, you can decrypt this backup and change the phone '''
                            '''number (if you have lost access to the old one) or enter the TelegramDesktop. In fact, Tgback adds an '''
                            '''alternative login method. Your session token is not transferred anywhere and all code is open. However, '''
                            '''beware, the only safe sources you can get Tgback from are these:\n\n '''
                            '''o https://github.com/NotStatilko/tgback (bit.ly/tgback)\n o https://t.me/nontgback'''
                        )
                    elif mode == '03':
                        print(
                            '''03) Can tgback back up my messages?\n\n'''
                            '''o No, tgback allows you only create backups from which you can login or change your phone number. '''
                            '''However, session token (which tgback backups store) can allow get FULL access over your account. '''
                            '''So don\'t use very simple passwords, such as "<PASSWORD>" or "<PASSWORD>".'''
                        )
                    elif mode == '04':
                        print(
                            '''04) What information do you store?\n\n'''
                            '''o Any at all. Tgback backups itself store token session, account username, account id, '''
                            '''time when backup will be disconnected and other data needed by tgback. Nothing will be transferred. '''
                            '''To get more details you can visit official GitHub page on bit.ly/tgback.'''
                        )
                    elif mode == '05':
                        print('05) Do you transfer any of my data?\n\no No.')
                    elif mode == '06':
                        print(
                            '''06) Why we are need to refresh backups?\n\n'''
                            '''o Because Telegram (seems to) disconnect inactive sessions. At the time of discovery this problem, '''
                            '''the disabled backup had not been refreshed for six months, but this has not been sufficiently researched. '''
                            '''At the moment, the backup needs to be refreshed every two months, and you recieve a delayed message '''
                            '''one week in advance as a reminder. Please note that Tgback doesn't turn off backups by itself after two '''
                            '''months, and you will probably still have about two more months. Disconnection of sessions is performed by the Telegram server.'''
                        )
                    elif mode == '07':
                        print(
                            '''07) Can i use tgback on my Android?\n\n'''
                            '''o Sure, you can use Termux which doesn\'t require root. Check out installation steps for Linux on official '''
                            '''tgback\'s Github page: bit.ly/tgback'''
                        )
                    elif mode == '08':
                        print(
                            '''08) What encryption is used in tgback?\n\n'''
                            '''o Started from 4.0 version of tgback, the AES-256 CBC with Scrypt (1 GB of RAM) as PBKDF.'''
                        )
                    elif mode == '09':
                        print(
                            '''09) What version do you prefer me to use?\n\n'''
                            '''o Latest which >= v4.0. Others is considered as not secure.'''
                        )
                    elif mode == '10':
                        print(
                            '''10) Passwords isn't visible. Why?\n\n'''
                            '''o The password is not displayed by default but you enter it. If you have any problems '''
                            '''with creating a backup, you can use the config-file (mode 1->2). Create an empty file and fill it with this template:\n\n'''
                            ''' o "phone_number; telegram_password; backup_password; backup_filename"'''
                        )
                    elif mode == '11':
                        print(
                            '''11) QR features is unavailable. Why?\n\n'''
                            '''If you on linux then make sure that you already *installed the LibZBar package. If you are on other system, '''
                            '''then open issue on official tgback *repository.\n\n o sudo apt install libzbar0\n o https://github.com/NotStatilko/tgback'''
                        )
                    elif mode == '12':
                        print('12) Can i use tgback with TOR network?\n\no Sure, use torsocks or torify for this.')
                    elif mode == '13':
                        print(
                            '''13) Tell me more about backup's refreshing and disconnecting\n\n'''
                            '''o After backup refreshing you get new backups, but you can also use the old ones to log '''
                            '''into your account or change the number, they will just show the wrong time before the date when '''
                            '''the backup needs to be refreshed. To destroy a backup, it\'s not enough to delete only the file, you need '''
                            '''to disconnect your backup session. This can be done in two ways: either through the tgback itself or via Telegram. '''
                            '''After disconnecting the session in any way, all copies of backups associated with this session cease '''
                            '''to be active. Also, please note that changing the password with which you encrypted the backup only changes '''
                            '''the password for the backup that you opened. If the attacker somehow received the password for your backup, '''
                            '''immediately log in to Telegram and disconnect all sessions that you do not recognize as your own. '''
                        )
                    elif mode == '14':
                        print(
                            '''14) I found a bug or have an idea. How can i help?\n\n'''
                            '''o You are always welcome on tgback\'s GitHub! Open issues or send pull-requests!\n\n '''
                            '''o https://github.com/NotStatilko/tgback (bit.ly/tgback)'''
                        )
                    elif mode == '15':
                        print(
                            '''15) I don't trust you. Any alternatives?\n\n'''
                            '''o You can back up the Telegram\'s tdata folder or log in to more than one device. '''
                            '''You can also give a reaction to *this commit and maybe Telegram will add TOTP codes.\n\n '''
                            '''o https://github.com/telegramdesktop/tdesktop/issues/10253'''
                        )
                    else: continue
                    input('\n@ Press Enter to back ')
            # ---- section 1: create a backup (manual input or config file) ----
            if selected_section == '1':
                while True:
                    clear_terminal()
                    print(
                        '''> 1) Manual input\n'''
                        '''>> 2) Load config file\n'''
                        '''>>> 3) Return to main page\n'''
                    )
                    selected_section = input('@ Input: ')
                    if selected_section == '1':
                        clear_terminal()
                        phone = input('> Phone number: ')
                        try:
                            clear_terminal()
                            print('@ Checking number for correctness...')
                            account = TelegramAccount(phone)
                            await account.connect()
                            code, _ = await request_confirmation_code(
                                account.request_code, phone
                            )
                            password = getpass('>> Your Telegram password: ')
                            clear_terminal()
                            try:
                                print('@ Trying to connect with Telegram...')
                                await account.login(password,code)
                                while True:
                                    clear_terminal()
                                    tgback_filename = input('> Backup filename: ')
                                    if len(tgback_filename) > 32:
                                        input('@: ! Backup filename is too long! (Must be < 33).')
                                        tgback_filename = input('> Backup filename: ')
                                    else:
                                        break
                                clear_terminal()
                                while True:
                                    print('@ To create backup you need at least 1GB free for now.\n')
                                    tgback_password = getpass('> Backup password: ')
                                    c_tgback_password = getpass('>> Re-enter password: ')
                                    clear_terminal()
                                    if tgback_password != c_tgback_password:
                                        print('@: ! Password mismatch! Try again.\n')
                                    elif not tgback_password:
                                        print('@: ! Password can\'t be empty.\n')
                                    else:
                                        break
                                clear_terminal()
                                print('@ Creating key with your password...')
                                filename = await account.backup(tgback_password, tgback_filename)
                                clear_terminal()
                                # fixed: show the backup filename (placeholder was corrupted)
                                input(f'@ Successfully encrypted and backuped! ({filename})')
                                await main()
                            except (KeyboardInterrupt, EOFError):
                                await main()
                            except PhoneCodeInvalidError:
                                clear_terminal()
                                input('\n@: ! Code you entered is invalid. Try again.')
                            except PasswordHashInvalidError:
                                clear_terminal()
                                input('\n@: ! Password you entered is invalid. Try again.')
                            except FloodWaitError as e:
                                clear_terminal()
                                input('''@: ! Telegram servers returned FloodWaitError. '''
                                      f'''Please wait {e.seconds} seconds ''')
                                await main()
                        except KeyboardInterrupt:
                            await main()
                        except (PhoneNumberInvalidError, TypeError):
                            clear_terminal()
                            input(f'@: ! The provided number ({phone}) is invalid. Try again.')
                            await main()
                    elif selected_section == '2': # Config file
                        clear_terminal()
                        config = input('> Path to tgback-config file: ')
                        if not os.path.exists(config):
                            clear_terminal()
                            input('@: ! Can\'t open config file. Check your path. ')
                        else:
                            config_template = (
                                '''phone_number; telegram_password; '''
                                '''backup_password; backup_filename'''
                            )
                            try:
                                config = open(config).read()
                                # Invalid format but ok. I try to predict it :)
                                config = config.replace('"','')
                                config = config.replace(' ','')
                                config = config.replace('\n','')
                                config = config.split(';')
                                config = config[:4] if not config[-1] else config
                                assert len(config) == 4
                                if not config[3]:
                                    # no filename given: use the current unix time
                                    config[3] = str(int(time()))
                                if len(config[3]) > 32:
                                    raise TypeError
                            except AssertionError:
                                clear_terminal()
                                input(
                                    '''@: ! It\'s not a tgback-config file\n\n'''
                                    f'''@: ? Correct format: "{config_template}"\n\n'''
                                    '''@: ? Use manual input if your password contain ";".'''
                                )
                                await main()
                            except TypeError:
                                clear_terminal()
                                input('@: ! Backup filename is too long! (Must be < 33).')
                                await main()
                            try:
                                clear_terminal()
                                print('@ Trying to connect with Telegram...')
                                account = TelegramAccount(phone_number=config[0])
                                await account.connect()
                                try:
                                    code, _ = await request_confirmation_code(
                                        account.request_code, config[0]
                                    )
                                    clear_terminal()
                                    print('@ Trying to login...')
                                    await account.login(config[1],code)
                                    clear_terminal()
                                except PhoneCodeInvalidError:
                                    clear_terminal()
                                    input('@: ! Invalid code. Try again. ')
                                    await main()
                                except PasswordHashInvalidError:
                                    clear_terminal()
                                    input('@: ! Invalid password. Try again. ')
                                    await main()
                                else:
                                    print('@ Creating key with your password...')
                                    filename = await account.backup(config[2],config[3])
                                    clear_terminal()
                                    # fixed: show the backup filename (placeholder was corrupted)
                                    input(f'@ Successfully encrypted and backuped! ({filename})')
                                    return_to_main = True; break
                            except ConnectionError:
                                clear_terminal()
                                input('@: ! Unable to connect with Telegram servers. Check your internet connection.')
                                await main()
                            except:
                                clear_terminal()
                                input(
                                    '''@: ! Something wrong in your config file.\n\n'''
                                    f'''@: ? Correct format: "{config_template}"\n\n'''
                                    '''@: ? If your password contain ";", please, use manual input.'''
                                )
                    elif selected_section == '3':
                        await main()
            # ---- section 2: open an existing .tgback backup ----
            elif selected_section == '2': # Open .tgback
                while True:
                    clear_terminal()
                    print('> 1) Load from QR ' + about_qr)
                    print('>> 2) Use .tgback file')
                    print('>>> 3) Back to main page')
                    open_mode = input('\n@ Input: ')
                    if open_mode == '1' and not qr_available:
                        clear_terminal()
                        input('@: ! Can\'t reach ZBar or PIL. Please check installed dependecies. ')
                        await main()
                    if open_mode and open_mode in '123':
                        clear_terminal(); break
                if open_mode == '3':
                    await main()
                backup_type = 'QR' if open_mode == '1' else 'file'
                is_qr = True if open_mode == '1' else False
                path_to_tgback = input(f'> Path to .tgback {backup_type}: ')
                if not os.path.exists(path_to_tgback):
                    clear_terminal()
                    input(f'@: ! Can\'t find .tgback {backup_type}. Check entered path.')
                    await main()
                else:
                    while True:
                        clear_terminal()
                        print('@ To decrypt backup you need at least 1GB free for now.\n')
                        tgback_password = getpass(f'>> Password to .tgback {backup_type}: ')
                        if not tgback_password:
                            clear_terminal()
                            input('@: ! Password can\'t be empty. Try again or press Ctrl+C.')
                        else: break
                    clear_terminal()
                    print('@ Creating key with your password...')
                    try:
                        restored = restore(path_to_tgback, tgback_password, is_qr=is_qr)
                        assert len(restored) == 6
                    except (AssertionError, ValueError, ReedSolomonError):
                        clear_terminal()
                        input('\n@: ! Incorrect Password or corrupted backup. ')
                        await main()
                    except (IndexError, image_error):
                        clear_terminal()
                        input('''\n@: ! Impossible to read QR code. '''
                              '''Are you sure that image is correct and in good quality?''')
                        await main()
                    else:
                        # Connect with the restored session and show the account page.
                        account = TelegramAccount(session=restored[1])
                        await account.connect()
                        while True:
                            clear_terminal()
                            return_to_page = False
                            about_qr = '' if qr_available else '(not available)'
                            print(
                                f'''% Hello, {restored[3] + ' ' + restored[5]}! (id{restored[4]})\n'''
                                f'''@ Backup valid until {ctime(float(restored[2]))}\n\n'''
                                '''> 1) Change account phone number\n'''
                                '''>> 2) Refresh .tgback backup\n'''
                                f'''>>> 3) Log in to TelegramDesktop {about_qr}\n'''
                                '''>>>> 4) Change backup password\n'''
                                '''>>>>> 5) Destroy backup\n'''
                                '''>>>>>> 6) Return to main page'''
                            )
                            selected_section = input('\n@ Input: ')
                            if selected_section == '1':
                                clear_terminal()
                                while True:
                                    if return_to_page:
                                        break
                                    clear_terminal()
                                    new_phone = input('> Enter your new phone number: ')
                                    try:
                                        code, code_hash = await request_confirmation_code(
                                            account.request_change_phone_code, new_phone, account=account,
                                        )
                                        await account.change_phone(code, code_hash, new_phone)
                                    except FreshChangePhoneForbiddenError:
                                        return_to_page = True
                                        clear_terminal()
                                        input('\n@: ! Can\'t change phone number now. Please, wait some time.')
                                        break
                                    except PhoneCodeInvalidError:
                                        clear_terminal()
                                        return_to_page = True
                                        input('@: ! The code you entered is invalid. Try again.')
                                    except AuthKeyUnregisteredError:
                                        clear_terminal()
                                        return_to_page = True
                                        input('\n@: ! Backup was disconnected.'); break
                                    except PhoneNumberOccupiedError:
                                        clear_terminal()
                                        return_to_page = True
                                        input(f'\n@: ! Number {new_phone} already in use. ')
                                    except PhoneNumberInvalidError:
                                        clear_terminal()
                                        return_to_page = True
                                        input(f'\n@: ! Number {new_phone} is invalid. ')
                                    except FloodWaitError as e:
                                        clear_terminal()
                                        input('''@: ! Telegram servers returned FloodWaitError. '''
                                              f'''Please wait {e.seconds} seconds ''')
                                        return_to_page = True
                                    else:
                                        break
                                if not return_to_page:
                                    clear_terminal()
                                    input('''@: Your phone has been successfully changed! '''
                                          f'''Now you can login to your Telegram account with phone {new_phone}!''')
                                    await main()
                            elif selected_section == '2':
                                try:
                                    clear_terminal()
                                    print('@ Refreshing...')
                                    await account.refresh_backup(restored, path_to_tgback)
                                except:
                                    clear_terminal()
                                    input('\n\n@: ! Backup was disconnected.')
                            elif selected_section == '3' and not qr_available:
                                clear_terminal()
                                input('@: ! Can\'t reach ZBar or PIL. Please check installed dependecies. ')
                            elif selected_section == '3':
                                while True:
                                    clear_terminal()
                                    print(
                                        '''% Please open TelegramDesktop and choose "Login via QR" option.\n'''
                                        ''' If you already logged in then tap burger icon and "Add Account".\n'''
                                        ''' Telegram refreshes this QR every 30 seconds, so do it quick!\n\n'''
                                        '''> 1) Okay, i already screenshoted QR\n>> 2) Go back\n'''
                                    )
                                    choice = input('@ Input: ')
                                    clear_terminal()
                                    if choice == '1':
                                        qrcode_path = input('@ Telegram QR path: ')
                                        clear_terminal()
                                        if os.path.exists(qrcode_path):
                                            try:
                                                print('@ Scanning Telegram auth QR code...')
                                                token = scanqrcode(qrcode_path).split(b'token=')[1]
                                                await account.accept_login_token(token)
                                                clear_terminal()
                                                input('@: Successfully logged in! ')
                                                break
                                            except:
                                                clear_terminal()
                                                input('''@: ! Can\'t log in. Try to increase '''
                                                      '''size of QR or wait 30 seconds and screenshot new QR code.''')
                                        else:
                                            input(
                                                '''@: ! Sorry, i can\'t open path that you provided. '''
                                                '''Re-screenshot new QR and check your path.'''
                                            )
                                    elif choice == '2':
                                        break
                            elif selected_section == '4':
                                clear_terminal()
                                print('@ To change password you need at least 1GB free for now.\n')
                                new_password = getpass('> Your new password: ')
                                c_new_password = getpass('>> Confirm password: ')
                                # fixed: compare against c_new_password (identifier was corrupted)
                                if new_password != c_new_password:
                                    clear_terminal()
                                    input('@: ! Password mismatch. Please try again.')
                                elif not new_password:
                                    clear_terminal()
                                    input('@: ! Password can\'t be empty. Try again.')
                                else:
                                    clear_terminal()
                                    print('@ Creating key with your password...')
                                    restored[0] = TgbackAES(b'0')._make_scrypt_key(new_password.encode()).digest()
                                    clear_terminal()
                                    print('@ Refreshing...')
                                    await account.refresh_backup(restored, path_to_tgback)
                                    clear_terminal()
                                    input('@: Your password has been successfully changed! ')
                            elif selected_section == '5':
                                while True:
                                    clear_terminal()
                                    print(
                                        '''% Are you sure you want to destroy your backup?\n\n'''
                                        '''> 1) Yes\n>> 2) No\n'''
                                    )
                                    confirm = input('@ Input: ')
                                    if confirm == '1':
                                        clear_terminal()
                                        print('''% No, seriously. After this operation, you will no longer be '''
                                              '''able to change your phone number through this backup.\n''')
                                        print('% Are you totally sure? Type "yes" or "no"\n')
                                        if input('@ Input: ') == 'yes':
                                            clear_terminal()
                                            if await account.logout():
                                                input('@: Successfully. Now you can delete your backup file.')
                                                await main()
                                            else:
                                                input('@: ! Something went wrong, can\'t disconnect session. Try again.')
                                                break
                                        else:
                                            break
                                    elif confirm == '2':
                                        break
                            elif selected_section == '6':
                                await main()
            elif selected_section == '3':
                raise SystemExit
            else:
                await main()
    except (KeyboardInterrupt, EOFError):
        await main()
    except SystemExit:
        raise SystemExit
    except ConnectionError:
        clear_terminal()
        input('@: ! Unable to connect with Telegram servers. Check your internet connection.')
        await main()
    except Exception as e:
        # Last-resort handler: log the traceback and ask the user to report it.
        clear_terminal()
        print_exc(file=open('tgback.log','a'))
        print(
            f'''@: ! Oops, something went wrong! Unknown error was '''
            '''written to the "tgback.log" file, so please, '''
            '''send it to me on Telegram (t.me/not_statilko), '''
            '''or open issue on GitHub (bit.ly/tgback). '''
            '''I will fix it as soon as possible. Thanks in advance!\n\n'''
            f'''Short error: {e}\n'''
        )
        input('@ Press Enter to return')
        await main()
asyncio_run(main())  # start the interactive UI event loop
|
"""
Catalog Receptor Plugin
Forwards HTTP GET and POST requests to the Ansible Tower
The plugin is invoked by the receptor when it gets requests
from the platform controller.
"""
from urllib.parse import urlparse
from urllib.parse import parse_qsl
from urllib.parse import urljoin
from distutils.util import strtobool
import json
import gzip
import logging
import ssl
import asyncio
import aiohttp
import jmespath
def configure_logger():
    """Build this module's logger, mirroring the receptor logger's level and handlers."""
    receptor_logger = logging.getLogger("receptor")
    plugin_logger = logging.getLogger(__name__)
    plugin_logger.setLevel(receptor_logger.level)
    for existing_handler in receptor_logger.handlers:
        plugin_logger.addHandler(existing_handler)
    return plugin_logger
def receptor_export(func):
    """Mark *func* as exported to receptor and return it unchanged."""
    func.receptor_export = True
    return func
class Run:
    """ The Run class to execute the work recieved from the controller """
    # HTTP status codes accepted for POST responses.
    VALID_POST_CODES = [200, 201, 202]
    # Tower job states that mean the job is finished (monitor() stops polling).
    JOB_COMPLETION_STATUSES = ["successful", "failed", "error", "canceled"]
    # Default polling interval (seconds) used by monitor().
    DEFAULT_REFRESH_INTERVAL = 10
    # Only artifact keys with this prefix are forwarded to the platform.
    ARTIFACTS_KEY_PREFIX = "expose_to_cloud_redhat_com_"
    # Upper bound (bytes, JSON-encoded) on the forwarded artifacts.
    MAX_ARTIFACTS_SIZE = 1024
    def __init__(self, queue, payload, config, logger):
        """ Initialize a Run instance with the following
        param: queue: The response queue
        param: payload: The payload recieved from the platform controller
        param: config: The config parameters read from receptor.conf
        param: logger: The logger instance to use
        """
        self.result_queue = queue
        self.config = config
        self.logger = logger
        self.href_slug = payload.pop("href_slug")
        self.method = payload.pop("method", "get").lower()
        self.fetch_all_pages = payload.pop("fetch_all_pages", False)
        if isinstance(self.fetch_all_pages, str):
            # NOTE(review): distutils.util.strtobool is deprecated and removed
            # in Python 3.12 -- needs a local replacement when upgrading.
            self.fetch_all_pages = strtobool(self.fetch_all_pages)
        self.encoding = payload.pop("accept_encoding", None)
        self.params = payload.pop("params", {})
        self.ssl_context = None
        self.apply_filters = payload.pop("apply_filter", None)
        self.refresh_interval_seconds = payload.pop(
            "refresh_interval_seconds", self.DEFAULT_REFRESH_INTERVAL
        )
    @classmethod
    def from_raw(cls, queue, payload, plugin_config, logger):
        """ Class method to create a new instance """
        return cls(queue, payload, plugin_config, logger)
    def initialize_ssl(self):
        """ Configure SSL for the current session """
        self.ssl_context = ssl.SSLContext()
        # if self.config.get('ca_file', None):
        #     self.ssl_context.load_verify_locations(ca_file=self.config['ca_file'])
        verify_ssl = self.config.get("verify_ssl", True)
        if isinstance(verify_ssl, str):
            verify_ssl = strtobool(verify_ssl)
        if not verify_ssl:
            # Explicitly opt out of certificate verification when configured.
            self.ssl_context.verify_mode = ssl.CERT_NONE
    async def get_page(self, session, url, params):
        """ Get a single page from the Tower API """
        self.logger.debug(f"Making get request for {url} {params}")
        async with session.get(url, params=params, ssl=self.ssl_context) as response:
            # Body is read inside the context so the connection can be released.
            response_text = dict(status=response.status, body=await response.text())
        return response_text
    async def get(self, session, url):
        """ Send an HTTP Get request to the Ansible Tower API
            supports
            Fetching all pages from the end point using fetch_all_pages = True
            Compressing the response payload using accept_encoding = gzip
        """
        url_info = urlparse(url)
        params = dict(parse_qsl(url_info.query))
        if isinstance(self.params, dict):
            params.update(self.params)
        while True:
            response = await self.get_page(session, url, params)
            if response["status"] != 200:
                raise Exception(
                    f"Get failed {url} status {response['status']} body {response.get('body','empty')}"
                )
            json_body = json.loads(response["body"])
            json_body = self.reconstitute_body(json_body)
            response["body"] = json.dumps(json_body)
            self.logger.debug(f"Response from filter {response}")
            # Each page is queued individually; the consumer reassembles them.
            self.send_response(response)
            if self.fetch_all_pages:
                if json_body.get("next", None):
                    # Tower pagination: bump the "page" query parameter.
                    params["page"] = params.get("page", 1) + 1
                else:
                    break
            else:
                break
    def reconstitute_body(self, json_body):
        """ Apply the configured JMESPath filters and the artifact whitelist. """
        if self.apply_filters:
            json_body = self.filter_body(json_body)
        if isinstance(json_body.get("artifacts", None), dict):
            json_body = self.filter_artifacts(json_body)
        return json_body
    def send_response(self, response):
        """ Put the response on the result queue, gzipped when requested. """
        if self.encoding and self.encoding == "gzip":
            self.result_queue.put(self.zip_json_contents(response))
        else:
            self.result_queue.put(response)
    def zip_json_contents(self, data):
        """ Compress the data using gzip """
        self.logger.debug(f"Compressing response data for URL {self.href_slug}")
        return gzip.compress(json.dumps(data).encode("utf-8"))
    def filter_body(self, json_body):
        """ Apply JMESPath filters to the json body"""
        self.logger.debug(f"Filtering response data for URL {self.href_slug}")
        if isinstance(self.apply_filters, dict):
            # dict form: each key gets its own filtered projection.
            for key, jmes_filter in self.apply_filters.items():
                json_body[key] = jmespath.search(jmes_filter, json_body)
        elif isinstance(self.apply_filters, str):
            # string form: the whole body is replaced by the filter result.
            json_body = jmespath.search(self.apply_filters, json_body)
        return json_body
    def filter_artifacts(self, json_body):
        """ Keep only whitelisted artifact keys and enforce the size cap. """
        artifacts = {}
        for key in json_body["artifacts"]:
            if key.startswith(self.ARTIFACTS_KEY_PREFIX):
                artifacts[key] = json_body["artifacts"][key]
        if len(json.dumps(artifacts)) > self.MAX_ARTIFACTS_SIZE:
            raise Exception(f"Artifacts is over {self.MAX_ARTIFACTS_SIZE} bytes")
        json_body["artifacts"] = artifacts
        return json_body
    async def monitor(self, session, url):
        """ Monitor a Ansible Tower Job """
        self.logger.debug(f"Monitor Job {url} data {self.params}")
        url_info = urlparse(url)
        params = dict(parse_qsl(url_info.query))
        if isinstance(self.params, dict):
            params.update(self.params)
        while True:
            response = await self.get_page(session, url, params)
            if response["status"] != 200:
                raise Exception(
                    f"Get failed {url} status {response['status']} body {response.get('body','empty')}"
                )
            json_body = json.loads(response["body"])
            if json_body["status"] not in self.JOB_COMPLETION_STATUSES:
                # Job still running: wait and poll again.
                await asyncio.sleep(self.refresh_interval_seconds)
                continue
            json_body = self.reconstitute_body(json_body)
            response["body"] = json.dumps(json_body)
            self.logger.debug(f"Response from filter {response}")
            self.send_response(response)
            break
    async def post(self, session, url):
        """ Post the data to the Ansible Tower """
        self.logger.debug(f"Making post request for {url} data {self.params}")
        headers = {"Content-Type": "application/json"}
        async with session.post(
            url, data=json.dumps(self.params), headers=headers, ssl=self.ssl_context
        ) as post_response:
            response = dict(
                status=post_response.status, body=await post_response.text()
            )
        if response["status"] not in self.VALID_POST_CODES:
            raise Exception(
                f"Post failed {url} status {response['status']} body {response.get('body', 'empty')}"
            )
        json_body = json.loads(response["body"])
        json_body = self.reconstitute_body(json_body)
        response["body"] = json.dumps(json_body)
        self.logger.debug(f"Response from filter {response}")
        self.send_response(response)
    def auth_headers(self):
        """ Create proper authentication headers based on Basic Auth or Token """
        headers = {}
        if len(self.config.get("token", "")) > 0:
            # Token takes precedence over username/password.
            headers["Authorization"] = "Bearer " + self.config["token"]
        elif (
            len(self.config.get("username", "")) > 0
            and len(self.config.get("password", "")) > 0
        ):
            auth = aiohttp.BasicAuth(self.config["username"], self.config["password"])
            headers["Authorization"] = auth.encode()
        else:
            raise Exception(
                "Either token or username and password needs to be set in the receptor.conf"
            )
        return headers
    async def start(self):
        """ Start the asynchronous process to send requests to the tower api """
        url = urljoin(self.config["url"], self.href_slug)
        if url.startswith("https"):
            self.initialize_ssl()
        async with aiohttp.ClientSession(headers=self.auth_headers()) as session:
            # Dispatch on the requested method; unknown methods are silently
            # ignored (nothing is queued).
            if self.method == "get":
                await self.get(session, url)
            elif self.method == "post":
                await self.post(session, url)
            elif self.method == "monitor":
                await self.monitor(session, url)
def run(coroutine):
    """Drive *coroutine* to completion on a fresh event loop, then close it.

    Before closing, an extra quarter-second sleep is awaited. This is the
    aiohttp-recommended graceful-shutdown workaround:
    https://docs.aiohttp.org/en/stable/client_advanced.html#graceful-shutdown
    https://github.com/aio-libs/aiohttp/issues/1925
    Without it the underlying sockets linger in CLOSE_WAIT state.
    """
    event_loop = asyncio.new_event_loop()
    event_loop.run_until_complete(coroutine)
    # Give aiohttp a beat to tear down its transports before the loop dies.
    event_loop.run_until_complete(asyncio.sleep(0.250))
    return event_loop.close()
@receptor_export
def execute(message, config, queue):
    """Entrypoint for the receptor.

    :param message: has message header information including the payload coming
                    in from the platform controller. The payload carries
                    href_slug, accept_encoding, params and method (get|post).
    :param config: the parameters loaded from the receptor.conf for this worker.
    :param queue: the response channel used to send messages back to the
                  receptor, which forwards them to the platform controller.
    """
    logger = configure_logger()
    logger.debug(
        "Payload Type: %s Data %s", type(message.raw_payload), message.raw_payload
    )
    payload = message.raw_payload
    if isinstance(payload, str):
        # Raw payloads may arrive JSON-encoded; decode before use.
        try:
            payload = json.loads(payload)
        except json.JSONDecodeError as err:
            logger.exception(err)
            raise
    logger.debug("Parsed payload: %s", payload)
    try:
        logger.debug("Start called")
        run(Run.from_raw(queue, payload, config, logger).start())
        logger.debug("Start finished")
    except Exception as err:
        logger.exception(err)
        raise
    return queue
|
<filename>pareto.py
__author__ = "jeanlouis.mbaka"
import csv
import sys
import pandas as pd
import numpy as np
class Pareto():
    """ABC/FMR pareto analysis over a csv table of items.

    Row layout assumed from the indexing below (TODO confirm against the
    input csv): column 1 is a monthly consumption quantity and column 2 a
    unit price. Each analysis step appends new columns in place to both
    self.header and every row of self.data.
    """
    def __init__(self):
        pass
    def read_data(self, filename):
        """
        Read data from filename
        """
        results = read_csv(filename)
        self.header = results[0]
        self.raw_data = results[1]
        # NOTE: self.data aliases raw_data (no copy); the analysis mutates it.
        self.data = self.raw_data
    def write_data(self, filename):
        """
        Write data to filename
        """
        write_csv(filename, self.header, self.data)
    def format_data(self):
        # Placeholder: no formatting is currently applied.
        pass
    def pareto(self):
        """
        Pareto analysis
        """
        # Order matters: each step reads the column appended by the previous one.
        self.valorised_consumption()
        self.cumulative_valorised_consumption()
        self.abc_segmentation()
        self.fmr_segmentation()
    def valorised_consumption(self):
        """
        computes the valorised consumption.
        """
        self.header.append("consommation valorisee")
        num_of_data = len(self.data)
        print(num_of_data)
        for i in range(num_of_data):
            try:
                monthly_consumption = self.data[i][1]
                unit_price = self.data[i][2]
                # New last column: quantity * price.
                self.data[i].append(unit_price * monthly_consumption)
            except IndexError:
                # A row shorter than 3 columns aborts the whole run.
                print("faulty index = {}".format(i))
                print("len(self.data) = {}".format(len(self.data)))
                print(self.data[i])
                sys.exit(2)
    def cumulative_valorised_consumption(self):
        """
        Sorted and cumululative consumption.
        """
        self.header.append("consommation cumulee")
        self.data = sorted(self.data, key=lambda row: row[-1], reverse=True) # sort by valorised consumption
        for index in range(len(self.data)):
            if index == 0:
                self.data[index].append(self.data[index][-1])
            else:
                # The previous row's last column already holds its cumulative
                # sum, so this adds the current value onto the running total.
                self.data[index].append(self.data[index][-1] + self.data[index-1][-1])
        self.print_debug(heading="sorted cumulalive valorised consumption")
    def abc_segmentation(self):
        """
        ABC segmentation.
        """
        self.header.append("PCT cumule")
        self.header.append("ABC Categorie")
        for index in range(len(self.data)):
            # Share of the grand total accumulated up to this row; the last
            # row's cumulative column is the grand total.
            cumulative_pct_consumption = self.data[index][-1] / self.data[-1][-1]
            self.data[index].append(cumulative_pct_consumption)
            # A: first 80% of value, B: next 15%, C: the tail.
            if (cumulative_pct_consumption <= 0.8):
                self.data[index].append("A")
            elif (cumulative_pct_consumption > 0.8 and cumulative_pct_consumption <= 0.95):
                self.data[index].append("B")
            else:
                self.data[index].append("C")
        self.print_debug(heading="ABC Segmentation")
    def fmr_segmentation(self):
        """
        FRM Segmention.
        """
        # NOTE(review): this step appends THREE values per row (cumulative
        # qty, cumulative pct, letter) but only TWO header names -- the
        # written csv columns shift by one from here on. Confirm and fix.
        self.header.append("QTY cumule")
        self.header.append("FMR Categorie")
        # cumulative quantity
        # sort by quantity
        self.data = sorted(self.data, key=lambda row:row[1], reverse=True)
        # compute cumulative quantities
        for index in range(len(self.data)):
            if index == 0:
                self.data[index].append(self.data[index][1])
            else:
                self.data[index].append(self.data[index][1] + self.data[index-1][-1])
        # FRM Segmentation
        for index in range(len(self.data)):
            cumulative_pct_qty = self.data[index][-1] / self.data[-1][-1]
            self.data[index].append(cumulative_pct_qty)
            # F: fast movers (first 80% of quantity), M: medium, R: rare.
            if (cumulative_pct_qty <= 0.8):
                self.data[index].append("F")
            elif (cumulative_pct_qty > 0.8 and cumulative_pct_qty <= 0.95):
                self.data[index].append("M")
            else:
                self.data[index].append("R")
        self.print_debug(heading="ABC/FMR Segmention")
    def print_debug(self, heading=""):
        # Dump header and all rows to stdout, bracketed by a heading banner.
        print("<====={}=====>".format(heading))
        print(self.header)
        [print(row) for row in self.data]
def read_csv(filename):
    """
    Load a semicolon-separated csv file.

    The first row becomes the header; every following row is converted cell
    by cell to float when possible (decimal commas are turned into dots
    first), otherwise kept as text. A row is truncated at its first blank
    cell, and fully blank rows are dropped.

    :param filename: name/path to the csv file
    :return: [header, data] -- header is the list of column names, data is a
             list of typed rows
    """
    delimiter = ";"
    with open(filename, 'rt') as csv_file:
        # In the excel-tab dialect each row comes back as a one-element list
        # holding the whole line; split it on the real delimiter ourselves.
        rows = [line[0].split(delimiter)
                for line in csv.reader(csv_file, dialect='excel-tab')]
    header = rows[0] if rows else []
    data = []
    for raw_row in rows[1:]:
        typed_row = []
        for cell in raw_row:
            if not cell.strip():
                break
            try:
                typed_row.append(float(cell.replace(",", ".")))
            except ValueError:
                typed_row.append(cell)
        if typed_row:
            data.append(typed_row)
    return [header, data]
def write_csv(filename, header, data):
    """
    Write pareto data to csv in the excel dialect.

    :param filename: output filename
    :param header: list of column names, written as the first row
    :param data: iterable of rows written after the header
    """
    with open(filename, 'wt') as csv_file:
        writer = csv.writer(csv_file, dialect="excel")
        writer.writerows([header] + list(data))
def parse_filename():
    """
    Read the input filename from the command line.

    Exits with status 2 when no argument was supplied.

    :return: filename
    """
    if len(sys.argv) <= 1:
        sys.exit(2)
    return sys.argv[1]
if __name__ == "__main__":
    # Run the full pareto analysis on the csv named on the command line and
    # write the enriched table next to it as <input>_pareto.csv.
    input_file = parse_filename()
    analysis = Pareto()
    analysis.read_data(input_file)
    analysis.pareto()
    analysis.write_data("{}_pareto.csv".format(input_file))
import pprint
# Get the html end.
# Get the html begin: DOCTYPE plus the opening <html> tag.
def get_html_begin():
    return "<!DOCTYPE html> \n <html> \n"
# Get the html end.
# Get the html end: the closing </html> tag.
def get_html_end():
    return "\n" + " </html>"
def writeErrorMessageHtml(error_message):
    # Wrap an error message in a minimal standalone HTML page.
    parts = [get_html_begin(), "<body>", error_message, "</body>", get_html_end()]
    return "".join(parts)
# Get the memory check table code.
def get_memory_check_table_code(tests_sets_results):
return ["", ""]
# Get the performance check table code.
def get_performance_check_table_code(tests_sets_results):
    # Placeholder: performance checks are not reported yet, so both the
    # table markup and the error markup are empty.
    empty_table, empty_errors = "", ""
    return [empty_table, empty_errors]
# Get the image comparison table code.
def get_image_comparison_table_code(tests_sets_results):
max_image_comparison_counts = 0
# Find table width
for current_test_group_name in tests_sets_results['Tests Groups']:
current_test_group = tests_sets_results['Tests Groups'][current_test_group_name]
if current_test_group['Enabled'] == True:
if 'Results' in current_test_group:
if 'Screen Capture Checks' in current_test_group['Results']:
screen_captures_list_all = current_test_group['Results']['Screen Capture Checks']
for screen_captures_list in screen_captures_list_all:
count = max(len(screen_captures_list['Frame Screen Captures']), len(screen_captures_list['Time Screen Captures']))
screen_captures_list['Capture Count'] = count
max_image_comparison_counts = max(max_image_comparison_counts, count)
if max_image_comparison_counts == 0:
return ["", ""]
else:
image_comparison_table_code = '<table style="width:100%" border="1">\n'
image_comparison_table_code += '<tr>\n'
image_comparison_table_code += '<th colspan=\'' + str(max_image_comparison_counts + 1) + '\'>Image Compare Tests</th>\n'
image_comparison_table_code += '</tr>\n'
image_comparison_table_code += '<th>Test</th>\n'
image_comparison_errors_code = ""
for i in range (0, max_image_comparison_counts):
image_comparison_table_code += '<th>SS' + str(i) + '</th>\n'
for current_test_group_name in tests_sets_results['Tests Groups']:
current_test_group = tests_sets_results['Tests Groups'][current_test_group_name]
# Check if the current test group is enabled.
if current_test_group['Enabled'] == True:
if 'Results' in current_test_group:
if 'Screen Capture Checks' in current_test_group['Results']:
screen_captures_list = current_test_group['Results']['Screen Capture Checks']
# Construct the list of captures.
# For every test run (every time executable is ran with arguments)
for test_index, test_captures in enumerate(screen_captures_list):
image_comparison_table_code += '<tr>\n'
test_name = current_test_group_name + '_' + str(test_index)
# If zero captures, test probably failed to run. Color the test name red
if test_captures['Capture Count'] == 0:
image_comparison_table_code += '<td bgcolor="red"><font color="white">' + test_name + '</font></td>\n'
# If this failure has an error message, add it to output
if 'Errors' in current_test_group['Results'] and test_index in current_test_group['Results']['Errors']:
image_comparison_errors_code += "<p><b> Error running test " + test_name + "</b>: " + current_test_group['Results']['Errors'][test_index] + "<br></p>\n"
if test_captures['Capture Count'] > 0:
image_comparison_table_code += '<td>' + test_name + '</td>\n'
# Get the frame or time capture list, whichever one has contents
screen_capture_types = ['Frame Screen Captures', 'Time Screen Captures']
for capture_type in screen_capture_types:
# For each single capture comparison result
for capture_index, capture_result in enumerate(test_captures[capture_type]):
# Check if comparison was successful. It should be convertible to a number if it was
try:
result_value = float(capture_result['Compare Result'])
except ValueError:
image_comparison_errors_code += "<p><b>" + test_name + " failed to compare screen capture " + str(capture_index) + "</b><br> \n"
image_comparison_errors_code += "<b>Source</b> : " + capture_result["Source Filename"] + " <br> <b>Reference</b> : " + capture_result["Reference Filename"] + " <br> \n"
image_comparison_errors_code += "Please check whether the images are output correctly, whether the reference exists and whether they are the same size. <br></p>"
image_comparison_table_code += '<td bgcolor="red"><font color="white">Error</font></td>\n'
continue
if not capture_result['Test Passed']:
image_comparison_table_code += '<td bgcolor="red"><font color="white">' + str(result_value) + '</font></td>\n'
else:
image_comparison_table_code += '<td>' + str(result_value) + '</td>\n'
image_comparison_table_code += '</tr>\n'
image_comparison_table_code += '</table>\n'
return [image_comparison_table_code, image_comparison_errors_code]
# Write the provided Tests Set Results to HTML and Return them.
def write_test_set_results_to_html(tests_set_results):
html_code = ""
html_code = html_code + get_html_begin()
html_code = html_code + "<body>"
if tests_set_results['Error'] is not None:
html_code = html_code + '<p>' + tests_set_results['Error'] + '</p>'
else:
image_comparisons = get_image_comparison_table_code(tests_set_results)
html_code = html_code + image_comparisons[0]
html_code = html_code + '\n <hr> \n'
html_code = html_code + '\n <hr> \n'
html_code = html_code + image_comparisons[1]
html_code = html_code + "</body>"
html_code = html_code + get_html_end()
return html_code |
#!/usr/bin/env python
#
import argparse
import os
from souvenir_utils import *
# Command-line interface: compare a source tree against its backup tree.
parser = argparse.ArgumentParser()
parser.add_argument('source_dir', help='source root directory')
parser.add_argument('target_dir', help='backup root directory')
parser.add_argument('-p', '--path', help='display full path', action='store_true')
parser.add_argument('-c', '--cmd', help='display processing command', action='store_true')
parser.add_argument('-m', '--mode', help=\
    'CHECK_SOURCE - list files which are in the source directory but not in the target, ' +
    'CHECK_TARGET - list files which are in the target directory but not in the source, ' +
    'CHECK (default) = CHECK_SOURCE + CHECK_TARGET', default='CHECK')
ARGS = parser.parse_args()
# Module-level state shared by the scan routines below; set per pass
# by process_root_dir().
SOURCE_ROOT = ''
TARGET_ROOT = ''
# Items found on one side but missing on the other.
TO_BE_PROCESSED = []
# Kinds of suggested shell command emitted by ItemInfo.get_report().
CMD_TYPE_CP = 1
CMD_TYPE_RM = 2
CMD_TYPE = ''
class ItemInfo:
    """A single file or directory, addressed relative to both roots."""
    def __init__(self, dir_path, name):
        """
        :param dir_path: path of the containing directory, relative to the roots
        :param name: file or directory name inside dir_path
        """
        self.name = name
        self.dir_path = dir_path
        # Absolute locations of this item under the source and target trees.
        self.source_path = os.path.abspath(os.path.join(SOURCE_ROOT, dir_path, name))
        self.target_path = os.path.abspath(os.path.join(TARGET_ROOT, dir_path, name))
        self.is_dir = os.path.isdir(self.source_path)
    def has_target(self):
        """Return True when the item exists, with matching kind, under the target root."""
        if self.is_dir:
            return os.path.isdir(self.target_path)
        return os.path.isfile(self.target_path)
    def get_report(self):
        """Build the one-item report line, optionally with the full path and a suggested shell command."""
        sub_path = os.path.join(self.dir_path, self.name)
        if self.is_dir:
            res = textc('[{}]'.format(sub_path), Colors.BOLD)
        else:
            res = textc(sub_path, Colors.BOLD)
        if ARGS.path:
            res += textc(" ('{}')".format(self.source_path), Colors.MODEST)
        # TODO: disable or correct commands on Windows
        if ARGS.cmd:
            # Bug fix: cmd was previously unbound when CMD_TYPE matched
            # neither command type, which made `if cmd:` raise NameError.
            cmd = None
            if CMD_TYPE == CMD_TYPE_CP:
                (path, name) = os.path.split(self.target_path)
                if self.is_dir:
                    cmd = "cp -r '{}' '{}'".format(self.source_path, path)
                else:
                    cmd = "cp '{}' '{}'".format(self.source_path, path)
            elif CMD_TYPE == CMD_TYPE_RM:
                if self.is_dir:
                    cmd = "rm -rf '{}'".format(self.source_path)
                else:
                    cmd = "rm '{}'".format(self.source_path)
            if cmd:
                res += '\n    ' + textc(cmd, Colors.CMD)
        return res
def process_dir(dir_name):
    """Recursively scan *dir_name* (relative to SOURCE_ROOT).

    Items missing from the target tree are queued in TO_BE_PROCESSED;
    directories present on both sides are descended into.
    Returns (directories_checked, files_checked).
    """
    dirs_seen = 0
    files_seen = 0
    for entry in os.listdir(os.path.join(SOURCE_ROOT, dir_name)):
        info = ItemInfo(dir_name, entry)
        if info.is_dir:
            dirs_seen += 1
        else:
            files_seen += 1
        if not info.has_target():
            TO_BE_PROCESSED.append(info)
        elif info.is_dir:
            sub_dirs, sub_files = process_dir(os.path.join(dir_name, info.name))
            dirs_seen += sub_dirs
            files_seen += sub_files
    return dirs_seen, files_seen
def process_root_dir(source_root, target_root, cmd_type):
    """Point the module globals at the given roots, then scan the whole tree."""
    global SOURCE_ROOT
    global TARGET_ROOT
    global CMD_TYPE
    SOURCE_ROOT, TARGET_ROOT, CMD_TYPE = source_root, target_root, cmd_type
    dirs_count, files_count = process_dir('')
    # process_dir() does not count the root directory itself.
    print('Directories checked: {}, files checked: {}'.format(dirs_count + 1, files_count))
def run_check_source():
    """List source items that have no counterpart in the backup tree."""
    printc('\nChecking source directory...', Colors.HEADER)
    process_root_dir(ARGS.source_dir, ARGS.target_dir, CMD_TYPE_CP)
    if not TO_BE_PROCESSED:
        printc('All files already backuped', Colors.OKGREEN)
        return
    print('These items should be backuped ({}):'.format(len(TO_BE_PROCESSED)))
    for pending in TO_BE_PROCESSED:
        print('  ' + pending.get_report())
def run_check_target():
    """List backed-up items whose source no longer exists (stale backups)."""
    printc('\nChecking target directory...', Colors.HEADER)
    # Roots are swapped: the target tree is scanned against the source tree.
    process_root_dir(ARGS.target_dir, ARGS.source_dir, CMD_TYPE_RM)
    if not TO_BE_PROCESSED:
        printc('All backuped items have corresponding sources', Colors.OKGREEN)
        return
    print('These items are backuped but not found in the source directory ({}):'.format(len(TO_BE_PROCESSED)))
    for stale in TO_BE_PROCESSED:
        print('  ' + stale.get_report())
#-----------------------------------------------------------------------
if __name__ == '__main__':
    print_welcome()
    # Validate both roots before any scanning starts.
    print('Source directory: ' + textc(ARGS.source_dir, Colors.BOLD))
    if not os.path.isdir(ARGS.source_dir):
        print_error_and_exit('Source directory does not exist')
    print('Target directory: ' + textc(ARGS.target_dir, Colors.BOLD))
    if not os.path.isdir(ARGS.target_dir):
        print_error_and_exit('Target directory does not exist')
    if ARGS.mode == 'CHECK':
        # Full check: source vs target, then target vs source.
        run_check_source()
        # Reset the shared queue between the two passes.
        TO_BE_PROCESSED = []
        run_check_target()
    elif ARGS.mode == 'CHECK_SOURCE':
        run_check_source()
    elif ARGS.mode == 'CHECK_TARGET':
        run_check_target()
    else:
        print_error_and_exit('Unknown run mode ' + ARGS.mode)
    print('')
|
import bottleneck as bn
import numpy as np
from scipy import sparse
def NDCG_binary_at_k_batch(X_pred, heldout_batch, k=100, input_batch=None, normalize=True):
    '''
    Normalized discounted cumulative gain@k for binary relevance.

    ASSUMPTIONS: all the 0's in heldout_data indicate 0 relevance.
    If normalize is set to False, then we actually return DCG, not NDCG.

    :param X_pred: (users, items) score matrix. NOTE: mutated in place when
        input_batch is given (seen items are masked to -inf).
    :param heldout_batch: (users, items) binary relevance, dense or scipy sparse
    :param k: cutoff rank (must be < number of items)
    :param input_batch: optional matrix of already-seen items to exclude
    :param normalize: divide DCG by the ideal DCG when True
    :return: float32 array of per-user (N)DCG@k
    '''
    if input_batch is not None:
        # Mask out items the user has already interacted with.
        X_pred[input_batch.nonzero()] = -np.inf
    batch_users = X_pred.shape[0]
    # Top-k selection. np.argpartition replaces the former bottleneck (bn)
    # dependency with identical semantics: the k largest scores land in the
    # first k positions, unordered.
    idx_topk_part = np.argpartition(-X_pred, k, axis=1)
    topk_part = X_pred[np.arange(batch_users)[:, np.newaxis],
                       idx_topk_part[:, :k]]
    # Sort the k candidates so idx_topk is in descending score order.
    idx_part = np.argsort(-topk_part, axis=1)
    idx_topk = idx_topk_part[np.arange(batch_users)[:, np.newaxis], idx_part]
    # Discount template: 1 / log2(rank + 1) for ranks 1..k.
    tp = 1. / np.log2(np.arange(2, k + 2))
    maybe_sparse_top_results = heldout_batch[np.arange(batch_users)[:, np.newaxis], idx_topk]
    try:
        top_results = maybe_sparse_top_results.toarray()
    except AttributeError:
        # Already a dense ndarray.
        top_results = maybe_sparse_top_results
    try:
        number_non_zero = heldout_batch.getnnz(axis=1)
    except AttributeError:
        # Dense fallback: count positive entries per row.
        number_non_zero = ((heldout_batch > 0) * 1).sum(axis=1)
    # Sum the discounts of the relevant items actually retrieved.
    DCG = (top_results * tp).sum(axis=1)
    # Ideal DCG sums the discounts over however many relevant items exist (<= k).
    IDCG = np.array([(tp[:min(n, k)]).sum()
                     for n in number_non_zero])
    # Guard against users with zero heldout items (IDCG would be 0).
    IDCG = np.maximum(0.1, IDCG)
    result = (DCG / IDCG) if normalize else DCG
    return result.astype(np.float32)
def Recall_at_k_batch(X_pred, heldout_batch, k=100, input_batch=None):
    """
    Recall@k for binary relevance.

    :param X_pred: (users, items) score matrix. NOTE: mutated in place when
        input_batch is given (seen items are masked to -inf).
    :param heldout_batch: (users, items) relevance, dense or scipy sparse
    :param k: cutoff rank (must be < number of items)
    :param input_batch: optional matrix of already-seen items to exclude
    :return: float32 array of per-user recall@k, normalized by min(k, #relevant)
    """
    if input_batch is not None:
        X_pred[input_batch.nonzero()] = -np.inf
    batch_users = X_pred.shape[0]
    # np.argpartition replaces the former bottleneck (bn) dependency with
    # identical semantics.
    idx = np.argpartition(-X_pred, k, axis=1)
    X_pred_binary = np.zeros_like(X_pred, dtype=bool)
    X_pred_binary[np.arange(batch_users)[:, np.newaxis], idx[:, :k]] = True
    X_true_binary = (heldout_batch > 0)
    try:
        X_true_binary = X_true_binary.toarray()
    except AttributeError:
        # Already dense.
        pass
    tmp = (np.logical_and(X_true_binary, X_pred_binary).sum(axis=1)).astype(
        np.float32)
    # Denominator is clipped to at least 1 so empty heldout rows yield 0.
    recall = tmp / np.maximum(np.minimum(k, X_true_binary.sum(axis=1)), 1)
    return recall.astype(np.float32)
def average_precision_at_k(scores, ground_truth, k=100):
    """
    Average precision at k for a single user.

    Assumes that ground-truth is 0 for false and 1 for true. This rests heavily on that.

    :param scores: 1-D array of predicted scores
    :param ground_truth: 1-D binary relevance array, same shape as scores
    :param k: cutoff rank; clipped to len(ground_truth)
    :return: AP@k as a float (0.0 when there are no relevant items, by convention)
    """
    assert scores.shape == ground_truth.shape
    assert len(scores.shape) == 1
    if len(ground_truth) < k:
        k = len(ground_truth)
    total_num_good = np.sum(ground_truth)
    if total_num_good < 1:
        # No relevant items at all: define AP as 0. Just convention.
        return 0.0
    # np.argpartition replaces the former bottleneck (bn) dependency.
    # Partitioning on the whole length is invalid (kth must be < n), so
    # short-circuit to a plain index range in that case.
    if k == len(ground_truth):
        idx_topk_part = np.arange(k)
    else:
        idx_topk_part = np.argpartition(-1 * scores, k)
    topk_part = scores[idx_topk_part[:k]]
    idx_part = np.argsort(-topk_part)
    top_k_sorted_indices = idx_topk_part[idx_part]
    running_score = 0.0
    num_good_seen = 0.0
    for i in range(k):
        ranked_k_index = top_k_sorted_indices[i]
        if ground_truth[ranked_k_index]:
            # Precision@(i+1) is accumulated at every relevant hit.
            num_good_seen += 1
            precision = num_good_seen / (i + 1.0)
            running_score += precision
    # Normalize by the best achievable number of hits within k.
    recall_scaler = min(total_num_good, k)
    return running_score / recall_scaler
def average_precision_at_k_batch(X_pred, heldout_batch, k=100, input_batch=None):
    """Row-wise AP@k over a batch of users; returns a float32 array."""
    if input_batch is not None:
        # Exclude already-seen items from the ranking (mutates X_pred).
        X_pred[input_batch.nonzero()] = -np.inf
    assert X_pred.shape == heldout_batch.shape
    assert len(X_pred.shape) == 2
    per_user_ap = [average_precision_at_k(row_scores, row_truth, k=k)
                   for row_scores, row_truth in zip(X_pred, heldout_batch)]
    return np.asarray(per_user_ap, dtype=np.float32)
|
<reponame>betochimas/cugraph<filename>python/cugraph/cugraph/gnn/graph_store.py
# Copyright (c) 2022, <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
import cugraph
from cugraph.experimental import PropertyGraph
from cugraph.community.egonet import batched_ego_graphs
from cugraph.utilities.utils import sample_groups
import numpy as np
class CuGraphStore:
    """
    A wrapper around a cuGraph Property Graph that
    then adds functions to basically match the DGL GraphStorage API.
    This is not a full duck-types match to a DGL GraphStore.  This class
    return cuGraph types and had additional functional arguments.
    For true integration with DGL, a second class would need to be written
    in DGL that handles the conversion to other types, like DGLGraph, and
    handles the extra arguments.
    homogeneous graphs, graphs with no attributes - use Property Graph
    hetrogeneous graphs - use PropertyGraph
    """
    @property
    def ndata(self):
        # Vertex property table of the wrapped PropertyGraph.
        # NOTE(review): reaches into a private attribute of PropertyGraph;
        # may break with cugraph upgrades.
        return self.__G._vertex_prop_dataframe
    @property
    def edata(self):
        # Edge property table of the wrapped PropertyGraph (same caveat).
        return self.__G._edge_prop_dataframe
    @property
    def gdata(self):
        # The wrapped PropertyGraph itself.
        return self.__G
    def __init__(self, graph):
        # Only PropertyGraph instances are supported as the backing store.
        if isinstance(graph, PropertyGraph):
            self.__G = graph
        else:
            raise ValueError("graph must be a PropertyGraph")
    ######################################
    # Utilities
    ######################################
    @property
    def num_vertices(self):
        return self.__G.num_vertices
    @property
    def num_edges(self):
        return self.__G.num_edges
    def get_vertex_ids(self):
        return self.__G.vertices_ids()
    ######################################
    # Sampling APIs
    ######################################
    def sample_neighbors(self,
                         nodes,
                         fanout=-1,
                         edge_dir='in',
                         prob=None,
                         replace=False):
        """
        Sample neighboring edges of the given nodes and return the subgraph.

        Parameters
        ----------
        nodes : array (single dimension)
            Node IDs to sample neighbors from.
        fanout : int
            The number of edges to be sampled for each node on each edge type.
        edge_dir : str {"in" or "out"}
            Determines whether to sample inbound or outbound edges.
            Can take either in for inbound edges or out for outbound edges.
        prob : str
            Feature name used as the (unnormalized) probabilities associated
            with each neighboring edge of a node. Each feature must be a
            scalar. The features must be non-negative floats, and the sum of
            the features of inbound/outbound edges for every node must be
            positive (though they don't have to sum up to one). Otherwise,
            the result will be undefined. If not specified, sample uniformly.
        replace : bool
            If True, sample with replacement.

        Returns
        -------
        CuPy array
            The sampled arrays for bipartite graph.
        """
        # NOTE(review): fanout/edge_dir/prob/replace are accepted for DGL API
        # compatibility, but only fanout is used below -- confirm intent.
        num_nodes = len(nodes)
        current_seeds = nodes.reindex(index=np.arange(0, num_nodes))
        _g = self.__G.extract_subgraph(create_using=cugraph.Graph,
                                       allow_multi_edges=True)
        # 1-hop ego graphs around every seed, concatenated with offsets.
        ego_edge_list, seeds_offsets = batched_ego_graphs(_g,
                                                          current_seeds,
                                                          radius=1)
        # filter and get a certain size neighborhood
        # Step 1
        # Get Filtered List of ego_edge_list corresposing to current_seeds
        # We filter by creating a series of destination nodes
        # corresponding to the offsets and filtering non matching vallues
        seeds_offsets_s = cudf.Series(seeds_offsets).values
        offset_lens = seeds_offsets_s[1:] - seeds_offsets_s[0:-1]
        dst_seeds = current_seeds.repeat(offset_lens)
        dst_seeds.index = ego_edge_list.index
        filtered_list = ego_edge_list[ego_edge_list["dst"] == dst_seeds]
        # Step 2
        # Sample Fan Out
        # for each dst take maximum of fanout samples
        filtered_list = sample_groups(filtered_list,
                                      by="dst",
                                      n_samples=fanout)
        return filtered_list['dst'].values, filtered_list['src'].values
    def node_subgraph(self,
                      nodes=None,
                      create_using=cugraph.Graph,
                      directed=False,
                      multigraph=True):
        """
        Return a subgraph induced on the given nodes.

        A node-induced subgraph is a graph with edges whose endpoints are both
        in the specified node set.

        Parameters
        ----------
        nodes : Tensor
            The nodes to form the subgraph.

        Returns
        -------
        cuGraph
            The sampled subgraph with the same node ID space with the original
            graph.
        """
        # expr="(_SRC in nodes) | (_DST_ in nodes)"
        _g = self.__G.extract_subgraph(
            create_using=cugraph.Graph(directed=directed),
            allow_multi_edges=multigraph)
        if nodes is None:
            # No node filter: return the whole extracted graph.
            return _g
        else:
            _n = cudf.Series(nodes)
            _subg = cugraph.subgraph(_g, _n)
            return _subg
    def egonet(self, nodes, k):
        """Return the k-hop egonet of the given nodes.

        A k-hop egonet of a node is the subgraph induced by the k-hop neighbors
        of the node.

        Parameters
        ----------
        nodes : single dimension array
            The center nodes of the egonets.

        Returns
        -------
        ego_edge_lists : cudf.DataFrame
            GPU data frame containing all induced sources identifiers,
            destination identifiers, edge weights
        seeds_offsets: cudf.Series
            Series containing the starting offset in the returned edge list
            for each seed.
        """
        _g = self.__G.extract_subgraph(create_using=cugraph.Graph,
                                       allow_multi_edges=True)
        ego_edge_list, seeds_offsets = batched_ego_graphs(_g, nodes, radius=k)
        return ego_edge_list, seeds_offsets
    def randomwalk(self,
                   nodes,
                   length,
                   prob=None,
                   restart_prob=None):
        """
        Perform randomwalks starting from the given nodes and return the
        traces.

        A k-hop egonet of a node is the subgraph induced by the k-hop
        neighbors of the node.

        Parameters
        ----------
        nodes : single dimension array
            The nodes to start the walk.
        length : int
            Walk length.
        prob : str
            Feature name used as the (unnormalized) probabilities associated
            with each neighboring edge of a node. Each feature must be a
            scalar.
            The features must be non-negative floats, and the sum of the
            features of inbound/outbound edges for every node must be positive
            (though they don't have to sum up to one). Otherwise, the result
            will be undefined. If not specified, pick the next stop uniformly.
        restart_prob : float
            Probability to terminate the current trace before each transition.

        Returns
        -------
        traces : Tensor
            A 2-D tensor of shape (len(nodes), length + 1). traces[i] stores
            the node IDs reached by the randomwalk starting from nodes[i]. -1
            means the walk has stopped.
        """
        # NOTE(review): prob and restart_prob are accepted for DGL API
        # compatibility but not forwarded to cugraph.random_walks -- confirm.
        _g = self.__G.extract_subgraph(create_using=cugraph.Graph,
                                       allow_multi_edges=True)
        p, w, s = cugraph.random_walks(_g, nodes,
                                       max_depth=length, use_padding=True)
        return p, w, s
class CuFeatureStorage:
    """Storage for node/edge feature data.

    Either subclassing this class or implementing the same set of interfaces
    is fine. DGL simply uses duck-typing to implement its sampling pipeline.
    """
    def __getitem__(self, ids):
        """Fetch the features of the given node/edge IDs.

        Parameters
        ----------
        ids : Tensor
            Node or edge IDs.

        Returns
        -------
        Tensor
            Feature data stored in PyTorch Tensor.
        """
        pass
    async def async_fetch(self, ids, device):
        """Asynchronously fetch the features of the given node/edge IDs to the
        given device.

        Parameters
        ----------
        ids : Tensor
            Node or edge IDs.
        device : Device
            Device context.

        Returns
        -------
        Tensor
            Feature data stored in PyTorch Tensor.
        """
        # Default implementation uses synchronous fetch.
        features = self[ids]
        return features.to(device)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# GNU General Public License v3.0
#
# Permissions of this strong copyleft license are conditioned on making available
# complete source code of licensed works and modifications, which include larger works
# using a licensed work, under the same license. Copyright and license notices must be
# preserved. Contributors provide an express grant of patent rights.
#
# For more information on this, and how to apply and follow the GNU GPL, see:
# http://www.gnu.org/licenses
#
# (ɔ) <NAME> 2019
import errno
from statux._conversions import set_bytes
from statux._errors import ValueNotFoundError
_PROC_STAT = "/proc/net/dev"
_SYS_NET_PTH = "/sys/class/net/"
# Cache:
_last = None
_interfaces_checked = []
def _get_stat():
    """Parse /proc/net/dev into ``{interface: (rx_bytes, tx_bytes)}``."""
    with open(_PROC_STAT, "r") as file:
        lines = file.readlines()
    result = {}
    # The first two lines of /proc/net/dev are column headers; data follows.
    for line in lines[2:]:
        fields = line.split()
        # fields[0] is "<iface>:"; fields[1] and fields[9] are the
        # cumulative receive/transmit byte counters.
        result[fields[0][:-1]] = (int(fields[1]), int(fields[9]))
    return result
def _check_interface(interface: str, stat: dict):
    """Validate that *interface* exists in *stat*; cache positive results.

    Raises ValueNotFoundError for unknown interfaces; returns the name
    unchanged otherwise.
    """
    if interface in _interfaces_checked:
        return interface
    if interface not in stat:
        raise ValueNotFoundError(interface, _PROC_STAT, errno.ENODEV)
    _interfaces_checked.append(interface)
    return interface
def _get_bytes(interface: str, direction: int):
    """Return byte counter(s) for *interface* from /proc/net/dev.

    :param interface: Interface name
    :param direction: Download: 0, Upload: 1, Both: 2 (returns the tuple)
    """
    stat = _get_stat()
    _check_interface(interface, stat)
    # Direct O(1) lookup instead of scanning every interface; .get() keeps
    # the original behavior (None) in the unlikely case a previously cached
    # interface has since disappeared from /proc/net/dev.
    value = stat.get(interface)
    if value is None:
        return None
    return value if direction == 2 else value[direction]
def _set_delta(interface: str, interval=0.0):
    # Returns the average (download, upload) speed in bytes per second.
    # When interval > 0 (or on the very first call) it sleeps for the given
    # interval and measures across it; otherwise it measures against the
    # cached snapshot from the previous call (module-global ``_last``).
    from time import sleep, time
    global _last
    if _last is None or interval > 0.0:
        # Explicit measurement window: snapshot, wait, snapshot.
        old_stat = _get_stat()
        sleep(interval)
        elapsed = interval
    else:
        # Reuse the snapshot cached by the previous call.
        old_stat = _last[0]
        elapsed = round(time() - _last[1], 3)  # seconds (ms precision)
    new_stat = _get_stat()
    _check_interface(interface, new_stat)
    # Cache the fresh snapshot for the next zero-interval call.
    _last = new_stat, time()
    delta = new_stat[interface][0] - old_stat[interface][0], new_stat[interface][1] - old_stat[interface][1]
    # Guard against a zero elapsed time to avoid division by zero.
    return (0.0, 0.0) if not elapsed else (delta[0] / elapsed, delta[1] / elapsed)
def get_interfaces() -> list:
    """Returns a list with all network interfaces"""
    # Iterating a dict yields its keys; no manual accumulation loop needed.
    return list(_get_stat())
def get_address(interface: str) -> str:
    """Returns MAC address assigned to a network interface"""
    iface = _check_interface(interface, _get_stat())
    sysfs_path = "%s%s/%s" % (_SYS_NET_PTH, iface, "address")
    with open(sysfs_path, "r") as file:
        # Drop the trailing newline sysfs appends.
        return file.read()[:-1]
def get_state(interface: str) -> str:
    """Returns operational state of a network interface (up, down, unknown, dormant, etc)"""
    iface = _check_interface(interface, _get_stat())
    sysfs_path = "%s%s/%s" % (_SYS_NET_PTH, iface, "operstate")
    with open(sysfs_path, "r") as file:
        # Drop the trailing newline sysfs appends.
        return file.read()[:-1]
def download_bytes(interface: str, scale="bytes", precision=2):
    """Returns total bytes downloaded in the given interface
    :Params:
        :interface (str): Interface name
        :scale     (str): Chosen scale (bytes, KiB, MiB, GiB, TiB, kB, MB, GB, TB or auto)
        :precision (int): Number of rounding decimals
    """
    rx_total = _get_bytes(interface, 0)
    return set_bytes(rx_total, scale_in="bytes", scale_out=scale, precision=precision)
def upload_bytes(interface: str, scale="bytes", precision=2):
    """Returns total bytes uploaded in the given interface
    :Params:
        :interface (str): Interface name
        :scale     (str): Chosen scale (bytes, KiB, MiB, GiB, TiB, kB, MB, GB, TB or auto)
        :precision (int): Number of rounding decimals
    """
    tx_total = _get_bytes(interface, 1)
    return set_bytes(tx_total, scale_in="bytes", scale_out=scale, precision=precision)
def down_up_bytes(interface: str, scale="bytes", precision=2):
    """Returns a tuple with bytes down-uploaded in the given interface
    :Params:
        :interface (str): Interface name
        :scale     (str): Chosen scale (bytes, KiB, MiB, GiB, TiB, kB, MB, GB, TB or auto)
        :precision (int): Number of rounding decimals
    """
    rx_total, tx_total = _get_bytes(interface, 2)
    return set_bytes(rx_total, tx_total, scale_in="bytes", scale_out=scale, precision=precision)
def download_speed(interface: str, interval=0.0, scale="bytes", precision=2):
    """Returns average download speed per second in the given interface
    :Params:
        :interface (str): Interface name
        :interval  (float): Interval in seconds.
        :scale     (str): Chosen scale (bytes, KiB, MiB, GiB, TiB, kB, MB, GB, TB or auto)
        :precision (int): Number of rounding decimals
    """
    down, _ = _set_delta(interface, interval)
    return set_bytes(down, scale_in="bytes", scale_out=scale, precision=precision)
def upload_speed(interface: str, interval=0.0, scale="bytes", precision=2):
    """Returns average upload speed per second in the given interface
    :Params:
        :interface (str): Interface name
        :interval  (float): Interval in seconds.
        :scale     (str): Chosen scale (bytes, KiB, MiB, GiB, TiB, kB, MB, GB, TB or auto)
        :precision (int): Number of rounding decimals
    """
    _, up = _set_delta(interface, interval)
    return set_bytes(up, scale_in="bytes", scale_out=scale, precision=precision)
def down_up_speed(interface: str, interval=0.0, scale="bytes", precision=2):
    """Returns a tuple with average download-upload speed per second in the given interface
    :Params:
        :interface (str): Interface name
        :interval  (float): Interval in seconds.
        :scale     (str): Chosen scale (bytes, KiB, MiB, GiB, TiB, kB, MB, GB, TB or auto)
        :precision (int): Number of rounding decimals
    """
    down, up = _set_delta(interface, interval)
    return set_bytes(down, up, scale_in="bytes", scale_out=scale, precision=precision)
|
# -*- coding: utf-8 -*-
import json
import math
class RasterStatsError(Exception):
    """Base exception for raster-stats processing errors."""
    pass
class OGRError(Exception):
    """Raised when OGR cannot open or read a vector data source."""
    pass
def bbox_to_pixel_offsets(gt, bbox):
    """Convert a georeferenced bounding box into raster pixel offsets.

    *gt* is a GDAL geotransform tuple (origin_x, pixel_w, _, origin_y, _,
    pixel_h); *bbox* is (xmin, ymin, xmax, ymax).  Returns the window as
    (x_offset, y_offset, x_size, y_size).
    """
    origin_x, origin_y = gt[0], gt[3]
    pixel_w, pixel_h = gt[1], gt[5]
    # floor/ceil so the window fully covers the bbox at pixel resolution.
    x_start = int(math.floor((bbox[0] - origin_x) / pixel_w))
    x_stop = int(math.ceil((bbox[2] - origin_x) / pixel_w))
    # Note: y uses bbox max first because pixel_h is typically negative
    # (north-up rasters).
    y_start = int(math.floor((bbox[3] - origin_y) / pixel_h))
    y_stop = int(math.ceil((bbox[1] - origin_y) / pixel_h))
    return (x_start, y_start, x_stop - x_start, y_stop - y_start)
def raster_extent_as_bounds(gt, size):
    """Return the raster's extent as (xmin, ymin, xmax, ymax).

    *gt* is a GDAL geotransform; *size* is (cols, rows).  Assumes a
    north-up raster where gt[5] is negative, so gt[3] is the max y.
    """
    xmin = gt[0]
    ymin = gt[3] + gt[5] * size[1]
    xmax = gt[0] + gt[1] * size[0]
    ymax = gt[3]
    return (xmin, ymin, xmax, ymax)
def feature_to_geojson(feature):
    """ This duplicates the feature.ExportToJson ogr method
    but is safe across gdal versions since it was fixed only in 1.8+
    see http://trac.osgeo.org/gdal/ticket/3870"""
    geom = feature.GetGeometryRef()
    if geom is not None:
        # Round-trip through OGR's JSON export to get a plain dict.
        geom_json_string = geom.ExportToJson()
        geom_json_object = json.loads(geom_json_string)
    else:
        # Geometry-less features are legal GeoJSON ("geometry": null).
        geom_json_object = None
    output = {'type':'Feature',
              'geometry': geom_json_object,
              'properties': {}
             }
    fid = feature.GetFID()
    # NOTE(review): truthiness check skips FID 0, which is a valid OGR
    # feature id -- confirm whether `fid is not None`/`fid >= 0` was meant.
    if fid:
        output['id'] = fid
    # Copy every attribute field into GeoJSON properties.
    for key in feature.keys():
        output['properties'][key] = feature.GetField(key)
    return output
def shapely_to_ogr_type(shapely_type):
    """Map a shapely geometry type name to the matching OGR wkb constant.

    :param shapely_type: geometry type string, e.g. "Polygon"
    :raises TypeError: for unsupported geometry types
    """
    from osgeo import ogr
    if shapely_type == "Polygon":
        return ogr.wkbPolygon
    elif shapely_type == "LineString":
        return ogr.wkbLineString
    elif shapely_type == "MultiPolygon":
        return ogr.wkbMultiPolygon
    elif shapely_type == "MultiLineString":
        # Bug fix: previously returned ogr.wkbLineString, collapsing
        # multi-part lines to the single-part geometry type.
        return ogr.wkbMultiLineString
    raise TypeError("shapely type %s not supported" % shapely_type)
def parse_geo(thing):
    """ Given a python object, try to get a geo-json like mapping from it

    Tries, in order: __geo_interface__, WKT string, geojson-like mapping,
    geojson string, WKB.  Raises RasterStatsError if nothing matches.
    The order matters: each attempt silently falls through on its own
    expected failure type only.
    """
    from shapely.geos import ReadingError
    from shapely import wkt, wkb
    # object implementing geo_interface
    try:
        geo = thing.__geo_interface__
        return geo
    except AttributeError:
        pass
    # wkt
    try:
        shape = wkt.loads(thing)
        return shape.__geo_interface__
    except (ReadingError, TypeError):
        pass
    # geojson-like python mapping
    try:
        assert thing['type'] in ["Feature", "Point", "LineString", "Polygon",
                                 "MultiPoint", "MultiLineString", "MultiPolygon"]
        return thing
    except (AssertionError, TypeError):
        pass
    # geojson string
    try:
        maybe_geo = json.loads(thing)
        assert maybe_geo['type'] in ["Feature", "Point", "LineString", "Polygon",
                                     "MultiPoint", "MultiLineString", "MultiPolygon"]
        return maybe_geo
    except (ValueError, AssertionError):
        pass
    # wkb
    try:
        shape = wkb.loads(thing)
        return shape.__geo_interface__
    except (ReadingError, TypeError):
        pass
    raise RasterStatsError("Can't parse %s as a geo-like object" % thing)
def get_ogr_ds(vds):
    """Open *vds* (a path/connection string) as an OGR data source.

    Raises OGRError if the argument is not a string or OGR cannot open it.
    """
    from osgeo import ogr
    # NOTE(review): `basestring` is Python-2-only; under Python 3 this line
    # raises NameError -- confirm the target interpreter before porting.
    if not isinstance(vds, basestring):
        raise OGRError("OGR cannot open %r: not a string" % vds)
    ds = ogr.Open(vds)
    if not ds:
        raise OGRError("OGR cannot open %r" % vds)
    return ds
def ogr_records(vector, layer_num=0):
    """Yield geojson-like feature dicts from an OGR-readable vector source."""
    ds = get_ogr_ds(vector)
    layer = ds.GetLayer(layer_num)
    for idx in range(layer.GetFeatureCount()):
        yield feature_to_geojson(layer.GetFeature(idx))
def geo_records(vectors):
    """Lazily parse each item of *vectors* into a geojson-like mapping."""
    for item in vectors:
        yield parse_geo(item)
def get_features(vectors, layer_num=0):
    """Normalize *vectors* into (features_iter, strategy).

    Accepts an OGR path, a WKT/WKB/geojson string, an object exposing
    __geo_interface__, a geojson-like mapping, or an iterable of any of
    those.  strategy is "ogr", "single_geo", or "iter_geo".
    NOTE(review): `basestring` is Python-2-only -- see get_ogr_ds.
    """
    if isinstance(vectors, basestring):
        try:
            # either an OGR layer ...
            get_ogr_ds(vectors)
            features_iter = ogr_records(vectors, layer_num)
            strategy = "ogr"
        except OGRError:
            # ... or a single string to be parsed as wkt/wkb/json
            feat = parse_geo(vectors)
            features_iter = [feat]
            strategy = "single_geo"
    elif hasattr(vectors, '__geo_interface__'):
        # ... or an single object
        feat = parse_geo(vectors)
        features_iter = [feat]
        strategy = "single_geo"
    elif isinstance(vectors, dict):
        # ... or an python mapping
        feat = parse_geo(vectors)
        features_iter = [feat]
        strategy = "single_geo"
    else:
        # ... or an iterable of objects
        features_iter = geo_records(vectors)
        strategy = "iter_geo"
    return features_iter, strategy
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 1 11:34:39 2018
@author: MaggieYC_Pang
"""
import sys
sys.path.append("../")
from mongodb_api import mongodb_api
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def moving_func(ip_list, step, func=np.mean, arg=None):
    """Apply *func* over a sliding window of *step* consecutive items.

    Returns a list with ``len(ip_list) - step`` results (empty when the
    input is shorter than the window).

    :param ip_list: input sequence
    :param step: window size
    :param func: aggregation applied to each window (default ``np.mean``)
    :param arg: optional second positional argument forwarded to *func*
    """
    op_list = []
    # range() instead of slicing avoids copying the tail of the list just
    # to count iterations.
    for start in range(len(ip_list) - step):
        window = ip_list[start:start + step]
        # Only forward ``arg`` when it was actually given, so callables
        # taking a single argument (e.g. ``len``) work too.  Previously
        # ``None`` was always passed positionally, which broke them.
        op_list.append(func(window) if arg is None else func(window, arg))
    return op_list
class wifi_diag_api:
    """Helper around WiFi-diagnostics documents stored in MongoDB.

    Builds a metric schema (``label_list``) describing, per monitoring
    topic, which values are ML inputs/outputs and how list-valued metrics
    are aggregated, then exposes helpers that turn stored documents into
    pandas DataFrames for plotting and classification.
    """

    def __init__(self):
        label_list = []
        label_index_dict = {}
        topicdata_dict = {}
        # ============================== ML OUTPUT ===============================
        label_list.append({"Name":"Delay", "Topic":"Ping", "MLType":"Out", "Process":[np.mean, np.std, len]})
        label_list.append({"Name":"Tput", "Topic":"Iperf", "MLType":"Out", "Process":[np.mean]})
        label_list.append({"Name":"Jitter", "Topic":"Iperf", "MLType":"Out", "Process":[np.mean]})
        label_list.append({"Name":"Loss", "Topic":"Iperf", "MLType":"Out", "Process":[np.mean]})
        label_list.append({"Name":"Tx_bitrate", "Topic":"Stationinfo", "MLType":"Out"})
        label_list.append({"Name":"Rx_bitrate", "Topic":"Stationinfo", "MLType":"Out"})
        label_list.append({"Name":"Signal", "Topic":"Stationinfo", "MLType":"Out"})
        label_list.append({"Name":"FER", "Topic":"Stationinfo", "MLType":"Out"})
        # ============================== ML INPUT ===============================
        label_list.append({"Name":"SS_Sigval", "Topic":"Spectralscan", "MLType":"In", "Process":[np.array]})
        label_list.append({"Name":"SS_Sigval_Std", "Topic":"Spectralscan", "MLType":"In", "Process":[np.array]})
        label_list.append({"Name":"SS_Portion", "Topic":"Spectralscan", "MLType":"In", "Process":[np.array]})
        label_list.append({"Name":"SS_Count", "Topic":"Spectralscan", "MLType":"In", "Process":[np.sum]})
        label_list.append({"Name":"SS_Rssi", "Topic":"Spectralscan", "MLType":"In", "Process":[np.mean]})
        label_list.append({"Name":"SS_Noise", "Topic":"Spectralscan", "MLType":"In", "Process":[np.mean]})
        label_list.append({"Name":"Busy", "Topic":"Survey", "MLType":"In"})
        label_list.append({"Name":"Noise", "Topic":"Survey", "MLType":"In"})
        label_list.append({"Name":"Rcv", "Topic":"Survey", "MLType":"In"})
        label_list.append({"Name":"Tx", "Topic":"Survey", "MLType":"In"})
        label_list.append({"Name":"FCSError", "Topic":"Statistics", "MLType":"In"})
        ERR_list = ["CRC-ERR", "LENGTH-ERR", "PHY-ERR", "SPECTRAL"] # USEFUL
        # ERR_list = ["CRC-ERR", "DECRYPT-BUSY-ERR", "DECRYPT-CRC-ERR", "LENGTH-ERR", "MIC-ERR", "OOM-ERR", "PHY-ERR", "POST-DELIM-CRC-ERR", "PRE-DELIM-CRC-ERR", "RATE-ERR", "SPECTRAL"]
        for data in ERR_list:
            label_list.append({"Name":data, "Topic":"ath9kERR", "MLType":"In"})
        # ERR_list = ["chan_idle_dur", "chan_idle_dur_valid", "dcu_arb_state", "dcu_complete_state", "dcu_fp_state",
        #             "qcu_complete_state", "qcu_fetch_state", "qcu_stitch_state",
        #             "txfifo_dcu_num_0", "txfifo_dcu_num_1", "txfifo_valid_0", "txfifo_valid_1"]
        ERR_list = ["chan_idle_dur", "chan_idle_dur_valid"] #USEFUL
        for data in ERR_list:
            label_list.append({"Name":data, "Topic":"ath9kDMA", "MLType":"In"})
        # ERR_list = ["ANI_RESET", "CCK_ERRORS", "CCK_LEVEL", "FIR-STEP_DOWN", "FIR-STEP_UP", "INV_LISTENTIME", "MRC-CCK_OFF", "MRC-CCK_ON",
        #             "OFDM_ERRORS", "OFDM_LEVEL", "OFDM_WS-DET_OFF", "OFDM_WS-DET_ON", "SPUR_DOWN", "SPUR_UP"]
        ERR_list = ["CCK_ERRORS", "OFDM_ERRORS", "SPUR_DOWN", "SPUR_UP"] #USEFUL
        for data in ERR_list:
            label_list.append({"Name":data, "Topic":"ath9kANI", "MLType":"In"})
        # ============================== END ===============================
        # Build bidirectional name<->index lookup and group metric names
        # by topic, tagging them "single" (scalar) or "list" (aggregated).
        for labeldata in label_list:
            label_index_dict[labeldata["Name"]] = label_list.index(labeldata)
            label_index_dict[label_list.index(labeldata)] = labeldata["Name"]
            if(labeldata["Topic"] not in topicdata_dict):
                topicdata_dict[labeldata["Topic"]]=[]
            if("Process" not in labeldata):
                topicdata_dict[labeldata["Topic"]].append([labeldata["Name"], "single"])
            else:
                topicdata_dict[labeldata["Topic"]].append([labeldata["Name"], "list"])
        # =========================================================================================================
        # Human-readable suffixes for the aggregation callables above.
        process_name_dict={}
        process_name_dict[np.mean] = "mean"
        process_name_dict[np.std] = "std"
        process_name_dict[np.sum] = "sum"
        process_name_dict[np.array] = "array"
        process_name_dict[len] = "len"
        self.label_list = label_list
        self.process_name_dict = process_name_dict
        self.label_index_dict = label_index_dict
        self.topicdata_dict = topicdata_dict

    def GetDataList(self, dev, found_data, name, proc):
        """Extract metric *name* for device side *dev* ("AP"/"STA") from each
        document in *found_data*, applying *proc* when given.

        Missing metrics are encoded as -1.
        """
        retlist = []
        for data in found_data:
            target = data[dev]
            if(name not in target):
                retlist.append(-1)
            else:
                if(proc==None):
                    retlist.append(target[name])
                else:
                    retlist.append(proc(target[name]))
        return retlist

    def plot_all(self, mdb):
        """Plot a 10-sample moving average of every scalar metric for AP and
        STA and return the raw data as (APdf, STAdf) DataFrames.

        :param mdb: mongodb_api wrapper for the collection to read.
        """
        print("collection = " + mdb.get_full_name())
        found_data = mdb.find(key_value = {}, ftype='many')
        print("len(found_data) = " + str(len(found_data)))
        ML_data_AP = {}
        ML_data_STA = {}
        for labeldata in self.label_list:
            # Skip metrics not present in the first document's AP record.
            if(labeldata["Name"] not in found_data[0]["AP"]):
                continue
            if("Process" not in labeldata):
                ML_data_AP[labeldata["Name"]] = self.GetDataList("AP", found_data, labeldata["Name"], None)
            else:
                for proc in labeldata["Process"]:
                    ML_data_AP[labeldata["Name"] + '_' + self.process_name_dict[proc]] = self.GetDataList("AP", found_data, labeldata["Name"], proc)
            if("Process" not in labeldata):
                ML_data_STA[labeldata["Name"]] = self.GetDataList("STA", found_data, labeldata["Name"], None)
            else:
                for proc in labeldata["Process"]:
                    ML_data_STA[labeldata["Name"] + '_' + self.process_name_dict[proc]] = self.GetDataList("STA", found_data, labeldata["Name"], proc)
        for pkey in ML_data_AP:
            # Array-valued metrics (spectral scans) cannot be line-plotted.
            if("array" in pkey):
                continue
            plt.plot(moving_func(ML_data_AP[pkey],10), 'b.')
            plt.plot(moving_func(ML_data_STA[pkey],10), 'g.')
            plt.show()
            print("pkey: " + pkey)
        APdf = pd.DataFrame(ML_data_AP)
        STAdf = pd.DataFrame(ML_data_STA)
        return APdf, STAdf

    def create_df(self, mdb, step=1, func=np.mean, arg=None):
        """Build one DataFrame with "AP-"/"STA-" prefixed columns for every
        metric (aggregations applied per the schema).

        NOTE(review): *step*, *func* and *arg* are accepted but unused --
        confirm whether a moving-average pass was intended here.
        """
        print("collection = " + mdb.get_full_name())
        found_data = mdb.find(key_value = {}, ftype='many')
        print("len(found_data) = " + str(len(found_data)))
        ML_data = {}
        for labeldata in self.label_list:
            if(labeldata["Name"] not in found_data[0]["AP"]):
                continue
            if("Process" not in labeldata):
                ML_data["AP-" + labeldata["Name"]] = self.GetDataList("AP", found_data, labeldata["Name"], None)
            else:
                for proc in labeldata["Process"]:
                    ML_data["AP-" + labeldata["Name"] + '_' + self.process_name_dict[proc]] = self.GetDataList("AP", found_data, labeldata["Name"], proc)
            if("Process" not in labeldata):
                ML_data["STA-" + labeldata["Name"]] = self.GetDataList("STA", found_data, labeldata["Name"], None)
            else:
                for proc in labeldata["Process"]:
                    ML_data[ "STA-" + labeldata["Name"] + '_' + self.process_name_dict[proc]] = self.GetDataList("STA", found_data, labeldata["Name"], proc)
        df = pd.DataFrame(ML_data)
        return df

    def classification(self, ML_data):
        """Bucket AP delay statistics into classes.

        Each value is mapped to the index of the first threshold it falls
        below (thresholds are hard-coded per metric).  Also builds
        ``fclass``, a 5-bin histogram where bin 4 collects samples with
        too few ping replies (AP-Delay_len class < 4).

        Returns (class_df, fclass).
        """
        classify_dict={}
        classify_dict["AP-Delay_mean"] = [5,10,20]
        classify_dict["AP-Delay_len"] = [7,8,9,9.5,10]
        classify_result={}
        for ckey, cdata in classify_dict.items():
            target_list = ML_data[ckey]
            classify_result[ckey]=[]
            for target in target_list:
                # Linear scan: class index = first threshold exceeding target.
                index = 0
                while (index < len(cdata)):
                    if(target < cdata[index]):
                        break
                    index = index + 1
                classify_result[ckey].append(index)
        fclass = [0,0,0,0,0]
        for index in range(len(classify_result["AP-Delay_mean"])):
            if(classify_result["AP-Delay_len"][index] < 4):
                fclass[4] = fclass[4]+1
            else:
                fclass[classify_result["AP-Delay_mean"][index]] = fclass[classify_result["AP-Delay_mean"][index]]+1
        class_df = pd.DataFrame(classify_result)
        return class_df, fclass
<filename>salt/transport/table/public/pycrypto_rsa.py
# -*- coding: utf-8 -*-
'''
Manage RSA encryption via pycrypto
The keydata consists of the following:
pub: PEM encoded public key
priv: PEM encoded private key
'''
SEC_BACKEND = 'pycrypto_aes'
# Import pycrypto libs
import Crypto.Cipher
import Crypto.PublicKey
import Crypto.Signature # pylint: disable=E0611
import Crypto.Hash
import Crypto.Util.number
# Import table libs
import salt.transport.table
class Key(object):
    '''
    The management interface for rsa keys

    Wraps a pycrypto RSA key pair (or public-only key) and exposes
    sign/verify and chunked OAEP encrypt/decrypt.  Messages are framed as
    ``signature || ciphertext chunks``, each chunk ``enc_chunk_size`` bytes.
    NOTE(review): string handling (`''` accumulators, `+` on signature
    bytes) and `/` division assume Python 2 semantics -- under Python 3
    the size computations return floats; confirm the target interpreter.
    '''
    def __init__(self, keydata=None, **kwargs):
        self.kwargs = kwargs
        self.__generate(keydata)

    def __generate(self, keydata):
        '''
        Generate the pycrypto rsa object

        If *keydata* is given, reconstruct the key from its components
        (private or public-only); otherwise generate a fresh key pair.
        '''
        if keydata:
            if 'components' not in keydata:
                raise ValueError('Invalid keydata, no components')
            key = Crypto.PublicKey.RSA.construct(keydata['components'])
            if key.has_private():
                self.priv = key
                self.pub = key.publickey()
                self.sign_key = Crypto.Signature.PKCS1_PSS.new(self.priv)
                self.verify_key = Crypto.Signature.PKCS1_PSS.new(self.pub)
                self.decrypter = Crypto.Cipher.PKCS1_OAEP.new(self.priv)
            else:
                # Public-only key: can verify/encrypt but not sign/decrypt.
                self.pub = key
                self.verify_key = Crypto.Signature.PKCS1_PSS.new(self.pub)
            self.keydata = keydata
        else:
            self.priv = self._gen_key()
            self.pub = self.priv.publickey()
            self.sign_key = Crypto.Signature.PKCS1_PSS.new(self.priv)
            self.verify_key = Crypto.Signature.PKCS1_PSS.new(self.pub)
            self.keydata = self._gen_keydata(self.priv)
            self.decrypter = Crypto.Cipher.PKCS1_OAEP.new(self.priv)
        self.encrypter = Crypto.Cipher.PKCS1_OAEP.new(self.pub)
        self.max_msg_size = self.get_max_msg_size()
        self.enc_chunk_size = self.get_enc_chunk_size()

    def _gen_keydata(self, key):
        '''
        Return the keydata of a given key

        Serializes the key's numeric components plus a creation timestamp.
        '''
        keydata = {'components': []}
        for attr in key.keydata:
            keydata['components'].append(getattr(key, attr))
        keydata['ctime'] = salt.transport.table.now()
        return keydata

    def _gen_key(self):
        '''
        Generate an RSA key, ensure that it is no smaller than 2048 bits

        :raises ValueError: if kwargs request a key below 2048 bits
        '''
        size = self.kwargs.get('size', 2048)
        if size < 2048:
            raise ValueError('Key size too small')
        return Crypto.PublicKey.RSA.generate(size)

    def _string_chunks(self, msg, size, i=None):
        '''
        Yield the message in the sized chunks

        *i* is the starting offset (defaults to 0); used by decrypt to
        skip the signature prefix.
        '''
        if i is None:
            i = 0
        msg_len = len(msg)
        while i < msg_len:
            top = i + size
            if top > msg_len:
                top = msg_len
            yield msg[i:top]
            i = top

    def get_max_msg_size(self):
        '''
        Return the max size of a message chunk

        OAEP payload limit: modulus bytes - 2*hash_len - 2.
        '''
        return (Crypto.Util.number.size(self.pub.n) / 8) - 2 - (Crypto.Hash.SHA.digest_size * 2)

    def get_enc_chunk_size(self):
        '''
        Return the size of all encrypted chunks

        Each OAEP ciphertext chunk is exactly the modulus size in bytes.
        '''
        return Crypto.Util.number.size(self.pub.n) / 8

    def encrypt(self, pub, msg):
        '''
        Sign and encrypt a message

        Output framing: PSS signature (our key) followed by OAEP chunks
        encrypted with the recipient's public key *pub*.
        '''
        ret = ''
        hash_ = Crypto.Hash.SHA.new()
        hash_.update(msg)
        ret += self.sign_key.sign(hash_)
        for chunk in self._string_chunks(msg, pub._key.max_msg_size):
            ret += pub._key.encrypter.encrypt(chunk)
        return ret

    def decrypt(self, pub, msg):
        '''
        Decrypt the given message against the given public key

        Splits off the leading signature, decrypts the remaining chunks
        with our private key, then verifies the signature with the
        sender's public key *pub*.  Returns the plaintext or False.
        '''
        c_size = pub._key.get_enc_chunk_size()
        sig = msg[0:c_size]
        clear = ''
        # Start chunking at offset c_size to skip the signature prefix.
        for chunk in self._string_chunks(msg, c_size, c_size):
            clear += self.decrypter.decrypt(chunk)
        return pub._key.verify(sig + clear)

    def sign(self, msg):
        '''
        Sign a message

        Returns signature-prefixed message (sig || msg).
        '''
        hash_ = Crypto.Hash.SHA.new()
        hash_.update(msg)
        sig = self.sign_key.sign(hash_)
        return sig + msg

    def signature(self, msg):
        '''
        Return only signature string resulting from signing the msg
        '''
        hash_ = Crypto.Hash.SHA.new()
        hash_.update(msg)
        sig = self.sign_key.sign(hash_)
        return sig

    def verify(self, msg):
        '''
        Verify a message

        Expects sig || msg framing (signature is enc_chunk_size bytes).
        Returns the message body on success, False otherwise.
        '''
        sig = msg[0:self.enc_chunk_size]
        msg = msg[self.enc_chunk_size:]
        hash_ = Crypto.Hash.SHA.new()
        hash_.update(msg)
        if self.verify_key.verify(hash_, sig):
            return msg
        return False
|
import copy
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.utils import save_image
import numpy as np
from Evaluation.Reviewer import *
from Generative_Models.discriminator import Discriminator, Discriminator_Cifar
from Generative_Models.generator import Generator, Generator_Cifar
from log_utils import save_images
from utils import variable
from copy import deepcopy
from Classifiers.Cifar_Classifier import Cifar_Classifier
class GenerativeModel(object):
    """Base class for continual-learning generative models (GAN/VAE family).

    Owns the generator/discriminator pair, their optimizers, an expert
    classifier used to label generated samples, and helpers to sample,
    visualize, and build replay datasets per task.
    """
    def __init__(self, args):
        self.args = args
        # parameters
        self.epoch = args.epoch_G
        self.sample_num = 100
        self.batch_size = args.batch_size
        self.dataset = args.dataset
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type
        self.conditional = args.conditional
        self.seed = args.seed
        self.generators = []
        self.c_criterion = nn.NLLLoss()
        self.size_epoch = args.size_epoch
        self.BCELoss = nn.BCELoss()
        self.device = args.device
        self.verbose = args.verbose
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.data_dir = args.data_dir
        self.log_dir = args.log_dir
        self.gen_dir = args.gen_dir
        self.sample_dir = args.sample_dir
        self.task_type = args.task_type
        self.num_task = args.num_task
        self.num_classes = args.num_classes
        # Dataset-dependent latent size and image geometry.
        if self.dataset == 'mnist' or self.dataset == 'fashion':
            if self.model_name == 'VAE' or self.model_name == 'CVAE':
                self.z_dim = 20
            else:
                self.z_dim = 62
            self.input_size = 1
            self.size = 28
        elif self.dataset == 'cifar10':
            self.z_dim = 100
            self.input_size = 3
            self.size = 32
        if self.verbose:
            print("create G and D")
        if self.dataset=='cifar10':
            self.G = Generator_Cifar(self.z_dim, self.conditional)
            self.D = Discriminator_Cifar(self.conditional)
        else:
            self.G = Generator(self.z_dim, self.dataset, self.conditional, self.model_name)
            self.D = Discriminator(self.dataset, self.conditional, self.model_name)
        if self.verbose:
            print("create G and D 's optimizers")
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))
        if self.gpu_mode:
            self.G=self.G.cuda(self.device)
            self.D=self.D.cuda(self.device)
        if self.verbose:
            print('---------- Networks architecture -------------')
            utils.print_network(self.G)
            utils.print_network(self.D)
            print('-----------------------------------------------')
        # fixed noise
        #self.sample_z_ = variable(torch.rand((self.sample_num, self.z_dim, 1, 1)), volatile=True)
        self.sample_z_ = variable(self.random_tensor(self.sample_num, self.z_dim))
        # Expert classifier used to annotate generated samples.
        if self.dataset == 'mnist':
            self.Classifier = Mnist_Classifier(self.args)
        elif self.dataset == 'fashion':
            self.Classifier = Fashion_Classifier(self.args)
        elif self.dataset == 'cifar10':
            self.Classifier = Cifar_Classifier(self.args)
        if self.gpu_mode:
            self.Classifier.net = self.Classifier.net.cuda(self.device)
        self.expert = copy.deepcopy(self.Classifier)
        self.expert.load_expert()
        # Logs
        self.train_hist = {}
        self.train_hist['D_loss'] = []
        self.train_hist['G_loss'] = []
        self.train_hist['per_epoch_time'] = []
        self.train_hist['total_time'] = []
        # useful for all GAN: real/fake target labels for the BCE loss
        self.y_real_ = variable(torch.ones(self.batch_size, 1))
        self.y_fake_ = variable(torch.zeros(self.batch_size, 1))

    def test(self, predict, labels):
        """Return (correct_count, total) for a batch of predictions."""
        correct = 0
        pred = predict.data.max(1)[1]
        correct = pred.eq(labels.data).cpu().sum()
        return correct, len(labels.data)

    def random_tensor(self, batch_size, z_dim):
        # Uniform distribution
        return torch.rand((batch_size, z_dim, 1, 1))

    # produce sample from one generator for visual inspection of a generator during training
    def visualize_results(self, epoch, classe=None, fix=True):
        """Save a grid of generated samples for *epoch* (optionally per class).

        With fix=True the fixed noise self.sample_z_ is reused so images are
        comparable across epochs; otherwise fresh noise is drawn.
        """
        # NOTE(review): sample_size and index are computed but never used.
        sample_size=100
        # index allows, if there 5 task, to plot 2 classes for first task
        index = int(self.num_classes / self.num_task) * (classe + 1)
        self.G.eval()
        dir_path = self.result_dir
        if classe is not None:
            dir_path = self.result_dir + '/classe-' + str(classe)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
        image_frame_dim = int(np.floor(np.sqrt(self.sample_num)))
        if self.conditional:
            # One-hot labels cycling through all classes for the grid.
            y = torch.LongTensor(range(self.sample_num)) % self.num_classes
            y=y.view(self.sample_num, 1)
            y_onehot = torch.FloatTensor(self.sample_num, self.num_classes)
            y_onehot.zero_()
            y_onehot.scatter_(1, y, 1.0)
            y_onehot = variable(y_onehot)
        else:
            y_onehot = None
        if fix:
            """ fixed noise """
            if self.conditional:
                samples = self.G(self.sample_z_, y_onehot)
            else:
                samples = self.G(self.sample_z_)
        else:
            """ random noise """
            sample_z_ = variable(self.random_tensor(self.sample_num, self.z_dim), volatile=True)
            if self.conditional:
                samples = self.G(sample_z_, y_onehot)
            else:
                # NOTE(review): this branch feeds the *fixed* noise
                # self.sample_z_ even though fresh noise sample_z_ was just
                # drawn above -- looks like a bug; confirm it should be
                # self.G(sample_z_).
                samples = self.G(self.sample_z_)
        if self.input_size == 1:
            # Grayscale: save via the project's numpy-based grid helper.
            if self.gpu_mode:
                samples = samples.cpu().data.numpy()
            else:
                samples = samples.data.numpy()
            samples = samples.transpose(0, 2, 3, 1)
            save_images(samples[:image_frame_dim * image_frame_dim, :, :, :], [image_frame_dim, image_frame_dim],
                        dir_path + '/' + self.model_name + '_epoch%03d' % epoch + '.png')
        else:
            # Color: torchvision's save_image handles the grid directly.
            save_image(samples[:self.sample_num].data, dir_path + '/' + self.model_name + '_epoch%03d' % epoch + '.png',
                       padding=0)

    # produce sample from all classes and return a batch of images and label
    # if no task2generate are given we generate all labellize for all task
    # if task2generate and annotate == false we generate only for the actual task
    # if task2generate and annotate == true we generate only for all past tasks
    def sample(self, batch_size=100, task2generate=None, multi_annotation=False):
        """Generate a batch of images plus labels.

        Labels come either from the expert classifier (multi-task) or are
        set uniformly to *task2generate* (single current task).
        """
        self.G.eval()
        y = None
        z_ = self.random_tensor(batch_size, self.z_dim)
        output = self.G(variable(z_))
        if not (task2generate is None):
            self.expert.net.eval()
            if multi_annotation:
                y = self.expert.labelize(output, task2generate)
            else:# if we generate only from actual task
                y = torch.ones(batch_size, 1).long() * task2generate
                y = y.long()
        else: # if no task2generate specified
            # if we generate from all task
            y = self.expert.labelize(output, self.num_classes)
        return output.data, y

    # load a conditonal generator, encoders and discriminators
    def load_G(self, ind_task):
        """Restore the generator weights saved for task *ind_task*."""
        self.G.load_state_dict(
            torch.load(os.path.join(self.save_dir, self.model_name + '-' + str(ind_task) + '_G.pkl')))

    # save a generator in a given class
    def save_G(self, task):
        """Save only the generator weights, tagged with *task*."""
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        torch.save(self.G.state_dict(), os.path.join(self.save_dir, self.model_name + '-' + str(task) + '_G.pkl'))

    # save a generator, encoder and discriminator in a given class
    def save(self):
        """Save generator, discriminator, and the training history."""
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        torch.save(self.G.state_dict(), os.path.join(self.save_dir, self.model_name + '_G.pkl'))
        torch.save(self.D.state_dict(), os.path.join(self.save_dir, self.model_name + '_D.pkl'))
        with open(os.path.join(self.save_dir, self.model_name + '_history.pkl'), 'wb') as f:
            pickle.dump(self.train_hist, f)

    def train(self):
        # Put both networks in training mode.
        self.G.train()
        self.D.train()

    def eval(self):
        # Put both networks in evaluation mode.
        self.G.eval()
        self.D.eval()

    def generate_batch4Task(self, nb_sample_train, task2generate, multi_annotation):
        """Thin wrapper over sample() used by the data-loader builders."""
        return self.sample(batch_size=nb_sample_train, task2generate=task2generate, multi_annotation=multi_annotation)

    def create_data_loader(self, nb_sample_train, task2generate, multi_annotation):
        """Generate a batch and wrap it in the project's DataLoader."""
        c1 = 0
        c2 = 1
        tasks_tr = []
        x_tr, y_tr = self.generate_batch4Task(nb_sample_train, task2generate=task2generate,
                                              multi_annotation=multi_annotation)
        if self.gpu_mode:
            x_tr, y_tr = x_tr.cpu(), y_tr.cpu()
        # NOTE(review): view(-1, 784) hard-codes 28x28x1 images -- this would
        # break for cifar10 (32x32x3); confirm intended datasets.
        tasks_tr.append([(c1, c2), x_tr.clone().view(-1, 784), y_tr.clone().view(-1)])
        return DataLoader(tasks_tr, self.args)

    def generate_task(self, nb_sample_train, multi_annotation=False, classe2generate=None):
        """Generate *nb_sample_train* samples in chunks of 1000 and return
        them concatenated into a single DataLoader."""
        if nb_sample_train >= 1000:
            for i in range(int(nb_sample_train / 1000)):
                if i == 0:
                    data_loader = self.create_data_loader(1000, classe2generate, multi_annotation)
                else:
                    new_loader = self.create_data_loader(1000, classe2generate, multi_annotation)
                    data_loader.concatenate(new_loader)
            # here we generate the remaining samples
            if nb_sample_train % 1000 != 0:
                new_loader = self.create_data_loader(nb_sample_train % 1000, classe2generate, multi_annotation)
                data_loader.concatenate(new_loader)
        else:
            data_loader = self.create_data_loader(nb_sample_train, classe2generate, multi_annotation)
        return data_loader

    # This function generate a dataset for one class or for all class until ind_task included
    def generate_dataset(self, ind_task, nb_sample_per_task, one_task=True, Train=True, classe2generate=None):
        """Generate and save a replay dataset up to *ind_task*.

        Saves the data to gen_dir and a sample grid to sample_dir, then
        returns the resulting DataLoader.
        """
        # to generate 10 classes classe2generate is 9 as classes 0 to 9
        if classe2generate is not None:
            assert classe2generate <= self.num_classes
            if self.task_type != "disjoint":
                assert classe2generate == self.num_classes
        else:
            classe2generate = ind_task+1
        train_loader_gen=None
        if Train:
            path = os.path.join(self.gen_dir, 'train_Task_' + str(ind_task) + '.pt')
            path_samples = os.path.join(self.sample_dir, 'samples_train_' + str(ind_task) + '.png')
        else:
            path = os.path.join(self.gen_dir, 'test_Task_' + str(ind_task) + '.pt')
            path_samples = os.path.join(self.sample_dir, 'samples_test_' + str(ind_task) + '.png')
        # if we have only on task to generate
        if one_task or ind_task == 0: # generate only for the task ind_task
            train_loader_gen = self.generate_task(nb_sample_per_task, multi_annotation=False, classe2generate=classe2generate)
        else: # else case we generate for all previous task
            for i in range(ind_task): # we generate nb_sample_per_task * (ind_task+1) samples
                train_loader_ind = self.generate_task(nb_sample_per_task, multi_annotation=True, classe2generate=classe2generate)
                if i == 0:
                    train_loader_gen = deepcopy(train_loader_ind)
                else:
                    train_loader_gen.concatenate(train_loader_ind)
        # we save the concatenation of all generated with the actual task for train and test
        train_loader_gen.save(path)
        train_loader_gen.visualize_sample(path_samples, self.sample_num, [self.size, self.size, self.input_size])
        # return the train loader with all data
        return train_loader_gen # test_loader_gen # for instance we don't use the test set

    # this generation only works for Baseline, disjoint
    # we generate the dataset based on one generator by task to get normally the best generated dataset
    # can be used to generate train or test data
    def generate_best_dataset(self, ind_task, nb_sample_per_task, one_task=True, Train=True, classe2generate=None):
        """Incrementally build the "best" replay dataset: load the dataset
        saved for the previous task and append samples for the current one
        (assumes one dedicated generator per task, disjoint setting)."""
        # to generate 10 classes classe2generate is 9 as classes 0 to 9
        if classe2generate is not None:
            assert classe2generate <= self.num_classes
            if self.task_type != "disjoint":
                assert classe2generate == self.num_classes
        else:
            classe2generate = ind_task+1
        if Train:
            path = os.path.join(self.gen_dir, 'Best_train_Task_' + str(ind_task) + '.pt')
        else:
            path = os.path.join(self.gen_dir, 'Best_test_Task_' + str(ind_task) + '.pt')
        # if we have only on task to generate
        if ind_task == 0: # generate only for the task ind_task
            # we do not need automatic annotation since we have one generator by class
            previous_data_train = self.generate_task(nb_sample_per_task, multi_annotation=False, classe2generate=classe2generate)
            #previous_data_train = DataLoader(tasks_tr, self.args)
        else: # else we load the previous dataset and add the new data
            previous_path_train = os.path.join(self.gen_dir, 'Best_train_Task_' + str(ind_task - 1) + '.pt')
            previous_data_train = DataLoader(torch.load(previous_path_train), self.args)
            # we do not need automatic annotation since we have one generator by class
            train_loader_ind = self.generate_task(nb_sample_per_task, multi_annotation=False, classe2generate=classe2generate)
            previous_data_train.concatenate(train_loader_ind)
        # we save the concatenation of all generated with the actual task for train and test
        previous_data_train.save(path)
        # return nothing
|
<filename>src/beanmachine/graph/tests/graph_test.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
from beanmachine import graph
class TestBayesNet(unittest.TestCase):
    """Tests for the beanmachine.graph bindings: node/distribution validation,
    inference, and graph serialization."""

    def test_simple_dep(self):
        """A TABULAR distribution over a simplex constant can be sampled."""
        g = graph.Graph()
        c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))
        d1 = g.add_distribution(
            graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]
        )
        g.add_operator(graph.OperatorType.SAMPLE, [d1])

    def test_tabular(self):
        """Validation of TABULAR distribution parents (type, shape, arity)."""
        g = graph.Graph()
        c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))
        # negative test: a TABULAR distribution requires a simplex parent
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, []
            )
        self.assertTrue("must be COL_SIMPLEX" in str(cm.exception))
        g = graph.Graph()
        c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))
        var1 = g.add_operator(
            graph.OperatorType.SAMPLE,
            [
                g.add_distribution(
                    graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]
                )
            ],
        )
        var2 = g.add_operator(
            graph.OperatorType.SAMPLE,
            [
                g.add_distribution(
                    graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]
                )
            ],
        )
        # since the following has two parents it must have a tabular dist with
        # 3 dimensions in the tensor
        with self.assertRaises(ValueError) as cm:
            g.add_operator(
                graph.OperatorType.SAMPLE,
                [
                    g.add_distribution(
                        graph.DistributionType.TABULAR,
                        graph.AtomicType.BOOLEAN,
                        [c1, var1, var2],
                    )
                ],
            )
        self.assertTrue("expected 4 dims got 1" in str(cm.exception))
        # a 2x2 matrix supports one boolean parent
        c2 = g.add_constant_col_simplex_matrix(np.array([[0.6, 0.99], [0.4, 0.01]]))
        g.add_distribution(
            graph.DistributionType.TABULAR,
            graph.AtomicType.BOOLEAN,
            [c2, g.add_constant_bool(True)],
        )
        # non-boolean parents are rejected
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.TABULAR,
                graph.AtomicType.BOOLEAN,
                [c2, g.add_constant_natural(1)],
            )
        self.assertTrue("only supports boolean parents" in str(cm.exception))
        # a plain real matrix is not a simplex
        c3 = g.add_constant_real_matrix(np.array([1.1, -0.1]))
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c3]
            )
        self.assertTrue("must be COL_SIMPLEX" in str(cm.exception))
        # a 3-row simplex cannot parameterize a boolean outcome
        c4 = g.add_constant_col_simplex_matrix(np.array([0.6, 0.3, 0.1]))
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c4]
            )
        self.assertTrue("must have two rows" in str(cm.exception))

    def test_bernoulli(self):
        """Bernoulli: parent arity/type validation and a degenerate p=1 sample."""
        g = graph.Graph()
        c1 = g.add_constant_probability(1.0)
        c2 = g.add_constant_probability(0.8)
        # negative tests on number of parents
        # 0 parents not allowed
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, []
            )
        self.assertTrue(
            "Bernoulli distribution must have exactly one parent" in str(cm.exception)
        )
        # 2 parents not allowed
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1, c2]
            )
        self.assertTrue(
            "Bernoulli distribution must have exactly one parent" in str(cm.exception)
        )
        # 1 parent is OK
        d1 = g.add_distribution(
            graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c1]
        )
        # negative test on type of parent
        c3 = g.add_constant_natural(1)
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [c3]
            )
        self.assertTrue("must be a probability" in str(cm.exception))
        # negative test on value of parent
        with self.assertRaises(ValueError) as cm:
            g.add_constant_probability(1.1)
        self.assertTrue("must be between 0 and 1" in str(cm.exception))
        # p == 1.0, so the single sample must be True
        v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
        g.query(v1)
        samples = g.infer(1)
        self.assertEqual(type(samples[0][0]), bool)
        self.assertTrue(samples[0][0])
        means = g.infer_mean(1)
        self.assertEqual(len(means), 1, "exactly one node queried")

    def test_beta(self):
        """Beta: parent validation, sample type validation, and mean estimate."""
        g = graph.Graph()
        c1 = g.add_constant_pos_real(1.1)
        c2 = g.add_constant_pos_real(5.0)
        # negative tests on number of parents
        # 0 parents not allowed
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, []
            )
        self.assertTrue(
            "Beta distribution must have exactly two parents" in str(cm.exception)
        )
        # 1 parent not allowed
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c1]
            )
        self.assertTrue(
            "Beta distribution must have exactly two parents" in str(cm.exception)
        )
        # negative test on type of parent
        c3 = g.add_constant_bool(True)
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c3, c3]
            )
        self.assertTrue("must be positive real-valued" in str(cm.exception))
        # negative test on sample type
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.BETA, graph.AtomicType.REAL, [c1, c2]
            )
        self.assertTrue("Beta produces probability samples" in str(cm.exception))
        # 2 real-valued parents with probability sample type are OK
        d1 = g.add_distribution(
            graph.DistributionType.BETA, graph.AtomicType.PROBABILITY, [c1, c2]
        )
        # now let's draw some samples from the Beta distribution
        v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
        g.query(v1)
        samples = g.infer(1, graph.InferenceType.REJECTION)
        self.assertEqual(type(samples[0][0]), float)
        self.assertTrue(samples[0][0] > 0 and samples[0][0] < 1)
        # analytic mean of Beta(a, b) is a / (a + b)
        means = g.infer_mean(10000, graph.InferenceType.REJECTION)
        self.assertAlmostEqual(means[0], 1.1 / (1.1 + 5.0), 2, "beta mean")

    def test_binomial(self):
        """Binomial(10, 0.55): sample type, support bound, and mean near 5.5."""
        g = graph.Graph()
        c1 = g.add_constant_natural(10)
        c2 = g.add_constant_probability(0.55)
        d1 = g.add_distribution(
            graph.DistributionType.BINOMIAL, graph.AtomicType.NATURAL, [c1, c2]
        )
        v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
        g.query(v1)
        samples = g.infer(1, graph.InferenceType.REJECTION)
        self.assertEqual(type(samples[0][0]), int)
        self.assertTrue(samples[0][0] <= 10)
        means = g.infer_mean(10000, graph.InferenceType.REJECTION)
        self.assertTrue(means[0] > 5 and means[0] < 6)

    def test_categorical(self):
        """Categorical: parent/type validation, then sampled frequencies match the simplex."""
        g = graph.Graph()
        simplex = [0.5, 0.25, 0.125, 0.125]
        c1 = g.add_constant_col_simplex_matrix(np.array(simplex))
        # Negative test: Number of parents must be exactly one:
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, []
            )
        self.assertTrue(
            "Categorical distribution must have exactly one parent" in str(cm.exception)
        )
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c1, c1]
            )
        self.assertEqual(
            "Categorical distribution must have exactly one parent", str(cm.exception)
        )
        # Negative test: parent must be simplex:
        c3 = g.add_constant_natural(1)
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c3]
            )
        self.assertEqual(
            "Categorical parent must be a one-column simplex", str(cm.exception)
        )
        # Negative test: type must be natural
        with self.assertRaises(ValueError) as cm:
            g.add_distribution(
                graph.DistributionType.CATEGORICAL, graph.AtomicType.REAL, [c1]
            )
        self.assertEqual(
            "Categorical produces natural valued samples", str(cm.exception)
        )
        # Positive test:
        d1 = g.add_distribution(
            graph.DistributionType.CATEGORICAL, graph.AtomicType.NATURAL, [c1]
        )
        v1 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
        g.query(v1)
        num_samples = 10000
        # TODO: We use rejection sampling in this test because at present NMC
        # does not support inference over naturals. If inference over discrete
        # variables is important for BMG, we should create a Uniform Proposer
        # similar to how it's done in Bean Machine proper.
        samples = g.infer(
            num_samples=num_samples,
            algorithm=graph.InferenceType.REJECTION,
            seed=123,
            n_chains=1,
        )[0]
        # The distribution of the samples should closely match the simplex used to
        # generate them.
        histogram = [0, 0, 0, 0]
        for sample in samples:
            histogram[sample[0]] += 1
        self.assertAlmostEqual(simplex[0], histogram[0] / num_samples, delta=0.01)
        self.assertAlmostEqual(simplex[1], histogram[1] / num_samples, delta=0.01)
        self.assertAlmostEqual(simplex[2], histogram[2] / num_samples, delta=0.01)
        self.assertAlmostEqual(simplex[3], histogram[3] / num_samples, delta=0.01)

    def _create_graph(self):
        """Build the classic Rain/Sprinkler/GrassWet Bayes net; return (g, Rain, Sprinkler, GrassWet)."""
        g = graph.Graph()
        c1 = g.add_constant_col_simplex_matrix(np.array([0.8, 0.2]))
        c2 = g.add_constant_col_simplex_matrix(np.array([[0.6, 0.99], [0.4, 0.01]]))
        c3 = g.add_constant_col_simplex_matrix(
            np.transpose(np.array([[1, 0], [0.2, 0.8], [0.1, 0.9], [0.01, 0.99]]))
        )
        Rain = g.add_operator(
            graph.OperatorType.SAMPLE,
            [
                g.add_distribution(
                    graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c1]
                )
            ],
        )
        Sprinkler = g.add_operator(
            graph.OperatorType.SAMPLE,
            [
                g.add_distribution(
                    graph.DistributionType.TABULAR, graph.AtomicType.BOOLEAN, [c2, Rain]
                )
            ],
        )
        GrassWet = g.add_operator(
            graph.OperatorType.SAMPLE,
            [
                g.add_distribution(
                    graph.DistributionType.TABULAR,
                    graph.AtomicType.BOOLEAN,
                    [c3, Sprinkler, Rain],
                )
            ],
        )
        return g, Rain, Sprinkler, GrassWet

    def test_query(self):
        """Querying samples and constants is allowed; querying a distribution is not."""
        g, Rain, Sprinkler, GrassWet = self._create_graph()
        g.query(Rain)
        g.query(Sprinkler)
        g.query(GrassWet)
        g.infer(1)
        p = g.add_constant_probability(0.8)
        b = g.add_distribution(
            graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [p]
        )
        # Querying a constant is weird but allowed
        g.query(p)
        # But querying a distribution directly rather than a sample is
        # illegal:
        with self.assertRaises(ValueError) as cm:
            g.query(b)
        self.assertEqual(
            f"Query of node_id {b} expected a node of type 1 or 3 but is 2",
            str(cm.exception),
        )

    def test_to_dot(self):
        """to_dot() renders nodes, edges, observations, and queries in Graphviz form."""
        self.maxDiff = None
        g, Rain, Sprinkler, GrassWet = self._create_graph()
        g.query(Rain)
        g.query(Sprinkler)
        g.query(GrassWet)
        g.observe(GrassWet, True)
        observed = g.to_dot()
        expected = """
digraph "graph" {
  N0[label="simplex"];
  N1[label="simplex"];
  N2[label="simplex"];
  N3[label="Tabular"];
  N4[label="~"];
  N5[label="Tabular"];
  N6[label="~"];
  N7[label="Tabular"];
  N8[label="~"];
  N0 -> N3;
  N1 -> N5;
  N2 -> N7;
  N3 -> N4;
  N4 -> N5;
  N4 -> N7;
  N5 -> N6;
  N6 -> N7;
  N7 -> N8;
  O0[label="Observation"];
  N8 -> O0;
  Q0[label="Query"];
  N4 -> Q0;
  Q1[label="Query"];
  N6 -> Q1;
  Q2[label="Query"];
  N8 -> Q2;
}"""
        self.assertEqual(expected.strip(), observed.strip())

    def test_observe(self):
        """Observation rules: no duplicates, only SAMPLE/IID_SAMPLE nodes, removable."""
        g, Rain, Sprinkler, GrassWet = self._create_graph()
        g.observe(GrassWet, True)
        with self.assertRaises(ValueError) as cm:
            g.observe(GrassWet, True)
        self.assertTrue("duplicate observe for node" in str(cm.exception))
        g = graph.Graph()
        c1 = g.add_constant_probability(1.0)
        c2 = g.add_constant_probability(0.5)
        o1 = g.add_operator(graph.OperatorType.MULTIPLY, [c1, c2])
        d1 = g.add_distribution(
            graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [o1]
        )
        o2 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
        with self.assertRaises(ValueError) as cm:
            g.observe(o1, True)
        self.assertTrue(
            "only SAMPLE and IID_SAMPLE nodes may be observed" in str(cm.exception)
        )
        g.observe(o2, True)  # ok to observe this node
        with self.assertRaises(ValueError) as cm:
            g.observe(o2, False)
        self.assertTrue("duplicate observe" in str(cm.exception))
        # after removal the node may be observed again
        g.remove_observations()
        g.observe(o2, False)

    def test_inference(self):
        """Single-chain and parallel inference on the sprinkler net."""
        g, Rain, Sprinkler, GrassWet = self._create_graph()
        g.observe(GrassWet, True)
        qr = g.query(Rain)
        g.query(GrassWet)
        # Querying the same node twice is idempotent.
        self.assertEqual(g.query(Rain), qr)
        samples = g.infer(1)
        self.assertTrue(len(samples) == 1)
        # since we have observed grass wet is true the query should be true
        self.assertEqual(type(samples[0][1]), bool)
        self.assertTrue(samples[0][1])
        # test parallel inference
        samples_all = g.infer(num_samples=1, n_chains=2)
        self.assertTrue(len(samples_all) == 2)
        self.assertTrue(len(samples_all[0]) == 1)
        self.assertTrue(len(samples_all[1]) == 1)
        # chain 0 must reproduce the single-chain run
        self.assertEqual(samples[0][0], samples_all[0][0][0])
        self.assertEqual(samples[0][1], samples_all[0][0][1])
        self.assertEqual(type(samples_all[1][0][0]), bool)
        self.assertEqual(type(samples_all[1][0][1]), bool)
        self.assertTrue(samples_all[1][0][1])

    def test_infer_mean(self):
        """infer_mean over a deterministic p=1 Bernoulli returns 1.0 for all queries."""
        g = graph.Graph()
        c1 = g.add_constant_probability(1.0)
        op1 = g.add_operator(graph.OperatorType.MULTIPLY, [c1, c1])
        d1 = g.add_distribution(
            graph.DistributionType.BERNOULLI, graph.AtomicType.BOOLEAN, [op1]
        )
        op2 = g.add_operator(graph.OperatorType.SAMPLE, [d1])
        g.query(op1)
        g.query(op2)
        means = g.infer_mean(100)
        self.assertAlmostEqual(means[0], 1.0)
        self.assertAlmostEqual(means[1], 1.0)
        # test parallel inference
        means_all = g.infer_mean(num_samples=100, n_chains=2)
        self.assertTrue(len(means_all) == 2)
        self.assertAlmostEqual(means_all[0][0], 1.0)
        self.assertAlmostEqual(means_all[0][1], 1.0)
        self.assertAlmostEqual(means_all[1][0], 1.0)
        self.assertAlmostEqual(means_all[1][1], 1.0)

    def test_neg_real(self):
        """Negative-real constants: validation, to_string rendering, and ADD."""
        g = graph.Graph()
        with self.assertRaises(ValueError) as cm:
            g.add_constant_neg_real(1.25)
        self.assertTrue("neg_real must be <=0" in str(cm.exception))
        neg1 = g.add_constant_neg_real(-1.25)
        expected = """
Node 0 type 1 parents [ ] children [ ] negative real -1.25
"""
        self.assertEqual(g.to_string().strip(), expected.strip())
        add_negs = g.add_operator(graph.OperatorType.ADD, [neg1, neg1])
        g.query(add_negs)
        means = g.infer_mean(10)
        self.assertAlmostEqual(means[0], -2.5)
        samples = g.infer(10)
        self.assertAlmostEqual(samples[0][0], -2.5)

    def test_get_log_prob(self):
        """keep_log_prob=True records one log-prob trace of length num_samples per chain."""
        g, Rain, Sprinkler, GrassWet = self._create_graph()
        g.observe(GrassWet, True)
        g.query(Rain)
        g.query(GrassWet)
        conf = graph.InferConfig()
        conf.keep_log_prob = True
        g.infer(
            num_samples=10,
            algorithm=graph.InferenceType.GIBBS,
            seed=123,
            n_chains=2,
            infer_config=conf,
        )
        log_probs = g.get_log_prob()
        self.assertEqual(len(log_probs), 2)
        self.assertEqual(len(log_probs[0]), 10)
|
from bika.lims import bikaMessageFactory as _
from bika.lims.browser import BrowserView
from bika.lims.browser.bika_listing import BikaListingView
from bika.lims.permissions import *
from bika.lims.utils import isActive
from bika.lims.utils import tmpID
from plone.app.layout.globals.interfaces import IViewView
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import _createObjectByType
from zope.interface import implements
class ClientAnalysisSpecsView(BikaListingView):
    """Listing view of a client's Analysis Specifications.

    Filters the bika_setup_catalog to AnalysisSpec objects owned by the
    current client and renders them in the standard Bika listing with
    Active/Dormant/All review-state tabs.
    """
    implements(IViewView)

    def __init__(self, context, request):
        super(ClientAnalysisSpecsView, self).__init__(context, request)
        self.catalog = 'bika_setup_catalog'
        # restrict to this client's specs, located under the client object
        self.contentFilter = {
            'portal_type': 'AnalysisSpec',
            'sort_on': 'sortable_title',
            'getClientUID': context.UID(),
            'path': {
                "query": "/".join(context.getPhysicalPath()),
                "level": 0
            }
        }
        self.context_actions = {}
        self.show_sort_column = False
        self.show_select_row = False
        self.show_select_column = True
        self.pagesize = 50
        self.form_id = "analysisspecs"
        self.icon = self.portal_url + "/++resource++bika.lims.images/analysisspec_big.png"
        self.title = self.context.translate(_("Analysis Specifications"))
        self.columns = {
            'Title': {'title': _('Title'),
                      'index': 'title'},
            'SampleType': {'title': _('Sample Type'),
                           'index': 'getSampleTypeTitle'},
        }
        # tabs: active specs, deactivated specs, and everything
        self.review_states = [
            {'id': 'default',
             'title': _('Active'),
             'contentFilter': {'inactive_state': 'active'},
             'transitions': [{'id': 'deactivate'}, ],
             'columns': ['Title', 'SampleType']},
            {'id': 'inactive',
             'title': _('Dormant'),
             'contentFilter': {'inactive_state': 'inactive'},
             'transitions': [{'id': 'activate'}, ],
             'columns': ['Title', 'SampleType']},
            {'id': 'all',
             'title': _('All'),
             'contentFilter': {},
             'columns': ['Title', 'SampleType']},
        ]

    def __call__(self):
        """Render the listing; expose the Add action only to permitted users on active clients."""
        mtool = getToolByName(self.context, 'portal_membership')
        checkPermission = mtool.checkPermission
        if isActive(self.context):
            if checkPermission(AddAnalysisSpec, self.context):
                self.context_actions[_('Add')] = \
                    {'url': 'createObject?type_name=AnalysisSpec',
                     'icon': '++resource++bika.lims.images/add.png'}
            #
            # @lemoene with the changes made in AR-specs, I dont know how much
            # sense this makes anymore.
            # if checkPermission("Modify portal content", self.context):
            #     self.context_actions[_('Set to lab defaults')] = \
            #         {'url': 'set_to_lab_defaults',
            #          'icon': '++resource++bika.lims.images/analysisspec.png'}
        return super(ClientAnalysisSpecsView, self).__call__()

    def folderitems(self):
        """Post-process catalog brains: linkify the title and show the sample type."""
        items = BikaListingView.folderitems(self)
        for x in range(len(items)):
            # rows without 'obj' are display-only (e.g. category rows); skip them
            # NOTE: has_key is a Python 2 idiom, consistent with this codebase
            if not items[x].has_key('obj'):
                continue
            obj = items[x]['obj']
            items[x]['Title'] = obj.Title()
            items[x]['replace']['Title'] = "<a href='%s'>%s</a>" % \
                (items[x]['url'], items[x]['Title'])
            items[x]['SampleType'] = obj.getSampleType().Title() \
                if obj.getSampleType() else ""
        return items
class SetSpecsToLabDefaults(BrowserView):
    """ Remove all client specs, and add copies of all lab specs
    """

    def __call__(self):
        """Replace this client's AnalysisSpec objects with copies of the lab's.

        Deletes every existing client spec, then duplicates each lab-level
        spec into the client folder, posts an informational portal message,
        and redirects back to the client's spec listing.
        """
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        # find and remove existing specs
        cs = bsc(portal_type='AnalysisSpec',
                 getClientUID=self.context.UID())
        if cs:
            self.context.manage_delObjects([s.id for s in cs])
        # find and duplicate lab specs (those owned by the lab's setup folder)
        ls = bsc(portal_type='AnalysisSpec',
                 getClientUID=self.context.bika_setup.bika_analysisspecs.UID())
        ls = [s.getObject() for s in ls]
        for labspec in ls:
            clientspec = _createObjectByType("AnalysisSpec", self.context,
                                             tmpID())
            clientspec.processForm()
            clientspec.edit(
                SampleType=labspec.getSampleType(),
                ResultsRange=labspec.getResultsRange(),
            )
        # NOTE(review): the message is passed untranslated; addPortalMessage
        # handles i18n message objects — confirm against the Plone version in use.
        message = _("Analysis specifications reset to lab defaults.")
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.RESPONSE.redirect(self.context.absolute_url() +
                                       "/analysisspecs")
        return
|
#!/usr/bin/env python
import yaml
import json
import oauth2 as oauth
import feedparser
import datetime
from dateutil import parser
from feedgen.feed import FeedGenerator
import subprocess
import os.path
import requests
import tldextract
def unshorten(url):
if len(tldextract.extract(url).suffix) > 2:
return url
try:
r = requests.head(url)
if str(r.status_code)[0] == '3':
return r.headers['location']
else:
return url
except Exception as e:
print url, e
return url
def pull_tweets(consumer_key, consumer_secret, access_token,
                access_token_secret, screen_name, list_slug=None, since_id=None,
                max_iter=15, mentions=False):
    """Fetch tweets from the Twitter 1.1 API, paginating backwards by max_id.

    Chooses the list timeline (when list_slug is given), the mentions
    timeline (when mentions is truthy), or the home timeline. Returns a
    list of tweet dicts, newest first; raises RuntimeError if the API
    reports errors on the first request.
    """
    # http://stackoverflow.com/questions/6399978/getting-started-with-twitter-
    # oauth2-python
    consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
    access_token = oauth.Token(key=access_token, secret=access_token_secret)
    client = oauth.Client(consumer, access_token)
    if list_slug is not None:
        timeline_endpoint = (
            'https://api.twitter.com/1.1/lists/statuses.json'
            '?slug=%s&owner_screen_name=%s&count=200' % (list_slug, screen_name))
    elif mentions:
        timeline_endpoint = 'https://api.twitter.com/1.1/statuses/mentions_timeline.json?count=200&contributor_details=true&include_entities=true'
    else:
        timeline_endpoint = (
            'https://api.twitter.com/1.1/statuses/home_timeline.json'
            '?contributor_details=True&count=200')
    if since_id is not None:
        timeline_endpoint += '&since_id=%s' % since_id
    response, data = client.request(timeline_endpoint)
    data = json.loads(data)
    if 'errors' in data:
        raise RuntimeError(data)
    if data == []:
        print 'No tweets returned'
        return []
    ##
    all_data = []
    all_data.extend(data)
    max_id = None
    # page backwards: each request asks for tweets older than the oldest
    # id seen so far; stop when a page yields no older tweets
    for i in range(max_iter):
        if min(t['id'] for t in data) == max_id:
            break
        max_id = min(t['id'] for t in data)
        print 'Fetching page', i, 'before', max_id, list(t['created_at'] for t in data if t['id'] == max_id)[0]
        response, data = client.request(
            timeline_endpoint + '&max_id=%s' % max_id)
        data = json.loads(data)
        # errors mid-pagination (e.g. rate limit) end the loop quietly,
        # keeping whatever was collected so far
        if 'errors' not in data:
            all_data.extend(data)
        else:
            break
    return all_data
def make_feed(RSS_FILE, twitter_account, get_images):
    """Build a FeedGenerator of new tweets for one configured account.

    Reads the existing RSS file to find the newest tweet id already
    published, pulls only newer tweets, and turns each into a feed entry
    with retweet/reply/favorite intent links, unshortened URLs, and
    (optionally) ASCII-art renderings of attached photos. Returns the
    FeedGenerator, or None when there are no new tweets.
    """
    try:
        feed = feedparser.parse(RSS_FILE)
        # entry ids are tweet URLs; the last path segment is the tweet id
        since_id = feed.entries[0]['id'].split('/')[-1]
        print 'Getting tweets since', since_id, feed.entries[0]['published']
    except Exception as e:
        # no existing feed (first run) -> fetch without a since_id
        print e
        since_id = None
    tweets = pull_tweets(since_id=since_id, **twitter_account)
    if tweets==[]: return
    fg = FeedGenerator()
    if 'list_slug' in twitter_account:
        feed_url = 'https://twitter.com/%s/lists/%s' % (twitter_account['screen_name'], twitter_account['list_slug'])
        fg.description('Twitter home timeline for list %s ' + twitter_account['list_slug'])
        fg.title('Twitter home timeline for %s' % twitter_account['list_slug'])
    elif 'mentions' in twitter_account:
        # NOTE(review): when 'mentions' is present but False, no branch sets
        # feed_url/description/title and fg.id(feed_url) below would raise
        # NameError — presumably configs only include 'mentions' when true; verify.
        if twitter_account['mentions']:
            feed_url = 'https://twitter.com/' + twitter_account['screen_name']
            fg.description(
                'Twitter mentions for ' + twitter_account['screen_name'])
            fg.title('Twitter mentions for %s' % twitter_account['screen_name'])
    else:
        feed_url = 'https://twitter.com/' + twitter_account['screen_name']
        fg.description(
            'Twitter home timeline for ' + twitter_account['screen_name'])
        fg.title('Twitter home timeline for %s' % twitter_account['screen_name'])
    fg.id(feed_url)
    fg.link({'href': feed_url, 'rel': 'alternate'})
    for t in tweets:
        tweet_url = 'https://twitter.com/%s/status/%s' % (
            t['user']['id_str'], t['id_str'])
        print 'Got tweet', tweet_url, t['created_at']
        fe = fg.add_entry()
        title = '@' + t['user']['screen_name'] + ' (%s)' % t['user']['name'] + ': ' + t['text']
        fe.published(t['created_at'])
        fe.author({
            'name': t['user']['name'],
            'uri': '',
            'email': t['user']['screen_name']})
        fe.id(tweet_url)
        fe.link({'href': tweet_url, 'rel': 'alternate'})
        content = t['text']
        # append Twitter web-intent action links
        content += '<br /><br /><a href="https://twitter.com/intent/retweet?tweet_id=%s">Retweet</a>' % t[
            'id_str']
        content += '<a href="https://twitter.com/intent/tweet?in_reply_to=%s%%26text=%s">Reply</a>' % (
            t['id_str'], '%40' + t['user']['screen_name'])
        content += '<a href="https://twitter.com/intent/favorite?tweet_id=%s">Favorite</a><br /><br />' % t[
            'id_str']
        if 'entities' in t:
            if ('media' in t['entities']) and get_images:
                for u in t['entities']['media']:
                    if u['type'] == 'photo':
                        # curl | mogrify | jp2a: download the photo, convert to
                        # jpeg, render as HTML ASCII art
                        curl = subprocess.Popen(['curl', u['media_url']], stdout = subprocess.PIPE)
                        mogrify = subprocess.Popen(['mogrify', '-format', 'jpeg', '-', '-'] , stdout = subprocess.PIPE, stdin=curl.stdout)
                        jp2a = subprocess.Popen(['jp2a', '-i', '--html', '--width=120', '-'], stdout = subprocess.PIPE, stdin=mogrify.stdout)
                        img = jp2a.communicate()[0]
                        content += img
                        content += '\n<a href="%s">%s</a><br />\n' % (
                            u['media_url'], u['media_url'])
            if 'urls' in t['entities']:
                for u in t['entities']['urls']:
                    # replace shortened t.co links with their destinations
                    current_url = unshorten(u['expanded_url'])
                    fe.link({'href': current_url, 'rel': 'related'})
                    content += '\n<a href="%s">%s</a><br />\n' % (current_url, current_url)
                    content = content.replace(u['url'],current_url)
                    title = title.replace(u['url'],current_url)
        fe.title(title)
        fe.description(content)
    return fg
if __name__ == '__main__':
    # Load per-account settings from the user's config file.
    CONFIG = yaml.load(open(os.path.expanduser('~/.ftb-config.yaml')))
    for c in CONFIG['accounts']:
        RSS_FILE = c['rss_file']
        twitter_account = c['twitter']
        get_images = c['get_images'] if 'get_images' in c else False
        fg = make_feed(RSS_FILE, twitter_account, get_images)
        if fg:
            new_len = len(feedparser.parse(fg.rss_str()))
        else:
            new_len=0
        print new_len, 'new entries'
        # NOTE(review): this unconditional break means only the FIRST account
        # is ever processed — confirm whether that is intended or a leftover
        # from debugging (a `continue` on fg being None may have been meant).
        break
    # Merge the previously published entries into the new feed so old items
    # are not lost, then rewrite the RSS file.
    # NOTE(review): if make_feed returned None above, fg.add_entry() here
    # would raise AttributeError — presumably the first account always has
    # tweets; verify.
    old_feed = feedparser.parse(RSS_FILE)
    print len(old_feed.entries), 'old entries'
    for e in old_feed.entries:
        fe = fg.add_entry()
        fe.published(e['published'])
        if 'author_detail' in e:
            fe.author(e['author_detail'])
        else:
            fe.author({'name':''})
        fe.id(e['id'])
        fe.title(e['title'])
        fe.description(e['summary'])
        for l in e['links']:
            fe.link(l)
    fg.rss_file(RSS_FILE)
|
<gh_stars>0
import json, argparse, pickle
import sqlite3 as lite
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
class EgoGraphs(object):
    """Band-similarity ego graphs.

    Loads a band-similarity graph (pickled from a SQLite database of bands
    and pairwise similarity scores), extracts an ego graph around a band,
    sizes nodes/edges for a D3 force layout, and writes the result as JSON.
    """

    def __init__(self, database="../database.db"):
        self.database = str(database)
        # Rebuild path (uncomment to regenerate the pickle from the database):
        #self.readDatabase()
        #self.makeGraph()
        #self.pickle()
        self.unpickle()

    def readDatabase(self):
        """Read bands and similarity edges from the SQLite database (read-only)."""
        # BUG FIX: was `database` (undefined name, NameError); must be the
        # instance attribute set in __init__.
        with lite.connect(f'file:{self.database}?mode=ro', uri=True) as con:
            cur = con.cursor()
            #cur.execute('select band_id from Bands where band=?', (band_name,))
            #resp = cur.fetchone()
            #if len(resp) != 1:
            #    raise RuntimeError("didn't find that band")
            #band_id = str(resp[0])
            #cur.execute('select count(*) from Reviews where band_id=?', (band_id,))
            #resp = cur.fetchone()
            #if len(resp) != 1:
            #    num_reviews = 0
            #else:
            #    num_reviews = int(resp[0])
            # only keep bands that participate in the similarity table
            cur.execute('select band_id,band from Bands where band_id in (select band_id from Similarities)')
            self.bands_list = cur.fetchall()
            cur.execute('select band_id,similar_to_id,score from Similarities where similar_to_id in (select band_id from Similarities)')
            self.sim_list = cur.fetchall()
        self.band_id_to_band = {band_id:band for band_id,band in self.bands_list}
        # weighted edges: (band, similar band, similarity score)
        self.edge_list = [(band_id,similar_to_id,score) for band_id,similar_to_id,score in self.sim_list]

    def makeGraph(self):
        """Build the undirected weighted graph, nodes relabeled to band names."""
        G = nx.Graph()
        G.add_weighted_edges_from(self.edge_list)
        self.G = nx.relabel_nodes(G, self.band_id_to_band)

    def pickle(self):
        """Persist the graph to ego_stuff.pkl."""
        fn = 'ego_stuff.pkl'
        with open(fn, 'wb') as f:
            pickle.dump(self.G, f)

    def unpickle(self):
        """Load the graph from ego_stuff.pkl."""
        fn = 'ego_stuff.pkl'
        with open(fn, 'rb') as f:
            self.G = pickle.load(f)

    def makeEgoGraph(self, band_name, ego_radius=2):
        """Return the ego graph around band_name with display attributes set."""
        # https://stackoverflow.com/questions/17301887/how-to-compute-nearby-nodes-with-networkx
        ego = nx.ego_graph(self.G, band_name, radius=ego_radius, center=True)
        self.setNodeRadii(ego, band_name)
        self.setEdgeStrokeWidth(ego, band_name)
        return ego

    def setNodeRadii(self, ego, band_name, method='shortest_path'):
        """
        Set each node's 'radius' property (and, for 'shortest_path', prune the
        graph to the best-scoring nodes and set 'force_radial_factor').
        """
        min_radius = 3
        max_radius = 10
        if method == 'linear_indeg':
            # radius is proportional to degree. This will show popular bands
            nodes = ego.nodes()
            indegs = ego.degree(nodes, weight='weight')
            min_indeg = min(t[1] for t in indegs)
            max_indeg = max(t[1] for t in indegs)
            for node, indeg_tuple in zip(nodes, indegs):
                indeg = indeg_tuple[1]
                radius = float(indeg - min_indeg) / float(max(1, max_indeg - min_indeg))  # in [0,1]
                radius = min_radius + (max_radius-min_radius)*radius  # in [min_radius, max_radius]
                # CONSISTENCY FIX: use `ego.nodes` (as the rest of this class
                # does); `Graph.node` was removed in networkx 2.4.
                ego.nodes[node]['radius'] = radius
        elif method == 'shortest_path':
            # Compute inverse of weight == inverse of recommendation score,
            # so Dijkstra prefers strongly-recommended paths
            for edge in ego.edges():
                ego.edges[edge]['invweight'] = 1./ego.edges[edge]['weight']
            # Compute shortest paths from band to all other nodes based on 'invweight'
            _lengths, path_dict = nx.single_source_dijkstra(ego, band_name, weight='invweight')
            # Set each node to min_radius, in case the shortest path thing below craps out
            # This shouldn't happen if ego is actually an ego graph
            for node in ego.nodes():
                ego.nodes[node]['radius'] = min_radius
            # Set requested band to max_radius plus a little bit; it keeps
            # this radius because it never receives a 'score' below
            ego.nodes[band_name]['radius'] = 1.25*max_radius
            for target, path in path_dict.items():
                if not path:
                    print('missing a path')
                    continue
                if len(path) == 1:  # path length 0 is band_name to band_name
                    continue
                # Combined recommendation score: geometric mean of the path's
                # edge weights, each normalized by the source node's degree
                # and penalized by alpha for every extra hop from band_name
                alpha = 0.1  # penalty for being farther from the source
                weights = [ego.edges[(path[i-1],path[i])]['weight']/ego.degree(path[i-1])*alpha**(i-1) for i in range(1,len(path))]
                score = np.exp(np.sum(np.log(weights))/len(weights))
                ego.nodes[target]['score'] = score
                # Also store the path from band_name to this node
                ego.nodes[target]['path'] = path
            # Keep only the max_size best-scoring nodes, to trim the graph
            scores = [t[1]['score'] for t in ego.nodes(data=True) if 'score' in t[1]]
            max_size = 250
            ind = max(0, len(ego) - max_size)
            # np.partition places the ind-th smallest score at position ind
            limit = np.partition(scores, ind)[ind]
            nodes = list(ego.nodes(data=True))
            for t in nodes:
                if 'score' not in t[1]:
                    continue
                if t[1]['score'] < limit:
                    ego.remove_node(t[0])
            # And if any nodes are isolated, drop them
            nodes = list(ego.nodes(data=True))
            for t in nodes:
                if ego.degree(t[0]) == 0:
                    ego.remove_node(t[0])
            print(len(ego))
            min_score = min(t[1]['score'] for t in ego.nodes(data=True) if 'score' in t[1])
            max_score = max(t[1]['score'] for t in ego.nodes(data=True) if 'score' in t[1])
            print(min_score, max_score)
            # Linearly map scores to [min_radius, max_radius]
            for node,data in ego.nodes(data=True):
                if 'score' not in data:
                    continue
                score = data['score']
                score = float(score - min_score) / float(max(1, max_score - min_score))
                radius = min_radius + (max_radius - min_radius)*score
                ego.nodes[node]['radius'] = radius
            # Scale factor for forceRadial: small nodes are pushed outward
            # more strongly (quadratic falloff with radius)
            for node in ego.nodes():
                radius = ego.nodes[node]['radius']
                factor = max(0, 1. - float(radius - min_radius) / float(max(1, max_radius - min_radius)))
                factor = factor ** 2
                ego.nodes[node]['force_radial_factor'] = factor
        else:
            raise ValueError('Dunno that method')

    def setEdgeStrokeWidth(self, ego, band_name, method='weight_simple'):
        """
        Set each edge's 'stroke_width' property
        """
        if method == 'weight_simple':
            # stroke width is simply the score... doesn't show relation to requested band
            min_weight = min(t[2]['weight'] for t in ego.edges(data=True))
            max_weight = max(t[2]['weight'] for t in ego.edges(data=True))
            for t in ego.edges(data=True):
                source, target, data = t
                weight = data['weight']
                # normalize to [0,1], compress with a cube root, then map
                # into [min_width, max_width]
                width = float(weight - min_weight) / float(max(1, max_weight - min_weight))
                width = width ** 0.33
                min_width = 1.5
                max_width = 4
                width = min_width + (max_width - min_width) * width
                ego.edges[(source,target)]['stroke_width'] = width
        else:
            raise ValueError('Dunno that method')

    def writeJSON(self, json_file, ego):
        """Serialize the ego graph to the node/link JSON format the D3 front end expects."""
        nodes = [{'name': str(t[0]),
                  'radius': t[1]['radius'],
                  'force_radial_factor': t[1]['force_radial_factor'],
                  'path': t[1].get('path', []),
                  }
                 for t in ego.nodes(data=True)]
        links = [{'source': t[0],
                  'target': t[1],
                  'stroke_width': t[2]['stroke_width'],
                  'sim_score': t[2]['weight'],
                  }
                 for t in ego.edges(data=True)]
        # ranges let the front end rescale without re-scanning the data
        min_radius = min(t[1]['radius'] for t in ego.nodes(data=True))
        max_radius = max(t[1]['radius'] for t in ego.nodes(data=True))
        min_sim_score = min(v['sim_score'] for v in links)
        max_sim_score = max(v['sim_score'] for v in links)
        data = {'nodes': nodes,
                'links': links,
                'min_radius': min_radius,
                'max_radius': max_radius,
                'min_sim_score': min_sim_score,
                'max_sim_score': max_sim_score,
                }
        with open(json_file, 'w') as f:
            json.dump(data, f)
if __name__ == '__main__':
    # CLI: write the ego graph for one band to a JSON file.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('json_out')
    arg_parser.add_argument('band_name')
    cli_args = arg_parser.parse_args()

    graphs = EgoGraphs("../database.db")
    band_ego = graphs.makeEgoGraph(cli_args.band_name, ego_radius=2)
    graphs.writeJSON(cli_args.json_out, band_ego)
    #plt.figure()
    #nx.draw_spring(ego, with_labels=True)
    #plt.show()
|
from datetime import datetime
from unittest import TestCase, main
from unittest.mock import Mock, patch
from dateutil import relativedelta
from MySQLdb import DatabaseError
from spectacles_xix.db_ops import(
NOT_TWEETED_CONDITION,
PLAY_SELECT,
abbreviation_db,
play_db,
query_by_wicks_id,
query_by_date,
query_play,
tweet_db
)
class TestQuery(TestCase):
    """Tests for the spectacles_xix.db_ops helpers, driven entirely by
    mocked MySQLdb cursors — no real database connection is used."""
    def setUp(self):
        """Create shared fixtures: a fake two-row result set, a dummy DB
        config dict, and today's date shifted 200 years into the past."""
        self.mock_result = [
            ('test 1', 'test 2', 'test 3', 4),
            ('test 1a', 'test 2a', 'test 3a', 5)
        ]
        self.config = {'test 1': 'test 2'}
        self.date = datetime.now().date() + relativedelta.relativedelta(
            years=-200
        )
    def test_tweet_db(self):
        """tweet_db passes the play id as a query parameter."""
        mock_cursor = Mock()
        test_play_id = 56768
        tweet_db(mock_cursor, test_play_id)
        # mock_calls[0][1] is the positional args of the first cursor
        # call; args[1] is the parameter sequence whose second element
        # should be the play id.
        self.assertEqual(mock_cursor.mock_calls[0][1][1][1], test_play_id)
    def test_tweet_db_error(self):
        """On DatabaseError, tweet_db logs an error but still attempted
        the execute with the play id as a parameter."""
        mock_cursor = Mock()
        mock_cursor.execute.side_effect = DatabaseError
        test_play_id = 56768
        with self.assertLogs(level="ERROR"):
            tweet_db(mock_cursor, test_play_id)
        self.assertEqual(mock_cursor.mock_calls[0][1][1][1], test_play_id)
    def test_abbreviation_db(self):
        """abbreviation_db returns the expansion fetched from the cursor
        and queries with the abbreviation as the sole parameter."""
        test_abbreviation = 'tst'
        mock_expansion = 'test'
        mock_cursor = Mock()
        mock_cursor.fetchone.return_value = (mock_expansion,)
        test_expansion = abbreviation_db(mock_cursor, test_abbreviation)
        self.assertEqual(test_expansion, mock_expansion)
        self.assertEqual(
            mock_cursor.execute.mock_calls[0][1][1], [test_abbreviation]
        )
        mock_cursor.fetchone.assert_called_once()
    def test_abbreviation_db_empty(self):
        """An empty abbreviation short-circuits: no DB access at all and
        an empty string comes back."""
        test_abbreviation = ''
        mock_expansion = ''
        mock_cursor = Mock()
        test_expansion = abbreviation_db(mock_cursor, test_abbreviation)
        self.assertEqual(test_expansion, mock_expansion)
        mock_cursor.execute.assert_not_called()
        mock_cursor.fetchone.assert_not_called()
    def test_abbreviation_db_no_result(self):
        """When the lookup returns nothing, the abbreviation itself is
        returned unchanged."""
        test_abbreviation = 'tst'
        mock_cursor = Mock()
        mock_cursor.fetchone.return_value = []
        test_expansion = abbreviation_db(mock_cursor, test_abbreviation)
        self.assertEqual(test_expansion, test_abbreviation)
        self.assertEqual(
            mock_cursor.execute.mock_calls[0][1][1], [test_abbreviation]
        )
        mock_cursor.fetchone.assert_called_once()
    def test_abbreviation_db_error(self):
        """On DatabaseError, abbreviation_db logs an error, skips the
        fetch, and falls back to the raw abbreviation."""
        test_abbreviation = 'tst'
        mock_cursor = Mock()
        mock_cursor.execute.side_effect = DatabaseError()
        with self.assertLogs(level="ERROR"):
            test_expansion = abbreviation_db(mock_cursor, test_abbreviation)
        self.assertEqual(test_expansion, test_abbreviation)
        self.assertEqual(
            mock_cursor.execute.mock_calls[0][1][1], [test_abbreviation]
        )
        mock_cursor.fetchone.assert_not_called()
    def test_play_db(self):
        """play_db executes the given query with the lookup term and
        returns whatever fetchall() yields."""
        test_query_string = 'query string'
        test_lookup_term = 'lookup term'
        mock_cursor = Mock()
        mock_cursor.fetchall.return_value = self.mock_result
        test_result = play_db(mock_cursor, test_query_string, test_lookup_term)
        self.assertEqual(self.mock_result, test_result)
        mock_cursor.execute.assert_called_once_with(
            test_query_string, [test_lookup_term]
        )
        mock_cursor.fetchall.assert_called_once_with()
    def test_play_db_empty(self):
        """An empty result set is logged at INFO and returned as-is."""
        test_query_string = 'query string'
        test_lookup_term = 'lookup term'
        mock_result = []
        mock_cursor = Mock()
        mock_cursor.fetchall.return_value = mock_result
        with self.assertLogs(level="INFO"):
            test_result = play_db(
                mock_cursor, test_query_string, test_lookup_term
            )
        self.assertEqual(mock_result, test_result)
        mock_cursor.execute.assert_called_once_with(
            test_query_string, [test_lookup_term]
        )
        mock_cursor.fetchall.assert_called_once_with()
    def test_play_db_error(self):
        """On DatabaseError, play_db logs an error and returns an empty
        list without attempting a fetch."""
        test_query_string = 'query string'
        test_lookup_term = 'lookup term'
        mock_result = []
        mock_cursor = Mock()
        mock_cursor.execute.side_effect = DatabaseError()
        with self.assertLogs(level="ERROR"):
            test_result = play_db(
                mock_cursor, test_query_string, test_lookup_term
            )
        self.assertEqual(mock_result, test_result)
        mock_cursor.execute.assert_called_once_with(
            test_query_string, [test_lookup_term]
        )
        mock_cursor.fetchall.assert_not_called()
    @patch('spectacles_xix.db_ops.DictCursor')
    @patch('spectacles_xix.db_ops.play_db')
    @patch('spectacles_xix.db_ops.db_cursor')
    def test_query_play(self, mock_db_cursor, mock_play, mock_dict_cursor):
        """query_play opens a DictCursor through db_cursor(config) and
        delegates the actual query to play_db."""
        test_query_string = 'test query string'
        test_lookup_term = 'test term'
        mock_play.return_value = self.mock_result
        mock_cursor = Mock()
        mock_db_cursor.return_value.__enter__.return_value = mock_cursor
        test_result = query_play(
            self.config, test_query_string, test_lookup_term
        )
        self.assertEqual(test_result, self.mock_result)
        mock_db_cursor.assert_called_once_with(
            self.config, cursorclass=mock_dict_cursor
        )
        mock_play.assert_called_once_with(
            mock_cursor, test_query_string, test_lookup_term
        )
    @patch('spectacles_xix.db_ops.query_play')
    def test_query_by_wicks_id(self, mock_query):
        """The wicks-id query combines PLAY_SELECT, a wicks WHERE clause
        and the not-yet-tweeted condition."""
        test_wicks_id = 9999
        target_query_string = '{}\nWHERE wicks = %s\n{}'.format(
            PLAY_SELECT, NOT_TWEETED_CONDITION
        )
        mock_query.return_value = self.mock_result
        test_result = query_by_wicks_id(self.config, test_wicks_id)
        self.assertEqual(test_result, self.mock_result)
        mock_query.assert_called_once_with(
            self.config, target_query_string, test_wicks_id
        )
    @patch('spectacles_xix.db_ops.query_play')
    def test_query_by_wicks_id_tweeted(self, mock_query):
        """tweeted=True drops the not-yet-tweeted condition from the
        generated SQL."""
        test_wicks_id = 9999
        target_query_string = '{}\nWHERE wicks = %s\n'.format(
            PLAY_SELECT
        )
        mock_query.return_value = self.mock_result
        test_result = query_by_wicks_id(
            self.config, test_wicks_id, tweeted=True
        )
        self.assertEqual(test_result, self.mock_result)
        mock_query.assert_called_once_with(
            self.config, target_query_string, test_wicks_id
        )
    @patch('spectacles_xix.db_ops.query_play')
    def test_query_by_date(self, mock_query):
        """The date query filters on greg_date (passed in ISO format)
        plus the not-yet-tweeted condition."""
        test_date = self.date
        target_query_string = '{}\nWHERE greg_date = %s\n{}\n'.format(
            PLAY_SELECT, NOT_TWEETED_CONDITION
        )
        mock_query.return_value = self.mock_result
        test_result = query_by_date(self.config, test_date)
        self.assertEqual(test_result, self.mock_result)
        mock_query.assert_called_once_with(
            self.config, target_query_string, test_date.isoformat()
        )
    @patch('spectacles_xix.db_ops.query_play')
    def test_query_by_date_limit(self, mock_query):
        """A truthy limit appends a LIMIT clause to the date query."""
        # NOTE(review): limit=500 is passed but the expected SQL says
        # 'LIMIT 1' — presumably query_by_date caps at a single row for
        # any truthy limit; confirm against db_ops.
        test_date = self.date
        target_query_string = '{}\nWHERE greg_date = %s\n{}\nLIMIT 1'.format(
            PLAY_SELECT, NOT_TWEETED_CONDITION
        )
        mock_query.return_value = self.mock_result
        test_result = query_by_date(self.config, test_date, limit=500)
        self.assertEqual(test_result, self.mock_result)
        mock_query.assert_called_once_with(
            self.config, target_query_string, test_date.isoformat()
        )
    @patch('spectacles_xix.db_ops.query_play')
    def test_query_by_date_tweeted(self, mock_query):
        """tweeted=True drops the not-yet-tweeted condition from the
        date query."""
        target_query_string = '{}\nWHERE greg_date = %s\n\n'.format(
            PLAY_SELECT
        )
        test_date = self.date
        mock_query.return_value = self.mock_result
        test_result = query_by_date(
            self.config, test_date, tweeted=True
        )
        self.assertEqual(test_result, self.mock_result)
        mock_query.assert_called_once_with(
            self.config, target_query_string, test_date.isoformat()
        )
if __name__ == '__main__':
    # Run this module's tests with unittest's CLI runner.
    main()
<reponame>COHRINT/cops_and_robots
#!/usr/bin/env python
"""Provides a common base class for all map layers.
Since many layers share parameters and functions, the ``layer`` module
defines these in one place, allowing all layers to use it as a
superclass.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import logging
import matplotlib.pyplot as plt
# TODO: @Refactor Is this necessary? Especially the self.target
class Layer(object):
    """A collection of generic layer parameters and functions.

    .. image:: img/classes_Layer.png

    Parameters
    ----------
    bounds : array_like, optional
        Map boundaries as [xmin,ymin,xmax,ymax] in [m]. Defaults to
        [-10, -10, 10, 10].
    visible : bool, optional
        Whether or not the layer is shown when plotting.
    target : str, optional
        Name of target tracked by this layer. Defaults to `''`.
    fig : figure handle, optional
        The figure to be used for plotting. Defaults to `None`.
    ax : axes handle, optional
        The axes to be used for plotting. Defaults to current axes.
    alpha : float, optional
        The layer's transparency, from 0 to 1. Defaults to 0.8.
    cmap_str : str, optional
        The colormap string for the layer. Defaults to `'parula'`.
    """
    def __init__(self, bounds=None, visible=True, target='',
                 fig=None, ax=None, alpha=0.8, cmap_str='parula'):
        # A None sentinel avoids the shared-mutable-default pitfall:
        # each Layer gets its own bounds list.
        if bounds is None:
            bounds = [-10, -10, 10, 10]
        self.fig = fig
        self.ax = ax
        self.bounds = bounds  # [xmin,ymin,xmax,ymax] in [m]
        self.visible = visible
        self.target = target
        self.alpha = alpha
        # 'parula' is emulated locally (see fake_parula_cmap); any other
        # name is resolved through matplotlib's colormap registry.
        if cmap_str == 'parula':
            self.cmap = fake_parula_cmap()
        else:
            self.cmap = plt.cm.get_cmap(cmap_str)
def fake_parula_cmap():
    """Approximate MATLAB's 'parula' colormap for matplotlib.

    Color data taken from https://github.com/BIDS/colormap.  Returns a
    matplotlib LinearSegmentedColormap named 'parula' built from the 64
    sampled RGB triples below (blue -> green -> yellow ramp).
    """
    from matplotlib.colors import LinearSegmentedColormap
    # 64 RGB triples sampled along the parula ramp.
    cm_data = [[0.2081, 0.1663, 0.5292], [0.2116238095, 0.1897809524, 0.5776761905],
    [0.212252381, 0.2137714286, 0.6269714286], [0.2081, 0.2386, 0.6770857143],
    [0.1959047619, 0.2644571429, 0.7279], [0.1707285714, 0.2919380952,
    0.779247619], [0.1252714286, 0.3242428571, 0.8302714286],
    [0.0591333333, 0.3598333333, 0.8683333333], [0.0116952381, 0.3875095238,
    0.8819571429], [0.0059571429, 0.4086142857, 0.8828428571],
    [0.0165142857, 0.4266, 0.8786333333], [0.032852381, 0.4430428571,
    0.8719571429], [0.0498142857, 0.4585714286, 0.8640571429],
    [0.0629333333, 0.4736904762, 0.8554380952], [0.0722666667, 0.4886666667,
    0.8467], [0.0779428571, 0.5039857143, 0.8383714286],
    [0.079347619, 0.5200238095, 0.8311809524], [0.0749428571, 0.5375428571,
    0.8262714286], [0.0640571429, 0.5569857143, 0.8239571429],
    [0.0487714286, 0.5772238095, 0.8228285714], [0.0343428571, 0.5965809524,
    0.819852381], [0.0265, 0.6137, 0.8135], [0.0238904762, 0.6286619048,
    0.8037619048], [0.0230904762, 0.6417857143, 0.7912666667],
    [0.0227714286, 0.6534857143, 0.7767571429], [0.0266619048, 0.6641952381,
    0.7607190476], [0.0383714286, 0.6742714286, 0.743552381],
    [0.0589714286, 0.6837571429, 0.7253857143],
    [0.0843, 0.6928333333, 0.7061666667], [0.1132952381, 0.7015, 0.6858571429],
    [0.1452714286, 0.7097571429, 0.6646285714], [0.1801333333, 0.7176571429,
    0.6424333333], [0.2178285714, 0.7250428571, 0.6192619048],
    [0.2586428571, 0.7317142857, 0.5954285714], [0.3021714286, 0.7376047619,
    0.5711857143], [0.3481666667, 0.7424333333, 0.5472666667],
    [0.3952571429, 0.7459, 0.5244428571], [0.4420095238, 0.7480809524,
    0.5033142857], [0.4871238095, 0.7490619048, 0.4839761905],
    [0.5300285714, 0.7491142857, 0.4661142857], [0.5708571429, 0.7485190476,
    0.4493904762], [0.609852381, 0.7473142857, 0.4336857143],
    [0.6473, 0.7456, 0.4188], [0.6834190476, 0.7434761905, 0.4044333333],
    [0.7184095238, 0.7411333333, 0.3904761905],
    [0.7524857143, 0.7384, 0.3768142857], [0.7858428571, 0.7355666667,
    0.3632714286], [0.8185047619, 0.7327333333, 0.3497904762],
    [0.8506571429, 0.7299, 0.3360285714], [0.8824333333, 0.7274333333, 0.3217],
    [0.9139333333, 0.7257857143, 0.3062761905], [0.9449571429, 0.7261142857,
    0.2886428571], [0.9738952381, 0.7313952381, 0.266647619],
    [0.9937714286, 0.7454571429, 0.240347619], [0.9990428571, 0.7653142857,
    0.2164142857], [0.9955333333, 0.7860571429, 0.196652381],
    [0.988, 0.8066, 0.1793666667], [0.9788571429, 0.8271428571, 0.1633142857],
    [0.9697, 0.8481380952, 0.147452381], [0.9625857143, 0.8705142857, 0.1309],
    [0.9588714286, 0.8949, 0.1132428571], [0.9598238095, 0.9218333333,
    0.0948380952], [0.9661, 0.9514428571, 0.0755333333],
    [0.9763, 0.9831, 0.0538]]
    return LinearSegmentedColormap.from_list('parula', cm_data)
|
<gh_stars>0
#! coding:utf-8
"""
The bottle module defines the Bottle class that is one element in
a water sort puzzle.
"""
# Import to do typing :Bottle inside class Bottle
from __future__ import annotations
from typing import Sequence, Optional, Set, Any
class BottleError(Exception):
    """Exception from the Bottle class (e.g. popping an empty bottle or
    pouring a mismatched color)."""
class Bottle:
    """Model of one bottle in a water-sort puzzle.

    The content is stored bottom-up in ``doses``: slot 0 is the bottom
    of the bottle and only the first ``nb_doses`` slots hold a color;
    the remaining slots are ``None``.  For example
    ``['X', 'Y', 'Y', None]`` is a bottle holding one dose of 'X'
    under two doses of 'Y' (nb_doses = 3, 2 distinct colors).
    """

    # __slots__ keeps instances small and attribute access fast — many
    # bottles are created while searching the puzzle.
    __slots__ = "doses", "nb_doses"

    MAX_DOSES = 4

    def __init__(self, doses: Sequence):
        self.doses: list[Any] = [None] * Bottle.MAX_DOSES
        self.nb_doses = 0
        # Compact the input: every non-None dose is stacked from the bottom.
        for color in doses:
            if color is None:
                continue
            self.doses[self.nb_doses] = color
            self.nb_doses += 1

    @property
    def is_empty(self) -> bool:
        """@return True if the bottle is empty."""
        return not self.nb_doses

    @property
    def is_full(self) -> bool:
        """@return True if the bottle is full."""
        return self.nb_doses == Bottle.MAX_DOSES

    @property
    def colors(self) -> Set[Any]:
        """@return Set of the different colors in the bottle."""
        return {self.doses[i] for i in range(self.nb_doses)}

    @property
    def nb_different_colors(self) -> int:
        """Number of different colors in the bottle."""
        return len(self.colors)

    @property
    def top_color(self) -> Optional[Any]:
        """Top color in the bottle (None when empty)."""
        if self.is_empty:
            return None
        return self.doses[self.nb_doses - 1]

    def iter_doses(self):
        """Iterator on every dose holding a color, bottom to top."""
        yield from self.doses[: self.nb_doses]

    def is_same_as(self, other: Bottle) -> bool:
        """
        @return True if bottles are the same.
        (same as __eq__ but not checking isinstance of the other bottle
        to speed up computation)
        """
        return (
            self.nb_doses == other.nb_doses
            and self.doses[: self.nb_doses] == other.doses[: other.nb_doses]
        )

    def pop_dose(self) -> Any:
        """Pop the top dose in the bottle and return its color."""
        if self.is_empty:
            raise BottleError("Cannot pop dose from an empty bottle")
        self.nb_doses -= 1
        top = self.doses[self.nb_doses]
        self.doses[self.nb_doses] = None
        return top

    def can_push_dose(self, color: Any) -> bool:
        """@return True if one dose of the color can be poured into the bottle."""
        if self.is_empty:
            return True
        if self.is_full:
            return False
        return self.top_color == color

    def push_dose(self, color: Any) -> None:
        """Pour one dose of the color into the bottle."""
        if not self.can_push_dose(color):
            raise BottleError(f"Cannot pour {color} into {self}")
        self.doses[self.nb_doses] = color
        self.nb_doses += 1

    def is_possible_to_pour_one_dose_into(self, destination: Bottle) -> bool:
        """
        @return True if at least one dose of the top color can be poured
        into the destination bottle.
        """
        if self.is_empty:
            return False
        if destination.is_empty:
            return True
        if destination.is_full:
            return False
        return self.top_color == destination.top_color

    def is_interesting_to_pour_into(self, destination: Bottle) -> bool:
        """
        @return True if pouring into destination leads to an interesting
        situation (like is_possible_to_pour_one_dose_into, but pruning
        moves that produce an equivalent position).
        """
        if destination.is_full or self.is_empty:
            return False
        if destination.is_empty:
            # A single-color bottle poured into an empty one yields an
            # equivalent position, so it is never interesting.
            return self.nb_different_colors > 1
        return self.top_color == destination.top_color

    def pour_into(self, destination: Bottle) -> int:
        """Pour all possible doses of top color into the destination bottle.
        @return number of poured doses
        """
        poured = 0
        while self.is_possible_to_pour_one_dose_into(destination):
            destination.push_dose(self.pop_dose())
            poured += 1
        return poured

    def clone(self) -> Bottle:
        """@return an independent copy of the bottle."""
        return Bottle(list(self.doses))

    def __repr__(self):
        return f"<{self.doses[:self.nb_doses]}>"
|
<reponame>Rasools/CancerProteinSecretionML
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 20 09:50:10 2017
@author: azams
Updated by <NAME>
"""
#print(__doc__)
import os
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
# from sklearn.preprocessing import LabelEncoder, StandardScaler
#import math as mt
from scipy import interp
from itertools import cycle
from sklearn import svm
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, AdaBoostRegressor
from sklearn.linear_model import LogisticRegression, Ridge, Lasso
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score
from sklearn.metrics import roc_curve, auc
from xgboost import XGBClassifier, XGBRegressor
#dfPSP = pd.read_csv('BigDataProject/PSP_gene_data.csv')
#dfSEC = pd.read_csv('BigDataProject/SEC_gene_data.csv')
###############################################################################
###############################################################################
def ReadOMICSdataCSV(fName):
    """
    Load the omics data CSV named ``fName + '.csv'`` into a dataframe.

    The export sometimes carries a leftover index column named
    'Unnamed: 0'; it is dropped before the dataframe is returned.
    """
    frame = pd.read_csv(fName + ".csv")
    if 'Unnamed: 0' in frame.columns:
        frame = frame.drop(columns='Unnamed: 0')
    return frame
###############################################################################
###############################################################################
#def prepareDFCancerType(dfSpecificCancerData):
#
# """
# Given the cancer data which is already filtered for a specific
# cancer type (optional, NOT necessary!) is passed as a dataframe, it
# i) separates the genes (features) from the class variables (particular to cancer types only),
# such as: TumorStage, Race, and Gender
# ii) asks the user to choose one of the class variables
# iii)removes the rest of the class variables and adds the chosen variable as the first column(s), followed by
# the data for all the genes (features) and returns as a dataframe ready to work on.
# """
#
# # Determine the number of genes (features) and class variables in the dataframe
# # Note that the dataframe is arranged such that "CancerStatus" is the first
# # class variable, so all columns before "CancerStatus" are genes.
# numFeatures = dfSpecificCancerData.columns.get_loc('CancerStatus')
# numClassVars = len(dfSpecificCancerData.columns) - numFeatures
#
# features = dfSpecificCancerData.iloc[:, 0:numFeatures]
## f_names = list(features.columns)
#
# targets = dfSpecificCancerData.iloc[:, 577:len(dfSpecificCancerData.columns)]
# t_names = list(targets.columns)
# print("\n*********************************************")
# while True:
# ClassVar = input("Choose a class variable (" + ' '.join(t_names) + "): ")
# if ClassVar in t_names:
# break
# else:
# print("Please splell correctly!")
#
# print("\n*********************************************")
# target = targets[ClassVar]
# df = features
# df[ClassVar] = target
#
# # Class variable is the last column, bringing it to the first place.
# cols = df.columns.tolist()
# cols = cols[-1:] + cols[:-1]
# df = df[cols]
#
# return df
###############################################################################
###############################################################################
def prepareDFgeneral(dfAllCancerData):
    """
    Interactively choose a class variable and reshape the data.

    Given the entire PSP cancer data as a dataframe:
    i)   separates the genes (features) from the target variables
         (CancerStatus, Project, TumorStage, Race, and Gender),
    ii)  asks the user to choose one of them as the class variable,
    iii) returns a dataframe with the chosen class variable as the
         first column, followed by all gene (feature) columns.
    """
    # The dataframe is arranged so that "CancerStatus" is the first
    # class variable: every column before it is a gene.
    numFeatures = dfAllCancerData.columns.get_loc('CancerStatus')
    features = dfAllCancerData.iloc[:, 0:numFeatures]
    targets = dfAllCancerData.iloc[:, numFeatures:len(dfAllCancerData.columns)]
    t_names = list(targets.columns)
    print("\n*********************************************")
    # Keep prompting until the user types one of the valid names.
    while True:
        ClassVar = input("Choose a class variable (" + ' '.join(t_names) + "): ")
        if ClassVar in t_names:
            break
        else:
            print("Please spell correctly!")
    print("\n*********************************************")
    target = targets[ClassVar]
    df = features
    df[ClassVar] = target
    # The class variable landed in the last column; rotate it to the front.
    cols = df.columns.tolist()
    cols = cols[-1:] + cols[:-1]
    df = df[cols]
    return df
###############################################################################
def prepareDF(dfAllCancerData, ClassVar):
    """
    Return a dataframe with ClassVar as the first column, followed by
    all gene (feature) columns; every other class variable is dropped.

    The input is arranged so that 'CancerStatus' is the first class
    variable: every column before it is a gene.
    """
    numFeatures = dfAllCancerData.columns.get_loc('CancerStatus')
    feature_cols = list(dfAllCancerData.columns[:numFeatures])
    # Select [ClassVar, genes...] in one pass; .copy() avoids the
    # chained-assignment hazard of writing into an iloc slice and keeps
    # the caller's dataframe untouched.
    return dfAllCancerData[[ClassVar] + feature_cols].copy()
###############################################################################
#from Pramod
def prepareDF_Mod(dfAllCancerData, TargetVariable):
    """
    Given the PSP cancer data as a dataframe:
    i)  separates the genes (features) from the target variables,
    ii) keeps only the requested target variable, and
    iii) returns a dataframe with that variable as the first column,
    followed by all gene (feature) columns.
    """
    # Columns before 'CancerStatus' are genes; the rest are class variables.
    gene_count = dfAllCancerData.columns.get_loc('CancerStatus')
    frame = dfAllCancerData.iloc[:, :gene_count]
    frame[TargetVariable] = dfAllCancerData[TargetVariable]
    # The target landed in the last column; rotate it to the front.
    ordered = frame.columns.tolist()
    frame = frame[ordered[-1:] + ordered[:-1]]
    return frame
###############################################################################
###############################################################################
def printNaNs(df):
    """
    Report per-column missing-value counts for the given dataframe.
    Columns with NaNs are printed with their count; a confirmation
    message is printed when the data is complete.
    """
    print("\n*********************************************")
    print("Number of samples in the dataset: {0}".format(df.shape[0]))
    print("*********************************************")
    print("Printing missing values count (if any) in each of the columns. ")
    clean = True
    for column in df.columns:
        missing = df[column].isnull().sum()
        if missing:
            print("{:_<12} : {:5d}".format(column, missing))
            clean = False
    if clean:
        print('No missing data right now!')
    print("*********************************************")
###############################################################################
###############################################################################
def dropNaNs(df, ClassVar='none'):
    """
    Drop samples with missing values and report what was removed.

    With the default ClassVar ('none'), any row containing a NaN is
    dropped; otherwise only rows whose ClassVar value is missing are
    dropped.  Prints the before/after counts and returns the filtered
    dataframe.
    """
    print("\n*********************************************")
    print("Number of samples in the dataset: {0}".format(df.shape[0]))
    print("*********************************************")
    # dropna(subset=None) is equivalent to a plain dropna().
    subset = None if ClassVar == 'none' else [ClassVar]
    dfdna = df.dropna(subset=subset)
    dropped = df.shape[0] - dfdna.shape[0]
    if dropped > 0:
        print("Number of samples having missing values: {0}".format(dropped))
        print("Number of samples remained after dropping samples with missing data: {0}".format(dfdna.shape[0]))
    else:
        print("There are no samples with missing values!")
    return dfdna
###############################################################################
###############################################################################
def printClassVarValCounts(df, ClassVar):
    """
    Print the sample count and the distribution of values (NaN
    included) of the given class variable.
    """
    distinct = df[ClassVar].unique()
    print("\n*********************************************")
    print("Number of samples in the dataset: {0}".format(df.shape[0]))
    print("Target variable, {0}, has {1} unique values,".format(ClassVar, len(distinct)))
    print("with the following distribution of the data.")
    print(df[ClassVar].value_counts(dropna=False))
    print("*********************************************")
###############################################################################
###############################################################################
# If there are some levels of the Class Variable that we want to exclude,
# we can use this method.
###############################################################################
def RemoveExtraLevels(df, ClassVar, toRemove):
    """
    Drop, in place, every sample whose ClassVar value appears in the
    toRemove list, then print the resulting class distribution and
    return the (mutated) dataframe.

    Note: the caller's dataframe is modified (inplace drop).
    """
    for x in toRemove:
        df.drop(df.index[df[ClassVar] == x], inplace=True)
    printClassVarValCounts(df, ClassVar)
    return df
###############################################################################
###############################################################################
def FilterLevels(df, ClassVar, toKeep, printStats='yes'):
    """
    Return the subset of rows whose ClassVar value is in toKeep,
    concatenated in the order of toKeep.  When printStats is 'yes'
    (default), also print the resulting class distribution.

    Uses pd.concat because DataFrame.append was removed in pandas 2.0.
    """
    if toKeep:
        df_new = pd.concat([df[df[ClassVar] == x] for x in toKeep])
    else:
        df_new = pd.DataFrame()
    if printStats == 'yes':
        printClassVarValCounts(df_new, ClassVar)
    return df_new
###############################################################################
###############################################################################
def returnVarLevels(df, var):
    """Return the unique values/levels of the given variable as a list."""
    return list(df[var].unique())
###############################################################################
###############################################################################
def mapClassVar(dfdna, ClassVar, varLevels):
    """
    Label-encode ClassVar on a copy of dfdna (pass a NaN-free frame,
    e.g. the output of dropNaNs): each value is replaced by its index
    in varLevels.  The mapping is printed and the encoded copy returned.
    """
    if ClassVar == 'TumorStageMerged' and len(varLevels) > 2:
        # Regression over tumor stages: sort the levels alphabetically
        # ("i", "ii", "iii", "iv") so the encoding is ordinal.
        varLevels.sort()
    df_le = dfdna.copy()
    df_le[ClassVar] = [varLevels.index(value) for value in df_le[ClassVar]]
    print("\n*********************************************")
    print('The following label encoding has been assigned to the values of {0}.'.format(ClassVar))
    dictionary = dict(zip(np.arange(0, len(varLevels)), varLevels))
    print(dictionary)
    print("\n*********************************************")
    return df_le
###############################################################################
###############################################################################
def fitScalarTransform(df):
    """
    Standardize the feature columns to zero mean and unit variance.

    The first column is assumed to be the (label-encoded) class
    variable: it is returned unscaled as y, and every remaining column
    is scaled and returned as X_scaled.
    """
    # The module-level StandardScaler import is commented out, which
    # made this function raise NameError; import it locally so the
    # function is self-contained.
    from sklearn.preprocessing import StandardScaler
    array = df.values
    X = array[:, 1:len(df.columns)]
    # Taking y via df.iloc keeps its original (int) dtype; slicing the
    # values array would upcast the labels to float.
    y = np.asarray(df.iloc[:, 0])
    scaler = StandardScaler().fit(X)
    X_scaled = scaler.transform(X)
    return X_scaled, y
###############################################################################
from sklearn.preprocessing import FunctionTransformer
###############################################################################
def fitLogTransform(df, offset):
    """
    Log-transform the feature columns of df.

    The first column is the label-encoded class variable and is
    returned unchanged as y; every other column is transformed as
    log(x + offset) and returned as X_scaled.
    """
    values = df.values
    features = values[:, 1:len(df.columns)]
    # df.iloc keeps the labels' integer dtype (values[:, 0] would be
    # float after the ndarray upcast).
    labels = np.asarray(df.iloc[:, 0])
    return np.log(features + offset), labels
###############################################################################
#from Pramod
###############################################################################
def dffitLogTransform(df):
    """
    Return a new dataframe in which every column of df is transformed
    with log1p, i.e. log(1 + x).

    Fixes the original implementation, which used the pandas .ix
    accessor and Series.reshape (both removed from pandas); np.log1p is
    applied directly instead of routing through sklearn's
    FunctionTransformer.

    Note: despite the old docstring, the original code transformed ALL
    columns (df.ix[:, 0:]), so that behavior is kept.
    """
    df_new = pd.DataFrame(index=range(len(df)))
    for gene in df.columns.values:
        df_new[gene] = np.log1p(df[gene].to_numpy())
    return df_new
###############################################################################
def PrepareLogitResults(df, ClassVar):
    """
    Fit a logistic regression (statsmodels Logit) of ClassVar on all
    other columns and return a summary dataframe with columns:
    'Variable', 'Beta', 'p-Value', 'OR', 'CI (2.5%)', 'CI (97.5%)'.

    NOTE(review): this mutates the caller's dataframe by appending an
    'intercept' column.
    """
    # statsmodels does not add an intercept automatically; append one
    # as an explicit column.
    df['intercept']=1.0
    # Column 0 is the class variable; everything after it (including
    # the new intercept) is used as a regressor.
    train_cols=df.columns[1:]
    res = sm.Logit(df[ClassVar], df[train_cols]).fit(maxiter=10000, method='ncg')#'ncg') #bfgs
    params = res.params
    conf = res.conf_int()
    conf['OR'] = params
    conf.columns = ['CI (2.5%)', 'CI (97.5%)', 'OR']
    # Exponentiate the betas and CI bounds to express them as odds ratios.
    conf = np.exp(conf)
    conf['p-Value'] = res.pvalues
    conf['Beta'] = res.params.values
    cols_order = ['Beta', 'p-Value', 'OR', 'CI (2.5%)', 'CI (97.5%)']
    conf = conf[cols_order]
    conf.reset_index(level=0, inplace=True)
    conf = conf.rename(columns={'index':'Variable'})
    return conf
###############################################################################
from sklearn.preprocessing import MinMaxScaler
###############################################################################
def Ranks2Dict(ranks, names, order=1):
    """
    Min-max scale the given ranks to [0, 1] (order=-1 inverts them
    first), round to 10 decimal places, and return a name -> rank dict.
    """
    scaler = MinMaxScaler()
    scaled = scaler.fit_transform(order * np.array([ranks]).T).T[0]
    rounded = [round(value, 10) for value in scaled]
    return dict(zip(names, rounded))
##############################################################################
###############################################################################
def PrepareCorrResults(df):
    """
    Compute the pairwise Pearson correlation matrix of all df columns.

    The returned frame carries a leading 'Variable' column naming each row;
    when the class variable is the first column of df, the first data column
    of the result holds its correlation against every other variable.
    """
    corr_table = df.corr().reset_index(level=0)
    return corr_table.rename(columns={'index': 'Variable'})
###############################################################################
def CVScorer(models, CV, X, y, scoring, shuffle, folds=10):
    """
    Cross-validate each model and tabulate mean score with a ~95% interval.

    Parameters
    ----------
    models : list of fitted-able sklearn estimators.
    CV : str
        'Validation: SKF' (stratified k-fold) or 'Validation: KF'.
    X, y : feature matrix and target vector.
    scoring : str
        sklearn scoring metric name.
    shuffle : bool
        Whether folds shuffle the samples.
    folds : int, optional
        Number of CV folds (default 10).

    Returns
    -------
    pandas.DataFrame with columns
        ['Model', 'Scoring', 'Score', 'CI-lower', 'CI-high'].

    Raises
    ------
    ValueError if CV is not one of the two recognized strings (the original
    silently left `cv` unbound and crashed later with NameError).
    """
    if CV == 'Validation: SKF':
        cv = StratifiedKFold(n_splits=folds, shuffle=shuffle)
    elif CV == 'Validation: KF':
        cv = KFold(n_splits=folds, shuffle=shuffle)
    else:
        raise ValueError("CV must be 'Validation: SKF' or 'Validation: KF'")
    rows = []
    for model in models:
        modelName = str(model).partition('(')[0]
        # Report regularized logistic/linear models under friendlier names.
        if modelName == 'LogisticRegression':
            if model.penalty == 'l1':
                modelName = 'LassoRegression'
            elif model.penalty == 'l2':
                modelName = 'RidgeRegression'
        elif modelName == 'Lasso':
            modelName = 'LassoRegression'
        elif modelName == 'Ridge':
            modelName = 'RidgeRegression'
        scores = cross_val_score(model, X, y, scoring=scoring, cv=cv, n_jobs=-1)
        # mean +/- 2*std approximates a 95% confidence interval.
        rows.append([modelName, scoring, scores.mean(),
                     scores.mean() - 2 * scores.std(),
                     scores.mean() + 2 * scores.std()])
    # BUG FIX: DataFrame.append was removed in pandas 2.0; build the frame
    # from the accumulated rows in one go (also O(n) instead of O(n^2)).
    return pd.DataFrame(rows, columns=['Model', 'Scoring', 'Score', 'CI-lower', 'CI-high'])
###############################################################################
def ROCanalysis(mod_name, CV, classifier, X, y, shuffle, folds=10):
    """
    Plot ROC curves generated using k-fold cross validation for the given model.

    One ROC curve is drawn per fold, plus the mean ROC curve, a chance
    diagonal, and a grey +/- 1 std band around the mean true-positive rates.

    Parameters
    ----------
    mod_name : str
        Name of the classifier. Models whose name starts with 'Ridge' are
        scored via decision_function(); all others via predict_proba().
    CV : str
        One of 'Validation: SKF' (stratified) or 'Validation: KF'.
        NOTE(review): any other value leaves `cv` unbound and raises
        NameError below — confirm callers only pass these two strings.
    classifier : estimator
        e.g. LogisticRegression().
    X : array
        Features/variables (must be indexable by fold index arrays).
    y : array
        The class variable (positive class encoded as 1).
    shuffle : bool
        Whether to shuffle samples before splitting, e.g. True.
    folds : int, optional
        Number of CV folds (default 10).
    """
    # Classification and ROC analysis
    # Run classifier with cross-validation and plot ROC curves
    if CV == 'Validation: SKF':
        cv = StratifiedKFold(n_splits=folds, shuffle=shuffle)
    elif CV == 'Validation: KF':
        cv = KFold(n_splits=folds, shuffle=shuffle)
    # Accumulators: TPR interpolated onto a fixed 101-point FPR grid so
    # per-fold curves of different lengths can be averaged.
    mean_tpr = 0.0
    mean_fpr = np.linspace(0, 1, 101)
    mean_acc = []
    tprs = []
    # Palette cycles if there are more folds than colors.
    colors = cycle(['darkcyan', 'indigo', 'darkgreen', 'darkgoldenrod', 'darkblue'
                    , 'darkorange', 'mediumvioletred', 'crimson', 'darksalmon', 'darkred'])
    lw = 2
    plt.figure(figsize=(8, 8))
    i = 0
    for (train, test), color in zip(cv.split(X, y), colors):
        if mod_name.startswith('Ridge'):
            # Ridge-style models have no predict_proba; use the margin.
            classifier.fit(X[train], y[train])
            confScores = classifier.decision_function(X[test])
            fpr, tpr, thresholds = roc_curve(y[test], confScores, pos_label=1)
        else:
            probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
            fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1], pos_label=1)
        mean_acc.append(classifier.score(X[test],y[test]))
        # Compute ROC curve and area the curve.
        # NOTE(review): `interp` is not defined in this chunk — presumably
        # scipy's interp / numpy.interp imported earlier in the file; confirm.
        mean_tpr += interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0
        # Second copy of the interpolated TPR kept for the std band below.
        tp = interp(mean_fpr, fpr, tpr)
        tp[0]=0.0
        tprs.append(tp)
        roc_auc = auc(fpr, tpr)
        plt.plot(fpr, tpr, alpha=0.55, lw=lw, color=color,
                 label='ROC fold %d (area = %0.2f)' % (i+1, roc_auc))
        i += 1
    # Chance diagonal.
    plt.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k')
    # Shaded band: mean TPR +/- one standard deviation across folds.
    tprs = np.array(tprs)
    mean_tprs = tprs.mean(axis=0)
    std = tprs.std(axis=0)
    tprs_upper = np.minimum(mean_tprs + std, 1)
    tprs_lower = mean_tprs - std
    plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.4)
    # Average the accumulated TPR over the number of folds and pin the ends.
    mean_tpr /= cv.get_n_splits(X, y)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    plt.plot(mean_fpr, mean_tpr, color='b', linestyle=':',
             label='Mean ROC (area = %0.2f)' % mean_auc, lw=4)
    plt.xlim([-0.01, 1.01])
    plt.ylim([-0.01, 1.01])
    # The x label doubles as a report of the mean CV accuracy.
    plt.xlabel("False Positive Rate (1 - Specificity) \n Cross-Validation Average"
               + " Score of Accuracy: %0.3f%%" % (np.mean(mean_acc)*100), size=12)
    plt.ylabel('True Positive Rate (Sensitivity)', size=12)
    plt.title("Receiver Operating Characteristic Curve (%s) \n Model: %s"
              % (CV, mod_name), size=13)
    plt.legend(loc="lower right")
    plt.grid(True)
    plt.show()
###############################################################################
def GeneExpression(df,Level):
    """
    Identify lowly-expressed genes.

    Takes a data frame whose first column is the class variable and whose
    remaining columns are gene expression values, and returns per-gene
    summary statistics plus the list of genes whose expression falls below
    the given threshold.

    Parameters
    ----------
    df : pandas.DataFrame
        First column is the class variable; the rest are genes.
    Level : number or str
        - number: return genes whose median expression is < Level;
        - 'X%' (e.g. '25%'): genes whose median lies in the lower
          X-percentile of all gene medians (inclusive);
        - 'zero': genes whose expression is zero in all samples.

    Returns
    -------
    (data_stats, LowCountGene)
        data_stats : DataFrame with 'Gene Name', 'Median', 'Mean' columns.
        LowCountGene : np.ndarray of gene names flagged as low expression.
    """
    df_Gene=df.iloc[:,1:]
    data_stats = pd.DataFrame()
    data_stats['Gene Name']= df_Gene.columns.values
    data_stats['Median'] = list(df_Gene.median())
    data_stats['Mean'] = list(df_Gene.mean())
    # BUG FIX: the original tested `type(Level) == 'str'` (a type compared to
    # a string), which is always False — so 'zero' and percentile thresholds
    # were never honored and fell into the numeric branch. Use isinstance.
    if isinstance(Level, str):
        if Level == 'zero':
            # find genes with all zero expression values
            gene_sums = df_Gene.sum()
            LowCountGene = gene_sums[gene_sums == 0].index
        else:
            # e.g. '25%' -> drop the trailing '%' and use the numeric part.
            Level = float(Level[0:-1])
            gene_medians = df_Gene.median()
            percentile = np.percentile(gene_medians,Level)
            LowCountGene = gene_medians[gene_medians <= percentile].index
    else:
        gene_medians = df_Gene.median()
        LowCountGene = gene_medians[gene_medians < Level].index
    return data_stats, np.array(LowCountGene)
def CleanData (df, Level):
    """
    Drop lowly-expressed genes (as identified by GeneExpression) from df.

    Parameters
    ----------
    df : pandas.DataFrame
        First column is the class variable; the rest are genes.
    Level : number or str
        Threshold forwarded to GeneExpression ('zero', 'X%', or a number).

    Returns
    -------
    pandas.DataFrame
        Copy of df without the low-expression gene columns.
    """
    data_stats, LowCountGene = GeneExpression(df, Level)
    # BUG FIX: drop()'s positional `axis` argument was removed in pandas 2.0;
    # use the explicit columns= keyword.
    df_clean = df.drop(columns=LowCountGene)
    return df_clean
###############################################################################
def prepCancerTypeDict(hdfStore=False, inFile='allcancerdata', outFile='CancerDataStore'):
    """
    Load the entire PSP cancer dataset and split it by cancer type.

    Reads '../data/<inFile>' via ReadOMICSdataCSV, drops rows with a missing
    'Project' value, and returns a dictionary mapping each project (cancer
    type) to its own dataframe.

    Parameters
    ----------
    hdfStore : bool, optional
        If True, additionally write each per-cancer dataframe into the HDF5
        store '../data/<outFile>.h5'.
    inFile, outFile : str, optional
        Base names of the input csv and the output HDF store.

    Returns
    -------
    dict mapping project id (e.g. 'TCGA-BRCA') -> pandas.DataFrame.
    """
    # Import data from csv to a data frame
    df = ReadOMICSdataCSV('../data/' + inFile)
    df = df.dropna(subset = ['Project'])
    projects = df['Project'].unique()
    arr = []
    for project in projects:
        arr.append(project)
    arr = np.array(arr)
    # Create a dictionary of data frames separated by cancer type
    cancerTypesDic = dict()
    for project in arr:
        ClassVar = 'Project'
        toKeep = [project]
        cancerTypesDic[project]= FilterLevels(df, ClassVar, toKeep, printStats='no')
    # For hdfStore=True, we write the dictionay to a hdfStore.
    if hdfStore:
        CancerDataStore = pd.HDFStore('../data/' + outFile + '.h5')
        for (key, value) in cancerTypesDic.items():
            # keys are names of cancers, e.g., TCGA-BRCA. Using split to ignore the TCGA- part and use
            # the rest as the name. With prefix TCGA-, it is not a valid Python identifier.
            # NOTE(review): assumes every project id contains a '-'; a key
            # without one would raise IndexError here — confirm.
            CancerDataStore.put(key.split('-')[1], value)
            print("{0} successfully saved in store!".format(key))
        CancerDataStore.close()
    return cancerTypesDic
###############################################################################
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis #, QuadraticDiscriminantAnalysis
###############################################################################
def plotPCA(X, y, nComp, target_names, save=False):
    """
    Scatter-plot samples on every pair of the first nComp principal components.

    Principal Component Analysis (PCA) applied to this data identifies the
    combination of attributes (principal components, or directions in the
    feature space) that account for the most variance in the data. Here we
    plot the different samples on the possible pairs of principal components.

    Parameters
    ----------
    X : feature matrix.
    y : integer-encoded class labels (codes 0 .. len(target_names)-1).
    nComp : number of principal components to compute.
    target_names : class names, indexed by label code (max 13 supported by
        the fixed palette below).
    save : if True, save each component-pair plot as a .png file.
    """
    pca = PCA(n_components=nComp)
    X_r = pca.fit(X).transform(X)
    l = len(target_names)
    # Fixed palette, truncated to the number of classes.
    colors = ['darkcyan', 'indigo', 'darkgreen', 'darkgoldenrod', 'darkblue'
              , 'darkorange', 'mediumvioletred', 'crimson', 'darksalmon', 'darkred', 'cyan', 'orange','green']
    colors = colors[0:l]
    target_codes = list(range(0,l))
    # NOTE(review): a single figure is created here, so all component-pair
    # scatters are drawn into the same axes — confirm this is intended.
    plt.figure(figsize=(8, 8))
    lw = 2
    for xComp in range(1,nComp+1):
        for yComp in range(xComp+1,nComp+1):
            # One scatter per class so each gets its own color/legend entry.
            for color, i, target_name in zip(colors, target_codes, target_names):
                plt.scatter(X_r[y == i, xComp-1], X_r[y == i, yComp-1], color=color, alpha=.8, lw=lw,
                            label=target_name)
            plt.legend(loc='best', shadow=False, scatterpoints=1)
            plt.title('PCA applied to dataset')
            plt.xlabel('PCA ' + str(xComp))
            plt.ylabel('PCA ' + str(yComp))
            if save:
                plt.savefig('PCA component ' + str(xComp) + ' by ' + str(yComp) + '.png')
    plt.show()
###############################################################################
###############################################################################
def plotLDA(X, y, nComp, target_names, save=False):
    """
    Scatter-plot samples on every pair of the first nComp LDA components.

    Linear Discriminant Analysis (LDA) tries to identify attributes that
    account for the most variance *between classes*. In particular,
    LDA, in contrast to PCA, is a supervised method, using known class labels.

    Parameters
    ----------
    X : feature matrix.
    y : integer-encoded class labels (codes 0 .. len(target_names)-1).
    nComp : number of discriminant components to compute.
    target_names : class names, indexed by label code (max 13 supported by
        the fixed palette below).
    save : if True, save each component-pair plot as a .png file.
    """
    lda = LinearDiscriminantAnalysis(n_components=nComp)
    X_r2 = lda.fit(X, y).transform(X)
    l = len(target_names)
    # Fixed palette, truncated to the number of classes.
    colors = ['darkcyan', 'indigo', 'darkgreen', 'darkgoldenrod', 'darkblue'
              , 'darkorange', 'mediumvioletred', 'crimson', 'darksalmon', 'darkred', 'cyan', 'orange','green']
    colors = colors[0:l]
    target_codes = list(range(0,l))
    # NOTE(review): a single figure is created here, so all component-pair
    # scatters are drawn into the same axes — confirm this is intended.
    plt.figure(figsize=(8, 8))
    lw = 2
    for xComp in range(1,nComp+1):
        for yComp in range(xComp+1,nComp+1):
            # One scatter per class so each gets its own color/legend entry.
            for color, i, target_name in zip(colors, target_codes, target_names):
                plt.scatter(X_r2[y == i, xComp-1], X_r2[y == i, yComp-1], alpha=.8, color=color, lw=lw,
                            label=target_name)
            plt.legend(loc='best', shadow=False, scatterpoints=1)
            plt.title('LDA applied to dataset')
            plt.xlabel('LDA ' + str(xComp))
            plt.ylabel('LDA ' + str(yComp))
            if save:
                plt.savefig('LDA component ' + str(xComp) + ' by ' + str(yComp) + '.png')
    plt.show()
###############################################################################
###############################################################################
def plotPCAvsLDA(X, y, nComp, target_names, save=False):
    """
    Compare PCA and LDA 2D projections of the dataset.

    Principal Component Analysis (PCA) applied to this data identifies the
    combination of attributes (principal components, or directions in the
    feature space) that account for the most variance in the data. Here we
    plot the different samples on the possible pairs of principal components.
    Linear Discriminant Analysis (LDA) tries to identify attributes that
    account for the most variance *between classes*. In particular,
    LDA, in contrast to PCA, is a supervised method, using known class labels.

    Parameters
    ----------
    X : feature matrix.
    y : integer-encoded class labels (codes 0 .. len(target_names)-1).
    nComp : number of components to compute for both projections.
    target_names : class names, indexed by label code.
    save : if True, save each component-pair plot as a .png file.
    """
    pca = PCA(n_components=nComp)
    X_r = pca.fit(X).transform(X)
    lda = LinearDiscriminantAnalysis(n_components=nComp)
    X_r2 = lda.fit(X, y).transform(X)
    l = len(target_names)
    # Fixed palette, truncated to the number of classes.
    colors = ['darkcyan', 'indigo', 'darkgreen', 'darkgoldenrod', 'darkblue'
              , 'darkorange', 'mediumvioletred', 'crimson', 'darksalmon', 'darkred', 'cyan', 'orange','green']
    colors = colors[0:l]
    target_codes = list(range(0,l))
    # NOTE(review): the PCA scatters share the one figure created here, while
    # a fresh figure is opened inside the loop for each LDA pair — confirm
    # this asymmetry is intended.
    plt.figure(figsize=(8, 8))
    lw = 2
    for xComp in range(1,nComp+1):
        for yComp in range(xComp+1,nComp+1):
            # PCA scatter for this component pair, one class at a time.
            for color, i, target_name in zip(colors, target_codes, target_names):
                plt.scatter(X_r[y == i, xComp-1], X_r[y == i, yComp-1], color=color, alpha=.8, lw=lw,
                            label=target_name)
            plt.legend(loc='best', shadow=False, scatterpoints=1)
            plt.title('PCA applied to dataset')
            plt.xlabel('PCA ' + str(xComp))
            plt.ylabel('PCA ' + str(yComp))
            if save:
                plt.savefig('PCA component ' + str(xComp) + ' by ' + str(yComp) + '.png')
            # LDA scatter for the same component pair, in its own figure.
            plt.figure()
            for color, i, target_name in zip(colors, target_codes, target_names):
                plt.scatter(X_r2[y == i, xComp-1], X_r2[y == i, yComp-1], alpha=.8, color=color, lw=lw,
                            label=target_name)
            plt.legend(loc='best', shadow=False, scatterpoints=1)
            plt.title('LDA applied to dataset')
            plt.xlabel('LDA ' + str(xComp))
            plt.ylabel('LDA ' + str(yComp))
            if save:
                plt.savefig('LDA component ' + str(xComp) + ' by ' + str(yComp) + '.png')
    plt.show()
###############################################################################
#def CancerTypesDiscAnalysis(dfAllOD, CancerTypes, nComp = 2, save=False):
# """
# We want to analyze how well different cancer types are separated from each other.
# We filter out samples where 'CancerStatus' = 'Primary solid Tumor' and ClassVar = 'Project'.
# Then we chose which CancerTypes to compare against each other and draw plots using PCA and LDA
# for the analysis purposes.
# dfAllOD is dataframe of all data
# CancerTypes is a list of the cancer types that we want to compare against each other.
# To be able to see LDA plots, compare a min of 3 cancer types at a time.
# """
# # from CancerStatus keep only 'Primary solid Tumor'
# ClassVar = 'CancerStatus'
# toKeep = ['Primary solid Tumor']
# df_pst = FilterLevels(dfAllOD, ClassVar, toKeep)
#
# # Now remove extra variables, we keep only Project
# df_pst.drop(['CancerStatus', 'TumorStage', 'Race', 'Gender'], axis=1, inplace=True)
#
## # Print counts for missing values.
## OD.printNaNs(df_pst)
#
# # drop all the rows where there is any missing data
# dfdna_pst = dropNaNs(df_pst)
#
# # Class variable is the last column, bringing it to the first place.
# cols = dfdna_pst.columns.tolist()
# cols = cols[-1:] + cols[:-1]
# dfdna_pst = dfdna_pst[cols]
#
# ClassVar = 'Project'
## OD.printClassVarValCounts(dfdna_pst,ClassVar)
## ProjectIDS = OD.returnVarLevels(dfdna_pst, ClassVar)
#
# dfdna_pst_fl = FilterLevels(dfdna_pst, ClassVar, CancerTypes)
#
# dfdna_pst_fl, ClassVarEncOrder = mapClassVar(dfdna_pst_fl,ClassVar)
#
# dfdna_pst_fl_cd = CleanData(dfdna_pst_fl,2)
#
# X_scaled_lg, y_lg = fitLogTransform(dfdna_pst_fl_cd)
#
## target_names = ClassVarEncOrder
# plotPCAvsLDA(X_scaled_lg, y_lg, nComp, ClassVarEncOrder, save=save)
#
## return ClassVarEncOrder
###############################################################################
def dfCancerTypesOrdered(dfAllOD):
    """
    Filter tumor/normal samples and list cancer types by frequency.

    Keeps only samples whose 'CancerStatus' is 'Solid Tissue Normal' or
    'Primary solid Tumor', drops all non-gene metadata columns except
    'Project', removes rows with any missing data, and moves 'Project' to
    the first column.

    Parameters
    ----------
    dfAllOD : pandas.DataFrame
        Full dataset; gene columns come first, metadata columns start at
        'CancerStatus'.

    Returns
    -------
    (dfdna_pst, CancerTypesSorted)
        The filtered dataframe and the unique 'Project' values sorted by
        descending sample count.
    """
    ClassVar = 'CancerStatus'
    #toKeep = ['Primary solid Tumor']
    toKeep = ['Solid Tissue Normal', 'Primary solid Tumor']
    df_pst = FilterLevels(dfAllOD, ClassVar, toKeep)
    # Determine the number of genes (features) in the dataframe
    # Note that the dataframe is arranged such that "CancerStatus" is the first
    # class variable, so all columns before "CancerStatus" are genes.
    numFeatures = dfAllOD.columns.get_loc('CancerStatus')
    # Now remove extra variables, we keep only Project
    remVars = df_pst.columns[numFeatures:].tolist()
    remVars.remove('Project')
    df_pst.drop(remVars, axis=1, inplace=True)
    # drop all the rows where there is any missing data
    dfdna_pst = dropNaNs(df_pst)
    # Class variable is the last column, bringing it to the first place.
    cols = dfdna_pst.columns.tolist()
    cols = cols[-1:] + cols[:-1]
    dfdna_pst = dfdna_pst[cols]
    # create a data frame of unique values of all cancer types (Project IDs) sorted
    # with respect to descending frequency.
    ClassVar = 'Project'
    # NOTE(review): this reset_index/rename pattern relies on pre-2.0 pandas,
    # where value_counts() yields a Series named after the column; on pandas
    # >= 2.0 the counts column is named 'count' and no 'Frequency' column
    # would be produced — confirm the pinned pandas version.
    VarLevels = pd.DataFrame(dfdna_pst[ClassVar].value_counts())
    VarLevels.reset_index(inplace=True)
    VarLevels.rename(columns={'index':ClassVar,ClassVar:'Frequency'}, inplace=True)
    VarLevels.sort_values(by='Frequency', inplace=True, ascending=False)
    # Here we get a list of all uniques values of Project sorted by descending frequency
    CancerTypesSorted = VarLevels[ClassVar].tolist()
    return dfdna_pst, CancerTypesSorted
###############################################################################
###############################################################################
def returnVarLevelsSorted(dfdna, ClassVar):
    """
    Return the unique levels of ClassVar together with their frequencies.

    Parameters
    ----------
    dfdna : pandas.DataFrame
    ClassVar : str
        Column whose levels should be counted.

    Returns
    -------
    (VarLevels, VarLevelsSorted)
        VarLevels : DataFrame with columns [ClassVar, 'Frequency'], sorted
        by descending frequency.
        VarLevelsSorted : list of the levels in that order.
    """
    # BUG FIX: the old reset_index()/rename() pair breaks on pandas >= 2.0,
    # where value_counts() names its output 'count' (so no 'Frequency'
    # column was produced and the sort below raised KeyError). rename_axis +
    # reset_index(name=...) yields [ClassVar, 'Frequency'] on both pre- and
    # post-2.0 pandas.
    VarLevels = (dfdna[ClassVar].value_counts()
                 .rename_axis(ClassVar)
                 .reset_index(name='Frequency'))
    VarLevels.sort_values(by='Frequency', inplace=True, ascending=False)
    # All unique values of ClassVar, sorted by descending frequency.
    VarLevelsSorted = VarLevels[ClassVar].tolist()
    return VarLevels, VarLevelsSorted
###############################################################################
###############################################################################
import seaborn as sns
import matplotlib.patheffects as PathEffects
def tSNEscatter(x, colors, ClassVarEncOrder, nClasses):
    """
    Scatter-plot a 2-D t-SNE projection, coloring points by class and
    labeling each class at the median position of its points.

    Parameters
    ----------
    x : array of shape (n_samples, 2)
        2-D embedding coordinates.
    colors : array
        Numeric class code per sample (castable to int).
    ClassVarEncOrder : sequence of str
        Class names indexed by class code.
    nClasses : int
        Number of distinct classes.
    """
    # We choose a color palette with seaborn.
    palette = np.array(sns.color_palette("husl", n_colors=nClasses))
    # We create a scatter plot.
    f = plt.figure(figsize=(8, 8))
    ax = plt.subplot(aspect='equal')
    # BUG FIX: np.int was removed in NumPy 1.24; cast with the builtin int.
    sc = ax.scatter(x[:,0], x[:,1], lw=0, s=40, c=palette[colors.astype(int)])
    ax.axis('off')
    ax.axis('tight')
    plt.title('TSNE 2D projection applied to dataset')
    # We add the labels for each class.
    txts = []
    for i in range(nClasses):
        # Position of each label: median coordinate of the class's points.
        xtext, ytext = np.median(x[colors == i, :], axis=0)
        name = ClassVarEncOrder[i]
        txt = ax.text(xtext, ytext, name, fontsize=12)
        # White outline keeps the label readable on top of the points.
        txt.set_path_effects([
            PathEffects.Stroke(linewidth=5, foreground="w"),
            PathEffects.Normal()])
        txts.append(txt)
    plt.show()
###############################################################################
#from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.feature_selection import RFECV
from itertools import compress
###############################################################################
def RecursiceFeatureElimCV(mod_name, CV, classifier, data,n_splits, scoring):
    """
    Recursive feature elimination with cross-validation (RFECV).

    Selects the optimal subset of genes for the given classifier, prints the
    selected and discarded gene names, and plots the CV score against the
    number of genes kept.

    Parameters
    ----------
    mod_name : str
        Model name used in the plot title.
    CV : str
        'SKF' (stratified k-fold) or 'KF'.
        NOTE(review): any other value leaves `cv` unbound and raises
        NameError below — confirm callers only pass these two strings.
    classifier : estimator exposing coefficients/feature importances.
    data : pandas.DataFrame whose first column is the class variable.
    n_splits : int
        Number of CV folds.
    scoring : str
        sklearn scoring metric.

    Returns
    -------
    (selected_genes, notselected_genes) : lists of gene names.
    """
    col_names = list(data)
    feature_names = col_names[1:]
    array = data.values
    y = array[:,0]
    # Features are log-transformed before fitting; fitLogTransform is
    # defined elsewhere in this file.
    X, _ = fitLogTransform(data)
    if CV == 'SKF':
        cv = StratifiedKFold(n_splits=n_splits, shuffle=True)
    elif CV == 'KF':
        cv = KFold(n_splits=n_splits, shuffle=True)
    # Create the RFE object and compute a cross-validated score.
    rfecv = RFECV(estimator=classifier, step=1, cv=cv,
                  scoring=scoring)
    rfecv.fit(X, y)
    print("Optimal number of Genes selected: %d" % rfecv.n_features_)
    print("Selected Genes:")
    # rfecv.support_ is a boolean mask over the features; compress() keeps
    # names where the mask is True.
    fil = list(rfecv.support_)
    selected_genes = list(compress(feature_names, fil))
    print(selected_genes)
    print("\nGenes not selected {0}:".format(len(feature_names)- rfecv.n_features_))
    notselected_genes = list(compress(feature_names, np.invert(fil)))
    print(notselected_genes)
    # Plot number of features VS. cross-validation scores
    plt.figure()
    plt.xlabel("Number of genes selected")
    plt.ylabel("Cross validation score")
    plt.title("Selection of Most Important Genes using RFECV (%s) \n Model: %s" % (CV, mod_name))
    # NOTE(review): grid_scores_ was removed in scikit-learn 1.2 (replaced
    # by cv_results_) — confirm the pinned sklearn version still has it.
    plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_, 'bo-')
    plt.grid(True)
    plt.show()
    return selected_genes, notselected_genes
###############################################################################
#def BRCA_TumorStageMapping(x):
# if x in ['stage ia','stage ib']:
# return 'stage i'
# elif x in ['stage iia','stage iib']:
# return 'stage ii'
# elif x in ['stage iiia','stage iiib','stage iiic']:
# return 'stage iii'
# else:
# return x
###############################################################################
# NOTE: to restore this function, need to get the gene name mapping info from somewhere else
# def BeegleSearchCommonGenes(beegleSearchResults, localGeneSet=False):
# if localGeneSet is False:
# dfGeneNamesMappingPSP = pd.read_csv("dfGeneNamesMappingPSP", sep=",")
# localGeneSet = dfGeneNamesMappingPSP['GeneName'].tolist()
# dfBeegleResults = pd.read_table(beegleSearchResults + ".tsv")
# beegleGeneSet = dfBeegleResults['Gene Symbol'].tolist()
# #return the intersection of two lists
# return list(set(localGeneSet) & set(beegleGeneSet))
###############################################################################
###############################################################################
def filterSamplesFromData(dfCancerType, ClassVar, VarLevelsToKeep):
    """
    Remove NaNs and "not reported" values from dataset.

    In addition, if ClassVar is not "CancerStatus", keep only "Primary solid
    Tumor" samples; finally keep only samples whose ClassVar value is in
    VarLevelsToKeep and prepare the dataframe via prepareDF.

    Parameters
    ----------
    dfCancerType : pandas.DataFrame for one cancer type.
    ClassVar : str — the class variable to analyze.
    VarLevelsToKeep : list — levels of ClassVar to retain.

    Returns
    -------
    (dfAnalysis_fl, ClassVarLevelsFreqTab)
        The filtered/prepared dataframe and the frequency table of the
        remaining ClassVar levels.
    """
    totalsamples = dfCancerType.shape[0]
    dfCancerType = dropNaNs(dfCancerType, ClassVar)
    if totalsamples > dfCancerType.shape[0]:
        print('Number of samples in the dataset after removing missing values: {0}' \
              .format(dfCancerType.shape[0]))
    dfAnalysis = dfCancerType.copy()
    ClassVarLevelsFreqTab, ClassVarLevelsSorted = returnVarLevelsSorted(dfAnalysis, ClassVar)
    totalsamples = dfAnalysis.shape[0]
    print('Variable for analysis: ' + '\033[1m{:10s}\033[0m'.format(ClassVar))
    print('Total samples: ' + '\033[1m{:d}\033[0m\n'.format(totalsamples))
    print(ClassVarLevelsFreqTab)
    # Keep samples related to Tumor cells only if CancerStatus is not the ClassVar
    if ClassVar != 'CancerStatus':
        toKeep = ['Primary solid Tumor']
        dfAnalysis = FilterLevels(dfAnalysis, 'CancerStatus', toKeep, printStats='no')
    # print updated stats if ClassVar was not CancerStatus
    if totalsamples > dfAnalysis.shape[0]:
        print('\nRemoved {0} samples where CancerStatus was not "Primary solid Tumor".'.format(totalsamples - dfAnalysis.shape[0]))
    ClassVarLevelsFreqTab, ClassVarLevelsSorted = returnVarLevelsSorted(dfAnalysis,ClassVar)
    # sometimes ClassVar is 'not reported' for some samples. We need to remove those as well.
    # and print the updated stats and also update the dataset.
    if 'not reported' in ClassVarLevelsSorted:
        notReported = sum(ClassVarLevelsFreqTab[ClassVarLevelsFreqTab[ClassVar] == 'not reported']['Frequency'])
        print('\nRemoved {0} samples where "{1}" is "not reported".'.format(notReported, ClassVar))
        dfAnalysis.drop(dfAnalysis.index[dfAnalysis[ClassVar] == 'not reported'], inplace= True)
        print('Now, there are '
              + '\033[1m'
              + str(dfAnalysis.shape[0])
              + '\033[0m'
              + ' samples in the dataset.')
        ClassVarLevelsFreqTab, ClassVarLevelsSorted = returnVarLevelsSorted(dfAnalysis, ClassVar)
    # Keep samples only for the values in VarLevelsToKeep while samples corresponding to the rest are filtered out.
    dfAnalysis_fl = FilterLevels(dfAnalysis, ClassVar, VarLevelsToKeep, printStats='no')
    ClassVarLevelsFreqTab, ClassVarLevelsSorted = returnVarLevelsSorted(dfAnalysis_fl, ClassVar)
    print(ClassVarLevelsFreqTab)
    dfAnalysis_fl = prepareDF(dfAnalysis_fl, ClassVar)
    return dfAnalysis_fl, ClassVarLevelsFreqTab
###############################################################################
def filterGenesFromData(dfAnalysis_fl, CancerType, ClassVar, med_tpm_threshold):
    """
    Remove genes from dataset according to specified parameters.

    med_tpm_threshold may be:
      - a list of gene names: keep ONLY those genes (plus ClassVar);
      - 'zero': drop genes whose TPM is zero in every sample;
      - a percentile string like '25%': drop genes whose median TPM lies in
        that lower percentile;
      - a number: drop genes whose median TPM is below that value;
      - 'none': keep everything.

    Returns
    -------
    pandas.DataFrame with the surviving genes.
    """
    if type(med_tpm_threshold) is list:
        removeGenes = [x for x in list(dfAnalysis_fl) if x not in med_tpm_threshold + [ClassVar]]
        # BUG FIX: drop()'s positional axis argument was removed in pandas 2.0.
        dfAnalysis_fl_cd = dfAnalysis_fl.drop(columns=removeGenes)
        print('\n*********************************************')
        print('All genes were removed except the following:')
        print(med_tpm_threshold)
    elif med_tpm_threshold != 'none': # remove low-TPM genes if specified, and dim reduction is not requested
        # Look at the list low_tpm_genes, these are the genes which will be removed.
        data_stats, low_tpm_genes = GeneExpression(dfAnalysis_fl,med_tpm_threshold)
        print('\n*********************************************')
        # BUG FIX: the original tested `type(med_tpm_threshold) == 'str'`,
        # which is always False, so string thresholds printed the numeric
        # message. Also fixed the missing spaces between the concatenated
        # message fragments (e.g. "theirTPM" -> "their TPM").
        if isinstance(med_tpm_threshold, str):
            if med_tpm_threshold == 'zero':
                print('The following {0} genes were removed because all their ' \
                      'TPM values in the set are zero:' \
                      .format(len(low_tpm_genes)))
            else:
                print('The following {0} genes were removed because their ' \
                      'median TPM values lie in the lower {1} percentile of ' \
                      'the entire set:' \
                      .format(len(low_tpm_genes),med_tpm_threshold[0:-1]))
        else:
            print('The following {0} genes were removed because their median ' \
                  'TPM values are less than {1}:' \
                  .format(len(low_tpm_genes), med_tpm_threshold))
        print(low_tpm_genes)
        # Remove low-TPM genes
        dfAnalysis_fl_cd = CleanData(dfAnalysis_fl, med_tpm_threshold)
        print('\nSize of the dataframe after filtering low-TPM genes: {0}' \
              .format(dfAnalysis_fl_cd.shape))
    else:
        # Don't remove any genes
        print('No genes were removed from the dataset.')
        dfAnalysis_fl_cd = dfAnalysis_fl
    return dfAnalysis_fl_cd
###############################################################################
def performGeneRanking(dfAnalysis_fl_cd, ClassVar, VarLevelsToKeep, logTransOffset, RS, score_metric):
    """
    Fit classification models, rank genes (features) based on feature
    importance scores, and perform a cross-fold validation analysis to assess
    the predictive performance of each model.

    Parameters
    ----------
    dfAnalysis_fl_cd : dataframe whose first column is ClassVar and whose
        remaining columns are gene expression values.
    ClassVar : name of the class variable column.
    VarLevelsToKeep : levels of ClassVar kept in the data; exactly two
        levels means binary classification, more means a regression setup.
    logTransOffset : offset passed to fitLogTransform.
    RS : random state for the stochastic models.
    score_metric : sklearn scoring string for the CV analysis.

    Returns
    -------
    (dfRanks, dfCVscores)

    Raises
    ------
    ValueError if score_metric does not match the problem type.
    """
    # Fail fast: validate the score metric against the problem type BEFORE
    # the expensive model fitting below (the original only checked after the
    # complete ranking had already been computed).
    if len(VarLevelsToKeep) > 2 and score_metric in ['accuracy', 'f1', 'roc_auc', 'average_precision']:
        raise ValueError('The provided score_metric is not applicable for regression problems!')
    elif len(VarLevelsToKeep) == 2 and score_metric in ['explained_variance', 'neg_mean_squared_error', 'r2']:
        raise ValueError('The provided score_metric is not applicable for binary classification problems!')
    # Perform label encoding for the ClassVar and log-transform data
    dfAnalysis_fl_cd = mapClassVar(dfAnalysis_fl_cd, ClassVar, VarLevelsToKeep)
    X, y = fitLogTransform(dfAnalysis_fl_cd, logTransOffset)
    print('Performing ranking of the genes...\n')
    geneNames = dfAnalysis_fl_cd.columns[1:].tolist()
    ranks = {}
    # for random forest methods, use floor(sqrt(numfeats)) as the number of estimators
    num_est = int(X.shape[1]**0.5)
    if len(VarLevelsToKeep) == 2:
        # Binary classification: classifier models.
        # define models (used later for CV analysis)
        models = [ExtraTreesClassifier(n_estimators=num_est, random_state=RS), # 0
                  RandomForestClassifier(n_estimators=num_est, random_state=RS), # 1
                  AdaBoostClassifier(n_estimators=num_est), # 2
                  XGBClassifier(), # 3
                  LinearDiscriminantAnalysis(), # 4
                  svm.SVC(kernel='linear'), # 5
                  LogisticRegression(penalty='l1', solver='saga', max_iter=10000, random_state=RS), # 6
                  LogisticRegression(penalty='l2', solver='saga', max_iter=10000, random_state=RS)] # 7
        extc = ExtraTreesClassifier(n_estimators=num_est, random_state=RS)
        extc.fit(X, y)
        ranks['ExtraTreesClassifier'] = Ranks2Dict(extc.feature_importances_, geneNames)
        print('- Extra Trees Classifier complete.')
        rfc = RandomForestClassifier(n_estimators=num_est, random_state=RS)
        rfc.fit(X, y)
        ranks['RandomForestClassifier'] = Ranks2Dict(rfc.feature_importances_, geneNames)
        print('- Random Forest Classifier complete.')
        AdabCLF = AdaBoostClassifier(n_estimators=num_est)
        AdabCLF.fit(X, y)
        ranks['AdaBoostClassifier'] = Ranks2Dict(AdabCLF.feature_importances_, geneNames)
        print('- AdaBoost Classifier complete.')
        xgb = XGBClassifier()
        xgb.fit(X, y)
        ranks['XGBClassifier'] = Ranks2Dict(xgb.feature_importances_, geneNames)
        print('- XGB Classifier complete.')
        lda = LinearDiscriminantAnalysis(solver='eigen', shrinkage='auto')
        lda.fit(X, y)
        ranks['LinearDiscriminantAnalysis'] = Ranks2Dict(np.abs(lda.coef_[0]), geneNames)
        print('- Linear Discriminant Analysis complete.')
        svmSVC = svm.SVC(kernel='linear')
        svmSVC.fit(X, y)
        ranks['SVC'] = Ranks2Dict(np.abs(svmSVC.coef_[0]), geneNames)
        print('- SVC complete.')
        # Run a logistic regression using Lasso (L1) regularization
        lasso = LogisticRegression(penalty='l1', solver='saga', max_iter=10000, random_state=RS, n_jobs=-1)
        lasso.fit(X, y)
        ranks['LassoRegression'] = Ranks2Dict(np.abs(lasso.coef_[0]), geneNames)
        print('- Lasso Regression complete.')
        # Run a logistic regression using Ridge (L2) regularization
        ridge = LogisticRegression(penalty='l2', solver='saga', max_iter=10000, random_state=RS, n_jobs=-1)
        ridge.fit(X, y)
        ranks['RidgeRegression'] = Ranks2Dict(np.abs(ridge.coef_[0]), geneNames)
        print('- Ridge Regression complete.')
    else:
        # More than two levels: treated as a regression problem.
        # define models (used later for CV analysis)
        models = [ExtraTreesRegressor(n_estimators=num_est, random_state=RS), # 0
                  RandomForestRegressor(n_estimators=num_est, random_state=RS), # 1
                  AdaBoostRegressor(n_estimators=num_est), # 2
                  XGBRegressor(), # 3
                  svm.SVR(kernel='linear'), # 4
                  Lasso(max_iter=10000, random_state=RS), # 5
                  Ridge(max_iter=10000, random_state=RS)] # 6
        extr = ExtraTreesRegressor(n_estimators=num_est, random_state=RS)
        extr.fit(X, y)
        ranks['ExtraTreesRegressor'] = Ranks2Dict(extr.feature_importances_, geneNames)
        print('- Extra Trees Regressor complete.')
        rfr = RandomForestRegressor(n_estimators=num_est, random_state=RS)
        rfr.fit(X, y)
        ranks['RandomForestRegressor'] = Ranks2Dict(rfr.feature_importances_, geneNames)
        print('- Random Forest Regressor complete.')
        AdabR = AdaBoostRegressor(n_estimators=num_est)
        AdabR.fit(X, y)
        ranks['AdaBoostRegressor'] = Ranks2Dict(AdabR.feature_importances_, geneNames)
        print('- AdaBoost Regressor complete.')
        xgb = XGBRegressor()
        xgb.fit(X, y)
        ranks['XGBRegressor'] = Ranks2Dict(xgb.feature_importances_, geneNames)
        print('- XGB Regressor complete.')
        # Note: LDA is not applicable for regression-based problems
        svmSVR = svm.SVR(kernel='linear')
        svmSVR.fit(X, y)
        ranks['SVR'] = Ranks2Dict(np.abs(svmSVR.coef_[0]), geneNames)
        print('- SVR complete.')
        # Run a linear regression using Lasso (L1) regularization
        lasso = Lasso(max_iter=10000, random_state=RS)
        lasso.fit(X, y)
        ranks['LassoRegression'] = Ranks2Dict(np.abs(lasso.coef_), geneNames)
        print('- Lasso Regression complete.')
        # Run a linear regression using Ridge (L2) regularization
        ridge = Ridge(max_iter=10000, random_state=RS)
        ridge.fit(X, y)
        ranks['RidgeRegression'] = Ranks2Dict(np.abs(ridge.coef_), geneNames)
        print('- Ridge Regression complete.')
    # calculate average rank for each gene
    r = {}
    for name in geneNames:
        r[name] = round(np.mean([ranks[method][name] for method in ranks.keys()]), 10)
    ranks['Average'] = r
    # organize and sort ranks
    dfRanks = pd.DataFrame.from_dict(ranks)
    dfRanks.reset_index(inplace=True)
    dfRanks.rename(columns={'index':'GeneNames'}, inplace=True)
    dfRanks.sort_values(by='Average', inplace=True, ascending=False)
    print('\nDone!\n')
    print('\n*********************************************')
    # Run model cross-validation and determine model performance
    CV = 'Validation: SKF'
    shuffle = True
    folds = 10
    print('Performing models CV analysis...\n')
    dfCVscores = CVScorer(models, CV, X, y, score_metric, shuffle, folds)
    print('\nDone!\n')
    return dfRanks, dfCVscores
###############################################################################
def writeResultsToFile(dfRanks, dfCVscores, CancerType, ClassVar, VarLevelsToKeep, resultsPath):
    """Export gene ranks and model CV scores to .csv files.

    Creates ``<resultsPath>/<CancerType>/`` (if needed) and writes two files,
    ``<CancerType>_<piece>_GenesRanking.csv`` and ``<CancerType>_<piece>_CVscores.csv``,
    where ``<piece>`` encodes the class variable / kept levels.

    Parameters:
        dfRanks: DataFrame of per-gene ranks (written as-is, no index).
        dfCVscores: DataFrame of cross-validation scores (written as-is, no index).
        CancerType: name used for both the sub-directory and the file prefix.
        ClassVar: class-variable name; tumor-stage variants get special naming.
        VarLevelsToKeep: class levels kept; >2 levels with 'TumorStageMerged'
            marks a regression run.
        resultsPath: parent directory for all results.
    """
    parent_dir_name = resultsPath
    print('Writing dataset, genes ranking and CV analysis results to a '
          'directory named "{0}"'.format(CancerType))
    os.makedirs(os.path.join(parent_dir_name, CancerType), exist_ok=True)
    # Build the middle piece of the file names from the classification setup.
    if len(VarLevelsToKeep) > 2 and ClassVar == 'TumorStageMerged':
        # More than two levels of the merged stage variable => regression run.
        file_name_piece = 'TumorStage_regression'
    elif ClassVar in ['TumorStage', 'TumorStageMerged', 'TumorStageBinary']:
        # Binary/multiclass tumor-stage run: encode the kept levels
        # (spaces removed so e.g. 'stage i' becomes 'stagei').
        file_name_piece = '_'.join(['TumorStage'] + VarLevelsToKeep)
        file_name_piece = file_name_piece.replace(' ', '')
    else:
        file_name_piece = ClassVar
    dfRanks.to_csv(os.path.join(parent_dir_name, CancerType, CancerType
                   + '_' + file_name_piece + '_GenesRanking.csv'), index=False)
    dfCVscores.to_csv(os.path.join(parent_dir_name, CancerType, CancerType
                      + '_' + file_name_piece + '_CVscores.csv'), index=False)
    print('\nDone!\n')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import collections
import math
import os
import random
import zipfile
import time
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Global cursor into word_batch_list/char_batch_list, advanced by
# generate_batch_train() across calls.
buffer_index = 0
# Read the data into a list of strings.
def read_data(filename):
    """Read a plain-text corpus file.

    (The original docstring claimed zip extraction, but the file is opened
    directly as text.)

    Returns a 3-tuple:
        - list of whitespace-separated tokens,
        - list of the unique characters appearing in the file (arbitrary
          order, since it comes from a set),
        - the raw file contents as one string.
    """
    with open(filename, mode="r") as f:
        data = f.read()
    data_chars = list(set(data))
    return data.split(), data_chars, data
# Load the tweet corpus (words, unique characters, raw text).
filename = './nepal/corpus.txt'
words,chars,character_data = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
# data.npy holds two integer lines: the maximum words-per-tweet and
# characters-per-word used when the batch matrices were pre-built.
# NOTE(review): despite the .npy extension it is read as plain text — confirm.
with open("./nepal/data.npy") as fil:
    t = fil.readlines()
    word_max_len, char_max_len = map(lambda x: int(x),t)
def build_dataset(words, vocabulary_size):
    """Map a token list onto integer ids, keeping the most frequent words.

    Parameters:
        words: list of tokens from the corpus.
        vocabulary_size: total vocabulary including the 'UNK' bucket; only
            the (vocabulary_size - 1) most common words get their own id.

    Returns:
        data: the input tokens translated to ids (0 == 'UNK').
        count: [['UNK', unk_count], (word, freq), ...] ordered by frequency.
        dictionary: word -> id.
        reverse_dictionary: id -> word.
    """
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    dictionary = dict()
    for word, _ in count:
        # Ids are assigned in frequency order, so 'UNK' is 0 and more
        # frequent words get smaller ids.
        dictionary[word] = len(dictionary)
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary
# Build the character-level vocabulary (char -> id, id -> char).
char_dictionary = dict()
for char in chars:
    char_dictionary[char] = len(char_dictionary)
reverse_char_dictionary = dict(zip(char_dictionary.values(), char_dictionary.keys()))
# Translate the raw corpus text into character ids.
char_data = []
for char in character_data:
    char_data.append(char_dictionary[char])
data, count, dictionary, reverse_dictionary = build_dataset(words, vocabulary_size)
del words  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
char_data_index = 0
# loading tweet list in integer marking form
word_batch_list = np.load("./nepal/word_embedding.npy")
char_batch_list = np.load("./nepal/char_embedding.npy")
with open("./nepal/tweet_ids.txt") as fil:
    # BUG FIX: the original `map(lambda y: filter(...), ...)` is lazy on
    # Python 3 — it yields filter objects, not strings, and the map is
    # exhausted after its first traversal even though tweet_list is zipped
    # twice later.  Materialize real newline-stripped strings instead.
    tweet_list = [line.replace('\n', '') for line in fil.readlines()]
batch_list = dict()
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
    """Produce one (batch, labels) pair of word ids for skip-gram training.

    Slides a window over the global `data` list, emitting `num_skips`
    (center, random-context) pairs per window position.  Advances the
    global cursor `data_index` between calls.
    """
    global data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    # deque(maxlen=span) drops the oldest id automatically as the window slides.
    buffer = collections.deque(maxlen=span)
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    for i in range(batch_size // num_skips):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [skip_window]
        for j in range(num_skips):
            # Pick a distinct random context position inside the window.
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    # Backtrack a little bit to avoid skipping words in the end of a batch
    data_index = (data_index + len(data) - span) % len(data)
    return batch, labels
def generate_batch_char(batch_size, num_skips, skip_window):
    """Character-level twin of generate_batch.

    Identical sliding-window logic, but over the global `char_data` id list
    with its own cursor `char_data_index`.
    """
    global char_data_index
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    buffer = collections.deque(maxlen=span)
    for _ in range(span):
        buffer.append(char_data[char_data_index])
        char_data_index = (char_data_index + 1) % len(char_data)
    for i in range(batch_size // num_skips):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [skip_window]
        for j in range(num_skips):
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        buffer.append(char_data[char_data_index])
        char_data_index = (char_data_index + 1) % len(char_data)
    # Backtrack a little bit to avoid skipping words in the end of a batch
    char_data_index = (char_data_index + len(char_data) - span) % len(char_data)
    return batch, labels
def generate_batch_train(batch_size, num_skips, skip_window):
    """Generate a skip-gram batch from the pre-tokenised tweet matrices.

    Builds a local window of word ids (and the matching per-word character
    id rows) from `word_batch_list`/`char_batch_list`, advancing the global
    tweet cursor `buffer_index`, then emits skip-gram pairs the same way as
    generate_batch.

    Returns:
        batch: word ids, shape (batch_size,).
        batch_chars: char-id rows for each batch word, (batch_size, char_max_len).
        labels: context word ids, (batch_size, 1).
    """
    global buffer_index
    train_data_index = 0
    assert batch_size % num_skips == 0
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    batch_chars = np.ndarray(shape=(batch_size, char_max_len), dtype=np.int32)
    span = 2 * skip_window + 1  # [ skip_window target skip_window ]
    l = batch_size // word_max_len
    # Local staging buffers; note this char_data shadows the module-level one.
    word_data = np.ndarray(shape=[l * word_max_len])
    char_data = np.ndarray(shape=[l * word_max_len, char_max_len])
    for i in range(l):
        word_data[word_max_len * i:word_max_len * (i + 1)] = word_batch_list[buffer_index]
        char_data[word_max_len * i:word_max_len * (i + 1)] = char_batch_list[buffer_index]
        # BUG FIX: the original `buffer_index + 1 % len(...)` parses as
        # `buffer_index + (1 % len(...))`, so the cursor never wrapped and
        # eventually indexed past the end of the batch lists.
        buffer_index = (buffer_index + 1) % len(word_batch_list)
    buffer = collections.deque(maxlen=span)
    buffer_ = collections.deque(maxlen=span)
    for _ in range(span):
        buffer.append(word_data[train_data_index])
        buffer_.append(char_data[train_data_index])
        # BUG FIX: wrap against the local word_data, not the global `data`;
        # the old modulus let train_data_index run past len(word_data).
        train_data_index = (train_data_index + 1) % len(word_data)
    for i in range(batch_size // num_skips):
        target = skip_window  # target label at the center of the buffer
        targets_to_avoid = [skip_window]
        for j in range(num_skips):
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            batch[i * num_skips + j] = buffer[skip_window]
            batch_chars[i * num_skips + j] = buffer_[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        buffer.append(word_data[train_data_index])
        buffer_.append(char_data[train_data_index])
        train_data_index = (train_data_index + 1) % len(word_data)
    # Backtrack a little bit to avoid skipping words in the end of a batch
    train_data_index = (train_data_index + len(word_data) - span) % len(word_data)
    return batch, batch_chars, labels
# Sanity-print a tiny word batch and a tiny character batch.
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]],
          '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
batch, labels = generate_batch_char(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], reverse_char_dictionary[batch[i]],
          '->', labels[i, 0], reverse_char_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 2  # How many words to consider left and right.
num_skips = 2  # How many times to reuse an input to generate a label.
skip_char_window = 2
num_char_skips = 3
char_vocabulary_size = len(char_dictionary)
print(char_vocabulary_size)
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16  # Random set of words to evaluate similarity on.
valid_char_size = 10
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_char_window = 20
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
valid_char_examples = np.random.choice(valid_char_window, valid_char_size, replace=False)
# Force the query word 'nee' (need) into the validation set so its
# neighbours are always printed.  NOTE(review): raises KeyError if the
# corpus never contains 'nee' — confirm the corpus guarantees it.
valid_examples[0] = dictionary['nee']
num_sampled = 64  # Number of negative examples to sample.
char_batch_size = 64
# BUG FIX: the original `map(...)` is a lazy, single-use iterator on
# Python 3, but query_tokens is consumed both by tf.constant() and again
# by the embedding lookup later; materialize a real list of word ids.
query_tokens = [dictionary[x] for x in ['nee', 'requir']]
tweet_batch_size = 10
lambda_1 = 0.7
# word_max_len
# char_max_lens
# Build the TF1 computation graph: word and character skip-gram embeddings,
# a character BiLSTM that composes per-word vectors, a mixed word+char
# training loss, and a tweet-vs-query cosine-similarity scorer.
# (Indentation reconstructed from a flattened dump — confirm scope nesting.)
graph = tf.Graph()
learning_rate = 5e-1
with graph.as_default():
    # Input data.
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_input_chars = tf.placeholder(tf.int32, shape=[char_batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    train_char_labels = tf.placeholder(tf.int32, shape=[char_batch_size, 1])
    # Character-id rows (char_max_len per word) for each word in the batch.
    word_char_embeddings = tf.placeholder(tf.int32, shape=[batch_size,char_max_len])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    valid_char_dataset = tf.constant(valid_char_examples, dtype=tf.int32)
    query_ints = tf.constant(query_tokens, dtype=tf.int32)
    # Ops and variables pinned to the CPU because of missing GPU implementation
    tweet_char_holder = tf.placeholder(tf.int32, shape=[tweet_batch_size,word_max_len,char_max_len])
    tweet_word_holder = tf.placeholder(tf.int32, shape=[tweet_batch_size, word_max_len])
    with tf.device('/cpu:0'):
        # Look up embeddings for inputs.
        embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        char_embeddings = tf.Variable(tf.random_uniform([char_vocabulary_size, embedding_size//2],-1.0,1.0))
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)
        char_embed = tf.nn.embedding_lookup(char_embeddings,train_input_chars)
        # Learned mixing coefficient between word- and char-derived vectors.
        lambda_2 = tf.Variable(tf.random_normal([1],stddev=1.0))
        # w1/w2 feed the commented-out attention mechanism further below.
        w1 = tf.Variable(tf.random_normal([embedding_size,embedding_size // 4],stddev=1.0/math.sqrt(embedding_size)))
        w2 = tf.Variable(tf.random_normal([embedding_size // 4,1],stddev=1.0/math.sqrt(embedding_size)))
        weights = tf.stack([w1]*batch_size)
        vvector = tf.stack([w2]*batch_size)
        weights_tweet = tf.stack([w1]*tweet_batch_size*word_max_len)
        vvector_tweet = tf.stack([w2]*tweet_batch_size*word_max_len)
        # Construct the variables for the NCE loss
        nce_weights = tf.Variable(
            tf.truncated_normal([vocabulary_size, embedding_size],
                                stddev=1.0 / math.sqrt(embedding_size)))
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
        # character weights
        # NOTE(review): sized with vocabulary_size rows although the char
        # loss below uses num_classes=char_vocabulary_size — confirm intended.
        nce_char_weights = tf.Variable(
            tf.truncated_normal([vocabulary_size, embedding_size // 2],
                                stddev=1.0 / math.sqrt(embedding_size // 2)))
        nce_char_biases = tf.Variable(tf.zeros([vocabulary_size]))
        nce_train_weights = tf.Variable(
            tf.truncated_normal([vocabulary_size, embedding_size],
                                stddev=1.0 / math.sqrt(embedding_size)))
        nce_train_biases = tf.Variable(tf.zeros([vocabulary_size]))
    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    loss = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights,
                       biases=nce_biases,
                       labels=train_labels,
                       inputs=embed,
                       num_sampled=num_sampled,
                       num_classes=vocabulary_size))
    loss_char = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_char_weights,
                       biases=nce_char_biases,
                       labels=train_char_labels,
                       inputs=char_embed,
                       num_sampled=10,
                       num_classes=char_vocabulary_size))
    # Construct the SGD optimizer using a learning rate of 1.0.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    optimizer_char = tf.train.AdamOptimizer(learning_rate /5).minimize(loss_char)
    # Compute the cosine similarity between minibatch examples and all embeddings.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    normalized_embeddings = embeddings / norm
    valid_embeddings = tf.nn.embedding_lookup(
        normalized_embeddings, valid_dataset)
    similarity = tf.matmul(
        valid_embeddings, normalized_embeddings, transpose_b=True)
    norm_char = tf.sqrt(tf.reduce_sum(tf.square(char_embeddings), 1, keep_dims=True))
    normalized_char_embeddings = char_embeddings / norm_char
    valid_embeddings_char = tf.nn.embedding_lookup(
        normalized_char_embeddings, valid_char_dataset)
    similarity_char = tf.matmul(
        valid_embeddings_char, normalized_char_embeddings, transpose_b=True)
    # Per-character embeddings for every word in the training batch.
    character_word_embeddings = tf.nn.embedding_lookup(normalized_char_embeddings, word_char_embeddings)
    with tf.variable_scope("lstm"):
        # Forward and backward cells of the character BiLSTM.
        lstm = tf.contrib.rnn.BasicLSTMCell(embedding_size//2,reuse=tf.get_variable_scope().reuse)
        revlstm = tf.contrib.rnn.BasicLSTMCell(embedding_size//2,reuse=tf.get_variable_scope().reuse)
    output_fwd = []
    output_bwd = []
    state_fwd = lstm.zero_state(batch_size, dtype=tf.float32)
    state_bwd = revlstm.zero_state(batch_size, dtype=tf.float32)
    # Unroll the BiLSTM over character positions, reusing weights after step 0.
    for l in range(char_max_len):
        if l > 0:
            with tf.variable_scope(tf.get_variable_scope(),reuse=True):
                with tf.variable_scope("lstm"):
                    cell_output_fwd, state_fwd = lstm(character_word_embeddings[:,l],state_fwd)
                    cell_output_bwd, state_bwd = revlstm(character_word_embeddings[:,l],state_bwd)
            cell_output_fwd = tf.reshape(cell_output_fwd, shape=[batch_size,1,embedding_size//2])
            cell_output_bwd = tf.reshape(cell_output_bwd, shape=[batch_size,1,embedding_size//2])
            output_fwd = tf.concat([output_fwd, cell_output_fwd],axis=1)
            # Backward outputs are prepended, reversing their time order.
            output_bwd = tf.concat([cell_output_bwd, output_bwd],axis=1)
        else:
            with tf.variable_scope("lstm"):
                cell_output_fwd, state_fwd = lstm(character_word_embeddings[:,l],state_fwd)
                cell_output_bwd, state_bwd = revlstm(character_word_embeddings[:,l],state_bwd)
            output_fwd = tf.reshape(cell_output_fwd, shape=[batch_size,1,embedding_size//2])
            output_bwd = tf.reshape(cell_output_bwd, shape=[batch_size,1,embedding_size//2])
    # Mean over time of the concatenated bidirectional outputs.
    output = tf.reduce_mean(tf.concat([output_bwd, output_fwd],axis=2),axis=1)
    # attention = tf.nn.softmax(tf.matmul(vvector, tf.nn.tanh(tf.matmul(intermediate,weights)),transpose_a=True))
    # output = tf.reshape(tf.matmul(attention,intermediate),shape=[batch_size,embedding_size])
    word_embeddings = tf.nn.embedding_lookup(normalized_embeddings, train_inputs)
    # Blend word-level and char-derived vectors with the learned lambda_2.
    final_embedding = lambda_2*word_embeddings + (1-lambda_2)*output
    with tf.variable_scope(tf.get_variable_scope(), reuse=None):
        loss_char_train = tf.reduce_mean(
            tf.nn.nce_loss(weights=nce_train_weights,
                           biases=nce_train_biases,
                           labels=train_labels,
                           inputs=final_embedding,
                           num_sampled=64,
                           num_classes=vocabulary_size))
    optimizer_train = tf.train.AdamOptimizer(learning_rate/5).minimize(loss_char_train)
    # Tweet-scoring path: embed every word and every word's characters.
    tweet_word_embed = tf.nn.embedding_lookup(normalized_embeddings, tweet_word_holder)
    tweet_char_embeddings = tf.reshape(tf.nn.embedding_lookup(normalized_char_embeddings, tweet_char_holder),shape=[tweet_batch_size*word_max_len, char_max_len, embedding_size//2])
    output_fwd = []
    output_bwd = []
    state_fwd = lstm.zero_state(tweet_batch_size*word_max_len, dtype=tf.float32)
    state_bwd = revlstm.zero_state(tweet_batch_size*word_max_len, dtype=tf.float32)
    # Same BiLSTM unrolled over the tweet batch (always reusing weights).
    for l in range(char_max_len):
        with tf.variable_scope(tf.get_variable_scope(),reuse=True):
            with tf.variable_scope("lstm"):
                cell_output_fwd, state_fwd = lstm(tweet_char_embeddings[:,l],state_fwd)
                cell_output_bwd, state_bwd = revlstm(tweet_char_embeddings[:,l],state_bwd)
        if l == 0:
            output_fwd = tf.reshape(cell_output_fwd, shape=[tweet_batch_size*word_max_len,1,embedding_size//2])
            output_bwd = tf.reshape(cell_output_bwd, shape=[tweet_batch_size*word_max_len,1,embedding_size//2])
        else:
            with tf.variable_scope("lstm"):
                cell_output_fwd = tf.reshape(cell_output_fwd, shape=[tweet_batch_size*word_max_len,1,embedding_size//2])
                cell_output_bwd = tf.reshape(cell_output_bwd, shape=[tweet_batch_size*word_max_len,1,embedding_size//2])
                output_fwd = tf.concat([output_fwd, cell_output_fwd],axis=1)
                output_bwd = tf.concat([cell_output_bwd, output_bwd],axis=1)
    tweet_char_embed = tf.reshape(tf.reduce_mean(tf.concat([output_bwd, output_fwd],axis=2),axis=1),shape=[tweet_batch_size, word_max_len, embedding_size])
    # attention = tf.nn.softmax(tf.matmul(vvector_tweet, tf.nn.tanh(tf.matmul(intermediate,weights_tweet)),transpose_a=True))
    # tweet_char_embed = tf.reshape(tf.matmul(attention,intermediate),shape=[tweet_batch_size,word_max_len,embedding_size])
    # Tweet vector = mean over words of the mixed embeddings.
    tweet_embedding = tf.reduce_mean(lambda_2*tweet_word_embed + (1-lambda_2)*tweet_char_embed,axis=1)
    # NOTE(review): uses the Python list query_tokens, not the query_ints constant.
    query_embedding = tf.reshape(tf.reduce_mean(tf.nn.embedding_lookup(normalized_embeddings,query_tokens),axis=0),shape=[1,embedding_size])
    query_similarity = tf.reshape(tf.matmul(tweet_embedding, query_embedding, transpose_b=True),shape=[tweet_batch_size])
    init = tf.global_variables_initializer()
# Step 5: Begin training.
# Phase 1 trains the word and char embeddings independently; phase 2 trains
# the mixed word+char representation.  Every 10000 steps, nearest-neighbour
# diagnostics are printed and all tweets are re-ranked against the query.
# (Indentation reconstructed from a flattened dump — confirm loop nesting.)
num_steps = 500001
#num_steps = 0
num_steps_train = 500001
with tf.Session(graph=graph) as session:
    # We must initialize all variables before we use them.
    init.run()
    count = 0  # counts how many tweet-ranking files have been written
    print("Initialized")
    average_loss = 0
    average_char_loss = 0
    for step in xrange(num_steps):
        # Checkpoint the current (normalised) embeddings every step.
        # NOTE(review): eval+np.save per step is very expensive — confirm intended.
        final_embeddings = normalized_embeddings.eval()
        final_char_embedding = normalized_char_embeddings.eval()
        np.save('./wordcharattn/word.npy',final_embeddings)
        np.save('./wordcharattn/char.npy',final_char_embedding)
        batch_inputs, batch_labels = generate_batch(
            batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
        batch_char_inputs, batch_char_labels = generate_batch_char(
            char_batch_size, num_skips, skip_window)
        feed_dict_char = {train_input_chars: batch_char_inputs, train_char_labels: batch_char_labels}
        # We perform one update step by evaluating the optimizer op (including it
        # in the list of returned values for session.run()
        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += loss_val
        _, loss_char_val = session.run([optimizer_char, loss_char], feed_dict=feed_dict_char)
        average_char_loss += loss_char_val
        if step % 100 == 0:
            if step > 0:
                print(time.time()- start_time)
                start_time = time.time()
                average_loss /= 100
                average_char_loss /= 100
            else:
                start_time = time.time()
            # The average loss is an estimate of the loss over the last 2000 batches.
            print("Average loss at step ", step, ": ", average_loss)
            print("Average character loss at step ", step, ": ", average_char_loss)
            average_loss = 0
            average_char_loss = 0
        # Note that this is expensive (~20% slowdown if computed every 500 steps)
        if step % 10000 == 0:
            sim = similarity.eval()
            sim_char = similarity_char.eval()
            for i in xrange(valid_size):
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = "Nearest to %s:" % valid_word
                for k in xrange(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)
            for i in xrange(valid_char_size):
                valid_word = reverse_char_dictionary[valid_char_examples[i]]
                top_k = 8  # number of nearest neighbors
                nearest = (-sim_char[i, :]).argsort()[1:top_k + 1]
                log_str = "Nearest to %s:" % valid_word
                for k in xrange(top_k):
                    close_word = reverse_char_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)
            # Score every tweet against the query and write a ranked list.
            tweet_embedding_val = []
            for t in range(len(word_batch_list) // tweet_batch_size):
                feed_dict = {
                    tweet_word_holder : word_batch_list[t*tweet_batch_size:t*tweet_batch_size + tweet_batch_size],
                    tweet_char_holder : char_batch_list[t*tweet_batch_size:t*tweet_batch_size + tweet_batch_size]
                }
                l = session.run(query_similarity, feed_dict = feed_dict)
                if len(tweet_embedding_val) % 10000 == 0 :
                    print(len(tweet_embedding_val))
                tweet_embedding_val += list(l)
            tweet_embedding_dict = dict(zip(tweet_list, tweet_embedding_val))
            # Sort tweets by descending similarity to the query.
            sorted_tweets = [i for i in sorted(tweet_embedding_dict.items(), key=lambda x: -x[1])]
            count += 1
            file_list = []
            for i in range(len(sorted_tweets)):
                # TREC-style result line: query-id, rank and score per tweet.
                file_list.append('Nepal-Need 0 %s %d %f running'%(sorted_tweets[i][0],i+1,sorted_tweets[i][1]))
            with open("./wordcharattn/tweet_list_%d.txt"%(count),mode="w") as fw:
                fw.write('\n'.join(map(lambda x: str(x),file_list)))
            print("Written tweet_list")
    # Phase 2: joint training of the mixed word+char embedding.
    average_loss = 0
    for step in xrange(num_steps_train):
        final_embeddings = normalized_embeddings.eval()
        final_char_embedding = normalized_char_embeddings.eval()
        np.save('./wordcharattn/word.npy',final_embeddings)
        np.save('./wordcharattn/char.npy',final_char_embedding)
        batch_inputs, batch_char_inputs, batch_labels = generate_batch_train(
            batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, word_char_embeddings : batch_char_inputs, train_labels: batch_labels,}
        _, loss_train_val = session.run([optimizer_train, loss_char_train], feed_dict=feed_dict)
        average_loss += loss_train_val
        if step % 100 == 0:
            if step > 0:
                print(time.time()- start_time)
                start_time = time.time()
                average_loss /= 100
            else:
                start_time = time.time()
            # The average loss is an estimate of the loss over the last 2000 batches.
            print("Average loss at step ", step, ": ", average_loss)
            average_loss = 0
            average_char_loss = 0
        # Note that this is expensive (~20% slowdown if computed every 500 steps)
        if step % 10000 == 0:
            sim = similarity.eval()
            sim_char = similarity_char.eval()
            for i in xrange(valid_size):
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8  # number of nearest neighbors
                nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                log_str = "Nearest to %s:" % valid_word
                for k in xrange(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)
            for i in xrange(valid_char_size):
                valid_word = reverse_char_dictionary[valid_char_examples[i]]
                top_k = 8  # number of nearest neighbors
                nearest = (-sim_char[i, :]).argsort()[1:top_k + 1]
                log_str = "Nearest to %s:" % valid_word
                for k in xrange(top_k):
                    close_word = reverse_char_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)
            tweet_embedding_val = []
            for t in range(len(word_batch_list) // tweet_batch_size):
                feed_dict = {
                    tweet_word_holder : word_batch_list[t*tweet_batch_size:t*tweet_batch_size + tweet_batch_size],
                    tweet_char_holder : char_batch_list[t*tweet_batch_size:t*tweet_batch_size + tweet_batch_size]
                }
                l = session.run(query_similarity, feed_dict = feed_dict)
                if len(tweet_embedding_val) % 10000 == 0 :
                    print(len(tweet_embedding_val))
                tweet_embedding_val += list(l)
            tweet_embedding_dict = dict(zip(tweet_list, tweet_embedding_val))
            sorted_tweets = [i for i in sorted(tweet_embedding_dict.items(), key=lambda x: -x[1])]
            count += 1
            file_list = []
            for i in range(len(sorted_tweets)):
                file_list.append('Nepal-Need 0 %s %d %f running'%(sorted_tweets[i][0],i+1,sorted_tweets[i][1]))
            with open("./wordcharattn/tweet_list_%d.txt"%(count),mode="w") as fw:
                fw.write('\n'.join(map(lambda x: str(x),file_list)))
    # Final snapshot of both embedding tables.
    final_embeddings = normalized_embeddings.eval()
    final_char_embedding = normalized_char_embeddings.eval()
    np.save('./wordcharattn/word.npy',final_embeddings)
    np.save('./wordcharattn/char.npy',final_char_embedding)
|
<filename>OffTarget.py
import os, platform
from PyQt5 import QtWidgets, uic, QtCore, QtGui
from functools import partial
import GlobalSettings
import gzip
import traceback
#global logger
# Shared application logger, configured once in GlobalSettings.
logger = GlobalSettings.logger
class OffTarget(QtWidgets.QDialog):
def __init__(self):
try:
super(OffTarget, self).__init__()
uic.loadUi(GlobalSettings.appdir + 'OffTargetAnalysis.ui', self)
self.setWindowTitle("Off-Target Analysis")
self.show()
self.progressBar.setMinimum(0)
self.progressBar.setMaximum(100)
self.progressBar.reset()
self.Run.clicked.connect(self.run_analysis)
self.tolerancehorizontalSlider.valueChanged.connect(self.tol_change)
self.tolerancehorizontalSlider.setMaximum(100)
self.tolerancehorizontalSlider.setMinimum(0)
self.tolerance = 0.0
self.tolerancelineEdit.setText("0")
self.pushButton.clicked.connect(self.tol_change)
self.cancelButton.clicked.connect(self.exit)
self.fill_data_dropdown()
self.perc = False
self.bool_temp = False
self.running = False
self.process = QtCore.QProcess()
# make sure to intialize the class variable in init. That way elsewhere and other classes can access it
self.output_path = ''
groupbox_style = """
QGroupBox:title{subcontrol-origin: margin;
left: 10px;
padding: 0 5px 0 5px;}
QGroupBox#Step1{border: 2px solid rgb(111,181,110);
border-radius: 9px;
font: bold;
margin-top: 10px;}"""
self.Step1.setStyleSheet(groupbox_style)
self.Step2.setStyleSheet(groupbox_style.replace("Step1", "Step2"))
self.Step3.setStyleSheet(groupbox_style.replace("Step1", "Step3"))
except Exception as e:
logger.critical("Error initializing OffTarget class.")
logger.critical(e)
logger.critical(traceback.format_exc())
exit(-1)
#copied from MT to fill in the chromo and endo dropdowns based on CSPR files user provided at the startup
def fill_data_dropdown(self):
try:
try:
self.EndocomboBox.diconnect()
except:
pass
try:
self.OrgcomboBox.diconnect()
except:
pass
self.OrgcomboBox.clear()
self.EndocomboBox.clear()
self.mismatchcomboBox.clear()
self.organisms_to_files = {}
self.organisms_to_endos = {}
#fill in chromosome and endo dropdowns
onlyfiles = [f for f in os.listdir(GlobalSettings.CSPR_DB) if os.path.isfile(os.path.join(GlobalSettings.CSPR_DB , f))]
self.orgsandendos = {}
self.shortName = {}
for file in onlyfiles:
if file.find('.cspr') != -1:
newname = file[0:-4]
endo = newname[newname.rfind("_") + 1:-1]
hold = gzip.open(file, 'r')
buf = (hold.readline())
hold.close()
buf = str(buf)
buf = buf.strip("'b")
buf = buf[:len(buf) - 2]
species = buf.replace("GENOME: ", "")
if species in self.organisms_to_files:
self.organisms_to_files[species][endo] = [file, file.replace(".cspr", "_repeats.db")]
else:
self.organisms_to_files[species] = {}
self.organisms_to_files[species][endo] = [file, file.replace(".cspr", "_repeats.db")]
if species in self.organisms_to_endos:
self.organisms_to_endos[species].append(endo)
else:
self.organisms_to_endos[species] = [endo]
if self.OrgcomboBox.findText(species) == -1:
self.OrgcomboBox.addItem(species)
# fill in endos dropdown based on current organism
endos = self.organisms_to_endos[str(self.OrgcomboBox.currentText())]
self.EndocomboBox.addItems(endos)
self.OrgcomboBox.currentIndexChanged.connect(self.update_endos)
self.EndocomboBox.currentIndexChanged.connect(self.change_endos)
# update file names for current org/endo combo
self.cspr_file = self.organisms_to_files[str(self.OrgcomboBox.currentText())][endos[0]][0]
self.db_file = self.organisms_to_files[str(self.OrgcomboBox.currentText())][endos[0]][1]
#fill in Max Mismatch dropdown
mismatch_list = ['1','2','3','4','5','6','7','8','9','10']
self.mismatchcomboBox.addItems(mismatch_list)
except Exception as e:
logger.critical("Error in fill_data_dropdown() in OffTarget.")
logger.critical(e)
logger.critical(traceback.format_exc())
exit(-1)
def change_endos(self):
try:
#update file names based on current org/endo combo
self.cspr_file = self.organisms_to_files[str(self.OrgcomboBox.currentText())][str(self.EndocomboBox.currentText())][0]
self.db_file = self.organisms_to_files[str(self.OrgcomboBox.currentText())][str(self.EndocomboBox.currentText())][1]
except Exception as e:
logger.critical("Error in change_endos() in OffTarget.")
logger.critical(e)
logger.critical(traceback.format_exc())
exit(-1)
def update_endos(self):
try:
#try to disconnect index changed signal on endo dropdown if there is one
try:
self.EndocomboBox.currentIndexChanged.disconnect()
except:
pass
#clear endo dropdown and fill in with endos relative to the current organism
self.EndocomboBox.clear()
endos = self.organisms_to_endos[str(self.OrgcomboBox.currentText())]
self.EndocomboBox.addItems(endos)
self.cspr_file = self.organisms_to_files[str(self.OrgcomboBox.currentText())][endos[0]][0]
self.db_file = self.organisms_to_files[str(self.OrgcomboBox.currentText())][endos[0]][1]
#reconnect index changed signal on endo dropdown
self.EndocomboBox.currentIndexChanged.connect(self.change_endos)
except Exception as e:
logger.critical("Error in update_endos() in OffTarget.")
logger.critical(e)
logger.critical(traceback.format_exc())
exit(-1)
#tolerance slider / entry box. Allows for slider to update, or the user to input in text box
def tol_change(self):
try:
if(self.tolerance == float(self.tolerancelineEdit.text())):
self.tolerance = self.tolerancehorizontalSlider.value() / 100 * 0.5
self.tolerance = round(self.tolerance, 3)
self.tolerancelineEdit.setText(str(self.tolerance))
else:
self.tolerance = float(self.tolerancelineEdit.text())
self.tolerance = round(self.tolerance, 3)
self.tolerancehorizontalSlider.setValue(round(self.tolerance/0.5 * 100))
except Exception as e:
logger.critical("Error in tol_change() in OffTarget.")
logger.critical(e)
logger.critical(traceback.format_exc())
exit(-1)
#run button linked to run_analysis, which is linked to the run button
def run_command(self):
try:
#reset bools for new command to run
self.perc = False
self.bool_temp = False
self.running = False
if (self.AVG.isChecked()):
avg_output = r'TRUE'
detailed_output = r' FALSE '
else:
avg_output = r'FALSE'
detailed_output = r' TRUE '
#setup arguments for C++ .exe
app_path = GlobalSettings.appdir.replace('\\','/')
if platform.system() == 'Windows':
exe_path = app_path + r'OffTargetFolder/OT_Win.exe'
elif platform.system() == 'Linux':
exe_path = app_path + r'OffTargetFolder/OT_Lin'
else:
exe_path = app_path + r'OffTargetFolder/OT_Mac'
exe_path = '"' + exe_path + '"'
data_path = ' "' + app_path + 'OffTargetFolder/temp.txt' + '"' ##
cspr_path = ' "' + GlobalSettings.CSPR_DB + '/' + self.cspr_file + '"'
db_path = ' "' + GlobalSettings.CSPR_DB + '/' + self.db_file + '"'
self.output_path = ' "' + GlobalSettings.CSPR_DB + '/' + self.FileName.text() + '_OffTargetResults.txt"'
filename = self.output_path
filename = filename[:len(filename) - 1]
filename = filename[1:]
filename = filename.replace('"', '')
exists = os.path.isfile(filename)
CASPER_info_path = r' "' + app_path + 'CASPERinfo' + '" '
num_of_mismathes = int(self.mismatchcomboBox.currentText())
tolerance = self.tolerance
endo = ' "' + self.EndocomboBox.currentText() + '"'
hsu = ' "' + GlobalSettings.mainWindow.Results.endo_data[self.EndocomboBox.currentText()][2] + '"'
#create command string
cmd = exe_path + data_path + endo + cspr_path + db_path + self.output_path + CASPER_info_path + str(num_of_mismathes) + ' ' + str(tolerance) + detailed_output + avg_output + hsu
if platform.system() == 'Windows':
cmd = cmd.replace('/', '\\')
#used to know when the process is done
def finished():
self.running = False
self.progressBar.setValue(100)
#used to know when data is ready to read from stdout
def dataReady():
#filter the data from stdout, bools used to know when the .exe starts outputting the progress
#percentages to be able to type cast them as floats and update the progress bar. Also, must
#split the input read based on '\n\ characters since the stdout read can read multiple lines at
#once and is all read in as raw bytes
line = str(self.process.readAllStandardOutput())
line = line[2:]
line = line[:len(line)-1]
if platform.system() == 'Windows':
for lines in filter(None, line.split(r'\r\n')):
if(lines.find("Running Off Target Algorithm for") != -1 and self.perc == False):
self.perc = True
if(self.perc == True and self.bool_temp == False and lines.find("Running Off Target Algorithm for") == -1):
lines = lines[32:]
lines = lines.replace("%","")
if(float(lines) <= 99.5):
num = float(lines)
self.progressBar.setValue(num)
else:
self.bool_temp = True
else:
for lines in filter(None, line.split(r'\n')):
if(lines.find("Running Off Target Algorithm for") != -1 and self.perc == False):
self.perc = True
if(self.perc == True and self.bool_temp == False and lines.find("Running Off Target Algorithm for") == -1):
lines = lines[32:]
lines = lines.replace("%","")
if(float(lines) <= 99.5):
num = float(lines)
self.progressBar.setValue(num)
else:
self.bool_temp = True
#connect QProcess to the dataReady func, and finished func, reset progressBar only if the outputfile name
#given does not already exist
if(exists == False):
self.process.readyReadStandardOutput.connect(partial(dataReady))
self.process.readyReadStandardError.connect(partial(dataReady))
self.progressBar.setValue(0)
QtCore.QTimer.singleShot(100, partial(self.process.start, cmd))
self.process.finished.connect(finished)
else: #error message about file already being created
msg = QtWidgets.QMessageBox()
msg.setWindowTitle("Error")
msg.setStandardButtons(QtWidgets.QMessageBox.Ok)
msg.setText("Output file already exists. Please choose a new output file name.")
msg.exec()
except Exception as e:
logger.critical("Error in run_command() in OffTarget.")
logger.critical(e)
logger.critical(traceback.format_exc())
exit(-1)
#linked to run button
def run_analysis(self):
    """Run-button handler: start the off-target analysis unless one is already running."""
    try:
        if self.running:
            return  # an analysis is already underway; ignore the click
        self.running = True
        self.run_command()
    except Exception as err:
        logger.critical("Error in run_analysis() in OffTarget.")
        logger.critical(err)
        logger.critical(traceback.format_exc())
        exit(-1)
#exit linked to user clicking cancel, resets bools, and kills process if one was running
def exit(self):
    """Cancel-button handler: reset the state flags, kill any running process, hide the window."""
    try:
        # clear all execution-tracking flags in one go
        self.perc = self.bool_temp = self.running = False
        self.process.kill()
        self.hide()
    except Exception as err:
        logger.critical("Error in exit() in OffTarget.")
        logger.critical(err)
        logger.critical(traceback.format_exc())
        exit(-1)
#closeEvent linked to user pressing the x in the top right of windows, resets bools, and
#kills process if there was one running
def closeEvent(self, event):
    """Window-close (X button) handler: kill any running process, reset flags, accept the close."""
    try:
        self.process.kill()
        # clear all execution-tracking flags in one go
        self.perc = self.bool_temp = self.running = False
        event.accept()
    except Exception as err:
        logger.critical("Error in closeEvent() in OffTarget.")
        logger.critical(err)
        logger.critical(traceback.format_exc())
        exit(-1)
|
<filename>inpainting/model/model_parsers.py
import numpy as np
import scipy
import scipy.spatial
import string
import os,re
import random
import torch
from ffindex import *
to1letter = {
"ALA":'A', "ARG":'R', "ASN":'N', "ASP":'D', "CYS":'C',
"GLN":'Q', "GLU":'E', "GLY":'G', "HIS":'H', "ILE":'I',
"LEU":'L', "LYS":'K', "MET":'M', "PHE":'F', "PRO":'P',
"SER":'S', "THR":'T', "TRP":'W', "TYR":'Y', "VAL":'V' }
num2aa=[
'ALA','ARG','ASN','ASP','CYS',
'GLN','GLU','GLY','HIS','ILE',
'LEU','LYS','MET','PHE','PRO',
'SER','THR','TRP','TYR','VAL',
]
aa2num= {x:i for i,x in enumerate(num2aa)}
# full sc atom representation (Nx14)
aa2long=[
(" N "," CA "," C "," O "," CB ", None, None, None, None, None, None, None, None, None), # ala
(" N "," CA "," C "," O "," CB "," CG "," CD "," NE "," CZ "," NH1"," NH2", None, None, None), # arg
(" N "," CA "," C "," O "," CB "," CG "," OD1"," ND2", None, None, None, None, None, None), # asn
(" N "," CA "," C "," O "," CB "," CG "," OD1"," OD2", None, None, None, None, None, None), # asp
(" N "," CA "," C "," O "," CB "," SG ", None, None, None, None, None, None, None, None), # cys
(" N "," CA "," C "," O "," CB "," CG "," CD "," OE1"," NE2", None, None, None, None, None), # gln
(" N "," CA "," C "," O "," CB "," CG "," CD "," OE1"," OE2", None, None, None, None, None), # glu
(" N "," CA "," C "," O ", None, None, None, None, None, None, None, None, None, None), # gly
(" N "," CA "," C "," O "," CB "," CG "," ND1"," CD2"," CE1"," NE2", None, None, None, None), # his
(" N "," CA "," C "," O "," CB "," CG1"," CG2"," CD1", None, None, None, None, None, None), # ile
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2", None, None, None, None, None, None), # leu
(" N "," CA "," C "," O "," CB "," CG "," CD "," CE "," NZ ", None, None, None, None, None), # lys
(" N "," CA "," C "," O "," CB "," CG "," SD "," CE ", None, None, None, None, None, None), # met
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2"," CE1"," CE2"," CZ ", None, None, None), # phe
(" N "," CA "," C "," O "," CB "," CG "," CD ", None, None, None, None, None, None, None), # pro
(" N "," CA "," C "," O "," CB "," OG ", None, None, None, None, None, None, None, None), # ser
(" N "," CA "," C "," O "," CB "," OG1"," CG2", None, None, None, None, None, None, None), # thr
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2"," CE2"," CE3"," NE1"," CZ2"," CZ3"," CH2"), # trp
(" N "," CA "," C "," O "," CB "," CG "," CD1"," CD2"," CE1"," CE2"," CZ "," OH ", None, None), # tyr
(" N "," CA "," C "," O "," CB "," CG1"," CG2", None, None, None, None, None, None, None), # val
]
# read A3M and convert letters into
# integers in the 0..20 range,
def parse_a3m(filename):
    """Read an A3M alignment and return it as an (Nseq, L) uint8 array.

    Uppercase residues map to 0..19 (ARNDCQEGHILKMFPSTWYV order) and the
    gap '-' to 20; lowercase insertion-state letters are stripped; any other
    character is treated as a gap.
    """
    # translation table that deletes all lowercase letters
    drop_lower = str.maketrans(dict.fromkeys(string.ascii_lowercase))
    # keep every non-label line, right-stripped and with insertions removed
    seqs = [ln.rstrip().translate(drop_lower)
            for ln in open(filename, "r") if ln[0] != '>']

    # view sequences as raw byte codes, then remap through the alphabet
    codes = np.array(list("ARNDCQEGHILKMFPSTWYV-"), dtype='|S1').view(np.uint8)
    arr = np.array([list(s) for s in seqs], dtype='|S1').view(np.uint8)
    for value, code in enumerate(codes):
        arr[arr == code] = value
    # any ASCII code left over (unknown character) becomes a gap
    arr[arr > 20] = 20
    return arr
# parse HHsearch output
def parse_hhr(filename, ffindex, idmax=105.0):
# labels present in the database
label_set = set([i.name for i in ffindex])
out = []
with open(filename, "r") as hhr:
# read .hhr into a list of lines
lines = [s.rstrip() for _,s in enumerate(hhr)]
# read list of all hits
start = lines.index("") + 2
stop = lines[start:].index("") + start
hits = []
for line in lines[start:stop]:
# ID of the hit
#label = re.sub('_','',line[4:10].strip())
label = line[4:10].strip()
# position in the query where the alignment starts
qstart = int(line[75:84].strip().split("-")[0])-1
# position in the template where the alignment starts
tstart = int(line[85:94].strip().split("-")[0])-1
hits.append([label, qstart, tstart, int(line[69:75])])
# get line numbers where each hit starts
start = [i for i,l in enumerate(lines) if l and l[0]==">"] # and l[1:].strip() in label_set]
# process hits
for idx,i in enumerate(start):
# skip if hit is too short
if hits[idx][3] < 10:
continue
# skip if template is not in the database
if hits[idx][0] not in label_set:
continue
# get hit statistics
p,e,s,_,seqid,sim,_,neff = [float(s) for s in re.sub('[=%]', ' ', lines[i+1]).split()[1::2]]
# skip too similar hits
if seqid > idmax:
continue
query = np.array(list(lines[i+4].split()[3]), dtype='|S1')
tmplt = np.array(list(lines[i+8].split()[3]), dtype='|S1')
simlr = np.array(list(lines[i+6][22:]), dtype='|S1').view(np.uint8)
abc = np.array(list(" =-.+|"), dtype='|S1').view(np.uint8)
for k in range(abc.shape[0]):
simlr[simlr == abc[k]] = k
confd = np.array(list(lines[i+11][22:]), dtype='|S1').view(np.uint8)
abc = np.array(list(" 0123456789"), dtype='|S1').view(np.uint8)
for k in range(abc.shape[0]):
confd[confd == abc[k]] = k
qj = np.cumsum(query!=b'-') + hits[idx][1]
tj = np.cumsum(tmplt!=b'-') + hits[idx][2]
# matched positions
matches = np.array([[q-1,t-1,s-1,c-1] for q,t,s,c in zip(qj,tj,simlr,confd) if s>0])
# skip short hits
ncol = matches.shape[0]
if ncol<10:
continue
# save hit
#out.update({hits[idx][0] : [matches,p/100,seqid/100,neff/10]})
out.append([hits[idx][0],matches,p/100,seqid/100,neff/10])
return out
# read and extract xyz coords of N,Ca,C atoms
# from a PDB file
def parse_pdb(filename):
    """Read backbone N/CA/C coordinates from a PDB file.

    Args:
        filename: path to a PDB file.

    Returns:
        xyz: (3, nres, 3) float array of coordinates stacked as [N, Ca, C].
        idx: (nres,) int array of residue numbers (taken from CA records).
    """
    # Fix: the original left the file handle open; read under a context manager.
    with open(filename, 'r') as fh:
        lines = fh.readlines()

    def _coords(name):
        # (x, y, z) for every ATOM record whose atom-name field (cols 12-16) matches
        return np.array([[float(l[30:38]), float(l[38:46]), float(l[46:54])]
                         for l in lines
                         if l[:4] == "ATOM" and l[12:16].strip() == name])

    xyz = np.stack([_coords("N"), _coords("CA"), _coords("C")], axis=0)
    # indices of residues observed in the structure
    idx = np.array([int(l[22:26])
                    for l in lines
                    if l[:4] == "ATOM" and l[12:16].strip() == "CA"])
    return xyz, idx
def parse_pdb_lines(lines):
    """Parse ATOM records into a full (nres, 14, 3) heavy-atom coordinate array.

    Residues are keyed by their residue number (order of CA records); atom
    slots follow the per-residue layout in aa2long. Returns
    (xyz, mask, residue_numbers) where mask marks slots actually observed;
    unobserved slots are zeroed in xyz.
    """
    residue_numbers = [int(l[22:26]) for l in lines
                       if l[:4] == "ATOM" and l[12:16].strip() == "CA"]
    # 4 backbone + up to 10 side-chain atom slots per residue
    xyz = np.full((len(residue_numbers), 14, 3), np.nan, dtype=np.float32)
    for record in lines:
        if record[:4] != "ATOM":
            continue
        res_no = int(record[22:26])
        atom_name = record[12:16]
        res_name = record[17:20]
        row = residue_numbers.index(res_no)
        # find the slot in this residue type's layout matching the atom name
        for slot, expected in enumerate(aa2long[aa2num[res_name]]):
            if expected == atom_name:
                xyz[row, slot, :] = [float(record[30:38]),
                                     float(record[38:46]),
                                     float(record[46:54])]
                break
    # mask of observed atoms; zero out the missing ones
    missing = np.isnan(xyz[..., 0])
    mask = ~missing
    xyz[missing] = 0.0
    return xyz, mask, np.array(residue_numbers)
def parse_templates(ffdb, hhr_fn, atab_fn, n_templ=10):
    """Collect template features from tabulated HHsearch output plus an FFDB.

    Args:
        ffdb: FFindex database object with .index and .data (see ffindex module).
        hhr_fn: path to the .hhr per-hit statistics file.
        atab_fn: path to the .atab tabulated alignment file.
        n_templ: unused here; kept for interface compatibility.

    Returns:
        (xyz, qmap, f0d, f1d, ids): torch tensors of stacked template
        coordinates, query-position/template-index map, per-hit statistics,
        per-position scores, and the list of template ids.
    """
    # process tabulated hhsearch output to get
    # matched positions and positional scores
    hits = []
    with open(atab_fn, "r") as fh:   # fix: original leaked the file handle
        for l in fh:
            if l[0] == '>':
                key = l[1:].split()[0]
                hits.append([key, [], []])
            elif "score" in l or "dssp" in l:
                continue
            else:
                hi = l.split()[:5] + [0.0, 0.0, 0.0]
                hits[-1][1].append([int(hi[0]), int(hi[1])])
                hits[-1][2].append([float(hi[2]), float(hi[3]), float(hi[4])])

    # get per-hit statistics from an .hhr file
    # (!!! assume that .hhr and .atab have the same hits !!!)
    # [Probab, E-value, Score, Aligned_cols,
    #  Identities, Similarity, Sum_probs, Template_Neff]
    with open(hhr_fn, "r") as fh:    # fix: original leaked the file handle
        lines = fh.readlines()
    pos = [i + 1 for i, l in enumerate(lines) if l[0] == '>']
    for i, posi in enumerate(pos):
        hits[i].append([float(s) for s in re.sub('[=%]', ' ', lines[posi]).split()[1::2]])

    # parse templates from FFDB
    for hi in hits:
        entry = get_entry_by_name(hi[0], ffdb.index)
        if entry is None:            # fix: was `entry == None`
            continue
        data = read_entry_lines(entry, ffdb.data)
        hi += list(parse_pdb_lines(data))

    # process hits: keep only hits with an attached structure and >= 10
    # aligned, structurally-resolved columns
    counter = 0
    xyz, qmap, mask, f0d, f1d, ids = [], [], [], [], [], []
    for data in hits:
        if len(data) < 7:            # no structure was attached above
            continue
        qi, ti = np.array(data[1]).T
        _, sel1, sel2 = np.intersect1d(ti, data[6], return_indices=True)
        ncol = sel1.shape[0]
        if ncol < 10:
            continue
        ids.append(data[0])
        f0d.append(data[3])
        f1d.append(np.array(data[2])[sel1])
        xyz.append(data[4][sel2])
        mask.append(data[5][sel2])
        qmap.append(np.stack([qi[sel1] - 1, [counter] * ncol], axis=-1))
        counter += 1

    xyz = np.vstack(xyz).astype(np.float32)
    # fix: np.long was deprecated in NumPy 1.20 and removed in 1.24;
    # np.int64 matches torch's LongTensor expectation
    qmap = np.vstack(qmap).astype(np.int64)
    f0d = np.vstack(f0d).astype(np.float32)
    f1d = np.vstack(f1d).astype(np.float32)
    return torch.from_numpy(xyz), torch.from_numpy(qmap), \
           torch.from_numpy(f0d), torch.from_numpy(f1d), ids
def read_templates(qlen, ffdb, hhr_fn, atab_fn, n_templ=10):
    """Load up to n_templ templates aligned to a query of length qlen.

    Returns:
        xyz: (npick, qlen, 3, 3) backbone coords, NaN where unaligned.
        f1d: (npick, qlen, 3) per-position scores, zero where unaligned.
        f0d: (npick, 3) per-template summary features.
    """
    xyz_t, qmap, t0d, t1d, ids = parse_templates(ffdb, hhr_fn, atab_fn)
    npick = min(n_templ, len(ids))

    xyz = torch.full((npick, qlen, 3, 3), np.nan).float()
    f1d = torch.zeros((npick, qlen, 3)).float()
    f0d = []

    for row, templ in enumerate(torch.arange(npick)):
        # rows of qmap belonging to this template, and the query positions they map to
        sel = torch.where(qmap[:, 1] == templ)[0]
        pos = qmap[sel, 0]
        xyz[row, pos] = xyz_t[sel, :3]
        f1d[row, pos] = t1d[sel, :3]
        # [probability, identity, similarity] summary for this template
        f0d.append(torch.stack([t0d[templ, 0] / 100.0,
                                t0d[templ, 4] / 100.0,
                                t0d[templ, 5]], dim=-1))
    return xyz, f1d, torch.stack(f0d, dim=0)
|
import json
import os
import logging
import boto3
from botocore.exceptions import ClientError
import pandas as pd
import openpyxl
import numpy as np
# Handle logger
logger = logging.getLogger()
# Fix: read LOG_LEVEL via the imported os module. The original used
# `logging.os.environ`, which only works because logging happens to import os
# internally — an accident, not an API.
logger.setLevel(os.environ['LOG_LEVEL'])
dynamodb = boto3.resource('dynamodb')
aws_environment = os.environ['AWSENV']
logger.info("Finished handling variables, imports, and clients")
# Check if executing locally or on AWS, and configure DynamoDB connection accordingly.
# https://github.com/ganshan/sam-dynamodb-local/blob/master/src/Person.py
if aws_environment == "AWS_SAM_LOCAL":
    # Local table name hard coded in entrypoint.sh for local dev
    table = boto3.resource('dynamodb', endpoint_url="http://dynamodb-local:8000").Table('visitorCount')
    logger.info("Using local Dynamo container for testing")
else:  # Running in AWS
    table = dynamodb.Table(os.environ['TABLE_NAME'])
logger.info("Finished conditional dynamodb logic")
def getUserCount():
    """Read the visitor count from DynamoDB and return it incremented by one.

    Returns:
        The incremented count; 1 on first use (no item stored yet);
        None if a ClientError occurred.
    """
    try:
        logger.info("Querying DDB")
        user_count_from_table = table.get_item(
            Key={'Count': 'Users'}
        )
        # Handle first-use case where the count item doesn't exist yet
        if 'Item' in user_count_from_table:
            user_count = user_count_from_table['Item']['Number'] + 1
        else:
            user_count = 1
        logger.info(user_count)
        return user_count
    # Catch known errors
    # ToDo: Add more handling here
    except ClientError as e:
        if e.response['Error']['Code'] == 'RequestLimitExceeded':
            # Fix: the original `logger.error('ERROR: ', e)` passed an argument
            # with no %s format slot, so the exception text was dropped.
            logger.error('ERROR: %s', e)
        else:
            logger.error("UNEXPECTED ERROR from DDB: %s", e)
def updateUserCount(count):
    """Persist the visitor count to DynamoDB under item key ('Count': 'Users').

    Args:
        count: the new visitor count to store.
    """
    try:
        logger.info("Updating DDB with new user count")
        table.put_item(
            Item={
                'Count': 'Users',
                'Number': count
            }
        )
    # Catch known errors
    # ToDo: Add more handling here
    except ClientError as e:
        if e.response['Error']['Code'] == 'RequestLimitExceeded':
            # Fix: the original `logger.error('ERROR: ', e)` passed an argument
            # with no %s format slot, so the exception text was dropped.
            logger.error('ERROR: %s', e)
        else:
            logger.error("UNEXPECTED ERROR from DDB: %s", e)
def passiveGrowth(BEGdate, ENDdate, RID):
    """Compute passive (economy-driven) sales growth for region RID between two dates.

    Reads three sheets from data.xlsx: per-region sales ('sarsales'),
    per-region factor elasticities ('Elasticites') and economic factor time
    series ('Econ Factors data').

    Returns:
        (PassiveGrowth, TotalEconomicFactor, TGRSales, RSaleBeg, RSaleEnd)
    """
    #Reads Data In
    sarsales = pd.read_excel(r'data.xlsx', sheet_name='sarsales')
    #sarsalesColumns = sarsales.columns.tolist()
    sarsales=sarsales.to_numpy()
    ElastRSector = pd.read_excel(r'data.xlsx', sheet_name='Elasticites')
    ElastRSectorColumns = ElastRSector.columns.tolist()
    ElastRSector=ElastRSector.to_numpy()
    EFactors = pd.read_excel(r'data.xlsx', sheet_name='Econ Factors data')
    EFactors=EFactors.to_numpy()
    SizeSarsales = sarsales.shape[0]
    SizeEFactors = EFactors.shape[0]
    SizeElastRSector = ElastRSector.shape[0]
    WidthElastRSector = ElastRSector.shape[1]
    # logger.info(EFactors)
    logger.info("SizeSarsales: "+str(SizeSarsales))
    #Declares a few variables as set up
    # Composite region/date key: RID * 1e6 + YYYYMMDD (assumed encoding of
    # sarsales column 2 — TODO confirm against the spreadsheet)
    TRBID = RID * 1000000 + BEGdate
    TREID = RID * 1000000 + ENDdate
    TotalEconomicFactor = 0
    factors = []
    # logger.info("SizeSarsales:",str(SizeSarsales))
    #Gets rsale for start and end
    RSaleBeg=0
    RSaleEnd=0
    i=0
    while i < SizeSarsales:
        if sarsales[i][2] == TRBID:
            RSaleBeg = sarsales[i][4]
        if sarsales[i][2] == TREID:
            RSaleEnd = sarsales[i][4]
        if ((RSaleBeg != 0) and (RSaleEnd != 0)):
            break
        i=i+1
    #Sets TGRSales
    # NOTE(review): if TRBID is absent from the sheet, RSaleBeg stays 0 and
    # this division raises ZeroDivisionError — confirm inputs are validated upstream.
    TGRSales = (RSaleEnd - RSaleBeg)/RSaleBeg
    #Gets index of interest from EFactors
    i=0
    while i < SizeEFactors:
        if EFactors[i][0] == BEGdate:
            EFactorsIndex1 = i
        if EFactors[i][0] == ENDdate:
            EFactorsIndex2 = i
        i=i+1
    # NOTE(review): EFactorsIndex1/EFactorsIndex2 are never initialized; if a
    # date is missing from the sheet, the usages below raise NameError.
    ##Finds none zero values in EfactorsIndex1 and EfactorsIndex2 and calculates factors
    ##----------assumes its sorted (ie column[x] is the same factor in EFactors and ElastRSector
    ##Generates index we care about from ElastRSector
    i = 0
    while i < SizeElastRSector:
        if ElastRSector[i][0] == RID:
            ElastRSectorIndex = i
            #finds none-zero values
            j=2
            while j < WidthElastRSector:
                if ElastRSector[i][j] != 0:
                    #None zero Column
                    factors.append(j)
                    #Factor Name
                    #factors.append(ElastRSector[0][j])
                    factors.append(ElastRSectorColumns[j])
                    temp1=ElastRSector[i][j]
                    #Elastisity
                    factors.append(ElastRSector[i][j])
                    temp2=((EFactors[EFactorsIndex2][j-1] - EFactors[EFactorsIndex1][j-1]) / EFactors[EFactorsIndex1][j-1])
                    #growth
                    factors.append((EFactors[EFactorsIndex2][j-1] - EFactors[EFactorsIndex1][j-1]) / EFactors[EFactorsIndex1][j-1])
                    #Impact
                    factors.append(temp1*temp2)
                    #Begining factor
                    factors.append(EFactors[EFactorsIndex1][j-1])
                    #Ending factor
                    factors.append(EFactors[EFactorsIndex2][j - 1])
                    TotalEconomicFactor = TotalEconomicFactor + temp1 * temp2
                j=j+1
        if TotalEconomicFactor != 0:
            break
        i=i+1
    # 7 values are appended per non-zero factor; reshape into one row per factor
    factors = np.reshape(factors, (-1, 7))
    Sizefactors = factors.shape[0]
    PassiveGrowth = TotalEconomicFactor / TGRSales
    return PassiveGrowth, TotalEconomicFactor, TGRSales, RSaleBeg, RSaleEnd;
def extract_child_from_body_of_apg_event(event, child_item, mandatory):
    """Pull the first value of a multi-value query-string parameter from an
    API Gateway proxy event.

    Args:
        event: API Gateway proxy event dict.
        child_item: name of the query-string parameter to extract.
        mandatory: when True, a missing parameter is an error.

    Returns:
        The first value for child_item, or None when absent and not mandatory.

    Raises:
        ValueError: if the parameter is missing and mandatory is True.
    """
    try:
        return event['multiValueQueryStringParameters'][child_item][0]
    except (KeyError, json.decoder.JSONDecodeError, TypeError):  # value missing/empty
        if mandatory:
            logger.error(f"Could not find value for: {child_item}")
            # Fix: the original did `raise 'ERROR: ...'`, which raises
            # TypeError ("exceptions must derive from BaseException") instead
            # of the intended error.
            raise ValueError('ERROR: Must pass in all required values!')
def lambda_handler(event, context):
    """API Gateway entry point: bump the visitor count, compute passive growth
    for the requested region/date range, and return a JSON response."""
    RID = extract_child_from_body_of_apg_event(event, 'RID', mandatory=True)
    StartDate = extract_child_from_body_of_apg_event(event, 'StartDate', mandatory=True)
    EndDate = extract_child_from_body_of_apg_event(event, 'EndDate', mandatory=True)
    # normalize ISO-style dates (YYYY-MM-DD) to integer YYYYMMDD
    RID = int(RID)
    StartDate = int(StartDate.replace('-', ''))
    EndDate = int(EndDate.replace('-', ''))
    logger.info("RID: " + str(RID))
    logger.info("StartDate: " + str(StartDate))
    logger.info("EndDate: " + str(EndDate))
    logger.info(type(EndDate))

    user_count = getUserCount()
    updateUserCount(user_count)

    passiveGrowthVar, TotalEconomicFactor, TotalSalesGrowth, RSaleBeg, RSaleEnd = passiveGrowth(StartDate, EndDate, RID)
    logger.info("passiveGrowthVar: " + str(passiveGrowthVar))
    logger.info("BeginningValue: " + str(RSaleBeg))
    logger.info("EndingValue: " + str(RSaleEnd))
    logger.info("TotalSalesGrowth: " + str(TotalSalesGrowth))
    logger.info("InfluencerEconomicFactorImpact: " + str(TotalEconomicFactor))
    logger.info("User count: " + str(user_count))

    response_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Credentials": "true",
        "Access-Control-Allow-Headers": 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token',
        "Content-Type": "application/json"
    }
    response_body = {
        "User count": str(user_count),
        "passiveGrowthVar": str(passiveGrowthVar),
        "BeginningValue": str(RSaleBeg),
        "EndingValue": str(RSaleEnd),
        "TotalSalesGrowth": str(TotalSalesGrowth),
        "InfluencerEconomicFactorImpact": str(TotalEconomicFactor)
    }
    return {
        "statusCode": 200,
        "headers": response_headers,
        "body": json.dumps(response_body),
    }
|
<reponame>seahorn/crab-llvm
#!/usr/bin/env python3
import sys
import os
import os.path
import re
#------------------------------------------------------------------------#
# Process a *.crabir file generated by the Clam option --ocrab and print
# information about how many assertions were proven by Clam.
#------------------------------------------------------------------------#
# The option --ocrab prints the CrabIR together with comments next to
# each assertion. These comments state whether the assertion was
# proved for all possible calling contexts or otherwise, it says how
# many times the assertion was not proven. Note that the
# inter-procedural analysis might re-analyze a function multiple times
# (one per different calling context). Because of this, the same
# assertion can be re-checked multiple times. This script reports that an
# assertion is proven if the assertion was proven for all calling
# contexts.
#
# Usage:
# clam.py prog.c --crab-check=assert --ocrab=prog.crabir MORE_OPTIONS
# read_results.py prog.crabir
#------------------------------------------------------------------------#
def processLog(f, verbosity):
    """Scan a .crabir file produced by Clam --ocrab and report, per function and
    in total, how many assertions were proven ("Result: OK") or not
    ("Result: FAIL").

    verbosity 0 prints only totals; 1 adds per-function stats for functions
    with unproven assertions; 2 adds stats for every function; 3 also dumps
    the assertion lines themselves.
    """
    try:
        fd = open(f, 'r')
    except Exception as e:
        # Fix: the original called sys.exit first, so print(e) never executed;
        # fold the underlying error into the exit message instead.
        sys.exit('ERROR: could not open {}: {}'.format(f, e))
    funmap = {}
    curr_func = None
    curr_num_of_asserts, curr_num_of_ok_asserts, curr_num_of_fail_asserts = 0, 0, 0
    curr_ok_assert_lines, curr_fail_assert_lines = "", ""
    with fd:  # fix: close the file when done
        for line in fd:
            if "-- Inserted" in line:
                s1, _, _ = line.split('.')
                print(s1)
            elif "declare" in line:
                _, s = line.split('declare')
                # remove the first character that it's a whitespace
                s = s[1:]
                # function name: match any character until next (
                pattern = re.compile(r"^.*?(?=\()")
                func = re.search(pattern, s).group(0)
                if curr_func is not None:
                    # store results of the previous function
                    funmap[curr_func] = (curr_num_of_asserts, curr_num_of_ok_asserts,
                                         curr_num_of_fail_asserts,
                                         curr_ok_assert_lines, curr_fail_assert_lines)
                # reset counters for the new function
                curr_num_of_asserts, curr_num_of_ok_asserts, curr_num_of_fail_asserts = 0, 0, 0
                curr_ok_assert_lines, curr_fail_assert_lines = "", ""
                curr_func = func
            elif re.search("Result: OK", line):
                curr_num_of_ok_asserts += 1
                curr_num_of_asserts += 1
                curr_ok_assert_lines += line
            elif re.search("Result: FAIL", line):
                curr_num_of_fail_asserts += 1
                curr_num_of_asserts += 1
                curr_fail_assert_lines += line
    # Fix: flush the last function's results — the original never stored them,
    # so the final function was always missing from the totals.
    if curr_func is not None:
        funmap[curr_func] = (curr_num_of_asserts, curr_num_of_ok_asserts,
                             curr_num_of_fail_asserts,
                             curr_ok_assert_lines, curr_fail_assert_lines)
    total_asserts, total_ok_asserts, total_fail_asserts, total_skipped_asserts = 0, 0, 0, 0
    for key, value in funmap.items():
        (total, ok, fail, ok_lines, fail_lines) = value
        if total > 0 and ok == 0 and fail == 0:
            print("Function " + str(key) + " has " + str(total) + \
                  " assertions but it was not analyzed")
            total_skipped_asserts += total
            continue
        total_asserts += total
        total_ok_asserts += ok
        total_fail_asserts += fail
        if (verbosity == 1 and (ok < total)) or verbosity > 1:
            print("Function " + str(key) + ":")
            print("\tTotal number of assertions:" + str(total))
            print("\tTotal number of proven assertions:" + str(ok))
            print("\tTotal number of fail assertions:" + str(fail))
            if verbosity > 2:
                print("Proven assertions:")
                print(ok_lines)
                print("Fail assertions:")
                print(fail_lines)
    print("-- Checked assertions : " + str(total_asserts))
    print("-- Unchecked assertions: " + str(total_skipped_asserts) + \
          " (These assertions are in functions that have not been analyzed)")
    print("-- Proven assertions : " + str(total_ok_asserts))
    print("-- Failed assertions : " + str(total_fail_asserts))
    if total_asserts > 0:
        ok_ratio = ((total_ok_asserts / total_asserts) * 100.0)
        print(" Ratio of proven assertions: " + str(ok_ratio) + "%")
    if total_fail_asserts + total_ok_asserts < total_asserts:
        print("-- Missed assertions : " + str(total_asserts - (total_ok_asserts + total_fail_asserts)) + \
              " <-- This should be 0")
def parseOpt(argv):
    """Parse command-line arguments: one positional .crabir file plus an
    optional -verbose NUM level (default 0)."""
    import argparse
    parser = argparse.ArgumentParser(
        description='Process a .crabir file produced by Clam option --ocrab')
    parser.add_argument(metavar='FILE', dest="infile", nargs=1, help=".crabir file")
    parser.add_argument('-verbose', '--verbose', metavar='NUM', required=False,
                        type=int, dest="verbose", default=0,
                        help="Verbosity level")
    return parser.parse_args(argv)
def main(argv):
    """Entry point: parse options, process the log file, return exit status 0."""
    opts = parseOpt(argv[1:])
    processLog(opts.infile[0], opts.verbose)
    return 0
if __name__ == '__main__':
    res = None
    try:
        res = main(sys.argv)
    except Exception as e:
        # report the failure, then fall through to sys.exit below
        print(e)
    except KeyboardInterrupt:
        pass
    finally:
        # NOTE(review): if main() raised, res is still None and sys.exit(None)
        # exits with status 0 — failures are not reflected in the exit code.
        # Confirm whether that is intended.
        sys.exit(res)
|
#-------------------------------------------------------------------------------
# Name: mfapy Example_4_Simulation.py
# Simulation of 13C-MFA using artificial observed MDVs to select effective carbon source.
# Toy model used in this code in modified from Antoniewicz et al Metab. Eng. 2007, 9, 68-86.
#
# Author: Fumio_Matsuda
#
# Created: 12/06/2018
# Copyright: (c) Fumio_Matsuda 2018
# Licence: MIT license
#-------------------------------------------------------------------------------
import mfapy

if __name__ == '__main__':
    #
    # Construction of metabolic model
    #
    reactions, reversible, metabolites, target_fragments = mfapy.mfapyio.load_metabolic_model("Example_4_Simulation_model.txt", format = "text")
    model = mfapy.metabolicmodel.MetabolicModel(reactions, reversible, metabolites, target_fragments)
    #
    # Configurations
    #
    model.set_configuration(callbacklevel = 0) #
    model.set_configuration(iteration_max = 20000) # Maximal iternations in optimization
    model.set_configuration(number_of_repeat = 5) #Iteration in self.fitting_flux(method = 'deep') [SLSQP => LN_PRAXIS] * n
    model.set_configuration(ncpus = 4)
    model.set_configuration(initial_search_repeats_in_grid_search = 5) # Number of repeat for finding initial flux in Grid search mode such as model.generate_initial_states(5, 1)
    model.set_configuration(grid_search_iterations = 1) # Fitting interations at each grid.
    #
    # Load metabolic state from file
    #
    flux = model.load_states("Example_4_Simulation_status.csv", format = 'csv')
    model.set_constraints_from_state_dict(flux)
    # Each tuple is (labeling design, carbon-source isotopomer mix for
    # experiment 1, mix for experiment 2). isotope2 is only used when the
    # design is "parallel".
    for method, isotope1, isotope2 in [("single",{'#100000':1.0},{}),("single",{'#111111':0.5,'#000000':0.5},{}),("single",{'#111111':0.5,'#100000':0.5},{}),("parallel",{'#111111':0.5,'#000000':0.5},{'#100000':1.0})]:
        # 100 replicate simulations per labeling design (i is just a repeat counter)
        for i in range(100):
            #
            # Generation of instances of CarbonSource class from model
            #
            cs = model.generate_carbon_source_template()
            cs.set_each_isotopomer('SubsGlc',isotope1, correction = "no")
            #
            # Generation of MDV instances from metabolic state and carbon source.
            # Gaussian noise simulates measurement error in the artificial data.
            #
            mdv = model.generate_mdv(flux, cs)
            mdv.add_gaussian_noise(0.01)
            mdv.set_std(0.01, method = 'absolute')
            #
            # Addition of labeling experiment 1
            #
            model.clear_experiment()
            model.set_experiment('ex1', mdv, cs)
            if method == "parallel":
                #
                # Generation of instances of CarbonSource class from model
                #
                cs2 = model.generate_carbon_source_template()
                cs2.set_each_isotopomer('SubsGlc',isotope2, correction = "no")
                #
                # Generation of MDV instances from metabolic state and carbon source.
                #
                mdv2 = model.generate_mdv(flux, cs2)
                mdv2.add_gaussian_noise(0.01)
                mdv2.set_std(0.01, method = 'absolute')
                #
                # Addition of labeling experiment 2
                #
                model.set_experiment('ex2', mdv2, cs2)
            #
            # Generate initial metabolic state for fitting
            #
            model.set_configuration(iteration_max = 20000) # Maximal iternations in optimization
            model.set_configuration(number_of_repeat = 5) #Iteration in self.fitting_flux(method = 'deep') [SLSQP => LN_PRAXIS] * n
            state, flux_initial = model.generate_initial_states(200, 8, method ="parallel")
            # Two-stage fitting: global (GN_CRS2_LM), then local refinement ('deep')
            state, RSS_bestfit, flux_opt1 = model.fitting_flux(method = "GN_CRS2_LM", flux = flux_initial)
            state, RSS_bestfit, flux_opt2 = model.fitting_flux(method = "deep", flux = flux_opt1)
            pvalue, rss_thres = model.goodness_of_fit(flux_opt2[0], alpha = 0.05)
            #
            # Target reactions to estimate 95% CI
            #
            target_reactions = [('reaction', "v4"),('reaction', "v5"),('reaction', "v6"),('reaction', "v7"),('reversible', "FUM")]
            ci_edge = model.generate_ci_template(targets = target_reactions)
            #
            # Grid search
            #
            model.set_configuration(iteration_max = 10000) # Maximal iternations in optimization
            model.set_configuration(number_of_repeat = 3) #Iteration in self.fitting_flux(method = 'deep') [SLSQP => LN_PRAXIS] * n
            ci = model.search_ci(ci_edge, flux_opt2[0], method = 'grid')
            #
            # Output of grid search results: confidence-interval width per reaction
            #
            for rid in target_reactions:
                lb = ci['data'][rid]['lower_boundary']
                ub = ci['data'][rid]['upper_boundary']
                print(rid,isotope1,isotope2, "Interval", ub-lb, "Lower bondary:", lb, "Upper boundary:", ub, "RSS_bestfit:",RSS_bestfit[0])
|
<filename>alexnet/src/inference.py
import json
import os
import time
import torch
import torchvision.transforms as transforms
import torchvision.models as models
from PIL import Image
from matplotlib import pyplot as plt
# Directory containing this script; used to build data-file paths in __main__.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Run inference on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def load_class_names(p_clsnames, p_clsnames_cn):
    """Load the class-name lists.

    :param p_clsnames: path to a JSON file containing the class names
    :param p_clsnames_cn: path to a UTF-8 text file, one Chinese class name per line
    :return: (class_names, class_names_cn)
    """
    with open(p_clsnames, "r") as fp:
        names = json.load(fp)
    with open(p_clsnames_cn, encoding='UTF-8') as fp:
        names_cn = fp.readlines()
    return names, names_cn
def process_img(path):
    """Load an image and turn it into a normalized 4-D tensor for the model.

    :param path: image file path
    :return: (img_tensor, img_rgb) — the (1, 3, 224, 224) tensor moved to
             `device`, plus the original RGB PIL image
    """
    # hard-coded mean/std computed over ImageNet
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(imagenet_mean, imagenet_std),
    ])

    # convert the image into the form the model consumes
    img_rgb = Image.open(path).convert('RGB')
    img_tensor = preprocess(img_rgb)
    # c,h,w -> b,c,h,w
    img_tensor.unsqueeze_(0)
    img_tensor = img_tensor.to(device)
    return img_tensor, img_rgb
def get_model(path_state_dict, vis_model=False):
    """Build an AlexNet and load pretrained weights.

    :param path_state_dict: path to the saved state dict
    :param vis_model: when True, print a torchsummary overview of the model
    :return: the model in eval mode, moved to `device`
    """
    model = models.alexnet()
    state_dict = torch.load(path_state_dict)
    model.load_state_dict(state_dict)
    model.eval()

    if vis_model:
        from torchsummary import summary
        summary(model, input_size=(3, 224, 224), device="cpu")

    return model.to(device)
if __name__ == '__main__':
    # config: weight file, input image, and the two class-name files
    path_state_dict = os.path.join(BASE_DIR, "..", "data", "alexnet-owt-4df8aa71.pth")
    # path_img = os.path.join(BASE_DIR, "..", "data", "golden_retriever.jpg")
    path_img = os.path.join(BASE_DIR, "..", "data", "tiger_cat.jpg")
    path_classnames = os.path.join(BASE_DIR, "..", "data", "imagenet1000.json")
    path_classnames_cn = os.path.join(BASE_DIR, "..", "data", "imagenet_classnames.txt")

    # load class names (English + Chinese)
    cls_n, cls_n_cn = load_class_names(path_classnames, path_classnames_cn)

    # 1/5 load img
    img_tensor, img_rgb = process_img(path_img)

    # 2/5 load model (True -> also print a torchsummary overview)
    alexnet_model = get_model(path_state_dict, True)

    # 3/5 inference: tensor -> vector (no gradients needed; also time the forward pass)
    with torch.no_grad():
        time_tic = time.time()
        outputs = alexnet_model(img_tensor)
        time_toc = time.time()

    # 4/5 index to class names: top-1 prediction plus top-5 indices
    _, pred_int = torch.max(outputs.data, 1)
    _, top5_idx = torch.topk(outputs.data, 5, dim=1)

    pred_idx = int(pred_int.cpu().numpy())
    pred_str, pred_cn = cls_n[pred_idx], cls_n_cn[pred_idx]
    print("img: {} is: {}\n{}".format(os.path.basename(path_img), pred_str, pred_cn))
    print("time consuming:{:.2f}s".format(time_toc - time_tic))

    # 5/5 visualization: show the image with the top-5 labels overlaid
    plt.imshow(img_rgb)
    plt.title("predict:{}".format(pred_str))
    top5_num = top5_idx.cpu().numpy().squeeze()
    text_str = [cls_n[t] for t in top5_num]
    for idx in range(len(top5_num)):
        plt.text(5, 15 + idx * 30, "top {}:{}".format(idx + 1, text_str[idx]), bbox=dict(fc='yellow'))
    plt.show()
|
#!/usr/bin/env python3
import discord
from discord.ext import tasks, commands
from RadarrAPI import RadarrAPI as radarr
from getSubs import get_subs
from ZoomPy import ZoomUs
import csv
import pandas as pd
import requests
import shutil
import logging
# Discord client used by the event handlers below.
client = discord.Client()
# Reset the last-requested-movie id on every start.
file2write=open("last_movie_id.txt",'w')
file2write.write("0")
file2write.close()
logging.basicConfig(filename='example.log', level=logging.DEBUG)
@client.event
async def on_ready():
    # Announce login and advertise the '!ayuda' help command in the bot's presence.
    print('We have logged in as {0.user}'.format(client))
    await client.change_presence(activity=discord.Game(name="Utiliza '!ayuda' para ver mis comandos!"))
@client.event
async def on_message(message):
with open("commands.csv", "r") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
command_list = next(csv_reader)
print(command_list)
#!film -> Filmbot!
flag_list = ["🇺🇸", "🇪🇸", "🇫🇷", "🇮🇹", "🇩🇪", "🇷🇺", "🇯🇵", "🇰🇷"]
if message.author == client.user:
return
if message.content.startswith('!hello'):
await message.channel.send('Hello World!')
if message.content.startswith('!film'):
mensaje = message.content
movie_name = mensaje.replace("!film ", "")
embed=discord.Embed(title="Tadzio´s FilmBot", description="¿En que calidad deseas tu película?", color=0xe74c3c)
embed.set_thumbnail(url="https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
#SELECTING QUALITY
quality_settings = await message.channel.send(embed=embed)
for emoji in ["<:720:763163748948508744>", "<:1080:763163336945958943>", "<:4k:763164907548966922>", "<:any:763181087794397194>"]:
await quality_settings.add_reaction(emoji)
def check(reaction, user):
if user == message.author and str(reaction.emoji) in ["<:720:763163748948508744>", "<:1080:763163336945958943>", "<:4k:763164907548966922>", "<:any:763181087794397194>"]:
return True,
else:
return False
try:
reaction, user = await client.wait_for('reaction_add', timeout=60, check = check)
except TimeoutError:
await message.channel.send('Tiempo de espera agotado. Intentalo de nuevo!')
exit
else:
print(type(reaction.emoji))
if str(reaction.emoji) == "<:any:763181087794397194>":
resolution = "ANY"
elif str(reaction.emoji) == "<:720:763163748948508744>":
resolution = "720"
elif str(reaction.emoji) == "<:1080:763163336945958943>":
resolution = "1080"
elif str(reaction.emoji) == "<:4k:763164907548966922>":
resolution = "4K"
embed=discord.Embed(title="Tadzio´s FilmBot", description="¿En que idioma buscas tu película?", color=0xe74c3c)
embed.set_thumbnail(url="https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
#SELECTING LANGUAGE
language_settings = await message.channel.send(embed=embed)
for emoji in flag_list:
await language_settings.add_reaction(emoji)
def check(reaction, user):
if user == message.author and str(reaction.emoji) in flag_list:
return True,
else:
return False
try:
reaction, user = await client.wait_for('reaction_add', timeout=60, check = check)
except TimeoutError:
await message.channel.send('Tiempo de espera agotado. Intentalo de nuevo!')
exit
else:
print(type(reaction.emoji))
if str(reaction.emoji) == "🇺🇸":
language = "english"
elif str(reaction.emoji) == "🇪🇸":
language = "spanish"
elif str(reaction.emoji) == "🇫🇷":
language = "french"
elif str(reaction.emoji) == "🇮🇹":
language = "italian"
elif str(reaction.emoji) == "🇩🇪":
language = "deutsch"
elif str(reaction.emoji) == "🇷🇺":
language = "russian"
elif str(reaction.emoji) == "🇯🇵":
language = "japanese"
elif str(reaction.emoji) == "🇰🇷":
language = "korean"
#MENSAJE DE CONFIRMACION
embed=discord.Embed(title="Tadzio´s FilmBot", description="Buscando películas que contengan \"{}\", en resolución {} y en el idioma {}...".format(movie_name, resolution, str(reaction.emoji)), color=0xe74c3c)
embed.set_thumbnail(url="https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
await message.channel.send(embed=embed)
file2write=open("last_movie_id.txt",'r')
past_film_id = str(file2write.read())
file2write.close()
print(resolution)
print(past_film_id)
radarr_initiate = radarr().get_torrent_public(movie_name = movie_name, past_film_id = past_film_id, resolution = resolution)
if radarr_initiate is not None:
magnet_link = radarr_initiate[0]
past_film_id = radarr_initiate[1]
movie_name = radarr_initiate[2]
movie_image = radarr_initiate[3]
size = radarr_initiate[4]
hash_code = radarr_initiate[5]
file2write=open("last_movie_id.txt",'w')
file2write.write(str(past_film_id))
file2write.close()
embed = discord.Embed(
title = movie_name,
description = magnet_link,
colour = discord.Colour.red()
)
embed.set_image(url = movie_image)
await message.channel.send(embed=embed)
embed=discord.Embed(title="Tadzio´s FilmBot", description="Recuerda utilizar el comando '!subs' si deseas obtener subtitulos para tu película!", color=0xe74c3c)
embed.set_thumbnail(url="https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
await message.channel.send(embed=embed)
else:
await message.channel.send("Película no encontrada. Intentalo de nuevo con una resolución diferente.")
#!subs -> Subs (Under development)
if message.content.startswith('!subs'):
requesting_user = message.author
print(requesting_user)
#SEARCHING FOR SUBS
embed=discord.Embed(title="Tadzio´s SubsBot", description="¿En que idioma deseas obtener tus subtitulos?", color=0x19C5FF)
embed.set_thumbnail(url="https://i.imgur.com/AyfoBv5.png")
subtitles_settings = await message.channel.send(embed=embed)
for emoji in flag_list:
await subtitles_settings.add_reaction(emoji)
def check(reaction, user):
if user == message.author and str(reaction.emoji) in flag_list:
return True,
else:
return False
try:
reaction, user = await client.wait_for('reaction_add', timeout=60, check = check)
except TimeoutError:
await message.channel.send('Tiempo de espera agotado. Intentalo de nuevo!')
exit
else:
print(type(reaction.emoji))
if str(reaction.emoji) == "🇺🇸":
language = "en"
elif str(reaction.emoji) == "🇪🇸":
language = "es"
elif str(reaction.emoji) == "🇫🇷":
language = "fr"
elif str(reaction.emoji) == "🇮🇹":
language = "it"
elif str(reaction.emoji) == "🇩🇪":
language = "ger"
elif str(reaction.emoji) == "🇷🇺":
language = "rus"
elif str(reaction.emoji) == "🇯🇵":
language = "jpn"
elif str(reaction.emoji) == "🇰🇷":
language = "kor"
embed=discord.Embed(title="Tadzio´s SubsBot", description="Ingresa la dirección completa en la que está incluida tu película.", color=0x19C5FF)
embed.add_field(name="Ejemplo:", value="D:\Películas\Pulp Fiction (1994) [1080p]\Pulp.Fiction.1994.1080p.BrRip.x264.YIFY.mp4", inline=False)
embed.set_thumbnail(url="https://i.imgur.com/AyfoBv5.png")
await message.channel.send(embed=embed)
def check(user):
if requesting_user == message.author:
print("Xd")
return True
else:
print("Xdn´t")
return False
try:
message = await client.wait_for('message', timeout=120, check = check)
except TimeoutError:
await message.channel.send('Tiempo de espera agotado. Intentalo de nuevo!')
exit
else:
print("checkcheck")
direccion_filme = message.content
subDb = get_subs(direccion_filme,language)
if subDb.subs_found == True:
embed=discord.Embed(title="Tadzio´s SubsBot", description="Subtitulos encontrados y guardados en la dirección\n: " + subDb.path , color=0x19C5FF)
embed.set_thumbnail(url="https://i.imgur.com/AyfoBv5.png")
await message.channel.send(embed=embed)
else:
await message.channel.send('Subtitulos no encontrados! Intenta cambiar el lenguaje o la dirección.')
#!zoom -> Creates or deletes a Zoom Call!
if message.content.startswith("!zoom"):
if message.content.startswith("!zoom delete"):
try:
ZoomUs().delete_meeting()
await message.channel.send('Todas las reuniones de Zoom han sido eliminadas exitosamente.')
except:
await message.channel.send('Ha ocurrido un error eliminando las reuniones de Zoom, intentalo nuevamente.')
else:
zoom_meeting_link = ZoomUs().create_meeting()
embed=discord.Embed(title="Reunión de Zoom", description="Aquí está el link para la reunión de Zoom.", url = zoom_meeting_link ,color=0x19C5FF)
embed.add_field(name="Link", value=zoom_meeting_link, inline=False)
embed.set_thumbnail(url = "https://seeklogo.com/images/Z/zoom-fondo-azul-vertical-logo-8246E36E95-seeklogo.com.png")
await message.channel.send(embed=embed)
#!lola -> Impedida
if message.content.startswith("!lola"):
await message.channel.send(file = discord.File("./imagenes/lola.jpg"))
#!sech -> Embed con el Ete Sech
if message.content.startswith("!sech"):
embed=discord.Embed(title="ete sech 😎", description="ete sech")
embed.set_image(url = "https://i.redd.it/hfta4w753mo51.jpg")
embed.set_thumbnail(url = "https://i.redd.it/hfta4w753mo51.jpg")
embed.set_footer(text = "ete sech")
await message.channel.send(embed=embed)
#!ayuda -> Embed con los comandos del server
if message.content.startswith("!ayuda"):
embed=discord.Embed(title="Lista de commandos de HAL-9000:", color = 0xe74c3c)
embed.set_thumbnail(url = "https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
embed.add_field(name="!zoom", value="Crea una llamada en Zoom y retorna el link de esta misma.", inline=False)
embed.add_field(name="!zoom delete", value="Elimina todas las llamadas creadas en Zoom.", inline=False)
embed.add_field(name="!film + nombre del filme", value="Busca una película y retorna el link del torrent para su descarga. Ej: !film A Clockwork Orange", inline=False)
embed.add_field(name="!create", value="Abre el asistente para crear un comando personalizado", inline=False)
embed.add_field(name="!custom", value="Retorna la lista de cutom commands ya creados.", inline=False)
await message.channel.send(embed=embed)
#!create -> Crea comandos
if message.content.startswith("!create"):
requesting_user = message.author
mensaje_comando = "None"
command_image = "None"
og_path = "None"
embed=discord.Embed(title="HAL-9000 Command Creator", description="Seguido de '!', escribe el nombre del comando que desees crear:", color=0xe74c3c)
embed.set_thumbnail(url="https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
await message.channel.send(embed=embed)
def check(user):
if requesting_user == message.author:
print("Xd")
return True
else:
print("Xdn´t")
return False
try:
message = await client.wait_for('message', timeout=60, check = check)
except TimeoutError:
await message.channel.send('Tiempo de espera agotado. Intentalo de nuevo!')
exit
else:
print("checkcheck")
nombre_comando = message.content
if nombre_comando[0] == "!" and nombre_comando not in command_list:
#Pregunta si desea añadir un mensaje
embed=discord.Embed(title="HAL-9000 Command Creator", description="¿Deseas añadir un mensaje?", color=0xe74c3c)
embed.set_thumbnail(url="https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
message_settings = await message.channel.send(embed=embed)
for emoji in ["✅","⛔"]:
await message_settings.add_reaction(emoji)
def check(reaction, user):
if user == message.author and str(reaction.emoji) in ["✅","⛔"]:
return True,
else:
return False
try:
reaction, user = await client.wait_for('reaction_add', timeout=60, check = check)
except TimeoutError:
await message.channel.send('Tiempo de espera agotado. Intentalo de nuevo!')
exit
else:
print(type(reaction.emoji))
if str(reaction.emoji) == "✅":
add_message = True
elif str(reaction.emoji) == "⛔":
add_message = False
if add_message:
embed=discord.Embed(title="HAL-9000 Command Creator", description="Escribe el mensaje que deseas añadir al comando:", color=0xe74c3c)
embed.set_thumbnail(url="https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
await message.channel.send(embed=embed)
def check(user):
if requesting_user == message.author:
print("Xd")
return True
else:
print("Xdn´t")
return False
try:
message = await client.wait_for('message', timeout=60, check = check)
except TimeoutError:
await message.channel.send('Tiempo de espera agotado. Intentalo de nuevo!')
exit
else:
print("checkcheck")
mensaje_comando = message.content
#Pregunta si desea añadir una imagen
embed=discord.Embed(title="HAL-9000 Command Creator", description="¿Deseas añadir un archivo multimedia?", color=0xe74c3c)
embed.set_thumbnail(url="https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
image_settings = await message.channel.send(embed=embed)
for emoji in ["✅","⛔"]:
await image_settings.add_reaction(emoji)
def check(reaction, user):
if user == message.author and str(reaction.emoji) in ["✅","⛔"]:
return True,
else:
return False
try:
reaction, user = await client.wait_for('reaction_add', timeout=60, check = check)
except TimeoutError:
await message.channel.send('Tiempo de espera agotado. Intentalo de nuevo!')
exit
else:
print(type(reaction.emoji))
if str(reaction.emoji) == "✅":
add_image = True
elif str(reaction.emoji) == "⛔":
add_image = False
if add_image:
embed=discord.Embed(title="HAL-9000 Command Creator", description="Sube la imagen/video/gif que desees añadir al comando:", color=0xe74c3c)
embed.set_thumbnail(url="https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
await message.channel.send(embed=embed)
def check(user):
if requesting_user == message.author:
print("Xd")
return True
else:
print("Xdn´t")
return False
try:
message = await client.wait_for('message', timeout=60, check = check)
except TimeoutError:
await message.channel.send('Tiempo de espera agotado. Intentalo de nuevo!')
exit
else:
print("checkcheck")
command_image = message.attachments[0].url
print(command_image)
r = requests.get(command_image, stream=True, headers={'User-agent': 'Mozilla/5.0'})
embed=discord.Embed(title="HAL-9000 Command Creator", description="¿Que tipo de archivo estas subiendo? (Video, imagen o gif)", color=0xe74c3c)
embed.set_thumbnail(url="https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
await message.channel.send(embed=embed)
def check(user):
if requesting_user == message.author:
print("Xd")
return True
else:
print("Xdn´t")
return False
try:
message = await client.wait_for('message', timeout=60, check = check)
except TimeoutError:
await message.channel.send('Tiempo de espera agotado. Intentalo de nuevo!')
exit
else:
if message.content.lower() == "video":
extension_archivo = ".mp4"
elif message.content.lower() == "imagen":
extension_archivo = ".png"
elif message.content.lower() == "gif":
extension_archivo = ".gif"
if r.status_code == 200:
image_path = "./imagenes/{}{}".format(nombre_comando, extension_archivo)
og_path = image_path
with open(image_path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
print(nombre_comando)
print(mensaje_comando)
print(og_path)
df = pd.read_csv("commands.csv")
df[nombre_comando] = [mensaje_comando, og_path]
df.to_csv("commands.csv", index=False)
embed=discord.Embed(title="HAL-9000 Command Creator", description="Has creado del comando '{}' exitosamente!".format(nombre_comando), color=0xe74c3c)
embed.set_thumbnail(url="https://img.pngio.com/hal-icon-65700-free-icons-library-hal-png-1600_1600.jpg")
await message.channel.send(embed=embed)
else:
await message.channel.send('No has ingresado un nombre de comando válido!')
#!commands_created -> Iterates over the created commands!
if message.content in command_list:
command_name = [message.content]
df = pd.read_csv("commands.csv", usecols=command_name)
message_text = df[command_name].iloc[0].values[0]
message_image = df[command_name].iloc[1].values[0]
if message_text != "None":
await message.channel.send(message_text)
if message_image != "None":
await message.channel.send(file = discord.File(message_image))
print(message_text)
print(message_image)
#!custom -> Abre la lista de custom commands
if message.content.startswith('!custom'):
await message.channel.send(command_list)
# Entry point: start the Discord client's event loop (blocks forever).
# NOTE(review): 'DISCORD_KEY' looks like a literal placeholder, not a real
# token — presumably it should be read from an environment variable or a
# config file; confirm before deploying.
client.run('DISCORD_KEY')
|
<gh_stars>0
import numpy as np
from sklearn.model_selection import train_test_split
from .formula import Formula
from .plot import hist_two
class VarLearning():
    """Driver for variable/feature-construction learning experiments.

    Builds candidate input features with ``Formula``, trains a classifier
    on them, and reports test accuracies.  ``data`` may be a filename of a
    whitespace-separated table or an array-like; in either layout the last
    ``n_target`` columns are the targets (assumption based on
    ``separate_data`` — confirm against callers).
    """

    def __init__(self, name='test', data=None, n_target=1,
                 test_size=0.2, var_labels=None, fix_dim=False,
                 n_value=3, min_use=None, max_use=None, shot=1,
                 use_int=False, int_check=False,
                 method='Ada', seed=None, json=None, verbose=0, **kw):
        self.name = name
        self.n_target = n_target
        self.test_size = test_size
        self.var_labels = var_labels
        self.fix_dim = fix_dim
        self.n_value = n_value
        self.min_use = min_use
        self.max_use = max_use
        self.shot = shot
        self.use_int = use_int
        self.int_check = int_check
        self.method = method
        self.seed = seed
        self.verbose = verbose
        self.kw = kw                      # extra kwargs forwarded to the classifier
        self.hist = None
        self.score = None
        self.set_data(data)
        self.classifier = None
        # NOTE: this dict must be built *before* ``self.json`` is rebound
        # below, so the 'json' entry captures the bound json() method rather
        # than the json file path assigned on the next line.
        self.cmd = {'json': self.json,
                    'direct': self.direct,
                    'single': self.single,
                    'random_shot': self.random_shot,
                    'multishot': self.multishot}
        # From here on, ``self.json`` is the path of a JSON file holding
        # pre-computed RPN formulas (read by the json() method above).
        self.json = json

    def set_data(self, data):
        """Load ``data``, build train/test splits, and create the base Formula.

        A ``None`` value leaves the instance without data (no-op); a string
        is treated as a filename of whitespace-separated columns.
        """
        if data is None:
            self.data = None
            return
        if type(data) == str:
            with open(data) as f:
                data = [line.split() for line in f.readlines()]
        if self.use_int:
            self.data = np.array(data, int)
        else:
            self.data = np.array(data)
        self.separate_data()
        self.formula = Formula(n_input=self.data[0].size - self.n_target,
                               min_use=self.min_use, max_use=self.max_use,
                               var_labels=self.var_labels,
                               fix_dim=self.fix_dim)

    def separate_data(self):
        """Re-split self.data into shuffled train/test inputs and targets."""
        x_data = self.data[:, 0: -1 * self.n_target]
        y_data = self.data[:, -1 * self.n_target:self.data[0].size]
        self.x_train, self.x_test, self.y_train, self.y_test \
            = train_test_split(x_data, y_data,
                               test_size=self.test_size, shuffle=True)
        # Flattened single-target views used by most classifiers.
        self.y_train_array = np.array([x[0] for x in self.y_train])
        self.y_test_array = np.array([x[0] for x in self.y_test])

    def run(self, cmd):
        """Run command ``cmd``; with int_check, compare float vs int runs
        and plot the two accuracy histograms side by side."""
        if not self.int_check:
            return self.run_base(cmd)
        else:
            self.use_int = False
            acc_non_int = self.run_base(cmd)
            self.use_int = True
            acc_int = self.run_base(cmd)
            hist_two(acc_non_int, acc_int, bins=100, range=None,
                     name=self.name + '_int_check', xlabel='accuracy',
                     label1='Original', label2='Int')

    def run_base(self, cmd):
        """Dispatch ``cmd`` through self.cmd.

        Returns a list of accuracies (one per shot), except for
        'multishot' which manages its own shots and returns None.
        Raises RuntimeError for unknown commands.
        """
        if cmd in self.cmd:
            if cmd == 'multishot':
                self.cmd[cmd]()
            else:
                if self.seed is None:
                    self.seed = 1
                acc = []
                for i in range(self.shot):
                    self.separate_data()      # fresh split every shot
                    acc.append(self.cmd[cmd]()[0])
                    self.seed += 1
                return acc
        else:
            raise RuntimeError('Command: {} is not available'.format(cmd))

    def make_classifier(self, name, x_train, x_test):
        """Instantiate self.classifier according to self.method.

        Imports are local because each backend lives in its own module
        with its own (possibly heavy) dependencies.
        """
        y_train = self.y_train_array
        y_test = self.y_test_array
        if self.method.lower() == 'dnn':
            from var_learning.classifier_dnn import DNN as Classifier
            # The DNN consumes the full (2-D) target arrays.
            y_train = self.y_train
            y_test = self.y_test
        elif self.method.lower() in ('decisiontree', 'dt'):
            from var_learning.classifier_dt import DecisionTree as Classifier
        elif self.method.lower() in ('randomforest', 'rf'):
            from var_learning.classifier_rf import RandomForest as Classifier
        elif self.method.lower() in ('ada', 'adaboost'):
            # Bug fix: the mixed-case literal 'AdaBoost' could never match
            # the lower-cased method string.
            from var_learning.classifier_ada import AdaBoost as Classifier
        elif self.method.lower() in ('grad', 'gradientboosting'):
            from var_learning.classifier_grad import GradientBoosting as \
                Classifier
        else:
            raise RuntimeError('Classifier method: {} is not available'.format(
                self.method))
        self.classifier = Classifier(
            x_train=x_train, x_test=x_test, y_train=y_train, y_test=y_test,
            name=name, seed=self.seed, verbose=self.verbose,
            **self.kw)

    def json(self):
        """Build features from RPN formulas stored in the file self.json,
        train a classifier on them, and return (accuracy, values, formulas)."""
        import json
        with open(self.json) as f:
            rpn = json.load(f)
        x_train = []
        x_test = []
        formula = []
        for r in rpn:
            f = Formula(var_labels=self.var_labels)
            f.rpn = r
            formula.append(f)
            x_train.append(formula[-1].calc(self.x_train))
            x_test.append(formula[-1].calc(self.x_test))
        x_train = np.concatenate(x_train, 1)
        x_test = np.concatenate(x_test, 1)
        self.make_classifier(self.name + "_json", x_train, x_test)
        acc = self.classifier.run_all()
        values = '{} {}'.format([f.rpn for f in formula],
                                [f.get_formula() for f in formula])
        if self.verbose:
            print('{:.3f} {}'.format(acc, values))
        return acc, values, formula

    def direct(self):
        """Train on the raw input variables; return (accuracy, labels)."""
        self.make_classifier(self.name + "_direct", self.x_train, self.x_test)
        acc = self.classifier.run_all()
        values = ', '.join(self.formula.var_labels)
        print('{:.3f} {}'.format(acc, values))
        return acc, values

    def single(self):
        """Train one classifier per single input variable;
        return (accuracies, labels) as parallel lists."""
        acc = []
        values = []
        for i in range(self.x_train[0].size):
            x_train = self.x_train[:, i:i + 1]
            x_test = self.x_test[:, i:i + 1]
            self.make_classifier(self.name + "_" + self.formula.var_labels[i],
                                 x_train, x_test)
            acc.append(self.classifier.run_all())
            values.append(self.formula.var_labels[i])
            print('{:.3f} {}'.format(acc[-1], values[-1]))
        return acc, values

    def random_shot(self):
        """Generate n_value random formulas, train on the derived features,
        and return (accuracy, values, formulas)."""
        x_train = []
        x_test = []
        formula = []
        for i in range(self.n_value):
            formula.append(
                Formula(n_input=self.x_train[0].size,
                        min_use=self.min_use, max_use=self.max_use,
                        var_labels=self.formula.var_labels,
                        fix_dim=self.fix_dim))
            formula[-1].make_rpn()
            x_train.append(formula[-1].calc(self.x_train))
            x_test.append(formula[-1].calc(self.x_test))
        x_train = np.concatenate(x_train, 1)
        x_test = np.concatenate(x_test, 1)
        self.make_classifier(self.name + "_random", x_train, x_test)
        acc = self.classifier.run_all()
        values = '{} {}'.format([f.rpn for f in formula],
                                [f.get_formula() for f in formula])
        if self.verbose:
            print('{:.3f} {}'.format(acc, values))
        return acc, values, formula

    def multishot(self):
        """Repeat random_shot() self.shot times, tracking the 5 best
        (accuracy, values, formulas) combinations and printing progress
        every 100 shots."""
        import datetime
        print('{} Start multishot: shot={}, n_value={}'.format(
            datetime.datetime.now(), self.shot, self.n_value))
        top_history = []
        model_list = []
        for i in range(self.shot):
            acc, values, formula = self.random_shot()
            if len(model_list) < 5:
                model_list.append((acc, values, formula))
                model_list.sort(key=lambda x: -x[0])
            else:
                if model_list[-1][0] < acc:
                    model_list.pop()
                    model_list.append((acc, values, formula))
                    model_list.sort(key=lambda x: -x[0])
            if i % 100 == 0:
                top_history.append(model_list[0][0])
                print(datetime.datetime.now())
                print("Top accuracy history: ", end='')
                for h in top_history:
                    print('{:.3f}, '.format(h), end='')
                print('')
                print("Top 5 value combination list at {}".format(i))
                for x in model_list:
                    print('{:.3f}: '.format(x[0]), end='')
                    for f in x[2]:
                        print('{}, '.format(f.rpn), end='')
                    for f in x[2]:
                        print('{}, '.format(f.get_formula()), end='')
                    print('')
        print("Top 5 value combination list at {}".format(self.shot))
        for x in model_list:
            print('{:.3f}: '.format(x[0]), end='')
            for f in x[2]:
                print('{}, '.format(f.rpn), end='')
            for f in x[2]:
                print('{}, '.format(f.get_formula()), end='')
            print('')
|
<gh_stars>0
import shutil
import subprocess
import os
import sys
import requests
import yaml
import pystache
import knightos.util as util
from knightos.workspace import Workspace
from knightos.kernels import ensure_kernel
def execute(project_name=None, emulator=None, debugger=None, assembler=None, platform=None, vcs=None, kernel_source=None, compiler=None, template=None, force=None):
root = os.getcwd()
exists = setup_root(root, project_name, force)
ws = Workspace(root)
site_packages = []
if exists and not project_name:
project_name = ws.config.get("name")
print("Found existing project: " + project_name)
# Grab project-specific options
if ws.config.get("-sdk-emulator"):
emulator=ws.config.get("-sdk-emulator")
if ws.config.get("-sdk-debugger"):
emulator=ws.config.get("-sdk-debugger")
if ws.config.get("-sdk-assembler"):
emulator=ws.config.get("-sdk-assembler")
if ws.config.get("-sdk-compiler"):
compiler=ws.config.get("-sdk-compiler")
if ws.config.get("-sdk-template"):
template=ws.config.get("-sdk-template")
if ws.config.get("-sdk-site-packages"):
site_packages=ws.config.get("-sdk-site-packages").split(" ")
if template == "c":
assembler = "scas" # temporary
template_dir = os.path.join(os.path.dirname(__file__), "..", "templates")
with open(os.path.join(template_dir, template, template + ".yaml")) as f:
template_yaml = yaml.load(f.read())
template_vars = {
'project_name': project_name,
'assembler': assembler,
'compiler': compiler,
'emulator': emulator,
'debugger': debugger,
'platform': platform,
'key': '{:02X}'.format(util.get_key(platform)),
'upgrade_ext': util.get_upgrade_ext(platform),
'fat': '{:02X}'.format(util.get_fat(platform)),
'privileged': '{:02X}'.format(util.get_privileged(platform)),
'kernel_path': kernel_source
}
init(ws, root, exists, site_packages, template_yaml, template_vars, vcs, force)
def init(ws, root, exists, site_packages, template, template_vars, vcs, force):
    """Install the SDK into ``root``: kernel image, packages, optional VCS
    repository, and the rendered project template.

    ``template`` is the parsed template YAML (with 'name' and 'files'
    entries); ``template_vars`` are substituted into each non-binary
    template file via pystache.
    """
    print("Installing SDK...")
    if template_vars['kernel_path'] is None:
        install_kernel(ws.kroot, template_vars['platform'])
        shutil.move(os.path.join(ws.kroot,
            "kernel-" + template_vars['platform'] + ".rom"),
            os.path.join(root, ".knightos", "kernel.rom"))
    print("Installing packages...")
    ws.ensure_packages()
    if shutil.which('git') and vcs == "git":
        if not os.path.exists(os.path.join(root, ".git")):
            print("Initializing new git repository...")
            # Context manager closes the devnull handle (it leaked before).
            with open(os.devnull, 'w') as devnull:
                subprocess.call(["git", "init", root], stdout=devnull, stderr=subprocess.STDOUT)
    elif shutil.which('hg') and vcs == "hg":
        if not os.path.exists(os.path.join(root, ".hg")):
            print("Initializing new hg repository...")
            with open(os.devnull, 'w') as devnull:
                subprocess.call(["hg", "init", root], stdout=devnull, stderr=subprocess.STDOUT)
    print("Installing template...")
    template_dir = os.path.join(os.path.dirname(__file__), "..", "templates", template["name"])
    def _compile_file(entry, input_path, output_path, mode):
        # Render one template file.  ``entry`` is the file's YAML record,
        # passed explicitly (the original closed over the loop variable).
        if os.path.basename(output_path) == ".gitignore" and vcs != "git":
            # Bug fix: this used to be `pass`, which fell through and
            # wrote the .gitignore anyway for non-git projects.
            return
        with open(input_path, mode) as src:
            if entry.get("binary"):
                with open(output_path, "wb") as out:
                    out.write(src.read())
            else:
                with open(output_path, "w") as out:
                    out.write(pystache.render(src.read(), template_vars))
    for i in template["files"]:
        input_path = os.path.join(template_dir, i["template"])
        output_path = os.path.join(ws.root, i["path"])
        if not os.path.exists(output_path):
            # Only (re)write on first init, or when the template marks the
            # file as safe to re-initialize.
            if not exists or (exists and i["reinit"]):
                mode = "rb" if i.get("binary") else "r"
                _compile_file(i, input_path, output_path, mode)
    # TODO: Check for software listed in template['requries']
    print("All done! You can use `make help` to find out what to do next.")
def setup_root(root, project_name, force):
    """Prepare the ``.knightos`` working directory under ``root``.

    Returns True when ``root`` already contained files (an existing
    project), False for a brand-new directory.  Exits the process when
    the directory state is inconsistent with the arguments.
    """
    kroot = os.path.join(root, ".knightos")
    if os.path.exists(kroot):
        # Always rebuild the SDK environment from scratch.
        shutil.rmtree(kroot)
        print("Notice: Rebuilding existing environment")
    os.makedirs(root, mode=0o755, exist_ok=True)
    exists = len(os.listdir(root)) > 0
    if not exists and not project_name:
        sys.stderr.write("You must specify a project name for new projects.\n")
        sys.exit(1)
    if exists and not os.path.exists(os.path.join(root, "package.config")):
        if not force:
            sys.stderr.write("This directory is not empty and does not appear to have a KnightOS project, aborting!\n")
            sys.exit(1)
        else:
            sys.stderr.write("Warning: forcibly installing SDK in populated directory\n")
    os.makedirs(kroot, mode=0o755)
    for subdir in ("include", "packages", "pkgroot"):
        os.makedirs(os.path.join(kroot, subdir), mode=0o755)
    return exists
def install_kernel(root, platform):
    """Symlink the cached kernel image for ``platform`` into ``root`` and
    record its version in a ``kernel-version`` file."""
    kernel_path, kernel_version = ensure_kernel(platform)
    print("Installing kernel " + kernel_version)
    link_name = os.path.join(root, os.path.basename(kernel_path))
    os.symlink(kernel_path, link_name)
    version_file = os.path.join(root, 'kernel-version')
    with open(version_file, 'w') as f:
        f.write(kernel_version)
|
<gh_stars>0
"""
@Author: <NAME>
@Date: 22 Nov, 2021
@Description: Discriminator and Generator from DCGAN paper
"""
import torch
import torch.nn as nn
class Discriminator(nn.Module):
    """DCGAN discriminator: maps an N x channel_img x 64 x 64 image batch
    to an N x 1 x 1 x 1 probability via strided convolutions (no pooling),
    per the DCGAN paper."""
    def __init__(self, channel_img, features_d):
        super(Discriminator, self).__init__()
        self.disc = nn.Sequential(
            nn.Conv2d(
                channel_img, features_d, kernel_size=4, stride=2, padding=1
            ), # Input: N x channel_img x 64 x 64 -> 32 x 32
            nn.LeakyReLU(0.2),
            self._block(features_d, features_d*2, 4, 2, 1), # -> 16 x 16
            self._block(features_d*2, features_d*4, 4, 2, 1), # -> 8 x 8
            self._block(features_d*4, features_d*8, 4, 2, 1), # -> 4 x 4
            self._block(features_d*8, features_d*16, 4, 2, 1), # -> 2 x 2
            nn.Conv2d(features_d*16, 1, kernel_size=4, stride=2, padding=1), # -> 1 x 1
            nn.Sigmoid()
        )
    def _block(self, in_channels, out_channels, kernel_size, stride, padding):
        """Strided Conv2d -> BatchNorm2d -> LeakyReLU downsampling block.

        bias=False because the following BatchNorm cancels any bias."""
        return nn.Sequential(
            nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size,
                stride,
                padding,
                bias=False
            ),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(0.2),
        )
    def forward(self, x):
        """Return the discriminator score for each image in x."""
        return self.disc(x)
class Generator(nn.Module):
    """DCGAN generator: maps N x z_dim x 1 x 1 latent noise to
    N x channel_img x 64 x 64 images in [-1, 1]."""

    def __init__(self, z_dim, channel_img, features_g):
        super(Generator, self).__init__()
        layers = [
            # N x z_dim x 1 x 1 -> 4 x 4
            self._block(z_dim, features_g * 16, 4, 1, 0),
            self._block(features_g * 16, features_g * 8, 4, 2, 1),  # -> 8 x 8
            self._block(features_g * 8, features_g * 4, 4, 2, 1),   # -> 16 x 16
            self._block(features_g * 4, features_g * 2, 4, 2, 1),   # -> 32 x 32
            nn.ConvTranspose2d(
                features_g * 2, channel_img, kernel_size=4, stride=2, padding=1
            ),  # -> 64 x 64
            nn.Tanh(),  # squash output into [-1, 1]
        ]
        self.gen = nn.Sequential(*layers)

    def _block(self, in_channels, out_channels, kernel_size, stride, padding):
        """ConvTranspose2d -> BatchNorm2d -> ReLU upsampling block.

        bias=False because the following BatchNorm cancels any bias."""
        upsample = nn.ConvTranspose2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            bias=False
        )
        return nn.Sequential(upsample, nn.BatchNorm2d(out_channels), nn.ReLU())

    def forward(self, x):
        """Generate an image batch from the latent batch x."""
        return self.gen(x)
def initialize_weights(model):
    """Initialize weights as in the DCGAN reference implementation.

    Conv / ConvTranspose weights: N(0, 0.02).  BatchNorm is handled
    separately: its weight (gamma) is a *multiplicative* scale, so it is
    centred at 1 rather than 0 — a zero-centred gamma would nearly zero
    out every activation at the start of training — and its bias is 0.
    """
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            nn.init.normal_(m.weight.data, 0.0, 0.02)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0)
def main():
    """Shape smoke-test: check that the discriminator collapses a 64x64
    batch to per-image scores and the generator inverts that mapping."""
    batch, channels, height, width = 8, 3, 64, 64
    latent_dim = 100
    images = torch.randn((batch, channels, height, width))
    critic = Discriminator(channels, batch)
    initialize_weights(critic)
    assert critic(images).shape == (batch, 1, 1, 1)
    noise = torch.randn((batch, latent_dim, 1, 1))
    generator = Generator(latent_dim, channels, batch)
    initialize_weights(generator)
    assert generator(noise).shape == (batch, channels, height, width)
# if __name__ == "__main__":
# main() |
<reponame>Panda4817/MySousChef
from django import forms
from django.forms import modelformset_factory
from .models import *
# Form for gathering recipe information when creating own recipe
class MyRecipeInfoForm(forms.ModelForm):
    """ModelForm collecting the top-level details of a user-created recipe
    (title, servings, preparation time, image choice, and an optional wine
    pairing) for the MyRecipe model.

    The 'oninput' attributes call client-side validators (checktext /
    checknumber) assumed to be defined in the page's JavaScript — confirm
    against the templates.
    """
    title = forms.CharField(widget=forms.TextInput(attrs={
        'class': 'form-control',
        'placeholder': 'MyRecipe Title',
        'id': 'titleinput',
        'oninput': 'checktext(id);'
    }), label='MyRecipe Title', required=True)
    serves = forms.IntegerField(widget=forms.NumberInput(attrs={
        'class': 'form-control',
        'placeholder': 'Servings',
        'id': 'servesinput',
        'min': '1',
        'oninput': 'checknumber(id);'
    }), label='Servings', required=True)
    time = forms.IntegerField(widget=forms.NumberInput(attrs={
        'class': 'form-control',
        'placeholder': 'Ready in ... minutes',
        'id': 'timeinput',
        'min': '1',
        'oninput': 'checknumber(id);'
    }), label='Ready in ... minutes', required=True)
    # Image is restricted to the choices declared on the model.
    image = forms.ChoiceField(widget=forms.RadioSelect(), choices=MyRecipe.IMAGE_CHOICES, required=True)
    wine_pairing = forms.CharField(widget=forms.Textarea(attrs={
        'class': 'form-control',
        'id': 'wineinput',
        'rows': '2',
        'placeholder': 'Enter wine pairings or other drinks (optional)'
    }), required=False)
    class Meta:
        model = MyRecipe
        fields = (
            'title',
            'serves',
            'time',
            'wine_pairing',
            'image'
        )
# Form for adding ingredient information for own recipe
class IngredientsForm(forms.ModelForm):
    """ModelForm for a single ingredient (name, amount, optional unit and
    free-text notes) of a user-created recipe."""
    name = forms.CharField(widget=forms.TextInput(attrs={
        'class': 'form-control',
        'placeholder': 'Ingredient name',
        'oninput': 'checktext(id);'
    }), label='Ingredient name', required=True)
    amount = forms.DecimalField(widget=forms.TextInput(attrs={
        'class': 'form-control',
        'placeholder': 'Amount',
        'oninput': 'checktext(id);'
    }), label='Amount', required=True)
    unit = forms.CharField(widget=forms.TextInput(attrs={
        'class': 'form-control',
        'placeholder': 'Units (optional)',
    }), label='Units', required=False)
    # Free-form notes, e.g. "finely chopped".
    meta = forms.CharField(widget=forms.Textarea(attrs={
        'class': 'form-control',
        'placeholder': 'Other information about ingredient (optional)',
        'rows': '2'
    }), required=False)
    class Meta:
        model = MyRecipeIngredients
        fields = (
            'name',
            'amount',
            'unit',
            'meta'
        )
# Formset for ingredient forms
IngredientsFormset = modelformset_factory(
    MyRecipeIngredients,
    form=IngredientsForm,
    extra=1)  # extra=1: render one blank ingredient form by default
# Form for gathering instrcutions for own recipe
class InstructionsForm(forms.ModelForm):
    """ModelForm for a single numbered instruction step of a user-created
    recipe."""
    step = forms.CharField(widget=forms.Textarea(attrs={
        'class': 'form-control',
        'placeholder': 'Enter Step information',
        'rows': '2',
        'oninput': 'checktext(id);'
    }), required=True)
    # Step ordering; starts at 1 and is validated client-side.
    number = forms.IntegerField(widget=forms.NumberInput(attrs={
        'class': 'form-control',
        'placeholder': 'Number',
        'min': '1',
        'value': '1',
        'oninput': 'checknumber(id);'
    }), label='Number', required=True)
    class Meta:
        model = MyRecipeInstructions
        fields = (
            'number',
            'step'
        )
# Formset for instruction forms
InstructionsFormset = modelformset_factory(
    MyRecipeInstructions,
    form=InstructionsForm,
    extra=1)  # extra=1: render one blank instruction form by default
# formsets used to dynamically generate forms |
<reponame>DanielMontecino/NeuroEvolution<filename>utils/codifications.py
from time import time
import numpy as np
import random
import pickle
class Layer(object):
    """Abstract encoding of a single layer in an evolved architecture.

    Subclasses must implement cross, mutate, self_copy, random_layer and
    __repr__; equality of layers is defined by their representations.
    """

    def cross(self, other_layer):
        """Recombine with other_layer; subclass responsibility."""
        raise NotImplementedError

    def mutate(self):
        """Randomly perturb this layer; subclass responsibility."""
        raise NotImplementedError

    def compare(self, other_layer):
        """Return True when both layers have the same representation.

        Bug fix: this previously used ``raise`` instead of ``return``,
        which raised ``TypeError`` (a bool is not an exception) instead
        of returning the comparison result.
        """
        return self.__repr__() == other_layer.__repr__()

    def self_copy(self):
        raise NotImplementedError

    @classmethod
    def random_layer(cls):
        raise NotImplementedError

    def __repr__(self):
        raise NotImplementedError

    @classmethod
    def create(cls, **kwargs):
        # Returns an object of the same class as the one who created it,
        # so subclasses get instances of the subclass.
        return cls(**kwargs)

    @staticmethod
    def gauss_mutation(val, max_val, min_val, int_=True):
        """Gaussian perturbation of ``val`` kept inside [min_val, max_val].

        The stddev is a tenth of the range; out-of-range draws are
        reflected at the violated bound, and if the reflection itself
        overshoots (val near a bound with a large draw) the mutation is
        resampled recursively.
        """
        m = 0
        s = (max_val - min_val) / 10.
        new_val = val + random.gauss(m, s)
        if int_:
            new_val = int(new_val)
        if new_val < min_val:
            new_val = 2 * min_val - new_val          # reflect at lower bound
        elif new_val > max_val:
            new_val = max_val - (new_val - max_val)  # reflect at upper bound
        if new_val > max_val or new_val < min_val:
            new_val = Layer.gauss_mutation(val, max_val, min_val, int_)
        return new_val
class Chromosome(object):
    """Abstract encoding of an individual for the evolutionary search.

    Subclasses implement the genetic operators; equality is defined by
    the textual representation.
    """

    def __init__(self):
        # Number of generations this individual has survived.
        self.age = 0

    def increase_age(self):
        """Advance the individual's age by one generation."""
        self.age += 1

    @classmethod
    def random_individual(cls):
        raise NotImplementedError

    def simple_individual(self):
        raise NotImplementedError

    def cross(self, other_chromosome):
        raise NotImplementedError

    def mutate(self):
        raise NotImplementedError

    def equals(self, other_chromosome):
        """Two chromosomes are equal iff their representations match."""
        return self.__repr__() == other_chromosome.__repr__()

    def __repr__(self):
        raise NotImplementedError

    def self_copy(self):
        raise NotImplementedError

    def save(self, filename):
        """Pickle this chromosome to ``filename``.

        Uses a context manager so the handle is closed even when pickling
        fails (the original leaked it on error).
        """
        with open(filename, 'wb') as outfile:
            pickle.dump(self, outfile)

    @staticmethod
    def load(filename):
        """Load and return a chromosome previously written by save()."""
        with open(filename, 'rb') as infile:
            return pickle.load(infile)

    def decode(self, **kwargs):
        raise NotImplementedError

    @classmethod
    def create(cls, **kwargs):
        # Returns an object of the same class as the one who created it,
        # so subclasses get instances of the subclass.
        return cls(**kwargs)
class Fitness:
    """Abstract fitness evaluator for chromosomes.

    Subclasses implement calc() and set_params(); the base class provides
    batch evaluation, repeated-evaluation statistics, and pickling.
    """

    def eval_list(self, chromosome_list, test=False, **kwargs):
        """Evaluate every chromosome in the list; returns a list of scores."""
        return [self.calc(c, test=test, **kwargs) for c in chromosome_list]

    def calc(self, chromosome, test=False):
        raise NotImplementedError

    def set_params(self, **kwargs):
        raise NotImplementedError

    def calc_mean(self, chromosome, samples=5):
        """Evaluate ``samples`` times and print mean/std/max accuracy and
        elapsed wall-clock time."""
        f = []
        ti = time()
        for i in range(samples):
            f.append(self.calc(chromosome))
        print("Acc: %0.3f" % np.mean(f), np.std(f), np.max(f))
        print("Time elapsed: %0.3f" % (time() - ti))

    def save(self, filename):
        """Pickle this fitness object to ``filename``.

        Uses a context manager so the handle is closed even when pickling
        fails (the original leaked it on error).
        """
        with open(filename, 'wb') as outfile:
            pickle.dump(self, outfile)

    @staticmethod
    def load(file):
        """Load and return a fitness object previously written by save()."""
        with open(file, 'rb') as infile:
            return pickle.load(infile)
|
<filename>extractors.py
from functools import cached_property
from typing import List
from krysalid.html_tags import Tag
from krysalid.parsers import Extractor
from krysalid.queryset import QuerySet
from krysalid.utils.iteration import break_when, keep_while
TABLE_TAGS = ['table', 'thead', 'tr', 'th', 'td', 'tbody', 'tfoot']
class TableExtractor(Extractor):
    """A parser that only deals with tables.

    Collects table-related tags (see TABLE_TAGS) while parsing and exposes
    helpers to read the table body's rows and cell values.
    """

    def __init__(self, class_or_id_name: str = None, base_url: str = None,
                 processors: List = None):
        super().__init__()
        self.class_or_id_name = class_or_id_name
        self.base_url = base_url
        # Fix: the original signature used a mutable default (processors=[]),
        # which is evaluated once and shared across calls; None is the safe
        # sentinel and preserves the empty-set default.
        self.processors = set(processors) if processors is not None else set()

    @cached_property
    def get_rows(self):
        """Return the 'tr' tags of the table body (computed once, then cached)."""
        # NOTE(review): assumes Tag supports equality with the tag-name string.
        table_body = break_when(lambda x: x == 'tbody', self.container)
        return table_body.find_all('tr')

    @cached_property
    def extract_values(self):
        """Return the table's cell strings as a list of rows (cached)."""
        rows = []
        for row in self.get_rows:
            columns = row.find_all('td')
            column_to_add = []
            for column in columns:
                # append(x), not extend([x]) — same behavior, clearer intent.
                column_to_add.append(column.string)
            rows.append(column_to_add)
        return rows

    @property
    def first(self):
        """The first row of the table body."""
        return self.get_row(0)

    def get_row(self, index: int):
        """Return the row at *index* within the table body."""
        return self.get_rows[index]

    def start_tag(self, tag, attrs, **kwargs):
        """Record *tag* only when it belongs to a table structure."""
        if tag in TABLE_TAGS:
            instance = Tag(tag, attrs, extractor=self)
            self.container.append(instance)
            self._current_tag = instance
            self.recursively_add_tag(instance)
            self._add_coordinates(instance, kwargs.get('position'))
            instance.index = kwargs.get('index')

    def end_tag(self, tag):
        # NOTE(review): *tag* is ignored — this always closes the first
        # unclosed 'table' in the container; confirm nested tables are
        # handled upstream.
        def filtering_function(x):
            return x.name == 'table' and not x.closed
        tag_to_close = break_when(filtering_function, self.container)
        tag_to_close.closed = True

    def self_closing_tag(self, tag, attrs, **kwargs):
        # Self-closing tags cannot be part of a table structure; skip them.
        return False

    def parse_comment(self, data: str, **kwargs):
        # HTML comments carry no table data; skip them.
        return False
class TextExtractor(Extractor):
    """Extractor that collects only the raw text of a given HTML page."""

    def start_tag(self, tag, attrs, **kwargs):
        """Opening tags carry no text content; ignore them."""
        return False

    def end_tag(self, tag):
        """Closing tags carry no text content; ignore them."""
        return False

    def self_closing_tag(self, tag, attrs, **kwargs):
        """Self-closing tags carry no text content; ignore them."""
        return False
class ImageExtractor(Extractor):
    """Extractor that returns all the images present on a given HTML page.

    Filtering (uniqueness, file type, substring of the url) is applied lazily
    in ``get_images``.
    """

    def __init__(self, unique: bool = False, as_type: str = None,
                 url_must_contain: str = None, match_height: int = None,
                 match_width: int = None):
        super().__init__()
        self.unique = unique
        self.as_type = as_type
        self.url_must_contain = url_must_contain
        self.match_height = match_height
        self.match_width = match_width
        self.processors = []
        # Tracks urls that were already handled by the extractor.
        self._processed_urls = set()

    def _run_processors(self):
        pass

    def get_images(self):
        """Return a QuerySet of collected images, applying the configured filters."""
        selection = []
        if self.unique:
            # Keep only the first image seen for each 'src' value.
            seen_sources = set()
            for image in self.container:
                source = image['src']
                if source in seen_sources:
                    continue
                seen_sources.add(source)
                selection.append(image)
        if self.as_type is not None:
            # Filter by file extension; fall back to the full container when
            # no earlier filter produced a result.
            pool = selection or self.container
            selection = keep_while(lambda item: item['src'].endswith(self.as_type), pool)
        if self.url_must_contain is not None:
            pool = selection or self.container
            selection = keep_while(lambda item: self.url_must_contain in item['src'], pool)
        return QuerySet.copy(selection or self.container)

    def download_images(self):
        pass

    def start_tag(self, tag, attrs, **kwargs):
        return False

    def end_tag(self, tag):
        return False

    def internal_data(self, data, **kwargs):
        return False

    def parse_comment(self, data: str, **kwargs):
        return False

    def self_closing_tag(self, tag, attrs, **kwargs):
        """Record a self-closing tag (e.g. <img/>) as an already-closed Tag."""
        self._opened_tags.update([tag])
        node = Tag(tag, attrs, extractor=self)
        self.container.append(node)
        self._current_tag = node
        node.closed = True
        node.index = kwargs.get('index')
        self._add_coordinates(node, kwargs.get('position'))
|
<filename>saves_bot/data.py<gh_stars>0
import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Optional, TypedDict, TypeVar
import aiofiles
from saves_bot.utils import WrappingLock
_T = TypeVar('_T')
class UserData(TypedDict):
    """Per-user persisted record."""
    # Number of counting 'saves' the user has accumulated (may be fractional).
    saves: float
class GuildData(TypedDict):
    """Per-guild persisted record."""
    # Discord role id allowed to count (0 means unset — see get_guild default).
    can_count_role: int
    # Number of 'saves' the guild has accumulated (may be fractional).
    saves: float
class SavesData:
    """In-memory per-user and per-guild 'saves' state, synced to JSON files.

    ``refresh_data`` performs a two-way merge with ``users.json`` /
    ``guilds.json`` and coalesces concurrent refresh requests onto a single
    task.
    """

    users_path: Path
    guilds_path: Path
    users: WrappingLock[dict[int, UserData]]
    guilds: WrappingLock[dict[int, GuildData]]
    refreshing_task: Optional[asyncio.Task[float]]

    def __init__(self) -> None:
        self.users_path = Path('users.json')
        self.guilds_path = Path('guilds.json')
        self.users = WrappingLock({})
        self.guilds = WrappingLock({})
        self.last_refresh = 0
        self.refreshing_task = None

    async def refresh_data(self) -> float:
        """Sync in-memory data with disk and return the sync duration in seconds.

        Concurrent callers share the already-running refresh task instead of
        starting a second one.
        """
        aloop = asyncio.get_running_loop()
        if self.refreshing_task is not None:
            # A refresh is already in flight; await its result.
            return await self.refreshing_task
        self.refreshing_task = aloop.create_task(self._refresh_data(aloop))
        period = await self.refreshing_task
        self.refreshing_task = None
        return period

    async def _refresh_data(self, aloop: asyncio.AbstractEventLoop) -> float:
        """Refresh both data files in parallel and return the elapsed time."""
        # Fix: log message typo ('Syncinc' -> 'Syncing').
        logging.info('Syncing saves data...')
        start = time.perf_counter()
        await asyncio.gather(
            self._refresh_single_data_dict(aloop, self.users_path, self.users),
            self._refresh_single_data_dict(aloop, self.guilds_path, self.guilds),
        )
        end = time.perf_counter()
        period = end - start
        logging.info('Saves data synced in %f seconds', period)
        return period

    async def _refresh_single_data_dict(self,
                                        aloop: asyncio.AbstractEventLoop,
                                        data_path: Path,
                                        data_dict: WrappingLock[dict[int, _T]]
                                        ) -> None:
        """Two-way merge between *data_path* on disk and *data_dict* in memory.

        Disk entries missing locally are adopted; local entries always win and
        the merged result is written back. (Return annotation fixed: the
        original declared Optional[_T] but the function never returns a value.)
        """
        try:
            async with aiofiles.open(data_path) as fp:
                datas_str = await fp.read()
        except FileNotFoundError:
            # First run: no file yet, start from an empty mapping.
            file_datas = {}
        else:
            # json.loads can be CPU-heavy; keep it off the event loop.
            file_datas: dict[str, _T] = await aloop.run_in_executor(None, json.loads, datas_str)
        async with data_dict as local_datas:
            # Disk -> memory: only add entries we don't already have locally.
            for (id, data) in file_datas.items():
                id = int(id)
                if id not in local_datas:
                    local_datas[id] = data
            # Memory -> disk: local state always wins.
            for (id, data) in local_datas.items():
                file_datas[str(id)] = data
            datas_str = await aloop.run_in_executor(None, json.dumps, file_datas)
            async with aiofiles.open(data_path, 'w') as fp:
                await fp.write(datas_str)

    async def get_guild(self, id: int) -> GuildData:
        """Return (creating a default record if absent) the data for guild *id*.

        Assumes that self.guilds is already locked.
        """
        try:
            guild_data = self.guilds.value[id]
        except KeyError:
            guild_data: GuildData = {
                'can_count_role': 0,
                'saves': 0,
            }
            self.guilds.value[id] = guild_data
        return guild_data

    async def get_user(self, id: int) -> UserData:
        """Return (creating a default record if absent) the data for user *id*.

        Assumes that self.users is already locked.
        """
        try:
            user_data = self.users.value[id]
        except KeyError:
            user_data: UserData = {
                'saves': 0,
            }
            self.users.value[id] = user_data
        return user_data
|
# -*- coding: utf-8 -*-
import mltk
from mltk.data import ArraysDataStream, DataStream
import tensorkit as tk
from tensorkit import tensor as T
import sys
import torch
import numpy as np
from flow_next.common import TrainConfig, DataSetConfig, make_dataset, train_model, ImageAugmentationMapper, get_mapper
from flow_next.common.train_utils import train_classifier
from flow_next.models.glow import GlowConfig, Glow
from ood_regularizer.experiment.datasets.overall import load_overall, load_complexity
from ood_regularizer.experiment.models.utils import get_mixed_array
from ood_regularizer.experiment.utils import plot_fig, make_diagram_torch, get_ele_torch
from utils.evaluation import dequantized_bpd
import torch.autograd as autograd
import torchvision.models as models
from imgaug import augmenters as iaa
class ExperimentConfig(mltk.Config):
    """Configuration for the Glow-ensemble OOD (WAIC) experiment."""
    # model parameters
    z_dim = 256
    act_norm = False
    weight_norm = False
    batch_norm = False
    l2_reg = 0.0002
    kernel_size = 3
    shortcut_kernel_size = 1
    nf_layers = 20

    # training parameters
    result_dir = None
    write_summary = True
    max_epoch = 400
    warm_up_start = 200
    initial_beta = -3.0
    uniform_scale = False
    use_transductive = True
    mixed_train = False
    self_ood = False
    mixed_ratio = 1.0
    mutation_rate = 0.1
    noise_type = "mutation"  # or unit
    in_dataset_test_ratio = 1.0
    pretrain = False

    compressor = 2  # 0 for jpeg, 1 for png, 2 for flif

    max_step = None
    batch_size = 64
    smallest_step = 5e-5
    initial_lr = 0.0005
    lr_anneal_factor = 0.5
    # NOTE(review): mutable class attribute — shared unless mltk.Config copies it.
    lr_anneal_epoch_freq = []
    lr_anneal_step_freq = None
    clip_norm = 5
    n_critical = 5
    # evaluation parameters
    train_n_qz = 1
    test_n_qz = 10
    test_batch_size = 64
    test_epoch_freq = 200
    plot_epoch_freq = 20
    distill_ratio = 1.0
    distill_epoch = 5000

    epsilon = -20.0
    min_logstd_of_q = -3.0

    sample_n_z = 100

    x_shape = (32, 32, 3)
    x_shape_multiple = 3072
    extra_stride = 2
    class_num = 10
    ensemble_times = 5
    odin_T = 1000
    odin_epsilon = 0.0012 * 2  # multiple 2 for the normalization [-1, 1] instead of [0, 1] in ODIN

    classifier_train = TrainConfig(
        optimizer='adamax',
        init_batch_size=128,
        batch_size=64,
        test_batch_size=64,
        test_epoch_freq=10,
        max_epoch=200,
        grad_global_clip_norm=None,
        # grad_global_clip_norm=100.0,
        debug=True
    )
    train = TrainConfig(
        optimizer='adamax',
        init_batch_size=128,
        batch_size=64,
        test_batch_size=64,
        test_epoch_freq=10,
        max_epoch=50,
        grad_global_clip_norm=None,
        # grad_global_clip_norm=100.0,
        debug=True
    )
    model = GlowConfig(
        hidden_conv_activation='relu',
        hidden_conv_channels=[128, 128],
        depth=3,
        levels=3,
    )
    # Dataset names are replaced by DataSetConfig objects inside main().
    in_dataset = 'cifar10'
    out_dataset = 'svhn'

    count_experiment = False
def main():
    """Train (or restore) an ensemble of Glow models and plot WAIC-based
    OOD-detection histograms for the in/out dataset pair."""
    with mltk.Experiment(ExperimentConfig, args=sys.argv[1:]) as exp, \
            T.use_device(T.first_gpu_device()):
        # Retry until the plotting directory can be created (transient shared
        # filesystem errors). NOTE(review): loops forever on permanent failure.
        while True:
            try:
                exp.make_dirs('plotting')
                break
            except Exception:
                pass
        config = exp.config
        # prepare for training and testing data
        config.in_dataset = DataSetConfig(name=config.in_dataset)
        config.out_dataset = DataSetConfig(name=config.out_dataset)
        x_train_complexity, x_test_complexity = load_complexity(config.in_dataset.name, config.compressor)
        svhn_train_complexity, svhn_test_complexity = load_complexity(config.out_dataset.name, config.compressor)

        if config.count_experiment:
            # Append this experiment's path to a global bookkeeping file.
            with open('/home/cwx17/research/ml-workspace/projects/wasserstein-ood-regularizer/count_experiments',
                      'a') as f:
                f.write(exp.abspath("") + '\n')
                f.close()  # redundant — the 'with' block already closes the file

        # Pre-trained checkpoint directories, keyed by in-distribution dataset name.
        experiment_dict = {
            'svhn': '/mnt/mfs/mlstorage-experiments/cwx17/52/e5/02279d802d3a2cb471f5',
            'tinyimagenet': '/mnt/mfs/mlstorage-experiments/cwx17/1a/d5/02812baa4f7008fd61f5',
            'celeba': '/mnt/mfs/mlstorage-experiments/cwx17/f0/e5/02c52d867e4308fd61f5',
            'cifar100': '/mnt/mfs/mlstorage-experiments/cwx17/8c/d5/02732c28dc8d08fd61f5',
            'cifar10': '/mnt/mfs/mlstorage-experiments/cwx17/e0/e5/02c52d867e4308fd61f5',
            'noise': '/mnt/mfs/mlstorage-experiments/cwx17/6b/d5/02812baa4f7019aef1f5',
            'constant': '/mnt/mfs/mlstorage-experiments/cwx17/54/e5/02279d802d3a19aef1f5',
            'not_mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/42/e5/02279d802d3a520f61f5',
            'fashion_mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/32/e5/02279d802d3a08fd61f5',
            'kmnist28': '/mnt/mfs/mlstorage-experiments/cwx17/0a/d5/02812baa4f7008fd61f5',
            'mnist28': '/mnt/mfs/mlstorage-experiments/cwx17/01/e5/02c52d867e4308fd61f5',
            'omniglot28': '/mnt/mfs/mlstorage-experiments/cwx17/22/e5/02279d802d3a08fd61f5',
            'noise28': '/mnt/mfs/mlstorage-experiments/cwx17/82/f5/02279d802d3a032303f5',
            'constant28': '/mnt/mfs/mlstorage-experiments/cwx17/e5/e5/02732c28dc8d032303f5',
        }
        print(experiment_dict)
        if config.in_dataset.name in experiment_dict:
            restore_dir = experiment_dict[config.in_dataset.name]
        else:
            restore_dir = None
        print('restore model from {}'.format(restore_dir))

        # load the dataset
        cifar_train_dataset, cifar_test_dataset, cifar_dataset = make_dataset(config.in_dataset)
        print('CIFAR DataSet loaded.')
        svhn_train_dataset, svhn_test_dataset, svhn_dataset = make_dataset(config.out_dataset)
        print('SVHN DataSet loaded.')
        config.class_num = cifar_train_dataset.slots['y'].max_val + 1

        # NOTE(review): the 'train' flows below are built from the *test*
        # datasets — presumably intentional for evaluation-only streams; confirm.
        cifar_train_flow = cifar_test_dataset.get_stream('train', 'x', config.batch_size)
        cifar_test_flow = cifar_test_dataset.get_stream('test', 'x', config.batch_size)
        svhn_train_flow = svhn_test_dataset.get_stream('train', 'x', config.batch_size)
        svhn_test_flow = svhn_test_dataset.get_stream('test', 'x', config.batch_size)

        x_train = cifar_dataset.get_array('train', 'x')
        y_train = cifar_dataset.get_array('train', 'y')
        x_test = cifar_dataset.get_array('test', 'x')
        y_test = cifar_dataset.get_array('test', 'y')
        svhn_train = svhn_dataset.get_array('train', 'x')
        svhn_test = svhn_dataset.get_array('test', 'x')

        if restore_dir is None:
            # Train an ensemble of Glow models from scratch.
            for current_class in range(config.ensemble_times):
                # construct the model
                model = Glow(cifar_train_dataset.slots['x'], exp.config.model)
                print('Model constructed.')

                # train the model
                train_model(exp, model, cifar_train_dataset, cifar_test_dataset)
                torch.save(model, 'model_{}.pkl'.format(current_class))

        with mltk.TestLoop() as loop:
            @torch.no_grad()
            def eval_ll(x):
                # Per-batch negative bits/dim under the currently loaded model
                # (captures 'model' from the enclosing scope).
                x = T.from_numpy(x)
                ll, outputs = model(x)
                bpd = -dequantized_bpd(ll, cifar_train_dataset.slots['x'])
                return T.to_numpy(bpd)

            final_cifar_test_ll = []
            final_svhn_test_ll = []
            for current_class in range(0, config.ensemble_times):
                if restore_dir is None:
                    model = torch.load('model_{}.pkl'.format(current_class))
                else:
                    model = torch.load(restore_dir + '/model_{}.pkl'.format(current_class))
                final_cifar_test_ll.append(get_ele_torch(eval_ll, cifar_test_flow))
                final_svhn_test_ll.append(get_ele_torch(eval_ll, svhn_test_flow))

            config.x_shape = x_train.shape[1:]
            config.x_shape_multiple = 1
            for x in config.x_shape:
                config.x_shape_multiple *= x

            def get_bpd_waic(arrays):
                # WAIC = mean - variance across ensemble members (bits/dim scale).
                arrays = np.stack(arrays, axis=0)
                waic = np.mean(arrays, axis=0) - np.var(arrays, axis=0)
                return waic

            def get_ll_waic(arrays):
                # Convert bits/dim to log-likelihood (nats) before computing WAIC.
                arrays = np.stack(arrays, axis=0)
                arrays = arrays * config.x_shape_multiple * np.log(2)
                waic = np.mean(arrays, axis=0) - np.var(arrays, axis=0)
                return waic

            def get_mean(arrays):
                arrays = np.stack(arrays, axis=0)
                return np.mean(arrays, axis=0)

            def get_var(arrays):
                arrays = np.stack(arrays, axis=0)
                return -np.var(arrays, axis=0)

            loop.add_metrics(bpd_waic_histogram=plot_fig(
                data_list=[get_bpd_waic(final_cifar_test_ll), get_bpd_waic(final_svhn_test_ll)],
                color_list=['red', 'green'],
                label_list=[config.in_dataset.name + ' Test', config.out_dataset.name + ' Test'],
                x_label='bits/dim', fig_name='bpd_waic_histogram'))
            loop.add_metrics(ll_waic_histogram=plot_fig(
                data_list=[get_ll_waic(final_cifar_test_ll), get_ll_waic(final_svhn_test_ll)],
                color_list=['red', 'green'],
                label_list=[config.in_dataset.name + ' Test', config.out_dataset.name + ' Test'],
                x_label='bits/dim', fig_name='ll_waic_histogram'))
            loop.add_metrics(mean_log_prob_histogram=plot_fig(
                data_list=[get_mean(final_cifar_test_ll), get_mean(final_svhn_test_ll)],
                color_list=['red', 'green'],
                label_list=[config.in_dataset.name + ' Test', config.out_dataset.name + ' Test'],
                x_label='bits/dim', fig_name='mean_log_prob_histogram'))
            loop.add_metrics(var_log_prob_histogram=plot_fig(
                data_list=[get_var(final_cifar_test_ll), get_var(final_svhn_test_ll)],
                color_list=['red', 'green'],
                label_list=[config.in_dataset.name + ' Test', config.out_dataset.name + ' Test'],
                x_label='bits/dim', fig_name='var_log_prob_histogram'))


if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Google Analytics cookies."""
import unittest
from plaso.formatters import ganalytics as _ # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers.cookie_plugins import ganalytics
from plaso.parsers.sqlite_plugins import chrome_cookies
from plaso.parsers.sqlite_plugins import firefox_cookies
from tests.parsers.sqlite_plugins import test_lib as sqlite_plugins_test_lib
class GoogleAnalyticsPluginTest(sqlite_plugins_test_lib.SQLitePluginTestCase):
  """Tests for the Google Analytics plugin."""

  def _GetAnalyticsCookies(self, event_queue_consumer):
    """Return a list of analytics cookies."""
    cookies = []
    for event_object in self._GetEventObjectsFromQueue(event_queue_consumer):
      if isinstance(event_object, ganalytics.GoogleAnalyticsEvent):
        cookies.append(event_object)
    return cookies

  def testParsingFirefox29CookieDatabase(self):
    """Tests the Process function on a Firefox 29 cookie database file."""
    plugin = firefox_cookies.FirefoxCookiePlugin()
    test_file = self._GetTestFilePath([u'firefox_cookies.sqlite'])
    event_queue_consumer = self._ParseDatabaseFileWithPlugin(plugin, test_file)
    event_objects = self._GetAnalyticsCookies(event_queue_consumer)
    self.assertEqual(len(event_objects), 25)

    # Spot-check one __utmz event in detail.
    event_object = event_objects[14]

    self.assertEqual(
        event_object.utmcct,
        u'/frettir/erlent/2013/10/30/maelt_med_kerfisbundnum_hydingum/')

    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2013-10-30 21:56:06')
    self.assertEqual(event_object.timestamp, expected_timestamp)

    self.assertEqual(event_object.url, u'http://ads.aha.is/')
    self.assertEqual(event_object.utmcsr, u'mbl.is')

    expected_msg = (
        u'http://ads.aha.is/ (__utmz) Sessions: 1 Domain Hash: 137167072 '
        u'Sources: 1 Last source used to access: mbl.is Ad campaign '
        u'information: (referral) Last type of visit: referral Path to '
        u'the page of referring link: /frettir/erlent/2013/10/30/'
        u'maelt_med_kerfisbundnum_hydingum/')

    self._TestGetMessageStrings(
        event_object, expected_msg, u'http://ads.aha.is/ (__utmz)')

  def testParsingChromeCookieDatabase(self):
    """Test the process function on a Chrome cookie database."""
    plugin = chrome_cookies.ChromeCookiePlugin()
    test_file = self._GetTestFilePath([u'cookies.db'])
    event_queue_consumer = self._ParseDatabaseFileWithPlugin(plugin, test_file)
    event_objects = self._GetAnalyticsCookies(event_queue_consumer)

    # The cookie database contains 560 entries in total. Out of them
    # there are 75 events created by the Google Analytics plugin.
    self.assertEqual(len(event_objects), 75)
    # Check few "random" events to verify.

    # Check an UTMZ Google Analytics event.
    event_object = event_objects[39]
    self.assertEqual(event_object.utmctr, u'enders game')
    self.assertEqual(event_object.domain_hash, u'68898382')
    self.assertEqual(event_object.sessions, 1)

    expected_msg = (
        u'http://imdb.com/ (__utmz) Sessions: 1 Domain Hash: 68898382 '
        u'Sources: 1 Last source used to access: google Ad campaign '
        u'information: (organic) Last type of visit: organic Keywords '
        u'used to find site: enders game')

    self._TestGetMessageStrings(
        event_object, expected_msg, u'http://imdb.com/ (__utmz)')

    # Check the UTMA Google Analytics event.
    event_object = event_objects[41]
    self.assertEqual(event_object.timestamp_desc, u'Analytics Previous Time')
    self.assertEqual(event_object.cookie_name, u'__utma')
    self.assertEqual(event_object.visitor_id, u'1827102436')
    self.assertEqual(event_object.sessions, 2)

    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2012-03-22 01:55:29')
    self.assertEqual(event_object.timestamp, expected_timestamp)

    expected_msg = (
        u'http://assets.tumblr.com/ (__utma) '
        u'Sessions: 2 '
        u'Domain Hash: 151488169 '
        u'Visitor ID: 1827102436')

    self._TestGetMessageStrings(
        event_object, expected_msg, u'http://assets.tumblr.com/ (__utma)')

    # Check the UTMB Google Analytics event.
    event_object = event_objects[34]
    self.assertEqual(
        event_object.timestamp_desc, eventdata.EventTimestamp.LAST_VISITED_TIME)
    self.assertEqual(event_object.cookie_name, u'__utmb')
    self.assertEqual(event_object.domain_hash, u'154523900')
    self.assertEqual(event_object.pages_viewed, 1)

    expected_timestamp = timelib.Timestamp.CopyFromString(
        u'2012-03-22 01:48:30')
    self.assertEqual(event_object.timestamp, expected_timestamp)

    expected_msg = (
        u'http://upressonline.com/ (__utmb) Pages Viewed: 1 Domain Hash: '
        u'154523900')

    self._TestGetMessageStrings(
        event_object, expected_msg, u'http://upressonline.com/ (__utmb)')


if __name__ == '__main__':
  unittest.main()
|
<gh_stars>0
# Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from oslo_config import cfg
from oslo_log import log as logging
import pkg_resources as pkg
from mistral.actions import action_generator
from mistral.utils import inspect_utils as i_u
from mistral import version
CONF = cfg.CONF  # global oslo.config configuration object
LOG = logging.getLogger(__name__)  # module-level logger
def get_mapping():
    """Load the OpenStack action mapping JSON, stripping '_comment' entries.

    The mapping path comes from CONF.openstack_actions_mapping_path; relative
    paths are resolved against the installed package.
    """
    def _strip_comments(section):
        # Recurse into nested dicts first, then drop this level's comment key.
        for value in section.values():
            if isinstance(value, dict):
                _strip_comments(value)
        if '_comment' in section:
            del section['_comment']

    package = version.version_info.package
    path = CONF.openstack_actions_mapping_path
    if os.path.isabs(path):
        mapping_file_path = path
    else:
        mapping_file_path = pkg.resource_filename(package, path)

    LOG.info("Processing OpenStack action mapping from file: %s",
             mapping_file_path)

    with open(mapping_file_path) as fh:
        mapping = json.load(fh)

    for section in mapping.values():
        if isinstance(section, dict):
            _strip_comments(section)

    return mapping
class OpenStackActionGenerator(action_generator.ActionGenerator):
    """OpenStackActionGenerator.

    Base generator for all OpenStack actions,
    creates a client method declaration using
    specific python-client and sets needed arguments
    to actions.
    """
    action_namespace = None
    base_action_class = None

    @classmethod
    def prepare_action_inputs(cls, origin_inputs, added=None):
        """Modify action input string.

        Sometimes we need to change the default action input definition for
        OpenStack actions in order to make the workflow more powerful.

        Examples::

            >>> prepare_action_inputs('a,b,c', added=['region=RegionOne'])
            a, b, c, region=RegionOne
            >>> prepare_action_inputs('a,b,c=1', added=['region=RegionOne'])
            a, b, region=RegionOne, c=1
            >>> prepare_action_inputs('a,b,c=1,**kwargs',
                                      added=['region=RegionOne'])
            a, b, region=RegionOne, c=1, **kwargs
            >>> prepare_action_inputs('**kwargs', added=['region=RegionOne'])
            region=RegionOne, **kwargs
            >>> prepare_action_inputs('', added=['region=RegionOne'])
            region=RegionOne

        :param origin_inputs: A string consists of action inputs, separated by
                              comma.
        :param added: (Optional) A list of params to add to input string.
        :return: The new action input string.
        """
        # Fix: 'added' previously defaulted to a mutable list ([]), which is
        # evaluated once and shared between all calls; None is the safe sentinel.
        added = added if added is not None else []

        if not origin_inputs:
            return ", ".join(added)

        inputs = [i.strip() for i in origin_inputs.split(',')]

        # Locate the boundary where keyword arguments begin so that plain
        # params can be inserted before it.
        kwarg_index = None

        for index, input in enumerate(inputs):
            if "=" in input:
                kwarg_index = index
            if "**" in input:
                kwarg_index = index - 1

        kwarg_index = len(inputs) if kwarg_index is None else kwarg_index
        kwarg_index = kwarg_index + 1 if kwarg_index < 0 else kwarg_index

        for a in added:
            if "=" not in a:
                # Positional additions go to the front.
                inputs.insert(0, a)
                kwarg_index += 1
            else:
                inputs.insert(kwarg_index, a)

        return ", ".join(inputs)

    @classmethod
    def create_action_class(cls, method_name):
        """Build an action class bound to *method_name*, or None when empty."""
        if not method_name:
            return None

        action_class = type(str(method_name), (cls.base_action_class,),
                            {'client_method_name': method_name})

        return action_class

    @classmethod
    def create_actions(cls):
        """Return action descriptors for every method in this namespace's mapping.

        Each descriptor is a dict with 'class', 'name', 'description' and
        'arg_list' keys; methods whose fake client lookup fails are skipped
        (logged) rather than aborting the whole generation.
        """
        mapping = get_mapping()
        method_dict = mapping.get(cls.action_namespace, {})

        action_classes = []

        for action_name, method_name in method_dict.items():
            class_ = cls.create_action_class(method_name)

            try:
                client_method = class_.get_fake_client_method()
            except Exception:
                LOG.exception("Failed to create action: %s.%s" %
                              (cls.action_namespace, action_name))
                continue

            arg_list = i_u.get_arg_list_as_str(client_method)

            # Support specifying region for OpenStack actions.
            modules = CONF.openstack_actions.modules_support_region
            if cls.action_namespace in modules:
                arg_list = cls.prepare_action_inputs(
                    arg_list,
                    added=['action_region=""']
                )

            description = i_u.get_docstring(client_method)

            action_classes.append(
                {
                    'class': class_,
                    'name': "%s.%s" % (cls.action_namespace, action_name),
                    'description': description,
                    'arg_list': arg_list,
                }
            )

        return action_classes
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 20:22, 12/06/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
import numpy as np
from mealpy.optimizer import Optimizer
class BaseSMA(Optimizer):
    """
    My modified version of: Slime Mould Algorithm (SMA)
        (Slime Mould Algorithm: A New Method for Stochastic Optimization)
    Link:
        https://doi.org/10.1016/j.future.2020.03.055
        https://www.researchgate.net/publication/340431861_Slime_mould_algorithm_A_new_method_for_stochastic_optimization
    Notes:
        + Selected 2 unique and random solution to create new solution (not to create variable) --> remove third loop in original version
        + Check bound and update fitness after each individual move instead of after the whole population move in the original version
        + My version not only faster but also better
    """
    # Index of the weight vector inside each solution list
    # (position and fitness indices are inherited from Optimizer).
    ID_WEI = 2

    def __init__(self, problem, epoch=10000, pop_size=100, pr=0.03, **kwargs):
        """
        Args:
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
            pr (float): probability threshold (z in the paper), default = 0.03
        """
        # NOTE(review): kwargs is passed positionally as a dict — looks like it
        # matches the mealpy Optimizer API of this version; confirm before changing.
        super().__init__(problem, kwargs)
        self.nfe_per_epoch = pop_size
        self.sort_flag = True

        self.epoch = epoch
        self.pop_size = pop_size
        self.pr = pr

    def create_solution(self):
        """
        Returns:
            The position position with 2 element: index of position/location and index of fitness wrapper
            The general format: [position, [target, [obj1, obj2, ...]], weight]

        ## To get the position, fitness wrapper, target and obj list
        ##      A[self.ID_POS]                  --> Return: position
        ##      A[self.ID_FIT]                  --> Return: [target, [obj1, obj2, ...]]
        ##      A[self.ID_FIT][self.ID_TAR]     --> Return: target
        ##      A[self.ID_FIT][self.ID_OBJ]     --> Return: [obj1, obj2, ...]
        """
        position = np.random.uniform(self.problem.lb, self.problem.ub)
        fitness = self.get_fitness_position(position=position)
        # Per-dimension fitness weight, updated every epoch in evolve().
        weight = np.zeros(self.problem.n_dims)
        return [position, fitness, weight]

    def evolve(self, epoch):
        """
        Args:
            epoch (int): The current iteration
        """
        # plus eps to avoid denominator zero
        s = self.g_best[self.ID_FIT][self.ID_TAR] - self.pop[-1][self.ID_FIT][self.ID_TAR] + self.EPSILON
        # calculate the fitness weight of each slime mold
        for i in range(0, self.pop_size):
            # Eq.(2.5) — better half gets weight > 1, worse half weight < 1
            if i <= int(self.pop_size / 2):
                self.pop[i][self.ID_WEI] = 1 + np.random.uniform(0, 1, self.problem.n_dims) * \
                    np.log10((self.g_best[self.ID_FIT][self.ID_TAR] - self.pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)
            else:
                self.pop[i][self.ID_WEI] = 1 - np.random.uniform(0, 1, self.problem.n_dims) * \
                    np.log10((self.g_best[self.ID_FIT][self.ID_TAR] - self.pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)
        a = np.arctanh(-((epoch + 1) / self.epoch) + 1)  # Eq.(2.4)
        b = 1 - (epoch + 1) / self.epoch
        pop_new = []
        for idx in range(0, self.pop_size):
            # Update the Position of search agent
            if np.random.uniform() < self.pr:  # Eq.(2.7) — random restart with probability pr
                pos_new = np.random.uniform(self.problem.lb, self.problem.ub)
            else:
                p = np.tanh(np.abs(self.pop[idx][self.ID_FIT][self.ID_TAR] - self.g_best[self.ID_FIT][self.ID_TAR]))  # Eq.(2.2)
                vb = np.random.uniform(-a, a, self.problem.n_dims)  # Eq.(2.3)
                vc = np.random.uniform(-b, b, self.problem.n_dims)
                # two positions randomly selected from population, apply for the whole problem size instead of 1 variable
                id_a, id_b = np.random.choice(list(set(range(0, self.pop_size)) - {idx}), 2, replace=False)
                pos_1 = self.g_best[self.ID_POS] + vb * (self.pop[idx][self.ID_WEI] * self.pop[id_a][self.ID_POS] - self.pop[id_b][self.ID_POS])
                pos_2 = vc * self.pop[idx][self.ID_POS]
                pos_new = np.where(np.random.uniform(0, 1, self.problem.n_dims) < p, pos_1, pos_2)
            # Check bound and re-calculate fitness after each individual move
            pos_new = self.amend_position_faster(pos_new)
            pop_new.append([pos_new, None, np.zeros(self.problem.n_dims)])
        self.pop = self.update_fitness_population(pop_new)
class OriginalSMA(BaseSMA):
    """
    This version developed by one of my students: Slime Mould Algorithm (SMA)
        (Slime Mould Algorithm: A New Method for Stochastic Optimization)
    Link:
        https://doi.org/10.1016/j.future.2020.03.055
    """
    # Index of the weight vector inside each solution list.
    ID_WEI = 2

    def __init__(self, problem, epoch=10000, pop_size=100, pr=0.03, **kwargs):
        """
        Args:
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
            pr (float): probability threshold (z in the paper), default = 0.03
        """
        super().__init__(problem, epoch, pop_size, pr, **kwargs)

    def evolve(self, epoch):
        """
        Args:
            epoch (int): The current iteration
        """
        s = self.g_best[self.ID_FIT][self.ID_TAR] - self.pop[-1][self.ID_FIT][self.ID_TAR] + self.EPSILON  # plus eps to avoid denominator zero
        # calculate the fitness weight of each slime mold
        for i in range(0, self.pop_size):
            # Eq.(2.5) — better half gets weight > 1, worse half weight < 1
            if i <= int(self.pop_size / 2):
                self.pop[i][self.ID_WEI] = 1 + np.random.uniform(0, 1, self.problem.n_dims) * \
                    np.log10((self.g_best[self.ID_FIT][self.ID_TAR] - self.pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)
            else:
                self.pop[i][self.ID_WEI] = 1 - np.random.uniform(0, 1, self.problem.n_dims) * \
                    np.log10((self.g_best[self.ID_FIT][self.ID_TAR] - self.pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)
        a = np.arctanh(-((epoch + 1) / self.epoch) + 1)  # Eq.(2.4)
        b = 1 - (epoch + 1) / self.epoch
        pop_new = []
        for idx in range(0, self.pop_size):
            # Update the Position of search agent
            current_agent = self.pop[idx].copy()
            if np.random.uniform() < self.pr:  # Eq.(2.7) — random restart with probability pr
                current_agent[self.ID_POS] = np.random.uniform(self.problem.lb, self.problem.ub)
            else:
                p = np.tanh(np.abs(current_agent[self.ID_FIT][self.ID_TAR] - self.g_best[self.ID_FIT][self.ID_TAR]))  # Eq.(2.2)
                vb = np.random.uniform(-a, a, self.problem.n_dims)  # Eq.(2.3)
                vc = np.random.uniform(-b, b, self.problem.n_dims)
                # Per-dimension update (the original paper's inner loop).
                for j in range(0, self.problem.n_dims):
                    # two positions randomly selected from population
                    id_a, id_b = np.random.choice(list(set(range(0, self.pop_size)) - {idx}), 2, replace=False)
                    if np.random.uniform() < p:  # Eq.(2.1)
                        current_agent[self.ID_POS][j] = self.g_best[self.ID_POS][j] + vb[j] * (
                            current_agent[self.ID_WEI][j] * self.pop[id_a][self.ID_POS][j] - self.pop[id_b][self.ID_POS][j])
                    else:
                        current_agent[self.ID_POS][j] = vc[j] * current_agent[self.ID_POS][j]
            pos_new = self.amend_position_faster(current_agent[self.ID_POS])
            pop_new.append([pos_new, None, np.zeros(self.problem.n_dims)])
        self.pop = self.update_fitness_population(pop_new)
|
# -*- coding: utf-8 -*-
# Copyright 2018 Moarri Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '<NAME>'
import pkg_resources
from moarri_profile_iacs.iacs_profile.iacs_caaml_profile import *
from xml.dom import minidom, Node
from lxml import etree
from enum import Enum
_XSD_PATH = "xsd"
class CAAMLProfileIACSVersion(Enum):
    """Major versions of the CAAML IACS snow-profile schema."""
    UNKNOWN = 0
    V5 = 5
    V6 = 6
# Maps a major-version key (as extracted from the CAAML namespace, e.g. "v6")
# to its bundled XSD schema file name. Only v6 is bundled here.
_XSD_FILE_NAMES = {
    "v6": "CAAMLv6_SnowProfileIACS.xsd"
}
def trim_string(x):
    """Return *x* stripped of surrounding whitespace, or None when *x* is None."""
    return None if x is None else x.strip()
def extract_double(el):
    """Parse *el* (a string, possibly padded with whitespace) as a float."""
    text = trim_string(el)
    return float(text)
def extract_from_enum_type(enum_cls, el):
    """Look up the trimmed text of *el* in *enum_cls* via its value_of()."""
    trimmed = trim_string(el)
    return enum_cls.value_of(trimmed)
def extract_unit_attribute(enum_cls, el):
    """Read the element's unit-of-measure attribute and map it through *enum_cls*."""
    raw_value = el.getAttribute(ATTR_UOM)
    return enum_cls.value_of(trim_string(raw_value))
def extract_unit_attribute_name(enum_cls, el, attr_name):
    """Read the attribute *attr_name* from *el* and map it through *enum_cls*."""
    raw_value = el.getAttribute(attr_name)
    return enum_cls.value_of(trim_string(raw_value))
def _xmlschema(version):
    """Load and compile the bundled XSD schema for *version*.

    Returns an lxml XMLSchema, or None when no schema is bundled for the
    version's major component.
    """
    parts = version.split(".")
    if len(parts) == 0:
        return None
    major = parts[0]
    if major not in _XSD_FILE_NAMES:
        return None
    xsd_file = _XSD_FILE_NAMES[major]
    xsd_string = pkg_resources.resource_string(
        __name__, _XSD_PATH + "/" + version + "/" + xsd_file)
    if not xsd_string:
        return None
    xmlschema_doc = etree.XML(xsd_string)
    return etree.XMLSchema(xmlschema_doc)
def check_caaml_file(filename):
    """Validate *filename* against the CAAML schema matching its declared version.

    :param filename: path (or file object) of the CAAML XML document.
    :return: (True, version_string) when the document validates, otherwise
             (False, CAAMLProfileIACSVersion.UNKNOWN).
    """
    caaml_doc = etree.parse(filename)
    root_node = caaml_doc.getroot()
    # Fix: 'if root_node' truth-tests an lxml element, which is falsy when it
    # has no children; compare against None explicitly.
    if root_node is not None and 'caaml' in root_node.nsmap:
        caaml_string = root_node.nsmap['caaml']
        version = caaml_string.split("/")[-1]
        xmlschema = _xmlschema(version)
        # Fix: _xmlschema returns None for unsupported versions; the original
        # dereferenced it unconditionally and crashed with AttributeError.
        if xmlschema is not None and xmlschema.validate(caaml_doc):
            return True, version
    return False, CAAMLProfileIACSVersion.UNKNOWN
def parse_caaml_file(filename):
    """Parse the CAAML XML file at *filename* into a SnowProfile."""
    xmldoc = minidom.parse(filename)
    return parse_caaml_doc(xmldoc)
def parse_caaml_doc(xmldoc):
    """Build a SnowProfile from a parsed CAAML DOM document.

    Returns None when the document's root is not a snow-profile node.
    """
    snow_profile_node = xmldoc.firstChild
    # Guard clause: bail out early on non snow-profile documents.
    if not snow_profile_node.localName == SnowProfileMeta.MAIN_NODE.code:
        return None
    profile_id = snow_profile_node.getAttribute(SnowProfileMeta.ATTR_GML_ID.code)
    result_nodes = [
        child for child in snow_profile_node.childNodes
        if child.nodeType == Node.ELEMENT_NODE
        and child.localName == SnowProfileMeta.CHILD_SNOW_PROFILE_RESULTS_OF.code
    ]
    results = _parse_snow_profile_results(result_nodes[0])
    return SnowProfile(profile_id, results)
def _parse_aspect(e):
    """Extract an Aspect (cardinal direction) from DOM element *e*.

    Raises IndexError when the expected child elements are absent, exactly
    like the original ``[...][0]`` indexing.
    """
    main_node = [child for child in e.childNodes
                 if child.nodeType == Node.ELEMENT_NODE
                 and child.localName == AspectMeta.MAIN_NODE.code][0]
    position_node = [child for child in main_node.childNodes
                     if child.nodeType == Node.ELEMENT_NODE
                     and child.localName == AspectMeta.CHILD_POSITION.code][0]
    cardinal = extract_from_enum_type(IACSAspectCardinalType,
                                      position_node.firstChild.data)
    return Aspect.create_default(cardinal)
def _parse_snow_height_components(e):
    """Parse a snow-height element (HS/HIN/HN24) into SnowHeightComponents.

    Depending on which children are present, builds the object from snow
    height only, SWE (snow water equivalent) only, or both; returns None
    when neither child element is found.
    """
    components = None
    main_node = [x for x in e.childNodes if
                 x.nodeType == Node.ELEMENT_NODE and x.localName == SnowHeightComponentsMeta.MAIN_NODE.code][0]
    if main_node is not None and main_node != []:
        snow_height = None
        snow_height_uom = None
        swe = None
        swe_uom = None
        # presence flags decide which factory method is used below
        height_present = False
        swe_present = False
        for node in [x for x in main_node.childNodes if x.nodeType == Node.ELEMENT_NODE]:
            if node.localName == SnowHeightComponentsMeta.CHILD_SNOW_HEIGHT.code:
                snow_height = extract_double(node.firstChild.data)
                snow_height_uom = extract_unit_attribute(IACSUnitsLengthType, node)
                height_present = True
            elif node.localName == SnowHeightComponentsMeta.CHILD_SWE.code:
                swe = extract_double(node.firstChild.data)
                swe_uom = extract_unit_attribute(IACSUnitsLengthType, node)
                swe_present = True
        if height_present and swe_present:
            components = SnowHeightComponents.create_both(snow_height, snow_height_uom, swe, swe_uom)
        elif height_present:
            components = SnowHeightComponents.create_snow_height(snow_height, snow_height_uom)
        elif swe_present:
            components = SnowHeightComponents.create_swe(swe, swe_uom)
    return components
def _parse_hardness(e):
    """Build a Hardness from DOM element *e*, choosing numeric vs. cardinal
    representation based on the element's force unit attribute."""
    force_unit = extract_unit_attribute(IACSUnitsForceType, e)
    if force_unit == IACSUnitsForceType.N:
        # Newtons -> numeric hardness value
        return Hardness.create_numeric(extract_double(e.firstChild.data))
    if force_unit == IACSUnitsForceType.EMPTY:
        # no unit -> cardinal hand-hardness code
        return Hardness.create_cardinal(
            extract_from_enum_type(IACSHardnessType, e.firstChild.data))
    return Hardness()
def _parse_lwc(e):
    """Build a Lwc (liquid water content) from DOM element *e*, choosing
    numeric vs. cardinal representation based on the lwc unit attribute."""
    lwc_unit = extract_unit_attribute(IACSUnitsLwcType, e)
    if lwc_unit == IACSUnitsLwcType.PRCVOL:
        # percent by volume -> numeric value
        return Lwc.create_numeric(extract_double(e.firstChild.data))
    if lwc_unit == IACSUnitsLwcType.EMPTY:
        # no unit -> cardinal wetness code
        return Lwc.create_cardinal(
            extract_from_enum_type(IACSLiquidWaterContentType, e.firstChild.data))
    return Lwc()
def _parse_grain_size(e):
    """Build a GrainSize (uom, avg, avg_max) from DOM element *e*."""
    uom = extract_unit_attribute(IACSUnitsLengthType, e)
    main_node = [child for child in e.childNodes
                 if child.nodeType == Node.ELEMENT_NODE
                 and child.localName == GrainSizeMeta.MAIN_NODE.code][0]
    if main_node is None:
        return GrainSize()
    avg = None
    avg_max = None
    for child in [x for x in main_node.childNodes if x.nodeType == Node.ELEMENT_NODE]:
        if child.localName == GrainSizeMeta.CHILD_AVG.code:
            avg = extract_double(child.firstChild.data)
        elif child.localName == GrainSizeMeta.CHILD_AVG_MAX.code:
            avg_max = extract_double(child.firstChild.data)
    return GrainSize.create(uom, avg, avg_max)
def _parse_layer(e):
    """Parse one stratigraphic <Layer> element into a StratProfileLayer."""
    depth_top = None
    depth_top_uom = IACSUnitsLengthType.CM  # default unit when not specified
    thickness = None
    thickness_uom = IACSUnitsLengthType.CM
    valid_formation_time = None  # never populated by this parser
    grain_form_primary = None
    grain_form_secondary = None
    grain_size = None
    hardness = None
    lwc = None
    for child in (x for x in e.childNodes if x.nodeType == Node.ELEMENT_NODE):
        name = child.localName
        if name == LayerMeta.CHILD_DEPTH_TOP.code:
            depth_top = extract_double(child.firstChild.data)
            depth_top_uom = extract_unit_attribute(IACSUnitsLengthType, child)
        elif name == LayerMeta.CHILD_THICKNESS.code:
            thickness = extract_double(child.firstChild.data)
            thickness_uom = extract_unit_attribute(IACSUnitsLengthType, child)
        elif name == LayerMeta.CHILD_GRAIN_FORM_PRIMARY.code:
            grain_form_primary = extract_from_enum_type(
                IACSGrainShapeType, child.firstChild.data)
        elif name == LayerMeta.CHILD_GRAIN_FORM_SECONDARY.code:
            grain_form_secondary = extract_from_enum_type(
                IACSGrainShapeType, child.firstChild.data)
        elif name == LayerMeta.CHILD_GRAIN_SIZE.code:
            grain_size = _parse_grain_size(child)
        elif name == LayerMeta.CHILD_HARDNESS.code:
            hardness = _parse_hardness(child)
        elif name == LayerMeta.CHILD_LWC.code:
            lwc = _parse_lwc(child)
    return StratProfileLayer(depth_top, depth_top_uom, thickness, thickness_uom,
                             valid_formation_time, grain_form_primary,
                             grain_form_secondary, grain_size, hardness, lwc)
def _parse_strat_profile(e):
    """Parse all <Layer> children of *e* into a list of StratProfileLayer."""
    layers = []
    for node in [x for x in e.childNodes if
                 x.nodeType == Node.ELEMENT_NODE and x.localName == LayerMeta.MAIN_NODE.code]:
        layer = _parse_layer(node)
        # BUG FIX: the original compared `layer is not Node` (the xml.dom
        # class object, always true); the intended check is against None.
        if layer is not None:
            layers.append(layer)
    return layers
def _parse_temp_profile_obs(e, depth_uom=IACSUnitsLengthType.CM):
    """Parse one temperature observation (depth + snow temperature).

    :param depth_uom: depth unit inherited from the enclosing profile element
    """
    depth = None
    snow_temp = None
    for child in (x for x in e.childNodes if x.nodeType == Node.ELEMENT_NODE):
        if child.localName == TempProfileObsMeta.CHILD_DEPTH.code:
            depth = extract_double(child.firstChild.data)
        elif child.localName == TempProfileObsMeta.CHILD_SNOW_TEMP.code:
            snow_temp = extract_double(child.firstChild.data)
    return TempProfileObs(depth, depth_uom, snow_temp)
def _parse_temp_profile(e):
    """Parse a temperature-profile element into a TempProfile."""
    uom_depth = extract_unit_attribute_name(IACSUnitsLengthType, e, UOM_DEPTH)
    uom_temp = extract_unit_attribute_name(IACSUnitsTempType, e, UOM_TEMP)
    observations = []
    for child in [x for x in e.childNodes
                  if x.nodeType == Node.ELEMENT_NODE
                  and x.localName == TempProfileObsMeta.MAIN_NODE.code]:
        obs = _parse_temp_profile_obs(child, uom_depth)
        if obs is not None:  # defensive; kept from the original
            observations.append(obs)
    return TempProfile(observations, uom_depth, uom_temp)
def _parse_snow_profile_results(element):
    """Parse the snowProfileResultsOf element into SnowProfileResults.

    Walks the measurement children, dispatching on the element name and
    collecting keyword arguments for ``SnowProfileMeasurements.create_default``.
    Returns None when the measurements node is missing or an unrecognized
    child element is encountered.
    """
    main_node = [x for x in element.childNodes if
                 x.nodeType == Node.ELEMENT_NODE and x.localName == SnowProfileMeasurementsMeta.MAIN_NODE.code][0]
    if main_node is not None and main_node != []:
        measurement_components = {
            'direction': IACSDirectionType.value_of(main_node.getAttribute(SnowProfileMeasurementsMeta.ATTR_DIR.code))}
        for node in [x for x in main_node.childNodes if x.nodeType == Node.ELEMENT_NODE]:
            if node.localName == SnowProfileMeasurementsMeta.CHILD_COMMENT.code:
                measurement_components['comment'] = trim_string(node.firstChild.data)
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_PROFILE_DEPTH.code:
                measurement_components['profile_depth'] = extract_double(node.firstChild.data)
                measurement_components['profile_depth_uom'] = extract_unit_attribute(IACSUnitsLengthType, node)
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_PENETRATION_RAM.code:
                measurement_components['penetration_ram'] = extract_double(node.firstChild.data)
                measurement_components['penetration_ram_uom'] = extract_unit_attribute(IACSUnitsLengthType, node)
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_PENETRATION_FOOT.code:
                measurement_components['penetration_foot'] = extract_double(node.firstChild.data)
                measurement_components['penetration_foot_uom'] = extract_unit_attribute(IACSUnitsLengthType, node)
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_PENETRATION_SKI.code:
                measurement_components['penetration_ski'] = extract_double(node.firstChild.data)
                measurement_components['penetration_ski_uom'] = extract_unit_attribute(IACSUnitsLengthType, node)
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_AIR_TEMP_PRES.code:
                measurement_components['air_temp_pres'] = extract_double(node.firstChild.data)
                measurement_components['air_temp_pres_uom'] = extract_unit_attribute(IACSUnitsTempType, node)
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_WIND_SPD.code:
                measurement_components['wind_spd'] = extract_double(node.firstChild.data)
                measurement_components['wind_spd_uom'] = extract_unit_attribute(IACSUnitsWindSpdType, node)
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_WIND_DIR.code:
                aspect = _parse_aspect(node)
                if aspect is not None:
                    measurement_components['wind_dir'] = aspect
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_HS.code:
                components = _parse_snow_height_components(node)
                if components is not None:
                    measurement_components['hs'] = components
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_HIN.code:
                components = _parse_snow_height_components(node)
                if components is not None:
                    measurement_components['hin'] = components
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_HN24.code:
                components = _parse_snow_height_components(node)
                if components is not None:
                    measurement_components['hn24'] = components
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_SKY_COND.code:
                measurement_components['sky_cond'] = extract_from_enum_type(IACSSkyConditionType, node.firstChild.data)
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_PRECIP_TI.code:
                measurement_components['precip_ti'] = extract_from_enum_type(IACSPrecipTIType, node.firstChild.data)
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_STRAT_PROFILE.code:
                measurement_components['strat_profile'] = _parse_strat_profile(node)
            elif node.localName == SnowProfileMeasurementsMeta.CHILD_TEMP_PROFILE.code:
                measurement_components['temp_profile'] = _parse_temp_profile(node)
            else:
                # NOTE(review): any single unrecognized child aborts the whole
                # parse and discards everything collected so far -- confirm
                # this strictness is intentional (skipping unknown elements
                # may be the desired behavior instead).
                return None
        results = SnowProfileResults(SnowProfileMeasurements.create_default(**measurement_components))
        return results
|
import numpy as np
from keras.models import Sequential
from keras.optimizers import SGD
from keras.utils import np_utils
#from matplotlib.pyplot import imshow
from utils import *
########################
# Import the layer types needed
from keras.layers.core import Dense, Activation, Dropout,Flatten
from keras.layers.convolutional import Convolution2D,MaxPooling2D
########################
##TODO
# ADD INIT AND UPDATE FUNCTIONS
# HANDLE DATA FROM SIMULATION
# WRITE OBJECTIVE FUNCTION
# LOOK AT OUTPUT ACTIVATION
class CNNTraining:
    """Wraps a small Keras CNN for offline/online training and prediction.

    Written against the Keras 1.x API (``Convolution2D``, ``nb_epoch``,
    ``show_accuracy``, ``output_dim``) -- these names do not exist in
    modern Keras.
    """

    def __init__(self, X, out_layers):
        """Build and compile the model.

        :param X: a sample input used only for its shape; 'tf' dim ordering,
            so the shape is interpreted as (height, width, channels)
        :param out_layers: number of output units (sigmoid activation)
        """
        self.model = Sequential()
        print("X Model: ", X.shape)
        self.model.add(Convolution2D(
            nb_filter=32, nb_row=3, nb_col=3, dim_ordering='tf',
            input_shape=(X.shape[0], X.shape[1], X.shape[2])))
        self.model.add(Activation("relu"))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Convolution2D(nb_filter=64, nb_row=3, nb_col=3))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Flatten())
        self.model.add(Dense(output_dim=128))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(output_dim=out_layers))
        self.model.add(Activation("sigmoid"))
        self.model.compile(loss='mean_squared_error',
                           optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True))

    def update_model_offline(self, X, Y):
        """Fit the model on a full batch; returns the Keras History object.

        BUG FIX: the original body referenced the undefined names ``model``
        (instead of ``self.model``) and ``loss`` (instead of the fit result),
        raising NameError on every call.
        """
        history = self.model.fit(X, Y, nb_epoch=5, batch_size=32,
                                 validation_split=0.1, show_accuracy=False)
        return history

    def update_model_online(self, X, Y):
        """Fit the model on a single sample, expanded to a batch of one."""
        X_Train = np.expand_dims(X, axis=0)
        Y_Train = np.expand_dims(Y, axis=0)
        loss = self.model.fit(X_Train, Y_Train, nb_epoch=2, batch_size=1,
                              validation_split=0.0, show_accuracy=True)
        print(loss.history)
        return loss

    def load_weights(self, file):
        """Load model weights from *file* (an HDF5 weights path)."""
        self.model.load_weights(file)

    def predict_model(self, X):
        """Predict on a single sample *X*, expanded to a batch of one."""
        X_Pred = np.expand_dims(X, axis=0)
        return self.model.predict_on_batch(X_Pred)
#TODO IMPORT SENSOR DATA
#X_train, y_train, X_test, y_test = getMNISTData()
# We need to rehape the data back into a 1x28x28 image
# X_train = np.reshape(X_train, (X_train.shape[0], 1, 28, 28))
# X_test = np.reshape(X_test, (X_test.shape[0], 1, 28, 28))
# ## Categorize the labels
# y_train = np_utils.to_categorical(y_train, num_classes)
# y_test = np_utils.to_categorical(y_test, num_classes)
|
<reponame>bio2bel/complexportal<filename>src/bio2bel_complexportal/export_belns.py
# -*- coding: utf-8 -*-
"""Export Complex Portal as a BEL namespace."""
import sys
from hashlib import sha256
from pathlib import Path
from typing import Optional, Union
from bel_resources import parse_bel_resource, write_namespace
from bio2bel_complexportal.constants import NAMESPACE_URL, TSV_PATH
from bio2bel_complexportal.parser import df_getter
# Read size used by hash_file when streaming the TSV.
CHUNKSIZE = 2 ** 20  # 1 megabyte
# Metadata written into the BELNS header by write_namespace.
NAMESPACE_NAME = 'Complex Portal'
NAMESPACE_KEYWORD = 'complexportal'
def hash_file(path: Union[str, Path]) -> str:
    """Determine the SHA256 hash of the contents of the file at `path`.

    The file is streamed in ``CHUNKSIZE`` blocks so arbitrarily large files
    can be hashed with constant memory.

    :param path: path to the file to be hashed
    :returns: the hexdigest of the SHA256 hash of the contents of the file
    """
    hasher = sha256()
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(CHUNKSIZE)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.hexdigest()
def is_version_string_equal_to_digest(
    path: Union[str, Path], digest: Optional[str]
) -> bool:
    """Check whether the VersionString of the BELNS at `path` equals `digest`.

    A falsy digest (None or empty) never matches; a missing file never
    matches.

    :param path: path to the BELNS that is being compared
    :param digest: hexdigest of the data that the BELNS may be based on
    :returns: True if the digests match, False otherwise
    """
    if not digest:
        return False
    try:
        with open(path, 'r') as belns:
            resource = parse_bel_resource(belns)
    except FileNotFoundError:
        return False
    return resource.get('Namespace', {}).get('VersionString') == digest
def tsv_to_belns(
    tsv_path: Union[str, Path],
    belns_path: Optional[Union[str, Path]],
    digest: Optional[str] = None,
) -> None:
    """Create a BELNS file from the TSV data.

    If `belns_path` is None, the namespace is written to STDOUT.

    :param tsv_path: path to the input TSV (the data itself comes from
        ``df_getter()`` -- presumably reading the same cached TSV; confirm)
    :param belns_path: path to the output BELNS
    :param digest: SHA256 hexdigest of the input TSV for versioning the BELNS
    """
    df = df_getter()
    complexnames = df['Complex ac']
    values = {complexname: 'C' for complexname in complexnames}

    def _write(handle) -> None:
        # Single writer so the file and STDOUT paths stay identical.
        write_namespace(
            values=values,
            namespace_name=NAMESPACE_NAME,
            namespace_keyword=NAMESPACE_KEYWORD,
            namespace_version=digest,
            file=handle,
        )

    if belns_path is None:
        _write(None)
    else:
        # BUG FIX: the original opened the file manually in a try/finally;
        # if open() itself raised, the finally block hit an unbound `belns`
        # NameError. A context manager closes the file on all paths.
        with open(belns_path, 'w') as belns:
            _write(belns)
def main() -> Optional[int]:
    """CLI entry point: write the BELNS to the given file ('-' or no argument
    means STDOUT).

    :returns: 1 when no cached TSV data is available, None otherwise.
    """
    assert (
        len(sys.argv) == 1 or len(sys.argv) == 2
    ), 'requires 0 or 1 arguments: [output filename]'
    output_file = None
    if len(sys.argv) == 2:
        output_file = sys.argv[1]
    if output_file is not None and output_file.strip() == '-':
        # '-' is the conventional spelling for STDOUT
        output_file = None
    try:
        digest = hash_file(TSV_PATH)
    except FileNotFoundError:
        print(f'no cached data at `{TSV_PATH}`; unable to continue', file=sys.stderr)
        return 1
    # Skip regeneration when the existing output already carries this digest.
    if output_file:
        if is_version_string_equal_to_digest(output_file, digest):
            print(f'`{NAMESPACE_URL}` has not changed; exiting', file=sys.stderr)
            print(
                f'delete or rename `{output_file}` to force a re-run', file=sys.stderr
            )
            return
    # Output file doesn't exist, is STDOUT, or the TSV data is newer.
    tsv_to_belns(TSV_PATH, output_file, digest)
if __name__ == '__main__':
    # Propagate main()'s return value (None on success, 1 on error) as the
    # process exit status.
    sys.exit(main())
|
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Some basic layers."""
import gin
import copy
import numpy as np
import torch
import torch.nn as nn
from alf.initializers import variance_scaling_init
from alf.nest.utils import get_outer_rank
from alf.tensor_specs import TensorSpec
from alf.utils import common
from alf.utils.math_ops import identity
def normalize_along_batch_dims(x, mean, variance, variance_epsilon):
    """Normalize ``x`` by ``mean`` and ``variance`` over its leading batch dims.

    ``mean`` and ``variance`` are expected to have the same tensor spec as
    the inner dims of ``x``.

    Args:
        x (Tensor): a tensor of (``[D1, D2, ..] + shape``), where ``D1``,
            ``D2``, .. are arbitrary leading batch dims (can be empty).
        mean (Tensor): a tensor of ``shape``
        variance (Tensor): a tensor of ``shape``
        variance_epsilon (float): A small float number to avoid dividing by 0.
    Returns:
        Normalized tensor.
    """
    inner_spec = TensorSpec.from_tensor(mean)
    assert inner_spec == TensorSpec.from_tensor(variance), \
        "The specs of mean and variance must be equal!"
    # Collapse all leading batch dims into one, normalize, then restore.
    squasher = BatchSquash(get_outer_rank(x, inner_spec))
    flat = squasher.flatten(x)
    eps = torch.as_tensor(variance_epsilon).to(variance.dtype)
    inv_std = torch.rsqrt(variance + eps)
    normalized = (flat - mean.to(flat.dtype)) * inv_std.to(flat.dtype)
    return squasher.unflatten(normalized)
class BatchSquash(object):
    """Flattens and unflattens the leading batch dims of a tensor. Copied
    from `tf_agents`.

    Exposes a pair of matched flatten and unflatten methods: ``flatten``
    collapses the first ``batch_dims`` dimensions into one and caches the
    original shape; ``unflatten`` restores it. This lets networks that expect
    a single batch dimension be evaluated on multi-batch inputs.
    """

    def __init__(self, batch_dims):
        """Create two tied ops to flatten and unflatten the front dimensions.

        Args:
            batch_dims (int): Number of batch dimensions the flatten/unflatten
                ops should handle.
        Raises:
            ValueError: if batch dims is negative.
        """
        if batch_dims < 0:
            raise ValueError('Batch dims must be non-negative.')
        self._batch_dims = batch_dims
        self._original_tensor_shape = None

    def flatten(self, tensor):
        """Flattens and caches the tensor's batch_dims."""
        if self._batch_dims == 1:
            # Already a single batch dim -- nothing to do.
            return tensor
        self._original_tensor_shape = tensor.shape
        inner_shape = tuple(tensor.shape[self._batch_dims:])
        return torch.reshape(tensor, (-1, ) + inner_shape)

    def unflatten(self, tensor):
        """Unflattens the tensor's batch_dims using the cached shape."""
        if self._batch_dims == 1:
            return tensor
        if self._original_tensor_shape is None:
            raise ValueError('Please call flatten before unflatten.')
        leading = tuple(self._original_tensor_shape[:self._batch_dims])
        return torch.reshape(tensor, leading + tuple(tensor.shape[1:]))
@gin.configurable
class OneHot(nn.Module):
    """Converts integer class indices into float32 one-hot vectors."""

    def __init__(self, num_classes):
        super().__init__()
        self._num_classes = num_classes

    def forward(self, input):
        encoded = nn.functional.one_hot(input, num_classes=self._num_classes)
        return encoded.to(torch.float32)
@gin.configurable
class FixedDecodingLayer(nn.Module):
    """Linear decoding layer whose weight is a fixed (non-trainable) basis.

    The weight matrix is precomputed from one of four basis families
    (polynomial, Chebyshev, RBF, Haar) and frozen after construction.
    """

    def __init__(self,
                 input_size,
                 output_size,
                 basis_type="rbf",
                 sigma=1.,
                 tau=0.5):
        """A layer that uses a set of fixed basis for decoding the inputs.

        Args:
            input_size (int): the size of input to be decoded, representing the
                number of representation coefficients
            output_size (int): the size of the decoded output
            basis_type (str): the type of basis to be used for decoding
                - "poly": polynomial basis using Vandermonde matrix
                - "cheb": polynomial basis using Chebyshev polynomials
                - "rbf": radial basis functions
                - "haar": Haar wavelet basis
            sigma (float): the bandwidth parameter used for RBF basis.
                If None, a default value of 1. will be used.
            tau (float): a factor for weighting the basis exponentially
                according to the order (``n``) of the basis, i.e., ``tau**n```
        """
        # get the argument list with vals
        # (locals() is captured first so only the ctor args -- minus `self`
        # and `__class__` -- are recorded)
        self._kwargs = copy.deepcopy(locals())
        self._kwargs.pop('self')
        self._kwargs.pop('__class__')
        super(FixedDecodingLayer, self).__init__()
        assert input_size > 0, "input_size should be at least one"
        assert basis_type in {"poly", "cheb", "rbf", "haar"
                              }, ("the specified method "
                                  "{} is not supported".format(basis_type))
        # bias-free linear map; its weight is overwritten with the basis below
        self._B = nn.Linear(input_size, output_size, bias=False)

        def _polyvander_matrix(n, D, tau=tau):
            # non-square matrix [n, D + 1]
            x = torch.linspace(-1, 1, n)
            B = torch.as_tensor(np.polynomial.polynomial.polyvander(x, D))
            # weight for encoding the preference to low-frequency basis
            exp_factor = torch.arange(D + 1).float()
            basis_weight = tau**exp_factor
            return B * basis_weight

        def _chebvander_matrix(n, D, tau=tau):
            # non-square matrix [n, D + 1]
            x = np.linspace(-1, 1, n)
            B = torch.as_tensor(np.polynomial.chebyshev.chebvander(x, D))
            # weight for encoding the preference to low-frequency basis
            exp_factor = torch.arange(D + 1).float()
            basis_weight = tau**exp_factor
            return B * basis_weight

        def _rbf_matrix(n, sigma=1.0):
            # square matrix [n, n]
            x = torch.linspace(-1, 1, n)
            B = torch.empty(n, n)
            for d in range(n):
                B[:, d] = torch.exp(-(x - x[d])**2 / sigma)
            return B

        def _haar_matrix(n, tau=tau):
            # square matrix [n, n]
            def _is_power_of_two(x):
                return (x & (x - 1)) == 0

            # allow only size n to be the power of 2
            assert _is_power_of_two(n), "n is required to be the power of 2"

            def _get_haar_matrix(n):
                # recursively build the (unnormalized) Haar matrix of size n
                if n > 2:
                    h = _get_haar_matrix(n // 2)
                else:
                    return torch.Tensor([[1, 1], [1, -1]])

                def _kron(A, B):
                    # Kronecker product via einsum
                    return torch.einsum("ab,cd->acbd", A, B).view(
                        A.size(0) * B.size(0),
                        A.size(1) * B.size(1))

                # calculate upper haar part
                h_n = _kron(h, torch.Tensor([[1], [1]]))
                # calculate lower haar part
                h_i = torch.sqrt(torch.Tensor([n / 2])) * _kron(
                    torch.eye(len(h)), torch.Tensor([[1], [-1]]))
                # combine both parts
                h = torch.cat((h_n, h_i), dim=1)
                return h

            B = _get_haar_matrix(n) / torch.sqrt(torch.Tensor([n]))
            # weight for encoding the preference to low-frequency basis
            exp_factor = torch.ceil(torch.log2(torch.arange(n).float() + 1))
            basis_weight = tau**exp_factor
            return B * basis_weight

        # select the basis family; rbf/haar require a square matrix, so the
        # input and output sizes must match for them
        if basis_type == "poly":
            B = _polyvander_matrix(output_size, input_size - 1)
        elif basis_type == "cheb":
            B = _chebvander_matrix(output_size, input_size - 1)
        elif basis_type == "rbf":
            assert input_size == output_size
            B = _rbf_matrix(input_size, sigma=sigma)
        elif basis_type == "haar":
            assert input_size == output_size
            B = _haar_matrix(input_size)
        # assign the constructed transformation matrix and set it to be non-trainable
        self._B.weight.requires_grad = False
        self._B.weight.copy_(B)

    def forward(self, inputs):
        return self._B(inputs)

    @property
    def weight(self):
        return self._B.weight
@gin.configurable
class FC(nn.Module):
    """Fully connected layer with built-in activation and custom weight init."""

    def __init__(self,
                 input_size,
                 output_size,
                 activation=identity,
                 use_bias=True,
                 kernel_initializer=None,
                 kernel_init_gain=1.0,
                 bias_init_value=0.0):
        """A fully connected layer that's also responsible for activation and
        customized weights initialization. An auto gain calculation might depend
        on the activation following the linear layer. Suggest using this wrapper
        module instead of ``nn.Linear`` if you really care about weight std after
        init.

        Args:
            input_size (int): input size
            output_size (int): output size
            activation (torch.nn.functional):
            use_bias (bool): whether use bias
            kernel_initializer (Callable): initializer for the FC layer kernel.
                If none is provided a ``variance_scaling_initializer`` with gain as
                ``kernel_init_gain`` will be used.
            kernel_init_gain (float): a scaling factor (gain) applied to
                the std of kernel init distribution. It will be ignored if
                ``kernel_initializer`` is not None.
            bias_init_value (float): a constant
        """
        # get the argument list with vals
        # (captured first so locals() contains only the ctor args; used by
        # make_parallel() to clone this layer's configuration)
        self._kwargs = copy.deepcopy(locals())
        self._kwargs.pop('self')
        self._kwargs.pop('__class__')
        super(FC, self).__init__()
        self._activation = activation
        self._linear = nn.Linear(input_size, output_size, bias=use_bias)
        self._kernel_initializer = kernel_initializer
        self._kernel_init_gain = kernel_init_gain
        self._bias_init_value = bias_init_value
        self._use_bias = use_bias
        self.reset_parameters()

    def reset_parameters(self):
        # Initialize the kernel (variance-scaled by default) and set the bias
        # to the configured constant.
        if self._kernel_initializer is None:
            variance_scaling_init(
                self._linear.weight.data,
                gain=self._kernel_init_gain,
                nonlinearity=self._activation)
        else:
            self._kernel_initializer(self._linear.weight.data)
        if self._use_bias:
            nn.init.constant_(self._linear.bias.data, self._bias_init_value)

    def forward(self, inputs):
        return self._activation(self._linear(inputs))

    @property
    def weight(self):
        return self._linear.weight

    @property
    def bias(self):
        return self._linear.bias

    def make_parallel(self, n):
        """Create a ``ParallelFC`` using ``n`` replicas of ``self``.
        The initialized layer parameters will be different.
        """
        return ParallelFC(n=n, **self._kwargs)
@gin.configurable
class ParallelFC(nn.Module):
    """``n`` independent FC layers evaluated in one batched matmul."""

    def __init__(self,
                 input_size,
                 output_size,
                 n,
                 activation=identity,
                 use_bias=True,
                 kernel_initializer=None,
                 kernel_init_gain=1.0,
                 bias_init_value=0.0):
        """Parallel FC layer.

        It is equivalent to ``n`` separate FC layers with the same
        ``input_size`` and ``output_size``.

        Args:
            input_size (int): input size
            output_size (int): output size
            n (int): n independent ``FC`` layers
            activation (torch.nn.functional):
            use_bias (bool): whether use bias
            kernel_initializer (Callable): initializer for the FC layer kernel.
                If none is provided a ``variance_scaling_initializer`` with gain
                as ``kernel_init_gain`` will be used.
            kernel_init_gain (float): a scaling factor (gain) applied to
                the std of kernel init distribution. It will be ignored if
                ``kernel_initializer`` is not None.
            bias_init_value (float): a constant
        """
        super().__init__()
        self._activation = activation
        self._weight = nn.Parameter(torch.Tensor(n, output_size, input_size))
        if use_bias:
            self._bias = nn.Parameter(torch.Tensor(n, output_size))
        else:
            self._bias = None
        # Each replica is initialized independently so the n layers start
        # with different parameters.
        for i in range(n):
            if kernel_initializer is None:
                variance_scaling_init(
                    self._weight.data[i],
                    gain=kernel_init_gain,
                    nonlinearity=self._activation)
            else:
                kernel_initializer(self._weight.data[i])
        if use_bias:
            nn.init.constant_(self._bias.data, bias_init_value)

    def forward(self, inputs):
        """Forward

        Args:
            inputs (torch.Tensor): with shape ``[B, n, input_size]`` or ``[B, input_size]``
        Returns:
            torch.Tensor with shape ``[B, n, output_size]``
        """
        n, k, l = self._weight.shape
        if inputs.ndim == 2:
            # 2D input is shared by all n replicas
            assert inputs.shape[1] == l, (
                "inputs has wrong shape %s. Expecting (B, %d)" % (inputs.shape,
                                                                  l))
            inputs = inputs.unsqueeze(0).expand(n, *inputs.shape)
        elif inputs.ndim == 3:
            # 3D input carries one slice per replica
            assert (inputs.shape[1] == n and inputs.shape[2] == l), (
                "inputs has wrong shape %s. Expecting (B, %d, %d)" %
                (inputs.shape, n, l))
            inputs = inputs.transpose(0, 1)  # [n, B, l]
        else:
            raise ValueError("Wrong inputs.ndim=%d" % inputs.ndim)
        if self.bias is not None:
            # batched bias + matmul in one fused op
            y = torch.baddbmm(
                self._bias.unsqueeze(1), inputs,
                self.weight.transpose(1, 2))  # [n, B, k]
        else:
            y = torch.bmm(inputs, self._weight.transpose(1, 2))  # [n, B, k]
        y = y.transpose(0, 1)  # [B, n, k]
        return self._activation(y)

    @property
    def weight(self):
        """Get the weight Tensor.

        Returns:
            Tensor: with shape (n, output_size, input_size). ``weight[i]`` is
                the weight for the i-th FC layer. ``weight[i]`` can be used for
                ``FC`` layer with the same ``input_size`` and ``output_size``
        """
        return self._weight

    @property
    def bias(self):
        """Get the bias Tensor.

        Returns:
            Tensor: with shape (n, output_size). ``bias[i]`` is the bias for the
                i-th FC layer. ``bias[i]`` can be used for ``FC`` layer with
                the same ``input_size`` and ``output_size``
        """
        return self._bias
@gin.configurable
class Conv2D(nn.Module):
    """2D convolution with built-in activation and custom weight init.

    Prefer this over plain ``nn.Conv2d`` when the post-init weight std
    matters: the default initializer scales the variance by
    ``kernel_init_gain``, taking the follow-up activation into account.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 activation=torch.relu_,
                 strides=1,
                 padding=0,
                 use_bias=True,
                 kernel_initializer=None,
                 kernel_init_gain=1.0,
                 bias_init_value=0.0):
        """Build the conv layer and initialize its parameters.

        Args:
            in_channels (int): channels of the input image
            out_channels (int): channels of the output image
            kernel_size (int or tuple):
            activation (torch.nn.functional):
            strides (int or tuple):
            padding (int or tuple):
            use_bias (bool):
            kernel_initializer (Callable): initializer for the conv kernel;
                when None, ``variance_scaling_init`` with ``kernel_init_gain``
                is used instead.
            kernel_init_gain (float): a scaling factor (gain) applied to the
                std of kernel init distribution; ignored when
                ``kernel_initializer`` is given.
            bias_init_value (float): a constant
        """
        super(Conv2D, self).__init__()
        self._activation = activation
        self._conv2d = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=strides,
            padding=padding,
            bias=use_bias)
        if kernel_initializer is not None:
            kernel_initializer(self._conv2d.weight.data)
        else:
            variance_scaling_init(
                self._conv2d.weight.data,
                gain=kernel_init_gain,
                nonlinearity=self._activation)
        if use_bias:
            nn.init.constant_(self._conv2d.bias.data, bias_init_value)

    def forward(self, img):
        return self._activation(self._conv2d(img))

    @property
    def weight(self):
        return self._conv2d.weight

    @property
    def bias(self):
        return self._conv2d.bias
@gin.configurable
class ParallelConv2D(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
n,
activation=torch.relu_,
strides=1,
padding=0,
use_bias=True,
kernel_initializer=None,
kernel_init_gain=1.0,
bias_init_value=0.0):
"""A parallel 2D Conv layer that can be used to perform n independent
2D convolutions in parallel.
It is equivalent to ``n`` separate ``Conv2D`` layers with the same
``in_channels`` and ``out_channels``.
Args:
in_channels (int): channels of the input image
out_channels (int): channels of the output image
kernel_size (int or tuple):
n (int): n independent ``Conv2D`` layers
activation (torch.nn.functional):
strides (int or tuple):
padding (int or tuple):
use_bias (bool):
kernel_initializer (Callable): initializer for the conv layer kernel.
If None is provided a ``variance_scaling_initializer`` with gain
as ``kernel_init_gain`` will be used.
kernel_init_gain (float): a scaling factor (gain) applied to the
std of kernel init distribution. It will be ignored if
``kernel_initializer`` is not None.
bias_init_value (float): a constant
"""
super(ParallelConv2D, self).__init__()
self._activation = activation
self._n = n
self._in_channels = in_channels
self._out_channels = out_channels
self._kernel_size = common.tuplify2d(kernel_size)
self._conv2d = nn.Conv2d(
in_channels * n,
out_channels * n,
kernel_size,
groups=n,
stride=strides,
padding=padding,
bias=use_bias)
for i in range(n):
if kernel_initializer is None:
variance_scaling_init(
self._conv2d.weight.data[i * out_channels:(i + 1) *
out_channels],
gain=kernel_init_gain,
nonlinearity=self._activation)
else:
kernel_initializer(
self._conv2d.weight.data[i * out_channels:(i + 1) *
out_channels])
# [n*C', C, kernel_size, kernel_size]->[n, C', C, kernel_size, kernel_size]
self._weight = self._conv2d.weight.view(
self._n, self._out_channels, self._in_channels,
self._kernel_size[0], self._kernel_size[1])
if use_bias:
nn.init.constant_(self._conv2d.bias.data, bias_init_value)
# [n*C']->[n, C']
self._bias = self._conv2d.bias.view(self._n, self._out_channels)
else:
self._bias = None
def forward(self, img):
"""Forward
Args:
img (torch.Tensor): with shape ``[B, C, H, W]``
or ``[B, n, C, H, W]``
where the meaning of the symbols are:
- ``B``: batch size
- ``n``: number of replicas
- ``C``: number of channels
- ``H``: image height
- ``W``: image width.
When the shape of img is ``[B, C, H, W]``, all the n 2D Conv
operations will take img as the same shared input.
When the shape of img is ``[B, n, C, H, W]``, each 2D Conv operator
will have its own input data by slicing img.
Returns:
torch.Tensor with shape ``[B, n, C', H', W']``
where the meaning of the symbols are:
- ``B``: batch
- ``n``: number of replicas
- ``C'``: number of output channels
- ``H'``: output height
- ``W'``: output width
"""
if img.ndim == 4:
# the shared input case
assert img.shape[1] == self._in_channels, (
"Input img has wrong shape %s. Expecting (B, %d, H, W)" %
(img.shape, self._in_channels))
img = img.unsqueeze(1).expand(img.shape[0], self._n,
*img.shape[1:])
elif img.ndim == 5:
# the non-shared case
assert (
img.shape[1] == self._n
and img.shape[2] == self._in_channels), (
"Input img has wrong shape %s. Expecting (B, %d, %d, H, W)"
% (img.shape, self._n, self._in_channels))
else:
raise ValueError("Wrong img.ndim=%d" % img.ndim)
# merge replica and channels
img = img.reshape(img.shape[0], img.shape[1] * img.shape[2],
*img.shape[3:])
res = self._activation(self._conv2d(img))
# reshape back: [B, n*C', H', W'] -> [B, n, C', H', W']
res = res.reshape(res.shape[0], self._n, self._out_channels,
*res.shape[2:])
return res
@property
def weight(self):
    """The conv weight viewed as ``[n, C', C, kH, kW]`` (one slice per replica)."""
    return self._weight
@property
def bias(self):
    """The conv bias viewed as ``[n, C']``, or ``None`` when ``use_bias=False``."""
    return self._bias
@gin.configurable
class ConvTranspose2D(nn.Module):
    """A 2D transposed-conv layer bundling activation and weight init.

    Prefer this wrapper over a bare ``nn.ConvTranspose2d`` when the weight
    std after initialization matters: the automatic gain calculation takes
    the following activation into account.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 activation=torch.relu_,
                 strides=1,
                 padding=0,
                 use_bias=True,
                 kernel_initializer=None,
                 kernel_init_gain=1.0,
                 bias_init_value=0.0):
        """
        Args:
            in_channels (int): channels of the input image
            out_channels (int): channels of the output image
            kernel_size (int or tuple):
            activation (torch.nn.functional):
            strides (int or tuple):
            padding (int or tuple):
            use_bias (bool):
            kernel_initializer (Callable): initializer for the conv_trans
                layer. If None, a variance_scaling_initializer with gain
                ``kernel_init_gain`` is used.
            kernel_init_gain (float): scaling factor (gain) applied to the
                std of the kernel init distribution; ignored when
                ``kernel_initializer`` is given.
            bias_init_value (float): a constant
        """
        super(ConvTranspose2D, self).__init__()
        self._activation = activation
        self._conv_trans2d = nn.ConvTranspose2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=strides,
            padding=padding,
            bias=use_bias)
        if kernel_initializer is not None:
            kernel_initializer(self._conv_trans2d.weight.data)
        else:
            # transposed=True: the weight layout of ConvTranspose2d is
            # [in, out, kH, kW], which affects the fan computation.
            variance_scaling_init(
                self._conv_trans2d.weight.data,
                gain=kernel_init_gain,
                nonlinearity=self._activation,
                transposed=True)
        if use_bias:
            nn.init.constant_(self._conv_trans2d.bias.data, bias_init_value)

    def forward(self, img):
        """Apply the transposed conv followed by the activation."""
        y = self._conv_trans2d(img)
        return self._activation(y)

    @property
    def weight(self):
        """The underlying ``nn.ConvTranspose2d`` weight."""
        return self._conv_trans2d.weight

    @property
    def bias(self):
        """The underlying ``nn.ConvTranspose2d`` bias (``None`` if unused)."""
        return self._conv_trans2d.bias
@gin.configurable
class ParallelConvTranspose2D(nn.Module):
    """n independent 2D transposed convolutions computed in parallel.

    Implemented as a single grouped ``nn.ConvTranspose2d`` with ``groups=n``,
    so one kernel launch performs all n transposed convolutions.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 n,
                 activation=torch.relu_,
                 strides=1,
                 padding=0,
                 use_bias=True,
                 kernel_initializer=None,
                 kernel_init_gain=1.0,
                 bias_init_value=0.0):
        """A parallel ConvTranspose2D layer that can be used to perform n
        independent 2D transposed convolutions in parallel.

        Args:
            in_channels (int): channels of the input image
            out_channels (int): channels of the output image
            kernel_size (int or tuple):
            n (int): n independent ``ConvTranspose2D`` layers
            activation (torch.nn.functional):
            strides (int or tuple):
            padding (int or tuple):
            use_bias (bool):
            kernel_initializer (Callable): initializer for the conv_trans layer.
                If None is provided a ``variance_scaling_initializer`` with gain
                as ``kernel_init_gain`` will be used.
            kernel_init_gain (float): a scaling factor (gain) applied to the
                std of kernel init distribution. It will be ignored if
                ``kernel_initializer`` is not None.
            bias_init_value (float): a constant
        """
        super(ParallelConvTranspose2D, self).__init__()
        self._activation = activation
        self._n = n
        self._in_channels = in_channels
        self._out_channels = out_channels
        self._kernel_size = common.tuplify2d(kernel_size)
        self._conv_trans2d = nn.ConvTranspose2d(
            in_channels * n,
            out_channels * n,
            kernel_size,
            groups=n,
            stride=strides,
            padding=padding,
            bias=use_bias)
        # Initialize each replica's weight slice independently.
        for i in range(n):
            if kernel_initializer is None:
                # Fix: pass ``transposed=True`` so the fan-in/fan-out used by
                # the variance scaling matches the transposed weight layout
                # [in_channels, out_channels, kH, kW] — consistent with the
                # non-parallel ``ConvTranspose2D`` above, which already does so.
                variance_scaling_init(
                    self._conv_trans2d.weight.data[i * in_channels:(i + 1) *
                                                   in_channels],
                    gain=kernel_init_gain,
                    nonlinearity=self._activation,
                    transposed=True)
            else:
                kernel_initializer(
                    self._conv_trans2d.weight.data[i * in_channels:(i + 1) *
                                                   in_channels])
        # [n*C, C', kernel_size, kernel_size]->[n, C, C', kernel_size, kernel_size]
        self._weight = self._conv_trans2d.weight.view(
            self._n, self._in_channels, self._out_channels,
            self._kernel_size[0], self._kernel_size[1])
        if use_bias:
            nn.init.constant_(self._conv_trans2d.bias.data, bias_init_value)
            # [n*C']->[n, C']  (bias has one entry per *output* channel)
            self._bias = self._conv_trans2d.bias.view(self._n,
                                                      self._out_channels)
        else:
            self._bias = None

    def forward(self, img):
        """Run the n parallel transposed convolutions.

        Args:
            img (torch.Tensor): with shape ``[B, C, H, W]``
                or ``[B, n, C, H, W]``
                where the meaning of the symbols are:

                - ``B``: batch size
                - ``n``: number of replicas
                - ``C``: number of channels
                - ``H``: image height
                - ``W``: image width.

                When the shape of img is ``[B, C, H, W]``, all the n transposed
                2D Conv operations will take img as the same shared input.
                When the shape of img is ``[B, n, C, H, W]``, each transposed
                2D Conv operator will have its own input data by slicing img.

        Returns:
            torch.Tensor with shape ``[B, n, C', H', W']``
            where the meaning of the symbols are:

            - ``B``: batch
            - ``n``: number of replicas
            - ``C'``: number of output channels
            - ``H'``: output height
            - ``W'``: output width

        Raises:
            ValueError: if ``img.ndim`` is neither 4 nor 5.
        """
        if img.ndim == 4:
            # the shared input case; ``expand`` is a view, no copy
            assert img.shape[1] == self._in_channels, (
                "Input img has wrong shape %s. Expecting (B, %d, H, W)" %
                (img.shape, self._in_channels))
            img = img.unsqueeze(1).expand(img.shape[0], self._n,
                                          *img.shape[1:])
        elif img.ndim == 5:
            # the non-shared case
            assert (
                img.shape[1] == self._n
                and img.shape[2] == self._in_channels), (
                    "Input img has wrong shape %s. Expecting (B, %d, %d, H, W)"
                    % (img.shape, self._n, self._in_channels))
        else:
            raise ValueError("Wrong img.ndim=%d" % img.ndim)
        # merge replica and channels: [B, n, C, H, W] -> [B, n*C, H, W]
        img = img.reshape(img.shape[0], img.shape[1] * img.shape[2],
                          *img.shape[3:])
        res = self._activation(self._conv_trans2d(img))
        # reshape back: [B, n*C', H', W'] -> [B, n, C', H', W']
        res = res.reshape(res.shape[0], self._n, self._out_channels,
                          *res.shape[2:])
        return res

    @property
    def weight(self):
        """The weight viewed as ``[n, C, C', kH, kW]`` (one slice per replica)."""
        return self._weight

    @property
    def bias(self):
        """The bias viewed as ``[n, C']``, or ``None`` when ``use_bias=False``."""
        return self._bias
class Reshape(nn.Module):
    """Layer that reshapes each sample to a fixed target shape.

    The output is ``x`` reshaped to ``(B, *shape)`` where ``B`` is
    ``x.shape[0]`` (the batch dimension is preserved).
    """

    def __init__(self, shape):
        """
        Args:
            shape (tuple): desired shape not including the batch dimension.
        """
        super().__init__()
        self._shape = shape

    def forward(self, x):
        batch = x.shape[0]
        return x.reshape(batch, *self._shape)
def _tuplify2d(x):
if isinstance(x, tuple):
assert len(x) == 2
return x
return (x, x)
def _conv_transpose_2d(in_channels,
out_channels,
kernel_size,
stride=1,
padding=0):
# need output_padding so that output_size is stride * input_size
# See https://pytorch.org/docs/stable/nn.html#torch.nn.ConvTranspose2d
output_padding = stride + 2 * padding - kernel_size
return nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
output_padding=output_padding)
@gin.configurable(whitelist=['v1_5', 'with_batch_normalization'])
class BottleneckBlock(nn.Module):
    """Bottleneck residual block for ResNet.

    Two slightly different layouts are supported:

    * v1: stride at the first 1x1 convolution, as in the original ResNet
      paper `Deep residual learning for image recognition
      <https://arxiv.org/abs/1512.03385>`_.
    * v1.5: stride for downsampling at the 3x3 convolution; known as
      ResNet V1.5, it improves accuracy according to
      `<https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch>`_.
    """

    def __init__(self,
                 in_channels,
                 kernel_size,
                 filters,
                 stride,
                 transpose=False,
                 v1_5=True,
                 with_batch_normalization=True):
        """
        Args:
            kernel_size (int): kernel size of the middle layer on the main path
            filters (int): the filters of the 3 layers on the main path
            stride (int): stride for this block.
            transpose (bool): use ``Conv2D`` or ``Conv2DTranspose``. If two
                BottleneckBlock layers ``L`` and ``LT`` are constructed with
                the same arguments except ``transpose``, it is guaranteed that
                ``LT(L(x)).shape == x.shape`` if ``x.shape[-2:]`` can be
                divided by ``stride``.
            v1_5 (bool): whether to use the ResNet V1.5 structure
            with_batch_normalization (bool): whether to include batch
                normalization. Note that standard ResNet uses it.
        Return:
            Output tensor for the block
        """
        super().__init__()
        ch1, ch2, ch3 = filters
        conv_fn = _conv_transpose_2d if transpose else nn.Conv2d
        pad = (kernel_size - 1) // 2

        # NOTE: layers are created and initialized in the same order as
        # before so the RNG stream (and hence the initial weights for a
        # fixed seed) is unchanged.
        if v1_5:
            conv1 = conv_fn(in_channels, ch1, 1)
            conv2 = conv_fn(ch1, ch2, kernel_size, stride, pad)
        else:
            conv1 = conv_fn(in_channels, ch1, 1, stride)
            conv2 = conv_fn(ch1, ch2, kernel_size, 1, pad)
        nn.init.kaiming_normal_(conv1.weight.data)
        nn.init.zeros_(conv1.bias.data)
        nn.init.kaiming_normal_(conv2.weight.data)
        nn.init.zeros_(conv2.bias.data)
        conv3 = conv_fn(ch2, ch3, 1)
        nn.init.kaiming_normal_(conv3.weight.data)
        nn.init.zeros_(conv3.bias.data)
        # 1x1 projection so the shortcut matches the main path's shape.
        proj = conv_fn(in_channels, ch3, 1, stride)
        nn.init.kaiming_normal_(proj.weight.data)
        nn.init.zeros_(proj.bias.data)

        act = nn.ReLU(inplace=True)
        if with_batch_normalization:
            self._core_layers = nn.Sequential(
                conv1, nn.BatchNorm2d(ch1), act, conv2, nn.BatchNorm2d(ch2),
                act, conv3, nn.BatchNorm2d(ch3))
            self._shortcut_layers = nn.Sequential(proj, nn.BatchNorm2d(ch3))
        else:
            self._core_layers = nn.Sequential(conv1, act, conv2, act, conv3)
            self._shortcut_layers = proj

    def forward(self, inputs):
        main = self._core_layers(inputs)
        residual = self._shortcut_layers(inputs)
        return torch.relu_(main + residual)

    def calc_output_shape(self, input_shape):
        """Return the output shape (without batch dim) for ``input_shape``."""
        dummy = torch.zeros(1, *input_shape)
        return self.forward(dummy).shape[1:]
|
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
import time
import csv
# SaveTable
def SaveTable(DataTable):
    """Write scraped course-table rows to ``mrudTables.csv`` (UTF-8).

    Args:
        DataTable: iterable of rows, each a list of cell strings.

    Bug fixed: the original used ``csv.DictWriter`` and indexed
    ``fieldnames[17]`` although the header only has 17 entries (indices
    0-16), raising ``IndexError`` for every row; the duplicated empty
    header names also collapse when used as dict keys. Writing the header
    and the rows positionally with ``csv.writer`` avoids both problems
    while producing the same header line.
    """
    with open('mrudTables.csv', 'w', encoding='utf-8', newline='') as csvfile:
        # Column headers (Persian course-table captions), kept verbatim.
        fieldnames = ['مشخصه', 'نام درس', 'مقطع کلاس', 'نظری', 'نوع عملی', 'جنسیت', 'گروه کلاس',
                      'باقي مانده', 'ساعت کلاس', 'sساعت امتحان', 'ت امتحان', 'نام استاد', 'گروه بندی', '', '', "", ""]
        writer = csv.writer(csvfile)
        writer.writerow(fieldnames)
        for row in DataTable:
            # Positional write: tolerates rows shorter/longer than the header.
            writer.writerow(row)
# ---- scraper configuration ----
SubjectCorse = 'معماری'  # search keyword ("architecture"); also hard-coded in scripts[3]
table = []  # accumulated table rows
i = 1
tempHash = hash('start')
MaxLen = 10
url = 'http://edu.kiau.ac.ir'
Mode = 0  # index into `scripts` for the first navigation step
user = '962097617'
pasw = '<PASSWORD>'  # NOTE(review): placeholder credential — do not commit real secrets
# Configure the browser driver
driver = webdriver.Firefox()
# Navigate to the login page
driver.get(url)
# Sanity check the page loaded: title is "registration system" in Persian
assert "سیستم ثبت نام" in driver.title
# Fill in username and password
driver.find_element_by_name("txtUserName").clear()
driver.find_element_by_name("txtUserName").send_keys(user)
driver.find_element_by_name("txtPassword").clear()
driver.find_element_by_name("txtPassword").send_keys(pasw)
# CAPTCHA must be solved manually by the operator
Captcha = input("\n\n\n\n PLS ENTER CAPTCHA NUMBER: ")
# Type the captcha digits into the form
driver.find_element_by_name("texttasvir").send_keys(Captcha)
# Submit the login form
driver.find_element_by_name("LoginButton0").click()
# /////////////////////////HOME PAGE////////////////////////// #
"""********************
Start Scipts
********************"""
# JavaScript snippets executed in the page context
scripts = [
    'location.replace("http://edu.kiau.ac.ir/SelectCourse.aspx?h=2")',  # 1: list of offered courses
    "__doPostBack('ctl00$ContentPlaceHolder1$grdCourseList','Page$",  # 2: change page number
    "$('#ctl00_ContentPlaceHolder1_Button6').click()",  # 3: go to the list page
    "$('#ctl00_ContentPlaceHolder1_TextBox1').val('معماری')",
    "$('#ctl00_ContentPlaceHolder1_btnSave4').click()",
]
"""********************
Stop Scipts
********************"""
# Go to the add-course page
driver.execute_script(scripts[Mode])
time.sleep(2)
# Go to the list page
driver.execute_script(scripts[2])
time.sleep(2)
# Type the search keyword into the filter box
driver.execute_script(scripts[3])
# NOTE(review): no break/termination condition — the loop keeps re-scraping
# (and re-appending) the same rows until an exception occurs, then exits.
while 1 :
    try:
        # Click the search button
        driver.execute_script(scripts[4])
        # Wait for the results grid to load
        driver.implicitly_wait(5)
        # Collect both row styles of the results grid
        rows = driver.find_elements_by_css_selector('tr .GridViewRow_listara')
        rows.extend(driver.find_elements_by_css_selector('tr .GridViewAlternatingRow'))
        for row in rows :
            colsTemp = row.find_elements_by_css_selector("td")
            cols = list(map(lambda x: x.text , colsTemp))
            print(cols)
            table.append(cols)
    except:
        # NOTE(review): bare except hides the real error; the message typo
        # "Exeption" is a runtime string and is left unchanged here.
        print('Exeption')
        exit()
|
from collections import defaultdict
import numpy as np
import unicodedata
import torch
from torch.utils.data import Dataset, DataLoader
class Dictionary(object):
    """Bidirectional token <-> index vocabulary.

    Index 0 is reserved for the ``<NULL>`` token. Every string key is
    NFD-normalized before lookup or insertion, so lookups are insensitive
    to Unicode composition differences.
    """

    NULL = '<NULL>'
    NO_OP = 'NO_OP'
    # First index available beyond the special tokens (0: NULL).
    # NOTE(review): START's exact use is not visible here — confirm at callers.
    START = 2

    @staticmethod
    def normalize(token):
        """Return the NFD-normalized form of ``token``."""
        return unicodedata.normalize('NFD', token)

    def __init__(self, tok2ind=None, ind2tok=None):
        """
        Args:
            tok2ind (dict | None): existing token->index map, or None to
                start fresh with only the NULL token.
            ind2tok (dict | None): existing index->token map, or None.
        """
        # `is None` rather than `== None` (identity check for the sentinel).
        if tok2ind is None:
            self.tok2ind = {self.NULL: 0}
        else:
            self.tok2ind = tok2ind
        if ind2tok is None:
            self.ind2tok = {0: self.NULL}
        else:
            self.ind2tok = ind2tok

    def __len__(self):
        return len(self.tok2ind)

    def __iter__(self):
        return iter(self.tok2ind)

    def __contains__(self, key):
        """Membership by index (int) or normalized token (str)."""
        if isinstance(key, int):
            return key in self.ind2tok
        if isinstance(key, str):
            return self.normalize(key) in self.tok2ind
        # Previously fell through returning None; explicit False is clearer
        # (and equivalent under `in`, which coerces to bool).
        return False

    def __getitem__(self, key):
        """Map index -> token (NULL if missing) or token -> index (NULL's index if missing)."""
        if isinstance(key, int):
            return self.ind2tok.get(key, self.NULL)
        if isinstance(key, str):
            return self.tok2ind.get(self.normalize(key),
                                    self.tok2ind.get(self.NULL))

    def __setitem__(self, key, item):
        """Set one direction of the mapping; raises on mismatched types."""
        if isinstance(key, int) and isinstance(item, str):
            self.ind2tok[key] = item
        elif isinstance(key, str) and isinstance(item, int):
            self.tok2ind[key] = item
        else:
            raise RuntimeError('Invalid (key, item) types.')

    def add(self, token):
        """Add ``token`` (normalized) with the next free index, if new."""
        token = self.normalize(token)
        if token not in self.tok2ind:
            index = len(self.tok2ind)
            self.tok2ind[token] = index
            self.ind2tok[index] = token

    def tokens(self):
        """Get dictionary tokens.

        Return all the words indexed by this dictionary, except for special
        tokens.
        """
        return [k for k in self.tok2ind.keys()
                if k not in {'<NULL>', 'NO_OP'}]
class KB():
    """In-memory knowledge base of (entity, relation, entity) triples.

    Loads a tab-separated graph file and maintains several indexed views of
    the same triples (by head entity, by relation, and by (head, relation)
    pair). Entities and relations are registered in the provided
    vocabularies and stored as integer ids.
    """
    def __init__(self, graph_file, e_vocab =None, r_vocab = None):
        """
        Args:
            graph_file: path to a TSV file with one ``e1<TAB>r<TAB>e2`` per line.
            e_vocab: entity vocabulary supporting ``add`` and ``[]`` lookup.
            r_vocab: relation vocabulary supporting ``add`` and ``[]`` lookup.
        """
        self.e1_view = defaultdict(list)  # e1 -> [(r, e2), ...]
        self.er_view = defaultdict(set)  # (e1, r) -> {e2, ...}
        self.kb = []  # all triples as (e1, r, e2) id tuples
        self.r_view = defaultdict(list)  # r -> [(e1, e2), ...]
        self.e_vocab = e_vocab
        self.r_vocab =r_vocab
        self.sort_by_page_rank = False
        with open(graph_file) as tsv:
            for line in tsv:
                try:
                    e1,r,e2 = line.strip().split("\t")
                except:
                    # NOTE(review): a malformed line drops into pdb instead of
                    # raising — debugging aid; consider a real error instead.
                    import pdb
                    pdb.set_trace()
                # Register raw strings, then convert them to integer ids.
                self.e_vocab.add(e1)
                self.e_vocab.add(e2)
                self.r_vocab.add(r)
                e1 = self.e_vocab[e1]
                e2 = self.e_vocab[e2]
                r = self.r_vocab[r]
                self.r_view[int(r)].append((int(e1), int(e2)))
                self.e1_view[int(e1)].append((int(r), int(e2)))
                self.er_view[(int(e1), int(r))].add(int(e2)) #used for masking out
                self.kb.append((e1,r,e2))
# with open(args.train) as tsv:
# for line in tsv:
# e1, r, e2 = line.strip().split("\t")
# self.e_vocab.add(e1)
# self.e_vocab.add(e2)
# self.r_vocab.add(r)
# e1 = self.e_vocab[e1]
# e2 = self.e_vocab[e2]
# r = self.r_vocab[r]
# self.er_view[(int(e1), int(r))].add(int(e2))
# with open(args.dev) as tsv:
# for line in tsv:
# e1, r, e2 = line.strip().split("\t")
# self.e_vocab.add(e1)
# self.e_vocab.add(e2)
# self.r_vocab.add(r)
# e1 = self.e_vocab[e1]
# e2 = self.e_vocab[e2]
# r = self.r_vocab[r]
# self.er_view[(int(e1), int(r))].add(int(e2)) # used for masking out
# with open(args.test) as tsv:
# for line in tsv:
# e1, r, e2 = line.strip().split("\t")
# self.e_vocab.add(e1)
# self.e_vocab.add(e2)
# self.r_vocab.add(r)
# e1 = self.e_vocab[e1]
# e2 = self.e_vocab[e2]
# r = self.r_vocab[r]
# self.er_view[(int(e1), int(r))].add(int(e2)) # used for masking out
# if args.page_rank != None:
# with open(args.page_rank) as pgrnk:
# self.sort_by_page_rank = True
# self.page_rank[0] = 0.0
# for line in pgrnk:
# e, score = line.strip().split()
# self.page_rank[self.e_vocab[e]] = float(score[1:])
#
# if args.rank_from_candidates:
# import os
# import json
# with open(os.path.join("datasets", args.dataset, "rel2candidates.json")) as rel2candidatejson:
# candidates_ = json.load(rel2candidatejson)
# self.candidates = {}
# for r, cans in candidates_.items():
# self.candidates[self.r_vocab[r]] = set([self.e_vocab[c] for c in cans])
#
#
#
#
#
#
#
# def get_next_e_r(self, curr_es):
#
# next_e = np.zeros([len(curr_es), self.max_num_actions])
# next_r = np.zeros([len(curr_es), self.max_num_actions])
# mask = np.ones([len(curr_es), self.max_num_actions])
# for batch_count, curr_e in enumerate(curr_es):
# l = self.e1_view[curr_e]
#
# for action_count, tup in enumerate(l):
# if action_count >= self.max_num_actions:
# break
# r , e = tup
# next_e[batch_count, action_count] = e
# next_r[batch_count, action_count] = r
# mask[batch_count, action_count] = 0
# return next_r, next_e, mask
# def get_next_e_r_masked(self, curr_es, e1_b, e2_b, qr_b, step_no):
#
# next_e = np.zeros([len(curr_es), self.max_num_actions])
# next_r = np.zeros([len(curr_es), self.max_num_actions])
# mask = np.ones([len(curr_es), self.max_num_actions])
#
# for batch_count, curr_e in enumerate(curr_es):
# e2 = e2_b[batch_count]
# qr = qr_b[batch_count]
# e1 = e1_b[batch_count]
# outgoing_edges = self.e1_view[curr_e]
#
#
#
# for action_count, tup in enumerate(outgoing_edges):
# if action_count >= self.max_num_actions:
# break
# r , e = tup
# if step_no == self.args.num_steps -1 and e in self.er_view[(e1,qr)] and e != e2:
# continue
# if curr_e == e1 and qr == r and e2 == e:
# continue
# next_e[batch_count, action_count] = e
# next_r[batch_count, action_count] = r
# mask[batch_count, action_count] = 0
# if self.sort_by_page_rank:
# sorted_idx = np.argsort([self.page_rank[entity] for entity in next_e[batch_count, :]])
# next_e[batch_count, ] = next_e[batch_count, sorted_idx]
# next_r[batch_count, ] = next_r[batch_count, sorted_idx]
# mask[batch_count, ] = mask[batch_count, sorted_idx]
#
# # if step_no == 1 and curr_e == 4:
# # import pdb
# # pdb.set_trace()
# # next_e[batch_count, :] = next_e[batch_count, random_shufffle]
# # next_r[batch_count, :] = next_r[batch_count, random_shufffle]
# # mask[batch_count, :] = mask[batch_count, random_shufffle]
# # if 50 == e1:
# # import pdb
# # pdb.set_trace()
# # import pdb
# # pdb.set_trace()
# return next_r, next_e, mask
# def follow_path(self, start_entities, relations):
# current_entities = start_entities
# for step_no, next_relations in enumerate(relations):
# next_entities = []
# next_relations = next_relations.cpu().numpy().tolist()
# for e, r in zip(current_entities, next_relations):
# next_es = self.er_view[(e,r)]
# next_es.append(0)
# next_e = np.random.choice(len(next_es), 1)
# next_e = next_es[next_e[0]]
# next_entities.append(next_e)
# current_entities = next_entities
# return current_entities
class KB_dataset(Dataset):
    """Triple dataset backed by a TSV file of ``e1<TAB>r<TAB>e2`` lines.

    Each raw triple is converted to integer ids through the given
    vocabularies. ``batchify`` replicates every example a fixed number of
    times (rollouts during training, beam size during evaluation).
    """

    def __init__(self, args, file, e_vocab, r_vocab, train=True, filter = False, filtering_function = None):
        """
        Args:
            args: options object providing ``max_num_actions``,
                ``num_rollouts`` and ``beam_size``.
            file: path to the TSV triple file.
            e_vocab, r_vocab: vocabularies mapping strings to ids.
            train: choose ``num_rollouts`` (True) or ``beam_size`` (False).
            filter: when True, keep only triples accepted by
                ``filtering_function``.
            filtering_function: callable(e1, r, e2) -> bool.
        """
        self.args = args
        self.e_vocab = e_vocab
        self.r_vocab = r_vocab
        self.max_num_actions = args.max_num_actions
        self.rollouts = self.args.num_rollouts if train else self.args.beam_size
        self.kb = []
        with open(file) as tsv:
            for line in tsv:
                head, rel, tail = line.strip().split("\t")
                if filter and not filtering_function(head, rel, tail):
                    continue
                self.kb.append(
                    (self.e_vocab[head], self.r_vocab[rel], self.e_vocab[tail]))

    def __len__(self):
        return len(self.kb)

    def __getitem__(self, idx):
        return self.kb[idx]

    def batchify(self, batch):
        """Replicate each example ``self.rollouts`` times and stack as LongTensors."""
        reps = self.rollouts
        e1 = torch.LongTensor([ex[0] for ex in batch for _ in range(reps)])
        r = torch.LongTensor([ex[1] for ex in batch for _ in range(reps)])
        e2 = torch.LongTensor([ex[2] for ex in batch for _ in range(reps)])
        return e1, r, e2
|
from typing import Iterable, List
from spacy.language import Language
from spacy.tokens import Doc, Span, Token
__all__ = [
"bunsetu_available",
"bunsetu_span",
"bunsetu_spans",
"bunsetu_phrase_span",
"bunsetu_phrase_spans",
"bunsetu_head_list",
"bunsetu_head_tokens",
"bunsetu_bi_labels",
"bunsetu_position_types",
"BunsetuRecognizer",
"append_bunsetu_head_dep_suffix",
"BUNSETU_HEAD_SUFFIX",
"PHRASE_RELATIONS",
"POS_PHRASE_MAP",
]
# Suffix appended to dependency labels to mark bunsetu head tokens.
BUNSETU_HEAD_SUFFIX = "_bunsetu"
# Dependency relations followed when extracting the phrase core of a bunsetu.
PHRASE_RELATIONS = ("compound", "nummod", "nmod")
# Maps the bunsetu root's POS tag to the span's phrase label.
POS_PHRASE_MAP = {
    "NOUN": "NP",
    "NUM": "NP",
    "PRON": "NP",
    "PROPN": "NP",
    "VERB": "VP",
    "ADJ": "ADJP",
    "ADV": "ADVP",
    "CCONJ": "CCONJP",
}
def bunsetu_available(span: Span):
    """Return True if bunsetu analysis has been attached to the span's doc."""
    data = span.doc.user_data
    return "bunsetu_heads" in data
def bunsetu_head_list(span: Span) -> Iterable[int]:
    """Indices of bunsetu head tokens, relative to ``span``'s start."""
    heads = span.doc.user_data["bunsetu_heads"]
    if isinstance(span, Doc):
        return heads
    offset, stop = span.start, span.end
    return [i - offset for i in heads if offset <= i < stop]
def bunsetu_head_tokens(span: Span) -> Iterable[Token]:
    """Bunsetu head tokens that fall inside ``span``."""
    heads = span.doc.user_data["bunsetu_heads"]
    if isinstance(span, Doc):
        offset, stop = 0, len(span)
    else:
        offset, stop = span.start, span.end
    return [span[i - offset] for i in heads if offset <= i < stop]
def bunsetu_spans(span: Span) -> Iterable[Span]:
    """Bunsetu spans whose head tokens fall inside ``span``."""
    return [bunsetu_span(head_token)
            for head_token in bunsetu_head_tokens(span)]
def bunsetu_span(token: Token) -> Span:
    """The bunsetu (base-phrase) span containing ``token``.

    A bunsetu runs from a "B" BI label up to (not including) the next "B".
    The span is labeled from its root's POS via ``POS_PHRASE_MAP``.
    """
    doc = token.doc
    labels = bunsetu_bi_labels(doc)
    # Walk left to the nearest "B" (start of this bunsetu); clamp at 0.
    start = token.i
    while start > 0 and labels[start] != "B":
        start -= 1
    # Walk right to the next "B" (start of the following bunsetu);
    # clamp at the end of the doc.
    end = token.i + 1
    doc_len = len(doc)
    while end < doc_len and labels[end] != "B":
        end += 1
    phrase_label = POS_PHRASE_MAP.get(doc[start:end].root.pos_, "")
    return Span(doc, start=start, end=end, label=phrase_label)
def bunsetu_phrase_spans(span: Span, phrase_relations: Iterable[str] = PHRASE_RELATIONS) -> Iterable[Span]:
    """Phrase-core spans of all bunsetu overlapping ``span``."""
    return [bunsetu_phrase_span(head_token, phrase_relations)
            for head_token in bunsetu_head_tokens(span)]
def bunsetu_phrase_span(token: Token, phrase_relations: Iterable[str] = PHRASE_RELATIONS) -> Span:
    """The phrase core of the bunsetu containing ``token``.

    Starting from the bunsetu root, follows children inside the bunsetu
    connected by ``phrase_relations`` and returns the minimal span covering
    the collected tokens, labeled like the bunsetu itself.
    """
    bunsetu = bunsetu_span(token)

    def _collect(node, acc):
        # Depth-first over in-bunsetu children linked by phrase relations.
        for child in node.children:
            if bunsetu.start <= child.i < bunsetu.end:
                if child.dep_ in phrase_relations:
                    _collect(child, acc)
        acc.append(node.i)

    indices = []
    _collect(bunsetu.root, indices)
    return Span(token.doc, start=min(indices), end=max(indices) + 1,
                label=bunsetu.label_)
def bunsetu_bi_labels(span: Span) -> List[str]:
    """Per-token "B"/"I" bunsetu labels for the tokens of ``span``."""
    labels = span.doc.user_data["bunsetu_bi_labels"]
    if isinstance(span, Doc):
        return labels
    return labels[span.start:span.end]
def bunsetu_position_types(span: Span) -> List[str]:
    """Per-token bunsetu position types (ROOT/SEM_HEAD/SYN_HEAD/FUNC/CONT/...)."""
    types = span.doc.user_data["bunsetu_position_types"]
    if isinstance(span, Doc):
        return types
    return types[span.start:span.end]
class BunsetuRecognizer:
    """Pipeline component that derives bunsetu segmentation from dep labels.

    Tokens whose dependency label carries ``BUNSETU_HEAD_SUFFIX`` (or ROOT)
    are taken as bunsetu heads; the component then computes per-token B/I
    labels and position types and stores them in ``doc.user_data``.
    """
    def __init__(self, nlp: Language, remain_bunsetu_suffix: bool = False) -> None:
        self.nlp = nlp
        # When True, the "_bunsetu" suffix is kept on dep labels.
        self._remain_bunsetu_suffix = remain_bunsetu_suffix
    @property
    def remain_bunsetu_suffix(self) -> bool:
        # (annotation fixed: this returns the stored bool, not a str)
        return self._remain_bunsetu_suffix
    @remain_bunsetu_suffix.setter
    def remain_bunsetu_suffix(self, remain: bool):
        self._remain_bunsetu_suffix = remain
    def __call__(self, doc: Doc) -> Doc:
        debug = False
        # heads[i] is True iff token i is a bunsetu head.
        heads = [False] * len(doc)
        for t in doc:
            if t.dep_ == "ROOT":
                heads[t.i] = True
            elif t.dep_.endswith(BUNSETU_HEAD_SUFFIX):
                heads[t.i] = True
                if not self._remain_bunsetu_suffix:
                    # Strip the marker suffix from the dep label.
                    t.dep_ = t.dep_[:-len(BUNSETU_HEAD_SUFFIX)]
        for t in doc:  # recovering uncovered subtrees
            if heads[t.i]:
                # Walk up left-side ancestors that are not yet heads,
                # marking non-punctuation ancestors as heads.
                while t.head.i < t.i and not heads[t.head.i]:
                    heads[t.head.i] = t.head.pos_ not in {"PUNCT"}
                    if debug and heads[t.head.i]:
                        print("========= A", t.i + 1, t.orth_, "=========")
                        print(list((t.i + 1, t.orth_, t.head.i + 1) for t, is_head in zip(doc, heads) if is_head))
                    t = t.head
                heads[t.head.i] = True
        for ent in doc.ents:  # removing head inside ents
            head = None
            outer = None
            for t in ent:
                # A token whose head is itself or lies outside the entity
                # is the entity's candidate head.
                if t.head.i == t.i or t.head.i < ent.start or ent.end <= t.head.i:
                    if not outer:
                        head = t
                        outer = t.head
                    elif outer.i != t.head.i:
                        # Two different external attachment points: give up.
                        break
            else:
                if head:
                    # Keep only one head inside the entity.
                    for t in ent:
                        if t.i != head.i:
                            heads[t.i] = False
        bunsetu_heads = tuple(idx for idx, is_head in enumerate(heads) if is_head)
        # B/I labeling: decide where each bunsetu starts between
        # consecutive head pairs, based on their subtree edges.
        bunsetu_bi = ["I"] * len(doc)
        if bunsetu_bi:
            bunsetu_bi[0] = "B"
        for head_i, next_head_i in zip(bunsetu_heads[:-1], bunsetu_heads[1:]):
            l_head = doc[head_i]
            r_head = doc[next_head_i]
            if l_head.right_edge.i + 1 == r_head.left_edge.i or l_head.right_edge.i >= r_head.i:  # (l)(r) or (l (r))
                bunsetu_bi[r_head.left_edge.i] = "B"
            elif l_head.i <= r_head.left_edge.i:  # ((l) r)
                bunsetu_bi[l_head.right_edge.i + 1] = "B"
            else:  # ((l) (missed_tokens) (r))
                l_ancestors = set(t.i for t in l_head.ancestors)
                r_ancestors = set(t.i for t in r_head.ancestors)
                for m in doc[l_head.right_edge.i + 1: r_head.left_edge.i]:  # find closer branch
                    found = False
                    for m_ancestor in [m] + list(m.ancestors):
                        if m_ancestor.i in r_ancestors:
                            bunsetu_bi[m_ancestor.i] = "B"
                            found = True
                            break
                        elif m_ancestor.i in l_ancestors:
                            break
                    if found:
                        break
                else:
                    # No in-between token attaches to the right head's chain.
                    bunsetu_bi[l_head.right_edge.i + 1] = "B"
        doc.user_data["bunsetu_heads"] = bunsetu_heads
        doc.user_data["bunsetu_bi_labels"] = bunsetu_bi
        # Assign position types for tokens in each bunsetu's phrase core.
        position_types = [None] * len(doc)
        for head in bunsetu_heads:
            phrase = bunsetu_phrase_span(doc[head])
            for t in phrase:
                if t.i == t.head.i:
                    position_types[t.i] = "ROOT"
                elif t.i == head:
                    position_types[t.i] = "NO_HEAD" if t.dep_ == "punct" else "SEM_HEAD"
                else:
                    position_types[t.i] = "CONT"
        # Remaining (functional) tokens, scanned right-to-left: the first
        # function word of a bunsetu becomes SYN_HEAD, the rest FUNC.
        first_func = True
        for t, bi, position_type in reversed(list(zip(doc, bunsetu_bi, position_types))):
            # NOTE(review): `bi` is always "B" or "I", so `if bi:` is always
            # true and first_func resets on EVERY token — possibly intended
            # to be `if bi == "B":`. Behavior left unchanged; confirm upstream.
            if bi:
                first_func = True
            if position_type is None:
                if t.pos_ in {'AUX', 'ADP', 'SCONJ', 'CCONJ', 'PART'}:
                    if first_func:
                        position_types[t.i] = "SYN_HEAD"
                        first_func = False
                    else:
                        position_types[t.i] = "FUNC"
                else:
                    position_types[t.i] = "CONT"
        doc.user_data["bunsetu_position_types"] = position_types
        return doc
def append_bunsetu_head_dep_suffix(tokens: List[Token], suffix: str = BUNSETU_HEAD_SUFFIX) -> None:
    """Mark the head of a bunsetu token span by appending ``suffix`` to its dep.

    Scans left to right and stops as soon as either a ROOT token is seen
    (no marking needed) or the first token whose head lies outside the span
    has been tagged.
    """
    if not suffix:
        return
    for token in tokens:
        if token.dep_.lower() == "root":
            return
        outside_left = token.head.i < tokens[0].i
        outside_right = tokens[-1].i < token.head.i
        if outside_left or outside_right:
            token.dep_ += suffix
            return
|
<reponame>berkeley-dsep-infra/data8xhub<filename>images/hwuploader/app.py
#!/usr/bin/env python3
import os
import sys
import base64
import json
from jinja2 import Environment, FileSystemLoader
from tornado import httpserver, ioloop, web, log
from ltivalidator import LTILaunchValidator, LTILaunchValidationError
class HomeWorkHandler(web.RequestHandler):
    """Handles LTI launches and homework file uploads.

    A POST without files is treated as an LTI launch: it is validated and
    the launch arguments are signed and embedded into the upload form.
    A POST with a file is the actual upload, authorized by that signed value.
    """

    def render_template(self, name, **extra_ns):
        """Render an HTML page from the configured jinja2 environment."""
        ns = {
            'static_url': self.static_url,
        }
        ns.update(extra_ns)
        template = self.settings['jinja2_env'].get_template(name)
        html = template.render(**ns)
        self.write(html)

    def finish_upload(self, hw):
        """Persist the uploaded file for homework ``hw``.

        The destination file name is the LTI ``lis_result_sourcedid`` taken
        from the signed launch arguments; the file contains one line of
        launch-args JSON followed by the uploaded text.

        Raises:
            web.HTTPError: 400 for any invalid name, path or payload.
        """
        signed_launch_args = self.get_argument('signed-launch-args')
        launch_args = json.loads(web.decode_signed_value(
            self.settings['cookie_secret'],
            'launch-args',
            signed_launch_args
        ).decode('utf-8'))
        target_dir = os.path.join(self.settings['upload_base_dir'], hw)
        # Protect ourselves from path traversal attacks
        # NOTE: This is why it is important that upload_base_dir ends with a /
        if not target_dir.startswith(self.settings['upload_base_dir']):
            raise web.HTTPError(400, 'Invalid homework name')
        os.makedirs(target_dir, exist_ok=True)
        sourced_id = launch_args['lis_result_sourcedid']
        # Security fix: normalize before the containment check. A sourced_id
        # such as "../x" used to pass the plain startswith() test because
        # os.path.join does not collapse "..".
        safe_root = os.path.normpath(target_dir)
        target_path = os.path.normpath(os.path.join(target_dir, sourced_id))
        if not target_path.startswith(safe_root + os.sep):
            raise web.HTTPError(400, 'Invalid launch_args')
        if len(self.request.files) != 1:
            raise web.HTTPError(400, 'Only one file can be uploaded at a time')
        uploaded_file = list(self.request.files.values())[0][0]
        try:
            file_contents = uploaded_file.body.decode('utf-8')
        except UnicodeDecodeError:
            raise web.HTTPError(400, 'Could not decode uploaded file as UTF-8')
        # Text mode is correct here: both the launch-args JSON line and the
        # decoded upload body are str. (The old comment claiming "binary
        # files" contradicted the code and has been removed.)
        with open(target_path, 'w') as f:
            f.write(json.dumps(launch_args) + '\n')
            f.write(file_contents)
        log.app_log.info('Saved file {target_path} for launch {launch_info}'.format(target_path=target_path, launch_info=json.dumps(launch_args)))
        self.write("Done!")

    def post(self, hw):
        if self.request.files:
            return self.finish_upload(hw)
        consumers = self.settings['consumers']
        validator = LTILaunchValidator(consumers)
        # Convert everything to strings rather than bytes
        args = {}
        for k, values in self.request.body_arguments.items():
            args[k] = values[0].decode() if len(values) == 1 else [v.decode() for v in values]
        # handle multiple layers of proxied protocol (comma separated) and take the outermost
        if 'x-forwarded-proto' in self.request.headers:
            # x-forwarded-proto might contain comma delimited values
            # left-most value is the one sent by original client
            hops = [h.strip() for h in self.request.headers['x-forwarded-proto'].split(',')]
            protocol = hops[0]
        else:
            protocol = self.request.protocol
        launch_url = protocol + "://" + self.request.host + self.request.uri
        try:
            if validator.validate_launch_request(
                    launch_url,
                    self.request.headers,
                    args
            ):
                user_id = self.get_body_argument('user_id')
                log.app_log.info(f'{user_id} successfully logged in')
        except LTILaunchValidationError as e:
            raise web.HTTPError(401, e.message)
        # The launch args are exactly the decoded body arguments already in
        # ``args`` — no need to rebuild the same dict a second time.
        signed_launch_args = self.create_signed_value('launch-args', json.dumps(args)).decode('utf-8')
        self.render_template('main.html', signed_launch_args=signed_launch_args)
def main():
    """Configure and run the homework-upload web server on port 8888.

    Required environment variables: COOKIE_SECRET, UPLOAD_BASE_DIR (must end
    with '/'), LTI_KEY and LTI_SECRET.
    """
    log.enable_pretty_logging()
    # Consistency fix: LTI_KEY / LTI_SECRET used to raise a bare KeyError
    # below; all required variables now get the same logged-error + exit.
    required = {
        'COOKIE_SECRET': 'Set a 32byte hex-encoded value as COOKIE_SECRET environment variable first!',
        'UPLOAD_BASE_DIR': 'Provide dir to store uploaded files in as UPLOAD_BASE_DIR (with trailing slash!) environment variable',
        'LTI_KEY': 'Provide the LTI consumer key as LTI_KEY environment variable',
        'LTI_SECRET': 'Provide the LTI consumer secret as LTI_SECRET environment variable',
    }
    for name, message in required.items():
        if name not in os.environ:
            log.app_log.error(message)
            sys.exit(1)
    if not os.environ['UPLOAD_BASE_DIR'].endswith('/'):
        log.app_log.error('UPLOAD_BASE_DIR must end with a trailing /')
        sys.exit(1)
    consumers = {os.environ['LTI_KEY']: os.environ['LTI_SECRET']}
    jinja2_env = Environment(loader=FileSystemLoader([os.path.dirname(__file__)]), autoescape=True)
    settings = {
        'jinja2_env': jinja2_env,
        'cookie_secret': os.environ['COOKIE_SECRET'],
        'consumers': consumers,
        'upload_base_dir': os.environ['UPLOAD_BASE_DIR']
    }
    application = web.Application([
        (r"/hwuploader/(\w+)", HomeWorkHandler),
    ], **settings)
    http_server = httpserver.HTTPServer(application)
    http_server.listen(8888)
    ioloop.IOLoop.current().start()
if __name__ == "__main__":
main() |
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 11:37:07 2019
@author: deborahkhider
"""
import xarray as xr
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import imageio
import os
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from matplotlib.colors import Normalize
import sys
import ast
class PiecewiseNorm(Normalize):
    """Piecewise-linear color normalization.

    Maps the given breakpoint levels onto evenly spaced values in [0, 1],
    interpolating linearly between them.
    """

    def __init__(self, levels, clip=False):
        # Sorted breakpoints and their evenly spaced normalized targets.
        self._levels = np.sort(levels)
        self._normed = np.linspace(0, 1, len(levels))
        super().__init__(None, None, clip)

    def __call__(self, value, clip=None):
        # Linearly interpolate `value` onto [0, 1] between the breakpoints.
        interpolated = np.interp(value, self._levels, self._normed)
        return np.ma.masked_array(interpolated)
def histedges_equalN(x, nbin):
    """Return ``nbin + 1`` bin edges splitting ``x`` into equal-count bins."""
    count = len(x)
    targets = np.linspace(0, count, nbin + 1)
    return np.interp(targets, np.arange(count), np.sort(x))
# CLI: argv[1] = netCDF dataset path, argv[2] = figure size tuple, e.g. "(10, 8)"
dataset_name = sys.argv[1]
figsize = ast.literal_eval(sys.argv[2])
# Open the file
dataset = xr.open_dataset(dataset_name)
# Get the only variable. According to Scott, one file/variable
varname = list(dataset.data_vars.keys())[0]
## Get the flow values
val = dataset[varname].values
#val2 = exposure.equalize_hist(val)
nx = dataset.nx.values
ny = dataset.ny.values
## Get the grid edges from the variable attributes
val_attrs = dataset[varname].attrs
ymin = val_attrs['y_south_edge']
ymax= val_attrs['y_north_edge']
xmin = val_attrs['x_west_edge']
xmax = val_attrs['x_east_edge']
## Grid steps; /3600 presumably converts arc-seconds to degrees — TODO confirm
dx = val_attrs['dx']/3600
dy = val_attrs['dy']/3600
## Cell-center coordinate vectors (edge + half step + index * step)
lon = xmin+dx/2+nx*dx
lat = ymin+dy/2+ny*dy
## Flattened lon/lat grid for sanity checking
xx,yy=np.meshgrid(lon,lat)
xx2 = np.reshape(xx,xx.size)
yy2 = np.reshape(yy,yy.size)
dv= pd.DataFrame({'lon':xx2,'lat':yy2})
# Map projection centered on the data
# NOTE(review): centers on np.mean(dataset['nx']) (grid indices), not on
# longitude — looks suspicious; confirm intent.
proj = ccrs.PlateCarree(central_longitude = np.mean(dataset['nx']))
idx = dataset['time'].values.size
count = list(np.arange(0,idx,1))
# Contour levels chosen so each bin holds an equal number of samples
levels = np.sort(np.unique(histedges_equalN(np.reshape(val,np.size(val)),60)))
# Bounding box of the data, rounded for tick placement
X_min= np.round(np.min(lon),2)
X_max= np.round(np.max(lon),2)
Y_min= np.round(np.min(lat),2)
Y_max= np.round(np.max(lat),2)
step_lon = np.round(np.ptp(lon)/5,2)
step_lat = np.round(np.ptp(lat)/5,2)
# Build a human-readable title from the variable's long_name
long_name = dataset[varname].attrs['long_name']
if '_' in long_name:
    t = long_name.split('_')
    strT = t[0].capitalize()
    for item in t[1:]:
        strT = strT + ' ' + item.capitalize()
else:
    strT = long_name.capitalize()
filenames =[]
# Make the output directory if it doesn't exist
if os.path.isdir('./figures') is False:
    os.makedirs('./figures')
# loop to create all figures for each time slice
for i in count:
v = val[i,:,:]
fig,ax = plt.subplots(figsize=figsize)
ax = plt.axes(projection=proj)
ax.add_feature(cfeature.BORDERS)
ax.add_feature(cfeature.COASTLINE)
img = plt.contourf(lon, lat, v, levels,
transform=proj, cmap=cm.gist_gray,norm=PiecewiseNorm(levels))
ticks = levels[0::15]
ticks = np.sort(np.insert(ticks,-1,levels[-1]))
cbar = plt.colorbar(img, orientation = 'horizontal',
format = '%.2e',ticks=ticks)
cbar.ax.set_xlabel((strT+'('+dataset[varname].attrs['units']+')'))
ax.set_extent([X_min,X_max,Y_min,Y_max])
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=2, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
gl.xlines = False
gl.ylines = False
gl.xlocator = mticker.FixedLocator(np.arange(X_min,X_max,step_lon))
gl.ylocator = mticker.FixedLocator(np.arange(Y_min,Y_max,step_lat))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color': 'gray'}
gl.ylabel_style = {'size': 12, 'color': 'gray'}
ax.add_feature(cfeature.RIVERS)
#save as jpeg
filename = './figures/'+varname+'_t'+str(i)+'.jpeg'
filenames.append(filename)
plt.savefig(filename)
plt.close(fig)
#Make the GIF
images = []
for filename in filenames:
images.append(imageio.imread(filename))
imageio.mimsave(dataset_name+'_'+long_name+'_movie.gif', images)
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
# Integer cell-type ids shared by the map grid and the walking grid.
PROHIBITED_ID = 0  # Prohibited (non-walkway)
WALKWAY_ID = 1  # Designated walkway
STREET_ID = 2  # Street (prohibited for walking)
CROSSING_ID = 3  # Transient walkway (e.g., street crossing)
EXIT_ID = 4  # Cells representing exit points (destinations)
ENTRY_ID = 5  # Entry cells (where pedestrians enter simulation_grid)
# Map-file type values at or above these offsets encode exits/entries; the
# remainder modulo the offset is the group id (see build_from_map_file).
EXIT_OFFSET = 100  # Offset used for integer ids of exit points (destinations)
ENTRY_OFFSET = 200  # Offset used for integer ids of entry points
class Cell(object):
    """A single walking-grid cell.

    Cell types use integer codes:
    0 - Prohibited (non-walkway)
    1 - Designated walkway
    2 - Street (prohibited for walking)
    3 - Transient walkway (e.g., street crossing)
    """

    def __init__(self, coordinates, cell_type=0, is_open=0, group_id=999):
        """Create a cell.

        :param coordinates: (x, y) position within the walking grid
        :param cell_type: integer code for the cell type (defaults to
            prohibited)
        :param is_open: whether a transient path is currently passable
        :param group_id: id of the group of cells this cell belongs to
        """
        self.coordinates = coordinates
        self.type = cell_type
        self.open = is_open
        self.group_id = group_id
        # No pedestrian occupies the cell initially.
        self.occupant = None
def build_from_map_file(map_file_path):
    """Build the simulation grids from the given map file.

    The map file is specified in (row, column) format; all data arrays are
    likewise indexed (row, column). Lines starting with '#' are comments.
    The first data line is a header ``rows cols <unused>``; each following
    data line holds five integers ``start_row start_col end_row end_col type``
    describing a rectangle of cells of the given type.

    :param map_file_path: Path to the map file
    :return: tuple (map_grid, walking_grid) of int arrays shaped (rows, cols)
    """
    # Changed: the original declared ``global`` on the *_ID/*_OFFSET constants,
    # which is unnecessary -- they are only read, never assigned. It also
    # computed ``group_id = t % EXIT_OFFSET`` / ``t % ENTRY_OFFSET`` and then
    # discarded the value; those dead stores are removed.
    with open(map_file_path, 'r') as fid:
        # Skip leading comment lines.
        map_line = fid.readline()
        while map_line[0] == '#':
            map_line = fid.readline()
        # Read the header and get the grid dimensions (third value unused).
        r, c, a = [float(v) for v in map_line.split()]
        r = int(r)
        c = int(c)
        # Both grids default to prohibited (0).
        walking_grid = np.zeros((r, c), dtype=int)
        map_grid = np.zeros((r, c), dtype=int)
        # Read each remaining line of the map file.
        for map_line in fid:
            # Skip comment lines.
            if map_line[0] == '#':
                continue
            # NOTE(review): isdigit() silently drops negative numbers and any
            # non-integer tokens -- assumed intentional for this file format.
            values = [int(v) for v in map_line.split() if v.isdigit()]
            if len(values) != 5:
                continue
            sr, sc, er, ec, t = values
            # Update every cell of the rectangle to the requested type.
            # Note: higher-order types override lower ones.
            for row in range(min((sr, er)), max((sr, er)) + 1):
                for col in range(min((sc, ec)), max((sc, ec)) + 1):
                    if t == WALKWAY_ID:  # Walkway
                        walking_grid[row, col] = WALKWAY_ID
                        # A walkway over a street becomes a crossing.
                        if map_grid[row, col] == STREET_ID:
                            map_grid[row, col] = CROSSING_ID
                        else:
                            map_grid[row, col] = WALKWAY_ID
                    elif t == STREET_ID:  # Street
                        # A street over a walkway becomes a crossing.
                        if walking_grid[row, col] == WALKWAY_ID:
                            walking_grid[row, col] = WALKWAY_ID
                            map_grid[row, col] = CROSSING_ID
                        else:
                            map_grid[row, col] = STREET_ID
                    elif t == CROSSING_ID:  # Street crossing
                        walking_grid[row, col] = WALKWAY_ID
                        map_grid[row, col] = CROSSING_ID
                    elif EXIT_OFFSET <= t < ENTRY_OFFSET:
                        # Destination cell (exit of the simulation area).
                        walking_grid[row, col] = WALKWAY_ID
                        map_grid[row, col] = EXIT_ID
                    elif t >= ENTRY_OFFSET:
                        # Simulation entry point.
                        walking_grid[row, col] = WALKWAY_ID
                        map_grid[row, col] = ENTRY_ID
                    else:
                        # Unknown id: record it only if the cell is still empty.
                        if map_grid[row, col] == PROHIBITED_ID:
                            map_grid[row, col] = t
    return (map_grid, walking_grid)
def show_map(mapgrid):
    """Show a visual representation of the map.

    :param mapgrid: 2-D integer array of cell-type ids (see module constants)
    """
    # NOTE(review): looks like a leftover debug print -- consider removing.
    print(mapgrid)
    # One RGB color per cell-type id (0..6), scaled to [0, 1] for matplotlib.
    map_colors = np.array(
        [[0, 0, 0], [255, 255, 255], [0, 100, 255], [255, 150, 0],
         [255, 0, 0], [227, 0, 252], [0, 255, 50]], dtype=float
    )
    fig, a = plt.subplots()
    a.xaxis.set_visible(False)
    a.yaxis.set_visible(False)
    # origin='lower' draws row 0 at the bottom, matching grid coordinates.
    plt.imshow(
        mapgrid, interpolation='nearest', origin='lower', aspect='equal',
        cmap=mpl.colors.ListedColormap(map_colors/255)
    )
    plt.show()
if __name__ == "__main__":
    # Quick manual check: print the working directory (to confirm where the
    # map file is looked up) and render the demo map.
    import os
    print(os.getcwd())
    show_map(build_from_map_file("GT_Boddy_Dodd_South_Map.txt")[0])
|
import os
import re
import unittest
import helpers
class _StartTypeAnnotationsSentinel(object):
    """Marker type whose appearance in the annotations log delimits the
    start of a block of recorded type annotations."""
    pass
class _EndTypeAnnotationsSentinel(object):
    """Marker type whose appearance in the annotations log delimits the
    end of a block of recorded type annotations."""
    pass
# Singleton sentinel instances returned by the helper functions below.
_START_SENTINEL = _StartTypeAnnotationsSentinel()
_END_SENTINEL = _EndTypeAnnotationsSentinel()
def return_start_sentinel():
    # Calling this produces a recognizable start-marker line in the traced
    # annotations log (matched by _TypeSentinel._START_SENTINEL_RE).
    return _START_SENTINEL
def return_end_sentinel():
    # Calling this produces a recognizable end-marker line in the traced
    # annotations log (matched by _TypeSentinel._END_SENTINEL_RE).
    return _END_SENTINEL
class _TypeSentinel(object):
    """Context manager bracketing a region of the traced annotations log.

    ``__enter__`` drains the log file and calls return_start_sentinel();
    ``__exit__`` calls return_end_sentinel() and captures everything logged
    in between. The external tracer writing the annotations file is assumed
    to record both sentinel calls -- TODO confirm against the tracer setup.
    """
    # Log lines look like "qualified.call()\tqualified.ReturnType".
    _START_SENTINEL_RE = re.compile(r'^[a-zA-Z0-9_.]*return_start_sentinel\(\)\t[a-zA-Z0-9_.]*_StartTypeAnnotationsSentinel$')
    _END_SENTINEL_RE = re.compile(r'^[a-zA-Z0-9_.]*return_end_sentinel\(\)\t[a-zA-Z0-9_.]*_EndTypeAnnotationsSentinel$')

    def __init__(self, file_handler):
        # Raw text captured by __exit__; None until the with-block completes.
        self.annotations = None
        self.file_handler = file_handler

    def __enter__(self):
        # Drain anything already logged so get_annotations() only sees
        # lines produced inside this with-block.
        self.file_handler.read()
        return_start_sentinel()
        return self

    def __exit__(self, type_unused, value_unused, traceback_unused):
        return_end_sentinel()
        # Everything logged since __enter__, including both sentinel lines.
        self.annotations = self.file_handler.read()

    def get_annotations(self):
        """Return the log lines recorded between the two sentinel markers."""
        all_annotations = self.annotations.split('\n')
        annotations = []
        in_block = False
        for annotation in all_annotations:
            if self._START_SENTINEL_RE.match(annotation):
                in_block = True
            elif self._END_SENTINEL_RE.match(annotation):
                break
            elif in_block:
                annotations.append(annotation)
        return annotations
def get_annotations_filename():
    """Return the pid-specific path of the traced annotations file."""
    return '/tmp/python-types-{0}'.format(os.getpid())
class TestAnnotations(unittest.TestCase):
    """Integration tests for an external type-annotation tracer.

    Each test executes calls inside a _TypeSentinel block and then compares
    the "call\treturn_type" lines the tracer appended to the shared
    annotations file. NOTE(review): the cStringIO/StringIO imports make this
    suite Python 2 only.
    """
    @classmethod
    def setUpClass(cls):
        # The tracer is expected to be writing to this pid-derived path.
        cls.annotations_file = open(get_annotations_filename(), 'r')
    @classmethod
    def tearDownClass(cls):
        cls.annotations_file.close()
    def test_min(self):
        # Builtin calls: argument and return types are recorded.
        with _TypeSentinel(self.annotations_file) as types:
            min(2, 3)
            min((2, 3))
        self.assertEqual(
            ['min(int, int)\tint',
             'min(tuple)\tint'],
            types.get_annotations())
    def test_int(self):
        with _TypeSentinel(self.annotations_file) as types:
            int("42")
            int(42)
        self.assertEqual(
            ['int(str)\tint',
             'int(int)\tint'],
            types.get_annotations())
    def test_str(self):
        # Both constructor calls and method calls on str are traced.
        with _TypeSentinel(self.annotations_file) as types:
            str("42")
            str(42)
            "FoO".lower()
        self.assertEqual(
            ['str(str)\tstr',
             'str(int)\tstr',
             'str.lower()\tstr'],
            types.get_annotations())
    def test_user_defined_class(self):
        with _TypeSentinel(self.annotations_file) as types:
            a = helpers.A()
            a.foo(1, [])
            helpers.A.foo(a, 1, [])
            type(a)
        self.assertEqual(
            ['helpers.A()\thelpers.A',
             'helpers.A.foo(int, list)\tint',
             'helpers.A.foo<U>(helpers.A, int, list)\tint',
             'type(helpers.A)\ttype'],  # <U> means the method is unbound
            types.get_annotations())
    def test_user_defined_function(self):
        # Covers positional, keyword, *args and **kwargs call shapes.
        with _TypeSentinel(self.annotations_file) as types:
            helpers.foo([])
            helpers.foo('')
            helpers.foo('', b='')
            helpers.foo('', *[])
            helpers.foo('', **{})
            helpers.foo('', *[], **{})
            helpers.foo('', b='', *[], **{})
        self.assertEqual(
            ['helpers.foo(list)\tlist',
             'helpers.foo(str)\tstr',
             'helpers.foo(str, b=str)\tstr',
             'helpers.foo(str, *args)\tstr',
             'helpers.foo(str, **kwargs)\tstr',
             'helpers.foo(str, *args, **kwargs)\tstr',
             'helpers.foo(str, b=str, *args, **kwargs)\tstr'],
            types.get_annotations())
    def test_c_class(self):
        # C extension classes (Python 2's cStringIO) are recorded too.
        import cStringIO
        with _TypeSentinel(self.annotations_file) as types:
            b = cStringIO.StringIO()
            type(b)
        self.assertEqual(
            ['cStringIO.StringIO()\tcStringIO.StringO',
             'type(cStringIO.StringO)\ttype'],
            types.get_annotations())
    def test_old_style_class(self):
        import StringIO
        with _TypeSentinel(self.annotations_file) as types:
            b = StringIO.StringIO()
            type(b)
        # We do not test for equality because the constructor of StringIO
        # calls many other methods.
        self.assertLess(
            set(['StringIO.StringIO()\tStringIO.StringIO',
                 'type(StringIO.StringIO)\ttype']),
            set(types.get_annotations()))
# TODO(skreft): add tests for method wrappers: int.__hash__(1), (1).__hash__()
if __name__ == '__main__':
    # Discover and run the annotation tests with the standard unittest runner.
    unittest.main()
|
<reponame>eserie/wax-ml<gh_stars>10-100
# Copyright 2021 The WAX-ML Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The online newton optimizer. It extends optax optimizers."""
from typing import NamedTuple
import jax
import jax.numpy as jnp
from optax._src import base, combine
from optax._src.alias import ScalarOrSchedule, _scale_by_learning_rate
def newton(
    learning_rate: ScalarOrSchedule, eps: float = 1e-5
) -> base.GradientTransformation:
    """The online newton optimizer.
    It extends optax optimizers.

    References
    ----------
    [^1] [<NAME>., <NAME>. and <NAME>., 2007. Logarithmic regret algorithms for
    online convex optimization. Machine Learning, 69(2-3), pp.169-192]
    (https://link.springer.com/content/pdf/10.1007/s10994-007-5016-8.pdf)

    Args:
      learning_rate: this is a fixed global scaling factor.
      eps: a small constant used to initialize the inverse Hessian
        approximation to ``identity / eps`` (see scale_by_newton) and avoid
        a zero denominator.

    Returns:
      the corresponding `GradientTransformation`.
    """
    # Chain the Newton preconditioning with the usual learning-rate scaling.
    return combine.chain(
        scale_by_newton(eps=eps),
        _scale_by_learning_rate(learning_rate),
    )
class ScaleByNewtonState(NamedTuple):
    """State holding the running approximation of the inverse Hessian."""
    # Pytree with one inverse-Hessian matrix per flattened parameter leaf.
    hessian_inv: base.Updates
def sherman_morrison(A_inv, u, v):
    """Apply the Sherman-Morrison formula.

    Given ``A_inv`` (the inverse of A), return the inverse of the rank-one
    updated matrix ``A + u v^T``.

    Args:
        A_inv: square matrix, the inverse of A.
        u: left vector of the rank-one update.
        v: right vector of the rank-one update.

    Returns:
        The updated inverse matrix.

    Note:
        The previous implementation used ``A_inv -= ...``, which mutates the
        caller's array in place when given NumPy input (JAX arrays are
        immutable, so behavior under JAX is unchanged). This version is a
        pure function with no side effects.
    """
    den = 1.0 + (u @ A_inv) @ v
    return A_inv - A_inv @ jnp.outer(u, v) @ A_inv / den
def scale_by_newton(eps: float = 1e-7) -> base.GradientTransformation:
    """Rescale updates by multiplying by the inverse of an approximation of the hessian.

    The inverse Hessian is maintained per parameter leaf on the flattened
    gradients via rank-one Sherman-Morrison updates.

    References
    ----------
    [^1]: [<NAME>., <NAME>. and <NAME>., 2007. Logarithmic regret algorithms for online convex optimization. Machine Learning, 69(2-3), pp.169-192](https://www.cs.princeton.edu/~ehazan/papers/log-journal.pdf)

    Args:
        eps: A small floating point value to avoid zero denominator; the
            inverse Hessian starts as ``identity / eps``.

    Returns:
        An (init_fn, update_fn) tuple.

    Note:
        Changed: ``jax.tree_multimap`` was deprecated and later removed from
        JAX; all maps now use ``jax.tree_util.tree_map``, which accepts
        multiple trees (requires jax >= 0.3.5).
    """
    def init_fn(params):
        # One (n, n) inverse-Hessian per leaf, n = flattened leaf size.
        hessian_inv = jax.tree_util.tree_map(
            lambda t: jnp.eye(len(t.flatten()), dtype=t.dtype) / eps, params
        )
        return ScaleByNewtonState(hessian_inv=hessian_inv)

    def update_fn(updates, state, params=None):
        del params

        class Tuple(tuple):
            """Tuple subclass that is not flattened as a pytree node, so leaf
            shapes can ride through tree_map for the final reshape."""

        # Remember original leaf shapes, then flatten every leaf.
        shapes = jax.tree_util.tree_map(lambda x: Tuple(x.shape), updates)
        updates = jax.tree_util.tree_map(lambda x: x.flatten(), updates)
        # Rank-one Sherman-Morrison update of each leaf's inverse Hessian.
        hessian_inv = jax.tree_util.tree_map(
            lambda u, hinv: sherman_morrison(hinv, u, u), updates, state.hessian_inv
        )
        # Precondition the gradients and restore the original shapes.
        updates = jax.tree_util.tree_map(lambda hinv, g: hinv @ g, hessian_inv, updates)
        updates = jax.tree_util.tree_map(
            lambda u, shape: u.reshape(shape), updates, shapes
        )
        return updates, ScaleByNewtonState(hessian_inv=hessian_inv)

    return base.GradientTransformation(init_fn, update_fn)
|
<reponame>upciti/debops<gh_stars>0
import asyncio
import itertools
import os
import re
from collections import defaultdict
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import httpx
import ruamel.yaml
from ruamel.yaml.emitter import Emitter
from semver.version import Version
from . import logger
from .client import client_factory
from .exceptions import Ops2debError, Ops2debUpdaterError
from .fetcher import FetchResult, fetch_urls
from .parser import Blueprint, RemoteFile, load, validate
from .utils import separate_results_from_errors
# fixme: move this somewhere else, this code is also duplicated in formatter.py
class FixIndentEmitter(Emitter):
    """ruamel.yaml emitter that always indents block sequences (never the
    "indentless" flush-left style)."""

    def expect_block_sequence(self) -> None:
        # Force an indented (non-indentless) block sequence.
        self.increase_indent(flow=False, indentless=False)
        self.state = self.expect_first_block_sequence_item
class BaseUpdateStrategy:
    """Base class for strategies that find a blueprint's latest version.

    Subclasses implement is_blueprint_supported() and __call__().
    """

    def __init__(self, client: httpx.AsyncClient):
        self.client = client

    async def _try_version(self, blueprint: Blueprint, version: str) -> bool:
        """Return True when the blueprint's fetch URL exists for *version*.

        Probes with a HEAD request: 4xx means "not available", 5xx raises,
        anything else counts as success.

        :raises Ops2debUpdaterError: on transport errors or 5xx replies
        """
        # A blueprint without a fetch instruction cannot be probed.
        if not (remote_file := blueprint.render_fetch(version=version)):
            return False
        url = remote_file.url
        logger.debug(f"{self.__class__.__name__} - {blueprint.name} - Trying {url}")
        try:
            response = await self.client.head(url)
        except httpx.HTTPError as e:
            raise Ops2debUpdaterError(f"Failed HEAD request to {url}. {str(e)}")
        status = response.status_code
        if status >= 500:
            raise Ops2debUpdaterError(f"Server error when requesting {url}")
        elif status >= 400:
            return False
        return True

    @classmethod
    def is_blueprint_supported(cls, blueprint: Blueprint) -> bool:
        # Subclasses decide whether they can handle this blueprint.
        raise NotImplementedError

    async def __call__(self, blueprint: Blueprint) -> str:
        # Subclasses return the latest version string for the blueprint.
        raise NotImplementedError
class GenericUpdateStrategy(BaseUpdateStrategy):
    """
    Tries a few blueprint fetch URLs with bumped versions to see if servers
    replies with something else than a 404. More or less a brute force approach.
    """

    async def _try_a_few_patches(
        self, blueprint: Blueprint, version: Version
    ) -> Optional[Version]:
        """Probe the next three patch bumps of *version*; return the first
        one whose fetch URL exists, or None if none do."""
        for i in range(0, 3):
            version = version.bump_patch()
            if await self._try_version(blueprint, str(version)) is True:
                return version
        return None

    async def _try_versions(
        self,
        blueprint: Blueprint,
        version: Version,
        version_part: str,
    ) -> Version:
        """Keep bumping *version_part* while the bumped version exists.

        When a minor/major bump is not found, also probe a few patch bumps
        on top of it before giving up (releases often jump e.g. 1.2.0 ->
        1.3.1 without publishing 1.3.0).
        """
        bumped_version = getattr(version, f"bump_{version_part}")()
        if await self._try_version(blueprint, str(bumped_version)) is False:
            if version_part != "patch":
                if (
                    result := await self._try_a_few_patches(blueprint, bumped_version)
                ) is not None:
                    # Found a patched release above the bump; keep climbing.
                    return await self._try_versions(blueprint, result, version_part)
                else:
                    return version
            else:
                return version
        else:
            # The bump exists: recurse to look for an even newer one.
            return await self._try_versions(blueprint, bumped_version, version_part)

    @classmethod
    def is_blueprint_supported(cls, blueprint: Blueprint) -> bool:
        # Only semver version strings can be bumped mechanically.
        if not Version.isvalid(blueprint.version):
            logger.warning(f"{blueprint.name} is not using semantic versioning")
            return False
        return True

    async def __call__(self, blueprint: Blueprint) -> str:
        """Return the highest version reachable by minor/patch bumps,
        falling back to major bumps when nothing newer was found."""
        current_version = version = Version.parse(blueprint.version)
        for version_part in ["minor", "patch"]:
            version = await self._try_versions(blueprint, version, version_part)
        if version == current_version:
            version = await self._try_versions(blueprint, version, "major")
        return str(version)
class GithubUpdateStrategy(BaseUpdateStrategy):
    """Uses Github release API to find the latest release."""

    # Extracts owner/name from a blueprint fetch URL hosted on github.com.
    github_url_re = r"^https://github.com/(?P<owner>[\w-]+)/(?P<name>[\w-]+)/"
    github_media_type = "application/vnd.github.v3+json"
    github_base_api_url = "https://api.github.com"

    @classmethod
    def _get_github_repo_api_base_url(cls, blueprint: Blueprint) -> str:
        """Return the Github API base URL for the blueprint's repository.

        :raises ValueError: when the blueprint has no fetch instruction or
            its URL is not hosted on github.com
        """
        if (fetch := blueprint.render_fetch()) is None:
            raise ValueError(f"Blueprint {blueprint.name} has no fetch instruction")
        if (match := re.match(cls.github_url_re, fetch.url)) is None:
            raise ValueError(f"URL {fetch.url} is not supported")
        return f"{cls.github_base_api_url}/repos/{match['owner']}/{match['name']}"

    async def _get_latest_github_release(self, blueprint: Blueprint) -> Dict[str, Any]:
        """Fetch the latest-release JSON object from the Github API.

        Authenticates with the GITHUB_TOKEN environment variable when set.

        :raises Ops2debUpdaterError: on transport errors or non-200 replies
        """
        repo_api_base_url = self._get_github_repo_api_base_url(blueprint)
        headers = {"accept": self.github_media_type}
        if (token := os.environ.get("GITHUB_TOKEN")) is not None:
            headers["authorization"] = f"token {token}"
        try:
            response = await self.client.get(
                f"{repo_api_base_url}/releases/latest", headers=headers
            )
        except httpx.HTTPError as e:
            raise Ops2debUpdaterError(f"Failed to request Github API. {e}")
        if response.status_code != 200:
            error = f"Failed to request Github API. Error {response.status_code}."
            try:
                # Github error payloads usually carry a "message" field.
                error += f" {response.json()['message']}."
            except Exception:
                pass
            raise Ops2debUpdaterError(error)
        return response.json()

    @classmethod
    def is_blueprint_supported(cls, blueprint: Blueprint) -> bool:
        # Supported iff the fetch URL points at github.com.
        try:
            cls._get_github_repo_api_base_url(blueprint)
            return True
        except ValueError:
            return False

    async def __call__(self, blueprint: Blueprint) -> str:
        """Return the latest release version (tag without any leading "v").

        :raises Ops2debUpdaterError: when the tag is missing or the release
            URL cannot be probed
        """
        latest_release = await self._get_latest_github_release(blueprint)
        if (tag_name := latest_release.get("tag_name")) is None:
            raise Ops2debUpdaterError("Failed to determine latest release version")
        version = tag_name if not tag_name.startswith("v") else tag_name[1:]
        # Never downgrade: keep the blueprint's version when it is higher.
        if Version.isvalid(version) and Version.isvalid(blueprint.version):
            version = str(max(Version.parse(version), Version.parse(blueprint.version)))
        if await self._try_version(blueprint, version) is False:
            raise Ops2debUpdaterError("Failed to determine latest release URL")
        return version
@dataclass(frozen=True)
class LatestRelease:
    """A blueprint paired with the newer version discovered for it."""
    # index of the blueprint in the configuration list (used to locate the
    # matching raw dict when the configuration holds a list)
    blueprint_index: int
    # the parsed blueprint being updated
    blueprint: Blueprint
    # the new (latest) version string
    version: str
    # fetch results keyed by architecture, used for the new sha256 sums
    fetch_results: Dict[str, FetchResult]

    def update_configuration(
        self, blueprint_dict: Union[Dict[str, Any], List[Dict[str, Any]]]
    ) -> None:
        """Rewrite the raw blueprint in place: bump the version, replace the
        sha256 checksum(s), and drop any stale revision key."""
        # configuration file can be a list of blueprints or a single blueprint
        raw_blueprint = (
            blueprint_dict[self.blueprint_index]
            if isinstance(blueprint_dict, list)
            else blueprint_dict
        )
        new_sha256_object: Any = {}
        for arch, fetch_result in self.fetch_results.items():
            # A single RemoteFile means one checksum; otherwise one per arch.
            if isinstance(self.blueprint.fetch, RemoteFile):
                new_sha256_object = fetch_result.sha256_sum
            else:
                new_sha256_object[arch] = fetch_result.sha256_sum
        raw_blueprint["fetch"]["sha256"] = new_sha256_object
        raw_blueprint["version"] = self.version
        raw_blueprint.pop("revision", None)
async def _find_latest_version(client: httpx.AsyncClient, blueprint: Blueprint) -> str:
    """Return the latest version of *blueprint*, trying each supported
    update strategy in turn (Github API first, then generic URL probing).

    :raises Ops2debUpdaterError: when every supported strategy failed
    """
    candidates = (GithubUpdateStrategy(client), GenericUpdateStrategy(client))
    supported = [s for s in candidates if s.is_blueprint_supported(blueprint)]
    if not supported:
        # No strategy applies: keep the current version.
        return blueprint.version
    for strategy in supported:
        try:
            version = await strategy(blueprint)
        except Ops2debUpdaterError as e:
            logger.debug(
                f"{strategy.__class__.__name__} - {blueprint.name} - {str(e)}"
            )
            continue
        if version != blueprint.version:
            logger.info(
                f"{blueprint.name} can be bumped "
                f"from {blueprint.version} to {version}"
            )
        return version
    error = f"Failed to update {blueprint.name}, enable debug logs for more information"
    logger.error(error)
    raise Ops2debUpdaterError(error)
async def _find_latest_releases(
    blueprint_list: List[Blueprint], skip_names: Optional[List[str]] = None
) -> Tuple[List[LatestRelease], Dict[int, Ops2debError]]:
    """Find new releases for the given blueprints.

    Returns the releases found (with freshly downloaded checksums) and a
    dict mapping blueprint indexes to the errors that prevented an update.
    """
    skip_names = skip_names or []
    # Candidates: blueprints with a fetch instruction that are not skipped.
    blueprints = {
        index: blueprint
        for index, blueprint in enumerate(blueprint_list)
        if blueprint.fetch is not None and blueprint.name not in skip_names
    }
    # Probe all candidates concurrently for their latest version.
    # NOTE(review): asyncio.gather without return_exceptions=True propagates
    # the first raised exception, yet separate_results_from_errors below
    # appears to expect per-task errors -- verify against its implementation.
    async with client_factory() as client:
        tasks = [_find_latest_version(client, b) for b in blueprints.values()]
        tasks_results = await asyncio.gather(*tasks)
    versions, errors = separate_results_from_errors(
        dict(zip(blueprints.keys(), tasks_results))
    )
    # remove blueprints where the current version is still the latest
    blueprints = {i: b for i, b in blueprints.items() if versions[i] != b.version}
    # gather the urls of files we need to download to get the new checksums
    urls: Dict[int, Dict[str, str]] = defaultdict(dict)
    for index, blueprint in blueprints.items():
        for arch in blueprint.supported_architectures():
            blueprint = blueprint.copy(update={"arch": arch})
            remote_file = cast(RemoteFile, blueprint.render_fetch(versions[index]))
            urls[index][arch] = str(remote_file.url)
    url_list = list(itertools.chain(*[u.values() for u in urls.values()]))
    results, fetch_errors = await fetch_urls(url_list)
    # remove blueprint we can't update because we could not fetch associated files
    for failed_url, exception in fetch_errors.items():
        for index, blueprint_urls in urls.items():
            if failed_url in blueprint_urls.values():
                errors[index] = exception
    blueprints = {i: b for i, b in blueprints.items() if i not in errors.keys()}
    # Assemble the release objects for the remaining blueprints.
    latest_releases: List[LatestRelease] = []
    for index, blueprint in blueprints.items():
        latest_releases.append(
            LatestRelease(
                blueprint_index=index,
                blueprint=blueprint,
                version=versions[index],
                fetch_results={arch: results[url] for arch, url in urls[index].items()},
            )
        )
    return latest_releases, errors
def find_latest_releases(
    blueprint_list: List[Blueprint], skip_names: Optional[List[str]] = None
) -> Tuple[List[LatestRelease], Dict[int, Ops2debError]]:
    """Synchronous entry point: run _find_latest_releases() to completion."""
    return asyncio.run(_find_latest_releases(blueprint_list, skip_names))
def update(
    configuration_path: Path,
    dry_run: bool = False,
    output_path: Optional[Path] = None,
    skip_names: Optional[List[str]] = None,
) -> None:
    """Update blueprint versions in the ops2deb configuration file.

    Args:
        configuration_path: path to the YAML configuration file.
        dry_run: when True, only report what would be updated.
        output_path: optional file receiving a one-line summary per update.
        skip_names: blueprint names excluded from the update.

    Raises:
        Ops2debUpdaterError: when one or more blueprints failed to update.
    """
    # Fixed: skip_names default of None now has a matching Optional[...]
    # annotation, and the "Did not found" log message grammar is corrected.
    yaml = ruamel.yaml.YAML(typ="rt")
    # Preserve round-trip formatting with indented block sequences.
    yaml.Emitter = FixIndentEmitter
    configuration_dict = load(configuration_path, yaml)
    blueprints = validate(configuration_dict)
    logger.title("Looking for new releases...")
    releases, errors = find_latest_releases(blueprints, skip_names)
    if not releases:
        logger.info("Did not find any updates")
    if dry_run is False and releases:
        # Apply each release to the raw configuration and write it back.
        for release in releases:
            release.update_configuration(configuration_dict)
        with configuration_path.open("w") as output:
            yaml.dump(configuration_dict, output)
        logger.info("Configuration file updated")
    if output_path is not None:
        # Summary file: one "Updated <name> from <old> to <new>" line each.
        lines = [
            f"Updated {r.blueprint.name} from {r.blueprint.version} to {r.version}"
            for r in releases
        ]
        output_path.write_text("\n".join(lines + [""]))
    if errors:
        raise Ops2debUpdaterError(f"{len(errors)} failures occurred")
|
from inflection import camelize
from jsonclasses.cdef import Cdef
from jsonclasses.jfield import JField
from jsonclasses.fdef import (
Fdef, ReadRule, WriteRule, Queryability, FType, FStore, Nullability
)
from jsonclasses.modifiers.required_modifier import RequiredModifier
from jsonclasses.modifiers.default_modifier import DefaultModifier
from .unary_sort_order import unary_sort_order
from .codable_struct import codable_struct, codable_struct_item
from .codable_enum import codable_enum, codable_enum_item
from .jtype_to_swift_type import jtype_to_swift_type
from ...utils.join_lines import join_lines
from ...utils.class_needs_api import class_needs_api
def data_class(cdef: Cdef) -> str:
    """Generate every Swift data structure for *cdef*, or '' when the class
    does not need API bindings."""
    if not class_needs_api(cdef):
        return ''
    sections = [
        _class_create_input(cdef),
        _class_update_input(cdef),
        _class_sort_orders(cdef),
        _class_result_picks(cdef),
        _class_query(cdef),
        _class_result(cdef),
        _class_result(cdef, partial=True),
    ]
    return join_lines(sections, 2)
def _class_create_input(cdef: Cdef) -> str:
    """Generate the Swift struct used as the create input of *cdef*."""
    members: list[str] = []
    for field in cdef.fields:
        if not _field_can_create(field):
            continue
        member_name = camelize(field.name, False)
        swift_type = jtype_to_swift_type(field.fdef, 'C')
        has_local_key = _is_field_local_key(field)
        # Local-key references are always optional on create; other fields
        # are optional only when not required.
        is_optional = True if has_local_key else not _is_field_required_for_create(field)
        members.append(
            codable_struct_item('public', 'var', member_name, swift_type, is_optional))
        if has_local_key:
            # Also expose the raw reference id as an optional String member.
            ref_id = _field_ref_id_name(field)
            members.append(
                codable_struct_item('public', 'var', ref_id, 'String', True))
    return codable_struct(to_create_input(cdef), members)
def _class_update_input(cdef: Cdef) -> str:
    """Generate the Swift struct used as the update input of *cdef*.

    Every member is optional, since updates are partial."""
    members: list[str] = []
    for field in cdef.fields:
        if not _field_can_update(field):
            continue
        member_name = camelize(field.name, False)
        swift_type = jtype_to_swift_type(field.fdef, 'U')
        members.append(
            codable_struct_item('public', 'var', member_name, swift_type, True))
        if _is_field_local_key(field):
            # Also expose the raw reference id as an optional String member.
            ref_id = _field_ref_id_name(field)
            members.append(
                codable_struct_item('public', 'var', ref_id, 'String', True))
    return codable_struct(to_update_input(cdef), members)
def _class_sort_orders(cdef: Cdef) -> str:
    """Generate the sort-order enum (ascending/descending case per sortable
    field) together with its unary-minus helper."""
    sortable = [
        camelize(field.name, False)
        for field in cdef.fields
        if _is_field_queryable(field)
        and not is_field_primary(field)
        and not is_field_ref(field)
        and _field_can_read(field)
    ]
    entries: list[str] = []
    for fname in sortable:
        # Ascending case uses the field name; descending prefixes "-".
        entries.append(codable_enum_item(fname, 'String', fname))
        entries.append(codable_enum_item(fname + 'Desc', 'String', "-" + fname))
    enum_name = to_sort_orders(cdef)
    enum_src = codable_enum(enum_name, 'String', entries)
    unary_src = unary_sort_order(enum_name, sortable, lambda s: s + 'Desc')
    return join_lines([enum_src, unary_src], 2)
def _class_result_picks(cdef: Cdef) -> str:
    """Generate the enum listing fields a caller may pick from results."""
    entries: list[str] = []
    for field in cdef.fields:
        if not _field_can_read(field):
            continue
        member_name = camelize(field.name, False)
        entries.append(codable_enum_item(member_name, 'String', member_name))
        if _is_field_local_key(field):
            # Reference fields also expose their raw id as a pickable member.
            ref_id = _field_ref_id_name(field)
            entries.append(codable_enum_item(ref_id, 'String', ref_id))
    return codable_enum(to_result_picks(cdef), 'String', entries)
def _class_query(cdef: Cdef) -> str:
    """Generate the Swift query struct: one optional query member per
    queryable field plus the shared sorting/projection/paging operators."""
    members: list[str] = []
    for field in cdef.fields:
        if not _is_field_queryable(field):
            continue
        member_name = camelize(field.name, False)
        swift_type = jtype_to_swift_type(field.fdef, 'Q')
        if is_field_ref(field):
            # References are queried through their id; only local keys apply.
            if not _is_field_local_key(field):
                continue
            members.append(codable_struct_item(
                'public', 'var', _field_ref_id_name(field), 'IDQuery', True, 'nil'))
        else:
            members.append(codable_struct_item(
                'public', 'var', member_name, swift_type, True, 'nil'))
    # Shared query operators appended after the per-field members.
    sort_type = array(to_sort_orders(cdef))
    picks_type = array(to_result_picks(cdef))
    members.extend([
        codable_struct_item('fileprivate', 'var', '_order', sort_type, True, 'nil'),
        codable_struct_item('fileprivate', 'var', '_pick', picks_type, True, 'nil'),
        codable_struct_item('fileprivate', 'var', '_omit', picks_type, True, 'nil'),
        codable_struct_item('fileprivate', 'var', '_limit', 'Int', True, 'nil'),
        codable_struct_item('fileprivate', 'var', '_skip', 'Int', True, 'nil'),
        codable_struct_item('fileprivate', 'var', '_pageNo', 'Int', True, 'nil'),
        codable_struct_item('fileprivate', 'var', '_pageSize', 'Int', True, 'nil'),
    ])
    return codable_struct(to_query(cdef), members)
def _class_result(cdef: Cdef, partial: bool = False) -> str:
    """Generate the Swift result struct for *cdef*.

    When *partial* is True every member is optional and the struct is named
    Partial<Name>."""
    members: list[str] = []
    for field in cdef.fields:
        if not _field_can_read(field):
            continue
        is_optional = True if partial else not _is_field_required_for_read(field)
        member_name = camelize(field.name, False)
        swift_type = jtype_to_swift_type(field.fdef, 'R')
        members.append(codable_struct_item(
            'public', 'let', member_name, swift_type, is_optional))
        if _is_field_local_key(field):
            # Reference fields also surface their raw id member.
            members.append(codable_struct_item(
                'public', 'let', _field_ref_id_name(field), 'String', is_optional))
    struct_name = to_result_partial(cdef) if partial else to_result(cdef)
    return codable_struct(struct_name, members)
def to_create_input(cdef: Cdef) -> str:
    """Swift name of the create-input struct for *cdef*."""
    return f'{cdef.name}CreateInput'


def to_update_input(cdef: Cdef) -> str:
    """Swift name of the update-input struct for *cdef*."""
    return f'{cdef.name}UpdateInput'


def to_sort_orders(cdef: Cdef) -> str:
    """Swift name of the sort-order enum for *cdef*."""
    return f'{cdef.name}SortOrder'


def to_result_picks(cdef: Cdef) -> str:
    """Swift name of the result-pick enum for *cdef*."""
    return f'{cdef.name}ResultPick'


def to_query(cdef: Cdef) -> str:
    """Swift name of the query struct for *cdef*."""
    return f'{cdef.name}Query'


def to_result(cdef: Cdef) -> str:
    """Swift name of the full result struct (the class name itself)."""
    return cdef.name


def to_result_partial(cdef: Cdef) -> str:
    """Swift name of the partial result struct for *cdef*."""
    return f'Partial{cdef.name}'
def _is_field_required_for_create(field: JField) -> bool:
    """A field is required on create when it is nonnull or carries a
    RequiredModifier, unless it has a default value."""
    if _field_has_default(field):
        return False
    if _is_field_nonnull(field):
        return True
    return any(isinstance(v, RequiredModifier) for v in field.types.modifier.vs)


def _is_field_required_for_read(field: JField) -> bool:
    """A field is always present in results when nonnull or required."""
    if _is_field_nonnull(field):
        return True
    return any(isinstance(v, RequiredModifier) for v in field.types.modifier.vs)


def _field_has_default(field: JField) -> bool:
    """A field has a default when it is nonnull or carries a DefaultModifier."""
    if _is_field_nonnull(field):
        return True
    return any(isinstance(v, DefaultModifier) for v in field.types.modifier.vs)


def _is_field_nonnull(field: JField) -> bool:
    """True only for linked (local/foreign key) list fields explicitly marked
    NONNULL."""
    if field.fdef.ftype != FType.LIST:
        return False
    if field.fdef.fstore not in (FStore.LOCAL_KEY, FStore.FOREIGN_KEY):
        return False
    return field.fdef.collection_nullability == Nullability.NONNULL
def _is_field_queryable(field: JField) -> bool:
    """Readable fields that are not marked unqueryable."""
    if field.fdef.read_rule == ReadRule.NO_READ:
        return False
    return field.fdef.queryability != Queryability.UNQUERYABLE


def _field_can_create(field: JField) -> bool:
    """Writable fields may appear in the create input."""
    return field.fdef.write_rule != WriteRule.NO_WRITE


def _field_can_update(field: JField) -> bool:
    """Updatable fields: writable, and not write-once when required."""
    rule = field.fdef.write_rule
    if rule == WriteRule.NO_WRITE:
        return False
    if rule == WriteRule.WRITE_ONCE and _is_field_required_for_create(field):
        return False
    return True


def _field_can_read(field: JField) -> bool:
    """Readable fields: not marked NO_READ and not stored as TEMP."""
    rule_ok = field.fdef.read_rule != ReadRule.NO_READ
    store_ok = field.fdef.fstore != FStore.TEMP
    return rule_ok and store_ok
def _is_field_local_key(field: JField) -> bool:
    """Whether the field stores its reference through a local key."""
    return field.fdef.fstore == FStore.LOCAL_KEY


def _field_ref_id_name(field: JField) -> str:
    """Swift name of the id member for a reference field, per the owning
    class's reference-key encoding strategy."""
    return field.cdef.jconf.ref_key_encoding_strategy(field)


def is_field_primary(field: JField) -> bool:
    """Whether the field is the primary key."""
    return field.fdef.primary


def is_field_ref(field: JField) -> bool:
    """Whether the field references another class (local or foreign key)."""
    return field.fdef.fstore in (FStore.LOCAL_KEY, FStore.FOREIGN_KEY)
def array(val: str) -> str:
    """Wrap a Swift type name in brackets, forming its array type."""
    return f'[{val}]'
|
<reponame>stevencdang/AutoML-DS-Components
# Author: <NAME>
# Class encapsulating a remote dataset with additional model prediction results
import logging
import os.path as path
import os
from io import IOBase
import json
from ls_dataset.d3m_dataset import D3MDataset
from ls_dataset.ls_prediction import LSPrediction
from ls_dataset.dsr_factory import DatasetResourceFactory
from ls_problem_desc.ls_problem import ProblemDesc
logger = logging.getLogger(__name__)
class D3MPrediction(D3MDataset, LSPrediction):
"""
Class representing a remote dataset with prediction results
"""
    def __init__(self, dspath, dsdata, ppath, prob_desc=None, pfiles=None):
        """Initialize a prediction dataset.

        inputs:
            dspath - the path to the dataset root
            dsdata - a dictionary containing the dataset metadata
            ppath - the path to the prediction results file(s) directory root
            prob_desc - the problem description schema that describes the
                prediction (optional)
            pfiles - the list of prediction result files (optional)
        """
        # Initialize both bases explicitly (cooperative multiple inheritance
        # is not used here; each base takes different arguments).
        D3MDataset.__init__(self, dspath, dsdata)
        LSPrediction.__init__(self, dspath, ppath, prob_desc, pfiles)
        logger.debug("Initializing D3M prediction dataset")
    @staticmethod
    def from_json(fpath):
        """Construct a D3MPrediction from a jsonified dataset file.

        :param fpath: path to the dataset json file, or an open file object
        :return: a new D3MPrediction
        :raises Exception: when fpath is neither an existing file path nor
            an open file object
        """
        if isinstance(fpath, str):
            if path.exists(fpath):
                #Get dataset path from json path
                with open(fpath, 'r') as f:
                    ds_json = json.load(f)
            else:
                # logger.error("Found no dataset json at path: %s" % str(fpath))
                raise Exception("Found no dataset json at path: %s" % str(fpath))
        elif isinstance(fpath, IOBase):
            logger.debug("Loading dataset json from open file")
            ds_json = json.load(fpath)
        else:
            # logger.error("Found no dataset json at path: %s" % str(fpath))
            raise Exception("Found no dataset json at path: %s" % str(fpath))
        logger.debug("got dataset json: %s" % str(ds_json))
        # Keep only the D3M schema fields for the constructor call below.
        json_doc = {'about': ds_json['about'],
                    'dataResources': ds_json['dataResources']
                    }
        dpath = ds_json['dataset_info']['root_path']
        # NOTE(review): dname is computed but never used below.
        dname = ds_json['dataset_info']['dataset_dir'].rsplit("_", 1)[0]
        # Both branches build the same intermediate D3MDataset; it is only
        # used to look up the default problem description.
        if isinstance(fpath, str):
            logger.debug("Creating D3mDataset with fpath: %s\nMetadata: %s" % (dpath, str(ds_json)))
            # ds = D3MDataset(fpath, ds_json['about'])
            ds = D3MDataset(dpath, ds_json)
        elif isinstance(fpath, IOBase):
            logger.debug("Creating D3mDataset with fpath: %s\nMetadata: %s" % (dpath, str(ds_json)))
            # ds = D3MDataset(fpath.name, ds_json['about'])
            ds = D3MDataset(dpath, ds_json)
        logger.debug("Creating problem description")
        logger.debug("Got default problem: %s" % str(ProblemDesc.get_default_problem(ds)))
        prob_desc = ProblemDesc.from_json(ProblemDesc.get_default_problem(ds))
        # prob_desc = ProblemDesc.from_json(
        #     LSPrediction.get_default_problem(ds_json['dataset_info']['root_path']))
        # Prediction outputs live under "<root>/<dataset_dir>/output".
        return D3MPrediction(dpath,
                             json_doc,
                             path.join(dpath, ds_json['dataset_info']['dataset_dir'], 'output'),
                             prob_desc=prob_desc,
                             pfiles=ds_json['pred_info']['pred_files']
                             )
def to_json(self, fpath=None):
"""
Write the dataset to info to file and return a string with the json. If no path is given,
then just returns a string with the json representation of the dataset json
"""
out = json.loads(super().to_json())
pred_out = json.loads(super().to_json())
out['pred_info'] = pred_out['pred_info']
if fpath is not None:
logger.debug("Writing dataset json to: %s" % fpath)
out_file = open(fpath, 'w')
json.dump(out, out_file)
out_file.close()
return json.dumps(out)
def __str__(self):
return self.to_json()
|
<filename>questions/60742/main.py
from urllib.request import Request, urlopen
import pandas as pd
import geopandas as gpd
from matplotlib import pyplot as plt
import seaborn as sns
import matplotlib.colors as mcol

plt.style.use("seaborn")

# --- Download 2020 NYC election-district-level presidential results ---
# A browser User-Agent is required; vote.nyc rejects urllib's default one.
req = Request(
    "https://vote.nyc/sites/default/files/pdf/election_results/2020/20201103General%20Election/00000100000Citywide%20President%20Vice%20President%20Citywide%20EDLevel.csv"
)
req.add_header(
    "User-Agent",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36",
)
content = urlopen(req)
# Columns: assembly district (AD), election district (ED), ballot line, votes.
# NOTE(review): column positions assumed from the 2020 file layout — verify.
df_2020 = pd.read_csv(
    content,
    usecols=[11, 12, 20, 21],
    names=["AD", "ED", "Party", "Votes"],
    thousands=",",
)
# Keep only the four presidential ballot lines (candidate names anonymized upstream).
df_2020 = df_2020[
    df_2020["Party"].isin(
        [
            "<NAME> / <NAME> (Republican)",
            "<NAME> / <NAME> (Democratic)",
            "<NAME> / <NAME> (Conservative)",
            "<NAME> / <NAME> (Working Families)",
        ]
    )
]
# Precinct key "EED/AD" (ED zero-padded to 3) matches the shapefile's encoding below.
df_2020["Precinct"] = df_2020.apply(
    lambda x: f"{str(x['ED']).zfill(3)}/{x['AD']}", axis=1
)
df_2020 = df_2020.pivot(index="Precinct", columns="Party", values="Votes").reset_index()
# Two-party totals: fold each candidate's fusion ballot line into their total.
df_2020["Trump"] = df_2020["<NAME>. Trump / <NAME> (Republican)"].astype(
    int
) + df_2020["<NAME> / <NAME> (Conservative)"].astype(int)
df_2020["Biden"] = df_2020["<NAME> / <NAME> (Democratic)"].astype(
    int
) + df_2020["<NAME> / <NAME> (Working Families)"].astype(int)
df_2020 = df_2020[["Precinct", "Trump", "Biden"]]
df_2020["Trump Pct"] = df_2020["Trump"] / (df_2020["Biden"] + df_2020["Trump"]) * 100
# Drop precincts with zero recorded votes (0/0 division yields NaN).
df_2020 = df_2020[~df_2020["Trump Pct"].isna()]

# --- Download 2016 results (different column layout than 2020) ---
req = Request(
    "https://vote.nyc/sites/default/files/pdf/election_results/2016/20161108General%20Election/00000100000Citywide%20President%20Vice%20President%20Citywide%20EDLevel.csv"
)
req.add_header(
    "User-Agent",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36",
)
content = urlopen(req)
df_2016 = pd.read_csv(
    content, usecols=[0, 1, 9, 10], names=["AD", "ED", "Party", "Votes"], thousands=","
)
df_2016 = df_2016[
    df_2016["Party"].isin(
        [
            "<NAME> / <NAME> (Republican)",
            "<NAME> / <NAME> (Women's Equality)",
            "<NAME> / <NAME> (Conservative)",
            "<NAME> / <NAME> (Working Families)",
            "<NAME> / <NAME> (Democratic)",
        ]
    )
]
df_2016["Precinct"] = df_2016.apply(
    lambda x: f"{str(x['ED']).zfill(3)}/{x['AD']}", axis=1
)
df_2016 = df_2016.pivot(index="Precinct", columns="Party", values="Votes").reset_index()
# 2016 vote counts arrive as strings with thousands separators after the
# pivot, so each column is cleaned (strip commas, NaN->0) before summing.
df_2016["Trump"] = df_2016[
    "<NAME> / <NAME> (Republican)"
].str.replace(",", "").fillna(0).astype(int) + df_2016[
    "<NAME> / <NAME> (Conservative)"
].str.replace(
    ",", ""
).fillna(
    0
).astype(
    int
)
df_2016["Clinton"] = (
    df_2016["<NAME> / <NAME> (Women's Equality)"]
    .str.replace(",", "")
    .fillna(0)
    .astype(int)
    + df_2016["<NAME> / <NAME> (Working Families)"]
    .str.replace(",", "")
    .fillna(0)
    .astype(int)
    + df_2016["<NAME> / <NAME> (Democratic)"]
    .str.replace(",", "")
    .fillna(0)
    .astype(int)
)
df_2016 = df_2016[["Precinct", "Trump", "Clinton"]]
df_2016["Trump Pct"] = df_2016["Trump"] / (df_2016["Clinton"] + df_2016["Trump"]) * 100
df_2016 = df_2016[~df_2016["Trump Pct"].isna()]

# Output stats
print(df_2016["Trump Pct"].describe(percentiles=[0.025, 0.25, 0.5, 0.75, 0.975]))
print(df_2020["Trump Pct"].describe(percentiles=[0.025, 0.25, 0.5, 0.75, 0.975]))
# Outer merge keeps precincts present in only one of the two years.
df_merged = df_2020.merge(
    df_2016, on="Precinct", how="outer", suffixes=("_2020", "_2016")
)[["Precinct", "Trump Pct_2020", "Trump Pct_2016"]]

# Plot graph: distribution of Trump two-party share per precinct, by year.
fig, ax = plt.subplots()
sns.boxenplot(
    y="year",
    x="value",
    data=df_merged.melt(id_vars=["Precinct"], var_name="year"),
    ax=ax,
)
ax.set_xlabel("Trump Vote Share (%)")
ax.set_yticklabels(["2020", "2016"])
ax.set_ylabel("Year")
ax.set_title("Distribution of Trump vote share in NYC precincts")
ax.set_xticks(list(range(0, 105, 5)))
plt.savefig("graph.png", bbox_inches="tight", pad_inches=0, dpi=400)

# Remove ED 61-64, no data for 2016
df_merged = df_merged[~df_merged["Precinct"].str.contains("/64|/63|/62|/61")]
# Calculate change in Trump vote %
df_merged["Shift"] = df_merged["Trump Pct_2020"] - df_merged["Trump Pct_2016"]
# Shapefile from https://geodata.lib.berkeley.edu/catalog/nyu-2451-34548
map_df = gpd.read_file("nyu_2451_34548.shp").to_crs(epsg=2163)
# ElectDist encodes AD then ED as one number; re-split into "ED/AD" to join.
map_df["Precinct"] = map_df["ElectDist"].apply(lambda x: f"{str(x)[2:]}/{str(x)[:2]}")
map_merged = map_df.merge(df_merged, on="Precinct", how="inner")

# Plot map: blue-white-red diverging colormap centered on zero shift.
fig, ax = plt.subplots()
cm1 = mcol.LinearSegmentedColormap.from_list("RWB", ["b", "w", "r"])
map_merged.plot(
    column="Shift",
    ax=ax,
    cmap=cm1,
    legend=True,
    legend_kwds={"shrink": 0.7},
    edgecolor="0.5",
    linewidth=0.25,
    missing_kwds=dict(
        color="lightgrey",
    ),
)
ax.axis("off")
ax.set_title("Increase in Trump two-party vote share: 2016-2020")
plt.savefig("map.png", bbox_inches="tight", pad_inches=0, dpi=500)
|
<filename>api/conftest.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import os
import pytest
from passlib.hash import bcrypt
from starlette.testclient import TestClient
from apit import app
from app.db import DBPrediction, DBUser
client = TestClient(app)

# Credentials for the test user are injected via environment variables.
API_USER_NAME = os.environ.get("API_NEW_USER_NAME")
# FIX: this line was corrupted by redaction; reconstructed to mirror the
# API_USER_NAME lookup above.
API_USER_PASSWORD = os.environ.get("API_NEW_USER_PASSWORD")
@pytest.fixture(scope="module")
def test_app():
    """Yield a FastAPI test client shared across this module's tests."""
    app_client = TestClient(app)
    yield app_client
@pytest.fixture
def user_auth_headers(test_app, monkeypatch):
    """
    Return JWT-authentication request headers for authenticated routes.

    Parameters
    ----------
    test_app : fastapi.app
        fastapi app object
    monkeypatch : _pytest.monkeypatch
        fixture to mock single-user existence, authenticate and return header
    """
    # Mock existence of single user in users table.
    # FIX: the hashing/dict/password lines were corrupted by redaction;
    # reconstructed from the intact pattern in mockfunc_get_one_by_username.
    @classmethod
    async def mock_get_one_by_username(cls, username):
        hashed_pwd = bcrypt.hash(API_USER_PASSWORD)
        d = {"id": 1, "username": API_USER_NAME, "password_hash": hashed_pwd}
        return d

    monkeypatch.setattr(DBUser, "get_one_by_username", mock_get_one_by_username)
    # Authenticate user with JWT, and retrieve generated JWT for use in
    # headers with routes that require user authentication
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    r = test_app.post(
        "/token",
        data={
            "grant_type": "",
            "username": API_USER_NAME,
            "password": API_USER_PASSWORD,
            "scope": "",
            "client_id": "",
            "client_secret": "",
        },
        headers=headers,
    )
    assert r.status_code == 200
    response_dict = json.loads(r.text)
    assert list(response_dict) == ["access_token", "token_type"]
    # Create header with JWT generated above and return header for use with
    # routes that require user authentication
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"bearer {response_dict['access_token']}",
    }
    return headers
@pytest.fixture
def get_duplicate_user_by_username(monkeypatch):
    """Mock user model method to ensure no duplicate user in users table."""

    @classmethod
    async def _fake_get_one_by_username(cls, username):
        """
        Return user whose username is already in the users table.

        Attributes
        ----------
        username : str
            name for user

        Returns
        -------
        user : dict
            databases object, None means no matching user in users table
        """
        return None

    @classmethod
    async def _fake_create(cls, notes):
        """Add user to users table (no-op for tests)."""
        pass

    monkeypatch.setattr(DBUser, "get_one_by_username", _fake_get_one_by_username)
    monkeypatch.setattr(DBUser, "create", _fake_create)
@pytest.fixture
def get_all_users(monkeypatch):
    """Mock existence of multiple users in users table."""

    @classmethod
    async def mock_get_all(cls):
        """Return all users from the users table."""
        # FIX: corrupted redacted lines reconstructed — bcrypt-hash the test
        # password and return it under "password_hash".
        hashed_pwd = bcrypt.hash(API_USER_PASSWORD)
        d = [{"id": 1, "username": API_USER_NAME, "password_hash": hashed_pwd}]
        return d

    monkeypatch.setattr(DBUser, "get_all", mock_get_all)
@pytest.fixture
def get_single_user_by_username(monkeypatch):
    """Mock existence of single user in users table."""

    @classmethod
    async def mock_get_one(cls, id):
        """Return single user, by user id, from the users table."""
        # FIX: corrupted redacted lines reconstructed from the pattern used
        # by the other user mocks in this module.
        hashed_pwd = bcrypt.hash(API_USER_PASSWORD)
        d = {"id": 1, "username": API_USER_NAME, "password_hash": hashed_pwd}
        return d

    monkeypatch.setattr(DBUser, "get_one", mock_get_one)
@pytest.fixture
def multiple_new_predictions(monkeypatch):
    """Mock existence of multiple predictions in predictions table."""
    prediction_rows = [
        {
            "id": 1,
            "url": (
                "https://www.theguardian.com/science/2020/feb/13/not-just-a-"
                "space-potato-nasa-unveils-astonishing-details-of-most-"
                "distant-object-ever-visited-arrokoth"
            ),
            "text": "some text here",
            "user_id": 1,
        }
    ]

    @classmethod
    async def _fake_get_all(cls):
        """Return the canned prediction rows."""
        return prediction_rows

    monkeypatch.setattr(DBPrediction, "get_all", _fake_get_all)
    return prediction_rows
@pytest.fixture
def single_new_prediction(monkeypatch):
    """Mock existence of single prediction in predictions table."""
    prediction_row = {
        "id": 1,
        "url": (
            "https://www.theguardian.com/science/2020/feb/13/not-just-a"
            "-space-potato-nasa-unveils-astonishing-details-of-most-"
            "distant-object-ever-visited-arrokoth"
        ),
        "text": "some text here",
        "user_id": 1,
    }

    @classmethod
    async def _fake_get_one_by_url(cls, url):
        """Return the canned prediction row for any url."""
        return prediction_row

    monkeypatch.setattr(DBPrediction, "get_one_by_url", _fake_get_one_by_url)
    return prediction_row
@pytest.fixture
def create_multiple_new_predictions(monkeypatch):
    """
    Mock prediction model method to ensure no duplicate prediction in
    predictions table.
    """

    @classmethod
    async def mockfunc_get_one_by_username(cls, username):
        """Return a user record from the users table."""
        hashed_pwd = bcrypt.hash(API_USER_PASSWORD)
        # FIX: corrupted redacted value reconstructed — return the hash
        # computed on the line above.
        d = {"id": 1, "username": API_USER_NAME, "password_hash": hashed_pwd}
        return d

    @classmethod
    async def mock_get_one_by_url(cls, url):
        """
        Return None to indicate no pre-existing prediction with specified url
        in the predictions table.
        """
        return None

    @classmethod
    async def mock_create_new_record(cls, notes):
        """
        Do nothing, to allow new prediction to be added to predictions table.
        """
        pass

    monkeypatch.setattr(
        DBUser, "get_one_by_username", mockfunc_get_one_by_username
    )
    monkeypatch.setattr(DBPrediction, "get_one_by_url", mock_get_one_by_url)
    monkeypatch.setattr(DBPrediction, "create", mock_create_new_record)
|
<reponame>juiceinc/recipe
"""Test Recipe against multiple database engines"""
import os
from datetime import date, datetime
from sqlalchemy import Column, Date, DateTime, Integer, MetaData, String, Table, insert
from recipe import Recipe, Shelf, get_oven
def str_dedent(s):
    """Strip leading whitespace from every line of *s* and drop leading newlines."""
    stripped_lines = (line.lstrip() for line in s.split("\n"))
    return "\n".join(stripped_lines).lstrip("\n")
class TestRecipeSQLServer(object):
    """Integration tests for Recipe against a live SQL Server database.

    All tests self-skip (return early) when SQL_SERVER_CONNECTION_STR
    is not set in the environment.
    """
    def setup(self):
        # Build a throwaway "foo" table with two rows and a YAML-defined shelf.
        connection_string = os.environ.get("SQL_SERVER_CONNECTION_STR", None)
        self.skip_tests = False
        if connection_string is None:
            # No database available; every test checks this flag and bails out.
            self.skip_tests = True
            return
        self.oven = get_oven(connection_string)
        self.meta = MetaData(bind=self.oven.engine)
        self.session = self.oven.Session()
        self.table = Table(
            "foo",
            self.meta,
            Column("first", String),
            Column("last", String),
            Column("age", Integer),
            Column("birth_date", Date),
            Column("dt", DateTime),
            extend_existing=True,
        )
        self.meta.create_all(self.oven.engine)
        data = [
            {
                "first": "hi",
                "last": "there",
                "age": 5,
                "birth_date": date(2015, 1, 1),
                "dt": datetime(2005, 12, 1, 12, 15),
            },
            {
                "first": "hi",
                "last": "fred",
                "age": 10,
                "birth_date": date(2015, 5, 15),
                "dt": datetime(2013, 10, 15, 5, 20, 10),
            },
        ]
        with self.oven.engine.connect() as conn:
            for row in data:
                conn.execute(insert(self.table).values(**row))
        self.shelf = self.shelf_from_yaml(
            """
            _version: 2
            first:
                kind: Dimension
                field: first
            last:
                kind: Dimension
                field: last
            firstlast:
                kind: Dimension
                field: "first + last"
                id_field: first
            age:
                kind: Measure
                field: sum(age)
            test_month:
                kind: Dimension
                field: month(birth_date)
            year_by_format:
                kind: Dimension
                field: dt
                format: "%Y"
            count:
                kind: Measure
                field: count(*)
            """,
            self.table,
        )
    def teardown(self):
        # Drop the scratch table only if setup actually created it.
        if not self.skip_tests:
            self.meta.drop_all(self.oven.engine)
    def shelf_from_yaml(self, yaml_config, selectable):
        """Create a shelf directly from configuration"""
        return Shelf.from_validated_yaml(yaml_config, selectable)
    def recipe(self, **kwargs):
        # Convenience: a Recipe bound to this test's shelf and session.
        return Recipe(shelf=self.shelf, session=self.session, **kwargs)
    def assertRecipeCSV(self, recipe, content):
        # Compare the recipe's CSV output to a dedented expected literal;
        # normalize line endings so the comparison is platform-independent.
        actual = recipe.dataset.csv.replace("\r\n", "\n")
        expected = str_dedent(content)
        assert actual == expected
    def test_dimension(self):
        """Plain and compound (id_field) dimensions group correctly."""
        if self.skip_tests:
            return
        recipe = self.recipe().metrics("age", "count").dimensions("first")
        self.assertRecipeCSV(
            recipe,
            """
            first,age,count,first_id
            hi,15,2,hi
            """,
        )
        recipe = self.recipe().metrics("age").dimensions("firstlast")
        self.assertRecipeCSV(
            recipe,
            """
            firstlast_id,firstlast,age,firstlast_id
            hi,hifred,10,hi
            hi,hithere,5,hi
            """,
        )
    def test_dates_and_Datetimes(self):
        """We can convert dates using formats"""
        if self.skip_tests:
            return
        # A "%Y" format dimension truncates datetimes to the year.
        recipe = (
            self.recipe()
            .dimensions("year_by_format")
            .metrics("count")
            .order_by("year_by_format")
        )
        self.assertRecipeCSV(
            recipe,
            """
            year_by_format,count,year_by_format_id
            2005-01-01 00:00:00,1,2005-01-01 00:00:00
            2013-01-01 00:00:00,1,2013-01-01 00:00:00
            """,
        )
        # Descending order via the "-" prefix.
        recipe = (
            self.recipe()
            .dimensions("year_by_format")
            .metrics("count")
            .order_by("-year_by_format")
        )
        self.assertRecipeCSV(
            recipe,
            """
            year_by_format,count,year_by_format_id
            2013-01-01 00:00:00,1,2013-01-01 00:00:00
            2005-01-01 00:00:00,1,2005-01-01 00:00:00
            """,
        )
        # Test a month() conversion
        recipe = (
            self.recipe()
            .dimensions("test_month")
            .metrics("age", "count")
            .order_by("-test_month")
        )
        self.assertRecipeCSV(
            recipe,
            """
            test_month,age,count,test_month_id
            2015-05-01,10,1,2015-05-01
            2015-01-01,5,1,2015-01-01
            """,
        )
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import sys
import re
import time
import subprocess
from fabric.api import *
from contrail_provisioning.database.base import DatabaseCommon
from contrail_provisioning.database.templates import cassandra_create_user_template
class DatabaseSetup(DatabaseCommon):
def __init__(self, args_str = None):
super(DatabaseSetup, self).__init__()
self._args = None
if not args_str:
args_str = ' '.join(sys.argv[1:])
self.global_defaults = {
'dir' : '/usr/share/cassandra',
'database_listen_ip' : '127.0.0.1',
'cfgm_ip': '127.0.0.1',
'minimum_diskGB': '256',
'collector_ip_list':['127.0.0.1']
}
self.parse_args(args_str)
self.database_listen_ip = self._args.self_ip
self.database_seed_list = self._args.seed_list
self.database_dir = self._args.dir
def parse_args(self, args_str):
'''
Eg. setup-vnc-database
--self_ip 10.84.13.23
--dir /usr/share/cassandra
--initial_token 0 --seed_list 10.84.13.23 10.84.13.24
--data_dir /home/cassandra
--zookeeper_ip_list 10.1.5.11 10.1.5.12
--collector_ip_list 10.1.5.11 10.1.5.12
--database_index 1
--node_to_delete 10.1.5.11
'''
parser = self._parse_args(args_str)
parser.add_argument("--self_ip", help = "IP Address of this database node")
parser.add_argument("--cfgm_ip", help = "IP Address of the config node")
if self.pdist in ['fedora', 'centos', 'redhat']:
parser.add_argument("--dir", help = "Directory where database binary exists", default = '/usr/share/cassandra')
if self.pdist in ['Ubuntu']:
parser.add_argument("--dir", help = "Directory where database binary exists", default = '/etc/cassandra')
parser.add_argument("--initial_token", help = "Initial token for database node")
parser.add_argument("--seed_list", help = "List of seed nodes for database", nargs='+')
parser.add_argument("--data_dir", help = "Directory where database stores data")
parser.add_argument("--analytics_data_dir", help = "Directory where database stores analytics data")
parser.add_argument("--ssd_data_dir", help = "SSD directory that database stores data")
parser.add_argument("--zookeeper_ip_list", help = "List of IP Addresses of zookeeper servers",
nargs='+', type=str)
parser.add_argument("--collector_ip_list", help = "List of IP Addresses of collector servers",
nargs='+', type=str)
parser.add_argument("--database_index", help = "The index of this databse node")
parser.add_argument("--minimum_diskGB", help = "Required minimum disk space for contrail database")
parser.add_argument("--kafka_broker_id", help = "The broker id of the database node")
parser.add_argument("--node_to_delete", help = "The DB node to remove from the cluster")
parser.add_argument("--cassandra_user", help = "Cassandra user name if provided")
parser.add_argument("--cassandra_password", help = "Cassandra password if provided")
parser.add_argument("--opscenter_ip", help = "IP Address of webui/opscenter node")
self._args = parser.parse_args(self.remaining_argv)
def create_analytics_data_dir(self, data_dir, cass_data_dir,
analytics_dir, analytics_dir_link=None):
if analytics_dir_link:
verify_dir = analytics_dir_link
else:
verify_dir = analytics_dir
if not os.path.exists(verify_dir):
if not os.path.exists(data_dir):
local("sudo mkdir -p %s" % (data_dir))
local("sudo chown -R cassandra: %s" % (data_dir))
if not os.path.exists(cass_data_dir):
local("sudo mkdir -p %s" % (cass_data_dir))
local("sudo chown -R cassandra: %s" % (cass_data_dir))
if not os.path.exists(analytics_dir):
local("sudo mkdir -p %s" % (analytics_dir))
local("sudo chown -R cassandra: %s" % (analytics_dir))
if analytics_dir_link:
local("sudo ln -s %s %s" % (analytics_dir, analytics_dir_link))
local("sudo chown -h cassandra: %s" % (analytics_dir_link))
def setup_analytics_data_dir(self):
data_dir = self._args.data_dir
analytics_data_dir = self._args.analytics_data_dir
if self.is_cql_supported():
CASSANDRA_ANALYTICS_KEYSPACE = 'ContrailAnalyticsCql'
else:
CASSANDRA_ANALYTICS_KEYSPACE = 'ContrailAnalytics'
if not data_dir:
data_dir = '/var/lib/cassandra'
cass_data_dir = os.path.join(data_dir, 'data')
else:
cass_data_dir = data_dir
if analytics_data_dir:
analytics_dir_link = os.path.join(cass_data_dir,
CASSANDRA_ANALYTICS_KEYSPACE)
analytics_dir = os.path.join(analytics_data_dir,
CASSANDRA_ANALYTICS_KEYSPACE)
self.create_analytics_data_dir(data_dir, cass_data_dir,
analytics_dir, analytics_dir_link)
else:
analytics_dir = os.path.join(cass_data_dir,
CASSANDRA_ANALYTICS_KEYSPACE)
self.create_analytics_data_dir(data_dir, cass_data_dir,
analytics_dir)
disk_cmd = "df -Pk " + analytics_dir + " | grep % | awk '{print $2}'"
total_disk = local(disk_cmd, capture = True).strip()
if (int(total_disk)/(1024*1024) < int(self._args.minimum_diskGB)):
raise RuntimeError('Minimum disk space for analytics db is not met')
def fixup_config_files(self):
self.fixup_etc_hosts_file(self.database_listen_ip, self.hostname)
self.fixup_cassandra_config_file(self.database_listen_ip,
self.database_seed_list,
self._args.data_dir,
self._args.ssd_data_dir,
cluster_name='Contrail',
user=self._args.cassandra_user)
self.fixup_datastax_config_file(self._args.opscenter_ip)
self.setup_analytics_data_dir()
self.fixup_cassandra_env_config()
self.fixup_contrail_database_nodemgr()
#self.fixup_zookeeper_configs()
self.fixup_kafka_server_properties(self.database_listen_ip)
def fixup_kafka_server_properties(self, listen_ip):
#Update the broker id of the /usr/share/kafka/config/server.properties
KAFKA_SERVER_PROPERTIES='/usr/share/kafka/config/server.properties'
cnd = os.path.exists(KAFKA_SERVER_PROPERTIES)
if not cnd:
raise RuntimeError('%s does not appear to be a kafka config directory' % KAFKA_SERVER_PROPERTIES)
if self._args.kafka_broker_id is not None:
self.replace_in_file(KAFKA_SERVER_PROPERTIES, 'broker.id=', 'broker.id='+self._args.kafka_broker_id)
#Handling for Kafka-0.8.3
self.replace_in_file(KAFKA_SERVER_PROPERTIES, '#port=9092', 'port=9092')
self.replace_in_file(KAFKA_SERVER_PROPERTIES, \
'listeners=PLAINTEXT://:9092','#listeners=PLAINTEXT://:9092')
#Add all the zoo keeper server address to the server.properties file
zk_list = [server + ":2181" for server in self._args.zookeeper_ip_list]
zk_list_str = ','.join(map(str, zk_list))
self.replace_in_file(KAFKA_SERVER_PROPERTIES, 'zookeeper.connect=.*', 'zookeeper.connect='+zk_list_str)
self.replace_in_file(KAFKA_SERVER_PROPERTIES, '#advertised.host.name=<hostname routable by clients>',\
'advertised.host.name='+listen_ip)
#Set partitioning and retention policy
self.replace_in_file(KAFKA_SERVER_PROPERTIES, 'num.partitions=.*',
'num.partitions=30')
self.replace_in_file(KAFKA_SERVER_PROPERTIES, '#log.retention.bytes=.*',
'log.retention.bytes=1073741824')
self.replace_in_file(KAFKA_SERVER_PROPERTIES, 'log.retention.bytes=.*',
'log.retention.bytes=268435456')
self.replace_in_file(KAFKA_SERVER_PROPERTIES, 'log.segment.bytes=.*',
'log.segment.bytes=268435456')
self.replace_in_file(KAFKA_SERVER_PROPERTIES, 'log.retention.hours=.*',
'log.retention.hours=24')
self.replace_in_file(KAFKA_SERVER_PROPERTIES, 'log.cleanup.policy=.*',
'log.cleanup.policy=delete')
self.replace_in_file(KAFKA_SERVER_PROPERTIES, 'log.cleaner.threads=.*',
'log.cleaner.threads=2')
self.replace_in_file(KAFKA_SERVER_PROPERTIES, 'log.cleaner.dedupe.buffer.size=.*',
'log.cleaner.dedupe.buffer.size=250000000')
# Set log compaction and topic delete options
self.replace_in_file(KAFKA_SERVER_PROPERTIES, 'log.cleaner.enable=false','log.cleaner.enable=true')
if not self.file_pattern_check(KAFKA_SERVER_PROPERTIES, 'log.cleanup.policy=delete'):
local('sudo echo "log.cleanup.policy=delete" >> %s' % KAFKA_SERVER_PROPERTIES)
if not self.file_pattern_check(KAFKA_SERVER_PROPERTIES, 'delete.topic.enable=true'):
local('sudo echo "delete.topic.enable=true" >> %s' % KAFKA_SERVER_PROPERTIES)
if not self.file_pattern_check(KAFKA_SERVER_PROPERTIES, 'log.cleaner.threads=2'):
local('sudo echo "log.cleaner.threads=2" >> %s' % KAFKA_SERVER_PROPERTIES)
if not self.file_pattern_check(KAFKA_SERVER_PROPERTIES, 'log.cleaner.dedupe.buffer.size=250000000'):
local('sudo echo "log.cleaner.dedupe.buffer.size=250000000" >> %s' % KAFKA_SERVER_PROPERTIES)
#Set replication factor to 2 if more than one kafka broker is available
if (len(self._args.seed_list) > 1 or len(self._args.seed_list[0].split(','))>1):
if not self.file_pattern_check(KAFKA_SERVER_PROPERTIES, 'default.replication.factor'):
local('sudo echo "default.replication.factor=2" >> %s' % (KAFKA_SERVER_PROPERTIES))
KAFKA_LOG4J_PROPERTIES='/usr/share/kafka/config/log4j.properties'
cnd = os.path.exists(KAFKA_LOG4J_PROPERTIES)
if not cnd:
raise RuntimeError('%s does not appear to be a kafka logs config' % KAFKA_LOG4J_PROPERTIES)
local("sudo sed -i 's/DailyRollingFileAppender/RollingFileAppender/g' %s" % KAFKA_LOG4J_PROPERTIES)
local("sudo sed -i \"s/DatePattern='.'yyyy-MM-dd-HH/MaxBackupIndex=10/g\" %s" % KAFKA_LOG4J_PROPERTIES)
def fixup_contrail_database_nodemgr(self):
conf_file = '/etc/contrail/contrail-database-nodemgr.conf'
config_vals = {
'DEFAULTS' : {
'minimum_diskGB' : self._args.minimum_diskGB,
'hostip' : self.database_listen_ip,
},
'COLLECTOR' : {
'server_list' : ' '.join('%s:%s' %(server, '8086')
for server in self._args.collector_ip_list)
},
}
for section, parameter_values in config_vals.items():
for parameter, value in parameter_values.items():
self.set_config(conf_file, section, parameter, value)
# end fixup_contrail_database_nodemgr
def create_cassandra_user(self):
template_vals = {
'__cassandra_user__': self._args.cassandra_user,
'__cassandra_password__': self._args.cassandra_password,
}
self._template_substitute_write(cassandra_create_user_template.template,
template_vals, self._temp_dir_name + '/cassandra_create_user')
local("sudo mv %s/cassandra_create_user /etc/contrail/cassandra_create_user" %(self._temp_dir_name))
connected=False
retry_threshold = 10
retry = 1
while connected == False and retry < retry_threshold:
#create account using cql
status = subprocess.Popen('sudo cqlsh %s -u cassandra -p cassandra -f /etc/contrail/cassandra_create_user' % self.database_listen_ip, shell=True,stderr = subprocess.PIPE,stdout=subprocess.PIPE).stderr.read()
if 'already exists' in status or not status:
print 'connection made'
connected = True
else:
print status
retry = retry + 1
time.sleep(5)
return connected
def run_services(self):
local("sudo database-server-setup.sh %s" % (self.database_listen_ip))
#If user name and passwd provided setit up in cassandra before starting the database service
if self._args.cassandra_user is not None:
assert(self.create_cassandra_user())
#Checks if a pattern is present in the file or not
def file_pattern_check(self, file_name, regexp):
rs = re.compile(regexp)
with open(file_name, 'r') as f:
for line in f:
match = rs.search(line)
if match:
return True
return False
def restart_zookeeper(self):
local('sudo service zookeeper restart')
def update_seed_list(self):
conf_dir = self.cassandra.conf_dif
conf_file = os.path.join(conf_dir, self.cassandra.conf_file)
if self._args.seed_list:
self.replace_in_file(conf_file, ' - seeds:*', ' - seeds: "' + ", ".join(self._args.seed_list) + '"')
local("sudo service contrail-database restart")
def decommission_db_node(self):
print "Decommissioning node %s from cluster. This might take a long time" % self._args.self_ip
local("nodetool decommission")
is_decommissioned = local('nodetool netstats | grep "Mode: DECOMMISSIONED"').succeeded
if not is_decommissioned:
raise RuntimeError("Error while decommissioning %s from the DB cluster", del_db_node)
local("service supervisor-database stop")
def remove_db_node(self):
print "Removing node %s from cluster. This might take a long time" % self._args.node_to_delete
with settings(warn_only = True):
node_uuid = local('nodetool status | grep %s | awk \'{print $7}\'' % self._args.node_to_delete, capture = True)
if node_uuid:
local("nodetool removenode %s" % node_uuid)
else:
print "Node %s was never part of the cluster", self._args.node_to_delete
return
with settings(warn_only = True):
is_removed = local('nodetool status | grep %s' % self._args.node_to_delete).failed
if not is_removed:
raise RuntimeError("Error while removed node %s from the DB cluster", self._args.node_to_delete)
def restart(self):
#local('service zookeeper restart')
local('service contrail-database restart')
local('service supervisor-database restart')
def main(args_str = None):
    """Entry point: parse provisioning args and run the full database setup."""
    DatabaseSetup(args_str).setup()
def update_zookeeper_servers(args_str = None):
    """Re-apply zookeeper and kafka configuration for this node."""
    setup = DatabaseSetup(args_str)
    setup.fixup_zookeeper_configs()
    setup.fixup_kafka_server_properties(setup.database_listen_ip)
def restart_zookeeper_server(args_str = None):
    """Restart the zookeeper service on this node."""
    DatabaseSetup(args_str).restart_zookeeper()
def readjust_seed_list(args_str = None):
    """Rewrite the Cassandra seed list from the provided arguments."""
    DatabaseSetup(args_str).update_seed_list()
def decommission_cassandra_node(args_str = None):
    """Decommission this node from the Cassandra cluster."""
    DatabaseSetup(args_str).decommission_db_node()
def remove_cassandra_node(args_str = None):
    """Remove the node named by --node_to_delete from the Cassandra cluster."""
    DatabaseSetup(args_str).remove_db_node()
# Allow running this provisioning script directly from the command line.
if __name__ == "__main__":
    main()
|
from pyrosetta import *
from pyrosetta.rosetta import *
from rosetta.protocols.rosetta_scripts import *
from rosetta.protocols.antibody import *
from rosetta.protocols.antibody.design import *
from pyrosetta.rosetta.core.scoring import *
import pandas_anarci_numberer
from rosetta.utility import *
import os
import sys
import timeit
import numpy as np
import pandas as pd
import re
from pathlib import Path
def main(path, structure, light_chain):
    """Renumber, minimize, and annotate an antibody structure with PyRosetta.

    Renumbers the input PDB to AHo, pack+minimizes it with a RosettaScripts
    protocol, writes B-factor flags marking CDR/framework regions of interest,
    dumps a Chothia-renumbered minimized PDB, and returns a one-row DataFrame
    of per-CDR cluster name / distance / sequence.

    Args:
        path: directory containing the input structure.
        structure: PDB filename within *path*.
        light_chain: "kappa" or "lambda".
    """
    aho_structure = Path(structure).stem + '_aho.pdb'
    # Renumber to AHo so Rosetta's antibody machinery can parse the chains.
    pandas_anarci_numberer.main(path +'/'+ structure, 'aho', path +'/'+ aho_structure)
    init('-input_ab_scheme AHo -ex1 -ex2 -use_input_sc -flip_HNQ -no_optH false -allow_omega_mismatches_for_north_clusters')
    pose = pose_from_pdb(path +'/'+ aho_structure)
    original_pose = pose.clone()
    scorefxn = get_score_function()
    start = timeit.default_timer()
    # Pack + torsion minimize + cartesian minimize, expressed as RosettaScripts XML.
    min_packer = """
    <ROSETTASCRIPTS>
        <SCOREFXNS>
            <ScoreFunction name="r15" weights="ref2015" />
            <ScoreFunction name="molmech" weights="mm_std_fa_elec_dslf_fa13" />
            <ScoreFunction name="r15_cart" weights="ref2015" >
                <Reweight scoretype="pro_close" weight="0.0" />
                <Reweight scoretype="cart_bonded" weight="0.625" />
            </ScoreFunction>
        </SCOREFXNS>
        <RESIDUE_SELECTORS>
        </RESIDUE_SELECTORS>
        <TASKOPERATIONS>
            <RestrictToRepacking name="no_design" />
            <ExtraRotamersGeneric name="extrachi" ex1="1" ex2="1" ex1_sample_level="1" ex2_sample_level="1" />
        </TASKOPERATIONS>
        <FILTERS>
        </FILTERS>
        <MOVERS>
            <MinMover name="min_torsion" scorefxn="molmech" chi="true" bb="1" cartesian="F" />
            <MinMover name="min_cart" scorefxn="r15_cart" chi="true" bb="1" cartesian="T" />
            <PackRotamersMover name="pack1" scorefxn="r15" task_operations="no_design,extrachi" />
        </MOVERS>
        <APPLY_TO_POSE>
        </APPLY_TO_POSE>
        <PROTOCOLS>
            <Add mover="pack1" />
            <Add mover="min_torsion" />
            <Add mover="min_cart" />
        </PROTOCOLS>
        <OUTPUT scorefxn="r15" />
    </ROSETTASCRIPTS>
    """
    xml = XmlObjects.create_from_string(min_packer)
    min_pack_protocol = xml.get_mover("ParsedProtocol")
    min_pack_protocol.apply(pose)
    abinfo = AntibodyInfo(original_pose, AHO_Scheme, North)
    if light_chain == "kappa":
        abinfo.set_light_chain_type(kappa)
    elif light_chain == "lambda":
        # NOTE(review): LightChainTypeEnum_start looks like an enum sentinel,
        # not the lambda light-chain value — confirm against the Rosetta enum.
        abinfo.set_light_chain_type(LightChainTypeEnum_start)
    print(abinfo)
    pdbinfo = pose.pdb_info()
    chains = ("H","L")
    # Use the B-factor column as a flag: 0.0 everywhere, 1.0 on regions of interest.
    for res in range(1, pose.total_residue()+1):
        for atom in range(1, pdbinfo.natoms(res)+1):
            pdbinfo.temperature(res,atom, 0.0)
    # Flag the first four residues of each of the first two chains.
    for num in range(1,3):
        for res in range(pose.conformation().chain_begin(num), pose.conformation().chain_begin(num)+4):
            for atom in range(1, pdbinfo.natoms(res)+1):
                pdbinfo.temperature(res,atom, 1.0)
    # Flag PDB-numbered residues 73-92 of each chain (framework region of
    # interest in the AHo numbering — TODO confirm the intended span).
    for chain in chains:
        for res in range(pdbinfo.pdb2pose(chain, 73), pdbinfo.pdb2pose(chain, 93)):
            for atom in range(1, pdbinfo.natoms(res)+1):
                pdbinfo.temperature(res,atom, 1.0)
    # Flag every CDR plus a 3-residue stem on each side.
    for i in range(1, 7):
        CDR_range = abinfo.get_CDR_start(CDRNameEnum(i), pose), abinfo.get_CDR_end(CDRNameEnum(i), pose)
        for r in range(CDR_range[0]-3, CDR_range[1]+4):
            for atom in range(1, pdbinfo.natoms(r)+1):
                pdbinfo.temperature(r,atom, 1.0)
    pose.dump_pdb(path +'/'+ Path(structure).stem + '_min.pdb')
    # Renumber the minimized model back to Chothia, overwriting the input name.
    pandas_anarci_numberer.main(path +'/'+ Path(structure).stem + '_min.pdb', 'chothia', path +'/'+ structure)
    print(scorefxn(original_pose))
    print(scorefxn(pose),CA_rmsd(pose, original_pose))
    # FIX: renamed local 'list' -> 'cdr_records'; it shadowed the builtin.
    cdr_records = []
    for i in range(1, 7):
        cdr_records.append(abinfo.get_cluster_name(abinfo.get_CDR_cluster(CDRNameEnum(i)).cluster()))
        cdr_records.append(abinfo.get_CDR_cluster(CDRNameEnum(i)).normalized_distance_in_degrees())
        cdr_records.append(abinfo.get_CDR_sequence_with_stem(CDRNameEnum(i), pose, 3, 3))
    abdf_T = pd.DataFrame(cdr_records)
    abdf = pd.DataFrame(abdf_T.T)
    abdf.columns=['H1_cluster', 'H1_distance', 'H1_sequence', 'H2_cluster', 'H2_distance', 'H2_sequence',
                  'H3_cluster', 'H3_distance', 'H3_sequence', 'L1_cluster', 'L1_distance', 'L1_sequence',
                  'L2_cluster', 'L2_distance', 'L2_sequence', 'L3_cluster', 'L3_distance', 'L3_sequence']
    end = timeit.default_timer()
    print(end-start)
    print(abdf)
    return abdf
if __name__ == '__main__':
    # CLI: main(<path>, <structure pdb filename>, <light chain: kappa|lambda>)
    main(sys.argv[1],sys.argv[2],sys.argv[3])
<reponame>reuvenperetz/model_optimization<gh_stars>10-100
# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import List
import numpy as np
import scipy
from model_compression_toolkit.core import common
from model_compression_toolkit.core.common import Graph, BaseNode
from model_compression_toolkit.core.common.defaultdict import DefaultDict
from model_compression_toolkit.core.common.framework_info import FrameworkInfo
from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig
# We assume to have Gaussian distribution before the RelU operation
# Hence, the activations after the RelU operation have Rectified Gaussian distribution
# We need to calculate the "fixed" mean and std of the "new" activations
# For more information about Rectified Gaussian distribution:
# https://en.wikipedia.org/wiki/Rectified_Gaussian_distribution
def fixed_mean_after_relu(mu: np.ndarray,
                          std: np.ndarray):
    """
    Compute the mean of the Rectified Gaussian distribution.

    Given per-channel mean and std of Gaussian activations before a ReLU,
    return the analytical mean of the activations after the ReLU.
    See: https://en.wikipedia.org/wiki/Rectified_Gaussian_distribution

    Args:
        mu: Mean vector of the activations before the RelU operation.
        std: Std vector of the activations before the RelU operation.

    Returns:
        The mean vector of the activations after the RelU operation.
    """
    var = np.power(std, 2)
    mu_sq = np.power(mu, 2)
    # Normalization constant of the underlying Gaussian pdf.
    gauss_norm = 1 / np.sqrt(2 * var * np.pi)
    linear_term = np.sqrt(np.pi / 2) * std * mu
    exp_term = var * np.exp(-1 * mu_sq / (2 * var))
    erf_arg = mu / (np.sqrt(2) * std)
    return gauss_norm * (linear_term * (scipy.special.erf(erf_arg) + 1) + exp_term)
def fixed_second_moment_after_relu(mu: np.ndarray,
                                   std: np.ndarray):
    """
    Compute the second moment of the Rectified Gaussian distribution.

    Given per-channel mean and std of Gaussian activations before a ReLU,
    return the analytical second (non-central) moment of the activations
    after the ReLU.
    See: https://en.wikipedia.org/wiki/Rectified_Gaussian_distribution

    Args:
        mu: Mean vector of the activations before the RelU operation.
        std: Std vector of the activations before the RelU operation.

    Returns:
        The second-moment vector of the activations after the RelU operation.
    """
    var = np.power(std, 2)
    mu_sq = np.power(mu, 2)
    # Normalization constant of the underlying Gaussian pdf.
    gauss_norm = 1 / np.sqrt(2 * var * np.pi)
    quad_term = np.sqrt(np.pi / 2) * std * (mu_sq + var)
    exp_term = mu * var * np.exp(-1 * mu_sq / (2 * var))
    erf_arg = mu / (np.sqrt(2) * std)
    return gauss_norm * (quad_term * (scipy.special.erf(erf_arg) + 1) + exp_term)
def scale_reshaping(scale: np.ndarray,
                    op2d: common.BaseNode,
                    kernel_channel_mapping: DefaultDict,
                    kernel_str: str,
                    in_channels: bool = True) -> np.ndarray:
    """
    Before scaling a kernel, the scale factor needs to be reshaped to the correct
    dimensions. This is a function of the layer that is scaled and whether its input channels or
    output channels that should be scaled.
    The index of the correct kernel index is obtained from kernel_channel_mapping.

    Args:
        scale: Scale factor to scale the kernel channels by.
        op2d: Node to scale its kernel.
        kernel_channel_mapping: Mapping from a layer to a tuple of indices of its output/input kernel channels.
        kernel_str: The framework specific attribute name of the convolution layer's weight/kernel.
        in_channels: Whether to place the scale on the kernel's input-channels
            axis (True) or its output-channels axis (False).

    Returns:
        The scale factor after reshaping it to the correct shape.
    """
    op_ndims = op2d.get_weights_by_keys(kernel_str).ndim
    # Build an all-ones target shape and mark the scaled axis with -1 so
    # np.reshape infers its size. BUGFIX: `np.int` was a deprecated alias of
    # the builtin `int` and was removed in NumPy 1.24; use `int` directly.
    reshape_target = np.ones(op_ndims, dtype=int)
    # The mapping yields a (output_axis, input_axis) tuple per layer type;
    # int(in_channels) selects index 1 for input channels, 0 for output.
    reshape_target[kernel_channel_mapping.get(op2d.type)[int(in_channels)]] = -1
    return np.reshape(scale, reshape_target)
def update_linear_nodes(fw_info: FrameworkInfo,
                        first_op2d_node: BaseNode,
                        second_op2d_node: BaseNode,
                        scale_factor: np.ndarray,
                        kernel_str: str,
                        bias_str: str):
    """
    Scale the weights of two consecutive linear nodes by a per-channel factor.

    The first node's kernel (and bias, when present) is multiplied by the
    factor on its output-channel axis, while the second node's kernel is
    divided by the same factor on its input-channel axis, so the output of
    the second node is unchanged.

    Args:
        fw_info: Information needed for quantization about the specific framework (e.g., kernel channels indices,
        groups of layers by how they should be quantized, etc.)
        first_op2d_node: Node to multiply its kernel by the scale factor.
        second_op2d_node: Node to divide its kernel by the scale factor.
        scale_factor: Scaling factor to scale the nodes' weights.
        kernel_str: The framework specific attribute name of the convolution layer's weight/kernel.
        bias_str: The framework specific attribute name of the convolution layer's bias.
    """
    # Scale broadcast over the first node's output-channels axis...
    out_scale = scale_reshaping(scale_factor,
                                first_op2d_node,
                                fw_info.kernel_channels_mapping,
                                kernel_str,
                                in_channels=False)
    # ...and the inverse correction over the second node's input-channels axis.
    in_scale = scale_reshaping(scale_factor,
                               second_op2d_node,
                               fw_info.kernel_channels_mapping,
                               kernel_str)
    scaled_first_kernel = first_op2d_node.get_weights_by_keys(kernel_str) * out_scale
    scaled_second_kernel = second_op2d_node.get_weights_by_keys(kernel_str) / in_scale
    # The bias lives on the first node's output channels, so it is scaled too.
    first_bias = first_op2d_node.get_weights_by_keys(bias_str)
    if first_bias is not None:
        first_op2d_node.set_weights_by_keys(bias_str, first_bias * scale_factor)
    first_op2d_node.set_weights_by_keys(kernel_str, scaled_first_kernel)
    second_op2d_node.set_weights_by_keys(kernel_str, scaled_second_kernel)
def calculate_scale_correction(first_op2d_node: BaseNode) -> np.ndarray:
    """
    Compute a scale factor by the activation node's outputs statistics in order to scale the activations by channel.

    The node's Gaussian output statistics are corrected to those of the
    Rectified Gaussian distribution observed after the ReLU; the factor is
    the reciprocal of the corrected per-channel std, clipped to at most 1.0
    so channels are only ever scaled down.

    Args:
        first_op2d_node: Node to calculate the scale factor by.

    Returns:
        Per-channel scaling factor as an np.ndarray.
    """
    std_vector = np.abs(first_op2d_node.prior_info.std_output)
    mean_vector = first_op2d_node.prior_info.mean_output
    fixed_second_moment_vector = fixed_second_moment_after_relu(mean_vector, std_vector)
    fixed_mean_vector = fixed_mean_after_relu(mean_vector, std_vector)
    # Var = E[X^2] - E[X]^2 of the rectified distribution.
    fixed_std_vector = np.sqrt(fixed_second_moment_vector - np.power(fixed_mean_vector, 2))
    scale_factor = 1.0 / fixed_std_vector
    # Clip to <= 1.0 so activations are never amplified by the equalization.
    scale_factor = np.minimum(scale_factor, 1.0)
    return scale_factor
def scale_equalization_lnl(fw_info: FrameworkInfo,
                           first_op2d_node: BaseNode,
                           second_op2d_node: BaseNode,
                           kernel_str: str,
                           bias_str: str):
    """
    Apply per-channel scale equalization across a linear->ReLU->linear pattern.

    A scale factor is derived from the first node's output statistics and the
    opposite (inverse) correction is applied to the following linear node, so
    the overall function of the pair is preserved.

    Args:
        fw_info: Information needed for quantization about the specific framework (e.g., kernel channels indices,
        groups of layers by how they should be quantized, etc.)
        first_op2d_node: Node to multiply its kernel by the scale factor.
        second_op2d_node: Node to divide its kernel by the scale factor.
        kernel_str: The framework specific attribute name of the convolution layer's weight/kernel.
        bias_str: The framework specific attribute name of the convolution layer's bias.
    """
    per_channel_scale = calculate_scale_correction(first_op2d_node)
    update_linear_nodes(fw_info,
                        first_op2d_node,
                        second_op2d_node,
                        per_channel_scale,
                        kernel_str,
                        bias_str)
class BaseScaleEqualization(common.BaseSubstitution):
    """
    Substitution that scales the weights of two linear nodes so the entire
    constrained range is used when activations are quantized.
    Unlike relu_bound_to_power_of_2, the scaling here is per-channel.
    """

    def __init__(self,
                 quant_config: QuantizationConfig,
                 fw_info: FrameworkInfo,
                 matcher_instance,
                 kernel_str: str,
                 bias_str: str):
        """
        Initialize a ScaleEqualization object.

        Args:
            quant_config: QuantizationConfig containing parameters of how the model should be quantized.
            fw_info: Information needed for quantization about the specific framework (e.g., kernel channels indices,
            groups of layers by how they should be quantized, etc.)
            matcher_instance: Per substitution matcher instance of type WalkMatcher
            kernel_str: The framework specific attribute name of the convolution layer's weight/kernel.
            bias_str: The framework specific attribute name of the convolution layer's bias.
        """
        self.quant_config = quant_config
        self.fw_info = fw_info
        self.kernel_str = kernel_str
        self.bias_str = bias_str
        super().__init__(matcher_instance=matcher_instance)

    def substitute(self,
                   graph: Graph,
                   nodes_list: List[BaseNode]) -> Graph:
        """
        Scale each channel of the weights of two matched linear nodes.

        Args:
            graph: Graph to apply the substitution on.
            nodes_list: List of nodes that match the pattern in the substitution init.

        Returns:
            Graph after substitution.
        """
        first_op2d_node = nodes_list[0]
        act_node = nodes_list[1]
        second_op2d_node = nodes_list[-1]
        # Equalize only when output statistics are available and the
        # activation is actually quantized; otherwise return the graph as-is.
        if first_op2d_node.prior_info.std_output is None or not act_node.is_activation_quantization_enabled():
            return graph
        scale_equalization_lnl(self.fw_info,
                               first_op2d_node,
                               second_op2d_node,
                               self.kernel_str,
                               self.bias_str)
        return graph
# ---- repo-dump file separator ----
# Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the connection module."""
import datetime
import os
import sys
import time
import thread
import unittest
sys.path[0:0] = [""]
from nose.plugins.skip import SkipTest
from bson.son import SON
from bson.tz_util import utc
from pymongo.connection import Connection
from pymongo.database import Database
from pymongo.pool import NO_REQUEST, NO_SOCKET_YET, SocketInfo
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
InvalidURI,
OperationFailure)
from test import version
from test.utils import is_mongos, server_is_master_with_slave, delay
# Target mongod for the tests; override via the DB_IP / DB_PORT env vars.
host = os.environ.get("DB_IP", "localhost")
port = int(os.environ.get("DB_PORT", 27017))
def get_connection(*args, **kwargs):
    """Open a Connection to the module-level test host/port, forwarding extras."""
    connection = Connection(host, port, *args, **kwargs)
    return connection
class TestConnection(unittest.TestCase):
    """Integration tests for pymongo's Connection against a live mongod.

    The target server comes from the DB_IP / DB_PORT environment variables
    (default localhost:27017). Note this suite is Python 2 code: it uses the
    `thread` module, `xrange`, `exec` statements and `fsync(async=True)`.
    """

    def setUp(self):
        # Resolve the test server address once per test.
        self.host = os.environ.get("DB_IP", "localhost")
        self.port = int(os.environ.get("DB_PORT", 27017))

    def test_types(self):
        """Constructor rejects host/port arguments of the wrong type."""
        self.assertRaises(TypeError, Connection, 1)
        self.assertRaises(TypeError, Connection, 1.14)
        self.assertRaises(TypeError, Connection, "localhost", "27017")
        self.assertRaises(TypeError, Connection, "localhost", 1.14)
        self.assertRaises(TypeError, Connection, "localhost", [])
        self.assertRaises(ConfigurationError, Connection, [])

    def test_constants(self):
        """Class-level HOST/PORT act as defaults for Connection()."""
        Connection.HOST = self.host
        Connection.PORT = self.port
        self.assertTrue(Connection())
        # Bad defaults break the no-arg constructor but not explicit args.
        Connection.HOST = "somedomainthatdoesntexist.org"
        Connection.PORT = 123456789
        self.assertRaises(ConnectionFailure, Connection, connectTimeoutMS=600)
        self.assertTrue(Connection(self.host, self.port))
        # Restore the defaults for subsequent tests.
        Connection.HOST = self.host
        Connection.PORT = self.port
        self.assertTrue(Connection())

    def test_connect(self):
        """Unreachable hosts/ports raise ConnectionFailure."""
        self.assertRaises(ConnectionFailure, Connection,
                          "somedomainthatdoesntexist.org", connectTimeoutMS=600)
        self.assertRaises(ConnectionFailure, Connection, self.host, 123456789)
        self.assertTrue(Connection(self.host, self.port))

    def test_host_w_port(self):
        """A 'host:port' string is accepted as the host argument."""
        self.assertTrue(Connection("%s:%d" % (self.host, self.port)))
        self.assertRaises(ConnectionFailure, Connection,
                          "%s:1234567" % (self.host,), self.port)

    def test_repr(self):
        """repr() shows the host and port."""
        self.assertEqual(repr(Connection(self.host, self.port)),
                         "Connection('%s', %d)" % (self.host, self.port))

    def test_getters(self):
        """host/port/nodes attributes reflect the connected server."""
        self.assertEqual(Connection(self.host, self.port).host, self.host)
        self.assertEqual(Connection(self.host, self.port).port, self.port)
        self.assertEqual(set([(self.host, self.port)]),
                         Connection(self.host, self.port).nodes)

    def test_get_db(self):
        """Database access validates names and supports attr/item access."""
        connection = Connection(self.host, self.port)

        def make_db(base, name):
            return base[name]

        self.assertRaises(InvalidName, make_db, connection, "")
        self.assertRaises(InvalidName, make_db, connection, "te$t")
        self.assertRaises(InvalidName, make_db, connection, "te.t")
        self.assertRaises(InvalidName, make_db, connection, "te\\t")
        self.assertRaises(InvalidName, make_db, connection, "te/t")
        self.assertRaises(InvalidName, make_db, connection, "te st")
        self.assertTrue(isinstance(connection.test, Database))
        self.assertEqual(connection.test, connection["test"])
        self.assertEqual(connection.test, Database(connection, "test"))

    def test_database_names(self):
        """database_names() lists databases that have data."""
        connection = Connection(self.host, self.port)
        connection.pymongo_test.test.save({"dummy": u"object"})
        connection.pymongo_test_mike.test.save({"dummy": u"object"})
        dbs = connection.database_names()
        self.assertTrue("pymongo_test" in dbs)
        self.assertTrue("pymongo_test_mike" in dbs)

    def test_drop_database(self):
        """drop_database() accepts a name or a Database object."""
        connection = Connection(self.host, self.port)
        self.assertRaises(TypeError, connection.drop_database, 5)
        self.assertRaises(TypeError, connection.drop_database, None)
        # NOTE: everything below is intentionally dead code — the test is
        # skipped here because of a known server bug.
        raise SkipTest("This test often fails due to SERVER-2329")
        connection.pymongo_test.test.save({"dummy": u"object"})
        dbs = connection.database_names()
        self.assertTrue("pymongo_test" in dbs)
        connection.drop_database("pymongo_test")
        dbs = connection.database_names()
        self.assertTrue("pymongo_test" not in dbs)
        connection.pymongo_test.test.save({"dummy": u"object"})
        dbs = connection.database_names()
        self.assertTrue("pymongo_test" in dbs)
        connection.drop_database(connection.pymongo_test)
        dbs = connection.database_names()
        self.assertTrue("pymongo_test" not in dbs)

    def test_copy_db(self):
        """copy_database() validates args, copies data, preserves request state."""
        c = Connection(self.host, self.port)
        self.assertTrue(c.in_request())
        self.assertRaises(TypeError, c.copy_database, 4, "foo")
        self.assertRaises(TypeError, c.copy_database, "foo", 4)
        self.assertRaises(InvalidName, c.copy_database, "foo", "$foo")
        c.pymongo_test.test.drop()
        c.drop_database("pymongo_test1")
        c.drop_database("pymongo_test2")
        c.pymongo_test.test.insert({"foo": "bar"})
        # Due to SERVER-2329, databases may not disappear from a master in a
        # master-slave pair
        if not server_is_master_with_slave(c):
            self.assertFalse("pymongo_test1" in c.database_names())
            self.assertFalse("pymongo_test2" in c.database_names())
        c.copy_database("pymongo_test", "pymongo_test1")
        # copy_database() didn't accidentally end the request
        self.assertTrue(c.in_request())
        self.assertTrue("pymongo_test1" in c.database_names())
        self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])
        c.end_request()
        c.copy_database("pymongo_test", "pymongo_test2",
                        "%s:%d" % (self.host, self.port))
        # copy_database() didn't accidentally restart the request
        self.assertFalse(c.in_request())
        self.assertTrue("pymongo_test2" in c.database_names())
        self.assertEqual("bar", c.pymongo_test2.test.find_one()["foo"])
        if version.at_least(c, (1, 3, 3, 1)):
            # Authenticated copies: wrong credentials must fail.
            c.drop_database("pymongo_test1")
            c.pymongo_test.add_user("mike", "password")
            self.assertRaises(OperationFailure, c.copy_database,
                              "pymongo_test", "pymongo_test1",
                              username="foo", password="<PASSWORD>")
            if not server_is_master_with_slave(c):
                self.assertFalse("pymongo_test1" in c.database_names())
            self.assertRaises(OperationFailure, c.copy_database,
                              "pymongo_test", "pymongo_test1",
                              username="mike", password="<PASSWORD>")
            if not server_is_master_with_slave(c):
                self.assertFalse("pymongo_test1" in c.database_names())
            if not is_mongos(c):
                # See SERVER-6427
                c.copy_database("pymongo_test", "pymongo_test1",
                                username="mike", password="password")
                self.assertTrue("pymongo_test1" in c.database_names())
                self.assertEqual("bar", c.pymongo_test1.test.find_one()["foo"])

    def test_iteration(self):
        """A Connection is not iterable."""
        connection = Connection(self.host, self.port)

        def iterate():
            [a for a in connection]

        self.assertRaises(TypeError, iterate)

    def test_disconnect(self):
        """Collections keep working after repeated disconnects (auto-reconnect)."""
        c = Connection(self.host, self.port)
        coll = c.foo.bar
        c.disconnect()
        c.disconnect()
        coll.count()
        c.disconnect()
        c.disconnect()
        coll.count()

    def test_from_uri(self):
        """mongodb:// URIs with credentials and options behave like kwargs."""
        c = Connection(self.host, self.port)
        self.assertEqual(c, Connection("mongodb://%s:%d" %
                                       (self.host, self.port)))
        # Start from a clean user table.
        c.admin.system.users.remove({})
        c.pymongo_test.system.users.remove({})
        try:
            # First admin user add fails gle in MongoDB >= 2.1.2
            # See SERVER-4225 for more information.
            c.admin.add_user("admin", "pass")
        except OperationFailure:
            pass
        c.admin.authenticate("admin", "pass")
        c.pymongo_test.add_user("user", "pass")
        # Bad credentials in the URI must be rejected.
        self.assertRaises(ConfigurationError, Connection,
                          "mongodb://foo:bar@%s:%d" % (self.host, self.port))
        self.assertRaises(ConfigurationError, Connection,
                          "mongodb://admin:bar@%s:%d" % (self.host, self.port))
        self.assertRaises(ConfigurationError, Connection,
                          "mongodb://user:pass@%s:%d" % (self.host, self.port))
        Connection("mongodb://admin:pass@%s:%d" % (self.host, self.port))
        self.assertRaises(ConfigurationError, Connection,
                          "mongodb://admin:pass@%s:%d/pymongo_test" %
                          (self.host, self.port))
        self.assertRaises(ConfigurationError, Connection,
                          "mongodb://user:foo@%s:%d/pymongo_test" %
                          (self.host, self.port))
        Connection("mongodb://user:pass@%s:%d/pymongo_test" %
                   (self.host, self.port))
        # URI options map onto constructor options.
        self.assertTrue(Connection("mongodb://%s:%d" %
                                   (self.host, self.port),
                                   slave_okay=True).slave_okay)
        self.assertTrue(Connection("mongodb://%s:%d/?slaveok=true;w=2" %
                                   (self.host, self.port)).slave_okay)
        # Clean up the users created above.
        c.admin.system.users.remove({})
        c.pymongo_test.system.users.remove({})

    def test_fork(self):
        # Test using a connection before and after a fork.
        if sys.platform == "win32":
            raise SkipTest()
        try:
            from multiprocessing import Process, Pipe
        except ImportError:
            raise SkipTest()
        db = Connection(self.host, self.port).pymongo_test
        # Failure occurs if the connection is used before the fork
        db.test.find_one()
        db.connection.end_request()

        def loop(pipe):
            # Child process body: any exception is reported through the pipe.
            while True:
                try:
                    db.test.insert({"a": "b"}, safe=True)
                    for _ in db.test.find():
                        pass
                except:
                    pipe.send(True)
                    os._exit(1)

        cp1, cc1 = Pipe()
        cp2, cc2 = Pipe()
        p1 = Process(target=loop, args=(cc1,))
        p2 = Process(target=loop, args=(cc2,))
        p1.start()
        p2.start()
        p1.join(1)
        p2.join(1)
        p1.terminate()
        p2.terminate()
        p1.join()
        p2.join()
        cc1.close()
        cc2.close()
        # recv will only have data if the subprocess failed
        try:
            cp1.recv()
            self.fail()
        except EOFError:
            pass
        try:
            cp2.recv()
            self.fail()
        except EOFError:
            pass

    def test_document_class(self):
        """document_class controls the type returned by queries."""
        c = Connection(self.host, self.port)
        db = c.pymongo_test
        db.test.insert({"x": 1})
        self.assertEqual(dict, c.document_class)
        self.assertTrue(isinstance(db.test.find_one(), dict))
        self.assertFalse(isinstance(db.test.find_one(), SON))
        c.document_class = SON
        self.assertEqual(SON, c.document_class)
        self.assertTrue(isinstance(db.test.find_one(), SON))
        self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
        c = Connection(self.host, self.port, document_class=SON)
        db = c.pymongo_test
        self.assertEqual(SON, c.document_class)
        self.assertTrue(isinstance(db.test.find_one(), SON))
        self.assertFalse(isinstance(db.test.find_one(as_class=dict), SON))
        c.document_class = dict
        self.assertEqual(dict, c.document_class)
        self.assertTrue(isinstance(db.test.find_one(), dict))
        self.assertFalse(isinstance(db.test.find_one(), SON))

    def test_timeouts(self):
        """Millisecond timeout options reach the pool as seconds."""
        conn = Connection(self.host, self.port, connectTimeoutMS=10500)
        self.assertEqual(10.5, conn._Connection__pool.conn_timeout)
        conn = Connection(self.host, self.port, socketTimeoutMS=10500)
        self.assertEqual(10.5, conn._Connection__pool.net_timeout)

    def test_network_timeout(self):
        """Per-connection and per-query network timeouts abort slow queries."""
        no_timeout = Connection(self.host, self.port)
        timeout_sec = 1
        timeout = Connection(self.host, self.port, network_timeout=timeout_sec)
        no_timeout.pymongo_test.drop_collection("test")
        no_timeout.pymongo_test.test.insert({"x": 1}, safe=True)
        # A $where clause that takes a second longer than the timeout
        where_func = delay(timeout_sec + 1)

        def get_x(db):
            doc = db.test.find().where(where_func).next()
            return doc["x"]

        self.assertEqual(1, get_x(no_timeout.pymongo_test))
        self.assertRaises(ConnectionFailure, get_x, timeout.pymongo_test)

        def get_x_timeout(db, t):
            doc = db.test.find(network_timeout=t).where(where_func).next()
            return doc["x"]

        self.assertEqual(1, get_x_timeout(timeout.pymongo_test, None))
        self.assertRaises(ConnectionFailure, get_x_timeout,
                          no_timeout.pymongo_test, 0.1)

    def test_tz_aware(self):
        """tz_aware=True yields UTC-aware datetimes; default yields naive ones."""
        self.assertRaises(ConfigurationError, Connection, tz_aware='foo')
        aware = Connection(self.host, self.port, tz_aware=True)
        naive = Connection(self.host, self.port)
        aware.pymongo_test.drop_collection("test")
        now = datetime.datetime.utcnow()
        aware.pymongo_test.test.insert({"x": now}, safe=True)
        self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
        self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
        self.assertEqual(
            aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
            naive.pymongo_test.test.find_one()["x"])

    def test_ipv6(self):
        """IPv6 literals work for single hosts, URIs and host lists."""
        try:
            connection = Connection("[::1]")
        except:
            # Either mongod was started without --ipv6
            # or the OS doesn't support it (or both).
            raise SkipTest("No IPV6")
        # Try a few simple things
        connection = Connection("mongodb://[::1]:%d" % (self.port,))
        connection = Connection("mongodb://[::1]:%d/"
                                "?slaveOk=true" % (self.port,))
        connection = Connection("[::1]:%d,"
                                "localhost:%d" % (self.port, self.port))
        connection = Connection("localhost:%d,"
                                "[::1]:%d" % (self.port, self.port))
        connection.pymongo_test.test.save({"dummy": u"object"})
        connection.pymongo_test_bernie.test.save({"dummy": u"object"})
        dbs = connection.database_names()
        self.assertTrue("pymongo_test" in dbs)
        self.assertTrue("pymongo_test_bernie" in dbs)

    def test_fsync_lock_unlock(self):
        """fsync/lock/unlock round-trips; is_locked tracks server state."""
        c = get_connection()
        if is_mongos(c):
            raise SkipTest('fsync/lock not supported by mongos')
        self.assertFalse(c.is_locked)
        # async flushing not supported on windows...
        if sys.platform not in ('cygwin', 'win32'):
            c.fsync(async=True)
            self.assertFalse(c.is_locked)
        c.fsync(lock=True)
        self.assertTrue(c.is_locked)
        locked = True
        c.unlock()
        # Unlock may take a moment to propagate; poll for up to 5 seconds.
        for _ in xrange(5):
            locked = c.is_locked
            if not locked:
                break
            time.sleep(1)
        self.assertFalse(locked)

    def test_contextlib(self):
        """Connections work with the 'with' statement (close resets the pool)."""
        if sys.version_info < (2, 6):
            raise SkipTest()
        import contextlib
        conn = get_connection(auto_start_request=False)
        conn.pymongo_test.drop_collection("test")
        conn.pymongo_test.test.insert({"foo": "bar"})
        # The socket used for the previous commands has been returned to the
        # pool
        self.assertEqual(1, len(conn._Connection__pool.sockets))
        # We need exec here because if the Python version is less than 2.6
        # these with-statements won't even compile.
        exec """
with contextlib.closing(conn):
    self.assertEqual("bar", conn.pymongo_test.test.find_one()["foo"])
self.assertEqual(0, len(conn._Connection__pool.sockets))
"""
        exec """
with get_connection() as connection:
    self.assertEqual("bar", connection.pymongo_test.test.find_one()["foo"])
# Calling conn.close() has reset the pool
self.assertEqual(0, len(connection._Connection__pool.sockets))
"""

    def get_sock(self, pool):
        # Helper: check a socket out of the pool for the test server.
        sock_info = pool.get_socket((self.host, self.port))
        return sock_info

    def assertSameSock(self, pool):
        # Two consecutive checkouts must return the same socket (in-request).
        sock_info0 = self.get_sock(pool)
        sock_info1 = self.get_sock(pool)
        self.assertEqual(sock_info0, sock_info1)
        pool.maybe_return_socket(sock_info0)
        pool.maybe_return_socket(sock_info1)

    def assertDifferentSock(self, pool):
        # We have to hold both SocketInfos at the same time, otherwise the
        # first will send its socket back to the pool as soon as its ref count
        # goes to zero, in which case the second SocketInfo we get will have
        # the same socket as the first.
        sock_info0 = self.get_sock(pool)
        sock_info1 = self.get_sock(pool)
        self.assertNotEqual(sock_info0, sock_info1)
        pool.maybe_return_socket(sock_info0)
        pool.maybe_return_socket(sock_info1)

    def assertNoRequest(self, pool):
        # No request is in progress on this thread.
        self.assertEqual(NO_REQUEST, pool._get_request_state())

    def assertNoSocketYet(self, pool):
        # A request started but no socket has been assigned to it yet.
        self.assertEqual(NO_SOCKET_YET, pool._get_request_state())

    def assertRequestSocket(self, pool):
        # The current request owns a concrete socket.
        self.assertTrue(isinstance(pool._get_request_state(), SocketInfo))

    def test_with_start_request(self):
        """start_request() pins a socket; exiting the context unpins it."""
        conn = get_connection(auto_start_request=False)
        pool = conn._Connection__pool
        # No request started
        self.assertNoRequest(pool)
        self.assertDifferentSock(pool)
        # Start a request
        request_context_mgr = conn.start_request()
        self.assertTrue(
            isinstance(request_context_mgr, object)
        )
        self.assertNoSocketYet(pool)
        self.assertSameSock(pool)
        self.assertRequestSocket(pool)
        # End request
        request_context_mgr.__exit__(None, None, None)
        self.assertNoRequest(pool)
        self.assertDifferentSock(pool)
        # Test the 'with' statement
        if sys.version_info >= (2, 6):
            # We need exec here because if the Python version is less than 2.6
            # these with-statements won't even compile.
            exec """
with conn.start_request() as request:
    self.assertEqual(conn, request.connection)
    self.assertNoSocketYet(pool)
    self.assertSameSock(pool)
    self.assertRequestSocket(pool)
"""
            # Request has ended
            self.assertNoRequest(pool)
            self.assertDifferentSock(pool)

    def test_auto_start_request(self):
        """auto_start_request defaults to True and is re-triggered by queries."""
        for bad_horrible_value in (None, 5, 'hi!'):
            self.assertRaises(
                (TypeError, ConfigurationError),
                lambda: get_connection(auto_start_request=bad_horrible_value)
            )
        # auto_start_request should default to True
        conn = get_connection()
        self.assertTrue(conn.auto_start_request)
        self.assertTrue(conn.in_request())
        pool = conn._Connection__pool
        # Request started already, just from Connection constructor - it's a
        # bit weird, but Connection does some socket stuff when it initializes
        # and it ends up with a request socket
        self.assertRequestSocket(pool)
        self.assertSameSock(pool)
        conn.end_request()
        self.assertNoRequest(pool)
        self.assertDifferentSock(pool)
        # Trigger auto_start_request
        conn.db.test.find_one()
        self.assertRequestSocket(pool)
        self.assertSameSock(pool)

    def test_interrupt_signal(self):
        """Connection closes its socket when interrupted mid-recv (PYTHON-294)."""
        if sys.platform.startswith('java'):
            # We can't figure out how to raise an exception on a thread that's
            # blocked on a socket, whether that's the main thread or a worker,
            # without simply killing the whole thread in Jython. This suggests
            # PYTHON-294 can't actually occur in Jython.
            raise SkipTest("Can't test interrupts in Jython")
        # Test fix for PYTHON-294 -- make sure Connection closes its
        # socket if it gets an interrupt while waiting to recv() from it.
        c = get_connection()
        db = c.pymongo_test
        # A $where clause which takes 1.5 sec to execute
        where = delay(1.5)
        # Need exactly 1 document so find() will execute its $where clause once
        db.drop_collection('foo')
        db.foo.insert({'_id': 1}, safe=True)

        def interrupter():
            # Raises KeyboardInterrupt in the main thread
            time.sleep(0.25)
            thread.interrupt_main()

        thread.start_new_thread(interrupter, ())
        raised = False
        try:
            # Will be interrupted by a KeyboardInterrupt.
            db.foo.find({'$where': where}).next()
        except KeyboardInterrupt:
            raised = True
        # Can't use self.assertRaises() because it doesn't catch system
        # exceptions
        self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt")
        # Raises AssertionError due to PYTHON-294 -- Mongo's response to the
        # previous find() is still waiting to be read on the socket, so the
        # request id's don't match.
        self.assertEqual(
            {'_id': 1},
            db.foo.find().next()
        )
# Run the whole suite when executed directly.
if __name__ == "__main__":
    unittest.main()
# ---- repo-dump file separator ----
#!/usr/bin/env python3
import pytest
from itertools import cycle
# Advent of Code day identifier; used to derive input/test file names.
DAY='13'
# Track piece characters.
VERT='|'
HORIZ='-'
DIAGDU='/'
DIAGUD='\\'
INTER='+'
# Cart facing characters as they appear on the map.
CARTUP='^'
CARTDOWN='v'
CARTLEFT='<'
CARTRIGHT='>'
CARTS =(CARTUP, CARTDOWN, CARTLEFT, CARTRIGHT)
# Intersection turn choices: left, straight, right (cycled in this order).
CARTSL='L'
CARTSS='S'
CARTSR='R'
CARTSEQUENCE=(CARTSL, CARTSS, CARTSR)
# Marker written onto the map where two carts collide.
CRASH='X'
class Cart():
    """A single cart on the track.

    Keeps the cart's facing character, its coordinates, the track piece it
    currently covers, and the cyclic left/straight/right intersection choice.
    """

    # Coordinate delta per facing (x grows right, y grows down).
    _DELTAS = {CARTUP: (0, -1),
               CARTDOWN: (0, 1),
               CARTLEFT: (-1, 0),
               CARTRIGHT: (1, 0)}
    # New facing after turning left / right at an intersection.
    _LEFT_OF = {CARTUP: CARTLEFT,
                CARTLEFT: CARTDOWN,
                CARTDOWN: CARTRIGHT,
                CARTRIGHT: CARTUP}
    _RIGHT_OF = {CARTUP: CARTRIGHT,
                 CARTRIGHT: CARTDOWN,
                 CARTDOWN: CARTLEFT,
                 CARTLEFT: CARTUP}

    def __init__(self, c, x, y):
        self.c = c
        # The cart hides the piece of track under it: a vertical cart must be
        # standing on '|', any other cart on '-'.
        self.track = VERT if c in [CARTUP, CARTDOWN] else HORIZ
        self.x = x
        self.y = y
        self.turns = cycle(CARTSEQUENCE)

    def move(self):
        """Advance one step in the current facing direction."""
        dx, dy = self._DELTAS.get(self.c, (0, 0))
        self.x += dx
        self.y += dy

    def get_pos(self):
        """Return the current (x, y) position."""
        return (self.x, self.y)

    def turn(self):
        """Apply the next left/straight/right choice at an intersection."""
        n_turn = next(self.turns)
        if n_turn == CARTSL:
            self.c = self._LEFT_OF.get(self.c, self.c)
        elif n_turn == CARTSR:
            self.c = self._RIGHT_OF.get(self.c, self.c)
        # CARTSS: keep going straight, facing unchanged.
class Map():
    """The track map plus all carts found on it.

    The map is stored as a list of strings (one per row) with carts drawn
    over the track; each Cart remembers the piece of track it covers.
    """

    def __init__(self, in_lines):
        self.map_l = in_lines
        self.carts = []
        # Scan every cell for a cart character and register a Cart for it.
        for y, l in enumerate(self.map_l):
            for x, c in enumerate(l):
                if c in CARTS:
                    self.carts.append(Cart(c, x, y))

    def crash(self, new_pos):
        """Mark a collision at new_pos by writing the CRASH character."""
        # Strings are immutable: splice the row through a char list.
        a=list(self.map_l[new_pos[1]]);a[new_pos[0]]=CRASH
        self.map_l[new_pos[1]]=''.join(a);

    def tick(self):
        """Advance every cart one step; stops early at the first crash.

        NOTE(review): carts are processed in creation order (not sorted by
        row/column) and collisions are only checked against carts that have
        already moved this tick — confirm this matches the puzzle rules.
        """
        cart_positions = set()
        for cart in self.carts:
            old_pos = cart.get_pos()
            #print('Voor: ',cart.get_pos())
            cart.move()
            #print(self.map_l)
            # Restore the track piece the cart was covering.
            a=list(self.map_l[old_pos[1]]);a[old_pos[0]]=cart.track;
            self.map_l[old_pos[1]]=''.join(a);
            #print(self.map_l)
            new_pos = cart.get_pos()
            #print('Na: ',new_pos)
            if new_pos in cart_positions:
                # Two carts share a cell: record the crash and end the tick.
                return self.crash(new_pos)
            cart_positions.add(new_pos)
            # Remember what the cart now covers, then steer it by that piece.
            new_track_char = self.map_l[new_pos[1]][new_pos[0]]
            cart.track = new_track_char
            if new_track_char == DIAGDU: #/
                if cart.c == CARTUP:
                    cart.c = CARTRIGHT
                elif cart.c == CARTLEFT:
                    cart.c = CARTDOWN
                elif cart.c == CARTDOWN:
                    cart.c = CARTLEFT
                elif cart.c == CARTRIGHT:
                    cart.c = CARTUP
            elif new_track_char == DIAGUD: #\
                if cart.c == CARTUP:
                    cart.c = CARTLEFT
                elif cart.c == CARTLEFT:
                    cart.c = CARTUP
                elif cart.c == CARTDOWN:
                    cart.c = CARTRIGHT
                elif cart.c == CARTRIGHT:
                    cart.c = CARTDOWN
            elif new_track_char == INTER: #+
                cart.turn()
            # Draw the cart at its new position.
            a=list(self.map_l[new_pos[1]]);a[new_pos[0]]=cart.c
            self.map_l[new_pos[1]]=''.join(a);
            #print(self.map_l)

    def get_map(self):
        """Return the map as one newline-joined string (for printing)."""
        return '\n'.join(self.map_l)

    def get_crash_location(self):
        """Return the (x, y) of a CRASH marker, or None when there is none."""
        for y, l in enumerate(self.map_l):
            for x, c in enumerate(l):
                if c == CRASH:
                    return (x, y)
@pytest.fixture
def example_input_1():
    """Lines of the first example track file ('<DAY>.input.test.1')."""
    file_name = '{}.input.test.1'.format(DAY)
    with open(file_name, 'r') as in_file:
        return in_file.read().split('\n')
@pytest.fixture
def example_input_2():
    """Lines of the second example track file ('<DAY>.input.test.2')."""
    file_name = '{}.input.test.2'.format(DAY)
    with open(file_name, 'r') as in_file:
        return in_file.read().split('\n')
def test_example_1(example_input_1):
    """Two ticks on the first example match the expected frames."""
    print()
    track = Map(example_input_1[:7])
    print(track.get_map())
    for tick_no in (1, 2):
        track.tick()
        print('TICK')
        print(track.get_map())
        # Expected frames are stacked in the input file, 16 lines apart.
        assert track.map_l == example_input_1[tick_no * 16:tick_no * 16 + 7]
def test_example_2(example_input_2):
    """Fourteen ticks on the looped example end with a crash at (7, 3)."""
    print()
    track = Map(example_input_2[:6])
    print(track.get_map())
    for tick_no in range(1, 15):
        track.tick()
        print('TICK')
        print(track.get_map())
        # Expected frames are stacked in the input file, 7 lines apart.
        assert track.map_l == example_input_2[tick_no * 7:tick_no * 7 + 6]
    assert track.get_crash_location() == (7, 3)
# Part 1 on the real input: tick until the first crash appears, then print
# its coordinates.
if __name__ == '__main__':
    with open('{}.input'.format(DAY), 'r') as in_file:
        in_lines = in_file.read().split('\n')
    print()
    m = Map(in_lines)
    print(m.get_map())
    i = 0
    while not m.get_crash_location():
        i += 1
        m.tick()
        print('TICK {:3}'.format(i))
        #print(m.get_map())
    # Coordinates of the first collision.
    print(m.get_crash_location())
# ---- repo-dump file separator ----
# filename (repo-dump artifact): gql/transport/requests.py
import io
import json
import logging
from typing import Any, Dict, Optional, Tuple, Type, Union
import requests
from graphql import DocumentNode, ExecutionResult, print_ast
from requests.adapters import HTTPAdapter, Retry
from requests.auth import AuthBase
from requests.cookies import RequestsCookieJar
from requests_toolbelt.multipart.encoder import MultipartEncoder
from gql.transport import Transport
from ..utils import extract_files
from .exceptions import (
TransportAlreadyConnected,
TransportClosed,
TransportProtocolError,
TransportServerError,
)
# Module-level logger for this transport.
log = logging.getLogger(__name__)
class RequestsHTTPTransport(Transport):
    """:ref:`Sync Transport <sync_transports>` used to execute GraphQL queries
    on remote servers.

    The transport uses the requests library to send HTTP POST requests.
    """

    # File-like types recognized as uploads inside variable values.
    file_classes: Tuple[Type[Any], ...] = (io.IOBase,)

    def __init__(
        self,
        url: str,
        headers: Optional[Dict[str, Any]] = None,
        cookies: Optional[Union[Dict[str, Any], RequestsCookieJar]] = None,
        auth: Optional[AuthBase] = None,
        use_json: bool = True,
        timeout: Optional[int] = None,
        verify: bool = True,
        retries: int = 0,
        method: str = "POST",
        **kwargs: Any,
    ):
        """Initialize the transport with the given request parameters.

        :param url: The GraphQL server URL.
        :param headers: Dictionary of HTTP Headers to send with the :class:`Request`
            (Default: None).
        :param cookies: Dict or CookieJar object to send with the :class:`Request`
            (Default: None).
        :param auth: Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth
            (Default: None).
        :param use_json: Send request body as JSON instead of form-urlencoded
            (Default: True).
        :param timeout: Specifies a default timeout for requests (Default: None).
        :param verify: Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use. (Default: True).
        :param retries: Pre-setup of the requests' Session for performing retries
        :param method: HTTP method used for requests. (Default: POST).
        :param kwargs: Optional arguments that ``request`` takes.
            These can be seen at the `requests`_ source code or the official `docs`_

        .. _requests: https://github.com/psf/requests/blob/master/requests/api.py
        .. _docs: https://requests.readthedocs.io/en/master/
        """
        self.url = url
        self.headers = headers
        self.cookies = cookies
        self.auth = auth
        self.use_json = use_json
        self.default_timeout = timeout
        self.verify = verify
        self.retries = retries
        self.method = method
        self.kwargs = kwargs

        # Created lazily in connect(); None while the transport is closed.
        self.session: Optional[requests.Session] = None

    def connect(self):
        """Create the inner :class:`requests.Session`.

        :raises TransportAlreadyConnected: if connect() was already called.
        """
        if self.session is None:
            # Creating a session that can later be re-use to configure custom mechanisms
            self.session = requests.Session()

            # If we specified some retries, we provide a predefined retry-logic
            if self.retries > 0:
                adapter = HTTPAdapter(
                    max_retries=Retry(
                        total=self.retries,
                        backoff_factor=0.1,
                        status_forcelist=[500, 502, 503, 504],
                        # allowed_methods=None retries on all HTTP verbs,
                        # including non-idempotent ones such as POST.
                        allowed_methods=None,
                    )
                )
                for prefix in "http://", "https://":
                    self.session.mount(prefix, adapter)
        else:
            raise TransportAlreadyConnected("Transport is already connected")

    def execute(  # type: ignore
        self,
        document: DocumentNode,
        variable_values: Optional[Dict[str, Any]] = None,
        operation_name: Optional[str] = None,
        timeout: Optional[int] = None,
        # FIX: was `Dict[str, Any] = None` — an implicit Optional, disallowed
        # by PEP 484; the default and runtime behavior are unchanged.
        extra_args: Optional[Dict[str, Any]] = None,
        upload_files: bool = False,
    ) -> ExecutionResult:
        """Execute GraphQL query.

        Execute the provided document AST against the configured remote server. This
        uses the requests library to perform a HTTP POST request to the remote server.

        :param document: GraphQL query as AST Node object.
        :param variable_values: Dictionary of input parameters (Default: None).
        :param operation_name: Name of the operation that shall be executed.
            Only required in multi-operation documents (Default: None).
        :param timeout: Specifies a default timeout for requests (Default: None).
        :param extra_args: additional arguments to send to the requests post method
        :param upload_files: Set to True if you want to put files in the variable values
        :return: The result of execution.
            `data` is the result of executing the query, `errors` is null
            if no errors occurred, and is a non-empty array if an error occurred.
        """
        if not self.session:
            raise TransportClosed("Transport is not connected")

        query_str = print_ast(document)
        payload: Dict[str, Any] = {"query": query_str}

        if operation_name:
            payload["operationName"] = operation_name

        post_args = {
            "headers": self.headers,
            "auth": self.auth,
            "cookies": self.cookies,
            "timeout": timeout or self.default_timeout,
            "verify": self.verify,
        }

        if upload_files:
            # If the upload_files flag is set, then we need variable_values
            assert variable_values is not None

            # If we upload files, we will extract the files present in the
            # variable_values dict and replace them by null values
            nulled_variable_values, files = extract_files(
                variables=variable_values, file_classes=self.file_classes,
            )

            # Save the nulled variable values in the payload
            payload["variables"] = nulled_variable_values

            # Add the payload to the operations field
            operations_str = json.dumps(payload)
            log.debug("operations %s", operations_str)

            # Generate the file map
            # path is nested in a list because the spec allows multiple pointers
            # to the same file. But we don't support that.
            # Will generate something like {"0": ["variables.file"]}
            file_map = {str(i): [path] for i, path in enumerate(files)}

            # Enumerate the file streams
            # Will generate something like {'0': <_io.BufferedReader ...>}
            file_streams = {str(i): files[path] for i, path in enumerate(files)}

            # Add the file map field
            file_map_str = json.dumps(file_map)
            log.debug("file_map %s", file_map_str)

            fields = {"operations": operations_str, "map": file_map_str}

            # Add the extracted files as remaining fields
            for k, v in file_streams.items():
                fields[k] = (getattr(v, "name", k), v)

            # Prepare requests http to send multipart-encoded data
            data = MultipartEncoder(fields=fields)

            post_args["data"] = data

            # Copy before mutating so self.headers is never modified in place.
            if post_args["headers"] is None:
                post_args["headers"] = {}
            else:
                post_args["headers"] = {**post_args["headers"]}

            post_args["headers"]["Content-Type"] = data.content_type
        else:
            if variable_values:
                payload["variables"] = variable_values

            data_key = "json" if self.use_json else "data"
            post_args[data_key] = payload

            # Log the payload
            if log.isEnabledFor(logging.INFO):
                log.info(">>> %s", json.dumps(payload))

        # Pass kwargs to requests post method
        post_args.update(self.kwargs)

        # Pass post_args to requests post method
        if extra_args:
            post_args.update(extra_args)

        # Using the created session to perform requests
        response = self.session.request(
            self.method, self.url, **post_args  # type: ignore
        )

        def raise_response_error(resp: requests.Response, reason: str):
            # We raise a TransportServerError if the status code is 400 or higher
            # We raise a TransportProtocolError in the other cases
            try:
                # Raise a HTTPError if response status is 400 or higher
                resp.raise_for_status()
            except requests.HTTPError as e:
                raise TransportServerError(str(e), e.response.status_code) from e

            result_text = resp.text
            raise TransportProtocolError(
                f"Server did not return a GraphQL result: "
                f"{reason}: "
                f"{result_text}"
            )

        try:
            result = response.json()

            if log.isEnabledFor(logging.INFO):
                log.info("<<< %s", response.text)
        except Exception:
            raise_response_error(response, "Not a JSON answer")

        if "errors" not in result and "data" not in result:
            raise_response_error(response, 'No "data" or "errors" keys in answer')

        return ExecutionResult(
            errors=result.get("errors"),
            data=result.get("data"),
            extensions=result.get("extensions"),
        )

    def close(self):
        """Closing the transport by closing the inner session"""
        if self.session:
            self.session.close()
            self.session = None
|
<reponame>Gabrideg/Recognizers-Text<filename>Python/libraries/recognizers-date-time/recognizers_date_time/resources/portuguese_date_time.py
# ------------------------------------------------------------------------------
# <auto-generated>
# This code was generated by a tool.
# Changes to this file may cause incorrect behavior and will be lost if
# the code is regenerated.
# </auto-generated>
# ------------------------------------------------------------------------------
from .base_date_time import BaseDateTime
# pylint: disable=line-too-long
class PortugueseDateTime:
    # NOTE(review): this class is generated by a tool (see header above); the
    # two regex fixes marked "FIX:" below must also be mirrored in the
    # generator's resource definitions or they will be lost on regeneration.
    # The patterns use .NET-style named groups `(?<name>...)`; the consuming
    # library converts them before use with Python's `re`.
    TillRegex = f'(?<till>ate|as|às|até|ateh|a|ao|--|-|—|——)(\\s+(o|[aà](s)?))?'
    AndRegex = f'(?<and>e|e\\s*o|--|-|—|——)'
    DayRegex = f'(?<day>01|02|03|04|05|06|07|08|09|1|10|11|12|13|14|15|16|17|18|19|2|20|21|22|23|24|25|26|27|28|29|3|30|31|4|5|6|7|8|9)(?=\\b|t)'
    MonthNumRegex = f'(?<month>01|02|03|04|05|06|07|08|09|10|11|12|1|2|3|4|5|6|7|8|9)\\b'
    YearRegex = f'(?<year>((1[5-9]|20)\\d{{2}})|[0-27-9]\\d)'
    FullYearRegex = f'\\b(?<year>19\\d{{2}}|20\\d{{2}})\\b'
    # FIX: 'passsado' (triple 's') -> 'passado', so "mês passado" is recognized.
    RelativeMonthRegex = f'(?<relmonth>([nd]?es[st]e|pr[óo]ximo|passado|[uú]ltimo)\\s+m[eê]s)\\b'
    MonthRegex = f'(?<month>Abril|Abr|Agosto|Ago|Dezembro|Dez|Fevereiro|Fev|Janeiro|Jan|Julho|Jul|Junho|Jun|Mar[çc]o|Mar|Maio|Mai|Novembro|Nov|Outubro|Out|Septembro|Setembro|Sept|Set)'
    MonthSuffixRegex = f'(?<msuf>((em|no)\\s+|d[eo]\\s+)?({RelativeMonthRegex}|{MonthRegex}))'
    DateUnitRegex = f'(?<unit>anos|ano|meses|m[êe]s|semanas|semana|dia(s)?)\\b'
    PastRegex = f'(?<past>\\b(passad(a|o)(s)?|[uú]ltim[oa](s)?|anterior(es)?|h[aá]|pr[ée]vi[oa](s)?)\\b)'
    # NOTE(review): group is named 'past' even though it matches future markers;
    # left unchanged because consumers may key on the group name — confirm
    # against the generator resources before renaming.
    FutureRegex = f'(?<past>\\b(seguinte(s)?|pr[oó]xim[oa](s)?|dentro\\s+de|em|daqui\\s+a)\\b)'
    SimpleCasesRegex = f'\\b((desde\\s+[oa]|desde|d[oa])\\s+)?(dia\\s+)?({DayRegex})\\s*{TillRegex}\\s*(o dia\\s+)?({DayRegex})\\s+{MonthSuffixRegex}((\\s+|\\s*,\\s*){FullYearRegex})?\\b'
    MonthFrontSimpleCasesRegex = f'\\b{MonthSuffixRegex}\\s+((desde\\s+[oa]|desde|d[oa])\\s+)?(dia\\s+)?({DayRegex})\\s*{TillRegex}\\s*({DayRegex})((\\s+|\\s*,\\s*){FullYearRegex})?\\b'
    MonthFrontBetweenRegex = f'\\b{MonthSuffixRegex}\\s+((entre|entre\\s+[oa]s?)\\s+)(dias?\\s+)?({DayRegex})\\s*{AndRegex}\\s*({DayRegex})((\\s+|\\s*,\\s*){FullYearRegex})?\\b'
    DayBetweenRegex = f'\\b((entre|entre\\s+[oa]s?)\\s+)(dia\\s+)?({DayRegex})\\s*{AndRegex}\\s*({DayRegex})\\s+{MonthSuffixRegex}((\\s+|\\s*,\\s*){FullYearRegex})?\\b'
    OneWordPeriodRegex = f'\\b(((pr[oó]xim[oa]?|[nd]?es[st]e|aquel[ea]|[uú]ltim[oa]?|em)\\s+)?(?<month>Abril|Abr|Agosto|Ago|Dezembro|Dez|Fevereiro|Fev|Janeiro|Jan|Julho|Jul|Junho|Jun|Mar[çc]o|Mar|Maio|Mai|Novembro|Nov|Outubro|Out|Septembro|Setembro|Sept|Set)|(?<=\\b(de|do|da|o|a)\\s+)?(pr[oó]xim[oa](s)?|[uú]ltim[oa]s?|est(e|a))\\s+(fim de semana|fins de semana|semana|m[êe]s|ano)|fim de semana|fins de semana|(m[êe]s|anos)? [àa] data)\\b'
    # FIX: named group was corrupted as '(?<month><month>Abril|...' — the stray
    # literal '<month>' made the first alternative unmatchable for month names.
    MonthWithYearRegex = f'\\b(((pr[oó]xim[oa](s)?|[nd]?es[st]e|aquele|[uú]ltim[oa]?|em)\\s+)?(?<month>Abril|Abr|Agosto|Ago|Dezembro|Dez|Fevereiro|Fev|Janeiro|Jan|Julho|Jul|Junho|Jun|Mar[çc]o|Mar|Maio|Mai|Novembro|Nov|Outubro|Out|Septembro|Setembro|Sept|Set)\\s+((de|do|da|o|a)\\s+)?({FullYearRegex}|(?<order>pr[oó]ximo(s)?|[uú]ltimo?|[nd]?es[st]e)\\s+ano))\\b'
    MonthNumWithYearRegex = f'({FullYearRegex}(\\s*?)[/\\-\\.](\\s*?){MonthNumRegex})|({MonthNumRegex}(\\s*?)[/\\-](\\s*?){FullYearRegex})'
    WeekOfMonthRegex = f'(?<wom>(a|na\\s+)?(?<cardinal>primeira?|1a|segunda|2a|terceira|3a|[qc]uarta|4a|quinta|5a|[uú]ltima)\\s+semana\\s+{MonthSuffixRegex})'
    WeekOfYearRegex = f'(?<woy>(a|na\\s+)?(?<cardinal>primeira?|1a|segunda|2a|terceira|3a|[qc]uarta|4a|quinta|5a|[uú]ltima?)\\s+semana(\\s+d[oe]?)?\\s+({FullYearRegex}|(?<order>pr[oó]ximo|[uú]ltimo|[nd]?es[st]e)\\s+ano))'
    FollowedDateUnit = f'^\\s*{DateUnitRegex}'
    NumberCombinedWithDateUnit = f'\\b(?<num>\\d+(\\.\\d*)?){DateUnitRegex}'
    QuarterRegex = f'(n?o\\s+)?(?<cardinal>primeiro|1[oº]|segundo|2[oº]|terceiro|3[oº]|[qc]uarto|4[oº])\\s+trimestre(\\s+d[oe]|\\s*,\\s*)?\\s+({FullYearRegex}|(?<order>pr[oó]ximo(s)?|[uú]ltimo?|[nd]?es[st]e)\\s+ano)'
    QuarterRegexYearFront = f'({FullYearRegex}|(?<order>pr[oó]ximo(s)?|[uú]ltimo?|[nd]?es[st]e)\\s+ano)\\s+(n?o\\s+)?(?<cardinal>(primeiro)|1[oº]|segundo|2[oº]|terceiro|3[oº]|[qc]uarto|4[oº])\\s+trimestre'
    # '^[.]' placeholders below are deliberate never-matching patterns for
    # features not yet implemented for Portuguese.
    AllHalfYearRegex = f'^[.]'
    PrefixDayRegex = f'^[.]'
    SeasonRegex = f'\\b(?<season>(([uú]ltim[oa]|[nd]?es[st][ea]|n?[oa]|(pr[oó]xim[oa]s?|seguinte))\\s+)?(?<seas>primavera|ver[ãa]o|outono|inverno)((\\s+)?(seguinte|((de\\s+|,)?\\s*{FullYearRegex})|((do\\s+)?(?<order>pr[oó]ximo|[uú]ltimo|[nd]?es[st]e)\\s+ano)))?)\\b'
    WhichWeekRegex = f'(semana)(\\s*)(?<number>\\d\\d|\\d|0\\d)'
    WeekOfRegex = f'(semana)(\\s*)((do|da|de))'
    MonthOfRegex = f'(mes)(\\s*)((do|da|de))'
    RangeUnitRegex = f'\\b(?<unit>anos|ano|meses|m[êe]s|semanas|semana)\\b'
    InConnectorRegex = f'\\b(em)\\b'
    WithinNextPrefixRegex = f'^[.]'
    CenturySuffixRegex = f'^[.]'
    FromRegex = f'((desde|de)(\\s*a(s)?)?)$'
    ConnectorAndRegex = f'(e\\s*([àa](s)?)?)$'
    BetweenRegex = f'(entre\\s*([oa](s)?)?)'
    WeekDayRegex = f'\\b(?<weekday>Domingos?|Segundas?-feiras?|Segundas?\\s+feiras?|Segundas?|2a|Ter[çca]s?-feiras?|Ter[çc]as?\\s+feiras?|Ter[cça]s?|3a|Quartas?-feiras?|Quartas?\\s+feiras?|Quartas?|4a|Quintas?-feiras?|Quintas?\\s+feiras?|Quintas?|5a|Sextas?-feiras?|Sextas?\\s+feiras?|Sextas?|6a|S[aá]bados?|2ª|3ª|4ª|5ª|6ª|Dom|Seg|Ter|Qua|Qui|Sex|Sab|Seg.|Ter[cç].|Qua.|Qui.|Sex.|S[aá]b.|Dom.)\\b'
    OnRegex = f'(?<=\\b(em|no)\\s+)({DayRegex}s?)\\b'
    RelaxedOnRegex = f'(?<=\\b(em|n[oa]|d[oa])\\s+)(dia\\s+)?((?<day>10|11|12|13|14|15|16|17|18|19|1|20|21|22|23|24|25|26|27|28|29|2|30|31|3|4|5|6|7|8|9)s?)\\b'
    ThisRegex = f'\\b(([nd]?es[st][ea]\\s*){WeekDayRegex})|({WeekDayRegex}\\s*([nd]?es[st]a\\s+semana))\\b'
    LastDateRegex = f'\\b(([uú]ltim[ao])\\s*{WeekDayRegex})|({WeekDayRegex}(\\s+(([nd]?es[st]a|na|da)\\s+([uú]ltima\\s+)?semana)))\\b'
    NextDateRegex = f'\\b(((pr[oó]xim[oa]|seguinte)\\s*){WeekDayRegex})|({WeekDayRegex}(\\s+(da\\s+)?(pr[oó]xima|seguinte)?(\\s*semana\\s+seguinte|\\s*semana)?))\\b'
    SpecialDayRegex = f'\\b((d?o\\s+)?(dia\\s+antes\\s+de\\s+ontem|antes\\s+de\\s+ontem|anteontem)|((d?o\\s+)?(dia\\s+|depois\\s+|dia\\s+depois\\s+)?de\\s+amanh[aã])|(o\\s)?dia\\s+seguinte|(o\\s)?pr[oó]ximo\\s+dia|(o\\s+)?[uú]ltimo\\s+dia|ontem|amanh[ãa]|hoje)|(do\\s+dia$)\\b'
    SpecialDayWithNumRegex = f'^[.]'
    ForTheRegex = f'^[.]'
    WeekDayAndDayOfMonthRegex = f'^[.]'
    WeekDayOfMonthRegex = f'(?<wom>(n?[ao]\\s+)?(?<cardinal>primeir[ao]|1[ao]|segund[ao]|2[ao]|terceir[ao]|3[ao]|[qc]uart[ao]|4[ao]|quint[ao]|5[ao]|[uú]ltim[ao])\\s+{WeekDayRegex}\\s+{MonthSuffixRegex})'
    RelativeWeekDayRegex = f'^[.]'
    NumberEndingPattern = f'^[.]'
    SpecialDateRegex = f'(?<=\\bno\\s+){DayRegex}\\b'
    OfMonthRegex = f'^\\s*de\\s*{MonthSuffixRegex}'
    MonthEndRegex = f'({MonthRegex}\\s*(o)?\\s*$)'
    WeekDayEnd = f'{WeekDayRegex}\\s*,?\\s*$'
    DateExtractor1 = f'\\b({WeekDayRegex}(\\s+|\\s*,\\s*))?{DayRegex}?((\\s*(de)|[/\\\\\\.\\-])\\s*)?{MonthRegex}\\b'
    DateExtractor2 = f'\\b({WeekDayRegex}(\\s+|\\s*,\\s*))?{DayRegex}\\s*([\\.\\-]|de)?\\s*{MonthRegex}?(\\s*(,|de)\\s*){YearRegex}\\b'
    DateExtractor3 = f'\\b({WeekDayRegex}(\\s+|\\s*,\\s*))?{DayRegex}(\\s+|\\s*,\\s*|\\s+de\\s+|\\s*-\\s*){MonthRegex}((\\s+|\\s*(,|de)\\s*){YearRegex})?\\b'
    DateExtractor4 = f'\\b{MonthNumRegex}\\s*[/\\\\\\-]\\s*{DayRegex}\\s*[/\\\\\\-]\\s*{YearRegex}'
    DateExtractor5 = f'\\b{DayRegex}\\s*[/\\\\\\-\\.]\\s*{MonthNumRegex}\\s*[/\\\\\\-\\.]\\s*{YearRegex}'
    DateExtractor6 = f'(?<=\\b(em|no|o)\\s+){MonthNumRegex}[\\-\\.]{DayRegex}\\b'
    DateExtractor7 = f'\\b{MonthNumRegex}\\s*/\\s*{DayRegex}((\\s+|\\s*(,|de)\\s*){YearRegex})?\\b'
    DateExtractor8 = f'(?<=\\b(em|no|o)\\s+){DayRegex}[\\\\\\-]{MonthNumRegex}\\b'
    DateExtractor9 = f'\\b{DayRegex}\\s*/\\s*{MonthNumRegex}((\\s+|\\s*(,|de)\\s*){YearRegex})?\\b'
    DateExtractor10 = f'\\b{YearRegex}\\s*[/\\\\\\-\\.]\\s*{MonthNumRegex}\\s*[/\\\\\\-\\.]\\s*{DayRegex}'
    DateExtractor11 = f'(?<=\\b(dia)\\s+){DayRegex}'
    HourNumRegex = f'\\b(?<hournum>zero|uma|duas|tr[êe]s|[qc]uatro|cinco|seis|sete|oito|nove|dez|onze|doze)\\b'
    DescRegex = f'(?<desc>pm\\b|am\\b|p\\.m\\.|a\\.m\\.)'
    AmDescRegex = f'(am\\b|a\\.m\\.|a m\\b|a\\. m\\.\\b|a\\.m\\b|a\\. m\\b)'
    PmDescRegex = f'(pm\\b|p\\.m\\.|p\\b|p m\\b|p\\. m\\.\\b|p\\.m\\b|p\\. m\\b)'
    AmPmDescRegex = f'(ampm)'
    MinuteNumRegex = f'(?<minnum>um|dois|tr[êe]s|[qc]uatro|cinco|seis|sete|oito|nove|dez|onze|doze|treze|catorze|quatorze|quinze|dez[ea]sseis|dez[ea]sete|dezoito|dez[ea]nove|vinte|trinta|[qc]uarenta|cin[qc]uenta)'
    DeltaMinuteNumRegex = f'(?<deltaminnum>um|dois|tr[êe]s|[qc]uatro|cinco|seis|sete|oito|nove|dez|onze|doze|treze|catorze|quatorze|quinze|dez[ea]sseis|dez[ea]sete|dezoito|dez[ea]nove|vinte|trinta|[qc]uarenta|cin[qc]uenta)'
    OclockRegex = f'(?<oclock>em\\s+ponto)'
    PmRegex = f'(?<pm>((pela|de|da|\\b[àa]\\b|na)\\s+(tarde|noite)))|((depois\\s+do|ap[óo]s\\s+o)\\s+(almo[çc]o|meio dia|meio-dia))'
    AmRegex = f'(?<am>(pela|de|da|na)\\s+(manh[ãa]|madrugada))'
    AmTimeRegex = f'(?<am>([dn]?es[st]a|(pela|de|da|na))\\s+(manh[ãa]|madrugada))'
    PmTimeRegex = f'(?<pm>(([dn]?es[st]a|\\b[àa]\\b|(pela|de|da|na))\\s+(tarde|noite)))|((depois\\s+do|ap[óo]s\\s+o)\\s+(almo[çc]o|meio dia|meio-dia))'
    LessThanOneHour = f'(?<lth>((\\s+e\\s+)?(quinze|(um\\s+|dois\\s+|tr[êes]\\s+)?quartos?)|quinze|(\\s*)(um\\s+|dois\\s+|tr[êes]\\s+)?quartos?|(\\s+e\\s+)(meia|trinta)|{BaseDateTime.DeltaMinuteRegex}(\\s+(minuto|minutos|min|mins))|{DeltaMinuteNumRegex}(\\s+(minuto|minutos|min|mins))))'
    TensTimeRegex = f'(?<tens>dez|vinte|trinta|[qc]uarenta|cin[qc]uenta)'
    WrittenTimeRegex = f'(?<writtentime>({HourNumRegex}\\s*((e|menos)\\s+)?({MinuteNumRegex}|({TensTimeRegex}((\\s*e\\s+)?{MinuteNumRegex})?)))|(({MinuteNumRegex}|({TensTimeRegex}((\\s*e\\s+)?{MinuteNumRegex})?))\\s*((para as|pras|antes da|antes das)\\s+)?({HourNumRegex}|{BaseDateTime.HourRegex})))'
    TimePrefix = f'(?<prefix>{LessThanOneHour}(\\s+(passad[ao]s)\\s+(as)?|\\s+depois\\s+(das?|do)|\\s+pras?|\\s+(para|antes)?\\s+([àa]s?))?)'
    TimeSuffix = f'(?<suffix>({LessThanOneHour}\\s+)?({AmRegex}|{PmRegex}|{OclockRegex}))'
    BasicTime = f'(?<basictime>{WrittenTimeRegex}|{HourNumRegex}|{BaseDateTime.HourRegex}:{BaseDateTime.MinuteRegex}(:{BaseDateTime.SecondRegex})?|{BaseDateTime.HourRegex})'
    AtRegex = f'\\b(?<=\\b([aà]s?)\\s+)({WrittenTimeRegex}|{HourNumRegex}|{BaseDateTime.HourRegex})\\b'
    ConnectNumRegex = f'({BaseDateTime.HourRegex}(?<min>00|01|02|03|04|05|06|07|08|09|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|27|28|29|30|31|32|33|34|35|36|37|38|39|40|41|42|43|44|45|46|47|48|49|50|51|52|53|54|55|56|57|58|59)\\s*{DescRegex})'
    TimeRegex1 = f'(\\b{TimePrefix}\\s+)?({WrittenTimeRegex}|{HourNumRegex}|{BaseDateTime.HourRegex})\\s*({DescRegex})'
    TimeRegex2 = f'(\\b{TimePrefix}\\s+)?(T)?{BaseDateTime.HourRegex}(\\s*)?:(\\s*)?{BaseDateTime.MinuteRegex}((\\s*)?:(\\s*)?{BaseDateTime.SecondRegex})?((\\s*{DescRegex})|\\b)'
    TimeRegex3 = f'(\\b{TimePrefix}\\s+)?{BaseDateTime.HourRegex}\\.{BaseDateTime.MinuteRegex}(\\s*{DescRegex})'
    TimeRegex4 = f'\\b(({DescRegex}?)|({BasicTime}?)({DescRegex}?))({TimePrefix}\\s*)({HourNumRegex}|{BaseDateTime.HourRegex})?(\\s+{TensTimeRegex}(\\s+e\\s+)?{MinuteNumRegex}?)?({OclockRegex})?\\b'
    TimeRegex5 = f'\\b({TimePrefix}|{BasicTime}{TimePrefix})\\s+(\\s*{DescRegex})?{BasicTime}?\\s*{TimeSuffix}\\b'
    TimeRegex6 = f'({BasicTime}(\\s*{DescRegex})?\\s+{TimeSuffix}\\b)'
    TimeRegex7 = f'\\b{TimeSuffix}\\s+[àa]s?\\s+{BasicTime}((\\s*{DescRegex})|\\b)'
    TimeRegex8 = f'\\b{TimeSuffix}\\s+{BasicTime}((\\s*{DescRegex})|\\b)'
    TimeRegex9 = f'\\b(?<writtentime>{HourNumRegex}\\s+({TensTimeRegex}\\s*)?(e\\s+)?{MinuteNumRegex}?)\\b'
    TimeRegex10 = f'(\\b([àa]|ao?)|na|de|da|pela)\\s+(madrugada|manh[ãa]|meio\\s*dia|meia\\s*noite|tarde|noite)'
    TimeRegex11 = f'\\b({WrittenTimeRegex})({DescRegex}?)\\b'
    TimeRegex12 = f'(\\b{TimePrefix}\\s+)?{BaseDateTime.HourRegex}(\\s*h\\s*){BaseDateTime.MinuteRegex}(\\s*{DescRegex})?'
    PrepositionRegex = f'(?<prep>([àa]s?|em|por|pelo|pela|no|na|de|d[oa]?)?$)'
    NowRegex = f'\\b(?<now>((logo|exatamente)\\s+)?agora(\\s+mesmo)?|neste\\s+momento|(assim\\s+que|t[ãa]o\\s+cedo\\s+quanto)\\s+(poss[ií]vel|possas?|possamos)|o\\s+mais\\s+(cedo|r[aá]pido)\\s+poss[íi]vel|recentemente|previamente)\\b'
    SuffixRegex = f'^\\s*((e|a|em|por|pelo|pela|no|na|de)\\s+)?(manh[ãa]|madrugada|meio\\s*dia|tarde|noite)\\b'
    TimeOfDayRegex = f'\\b(?<timeOfDay>manh[ãa]|madrugada|tarde|noite|((depois\\s+do|ap[óo]s\\s+o)\\s+(almo[çc]o|meio dia|meio-dia)))\\b'
    SpecificTimeOfDayRegex = f'\\b(((((a)?\\s+|[nd]?es[st]a|seguinte|pr[oó]xim[oa]|[uú]ltim[oa])\\s+)?{TimeOfDayRegex}))\\b'
    TimeOfTodayAfterRegex = f'^\\s*(,\\s*)?([àa]|em|por|pelo|pela|de|no|na?\\s+)?{SpecificTimeOfDayRegex}'
    TimeOfTodayBeforeRegex = f'({SpecificTimeOfDayRegex}(\\s*,)?(\\s+(a\\s+la(s)?|para))?\\s*)'
    SimpleTimeOfTodayAfterRegex = f'({HourNumRegex}|{BaseDateTime.HourRegex})\\s*(,\\s*)?((en|de(l)?)?\\s+)?{SpecificTimeOfDayRegex}'
    SimpleTimeOfTodayBeforeRegex = f'({SpecificTimeOfDayRegex}(\\s*,)?(\\s+(a\\s+la|para))?\\s*({HourNumRegex}|{BaseDateTime.HourRegex}))'
    TheEndOfRegex = f'((no|ao)\\s+)?(fi(m|nal)|t[ée]rmin(o|ar))(\\s+d?o(\\s+dia)?(\\s+de)?)?\\s*$'
    UnitRegex = f'(?<unit>anos|ano|meses|m[êe]s|semanas|semana|dias|dia|horas|hora|h|hr|hrs|hs|minutos|minuto|mins|min|segundos|segundo|segs|seg)\\b'
    ConnectorRegex = f'^(,|t|para [ao]|para as|pras|cerca de|cerca das|perto de|perto das|quase)$'
    TimeHourNumRegex = f'(?<hour>vinte e um|vinte e dois|vinte e tr[êe]s|vinte e quatro|zero|um|uma|dois|duas|tr[êe]s|quatro|cinco|seis|sete|oito|nove|dez|onze|doze|treze|quatorze|catorze|quinze|dez[ea]sseis|dez[ea]ssete|dezoito|dez[ea]nove|vinte)'
    PureNumFromTo = f'((desde|de|da|das)\\s+(a(s)?\\s+)?)?({BaseDateTime.HourRegex}|{TimeHourNumRegex})(\\s*(?<leftDesc>{DescRegex}))?\\s*{TillRegex}\\s*({BaseDateTime.HourRegex}|{TimeHourNumRegex})\\s*(?<rightDesc>{PmRegex}|{AmRegex}|{DescRegex})?'
    PureNumBetweenAnd = f'(entre\\s+((a|as)?\\s+)?)({BaseDateTime.HourRegex}|{TimeHourNumRegex})(\\s*(?<leftDesc>{DescRegex}))?\\s*e\\s*(a(s)?\\s+)?({BaseDateTime.HourRegex}|{TimeHourNumRegex})\\s*(?<rightDesc>{PmRegex}|{AmRegex}|{DescRegex})?'
    SpecificTimeFromTo = f'^[.]'
    SpecificTimeBetweenAnd = f'^[.]'
    TimeUnitRegex = f'(?<unit>horas|hora|h|minutos|minuto|mins|min|segundos|segundo|secs|sec)\\b'
    TimeFollowedUnit = f'^\\s*{TimeUnitRegex}'
    TimeNumberCombinedWithUnit = f'\\b(?<num>\\d+(\\,\\d*)?)\\s*{TimeUnitRegex}'
    DateTimePeriodNumberCombinedWithUnit = f'\\b(?<num>\\d+(\\.\\d*)?)\\s*{TimeUnitRegex}'
    PeriodTimeOfDayWithDateRegex = f'\\b((e|[àa]|em|na|no|ao|pel[ao]|de)\\s+)?(?<timeOfDay>manh[ãa]|madrugada|(passado\\s+(o\\s+)?)?meio\\s+dia|tarde|noite)\\b'
    RelativeTimeUnitRegex = f'({PastRegex}|{FutureRegex})\\s+{UnitRegex}|{UnitRegex}\\s+({PastRegex}|{FutureRegex})'
    SuffixAndRegex = f'(?<suffix>\\s*(e)\\s+(?<suffix_num>meia|(um\\s+)?quarto))'
    FollowedUnit = f'^\\s*{UnitRegex}'
    LessThanRegex = f'^[.]'
    MoreThanRegex = f'^[.]'
    DurationNumberCombinedWithUnit = f'\\b(?<num>\\d+(\\,\\d*)?){UnitRegex}'
    AnUnitRegex = f'\\b(um(a)?)\\s+{UnitRegex}'
    DuringRegex = f'^[.]'
    AllRegex = f'\\b(?<all>tod[oa]?\\s+(o|a)\\s+(?<unit>ano|m[êe]s|semana|dia))\\b'
    HalfRegex = f'\\b(?<half>mei[oa]\\s+(?<unit>ano|m[êe]s|semana|dia|hora))\\b'
    ConjunctionRegex = f'^[.]'
    InexactNumberRegex = f'\\b(poucos|pouco|algum|alguns|v[áa]rios)\\b'
    InexactNumberUnitRegex = f'\\b(poucos|pouco|algum|alguns|v[áa]rios)\\s+{UnitRegex}'
    HolidayRegex1 = f'\\b(?<holiday>sexta-feira santa|sexta-feira da paix[ãa]o|quarta-feira de cinzas|carnaval|dia (de|de los) presidentes?|ano novo chin[eê]s|ano novo|v[ée]spera de ano novo|natal|v[ée]spera de natal|dia de a[cç][ãa]o de gra[çc]as|a[cç][ãa]o de gra[çc]as|yuandan|halloween|dia das bruxas|p[áa]scoa)(\\s+(d[eo]?\\s+)?({FullYearRegex}|(?<order>(pr[oó]xim[oa]?|[nd]?es[st][ea]|[uú]ltim[oa]?|em))\\s+ano))?\\b'
    HolidayRegex2 = f'\\b(?<holiday>(dia( d[eoa]s?)? )?(martin luther king|todos os santos|trabalho|s[ãa]o (patr[íi]cio|francisco|jorge|jo[ãa]o)|independ[êe]ncia|trabalhador|trabalho))(\\s+(d[eo]?\\s+)?({FullYearRegex}|(?<order>(pr[oó]xim[oa]?|[nd]?es[st][ea]|[uú]ltim[oa]?|em))\\s+ano))?\\b'
    HolidayRegex3 = f'\\b(?<holiday>(dia( d[eoa]s?)? )(trabalhador|trabalhadores|trabalho|m[ãa]es?|pais?|mulher(es)?|crian[çc]as?|marmota|professor|professores))(\\s+(d[eo]?\\s+)?({FullYearRegex}|(?<order>(pr[oó]xim[oa]?|[nd]?es[st][ea]|[uú]ltim[oa]?|em))\\s+ano))?\\b'
    BeforeRegex = f'(antes(\\s+(de|dos?|das?)?)?)'
    AfterRegex = f'((depois|ap[óo]s)(\\s*(de|d?os?|d?as?)?)?)'
    SinceRegex = f'(desde(\\s+(as?|o))?)'
    PeriodicRegex = f'\\b(?<periodic>di[áa]ri[ao]|diariamente|mensalmente|semanalmente|quinzenalmente|anualmente)\\b'
    EachExpression = f'cada|tod[oa]s?\\s*([oa]s)?'
    EachUnitRegex = f'(?<each>({EachExpression})\\s*{UnitRegex})'
    EachPrefixRegex = f'(?<each>({EachExpression})\\s*$)'
    EachDayRegex = f'\\s*({EachExpression})\\s*dias\\s*\\b'
    BeforeEachDayRegex = f'({EachExpression})\\s*dias(\\s+(as|ao))?\\s*\\b'
    SetEachRegex = f'(?<each>({EachExpression})\\s*)'
    LaterEarlyPeriodRegex = f'^[.]'
    WeekWithWeekDayRangeRegex = f'^[.]'
    GeneralEndingRegex = f'^[.]'
    MiddlePauseRegex = f'^[.]'
    PrefixArticleRegex = f'^[\\.]'
    OrRegex = f'^[.]'
    YearPlusNumberRegex = f'^[.]'
    NumberAsTimeRegex = f'^[.]'
    TimeBeforeAfterRegex = f'^[.]'
    DateNumberConnectorRegex = f'^[.]'
    ComplexDatePeriodRegex = f'^[.]'
    AgoRegex = f'\\b(antes|atr[áa]s|no passado)\\b'
    LaterRegex = f'\\b(depois d[eoa]s?|ap[óo]s (as)?|desde (as|o)|desde|no futuro|mais tarde)\\b'
    Tomorrow = 'amanh[ãa]'
    UnitMap = dict([('anos', 'Y'),
                    ('ano', 'Y'),
                    ('meses', 'MON'),
                    ('mes', 'MON'),
                    ('mês', 'MON'),
                    ('semanas', 'W'),
                    ('semana', 'W'),
                    ('dias', 'D'),
                    ('dia', 'D'),
                    ('horas', 'H'),
                    ('hora', 'H'),
                    ('hrs', 'H'),
                    ('hr', 'H'),
                    ('h', 'H'),
                    ('minutos', 'M'),
                    ('minuto', 'M'),
                    ('mins', 'M'),
                    ('min', 'M'),
                    ('segundos', 'S'),
                    ('segundo', 'S'),
                    ('segs', 'S'),
                    ('seg', 'S')])
    # Durations in seconds for each unit spelling.
    UnitValueMap = dict([('anos', 31536000),
                         ('ano', 31536000),
                         ('meses', 2592000),
                         ('mes', 2592000),
                         ('mês', 2592000),
                         ('semanas', 604800),
                         ('semana', 604800),
                         ('dias', 86400),
                         ('dia', 86400),
                         ('horas', 3600),
                         ('hora', 3600),
                         ('hrs', 3600),
                         ('hr', 3600),
                         ('h', 3600),
                         ('minutos', 60),
                         ('minuto', 60),
                         ('mins', 60),
                         ('min', 60),
                         ('segundos', 1),
                         ('segundo', 1),
                         ('segs', 1),
                         ('seg', 1)])
    SeasonMap = dict([('primavera', 'SP'),
                      ('verao', 'SU'),
                      ('verão', 'SU'),
                      ('outono', 'FA'),
                      ('inverno', 'WI')])
    SeasonValueMap = dict([('SP', 3),
                           ('SU', 6),
                           ('FA', 9),
                           ('WI', 12)])
    CardinalMap = dict([('primeiro', 1),
                        ('primeira', 1),
                        ('1o', 1),
                        ('1a', 1),
                        ('segundo', 2),
                        ('segunda', 2),
                        ('2o', 2),
                        ('2a', 2),
                        ('terceiro', 3),
                        ('terceira', 3),
                        ('3o', 3),
                        ('3a', 3),
                        ('cuarto', 4),
                        ('quarto', 4),
                        ('cuarta', 4),
                        ('quarta', 4),
                        ('4o', 4),
                        ('4a', 4),
                        ('quinto', 5),
                        ('quinta', 5),
                        ('5o', 5),
                        ('5a', 5)])
    # ISO-ish weekday numbering with Sunday = 0.
    DayOfWeek = dict([('segunda-feira', 1),
                      ('segundas-feiras', 1),
                      ('segunda', 1),
                      ('segundas', 1),
                      ('terça-feira', 2),
                      ('terças-feiras', 2),
                      ('terça', 2),
                      ('terças', 2),
                      ('terca-feira', 2),
                      ('tercas-feiras', 2),
                      ('terca', 2),
                      ('tercas', 2),
                      ('quarta-feira', 3),
                      ('quartas-feiras', 3),
                      ('quarta', 3),
                      ('quartas', 3),
                      ('quinta-feira', 4),
                      ('quintas-feiras', 4),
                      ('quinta', 4),
                      ('quintas', 4),
                      ('sexta-feira', 5),
                      ('sextas-feiras', 5),
                      ('sexta', 5),
                      ('sextas', 5),
                      ('sabado', 6),
                      ('sabados', 6),
                      ('sábado', 6),
                      ('sábados', 6),
                      ('domingo', 0),
                      ('domingos', 0),
                      ('seg', 1),
                      ('2a', 1),
                      ('ter', 2),
                      ('3a', 2),
                      ('qua', 3),
                      ('4a', 3),
                      ('qui', 4),
                      ('5a', 4),
                      ('sex', 5),
                      ('6a', 5),
                      ('sab', 6),
                      ('dom', 0)])
    MonthOfYear = dict([('janeiro', 1),
                        ('fevereiro', 2),
                        ('março', 3),
                        ('marco', 3),
                        ('abril', 4),
                        ('maio', 5),
                        ('junho', 6),
                        ('julho', 7),
                        ('agosto', 8),
                        ('septembro', 9),
                        ('setembro', 9),
                        ('outubro', 10),
                        ('novembro', 11),
                        ('dezembro', 12),
                        ('jan', 1),
                        ('fev', 2),
                        ('mar', 3),
                        ('abr', 4),
                        ('mai', 5),
                        ('jun', 6),
                        ('jul', 7),
                        ('ago', 8),
                        ('sept', 9),
                        ('set', 9),
                        ('out', 10),
                        ('nov', 11),
                        ('dez', 12),
                        ('1', 1),
                        ('2', 2),
                        ('3', 3),
                        ('4', 4),
                        ('5', 5),
                        ('6', 6),
                        ('7', 7),
                        ('8', 8),
                        ('9', 9),
                        ('10', 10),
                        ('11', 11),
                        ('12', 12),
                        ('01', 1),
                        ('02', 2),
                        ('03', 3),
                        ('04', 4),
                        ('05', 5),
                        ('06', 6),
                        ('07', 7),
                        ('08', 8),
                        ('09', 9)])
    Numbers = dict([('zero', 0),
                    ('um', 1),
                    ('uma', 1),
                    ('dois', 2),
                    ('tres', 3),
                    ('três', 3),
                    ('quatro', 4),
                    ('cinco', 5),
                    ('seis', 6),
                    ('sete', 7),
                    ('oito', 8),
                    ('nove', 9),
                    ('dez', 10),
                    ('onze', 11),
                    ('doze', 12),
                    ('dezena', 12),
                    ('dezenas', 12),
                    ('treze', 13),
                    ('catorze', 14),
                    ('quatorze', 14),
                    ('quinze', 15),
                    ('dezesseis', 16),
                    ('dezasseis', 16),
                    ('dezessete', 17),
                    ('dezassete', 17),
                    ('dezoito', 18),
                    ('dezenove', 19),
                    ('dezanove', 19),
                    ('vinte', 20),
                    ('vinte e um', 21),
                    ('vinte e uma', 21),
                    ('vinte e dois', 22),
                    ('vinte e duas', 22),
                    ('vinte e tres', 23),
                    ('vinte e três', 23),
                    ('vinte e quatro', 24),
                    ('vinte e cinco', 25),
                    ('vinte e seis', 26),
                    ('vinte e sete', 27),
                    ('vinte e oito', 28),
                    ('vinte e nove', 29),
                    ('trinta', 30)])
    # Holiday aliases are matched against lowercased, de-accented, space-free text.
    HolidayNames = dict([('pai', ['diadopai', 'diadospais']),
                         ('mae', ['diadamae', 'diadasmaes']),
                         ('acaodegracas', ['diadegracas', 'diadeacaodegracas', 'acaodegracas']),
                         ('trabalho', ['diadotrabalho', 'diadotrabalhador', 'diadostrabalhadores']),
                         ('pascoa', ['diadepascoa', 'pascoa']),
                         ('natal', ['natal', 'diadenatal']),
                         ('vesperadenatal', ['vesperadenatal']),
                         ('anonovo', ['anonovo', 'diadeanonovo', 'diadoanonovo']),
                         ('vesperadeanonovo', ['vesperadeanonovo', 'vesperadoanonovo']),
                         ('yuandan', ['yuandan']),
                         ('todosossantos', ['todosossantos']),
                         ('professor', ['diadoprofessor', 'diadosprofessores']),
                         ('crianca', ['diadacrianca', 'diadascriancas']),
                         ('mulher', ['diadamulher'])])
    VariableHolidaysTimexDictionary = dict([('pai', '-06-WXX-7-3'),
                                            ('mae', '-05-WXX-7-2'),
                                            ('acaodegracas', '-11-WXX-4-4'),
                                            ('trabalho', '-05-WXX-1-1'),
                                            ('memoria', '-03-WXX-2-4')])
    DoubleNumbers = dict([('metade', 0.5),
                          ('quarto', 0.25)])
    DateTokenPrefix = 'em '
    TimeTokenPrefix = 'as '
    TokenBeforeDate = 'o '
    TokenBeforeTime = 'as '
    NextPrefixRegex = f'(pr[oó]xim[oa]|seguinte)\\b'
    PastPrefixRegex = f'([uú]ltim[oa])\\b'
    ThisPrefixRegex = f'([nd]?es[st][ea])\\b'
    RelativeDayRegex = f'^[\\.]'
    RestOfDateRegex = f'^[\\.]'
    RelativeDurationUnitRegex = f'^[\\.]'
    ReferenceDatePeriodRegex = f'^[.]'
    FromToRegex = f'\\b(from).+(to)\\b.+'
    SingleAmbiguousMonthRegex = f'^(the\\s+)?(may|march)$'
    UnspecificDatePeriodRegex = f'^[.]'
    PrepositionSuffixRegex = f'\\b(on|in|at|around|from|to)$'
    RestOfDateTimeRegex = f'^[\\.]'
    SetWeekDayRegex = f'^[\\.]'
    NightRegex = f'\\b(meia noite|noite|de noite)\\b'
    CommonDatePrefixRegex = f'\\b(dia)\\s+$'
    DurationUnitRegex = f'^[\\.]'
    DurationConnectorRegex = f'^[.]'
    CenturyRegex = f'^[.]'
    DecadeRegex = f'^[.]'
    DecadeWithCenturyRegex = f'^[.]'
    RelativeDecadeRegex = f'^[.]'
    FullTextYearRegex = f'^[\\*]'
    YearSuffix = f'(,?\\s*({YearRegex}|{FullTextYearRegex}))'
    YearAfterRegex = f'^[.]'
    YearPeriodRegex = f'^[.]'
    FutureSuffixRegex = f'^[.]'
    WrittenDecades = dict([('', 0)])
    SpecialDecadeCases = dict([('', 0)])
    DefaultLanguageFallback = 'DMY'
    DurationDateRestrictions = []
# pylint: enable=line-too-long
|
import wx
import re
import os
import sys
import Model
import Utils
import ColGrid
from collections import defaultdict
from FixCategories import FixCategories, SetCategory
from GetResults import GetResults, RidersCanSwap
from ExportGrid import ExportGrid
from RiderDetail import ShowRiderDetailDialog
from EditEntry import CorrectNumber, ShiftNumber, InsertNumber, DeleteEntry, SwapEntry
from Undo import undo
import Flags
# Cache of scaled flag bitmaps keyed by (ioc_code, pixel_height) so the grid
# renderer does not rescale the flag image on every cell repaint.
bitmapCache = {}
class IOCCodeRenderer(wx.grid.GridCellRenderer):
    """Grid cell renderer that draws a country flag before the cell text.

    The first three characters of the cell value are treated as an IOC
    country code and looked up in the Flags module; scaled bitmaps are
    memoized in the module-level bitmapCache.
    """

    def getImgWidth(self, ioc, height):
        """Return (image, width, height, padding) for the flag scaled to the text height.

        Returns (None, 0, 0, 0) when no flag image exists for the code.
        """
        flag = Flags.GetFlagImage(ioc)
        if not flag:
            return None, 0, 0, 0
        scaledH = int(height * 0.8)
        # Preserve the flag's aspect ratio at the scaled height.
        scaledW = int(float(flag.GetWidth()) / float(flag.GetHeight()) * float(scaledH))
        pad = int(height * 0.1)
        return flag, scaledW, scaledH, pad

    def Draw(self, grid, attr, dc, rect, row, col, isSelected):
        """Paint the background, the cell text and (if available) the flag bitmap."""
        value = grid.GetCellValue(row, col)
        dc.SetFont(attr.GetFont())
        textW, textH = dc.GetTextExtent(value)

        code = value[:3]
        flag, flagW, flagH, pad = self.getImgWidth(code, textH)

        fore = attr.GetTextColour()
        back = attr.GetBackgroundColour()
        if isSelected:
            # Selection inverts the colours.
            fore, back = back, fore

        # Fill the whole cell with the background colour.
        dc.SetBrush(wx.Brush(back, wx.SOLID))
        dc.SetPen(wx.TRANSPARENT_PEN)
        dc.DrawRectangle(rect)

        # Text starts to the right of the flag (plus padding).
        textRect = wx.Rect(rect.GetX() + pad + flagW, rect.GetY(), rect.GetWidth() - pad - flagW, rect.GetHeight())
        hAlign, vAlign = attr.GetAlignment()
        dc.SetTextForeground(fore)
        dc.SetTextBackground(back)
        grid.DrawTextRectangle(dc, value, textRect, hAlign, vAlign)

        if flag:
            cacheKey = (code, flagH)
            if cacheKey not in bitmapCache:
                bitmapCache[cacheKey] = flag.Scale(flagW, flagH, wx.IMAGE_QUALITY_HIGH).ConvertToBitmap()
            # Vertically centre the flag inside the cell.
            dc.DrawBitmap(bitmapCache[cacheKey], rect.GetX(), rect.GetY() + (rect.GetHeight() - flagH) // 2)

    def GetBestSize(self, grid, attr, dc, row, col):
        """Return the size needed for the text plus the flag (when present)."""
        value = grid.GetCellValue(row, col)
        dc.SetFont(attr.GetFont())
        textW, textH = dc.GetTextExtent(value)
        flag, flagW, flagH, pad = self.getImgWidth(value[:3], textH)
        if flag:
            return wx.Size(textW + flagW + pad, textH)
        return wx.Size(textW, textH)

    def Clone(self):
        return IOCCodeRenderer()
# Matches any single non-digit character (used to strip non-numeric text).
reNonDigits = re.compile( '[^0-9]' )
# Extracts the lap number from labels like '<Lap> 12' or 'Lap 12'
# (the angle brackets are individually optional).
reLapMatch = re.compile( '<?Lap>? ([0-9]+)' )
class Results( wx.Panel ):
DisplayLapTimes = 0
DisplayRaceTimes = 1
DisplayLapSpeeds = 2
DisplayRaceSpeeds = 3
def __init__( self, parent, id = wx.ID_ANY ):
super().__init__(parent, id)
self.category = None
self.showRiderData = True
self.selectDisplay = 0
self.firstDraw = True
self.rcInterp = set()
self.rcNumTime = set()
self.numSelect = None
self.isEmpty = True
self.reSplit = re.compile( '[\[\]\+= ]+' ) # separators for the fields.
self.iLap = None
self.entry = None
self.iRow, self.iCol = None, None
self.iLastLap = 0
self.fastestLapRC = None
self.hbs = wx.BoxSizer(wx.HORIZONTAL)
self.categoryLabel = wx.StaticText( self, label = _('Category:') )
self.categoryChoice = wx.Choice( self )
self.Bind(wx.EVT_CHOICE, self.doChooseCategory, self.categoryChoice)
self.showRiderDataToggle = wx.ToggleButton( self, label = _('Show Rider Data'), style=wx.BU_EXACTFIT )
self.showRiderDataToggle.SetValue( self.showRiderData )
self.Bind( wx.EVT_TOGGLEBUTTON, self.onShowRiderData, self.showRiderDataToggle )
self.showLapTimesRadio = wx.RadioButton( self, label = _('Lap Times'), style=wx.BU_EXACTFIT|wx.RB_GROUP )
self.showLapTimesRadio.SetValue( self.selectDisplay == Results.DisplayLapTimes )
self.Bind( wx.EVT_RADIOBUTTON, self.onSelectDisplayOption, self.showLapTimesRadio )
self.showLapTimesRadio.SetToolTip(wx.ToolTip(_('Useful for finding the fastest lap.')))
self.showRaceTimesRadio = wx.RadioButton( self, label = _('Race Times'), style=wx.BU_EXACTFIT )
self.showRaceTimesRadio.SetValue( self.selectDisplay == Results.DisplayRaceTimes )
self.Bind( wx.EVT_RADIOBUTTON, self.onSelectDisplayOption, self.showRaceTimesRadio )
self.showRaceTimesRadio.SetToolTip(wx.ToolTip(_('Useful for finding for Prime winners.\nAfter selecting, click on a lap header to sort.')))
self.showLapSpeedsRadio = wx.RadioButton( self, label = _('Lap Speeds'), style=wx.BU_EXACTFIT )
self.showLapSpeedsRadio.SetValue( self.selectDisplay == Results.DisplayLapSpeeds )
self.Bind( wx.EVT_RADIOBUTTON, self.onSelectDisplayOption, self.showLapSpeedsRadio )
self.showLapSpeedsRadio.SetToolTip(wx.ToolTip(_('Useful for finding the fastest lap.')))
self.showRaceSpeedsRadio = wx.RadioButton( self, label = _('Race Speeds'), style=wx.BU_EXACTFIT )
self.showRaceSpeedsRadio.SetValue( self.selectDisplay == Results.DisplayRaceSpeeds )
self.Bind( wx.EVT_RADIOBUTTON, self.onSelectDisplayOption, self.showRaceSpeedsRadio )
self.showRaceSpeedsRadio.SetToolTip(wx.ToolTip(_("Useful to predict how long a race will take based on rider's average speed.")))
f = self.showLapTimesRadio.GetFont()
self.boldFont = wx.Font( f.GetPointSize()+2, f.GetFamily(), f.GetStyle(), wx.FONTWEIGHT_BOLD, f.GetUnderlined() )
self.search = wx.SearchCtrl(self, size=(80,-1), style=wx.TE_PROCESS_ENTER )
# self.search.ShowCancelButton( True )
self.Bind(wx.EVT_SEARCHCTRL_SEARCH_BTN, self.OnSearch, self.search)
self.Bind(wx.EVT_SEARCHCTRL_CANCEL_BTN, self.OnCancelSearch, self.search)
self.Bind(wx.EVT_TEXT_ENTER, self.OnDoSearch, self.search)
bitmap = wx.Bitmap( os.path.join(Utils.getImageFolder(), 'Zoom-In-icon.png'), wx.BITMAP_TYPE_PNG )
self.zoomInButton = wx.BitmapButton( self, wx.ID_ZOOM_IN, bitmap, style=wx.BU_EXACTFIT | wx.BU_AUTODRAW )
self.Bind( wx.EVT_BUTTON, self.onZoomIn, self.zoomInButton )
bitmap = wx.Bitmap( os.path.join(Utils.getImageFolder(), 'Zoom-Out-icon.png'), wx.BITMAP_TYPE_PNG )
self.zoomOutButton = wx.BitmapButton( self, wx.ID_ZOOM_OUT, bitmap, style=wx.BU_EXACTFIT | wx.BU_AUTODRAW )
self.Bind( wx.EVT_BUTTON, self.onZoomOut, self.zoomOutButton )
self.hbs.Add( self.categoryLabel, flag=wx.TOP | wx.BOTTOM | wx.LEFT | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.categoryChoice, flag=wx.ALL, border=4 )
self.hbs.Add( self.showRiderDataToggle, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.showLapTimesRadio, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.showRaceTimesRadio, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.showLapSpeedsRadio, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.showRaceSpeedsRadio, flag=wx.ALL | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.AddStretchSpacer()
self.hbs.Add( self.search, flag=wx.TOP | wx.BOTTOM | wx.LEFT | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.zoomInButton, flag=wx.TOP | wx.BOTTOM | wx.LEFT | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.hbs.Add( self.zoomOutButton, flag=wx.TOP | wx.BOTTOM | wx.RIGHT | wx.ALIGN_CENTRE_VERTICAL, border=4 )
self.whiteColour = wx.Colour( 255, 255, 255 )
self.blackColour = wx.Colour( 0, 0, 0 )
self.yellowColour = wx.Colour( 255, 255, 0 )
self.orangeColour = wx.Colour( 255, 165, 0 )
self.greyColour = wx.Colour( 150, 150, 150 )
self.greenColour = wx.Colour( 127, 255, 0 )
self.lightBlueColour = wx.Colour( 153, 205, 255 )
self.splitter = wx.SplitterWindow( self )
self.labelGrid = ColGrid.ColGrid( self.splitter, style=wx.BORDER_SUNKEN )
self.labelGrid.SetRowLabelSize( 0 )
self.labelGrid.SetMargins( 0, 0 )
self.labelGrid.SetRightAlign( True )
self.labelGrid.AutoSizeColumns( True )
self.labelGrid.DisableDragColSize()
self.labelGrid.DisableDragRowSize()
# put a tooltip on the cells in a column
self.labelGrid.GetGridWindow().Bind(wx.EVT_MOTION, self.onMouseOver)
self.lapGrid = ColGrid.ColGrid( self.splitter, style=wx.BORDER_SUNKEN )
self.lapGrid.SetRowLabelSize( 0 )
self.lapGrid.SetMargins( 0, 0 )
self.lapGrid.SetRightAlign( True )
self.lapGrid.AutoSizeColumns( True )
self.lapGrid.DisableDragColSize()
self.lapGrid.DisableDragRowSize()
self.splitter.SetMinimumPaneSize(100)
self.splitter.SplitVertically(self.labelGrid, self.lapGrid, 400)
# Sync the two vertical scrollbars.
self.labelGrid.Bind(wx.EVT_SCROLLWIN, self.onScroll)
self.lapGrid.Bind(wx.EVT_SCROLLWIN, self.onScroll)
self.Bind( wx.grid.EVT_GRID_SELECT_CELL, self.doNumSelect )
self.Bind( wx.grid.EVT_GRID_CELL_LEFT_DCLICK, self.doNumDrilldown )
self.Bind( wx.grid.EVT_GRID_CELL_RIGHT_CLICK, self.doRightClick )
self.lapGrid.Bind( wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.doLabelClick )
self.labelGrid.Bind( wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.doLabelClick )
bs = wx.BoxSizer(wx.VERTICAL)
#bs.Add(self.hbs)
#bs.Add(self.lapGrid, 1, wx.GROW|wx.ALL, 5)
bs.Add(self.hbs, 0, wx.EXPAND )
bs.Add(self.splitter, 1, wx.EXPAND|wx.GROW|wx.ALL, 5 )
self.SetDoubleBuffered( True )
self.SetSizer(bs)
bs.SetSizeHints(self)
def onScroll(self, evt):
if evt.GetOrientation() == wx.SB_VERTICAL:
if evt.GetEventObject() == self.lapGrid:
wx.CallAfter( Utils.AlignVerticalScroll, self.lapGrid, self.labelGrid )
else:
wx.CallAfter( Utils.AlignVerticalScroll, self.labelGrid, self.lapGrid )
evt.Skip()
	def onMouseOver( self, event ):
		"""
		Displays a tooltip for the close finishes.
		"""
		# Map the mouse position to a grid cell; column 1 of the label grid holds the bib number.
		x, y = self.labelGrid.CalcUnscrolledPosition(event.GetX(),event.GetY())
		row, col = self.labelGrid.XYToCell(x, y)
		try:
			num = int(self.labelGrid.GetCellValue(row, 1))
		except Exception:
			return
		# closeFinishBibs is rebuilt by refresh(); maps bib -> list of bibs that finished within the close-finish window.
		if num in self.closeFinishBibs:
			try:
				pos = int(self.labelGrid.GetCellValue(row, 0))
			except Exception:
				return
			event.GetEventObject().SetToolTip('{} {}, {} {}: {} {}'.format(
					_('Pos'), pos,
					_('Bib'), num,
					_('close finish to'), ','.join( '{} {}'.format(_('Bib'), bib) for bib in self.closeFinishBibs[num]),
				)
			)
		else:
			event.GetEventObject().SetToolTip('')
	def alignLabelToLapScroll(self):
		"""Scroll the label grid to match the lap grid's vertical position."""
		Utils.AlignVerticalScroll( self.labelGrid, self.lapGrid )
	def alignLapToLabelScroll(self):
		"""Scroll the lap grid to match the label grid's vertical position."""
		Utils.AlignVerticalScroll( self.lapGrid, self.labelGrid )
	def OnSearch( self, event ):
		"""Handle the search button: delegate to OnDoSearch."""
		self.OnDoSearch()
	def OnCancelSearch( self, event ):
		"""Clear the search field when the cancel button is pressed."""
		self.search.SetValue( '' )
	def OnDoSearch( self, event = None ):
		"""Search for a bib number: sanitize input to digits, select it, and make it visible."""
		wx.CallAfter( self.search.SetFocus )
		n = self.search.GetValue()
		if n:
			# Keep only the digits of whatever was typed.
			n = reNonDigits.sub( '', n )
			self.search.SetValue( n )
		if not n:
			n = None
		if n:
			self.numSelect = n
			# If the bib is not in the current category, fall back to "All".
			if self.category and not self.category.matches( int(n) ):
				self.setCategoryAll()
		self.refresh()
		if Utils.isMainWin():
			Utils.getMainWin().setNumSelect( n )
		self.ensureVisibleNumSelect()
	def onZoomOut( self, event ):
		"""Decrease the zoom level of both grids and redraw."""
		self.labelGrid.Zoom( False )
		self.lapGrid.Zoom( False )
		self.splitter.UpdateSize()
		wx.CallAfter( self.refresh )
	def onZoomIn( self, event ):
		"""Increase the zoom level of both grids and redraw."""
		self.labelGrid.Zoom( True )
		self.lapGrid.Zoom( True )
		self.splitter.UpdateSize()
		wx.CallAfter( self.refresh )
def onShowRiderData( self, event ):
self.showRiderData ^= True
wx.CallAfter( self.refresh )
	def onSelectDisplayOption( self, event ):
		"""Set selectDisplay from whichever radio button is active (list order matches the Display* constants) and redraw."""
		for i, r in enumerate([self.showLapTimesRadio, self.showRaceTimesRadio, self.showLapSpeedsRadio, self.showRaceSpeedsRadio]):
			if r.GetValue():
				self.selectDisplay = i
				break
		wx.CallAfter( self.refresh )
	def doLabelClick( self, event ):
		"""Sort by the clicked column header: store sortLap or sortLabel on the race, then redraw (refresh() applies the sort)."""
		col = event.GetCol()
		with Model.LockRace() as race:
			race.sortLap = None
			race.sortLabel = None
			if event.GetEventObject() == self.lapGrid:
				# Lap grid header: sort by that lap number.
				label = self.lapGrid.GetColLabelValue( col )
				if label.startswith( _('Lap') ):
					race.sortLap = int(label.split()[1])
			else:
				# Label grid header: sort by the column name, unless it is
				# already the sorted column (marked with a leading '<').
				label = self.labelGrid.GetColLabelValue( col )
				if label[:1] != '<':
					race.sortLabel = label
		wx.CallAfter( self.refresh )
	def doRightClick( self, event ):
		"""Show a context menu for the clicked rider; the menu contents depend on
		whether the clicked entry is interpolated and whether adjacent riders can be swapped."""
		wx.CallAfter( self.search.SetFocus )
		self.doNumSelect( event )
		if self.numSelect is None:
			return
		# Case codes: 0 = no entry found, 1 = interpolated entry, 2 = real entry.
		allCases = 0
		interpCase = 1
		nonInterpCase = 2
		if not hasattr(self, 'popupInfo'):
			# Lazily build every (numBefore, numAfter, caseCode) menu variant once.
			self.popupInfo = [
				(_('Passings'), 	_('Switch to Passings tab'), self.OnPopupHistory, allCases),
				(_('RiderDetail'),	_('Show RiderDetail Dialog'), self.OnPopupRiderDetail, allCases),
				(None, None, None, None),
				(_('Show Photos'),	_('Show Photos'), self.OnPopupShowPhotos, allCases),
				(None, None, None, None),
				(_('Correct...'),	_('Change number or lap time...'),	self.OnPopupCorrect, interpCase),
				(_('Shift...'),		_('Move lap time earlier/later...'),	self.OnPopupShift, interpCase),
				(_('Delete...'),	_('Delete lap time...'),	self.OnPopupDelete, nonInterpCase),
				(None, None, None, None),
				(_('Swap with Rider before'),	_('Swap with Rider before'),	self.OnPopupSwapBefore, allCases),
				(_('Swap with Rider after'),	_('Swap with Rider after'),	self.OnPopupSwapAfter, allCases),
			]
			self.menuOptions = {}
			for numBefore in [False, True]:
				for numAfter in [False, True]:
					for caseCode in range(3):
						menu = wx.Menu()
						for name, text, callback, cCase in self.popupInfo:
							if not name:
								# None rows mark separators.
								Utils.addMissingSeparator( menu )
								continue
							if caseCode < cCase:
								continue
							if (name.endswith(_('before')) and not numBefore) or (name.endswith(_('after')) and not numAfter):
								continue
							item = menu.Append( wx.ID_ANY, name, text )
							self.Bind( wx.EVT_MENU, callback, item )
						Utils.deleteTrailingSeparators( menu )
						self.menuOptions[(numBefore,numAfter,caseCode)] = menu
		num = int(self.numSelect)
		with Model.LockRace() as race:
			if not race or num not in race.riders:
				return
			category = FixCategories( self.categoryChoice, getattr(race, 'resultsCategory', 0) )
			riderResults = dict( (r.num, r) for r in GetResults(category) )
			entries = race.riders[num].interpolate()
			try:
				# Find the entry matching the rider's final race time to classify it.
				laps = riderResults[num].laps
				self.entry = next(e for e in entries if e.t == riderResults[num].raceTimes[laps])
				caseCode = 1 if self.entry.interp else 2
			except (TypeError, IndexError, KeyError):
				caseCode = 0
			except StopIteration:
				return
		# Determine whether the riders in the rows above/below can be swapped with this one.
		self.numBefore, self.numAfter = None, None
		for iRow, attr in [(self.iRow - 1, 'numBefore'), (self.iRow + 1, 'numAfter')]:
			if not (0 <= iRow < self.lapGrid.GetNumberRows()):
				continue
			numAdjacent = int( self.labelGrid.GetCellValue(iRow, 1) )
			if RidersCanSwap( riderResults, num, numAdjacent ):
				setattr( self, attr, numAdjacent )
		menu = self.menuOptions[(self.numBefore is not None, self.numAfter is not None, caseCode)]
		try:
			self.PopupMenu( menu )
		except Exception as e:
			Utils.writeLog( 'Results:doRightClick: {}'.format(e) )
	def OnPopupCorrect( self, event ):
		"""Open the Correct Number/Time dialog for the selected entry."""
		CorrectNumber( self, self.entry )
	def OnPopupShift( self, event ):
		"""Open the Shift Time dialog for the selected entry."""
		ShiftNumber( self, self.entry )
	def OnPopupDelete( self, event ):
		"""Open the Delete Entry dialog for the selected entry."""
		DeleteEntry( self, self.entry )
def swapEntries( self, num, numAdjacent ):
if not num or not numAdjacent:
return
with Model.LockRace() as race:
if (not race or
num not in race.riders or
numAdjacent not in race ):
return
e1 = race.getRider(num).interpolate()
e2 = race.getRider(numAdjacent).interpolate()
category = FixCategories( self.categoryChoice, getattr(race, 'resultsCategory', 0) )
riderResults = dict( (r.num, r) for r in GetResults(category) )
try:
rr1, rr2 = riderResults[num], riderResults[numAdjacent]
laps = rr1.laps
undo.pushState()
ee1 = next( e for e in e1 if e.t == rr1.raceTimes[laps] )
ee2 = next( e for e in e2 if e.t == rr2.raceTimes[laps] )
with Model.LockRace() as race:
SwapEntry( ee1, ee2 )
wx.CallAfter( self.refresh )
except (KeyError, StopIteration):
pass
	def showLastLap( self ):
		"""Scroll the lap grid so the most recently completed lap column is visible."""
		if not self.isEmpty:
			# Clamp iLastLap to the valid column range.
			self.iLastLap = max( min(self.lapGrid.GetNumberCols()-1, self.iLastLap), 0 )
			self.labelGrid.MakeCellVisible( 0, 0 )
			self.lapGrid.MakeCellVisible( 0, self.iLastLap )
	def OnPopupSwapBefore( self, event ):
		"""Swap the selected rider with the rider in the row above (set by doRightClick)."""
		self.swapEntries( int(self.numSelect), self.numBefore )
	def OnPopupSwapAfter( self, event ):
		"""Swap the selected rider with the rider in the row below (set by doRightClick)."""
		self.swapEntries( int(self.numSelect), self.numAfter )
	def OnPopupHistory( self, event ):
		"""Switch the main window to the Passings page."""
		mainWin = Utils.getMainWin()
		if mainWin:
			mainWin.showPageName( mainWin.iPassingsPage )
	def OnPopupRiderDetail( self, event ):
		"""Open the RiderDetail dialog for the selected rider."""
		ShowRiderDetailDialog( self, self.numSelect )
def OnPopupShowPhotos( self, event ):
mainWin = Utils.mainWin
if not mainWin:
return
mainWin.photoDialog.Show( True )
mainWin.photoDialog.setNumSelect( int(self.numSelect) )
	def ensureVisibleNumSelect( self ):
		"""Scroll the grids so the row of the currently selected bib number is visible."""
		try:
			numSelectSearch = int(self.numSelect)
		except (TypeError, ValueError):
			return
		# Search bottom-up; column 1 of the label grid holds the bib number.
		for r in range(self.labelGrid.GetNumberRows()-1, -1, -1):
			try:
				cellNum = int(self.labelGrid.GetCellValue(r,1))
			except Exception:
				continue
			if cellNum == numSelectSearch:
				self.labelGrid.MakeCellVisible( r, 1 )
				wx.CallAfter( Utils.AlignVerticalScroll, self.labelGrid, self.lapGrid )
				break
	def showNumSelect( self ):
		"""Recompute all cell colours: highlight the selected rider's row, interpolated/edited
		cells, the fastest lap, close finishes, and the sorted column, then redraw both grids."""
		race = Model.race
		if race is None:
			return
		try:
			numSelectSearch = int(self.numSelect)
		except (TypeError, ValueError):
			numSelectSearch = None
		# Base lap-grid colours: yellow = interpolated, orange = edited number/time, green = fastest lap.
		textColourLap = {}
		backgroundColourLap = { rc:self.yellowColour for rc in self.rcInterp }
		backgroundColourLap.update( { rc:self.orangeColour for rc in self.rcNumTime } )
		if self.fastestLapRC is not None:
			backgroundColourLap[self.fastestLapRC] = self.greenColour
		textColourLabel = {}
		backgroundColourLabel = {}
		# Locate the Time column so close finishes can be highlighted there too.
		timeCol = None
		for c in range(self.labelGrid.GetNumberCols()):
			if self.labelGrid.GetColLabelValue(c) == _('Time'):
				timeCol = c
				break
		for r in range(self.lapGrid.GetNumberRows()):
			try:
				cellNum = int(self.labelGrid.GetCellValue(r,1))
			except Exception:
				continue
			if cellNum == numSelectSearch:
				# Selected rider: white-on-black across both grids (grey keeps interp/edited cells visible).
				for c in range(self.labelGrid.GetNumberCols()):
					textColourLabel[ (r,c) ] = self.whiteColour
					backgroundColourLabel[ (r,c) ] = self.blackColour
				for c in range(self.lapGrid.GetNumberCols()):
					textColourLap[ (r,c) ] = self.whiteColour
					backgroundColourLap[ (r,c) ] = self.blackColour if (r,c) not in self.rcInterp and (r,c) not in self.rcNumTime else self.greyColour
			if cellNum in self.closeFinishBibs:
				# Close finishes: light blue on the Pos and Time columns.
				textColourLabel[ (r,0) ] = self.blackColour
				backgroundColourLabel[ (r,0) ] = self.lightBlueColour
				if timeCol is not None:
					textColourLabel[ (r,timeCol) ] = self.blackColour
					backgroundColourLabel[ (r,timeCol) ] = self.lightBlueColour
		# Highlight the sorted columns.
		for c in range(self.lapGrid.GetNumberCols()):
			if self.lapGrid.GetColLabelValue(c).startswith('<'):
				for r in range(self.lapGrid.GetNumberRows()):
					textColourLap[ (r,c) ] = self.whiteColour
					backgroundColourLap[ (r,c) ] = self.blackColour \
						if (r,c) not in self.rcInterp and (r,c) not in self.rcNumTime else self.greyColour
				break
		for c in range(self.labelGrid.GetNumberCols()):
			if self.labelGrid.GetColLabelValue(c).startswith('<'):
				for r in range(self.labelGrid.GetNumberRows()):
					textColourLabel[ (r,c) ] = self.whiteColour
					backgroundColourLabel[ (r,c) ] = self.blackColour
				break
		self.labelGrid.Set( textColour=textColourLabel, backgroundColour=backgroundColourLabel )
		self.lapGrid.Set( textColour=textColourLap, backgroundColour=backgroundColourLap )
		self.labelGrid.Reset()
		self.lapGrid.Reset()
	def doNumDrilldown( self, event ):
		"""Double-click handler: select the rider, then open the RiderDetail dialog."""
		self.doNumSelect( event )
		mainWin = Utils.getMainWin()
		if self.numSelect is not None and mainWin:
			ShowRiderDetailDialog( self, self.numSelect )
	def doNumSelect( self, event ):
		"""Cell-selection handler: record the clicked row/col, extract the lap number
		(if a lap cell was clicked) and the bib number, then propagate the selection
		to the rest of the application via the main window."""
		grid = event.GetEventObject()
		self.iLap = None
		if self.isEmpty:
			return
		row, col = event.GetRow(), event.GetCol()
		self.iRow, self.iCol = row, col
		if row >= self.labelGrid.GetNumberRows():
			return
		if grid == self.lapGrid and self.lapGrid.GetCellValue(row, col):
			try:
				# Pull the lap number out of the "Lap N" column label.
				colName = self.lapGrid.GetColLabelValue( col )
				self.iLap = int( reLapMatch.match(colName).group(1) )
			except Exception:
				pass
		# Column 1 of the label grid holds the bib number.
		value = self.labelGrid.GetCellValue( row, 1 )
		numSelect = value if value else None
		if self.numSelect != numSelect:
			self.numSelect = numSelect
			self.showNumSelect()
		mainWin = Utils.getMainWin()
		if mainWin:
			# Keep the Passings (history) page's category in sync with ours.
			historyCategoryChoice = mainWin.history.categoryChoice
			historyCat = FixCategories( historyCategoryChoice )
			if historyCat is not None:
				cat = FixCategories( self.categoryChoice )
				if historyCat != cat:
					Model.setCategoryChoice( self.categoryChoice.GetSelection(), 'resultsCategory' )
					SetCategory( historyCategoryChoice, cat )
			mainWin.setNumSelect( numSelect )
	def setCategoryAll( self ):
		"""Select the first choice entry (index 0 — presumably the 'All' category; matches setCategory's fallback) and persist the selection."""
		FixCategories( self.categoryChoice, 0 )
		Model.setCategoryChoice( 0, 'resultsCategory' )
	def setCategory( self, category ):
		"""Select *category* in the choice control and persist it; fall back to index 0 if not found."""
		# enumerate starts at 1 because choice index 0 is the all-categories entry (see setCategoryAll).
		for i, c in enumerate(Model.race.getCategories( startWaveOnly=False ) if Model.race else [], 1):
			if c == category:
				SetCategory( self.categoryChoice, c )
				Model.setCategoryChoice( i, 'resultsCategory' )
				return
		SetCategory( self.categoryChoice, None )
		Model.setCategoryChoice( 0, 'resultsCategory' )
	def doChooseCategory( self, event ):
		"""Persist the newly chosen category and redraw."""
		Model.setCategoryChoice( self.categoryChoice.GetSelection(), 'resultsCategory' )
		self.refresh()
	def reset( self ):
		"""Clear the current rider selection."""
		self.numSelect = None
	def setNumSelect( self, num ):
		"""Set the selected bib number (stored as a string; None clears) and mirror it into the search box."""
		self.numSelect = num if num is None else '{}'.format(num)
		if self.numSelect:
			self.search.SetValue( self.numSelect )
	def clearGrid( self ):
		"""Empty both grids of data, column names, and colours."""
		self.labelGrid.Set( data = [], colnames = [], textColour = {}, backgroundColour = {} )
		self.labelGrid.Reset()
		self.lapGrid.Set( data = [], colnames = [], textColour = {}, backgroundColour = {} )
		self.lapGrid.Reset()
	def refresh( self ):
		"""Rebuild the entire display from the current race state: detect close finishes,
		populate both grids via ExportGrid, apply the selected display mode (lap/race
		times or speeds), apply any column sort, and mark interpolated/edited cells."""
		self.category = None
		self.isEmpty = True
		self.iLastLap = 0
		self.rcInterp = set()	# Set of row/col coordinates of interpolated numbers.
		self.rcNumTime = set()
		self.search.SelectAll()
		# Finishes within this many seconds of each other are flagged as "close".
		CloseFinishTime = 0.07
		self.closeFinishBibs = defaultdict( list )
		race = Model.race
		if not race:
			self.clearGrid()
			return
		category = FixCategories( self.categoryChoice, getattr(race, 'resultsCategory', 0) )
		self.hbs.Layout()
		for si in self.hbs.GetChildren():
			if si.IsWindow():
				si.GetWindow().Refresh()
		self.category = category
		sortLap = getattr( race, 'sortLap', None )
		sortLabel = getattr( race, 'sortLabel', None )
		# In a time trial the elapsed time starts at the rider's own start time.
		if race.isTimeTrial:
			def getSortTime( rr ):
				try:
					return rr.firstTime + rr._lastTimeOrig
				except Exception:
					return 0
		else:
			def getSortTime( rr ):
				try:
					return rr._lastTimeOrig
				except Exception:
					return 0
		# Detect close finishes among finishers, ordered by finish time.
		results = sorted(
			(rr for rr in GetResults(category)
				if rr.status==Model.Rider.Finisher and rr.lapTimes and getSortTime(rr) > 0),
			key = getSortTime
		)
		for i in range(1, len(results)):
			if results[i]._lastTimeOrig - results[i-1]._lastTimeOrig <= CloseFinishTime:
				self.closeFinishBibs[results[i-1].num].append( results[i].num )
				self.closeFinishBibs[results[i].num].append( results[i-1].num )
		# Remember the scroll positions so they can be restored after the rebuild.
		labelLastX, labelLastY = self.labelGrid.GetViewStart()
		lapLastX, lapLastY = self.lapGrid.GetViewStart()
		exportGrid = ExportGrid()
		exportGrid.setResultsOneList( category, self.showRiderData, showLapsFrequency = 1 )
		if not exportGrid.colnames:
			self.clearGrid()
			return
		# Fix the speed column.
		speedUnit = None
		iSpeedCol = None
		try:
			iSpeedCol = next(i for i, c in enumerate(exportGrid.colnames) if c == _('Speed'))
		except StopIteration:
			pass
		if iSpeedCol is not None:
			# Move the unit (e.g. km/h) into the column header and keep only the value in the cells.
			for r, d in enumerate(exportGrid.data[iSpeedCol]):
				d = d.strip()
				if not d:
					continue
				dSplit = d.split()
				if not speedUnit and len(dSplit) > 1:
					exportGrid.colnames[iSpeedCol] = speedUnit = dSplit[1]
				exportGrid.data[iSpeedCol][r] = dSplit[0]
				if exportGrid.data[iSpeedCol][r] == '"':
					exportGrid.data[iSpeedCol][r] += ' '
		colnames = exportGrid.colnames
		data = exportGrid.data
		# Resolve the sort request (sortLap and sortLabel are mutually exclusive).
		sortCol = None
		if sortLap:
			race.sortLabel = sortLabel = None
			for i, name in enumerate(colnames):
				if name.startswith(_('Lap')) and int(name.split()[1]) == sortLap:
					sortCol = i
					break
		elif sortLabel:
			race.sortLap = sortLap = None
			if sortLabel not in {_('Pos'), _('Gap'), _('Time'), _('mph'), _('km/h')}:
				for i, name in enumerate(colnames):
					if name == sortLabel:
						sortCol = i
						break
		if sortCol is None:
			race.sortLabel = race.sortLap = sortLabel = sortLap = None
		results = GetResults( category )
		# Disable the speed display options when no result has speed data.
		hasSpeeds = False
		for result in results:
			if getattr(result, 'lapSpeeds', None) or getattr(result, 'raceSpeeds', None):
				hasSpeeds = True
				break
		if not hasSpeeds:
			self.showLapSpeedsRadio.Enable( False )
			self.showRaceSpeedsRadio.Enable( False )
			if self.selectDisplay > Results.DisplayRaceTimes:
				self.selectDisplay = Results.DisplayRaceTimes
				self.showRaceTimesRadio.SetValue( True )
		else:
			self.showLapSpeedsRadio.Enable( True )
			self.showRaceSpeedsRadio.Enable( True )
		'''
		for r in [self.showLapTimesRadio, self.showRaceTimesRadio, self.showLapSpeedsRadio, self.showRaceSpeedsRadio]:
			if r.GetValue():
				r.SetFont( self.boldFont )
			else:
				r.SetFont( wx.NullFont )
		self.hbs.Layout()
		'''
		# Find the fastest lap time.
		self.fastestLapRC, fastestLapSpeed, fastestLapTime = None, 0.0, sys.float_info.max
		for r, result in enumerate(results):
			if getattr(result, 'lapSpeeds', None):	# Use speeds if available.
				for c, s in enumerate(result.lapSpeeds):
					if s > fastestLapSpeed:
						fastestLapSpeed = s
						self.fastestLapRC = (r, c)
			elif result.lapTimes:	# Else, use times.
				for c, t in enumerate(result.lapTimes):
					if t < fastestLapTime:
						fastestLapTime = t
						self.fastestLapRC = (r, c)
		highPrecision = Model.highPrecisionTimes()
		try:
			firstLapCol = next(i for i, name in enumerate(colnames) if name.startswith(_('Lap')))
		except StopIteration:
			firstLapCol = len(colnames)
		# Convert to race times, lap speeds or race speeds as required.
		'''
		DisplayLapTimes = 0
		DisplayRaceTimes = 1
		DisplayLapSpeeds = 2
		DisplayRaceSpeeds = 3
		'''
		if self.selectDisplay == Results.DisplayRaceTimes:
			for r, result in enumerate(results):
				for i, t in enumerate(result.raceTimes[1:]):
					try:
						data[i+firstLapCol][r] = Utils.formatTimeCompressed(t, highPrecision)
					except IndexError:
						pass
		elif self.selectDisplay == Results.DisplayLapSpeeds:
			for r, result in enumerate(results):
				if getattr(result, 'lapSpeeds', None):
					for i, s in enumerate(result.lapSpeeds):
						try:
							data[i+firstLapCol][r] = '{:.2f}'.format(s)
						except IndexError:
							pass
		elif self.selectDisplay == Results.DisplayRaceSpeeds:
			for r, result in enumerate(results):
				if getattr(result, 'raceSpeeds', None):
					for i, s in enumerate(result.raceSpeeds):
						try:
							data[i+firstLapCol][r] = '{:.2f}'.format(s)
						except IndexError:
							pass
		# Sort by the given lap, if there is one.
		# Also, add a position for the lap itself.
		if sortCol is not None:
			maxVal = 1000.0*24.0*60.0*60.0
			# Pick a key function appropriate to the column's content; maxVal sorts unparseable cells last.
			if sortLap:
				if self.selectDisplay in [Results.DisplayLapTimes, Results.DisplayRaceTimes]:
					getFunc = Utils.StrToSeconds
				else:
					getFunc = lambda x: -float(x)
			else:
				if colnames[sortCol] in [_('Start'), _('Finish'), _('Time')]:
					getFunc = Utils.StrToSeconds
				elif colnames[sortCol] in [_('mph'), _('km')]:
					getFunc = lambda x: -float(x) if x else 0.0
				elif colnames[sortCol] == _('Factor'):
					getFunc = lambda x: float(x) if x else maxVal
				elif colnames[sortCol] in [_('Pos'), _('Bib')]:
					getFunc = lambda x: int(x) if x and '{}'.format(x).isdigit() else maxVal
				else:
					getFunc = lambda x: '{}'.format(x)
					maxVal = '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
			sortPairs = []
			for r, result in enumerate(results):
				try:
					k = (getFunc(data[sortCol][r]), r)
				except Exception as e:
					k = (maxVal, r)
				sortPairs.append( (k, r) )
			sortPairs.sort()
			# Reorder every column by the sorted row order.
			for c in range(len(data)):
				col = data[c]
				data[c] = [col[i] if i < len(col) else '' for k, i in sortPairs]
			# Annotate the sorted column with "[rank: bib]".
			if colnames[sortCol] != _('Bib'):
				for r in range(len(data[sortCol])):
					if data[sortCol][r]:
						data[sortCol][r] = '{} [{}: {}]'.format(data[sortCol][r], r+1, data[1][r])
		# Highlight the sorted column.
		if sortLap:
			colnames = []
			for name in exportGrid.colnames:
				try:
					if int(name.split()[1]) == sortLap:
						name = '<{}>\n{}'.format(name,
							[_('by Lap Time'), _('by Race Time'), _('by Lap Speed'), _('by Race Speed')][self.selectDisplay])
				except Exception:
					pass
				colnames.append( name )
		elif sortLabel:
			colnames = []
			for name in exportGrid.colnames:
				if name == sortLabel:
					name = '<{}>'.format(name)
				colnames.append( name )
		else:
			colnames = exportGrid.colnames
		# Split the columns: everything before the first Lap column goes to the label grid.
		try:
			iLabelMax = next(i for i, name in enumerate(colnames) if name.startswith(_('Lap')) or name.startswith('<' + _('Lap')))
		except StopIteration:
			iLabelMax = len(colnames)
		colnamesLabels = colnames[:iLabelMax]
		dataLabels = data[:iLabelMax]
		colnameLaps = colnames[iLabelMax:]
		dataLaps = data[iLabelMax:]
		self.labelGrid.Set( data = dataLabels, colnames = colnamesLabels )
		self.labelGrid.SetLeftAlignCols( exportGrid.leftJustifyCols )
		self.labelGrid.AutoSizeColumns( True )
		self.labelGrid.Reset()
		# Render UCI/National code columns with flags.
		try:
			iUCICodeCol = colnamesLabels.index( _('UCICode') )
			self.labelGrid.SetColRenderer( iUCICodeCol, IOCCodeRenderer() )
		except ValueError:
			pass
		try:
			iNatCodeCol = colnamesLabels.index( _('NatCode') )
			self.labelGrid.SetColRenderer( iNatCodeCol, IOCCodeRenderer() )
		except ValueError:
			pass
		self.lapGrid.Set( data = dataLaps, colnames = colnameLaps )
		self.lapGrid.Reset()
		self.lapGrid.AutoSizeColumns( self.lapGrid.GetNumberCols() < 100 )
		self.isEmpty = False
		# Find interpolated entries.
		with Model.LockRace() as race:
			numTimeInfo = race.numTimeInfo
			riders = race.riders
			for r in range(self.lapGrid.GetNumberRows()):
				try:
					rider = riders[int(self.labelGrid.GetCellValue(r, 1))]
				except Exception:
					continue
				try:
					entries = rider.interpolate()
				except (ValueError, IndexError):
					continue
				if not entries:
					continue
				for c in range(self.lapGrid.GetNumberCols()):
					if not self.lapGrid.GetCellValue(r, c):
						break
					try:
						# entries[0] is the start; lap c corresponds to entries[c+1].
						if entries[c+1].interp:
							self.rcInterp.add( (r, c) )
						elif numTimeInfo.getInfo(entries[c+1].num, entries[c+1].t) is not None:
							self.rcNumTime.add( (r, c) )
						elif c > self.iLastLap:
							self.iLastLap = c
					except IndexError:
						pass
		# Restore the scroll positions and the current selection highlight.
		self.labelGrid.Scroll( labelLastX, labelLastY )
		self.lapGrid.Scroll( lapLastX, lapLastY )
		self.showNumSelect()
		if self.firstDraw:
			self.firstDraw = False
			self.splitter.SetSashPosition( 400 )
		# Fix the grids' scrollbars.
		self.labelGrid.FitInside()
		self.lapGrid.FitInside()
	def commit( self ):
		# This page is display-only; nothing to save when focus changes.
		pass
if __name__ == '__main__':
	# Stand-alone test harness: create a populated in-memory race and show the Results panel.
	Utils.disable_stdout_buffering()
	app = wx.App(False)
	mainWin = wx.Frame(None,title="CrossMan", size=(600,200))
	Model.setRace( Model.Race() )
	Model.getRace()._populate()
	Model.race.winAndOut = True
	results = Results(mainWin)
	results.refresh()
	mainWin.Show()
	app.MainLoop()
|
<gh_stars>1-10
from chainer.functions.array import broadcast
from chainer.functions.array import concat
from chainer.functions.array import reshape
from chainer.functions.array import select_item
from chainer.functions.array import split_axis
from chainer.functions.connection import embed_id
from chainer.functions.math import logsumexp
from chainer.functions.math import minmax
from chainer.functions.math import sum as _sum
def crf1d(cost, xs, ys, reduce='mean'):
    """Calculates negative log-likelihood of linear-chain CRF.
    It takes a transition cost matrix, a sequence of costs, and a sequence of
    labels. Let :math:`c_{st}` be a transition cost from a label :math:`s` to
    a label :math:`t`, :math:`x_{it}` be a cost of a label :math:`t` at
    position :math:`i`, and :math:`y_i` be an expected label at position
    :math:`i`. The negative log-likelihood of linear-chain CRF is defined as
    .. math::
        L = -\\left( \\sum_{i=1}^l x_{iy_i} + \\
             \\sum_{i=1}^{l-1} c_{y_i y_{i+1}} - {\\log(Z)} \\right) ,
    where :math:`l` is the length of the input sequence and :math:`Z` is the
    normalizing constant called partition function.
    .. note::
       When you want to calculate the negative log-likelihood of sequences
       which have different lengths, sort the sequences in descending order of
       lengths and transpose the sequences.
       For example, you have three input sequences:
       >>> a1 = a2 = a3 = a4 = np.random.uniform(-1, 1, 3).astype(np.float32)
       >>> b1 = b2 = b3 = np.random.uniform(-1, 1, 3).astype(np.float32)
       >>> c1 = c2 = np.random.uniform(-1, 1, 3).astype(np.float32)
       >>> a = [a1, a2, a3, a4]
       >>> b = [b1, b2, b3]
       >>> c = [c1, c2]
       where ``a1`` and all other variables are arrays with ``(K,)`` shape.
       Make a transpose of the sequences:
       >>> x1 = np.stack([a1, b1, c1])
       >>> x2 = np.stack([a2, b2, c2])
       >>> x3 = np.stack([a3, b3])
       >>> x4 = np.stack([a4])
       and make a list of the arrays:
       >>> xs = [x1, x2, x3, x4]
       You need to make label sequences in the same fashion.
       And then, call the function:
       >>> cost = chainer.Variable(
       ...     np.random.uniform(-1, 1, (3, 3)).astype(np.float32))
       >>> ys = [np.zeros(x.shape[0:1], dtype=np.int32) for x in xs]
       >>> loss = F.crf1d(cost, xs, ys)
       It calculates mean of the negative log-likelihood of the three
       sequences.
    The output is a variable whose value depends on the value of
    the option ``reduce``. If it is ``'no'``, it holds the elementwise
    loss values. If it is ``'mean'``, it holds mean of the loss values.
    Args:
        cost (:class:`~chainer.Variable` or :ref:`ndarray`):
            A :math:`K \\times K` matrix which holds transition
            cost between two labels, where :math:`K` is the number of labels.
        xs (list of Variable): Input vector for each label.
            ``len(xs)`` denotes the length of the sequence,
            and each :class:`~chainer.Variable` holds a :math:`B \\times K`
            matrix, where :math:`B` is mini-batch size, :math:`K` is the number
            of labels.
            Note that :math:`B`\\ s in all the variables are not necessary
            the same, i.e., it accepts the input sequences with different
            lengths.
        ys (list of Variable): Expected output labels. It needs to have the
            same length as ``xs``. Each :class:`~chainer.Variable` holds a
            :math:`B` integer vector.
            When ``x`` in ``xs`` has the different :math:`B`, correspoding
            ``y`` has the same :math:`B`. In other words, ``ys`` must satisfy
            ``ys[i].shape == xs[i].shape[0:1]`` for all ``i``.
        reduce (str): Reduction option. Its value must be either
            ``'mean'`` or ``'no'``. Otherwise, :class:`ValueError` is raised.
    Returns:
        ~chainer.Variable: A variable holding the average negative
        log-likelihood of the input sequences.
    .. note::
        See detail in the original paper: `Conditional Random Fields:
        Probabilistic Models for Segmenting and Labeling Sequence Data
        <https://repository.upenn.edu/cis_papers/159/>`_.
    """
    if reduce not in ('mean', 'no'):
        raise ValueError(
            "only 'mean' and 'no' are valid for 'reduce', but '%s' is "
            'given' % reduce)
    assert xs[0].shape[1] == cost.shape[0]
    n_label = cost.shape[0]
    n_batch = xs[0].shape[0]
    # Forward algorithm: alpha[b, t] accumulates log-sum-exp scores ending in label t.
    alpha = xs[0]
    alphas = []
    for x in xs[1:]:
        batch = x.shape[0]
        if alpha.shape[0] > batch:
            # Sequences shorter than the current position drop out of the batch;
            # stash their final alpha rows for later reassembly.
            alpha, alpha_rest = split_axis.split_axis(alpha, [batch], axis=0)
            alphas.append(alpha_rest)
        b_alpha, b_cost = broadcast.broadcast(alpha[..., None], cost)
        alpha = logsumexp.logsumexp(b_alpha + b_cost, axis=1) + x
    if len(alphas) > 0:
        # Reattach the stashed rows (reversed so the longest sequences come first).
        alphas.append(alpha)
        alpha = concat.concat(alphas[::-1], axis=0)
    # Log partition function per sequence.
    logz = logsumexp.logsumexp(alpha, axis=1)
    # Score of the gold path: emission scores plus transition costs
    # (cost flattened so embed_id can index pair (y_prev, y) as y_prev*K + y).
    cost = reshape.reshape(cost, (cost.size, 1))
    score = select_item.select_item(xs[0], ys[0])
    scores = []
    for x, y, y_prev in zip(xs[1:], ys[1:], ys[:-1]):
        batch = x.shape[0]
        if score.shape[0] > batch:
            y_prev, _ = split_axis.split_axis(y_prev, [batch], axis=0)
            score, score_rest = split_axis.split_axis(score, [batch], axis=0)
            scores.append(score_rest)
        score += (select_item.select_item(x, y) + reshape.reshape(
            embed_id.embed_id(y_prev * n_label + y, cost), (batch,)))
    if len(scores) > 0:
        scores.append(score)
        score = concat.concat(scores[::-1], axis=0)
    loss = logz - score
    if reduce == 'mean':
        return _sum.sum(loss) / n_batch
    else:
        return loss
def argmax_crf1d(cost, xs):
    """Computes a state that maximizes a joint probability of the given CRF.
    Args:
        cost (:class:`~chainer.Variable` or :ref:`ndarray`):
            A :math:`K \\times K` matrix which holds transition
            cost between two labels, where :math:`K` is the number of labels.
        xs (list of Variable): Input vector for each label.
            ``len(xs)`` denotes the length of the sequence,
            and each :class:`~chainer.Variable` holds a :math:`B \\times K`
            matrix, where :math:`B` is mini-batch size, :math:`K` is the number
            of labels.
            Note that :math:`B`\\ s in all the variables are not necessary
            the same, i.e., it accepts the input sequences with different
            lengths.
    Returns:
        tuple: A tuple of :class:`~chainer.Variable` object ``s`` and a
        :class:`list` ``ps``.
        The shape of ``s`` is ``(B,)``, where ``B`` is the mini-batch size.
        i-th element of ``s``, ``s[i]``, represents log-likelihood of i-th
        data.
        ``ps`` is a list of :ref:`ndarray`, and denotes the state that
        maximizes the point probability.
        ``len(ps)`` is equal to ``len(xs)``, and shape of each ``ps[i]`` is
        the mini-batch size of the corresponding ``xs[i]``. That means,
        ``ps[i].shape == xs[i].shape[0:1]``.
    """
    # Forward (Viterbi) pass: alpha[b, k] is the best score of any label
    # sequence for batch item b that ends in label k at the current position.
    alpha = xs[0]
    alphas = []
    max_inds = []
    for x in xs[1:]:
        batch = x.shape[0]
        if alpha.shape[0] > batch:
            # The batch shrinks when shorter sequences finish; stash the
            # final alpha rows of the finished sequences for later.
            # NOTE(review): assumes xs is ordered longest-first — confirm.
            alpha, alpha_rest = split_axis.split_axis(alpha, [batch], axis=0)
            alphas.append(alpha_rest)
        else:
            # Placeholder so that alphas stays aligned with max_inds.
            alphas.append(None)
        # scores[b, j, k] = alpha[b, j] + cost[j, k]: score of moving from
        # previous label j to current label k.
        b_alpha, b_cost = broadcast.broadcast(alpha[..., None], cost)
        scores = b_alpha + b_cost
        # Backpointers: best previous label for each current label.
        max_ind = minmax.argmax(scores, axis=1)
        max_inds.append(max_ind)
        alpha = minmax.max(scores, axis=1) + x
    # Best final label for each still-active batch item.
    inds = minmax.argmax(alpha, axis=1)
    path = [inds.data]
    # Backward pass: follow the backpointers, re-appending rows of the
    # sequences that finished early so every step carries its full batch.
    for m, a in zip(max_inds[::-1], alphas[::-1]):
        inds = select_item.select_item(m, inds)
        if a is not None:
            inds = concat.concat([inds, minmax.argmax(a, axis=1)], axis=0)
        path.append(inds.data)
    path.reverse()
    # Assemble per-item best scores, including early-finished sequences.
    score = minmax.max(alpha, axis=1)
    for a in alphas[::-1]:
        if a is None:
            continue
        score = concat.concat([score, minmax.max(a, axis=1)], axis=0)
    return score, path
|
<reponame>diogo1790team/inphinity_DM
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 26 15:24:17 2017
@author: Stage
"""
from SQL_obj_old.Bacterium_sql_old import Bacterium_sql_old
from Bio import SeqIO
from objects_new.Proteins_new import Protein
from objects_new.WholeDNA_new import WholeDNA
from files_treatment.fasta_proteins import *
from files_treatment.fasta_whole_genome import *
from files_treatment.csv_files import *
import re
import numpy as np
class Bacteria_old(object):
    """Bacterium record backed by the legacy ("old") database.

    Holds the basic identity of a bacterium (species, strain, GI number)
    together with lazily loaded sequence data: per-protein amino-acid and
    DNA FASTA records, the parsed protein list and the whole-genome object.

    NOTE(review): the ``whole_genome``, ``prot_dna_cod_seq`` and
    ``prot_seq`` constructor arguments are accepted for call-site
    compatibility (see :meth:`get_all_Bacteria`) but are not stored; the
    corresponding attributes are populated on demand by
    :meth:`complete_bacteria_from_old_DB`.
    """

    # Shared scratch file used to round-trip sequences through Biopython.
    # NOTE(review): a fixed path in /tmp is race-prone if two processes run
    # concurrently; consider tempfile.NamedTemporaryFile.
    _TEMP_FASTA_PATH = '/tmp/temp_fasta.fasta'

    def __init__(self, bacterium_id = -1, species = "", strain = "", GI = -1,
                 nb_proteins = -1, whole_genome = "", prot_dna_cod_seq = "", prot_seq = ""):
        self.bacterium_id = bacterium_id
        self.species = species
        self.strain = strain
        self.GI = GI
        self.nb_proteins = nb_proteins
        # Sequence payloads; filled in by the get_*/complete_* methods below.
        self.proteins_fasta_gen = None
        self.proteins_DNA_fasta_gen = None
        self.dna_whole_genome = None
        self.proteins_list = None
        self.whole_genom_obj = None
        # Counts of '>' headers seen in the amino-acid / nucleotide payloads.
        self.number_prots_counted_nucleo = 0
        self.number_prots_counted_aa = 0

    def get_all_Bacteria(self):
        """Return every bacterium row of the old database as a Bacteria_old.

        NOTE(review): does not use ``self``; kept as an instance method only
        for backward compatibility with existing callers.
        """
        sqlObj = Bacterium_sql_old()
        results = sqlObj.select_all_bacteria_all_attributes()
        # First eight columns map onto the constructor's positional args.
        return [Bacteria_old(*element[:8]) for element in results]

    def complete_bacteria_from_old_DB(self):
        """Fetch and parse all sequence data for this bacterium (keyed by GI)."""
        self.get_whole_dna_old_db()
        self.get_proteins_old_db()
        self.get_dna_old_db()
        self.get_qty_prots_old_db()
        self.complete_proteins_list()
        self.complete_whole_gen()

    def complete_proteins_list(self):
        """Parse the fetched FASTA records into ``self.proteins_list``."""
        prot_fasta_obj = proteins_fasta(self.proteins_fasta_gen, self.proteins_DNA_fasta_gen)
        self.proteins_list = prot_fasta_obj.parse_fasta_format()

    def complete_whole_gen(self):
        """Extract the whole-genome sequence into ``self.whole_genom_obj``."""
        whole_dna_obj = whole_genome_fasta(self.dna_whole_genome)
        self.whole_genom_obj = whole_dna_obj.get_whole_sequence()

    def get_qty_prots_old_db(self):
        """Load this bacterium's protein count from the old database."""
        sqlObj = Bacterium_sql_old()
        self.nb_proteins = sqlObj.get_proteins_qty_prot_by_GI(self.GI)

    def get_proteins_old_db(self):
        """Load the amino-acid protein FASTA records for this bacterium."""
        sqlObj = Bacterium_sql_old()
        prot_seq = sqlObj.get_proteins_sequences_by_GI(self.GI)
        self.number_prots_counted_aa = prot_seq[0].count('>')
        self.write_temp_file(prot_seq[0])
        # In-memory dict (SeqIO.to_dict) rather than the lazy SeqIO.index.
        self.proteins_fasta_gen = SeqIO.to_dict(SeqIO.parse(self._TEMP_FASTA_PATH, "fasta"))

    def get_dna_old_db(self):
        """Load the protein-coding DNA FASTA records for this bacterium."""
        sqlObj = Bacterium_sql_old()
        prot_dna_cod_seq = sqlObj.get_proteins_dna_sequences_by_GI(self.GI)
        self.number_prots_counted_nucleo = prot_dna_cod_seq[0].count('>')
        self.write_temp_file(prot_dna_cod_seq[0])
        self.proteins_DNA_fasta_gen = SeqIO.to_dict(SeqIO.parse(self._TEMP_FASTA_PATH, "fasta"))

    def get_whole_dna_old_db(self):
        """Load the whole-genome FASTA record for this bacterium."""
        sqlObj = Bacterium_sql_old()
        whole_dna_seq = sqlObj.select_whole_genemo_by_GI(self.GI)
        self.write_temp_file(whole_dna_seq[0])
        self.dna_whole_genome = SeqIO.to_dict(SeqIO.parse(self._TEMP_FASTA_PATH, "fasta"))

    def write_temp_file(self, content):
        """Overwrite the shared temp FASTA file with ``content``."""
        # The with-block closes the file; the old explicit close() was redundant.
        with open(self._TEMP_FASTA_PATH, "w") as out:
            out.write(content)

    def write_csv_list_prots(self):
        """Dump ``self.proteins_list`` to '<species>_<strain>.csv'."""
        csv_file_obj = CSV_file((str(self.species) + "_" + str(self.strain) + ".csv"), self.proteins_list)
        csv_file_obj.create_CSV_form_list_obj_fields()
|
<filename>improver/precipitation_type/shower_condition_probability.py<gh_stars>0
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Plugin to construct a shower conditions probability"""
from typing import Optional, Tuple
import iris
import numpy as np
from iris.cube import Cube, CubeList
from iris.exceptions import CoordinateNotFoundError
from numpy import ndarray
from improver import PostProcessingPlugin
from improver.metadata.constants import FLOAT_DTYPE
from improver.metadata.utilities import (
create_new_diagnostic_cube,
generate_mandatory_attributes,
)
from improver.threshold import BasicThreshold
from improver.utilities.cube_manipulation import collapse_realizations
from .utilities import make_shower_condition_cube
class ShowerConditionProbability(PostProcessingPlugin):
    """Plugin to calculate the probability that conditions are such that
    precipitation, should it be present, will be showery, based on input cloud
    amounts and the convective ratio."""
    def __init__(
        self,
        cloud_threshold: float = 0.8125,
        convection_threshold: float = 0.8,
        model_id_attr: Optional[str] = None,
    ) -> None:
        """
        Args:
            cloud_threshold:
                The fractional cloud coverage value at which to threshold the
                cloud data.
            convection_threshold:
                The convective ratio value at which to threshold the convective
                ratio data.
            model_id_attr:
                Name of the attribute used to identify the source model for
                blending.
        """
        self.cloud_threshold = cloud_threshold
        self.convection_threshold = convection_threshold
        self.model_id_attr = model_id_attr
        # Substring matching on cube names, so name variants around the core
        # diagnostic names are also accepted — presumably probability-style
        # names; confirm against the expected inputs.
        self.cloud_constraint = iris.Constraint(
            cube_func=lambda cube: "cloud_area_fraction" in cube.name()
        )
        self.convection_constraint = iris.Constraint(
            cube_func=lambda cube: "convective_ratio" in cube.name()
        )
    def _create_shower_condition_cube(self, data: ndarray, cube: Cube) -> Cube:
        """
        Returns a shower condition cube, with coordinates and mandatory
        attributes based upon the provided cube. The threshold coordinate is
        modified to describe shower conditions, such that the probabilities
        describe the likelihood of conditions being showery. The arbitrary
        threshold value is 1.
        Args:
            data:
                The shower condition probabilities to populate the new cube.
            cube:
                The cube to use as a template, and from which to extract
                attributes for use in the new diagnostic cube.
        Returns:
            A probability of shower conditions cube.
        """
        template = make_shower_condition_cube(cube)
        attributes = generate_mandatory_attributes(
            [cube], model_id_attr=self.model_id_attr
        )
        # Units "1": the new diagnostic is a dimensionless probability.
        result = create_new_diagnostic_cube(
            template.name(), "1", template, mandatory_attributes=attributes, data=data,
        )
        return result
    def _extract_inputs(self, cubes: CubeList) -> Tuple[Cube, Cube]:
        """
        Extract the required input cubes from the input cubelist and check
        they are as required.
        Args:
            cubes:
                A cubelist containing a cube of cloud fraction and one of
                convective ratio.
        Returns:
            The cloud and convection cubes extracted from the cubelist.
        Raises:
            ValueError: If the expected cubes are not within the cubelist.
            ValueError: If the input cubes have different shapes, perhaps due
                to a missing realization in one and not the other.
        """
        # Tuple unpacking raises ValueError both when a cube is missing and
        # when multiple cubes match; either way the inputs are unusable.
        try:
            (cloud,) = cubes.extract(self.cloud_constraint)
            (convection,) = cubes.extract(self.convection_constraint)
        except ValueError:
            input_cubes = ", ".join([cube.name() for cube in cubes])
            msg = (
                "A cloud area fraction and convective ratio are required, "
                f"but the inputs were: {input_cubes}"
            )
            raise ValueError(msg)
        if cloud.shape != convection.shape:
            msg = (
                "The cloud area fraction and convective ratio cubes are not "
                "the same shape and cannot be combined to generate a shower"
                " probability"
            )
            raise ValueError(msg)
        return cloud, convection
    def process(self, cubes: CubeList) -> Cube:
        """
        Create a shower condition probability from cloud fraction and convective
        ratio fields. This plugin thresholds the two input diagnostics,
        creates a hybrid probability field from the resulting binary fields,
        and then collapses the realizations to give a non-binary probability
        field that represents the likelihood of conditions being showery.
        Args:
            cubes:
                A cubelist containing a cube of cloud fraction and one of
                convective ratio.
        Returns:
            Probability of any precipitation, if present, being classified as
            showery
        """
        cloud, convection = self._extract_inputs(cubes)
        # Threshold cubes. Cloud uses "<=" (cloud cover at or below the
        # threshold); convection uses BasicThreshold's default comparison —
        # presumably "above threshold", confirm against BasicThreshold.
        cloud_thresholded = BasicThreshold(
            self.cloud_threshold, comparison_operator="<="
        ).process(cloud)
        convection_thresholded = BasicThreshold(self.convection_threshold).process(
            convection
        )
        # Fill any missing data in the convective ratio field with zeroes.
        if np.ma.is_masked(convection_thresholded.data):
            convection_thresholded.data = convection_thresholded.data.filled(0)
        # Create a combined field taking the maximum of each input
        shower_probability = np.maximum(
            cloud_thresholded.data, convection_thresholded.data
        ).astype(FLOAT_DTYPE)
        result = self._create_shower_condition_cube(
            shower_probability, convection_thresholded
        )
        # Collapse over realizations when the coordinate exists; deterministic
        # inputs (no realization coordinate) are passed through unchanged.
        try:
            shower_conditions = collapse_realizations(result)
        except CoordinateNotFoundError:
            shower_conditions = result
        return iris.util.squeeze(shower_conditions)
|
<filename>interfaces/libreasr_pb2.py
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: libreasr.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# NOTE(review): this module was emitted by the protocol buffer compiler from
# libreasr.proto ("DO NOT EDIT" above). Regenerate with protoc rather than
# editing by hand; any comments added here are lost on regeneration.
_sym_db = _symbol_database.Default()
# File-level descriptor for libreasr.proto (package "ASR", proto3 syntax).
DESCRIPTOR = _descriptor.FileDescriptor(
    name="libreasr.proto",
    package="ASR",
    syntax="proto3",
    serialized_options=None,
    serialized_pb=b'\n\x0elibreasr.proto\x12\x03\x41SR"!\n\x05\x41udio\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\x12\n\n\x02sr\x18\x03 \x01(\x05"\x1a\n\nTranscript\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\t2i\n\x03\x41SR\x12+\n\nTranscribe\x12\n.ASR.Audio\x1a\x0f.ASR.Transcript"\x00\x12\x35\n\x10TranscribeStream\x12\n.ASR.Audio\x1a\x0f.ASR.Transcript"\x00(\x01\x30\x01\x62\x06proto3',
)
# Descriptor for the Audio message: bytes field "data" (number 1) and
# int32 field "sr" (number 3).
_AUDIO = _descriptor.Descriptor(
    name="Audio",
    full_name="ASR.Audio",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="data",
            full_name="ASR.Audio.data",
            index=0,
            number=1,
            type=12,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"",
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
        _descriptor.FieldDescriptor(
            name="sr",
            full_name="ASR.Audio.sr",
            index=1,
            number=3,
            type=5,
            cpp_type=1,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=23,
    serialized_end=56,
)
# Descriptor for the Transcript message: a single string field "data" (1).
_TRANSCRIPT = _descriptor.Descriptor(
    name="Transcript",
    full_name="ASR.Transcript",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name="data",
            full_name="ASR.Transcript.data",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"".decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=58,
    serialized_end=84,
)
DESCRIPTOR.message_types_by_name["Audio"] = _AUDIO
DESCRIPTOR.message_types_by_name["Transcript"] = _TRANSCRIPT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message classes produced by the reflection machinery.
Audio = _reflection.GeneratedProtocolMessageType(
    "Audio",
    (_message.Message,),
    {
        "DESCRIPTOR": _AUDIO,
        "__module__": "libreasr_pb2"
        # @@protoc_insertion_point(class_scope:ASR.Audio)
    },
)
_sym_db.RegisterMessage(Audio)
Transcript = _reflection.GeneratedProtocolMessageType(
    "Transcript",
    (_message.Message,),
    {
        "DESCRIPTOR": _TRANSCRIPT,
        "__module__": "libreasr_pb2"
        # @@protoc_insertion_point(class_scope:ASR.Transcript)
    },
)
_sym_db.RegisterMessage(Transcript)
# Service descriptor for ASR: Transcribe and TranscribeStream, both mapping
# Audio requests to Transcript responses.
_ASR = _descriptor.ServiceDescriptor(
    name="ASR",
    full_name="ASR.ASR",
    file=DESCRIPTOR,
    index=0,
    serialized_options=None,
    serialized_start=86,
    serialized_end=191,
    methods=[
        _descriptor.MethodDescriptor(
            name="Transcribe",
            full_name="ASR.ASR.Transcribe",
            index=0,
            containing_service=None,
            input_type=_AUDIO,
            output_type=_TRANSCRIPT,
            serialized_options=None,
        ),
        _descriptor.MethodDescriptor(
            name="TranscribeStream",
            full_name="ASR.ASR.TranscribeStream",
            index=1,
            containing_service=None,
            input_type=_AUDIO,
            output_type=_TRANSCRIPT,
            serialized_options=None,
        ),
    ],
)
_sym_db.RegisterServiceDescriptor(_ASR)
DESCRIPTOR.services_by_name["ASR"] = _ASR
# @@protoc_insertion_point(module_scope)
|
<filename>models2df.py
from PIL import Image
import pandas as pd
import os
import sys
import numpy as np
import shutil
import tensorflow as tf
import pdb
sys.path.append('./')
sys.path.append('../')
sys.path.append('../EIG_body_perception')
from config import Options
from demo import main
def make_path(path):
    """Create directory ``path`` (including parents) if it does not exist.

    Uses ``exist_ok=True`` so concurrent callers cannot race between an
    existence check and the creation (the previous check-then-create form
    could raise FileExistsError under concurrency).
    """
    os.makedirs(path, exist_ok=True)
def attend_crop_img(img, attend_size=350, save_path=None):
    """
    Cropping procedure: find bounding box, make bounding box quadratic and scale to fixed crop size

    The background colour is taken from the top-left pixel; every pixel
    differing from it counts as content. The content bounding box is
    expanded to a square about its centre, cropped out and resized to
    ``attend_size`` x ``attend_size``.

    Args:
        img: PIL image; assumed to have a colour-channel axis (RGB) — TODO confirm.
        attend_size: side length in pixels of the returned square image.
        save_path: optional file path; when given the result is also saved there.

    Returns:
        The cropped-and-resized PIL image.
    """
    # Mean over colour channels, transposed so axis 0 indexes image x.
    img_np = np.mean(np.array(img), axis=2).T
    xs, ys = np.where(img_np != img_np[0, 0])  # find non-background pixels
    x0_bb, x1_bb = min(xs), max(xs)  # extract bounding box
    y0_bb, y1_bb = min(ys), max(ys)
    # Side of the square crop: the larger bounding-box dimension.
    # (Was named `len`, which shadowed the builtin.)
    side = max(x1_bb - x0_bb, y1_bb - y0_bb)
    x_center, y_center = int((x1_bb + x0_bb) / 2), int((y1_bb + y0_bb) / 2)
    half = int(side / 2)
    cropped_img = img.crop(
        (x_center - half, y_center - half, x_center + half, y_center + half))
    attended_img = cropped_img.resize([attend_size, attend_size])
    if save_path is not None:
        make_path(os.path.dirname(save_path))
        attended_img.save(save_path)
    return attended_img
def center_crop_img(img, crop_size=350, save_path=None):
    """Crop a centred ``crop_size`` x ``crop_size`` square out of ``img``.

    Args:
        img: PIL image to crop.
        crop_size: side length in pixels of the square crop.
        save_path: optional file path; when given the crop is also saved there.

    Returns:
        The cropped PIL image.
    """
    width, height = img.size  # Get dimensions
    # Equal margins on both sides centre the crop box.
    x_margin = (width - crop_size) / 2
    y_margin = (height - crop_size) / 2
    box = (x_margin, y_margin, width - x_margin, height - y_margin)
    cropped_img = img.crop(box)
    if save_path is not None:
        make_path(os.path.dirname(save_path))
        cropped_img.save(save_path)
    return cropped_img
def models2df(model_names, opt, save_dir=None):
    """
    Applies provided models to provided stimuli and outputs network activation
    data as a DataFrame for further processing.

    For every feeding mode in ``opt.feeding_modes`` — strings of the form
    "<source>-<mode>", e.g. "original-raw" (TODO confirm at call sites) —
    every image in the corresponding stimulus directory is fed through every
    model and the flattened activations of each configured region recorded.

    Args:
        model_names: model identifiers understood by ``demo.main``.
        opt: Options instance providing ``feeding_modes``, ``stimuli_dir``
            and ``model_regions``.
        save_dir: NOTE(review): currently unused — wire it up or remove it.

    Returns:
        pandas.DataFrame with columns StimulusName, ImageMode, ModelName,
        Region, Activations.
    """
    activation_data = []
    columns = ["StimulusName", "ImageMode", "ModelName", "Region", "Activations"]
    for img_mode in opt.feeding_modes:
        # NOTE(review): assumes exactly one "-" per mode string; more dashes
        # would make this unpacking raise ValueError.
        stim_source, mode = str.split(img_mode, "-")
        img_paths = []
        if stim_source == "original":
            img_dir = opt.stimuli_dir
            img_paths = sorted(os.listdir(img_dir))
        elif stim_source == "rendered":
            # Prefer the trunk-centric renders; fall back to the uncentered set.
            if os.path.exists(opt.stimuli_dir+'-trunk-centric'):
                img_dir = opt.stimuli_dir+"-trunk-centric"
            else:
                img_dir = opt.stimuli_dir+"-trunk-centric-uncentered"
            img_paths = sorted(os.listdir(img_dir))
            # Keep only base renders (derived files carry "-" in their names).
            img_paths = [path for path in img_paths if "-" not in path]
        else:
            # Unknown source: img_paths stays empty and the mode is skipped.
            print("Source in image mode {} is not implemented.".format(img_mode))
        # loop through all stimuli/images in this mode
        make_path('tmp')
        for img_path in img_paths:
            stimulus_name = str.split(os.path.basename(img_path), ".")[0]
            # loop through image modes (e.g. "raw" or "cropped")
            if mode == "raw":
                img_path = os.path.join(img_dir, img_path)
            elif mode == "cropped":
                img_path = os.path.join(img_dir+'-cropped', img_path)
            # elif mode == "attended":
            #     img = attend_crop_img(img, attend_size=opt.attend_size)
            else:
                # NOTE(review): img_path is left unmodified here, so the model
                # below still runs on the bare filename — confirm intended.
                print("Mode in image mode {} is not implemented.".format(mode))
            # loop through models
            for model_name in model_names:
                print('\nComputing output for model {} and image {}.\n'.format(model_name, stimulus_name))
                output = main(img_path, model_type=model_name)
                # loop through labels
                for model_region in opt.model_regions[model_name]:
                    out = output[model_region].flatten()
                    activation_data.append([stimulus_name, img_mode, model_name, model_region, out])
    return pd.DataFrame(activation_data, columns=columns)
# --- script entry: compute activations for all models and cache to disk ---
# NOTE(review): runs on import; consider an `if __name__ == "__main__":` guard.
opt = Options()
# load pre-trained models
model_names = ['ResNet50-ImageNet', 'ResNet50-HMR']
print("\t Model Data")
# load activations of pre-trained models applied to stimuli
model_data = models2df(model_names, opt)
make_path(opt.model_data_dir)
model_data.to_pickle(os.path.join(opt.model_data_dir, 'ResNets.pkl'))
|
<filename>fastrk/bt_8_9_16.py
'''Butcher Table for Verner's RK8(9)16 algorithm.
Original matlab code at:
https://github.com/USNavalResearchLaboratory/TrackerComponentLibrary/blob/master/Mathematical_Functions/Differential_Equations/RungeKStep.m
'''
def vf(a, b):
    """Return ``a + b * sqrt(6)``.

    Helper for spelling out the Verner 8(9) tableau entries, many of which
    are rational combinations of 1 and sqrt(6).
    """
    sqrt6 = 6 ** 0.5
    return a + sqrt6 * b
# Coupling coefficients a[i][j] of the 16-stage Verner RK8(9) Butcher tableau:
# stage i is evaluated at y + h * sum_j a[i][j] * k[j].
A = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1/12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1/27, 2/27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1/24, 0, 1/8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[vf(4, 94)/375, 0, vf(-94, -84)/125, vf(328, 208)/375, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[vf(9, -1)/150, 0, 0, vf(312, 32)/1425, vf(69, 29)/570, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[vf(927, -347)/1250, 0, 0, vf(-16248, 7328)/9375, vf(-489, 179)/3750, vf(14268, -5798) / 9375, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[2/27, 0, 0, 0, 0, vf(16, -1)/54, vf(16, 1) / 54, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[19/256, 0, 0, 0, 0, vf(118, -23)/512, vf(118, 23) / 512, -9 / 256, 0, 0, 0, 0, 0, 0, 0, 0],
[11/144, 0, 0, 0, 0, vf(266, -1)/864, vf(266, 1) / 864, -1 / 16, -8 / 27, 0, 0, 0, 0, 0, 0, 0],
[vf(5034, -271)/61440, 0, 0, 0, 0, 0, vf(7859, -1626)/10240, vf(-2232, 813) / 20480, vf(-594, 271) / 960, vf(657, -813) / 5120, 0, 0, 0, 0, 0, 0],
[vf(5996, -3794)/405, 0, 0, 0, 0, vf(-4342, -338)/9, vf(154922, -40458) / 135, vf(-4176, 3794) / 45, vf(-340864, 242816) / 405, vf(26304, -15176) / 45, -26624 / 81, 0, 0, 0, 0, 0],
[vf(3793, 2168)/103680, 0, 0, 0, 0, vf(4042, 2263)/13824, vf(-231278, 40717) / 69120, vf(7947, -2168) / 11520, vf(1048, -542) / 405, vf(-1383, 542) / 720, 2624 / 1053, 3 / 1664, 0, 0, 0, 0],
[-137/1296, 0, 0, 0, 0, vf(5642, -337)/864, vf(5642, 337) / 864, -299 / 48, 184 / 81, -44 / 9, -5120 / 1053, -11 / 468, 16 / 9, 0, 0, 0],
[vf(33617, -2168)/518400, 0, 0, 0, 0, vf(-3846, 31)/13824, vf(155338, -52807) / 345600, vf(-12537, 2168) / 57600, vf(92, 542) / 2025, vf(-1797, -542) / 3600, 320 / 567, -1 / 1920, 4 / 105, 0, 0, 0],
[vf(-36487, -30352)/279600, 0, 0, 0, 0, vf(-29666, -4499) / 7456, vf(2779182, -615973) / 186400, vf(-94329, 91056) / 93200, vf(-232192, 121408) / 17475, vf(101226, -22764) / 5825, -169984 / 9087, -87 / 30290, 492 / 1165, 0, 1260 / 233, 0]
]
# Nodes c_i: the fraction of the step at which each stage is evaluated.
c = [0, 1/12, 1/9, 1/6, vf(2, 2)/15, vf(6, 1)/15, vf(6, -1)/15, 2/3, 1/2, 1/3, 1/4, 4/3, 5/6, 1, 1/6, 1]
# Weights of the 8th-order solution.
b_main = [103/1680, 0, 0, 0, 0, 0, 0, -27/140, 76/105, -201/280, 1024/1365, 3/7280, 12/35, 9/280, 0, 0]
# Weights of the embedded 9th-order solution (used for error estimation).
b_subs = [23/525, 0, 0, 0, 0, 0, 0, 171/1400, 86/525, 93/280, -2048/6825, -3/18200, 39/175, 0, 9/25, 233/4200]
# Orders of the (main, embedded) pair.
order = (8, 9)
|
<filename>Titanic-Analysis.py
# coding: utf-8
# # Investigation- Titanic Disaster
#
# by **_<NAME>_** (June, 2017)
# ### Description:
#
# _The sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships._
#
# _One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class._
#
# 
# ### Analysis Overview:
#
# The data set is in csv format and availbale in my repository.
# The goal of this project is to perform **intoductory data analysis** on Titanic DataSet and documents my findings. We'll start by taking a look at dataset and brainstorming what questions we could answer using it. Then we'll use Python libraries to answer the questions we're most interested in, subsequently creating and sharing report of our analysis.
#
# **Python librairies used-**
# * [Pandas](http://pandas.pydata.org/)
# * [Numpy](http://www.numpy.org/)
# * [Matplotlib](https://matplotlib.org/)
#
# **The set of questions we would like to infer in this analysis is as follows.**
# 1. _Does the available personal information we have about the passengers of titanic such as name help us in understanding the probabilty of survival of the passengers?_
# 2. _In such disasters, there is an expectation that the young people have a higher survival probabilty. How age was related to survival?_
# 3. _People were allocated based on 3 classes (Lower, middle and higher). Did class had any effect on survival?_
#
# ### Varibales Short Description:
#
# | Variable | Description |
# |----------|-----------------------------------------------------:|
# | survival | Survival (0 = No; 1 = Yes) |
# | pclass | Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd |
# | name | Passengers Name |
# | sex | Sex |
# | age | Age |
# | sibsp | Number of Siblings/Spouses Aboard |
# | parch | Number of Parents/Children Aboard |
# | ticket | Ticket Number |
# | fare | Passenger Fare |
# | cabin | Cabin |
# | embarked | Port(C = Cherbourg; Q = Queenstown; S = Southampton) |
#
#
# ### Feature type
#
# * Ordinal: Pclass
# * Continuous: Age, Fare
# * Descrete: SibSp, Parch
# * Categorical: Survived, Sex, and Embarked
#
#
# ### Importing libraies
# In[1]:
# import warnings
import warnings
warnings.filterwarnings('ignore')
# data import and handling libraries
import numpy as np
import pandas as pd
# data visualisation libraries
import matplotlib.pyplot as plt
import seaborn as sns
# Configure visualisations. NOTE(review): get_ipython() only exists inside
# an IPython/Jupyter kernel; this script fails in a plain interpreter.
get_ipython().magic(u'matplotlib inline')
from IPython.core.interactiveshell import InteractiveShell
# Allow the values of multiple statements per cell to be displayed at once
InteractiveShell.ast_node_interactivity = "all"
# ### Load and check the data
# In[2]:
Titanic = pd.read_csv("titanic-data.csv")
# Preview the data
Titanic.head(3)
Titanic.tail(3)
# In[4]:
# overview of whole data and columns
Titanic.info()
# In[5]:
# proportion of overall passengers who survived
survival_rate = float(Titanic['Survived'].sum())/ Titanic['Survived'].count()
survival_rate
# we see that only ~38% of people were lucky enough to survive the disaster.
# **Let's run through the data summary and look for any missing values.**
# In[6]:
Titanic.describe()
Titanic[['Age', 'Cabin', 'Embarked']].isnull().sum()
# ** As we can see, Age and Cabin have a lot of missing values, while Embarked has only two missing values.** We can identify the passengers whose onboarding port information was missing. Who were they?
# In[17]:
x = pd.isnull(Titanic.Embarked)
Titanic[x]
# ### EDA focus on a few variables
# In[58]:
# Count of survivors by gender
survived_passengers = Titanic[Titanic['Survived']==1]['Sex'].value_counts()
dead_passengers = Titanic[Titanic['Survived']==0]['Sex'].value_counts()
survived_df = pd.DataFrame([survived_passengers,dead_passengers])
survived_df.index = ['Survived','Dead']
survived_df
# In[51]:
# let's see the survival count using a boxplot
plt.figure(figsize=(15,1))
sns.boxplot(data=survived_df, orient='h', palette="Set1");
# In[57]:
# Age vs Fare vs Survival
plt.figure(figsize=(15,6))
abc = plt.subplot()
abc.scatter(Titanic[Titanic['Survived']==1]['Age'],Titanic[Titanic['Survived']==1]['Fare'],c='blue',s=20)
abc.scatter(Titanic[Titanic['Survived']==0]['Age'],Titanic[Titanic['Survived']==0]['Fare'],c='orange',s=20)
abc.set_xlabel('Age')
abc.set_ylabel('Fare')
abc.legend(('survived','dead'),scatterpoints=1,loc='upper right',fontsize=15,)
plt.ylim(0,None);
# ### Let's explore the Age variable
# In[7]:
# Plot Age values on a histogram
fig = plt.figure(figsize=(15, 6))
Titanic['Age'].hist(bins=80) #bins=80 as ages range from 0 to 80 years old
plt.xlabel('Age')
plt.ylabel('Frequency')
plt.grid(True)
plt.show();
# In[8]:
# we can do mean substitution for the age variable
meanAge = np.mean(Titanic.Age)
Titanic.Age = Titanic.Age.fillna(meanAge)
fig = plt.figure(figsize=(15, 6))
# NOTE(review): sns.distplot is deprecated in newer seaborn releases;
# histplot/displot are the replacements.
sns.distplot(Titanic.Age, bins = 40, color= 'red')
plt.xlim(0,80);
# plot after the mean age substitution
# ### Which class's passengers had the lowest survival rate?
# In[9]:
###### Class vs Survival chart ######
survival_by_class = Titanic.groupby(['Pclass', 'Survived']).size().unstack('Survived')
survival_by_class.columns = ['No', 'Yes']
survival_by_class.plot.bar(title='Survival by Class');
sns.barplot(Titanic["Pclass"], Titanic["Survived"], palette="Set1");
# **Here we can see that most passengers who survived held a class 1 ticket (upper class), while the majority of class 3 (lower class) people were unfortunate.**
# ** Let's check how family size and the age factor were related.**
# In[19]:
sns.lmplot(x='Age', y='Parch', data=Titanic, hue= 'Survived', fit_reg=False)
# **We can see that bigger families had a much lower chance of survival, and probably they sank together.**
# In[27]:
parch_survived = pd.crosstab(Titanic["Parch"],Titanic["Survived"])
pclass_survived = pd.crosstab(Titanic["Pclass"],Titanic["Survived"])
fig, (axis1,axis2) = plt.subplots(1,2,figsize=(15,5))
sns.violinplot(Titanic["Parch"], Titanic["Survived"], palette="Set1", ax=axis1)
sns.violinplot(Titanic["Embarked"], Titanic["Survived"], palette="Set1", ax=axis2)
plt.xticks(rotation=90);
# ### Extracting the title from the 'Name' feature
# In[18]:
def name_extract(word):
    """Return the title embedded in a passenger name.

    Names look like "Braund, Mr. Owen Harris": the title is the piece
    between the comma and the first period, stripped of whitespace.
    """
    after_comma = word.split(',')[1]
    title = after_comma.split('.')[0]
    return title.strip()
# Attach the extracted titles to the main frame as a new "Title" column.
df_surname = pd.DataFrame({'Title' : Titanic['Name'].apply(name_extract)})
Titanic = pd.merge(Titanic, df_surname, left_index=True, right_index=True)
pd.crosstab(Titanic.Sex, Titanic.Title)
# So there are 4 main titles - Mr, Mrs, Master and Miss. We can combine the
# others as those are fewer in number.
# In[19]:
def geoup_titles(old_titles):
    """Collapse rare passenger titles into the bucket 'Others'.

    The four dominant titles (Mr, Mrs, Master, Miss) are returned
    unchanged; every other title maps to 'Others'.

    NOTE(review): the function name is a typo for "group_titles"; it is
    kept for compatibility with existing callers.
    """
    # Membership test replaces the previous five-level nested if/else chain.
    if old_titles in ('Mr', 'Mrs', 'Master', 'Miss'):
        return old_titles
    return 'Others'
# Attach the grouped titles as a "New_Title" column.
df_temp = pd.DataFrame({'New_Title':Titanic['Title'].apply(geoup_titles)})
Titanic = pd.merge(Titanic, df_temp, left_index = True, right_index = True)
temp1 = df_temp.groupby('New_Title').count()
# ** Now, we can check how many of these people embarked, and from where.**
# In[27]:
pd.crosstab(Titanic.Embarked, Titanic.New_Title)
# In[34]:
sns.countplot(data = Titanic, x = 'New_Title', hue='Embarked');
# ### Conclusion:
#
# ** Although much more can be explored using this dataset, there are a few limitations where deriving an answer isn't possible here** :
#
# * Handling missing values. The dataset is filled with missing values for the age. The missing age values are imputed with the mean, but we saw that the mean value is being massively over-represented, which is a limitation as this will affect potential statistical testing. First, there were only 12 columns of data to work with, with essentially three of them being irrelevant. Additionally, there were only 891 rows of data, of which 179 were missing important fields such as age.
#
# * The more the data, the better it can be analysed. For example there was no information in the 'Name' column about who were the crew and who were the passengers, or how many lifeboats or security measures were present at that point in time.
#
# * The difference between the sample and the population: we don't know how the sample was chosen from the actual population of people that were on the Titanic. There could have been some sort of intentional or unintentional bias in how the sample was selected.
#
#
# **_Overall, during our analysis we saw that the strongest evidence for survival was certainly explained by `Pclass`, the passenger's socioeconomic class. Perhaps upper-class passengers, being in upper-deck cabins, had better access to the lifeboats which were brought near the first-class cabins in the shortest time when disaster struck, while the third class had to bear the scarcity of lifeboats, causing more deaths._**
# **Resources**
# * Plotting side by side using [Matplotlib fig](https://matplotlib.org/api/figure_api.html) .
# * seaborn scatter plot using [lmplot()](http://seaborn.pydata.org/generated/seaborn.lmplot.html#seaborn.lmplot)
# * Python [Regular Expression](https://docs.python.org/2/library/re.html) for data munging
# * How to display multiple outputs using Python [InteractiveShell](https://stackoverflow.com/questions/36786722/how-to-display-full-output-in-jupyter-not-only-last-result/36835741)
#
|
<gh_stars>0
from __future__ import division
__copyright__ = "Copyright (C) 2009-2013 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import pymbolic.mapper
import pymbolic.primitives as p
__doc__ = """
.. _prec-constants:
Precedence constants
********************
.. data:: PREC_CALL
.. data:: PREC_POWER
.. data:: PREC_UNARY
.. data:: PREC_PRODUCT
.. data:: PREC_SUM
.. data:: PREC_SHIFT
.. data:: PREC_BITWISE_AND
.. data:: PREC_BITWISE_XOR
.. data:: PREC_BITWISE_OR
.. data:: PREC_COMPARISON
.. data:: PREC_LOGICAL_AND
.. data:: PREC_LOGICAL_OR
.. data:: PREC_NONE
Mappers
*******
.. autoclass:: StringifyMapper
.. automethod:: __call__
.. autoclass:: CSESplittingStringifyMapperMixin
.. autoclass:: LaTeXMapper
"""
# Operator precedence levels used when stringifying expressions: a
# subexpression is parenthesized when the enclosing operator's level is
# strictly higher than its own (see parenthesize_if_needed below).
# Higher number = binds more tightly.
PREC_CALL = 15
PREC_POWER = 14
PREC_UNARY = 13
PREC_PRODUCT = 12
PREC_SUM = 11
PREC_SHIFT = 10
PREC_BITWISE_AND = 9
PREC_BITWISE_XOR = 8
PREC_BITWISE_OR = 7
PREC_COMPARISON = 6
PREC_LOGICAL_AND = 5
PREC_LOGICAL_OR = 4
# NOTE(review): PREC_IF is not listed in the module __doc__ above —
# presumably an oversight there, not here.
PREC_IF = 3
PREC_NONE = 0
# {{{ stringifier
class StringifyMapper(pymbolic.mapper.Mapper):
    """A mapper to turn an expression tree into a string.

    :class:`pymbolic.primitives.Expression.__str__` is often implemented using
    this mapper.
    When it encounters an unsupported :class:`pymbolic.primitives.Expression`
    subclass, it calls its :meth:`pymbolic.primitives.Expression.stringifier`
    method to get a :class:`StringifyMapper` that potentially does.
    """

    def __init__(self, constant_mapper=None):
        # 'constant_mapper' is deprecated; subclass and override
        # map_constant instead (see warning text below).
        if constant_mapper is not None:
            from warnings import warn
            warn("Overriding constant_mapper is deprecated. "
                    "Instead, subclass the stringifier to "
                    "achieve the desired effect. "
                    "The 'constant_mapper' argument will "
                    "disappear after June 2020.",
                    DeprecationWarning, stacklevel=2)
        self.constant_mapper = constant_mapper

    # {{{ replaceable string composition interface

    def format(self, s, *args):
        # Single choke point for %-style formatting; subclasses may override
        # to change how all pieces are composed.
        return s % args

    def join(self, joiner, iterable):
        # Join already-stringified pieces through self.format.
        return self.format(joiner.join("%s" for i in iterable), *iterable)

    def rec_with_force_parens_around(self, expr, *args, **kwargs):
        # Recurse, then parenthesize if *expr* is an instance of any class
        # in the 'force_parens_around' keyword argument (tuple of types).
        force_parens_around = kwargs.pop("force_parens_around", ())
        result = self.rec(expr, *args, **kwargs)
        if isinstance(expr, force_parens_around):
            result = "(%s)" % result
        return result

    def join_rec(self, joiner, iterable, prec, *args, **kwargs):
        # Stringify each element at precedence *prec* and join the results.
        f = joiner.join("%s" for i in iterable)
        return self.format(f,
                *[self.rec_with_force_parens_around(i, prec, *args, **kwargs)
                    for i in iterable])

    def parenthesize(self, s):
        return "(%s)" % s

    def parenthesize_if_needed(self, s, enclosing_prec, my_prec):
        # Parenthesize only when the surrounding operator binds tighter.
        if enclosing_prec > my_prec:
            return "(%s)" % s
        else:
            return s

    # }}}

    # {{{ mappings

    def handle_unsupported_expression(self, expr, enclosing_prec, *args, **kwargs):
        # Delegate to the expression's own stringifier; raise instead of
        # recursing forever when that stringifier is this very class.
        strifier = expr.make_stringifier(self)
        if isinstance(self, type(strifier)):
            raise ValueError("stringifier '%s' can't handle '%s'"
                    % (self, expr.__class__))
        return strifier(
                expr, enclosing_prec, *args, **kwargs)

    def map_constant(self, expr, enclosing_prec, *args, **kwargs):
        if self.constant_mapper is None:
            result = str(expr)
        else:
            result = self.constant_mapper(expr)
        # A constant whose text contains '+'/'-' (e.g. "1e-3", complex
        # numbers) must be parenthesized inside tighter-binding contexts.
        if not (result.startswith("(") and result.endswith(")")) \
                and ("-" in result or "+" in result) \
                and (enclosing_prec > PREC_SUM):
            return self.parenthesize(result)
        else:
            return result

    def map_variable(self, expr, enclosing_prec, *args, **kwargs):
        return expr.name

    def map_function_symbol(self, expr, enclosing_prec, *args, **kwargs):
        return expr.__class__.__name__

    def map_call(self, expr, enclosing_prec, *args, **kwargs):
        return self.format("%s(%s)",
                self.rec(expr.function, PREC_CALL, *args, **kwargs),
                self.join_rec(", ", expr.parameters, PREC_NONE, *args, **kwargs))

    def map_call_with_kwargs(self, expr, enclosing_prec, *args, **kwargs):
        # Positional arguments first, then name=value pairs.
        args_strings = (
                tuple(self.rec(ch, PREC_NONE, *args, **kwargs)
                    for ch in expr.parameters)
                +  # noqa: W504
                tuple("%s=%s" % (name, self.rec(ch, PREC_NONE, *args, **kwargs))
                    for name, ch in expr.kw_parameters.items()))
        return self.format("%s(%s)",
                self.rec(expr.function, PREC_CALL, *args, **kwargs),
                ", ".join(args_strings))

    def map_subscript(self, expr, enclosing_prec, *args, **kwargs):
        # Tuple indices render as a comma-separated list inside one bracket.
        if isinstance(expr.index, tuple):
            index_str = self.join_rec(", ", expr.index, PREC_NONE, *args, **kwargs)
        else:
            index_str = self.rec(expr.index, PREC_NONE, *args, **kwargs)
        return self.parenthesize_if_needed(
                self.format("%s[%s]",
                    self.rec(expr.aggregate, PREC_CALL, *args, **kwargs),
                    index_str),
                enclosing_prec, PREC_CALL)

    def map_lookup(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.format("%s.%s",
                    self.rec(expr.aggregate, PREC_CALL, *args, **kwargs),
                    expr.name),
                enclosing_prec, PREC_CALL)

    def map_sum(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.join_rec(" + ", expr.children, PREC_SUM, *args, **kwargs),
                enclosing_prec, PREC_SUM)

    # {{{ multiplicative operators

    # Operand types that must always be parenthesized inside another
    # multiplicative expression to keep the output unambiguous.
    multiplicative_primitives = (p.Product, p.Quotient, p.FloorDiv, p.Remainder)

    def map_product(self, expr, enclosing_prec, *args, **kwargs):
        kwargs["force_parens_around"] = (p.Quotient, p.FloorDiv, p.Remainder)
        return self.parenthesize_if_needed(
                self.join_rec("*", expr.children, PREC_PRODUCT, *args, **kwargs),
                enclosing_prec, PREC_PRODUCT)

    def map_quotient(self, expr, enclosing_prec, *args, **kwargs):
        kwargs["force_parens_around"] = self.multiplicative_primitives
        return self.parenthesize_if_needed(
                self.format("%s / %s",
                    # space is necessary--otherwise '/*' becomes
                    # start-of-comment in C. ('*' from dereference)
                    self.rec_with_force_parens_around(expr.numerator, PREC_PRODUCT,
                        *args, **kwargs),
                    self.rec_with_force_parens_around(
                        expr.denominator, PREC_PRODUCT, *args, **kwargs)),
                enclosing_prec, PREC_PRODUCT)

    def map_floor_div(self, expr, enclosing_prec, *args, **kwargs):
        kwargs["force_parens_around"] = self.multiplicative_primitives
        return self.parenthesize_if_needed(
                self.format("%s // %s",
                    self.rec_with_force_parens_around(
                        expr.numerator, PREC_PRODUCT, *args, **kwargs),
                    self.rec_with_force_parens_around(
                        expr.denominator, PREC_PRODUCT, *args, **kwargs)),
                enclosing_prec, PREC_PRODUCT)

    def map_remainder(self, expr, enclosing_prec, *args, **kwargs):
        kwargs["force_parens_around"] = self.multiplicative_primitives
        return self.parenthesize_if_needed(
                self.format("%s %% %s",
                    self.rec_with_force_parens_around(
                        expr.numerator, PREC_PRODUCT, *args, **kwargs),
                    self.rec_with_force_parens_around(
                        expr.denominator, PREC_PRODUCT, *args, **kwargs)),
                enclosing_prec, PREC_PRODUCT)

    # }}}

    def map_power(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.format("%s**%s",
                    self.rec(expr.base, PREC_POWER, *args, **kwargs),
                    self.rec(expr.exponent, PREC_POWER, *args, **kwargs)),
                enclosing_prec, PREC_POWER)

    def map_polynomial(self, expr, enclosing_prec, *args, **kwargs):
        # Expand the polynomial into an explicit sum and stringify that.
        from pymbolic.primitives import flattened_sum
        return self.rec(flattened_sum(
            [coeff*expr.base**exp for exp, coeff in expr.data[::-1]]),
            enclosing_prec, *args, **kwargs)

    def map_left_shift(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                # +1 to address
                # https://gitlab.tiker.net/inducer/pymbolic/issues/6
                self.format("%s << %s",
                    self.rec(expr.shiftee, PREC_SHIFT+1, *args, **kwargs),
                    self.rec(expr.shift, PREC_SHIFT+1, *args, **kwargs)),
                enclosing_prec, PREC_SHIFT)

    def map_right_shift(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                # +1 to address
                # https://gitlab.tiker.net/inducer/pymbolic/issues/6
                self.format("%s >> %s",
                    self.rec(expr.shiftee, PREC_SHIFT+1, *args, **kwargs),
                    self.rec(expr.shift, PREC_SHIFT+1, *args, **kwargs)),
                enclosing_prec, PREC_SHIFT)

    def map_bitwise_not(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                "~" + self.rec(expr.child, PREC_UNARY, *args, **kwargs),
                enclosing_prec, PREC_UNARY)

    def map_bitwise_or(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.join_rec(
                    " | ", expr.children, PREC_BITWISE_OR, *args, **kwargs),
                enclosing_prec, PREC_BITWISE_OR)

    def map_bitwise_xor(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.join_rec(
                    " ^ ", expr.children, PREC_BITWISE_XOR, *args, **kwargs),
                enclosing_prec, PREC_BITWISE_XOR)

    def map_bitwise_and(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.join_rec(
                    " & ", expr.children, PREC_BITWISE_AND, *args, **kwargs),
                enclosing_prec, PREC_BITWISE_AND)

    def map_comparison(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.format("%s %s %s",
                    self.rec(expr.left, PREC_COMPARISON, *args, **kwargs),
                    expr.operator,
                    self.rec(expr.right, PREC_COMPARISON, *args, **kwargs)),
                enclosing_prec, PREC_COMPARISON)

    def map_logical_not(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                "not " + self.rec(expr.child, PREC_UNARY, *args, **kwargs),
                enclosing_prec, PREC_UNARY)

    def map_logical_or(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.join_rec(
                    " or ", expr.children, PREC_LOGICAL_OR, *args, **kwargs),
                enclosing_prec, PREC_LOGICAL_OR)

    def map_logical_and(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.join_rec(
                    " and ", expr.children, PREC_LOGICAL_AND, *args, **kwargs),
                enclosing_prec, PREC_LOGICAL_AND)

    def map_list(self, expr, enclosing_prec, *args, **kwargs):
        return self.format(
                "[%s]", self.join_rec(", ", expr, PREC_NONE, *args, **kwargs))

    map_vector = map_list

    def map_tuple(self, expr, enclosing_prec, *args, **kwargs):
        el_str = ", ".join(
                self.rec(child, PREC_NONE, *args, **kwargs) for child in expr)
        # Single-element tuples need a trailing comma: "(x,)".
        if len(expr) == 1:
            el_str += ","
        return "(%s)" % el_str

    def map_numpy_array(self, expr, enclosing_prec, *args, **kwargs):
        import numpy
        # Stringify every element first, tracking the longest rendering so
        # the layout (inline vs one-entry-per-line) can be chosen below.
        str_array = numpy.zeros(expr.shape, dtype="object")
        max_length = 0
        for i in numpy.ndindex(expr.shape):
            s = self.rec(expr[i], PREC_NONE, *args, **kwargs)
            max_length = max(len(s), max_length)
            str_array[i] = s.replace("\n", "\n  ")
        if len(expr.shape) == 1 and max_length < 15:
            # Short 1D arrays render inline.
            return "array(%s)" % ", ".join(str_array)
        else:
            # Otherwise one "index: value" line per entry, with dashed
            # separators when entries themselves are long.
            lines = ["  %s: %s\n" % (
                ",".join(str(i_i) for i_i in i), val)
                for i, val in numpy.ndenumerate(str_array)]
            if max_length > 70:
                splitter = "  " + "-"*75 + "\n"
                return "array(\n%s)" % splitter.join(lines)
            else:
                return "array(\n%s)" % "".join(lines)

    def map_multivector(self, expr, enclosing_prec, *args, **kwargs):
        # MultiVector knows how to stringify itself given our recursor.
        return expr.stringify(self.rec, enclosing_prec, *args, **kwargs)

    def map_common_subexpression(self, expr, enclosing_prec, *args, **kwargs):
        from pymbolic.primitives import CommonSubexpression
        if type(expr) is CommonSubexpression:
            type_name = "CSE"
        else:
            type_name = type(expr).__name__
        return self.format("%s(%s)",
                type_name, self.rec(expr.child, PREC_NONE, *args, **kwargs))

    def map_if(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                "%s if %s else %s" % (
                    self.rec(expr.then, PREC_LOGICAL_OR, *args, **kwargs),
                    self.rec(expr.condition, PREC_LOGICAL_OR, *args, **kwargs),
                    self.rec(expr.else_, PREC_LOGICAL_OR, *args, **kwargs)),
                enclosing_prec, PREC_IF)

    def map_if_positive(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                "%s if %s > 0 else %s" % (
                    self.rec(expr.then, PREC_LOGICAL_OR, *args, **kwargs),
                    self.rec(expr.criterion, PREC_LOGICAL_OR, *args, **kwargs),
                    self.rec(expr.else_, PREC_LOGICAL_OR, *args, **kwargs)),
                enclosing_prec, PREC_IF)

    def map_min(self, expr, enclosing_prec, *args, **kwargs):
        # Renders as "min(...)" or "max(...)" based on the node's class name.
        what = type(expr).__name__.lower()
        return self.format("%s(%s)",
                what, self.join_rec(", ", expr.children, PREC_NONE, *args, **kwargs))

    map_max = map_min

    def map_derivative(self, expr, enclosing_prec, *args, **kwargs):
        derivs = " ".join(
                "d/d%s" % v
                for v in expr.variables)
        return "%s %s" % (
                derivs, self.rec(expr.child, PREC_PRODUCT, *args, **kwargs))

    def map_substitution(self, expr, enclosing_prec, *args, **kwargs):
        substs = ", ".join(
                "%s=%s" % (name, self.rec(val, PREC_NONE, *args, **kwargs))
                for name, val in zip(expr.variables, expr.values))
        return "[%s]{%s}" % (
                self.rec(expr.child, PREC_NONE, *args, **kwargs),
                substs)

    def map_slice(self, expr, enclosing_prec, *args, **kwargs):
        # None children become empty strings, yielding e.g. "1:" or ":".
        children = []
        for child in expr.children:
            if child is None:
                children.append("")
            else:
                children.append(self.rec(child, PREC_NONE, *args, **kwargs))
        return self.parenthesize_if_needed(
                self.join(":", children),
                enclosing_prec, PREC_NONE)

    # }}}

    def __call__(self, expr, prec=PREC_NONE, *args, **kwargs):
        """Return a string corresponding to *expr*. *prec* is the
        precedence level of the enclosing context (see
        :ref:`prec-constants`); the result is parenthesized as needed.
        """
        return pymbolic.mapper.Mapper.__call__(self, expr, prec, *args, **kwargs)
# }}}
# {{{ cse-splitting stringifier
class CSESplittingStringifyMapperMixin(object):
    """A :term:`mix-in` for subclasses of
    :class:`StringifyMapper` that collects
    "variable assignments" for
    :class:`pymbolic.primitives.CommonSubexpression` objects.

    .. attribute:: cse_to_name

        A :class:`dict` mapping expressions to CSE variable names.

    .. attribute:: cse_names

        A :class:`set` of names already assigned.

    .. attribute:: cse_name_list

        A :class:`list` of tuples of names and their string representations,
        in order of their dependencies. When generating code, walk down these names
        in order, and the generated code will never reference
        an undefined variable.

    See :class:`pymbolic.mapper.c_code.CCodeMapper` for an example
    of the use of this mix-in.
    """

    def map_common_subexpression(self, expr, enclosing_prec, *args, **kwargs):
        # Lazily create the bookkeeping attributes on first use so the
        # mix-in works without requiring cooperation from __init__.
        try:
            self.cse_to_name
        except AttributeError:
            self.cse_to_name = {}
            self.cse_names = set()
            self.cse_name_list = []
        try:
            cse_name = self.cse_to_name[expr.child]
        except KeyError:
            # First time we see this subexpression: stringify the child
            # (which may in turn register its own CSEs first, preserving
            # dependency order in cse_name_list), then pick a fresh name.
            str_child = self.rec(expr.child, PREC_NONE, *args, **kwargs)
            if expr.prefix is not None:
                # Candidate names: prefix, prefix_2, prefix_3, ...
                def generate_cse_names():
                    yield expr.prefix
                    i = 2
                    while True:
                        yield expr.prefix + "_%d" % i
                        i += 1
            else:
                # Candidate names: CSE0, CSE1, CSE2, ...
                def generate_cse_names():
                    i = 0
                    while True:
                        yield "CSE"+str(i)
                        i += 1
            for cse_name in generate_cse_names():
                if cse_name not in self.cse_names:
                    break
            self.cse_name_list.append((cse_name, str_child))
            self.cse_to_name[expr.child] = cse_name
            self.cse_names.add(cse_name)
        # The CSE renders simply as its assigned variable name.
        return cse_name

    def get_cse_strings(self):
        # "name : expression" lines, sorted by name; empty if no CSEs seen.
        return ["%s : %s" % (cse_name, cse_str)
                for cse_name, cse_str in
                sorted(getattr(self, "cse_name_list", []))]
# }}}
# {{{ sorting stringifier
class SortingStringifyMapper(StringifyMapper):
    """A stringifier that emits the terms of sums and products in a
    deterministic, sorted textual order."""

    def __init__(self, constant_mapper=str, reverse=True):
        StringifyMapper.__init__(self, constant_mapper)
        self.reverse = reverse

    def map_sum(self, expr, enclosing_prec, *args, **kwargs):
        # Stringify each summand, then order the renderings.
        parts = sorted(
                (self.rec(child, PREC_SUM, *args, **kwargs)
                    for child in expr.children),
                reverse=self.reverse)
        return self.parenthesize_if_needed(
                self.join(" + ", parts), enclosing_prec, PREC_SUM)

    def map_product(self, expr, enclosing_prec, *args, **kwargs):
        # Same idea as map_sum, at product precedence.
        parts = sorted(
                (self.rec(child, PREC_PRODUCT, *args, **kwargs)
                    for child in expr.children),
                reverse=self.reverse)
        return self.parenthesize_if_needed(
                self.join("*", parts), enclosing_prec, PREC_PRODUCT)
# }}}
# {{{ simplifying, sorting stringifier
class SimplifyingSortingStringifyMapper(StringifyMapper):
    """A stringifier that sorts sum/product terms and renders (-1)*x
    summands as subtraction ("a - x") instead of "a + (-1)*x"."""

    def __init__(self, constant_mapper=str, reverse=True):
        StringifyMapper.__init__(self, constant_mapper)
        self.reverse = reverse

    def map_sum(self, expr, enclosing_prec, *args, **kwargs):
        def get_neg_product(expr):
            # Return x when *expr* has the shape (-1)*x, else None.
            from pymbolic.primitives import is_zero, Product
            if isinstance(expr, Product) \
                    and len(expr.children) and is_zero(expr.children[0]+1):
                if len(expr.children) == 2:
                    # only the minus sign and the other child
                    return expr.children[1]
                else:
                    return Product(expr.children[1:])
            else:
                return None
        # Split the summands into plain terms and negated products, render
        # both groups, sort each, and emit "positives - negatives".
        positives = []
        negatives = []
        for ch in expr.children:
            neg_prod = get_neg_product(ch)
            if neg_prod is not None:
                negatives.append(self.rec(neg_prod, PREC_PRODUCT, *args, **kwargs))
            else:
                positives.append(self.rec(ch, PREC_SUM, *args, **kwargs))
        positives.sort(reverse=self.reverse)
        positives = " + ".join(positives)
        negatives.sort(reverse=self.reverse)
        negatives = self.join("",
                [self.format(" - %s", entry) for entry in negatives])
        result = positives + negatives
        return self.parenthesize_if_needed(result, enclosing_prec, PREC_SUM)

    def map_product(self, expr, enclosing_prec, *args, **kwargs):
        entries = []
        i = 0
        from pymbolic.primitives import is_zero
        while i < len(expr.children):
            child = expr.children[i]
            # NOTE(review): this branch is disabled via 'if False' — the
            # (-1)-folding for products appears deliberately switched off;
            # confirm intent before re-enabling.
            if False and is_zero(child+1) and i+1 < len(expr.children):
                # NOTE: That space needs to be there.
                # Otherwise two unary minus signs merge into a pre-decrement.
                entries.append(
                        self.format(
                            "- %s", self.rec(
                                expr.children[i+1], PREC_UNARY, *args, **kwargs)))
                i += 2
            else:
                entries.append(self.rec(child, PREC_PRODUCT, *args, **kwargs))
                i += 1
        entries.sort(reverse=self.reverse)
        result = "*".join(entries)
        return self.parenthesize_if_needed(result, enclosing_prec, PREC_PRODUCT)
# }}}
# {{{ latex stringifier
class LaTeXMapper(StringifyMapper):
    """A stringifier that renders expressions as LaTeX source.

    Overrides the operators whose LaTeX spelling differs from the
    plain-text one (shifts, xor, products, powers, comparisons, ...).
    """

    COMPARISON_OP_TO_LATEX = {
        "==": r"=",
        "!=": r"\ne",
        "<=": r"\le",
        ">=": r"\ge",
        "<": r"<",
        ">": r">",
    }

    def map_remainder(self, expr, enclosing_prec, *args, **kwargs):
        # BUG FIX: a stray trailing comma after the return expression
        # previously made this return a one-element *tuple* instead of
        # the formatted string.
        return self.format(r"(%s \bmod %s)",
                self.rec(expr.numerator, PREC_PRODUCT, *args, **kwargs),
                self.rec(expr.denominator, PREC_POWER, *args, **kwargs))

    def map_left_shift(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.format(r"%s \ll %s",
                    self.rec(expr.shiftee, PREC_SHIFT+1, *args, **kwargs),
                    self.rec(expr.shift, PREC_SHIFT+1, *args, **kwargs)),
                enclosing_prec, PREC_SHIFT)

    def map_right_shift(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.format(r"%s \gg %s",
                    self.rec(expr.shiftee, PREC_SHIFT+1, *args, **kwargs),
                    self.rec(expr.shift, PREC_SHIFT+1, *args, **kwargs)),
                enclosing_prec, PREC_SHIFT)

    def map_bitwise_xor(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.join_rec(
                    r" \wedge ", expr.children, PREC_BITWISE_XOR, *args, **kwargs),
                enclosing_prec, PREC_BITWISE_XOR)

    def map_product(self, expr, enclosing_prec, *args, **kwargs):
        # Juxtaposition (a space) denotes multiplication in LaTeX.
        return self.parenthesize_if_needed(
                self.join_rec(" ", expr.children, PREC_PRODUCT, *args, **kwargs),
                enclosing_prec, PREC_PRODUCT)

    def map_power(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.format("{%s}^{%s}",
                    self.rec(expr.base, PREC_NONE, *args, **kwargs),
                    self.rec(expr.exponent, PREC_NONE, *args, **kwargs)),
                enclosing_prec, PREC_NONE)

    def map_min(self, expr, enclosing_prec, *args, **kwargs):
        from pytools import is_single_valued
        if is_single_valued(expr.children):
            # min/max of identical arguments is just that argument.
            # Forward *args/**kwargs for consistency with other mappings.
            return self.rec(expr.children[0], enclosing_prec, *args, **kwargs)
        # "min" or "max", derived from the node's class name.
        what = type(expr).__name__.lower()
        return self.format(r"\%s(%s)",
                what, self.join_rec(", ", expr.children, PREC_NONE, *args, **kwargs))

    def map_max(self, expr, enclosing_prec, *args, **kwargs):
        # BUG FIX: previously dropped *args/**kwargs (unlike every sibling
        # mapping), breaking callers that pass extra state through rec().
        return self.map_min(expr, enclosing_prec, *args, **kwargs)

    def map_floor_div(self, expr, enclosing_prec, *args, **kwargs):
        return self.format(r"\lfloor {%s} / {%s} \rfloor",
                self.rec(expr.numerator, PREC_NONE, *args, **kwargs),
                self.rec(expr.denominator, PREC_NONE, *args, **kwargs))

    def map_subscript(self, expr, enclosing_prec, *args, **kwargs):
        # Subscripts render as "{agg}_{index}"; tuple indices become a
        # comma-separated list.
        if isinstance(expr.index, tuple):
            index_str = self.join_rec(", ", expr.index, PREC_NONE, *args, **kwargs)
        else:
            index_str = self.rec(expr.index, PREC_NONE, *args, **kwargs)
        return self.format("{%s}_{%s}",
                self.rec(expr.aggregate, PREC_CALL, *args, **kwargs),
                index_str)

    def map_logical_not(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                r"\neg " + self.rec(expr.child, PREC_UNARY, *args, **kwargs),
                enclosing_prec, PREC_UNARY)

    def map_logical_or(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.join_rec(
                    r" \vee ", expr.children, PREC_LOGICAL_OR, *args, **kwargs),
                enclosing_prec, PREC_LOGICAL_OR)

    def map_logical_and(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.join_rec(
                    r" \wedge ", expr.children, PREC_LOGICAL_AND, *args, **kwargs),
                enclosing_prec, PREC_LOGICAL_AND)

    def map_comparison(self, expr, enclosing_prec, *args, **kwargs):
        return self.parenthesize_if_needed(
                self.format("%s %s %s",
                    self.rec(expr.left, PREC_COMPARISON, *args, **kwargs),
                    self.COMPARISON_OP_TO_LATEX[expr.operator],
                    self.rec(expr.right, PREC_COMPARISON, *args, **kwargs)),
                enclosing_prec, PREC_COMPARISON)

    def map_substitution(self, expr, enclosing_prec, *args, **kwargs):
        substs = ", ".join(
                "%s=%s" % (name, self.rec(val, PREC_NONE, *args, **kwargs))
                for name, val in zip(expr.variables, expr.values))
        return self.format(r"[%s]\{%s\}",
                self.rec(expr.child, PREC_NONE, *args, **kwargs),
                substs)

    def map_derivative(self, expr, enclosing_prec, *args, **kwargs):
        derivs = " ".join(
                r"\frac{\partial}{\partial %s}" % v
                for v in expr.variables)
        return self.format("%s %s",
                derivs, self.rec(expr.child, PREC_PRODUCT, *args, **kwargs))
# }}}
# vim: fdm=marker
|
import re
import numpy as np
from numpy.core.defchararray import array
from numpy.core.numeric import flatnonzero
from pyRBDL.Dynamics.CompositeRigidBodyAlgorithm import CompositeRigidBodyAlgorithm
from pyRBDL.Dynamics.InverseDynamics import InverseDynamics
from numpy.linalg import inv
from pyRBDL.Contact.DetectContact import DetectContact
from pyRBDL.Contact.CalcContactForceDirect import CalcContactForceDirect
from pyRBDL.Dynamics.ForwardDynamics import ForwardDynamics
from pyRBDL.Kinematics.CalcBodyToBaseCoordinates import CalcBodyToBaseCoordinates
from pyRBDL.Contact.ImpulsiveDynamics import ImpulsiveDynamics
def solver_ode(X: np.ndarray, model: dict, flag_contact: np.array, T: float):
    """
    Integrate the contact dynamics forward by T seconds using solve_ivp,
    restarting the integration after each termination event (impact).

    param:
        X: current state — concatenation of q (first NB entries) and qdot
        model: robot model dict; must provide 'NB', 'tau', 'contactpoint'
            (and whatever dynamics_fun reads: 'NC', ST, ...)
        flag_contact: per-contact-point contact flags
        T: integration horizon in seconds
    return:
        xk: state at the end of the interval (q, qdot stacked)
        contact_force: dict of contact forces, filled in by dynamics_fun
    """
    from scipy.integrate import solve_ivp
    status = -1
    # Calculate state vector by ODE
    t0 = 0
    tf = T
    tspan = (t0, tf)
    NB = model['NB']
    q, qdot = np.array(X[0:NB]), np.array(X[NB:2*NB])
    x0 = np.asfarray(np.hstack([q, qdot]))
    # Passed (mutably) into dynamics_fun, which writes fc/fcqp/fcpd into it.
    contact_force = dict()
    print("flag contact", flag_contact, model['contactpoint'])
    # Loop until the integrator reaches tf (status 0). status 1 means a
    # termination event fired; we apply impulsive dynamics and restart.
    while status != 0:
        # ODE calculate
        sol = solve_ivp(dynamics_fun, tspan, x0, args=(model, flag_contact, contact_force),
            method='RK45', rtol=1e-3, atol=1e-4)
        status = sol.status
        if(status == -1):
            print("error message:", sol.message)
        assert status != -1, "Integration Failed!!!"
        if status == 0:
            print("The solver successfully reached the end of tspan.")
            pass
        if status == 1:
            print("A termination event occurred")
            # Use the earliest event time and its state.
            t_events = sol.t_events
            te_idx = t_events.index(min(t_events))
            te = float(t_events[te_idx])
            xe = sol.y_events[te_idx].flatten()
            # Get q qdot
            print("xe", xe)
            q = xe[0:NB]
            qdot = xe[NB:2* NB]
            # Detect contact TODO
            flag_contact = np.array(flag_contact)#DetectContact(model, q, qdot, contact_cond)
            # Impact dynamics TODO
            qdot_impulse = ImpulsiveDynamics(model, q, qdot, flag_contact, nf=2);
            qdot_impulse = qdot_impulse.flatten()
            # Update initial state
            x0 = np.hstack([q, qdot_impulse])
            tspan = (te, tf)
    xk = sol.y[:, -1]
    # print("solved xk",xk[0:NB])
    return xk, contact_force
def dynamics_fun(t: float, X: np.ndarray, model: dict, flag_contact: np.array, contact_force: dict)->np.ndarray:
    """
    ODE right-hand side for solve_ivp: compute the state derivative.

    param:
        t: current time (unused directly; required by the solve_ivp API)
        X: current state — q (first NB entries) followed by qdot
        model: robot model dict; reads 'NB', 'NC', 'tau' and caches
            'H', 'C', 'Hinv' back into it as a side effect
        flag_contact: per-contact-point contact flags
        contact_force: output dict — fc/fcqp/fcpd entries are written here
    return:
        Xdot: concatenation of qdot and qddot
    """
    NB = int(model["NB"])
    NC = int(model["NC"])
    # Get q qdot tau
    q = X[0:NB]
    qdot = X[NB: 2 * NB]
    tau = model["tau"]
    # Calculate the joint-space inertia matrix H and bias force C,
    # caching them (plus H^-1) on the model dict for downstream use.
    model["H"] = CompositeRigidBodyAlgorithm(model, q)
    model["C"] = InverseDynamics(model, q, qdot, np.zeros((NB, 1)))
    model["Hinv"] = inv(model["H"])
    # Calculate contact force in joint space
    if np.sum(flag_contact) !=0:
        lam, fqp, fc, fcqp, fcpd = CalcContactForceDirect(model, q, qdot, tau, flag_contact, 2)
        contact_force["fc"] = fc
        contact_force["fcqp"] = fcqp
        contact_force["fcpd"] = fcpd
    else:
        # No active contacts: zero joint-space force and zero 3D forces
        # (3 components per contact point).
        lam = np.zeros((NB, 1))
        contact_force["fc"] = np.zeros((3*NC, 1))
        contact_force["fcqp"] = np.zeros((3*NC, 1))
        contact_force["fcpd"] = np.zeros((3*NC, 1))
    # Forward dynamics
    Tau = tau + lam
    qddot = ForwardDynamics(model, q, qdot, Tau).flatten()
    # Return Xdot
    Xdot = np.asfarray(np.hstack([qdot, qddot]))
    # print("Xdot in dynamics",Xdot)
    return Xdot
# Pandas tutorial script: Series/DataFrame basics, statistics, file I/O,
# data cleaning (missing values, duplicates) and merging.
# Commonly used Python extension libraries: numpy and pandas
import numpy as np
import pandas as pd

# 1 Pandas data structure - Series
# The main data structures are Series (storage) and DataFrame (analysis)
# 1.1 Series - made up of an index and values; index on the left, values on the right
s1 = pd.Series([1, 2, 3, 4, 5])
print('s1= ', s1)
# An explicit index can also be set
s2 = pd.Series([1, 2, 3, 4, 5], index=['第一', '第二', '第三', '第四', '第五'])
print('s2= ', s2)
# values and index expose the data and the index; label-based slicing is
# inclusive on BOTH ends
print('s2.index= ', s2.index)
print('s2.values= ', s2.values)
# The index cannot be modified, but the data can
print('s2中第二对应的数据:{}'.format(s2['第二']))
s2['第二'] = 10
print('s2中第二对应的数据:{}'.format(s2['第二']))
print('s2中对应的数据:{}'.format(s2[['第一', '第二', '第三']]))
print('s2中对应的数据:{}'.format(s2['第二': '第三']))
# 1.3 Creating a Series from a dict
s_dic = {'First': 1, 'Two': 2, 'Three': 3, 'Third': 4, 'Four': 5 }
s3 = pd.Series(s_dic)
print('s3= {}'.format(s3))
# Arrange entries in a specified order
s4 = pd.Series(s_dic, index=['First', 'Third', 'Four', 'Two'])
print('s4= {}'.format(s4))
# 'in' / 'not in' work like they do on dicts
print('s4 中含有six', 'six' in s4)
print('s4 中是不含有six', 'six' not in s4)
# Missing values: an index label with no data is represented as NaN
s5 = pd.Series(s_dic, index=['First', 'Third', 'Four', 'Six'])
print('s5= {}'.format(s5))
print('s5数据缺失 {}'.format(s5.isnull()))
print('s5数据不缺失 {}'.format(s5.notnull()))
# 1.4 Series arithmetic - data is automatically aligned on the index
print('s4= ', s4)
print('s5= ', s5)
print('s5+s4= ', s5 + s4)

# 2 DataFrame data structure
# A 2D tabular structure with both a row index and a column index
df_dic = {'color': ['red', 'yellow', 'blue', 'purple', 'pink'],
          'size': ['meidum', 'small', 'big', 'medium', 'small'],
          'taste': ['sweet', 'sour', 'salty', 'sweet', 'spicy']}
df = pd.DataFrame(df_dic, columns=['size', 'taste', 'color', 'category'])
# Column order can be specified; a column not present in the dict is all-NaN
print('df= {}'.format(df))
# The row index and column index can be given names
df.index.name = 'sample'
df.columns.name = 'feature'
print('df=\n{}'.format(df))
# .values returns all the data as a 2D array
print('df.values=\n', df.values)
# Two ways to access a column
print('df中的color列\n{}'.format(df.color))
print('df中的color列\n{}'.format(df['color']))
# A row is accessed by row label with loc
print('df中的第2行\n{}'.format(df.loc[1]))
# An empty column can be filled with data
df['category'] = np.arange(5)
print('df=\n{}'.format(df))  # fill all values
df['category'] = pd.Series([2, 3, 4], index=[0, 2, 4])
print('df=\n{}'.format(df))  # fill some of the values
# Assigning to a non-existent column creates a new one
df['country'] = pd.Series(['China', 'UK', 'Japan', 'USA', 'Frank'])
print('df=\n{}'.format(df))  # add a new column and assign values
# Rows can be selected with a boolean array
print('df 中category小于等于3得数据\n{}'.format(df[df['category'] <= 3]))

# 3 Math and statistics: sums, means, quantiles, etc.
# mean; median; count (non-missing); min; max
# describe (summary); var; std; skew; kurt; diff (first difference);
# cummin; cummax; cumsum; cumprod; cov; corr
df1 = pd.DataFrame([[3, 2, 3, 1], [2, 5, 3, 6], [3, 4, 5, 2], [9, 5, 3, 1]]
                   , index=['a', 'b', 'c', 'd'], columns=['one', 'two', 'three', 'four'])
print('df1=\n{}'.format(df1))
print('df1.sum_lie=\n{}'.format(df1.sum()))  # column-wise sum
print('df1.sum_line=\n{}'.format(df1.sum(axis=1)))  # row-wise sum
print('df1.cumsum=\n{}'.format(df1.cumsum()))  # cumulative sum, top to bottom
print('df1.cumsum=\n{}'.format(df1.cumsum(axis=1)))  # cumulative sum, left to right

# 4 DataFrame file operations
# 4.1 Reading files: csv, txt, excel, sql, json, html, dict
# read_csv = pd.read_csv('E:\\pycharm_repo\\datas\\numpy_arr12.csv', encoding='utf-8')
# print('read_csv=\n', read_csv)
# 4.2 Writing files: csv, txt, excel, sql, json, html, dict
# df.to_csv('datas\\pandas1.csv', sep=',', header=True, index=True, encoding='utf-8')

# 5 Data cleaning: missing values, duplicates, etc.
# 5.1 Missing values are generally recorded as numpy.nan
df4 = pd.DataFrame([[3, np.nan, 3, 1], [2, 5, np.nan, 3], [3, 4, 5, np.nan], [5, 3, 1, 3]],
                   index=['a', 'b', 'c', 'd'], columns=['one', 'two', 'three', 'four'])
print(df4.isnull())  # elementwise missing-value test (boolean result)
print(df4[df4.isnull().any(axis=1)])  # rows that contain at least one NaN
arr = pd.Series([1, 2, 3, np.nan, 5, 6])
print('arr:{}'.format(arr))
# dropna filters missing values without modifying the original
print('过滤缺失值:{}'.format(arr.dropna()))
arr = arr.dropna()  # arr.dropna(inplace=True) gives the same result
print('过滤缺失值之后的arr:{}'.format(arr))
# BUG FIX: np.NAN was removed in NumPy 2.0 — np.nan is the canonical spelling
df4['fifth'] = np.nan
print('df4:{}'.format(df4))
# fillna - fill in missing values
# Replace NaN with 0
print('df4.fillna(0)\n{}'.format(df4.fillna(0)))
# Fill missing values with the column median
print('df4_fillna_median\n{}'.format(df4.fillna(df4.median())))
# Forward fill (ffill) and backward fill (bfill)
print('df4.ffill()\n{}'.format(df4.ffill()))
print('df4.bfill()\n{}'.format(df4.bfill()))
# 5.2 Handling duplicates
df5 = pd.DataFrame([[3, 5, 3, 1], [2, 5, 5, 6], [3, 4, 5, 3]
                    , [5, 3, 1, 3], [3, 4, 5, 3], [3, 4, 6, 8]]
                   , index=['a', 'b', 'c', 'd', 'e', 'f']
                   , columns=['one', 'two', 'three', 'four'])
print('df5\n', df5)
# Check for duplicated rows
# BUG FIX: these two checks were run on df4 while the labels (and the
# duplicate-row demo) clearly refer to df5.
print('查看df5是否存在重复行\n', df5[df5.duplicated()])
# Check whether the first two columns contain duplicated rows
print('查看df5前两列是否存在重复行\n', df5[df5.duplicated(['one', 'two'])])
# Drop duplicates, keeping only the first occurrence
print('删除重复列,只保存第一次出现的重复行\n', df5.drop_duplicates(subset=['one', 'two'], keep='first'))
# 5.3 Merging and grouping records
# BUG FIX: DataFrame.append was removed in pandas 2.0; pd.concat is the
# supported way to stack two frames with identical columns.
df6 = pd.DataFrame([[1, 7, 3, 1], [3, 5, 5, 6]], index=['g', 'h'], columns=['one', 'two', 'three', 'four'])
print('df5.append(df6)合并\n', pd.concat([df5, df6]))
# concat can merge by rows or by columns (axis=1)
print('df5和df6行合并', pd.concat([df5, df6]))
print('df5和df6列合并', pd.concat([df5, df6], axis=1))  # 'join' selects union or intersection
# merge works like a table join: left, right, inner, outer
df7 = pd.DataFrame([[1, 'lily'], [2, 'joke'], [3, 'andy']], index=['a', 'b', 'c'], columns=['id', 'name'])
print('df7:\n', df7)
df8 = pd.DataFrame([[1, 22], [2, 33], [4, 44]], index=['a', 'b', 'c'], columns=['id', 'age'])
print('df8:\n', df8)
print('df7 inner join df8:\n', pd.merge(df7, df8, left_on='id', right_on='id', how='inner'))  # inner is the default
print('df7 left join df8:\n', pd.merge(df7, df8, left_on='id', right_on='id', how='left'))
print('df7 right join df8:\n', pd.merge(df7, df8, left_on='id', right_on='id', how='right'))
print('df7 outer join df8:\n', pd.merge(df7, df8, left_on='id', right_on='id', how='outer'))
|
from io import BytesIO
from PIL import Image
import shutil
import arrow
from bs4 import BeautifulSoup
import requests
def get_stream_url(flickr_link, username='nlowell'):
    '''Reduce a full flickr photo link to the base photostream URL.

    Generalized: the stream owner's *username* is now a parameter
    (defaulting to 'nlowell' for backward compatibility) instead of a
    hard-coded constant.

    :param flickr_link: full https:// link into the stream
    :param username: stream owner whose segment ends the base URL
    :return: 'https://<host>/.../<username>'
    :raises ValueError: if *username* does not appear in the link path
    '''
    parsed = flickr_link.replace('https://', '').split('/')
    # Keep everything up to and including the username segment.
    url = '/'.join(parsed[:parsed.index(username) + 1])
    return 'https://' + url
def get_last_stream_page(stream_url, album=False):
    '''Extracts the integer of the last page in a stream.

    Fetches *stream_url*, locates the pagination widget and reads the
    number shown in its last page slot.
    '''
    markup = requests.get(stream_url).text
    soup = BeautifulSoup(markup, 'html.parser')
    # The album pagination widget uses a slightly different CSS class.
    page_class = 'view pagination-view requiredToShowOnServer'
    if not album:
        page_class = 'view pagination-view requiredToShowOnServer photostream'
    pager = soup.find(class_=page_class)
    # The fourth-from-last child of the widget holds the last page number.
    slot = list(pager.children)[-4:-3]
    return int(slot[0].text)
def get_photostream(stream_url, album=False):
    '''Returns a list of image URLs scraped from one photostream page.'''
    page = requests.get(stream_url)
    soup = BeautifulSoup(page.text, 'html.parser')
    # Photo tiles carry different CSS classes on album vs. stream pages.
    if album:
        tiles = soup.find_all(
            class_="view photo-list-photo-view requiredToShowOn"
            + "Server awake"
        )
    else:
        tiles = soup.find_all(
            class_="view photo-list-photo-view requiredToShowOnServer photo"
            + "stream awake"
        )
    urls = []
    for tile in tiles:
        style = tile['style']
        # The image URL is embedded in the inline style attribute;
        # strip the trailing char and normalize the scheme.
        image_url = style[style.index('https://'):-1].replace(
            'url(//', 'https://')  # replace may no longer be needed
        urls.append(image_url)
    return urls
def build_flickr_archive(flickr_link, archive, album=False):
    '''Iterate through an entire photostream, folding every page into *archive*.

    Bug fix: the loop previously used ``range(1, last_page)`` which silently
    skipped the final page; flickr pages are numbered 1..last_page inclusive.
    '''
    if album:
        stream_url = flickr_link
    else:
        stream_url = get_stream_url(flickr_link)
    last_page = get_last_stream_page(stream_url, album)
    for page in range(1, last_page + 1):  # include the final page
        url = stream_url + '/page' + str(page)
        print(url)
        stream = get_photostream(url, album)
        archive = process_photo_stream_page(stream, archive)
    return archive
def get_original_photo(photo_id):
    '''For a single photo id, return a dict of photo attributes, or None.

    Scrapes the original-size page for the photo link and parses the title
    into tag/temp/sun/condition fields. Returns None when the title does not
    carry one of the recognised tags ('#tommw' / '#tommy').

    Fix: the bare ``except:`` clauses (which also swallowed SystemExit and
    KeyboardInterrupt) are narrowed to IndexError — the only failure the
    try bodies can produce (``full_title[1]`` on a short title).
    '''
    info = {}
    original_page = 'https://www.flickr.com/photos/nlowell/' \
        + photo_id + '/sizes/o/'
    html = requests.get(original_page)
    soup = BeautifulSoup(html.text, 'html.parser')
    orig_link = soup.find(attrs={'id': 'allsizes-photo'})
    src = orig_link.contents[1]
    info['link'] = src.get('src')
    raw_title = soup.title.text
    title = soup.title.text.split('|')[1].strip()
    if title.find('.') > 0:
        try:
            info['condition'] = title.split('.')[-1].strip()
            full_title = title.split('.')[0].split(' ')
            info['tag'] = full_title[0]
            info['temp'] = full_title[1]
            info['sun'] = ' '.join(full_title[2:])
        except IndexError:
            # Title did not have the expected "tag temp sun. condition" shape.
            info['tag'] = 'Error'
    elif title.find(',') > 0:
        try:
            info['condition'] = title.split(',')[-1].strip()
            full_title = title.split(',')[0].split(' ')
            info['tag'] = full_title[0]
            info['temp'] = full_title[1]
            info['sun'] = ' '.join(full_title[2:])
        except IndexError:
            info['tag'] = 'Error'
    info['raw_title'] = raw_title  # incase titles need to be reprocessed
    if info.get('tag') not in ['#tommw', '#tommy']:
        info = None
    return info
def process_photo_stream_page(stream, archive=None):
    """Download the thumbnail and metadata for each new photo URL in *stream*.

    New photos are appended to ``archive['data']`` and their ids recorded in
    ``archive['meta']['ids']``; already-recorded ids are skipped. When
    *archive* is falsy, a fresh one is obtained from get_archive().
    Returns the updated archive.
    """
    if not archive:
        archive = get_archive()
    for photo in stream:
        image_name = photo.split('/')[-1]
        photo_id = image_name.split('_')[0]
        if photo_id in archive['meta']['ids']:
            # skip photos already recorded
            continue
        # Save the thumb size image
        thumb_name = 'images/' + photo_id + '.jpg'
        image = requests.get(photo, stream=True)
        with open(thumb_name, 'wb') as image_file:
            shutil.copyfileobj(image.raw, image_file)
        # Only the original has EXIF data
        info = get_original_photo(photo_id)
        # Format the EXIF data
        if info:
            info['photo_id'] = photo_id
            info['thumb'] = thumb_name
            p_url = info.get('link')
            if p_url:
                camera, timestamp = get_exif(p_url)
            else:
                camera, timestamp = ('Error', 'Error')
            info['camera'] = camera
            info['timestamp'] = timestamp
            date = timestamp.split(' ')[0]
            info['date'] = date
            archive['data'].append(info)
            archive['meta']['ids'].append(photo_id)
            print(photo_id + ' added')
        else:
            # NOTE(review): this message says '#tommow' while get_original_photo
            # filters on '#tommw'/'#tommy' — confirm which spelling is intended.
            print('no #tommow tag found')
    return archive
def get_exif(url):
    '''Fetch *url* and return (camera, timestamp) from its EXIF data.'''
    response = requests.get(url)
    exif_data = Image.open(BytesIO(response.content))._getexif()
    # EXIF tag 272 = camera model, tag 306 = DateTime "YYYY:MM:DD HH:MM:SS";
    # swap the first two ':' separators for '-' to get an ISO-like date part.
    timestamp = exif_data[306].replace(':', '-', 2)
    return (exif_data[272], timestamp)
|
<reponame>select766/neneshogi
"""
教師AIと評価対象AIの指し手を比較する。
USIエンジン対応。
エンジンの設定ファイルは、やねうら王自動対局フレームワークのフォーマットに準拠。以下引用:
1行目にengineの実行ファイル名(同じフォルダに配置)
2行目に思考時のコマンド
3行目以降にsetoption等、初期化時に思考エンジンに送りたいコマンドを書く。
例)
test.exe
go btime 100 wtime 100 byoyomi 0
setoption name Threads value 1
setoption name Hash value 1024
処理
- 2エンジンで、
- positionコマンドで詰みでない局面をエンジンに渡す
- goコマンドで思考
- bestmoveを取得
- 2エンジンのbestmoveが異なるとき、
- 各エンジンの指し手を指した後の局面を教師エンジンに渡す
- goコマンドで試行
- 最後のinfoコマンドでの評価値を取得(思考開始局面が詰み可能性があることに注意)
"""
import pickle
import sys
import os
import subprocess
import argparse
from typing import List
from progressbar import ProgressBar
class EngineConfig:
    """USI engine settings parsed from a Yaneura-Ou style config file.

    Line 1 is the engine executable path, line 2 the 'go' command used for
    thinking, and every remaining line an initialization command (setoption
    etc.) sent to the engine at startup.
    """
    exe_path: str
    go_command: str
    options: List[str]

    def __init__(self, path):
        self.options = []
        with open(path) as f:
            for line_no, raw in enumerate(f):
                text = raw.rstrip()
                if line_no == 0:
                    self.exe_path = text
                elif line_no == 1:
                    self.go_command = text
                else:
                    self.options.append(text)
class Engine:
    """A running USI engine subprocess, initialized and ready to think."""
    config: EngineConfig
    process: subprocess.Popen

    def __init__(self, config: EngineConfig):
        """Spawn the engine and run the usi/option/isready handshake.

        Raises Exception if the process exits (EOF on stdout) before
        replying 'readyok'.
        """
        self.config = config
        engine_process = subprocess.Popen([config.exe_path],
                                          stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                          cwd=os.path.dirname(config.exe_path))
        # \r\n line endings — presumably targeting Windows engine builds;
        # confirm if running engines on another platform.
        engine_process.stdin.write(b"usi\r\n")
        for option in config.options:
            engine_process.stdin.write(option.encode("ascii") + b"\r\n")
        engine_process.stdin.write(b"isready\r\n")
        engine_process.stdin.flush()
        while True:
            line = engine_process.stdout.readline()
            if len(line) == 0:
                # EOF: the engine died during startup.
                raise Exception("Cannot initialize Engine")
            if line.startswith(b"readyok"):
                break
        self.process = engine_process

    def close(self):
        """Send 'quit', wait for exit, and mark the engine closed (idempotent)."""
        if self.process is not None:
            self.process.stdin.write(b"quit\r\n")
            self.process.stdin.flush()
            self.process.wait()
            self.process = None
class EvalOutput:
    """One engine evaluation: best move plus optional score and PV.

    Fields default to None so __repr__ (and callers) work even when the
    engine reported a bestmove without any scored 'info' line. Previously
    these were bare annotations, which create no attributes, so the
    ``is not None`` checks in __repr__ raised AttributeError in that case.
    """
    bestmove: str = None
    pv: List[str] = None
    score: int = None
    score_type: str = None

    def __repr__(self):
        s = f"bestmove {self.bestmove}"
        if self.score_type is not None and self.score is not None:
            s += f" score {self.score_type} {self.score}"
        if self.pv is not None:
            s += f" pv {' '.join(self.pv)}"
        return s
def eval_pos(moves: List[str], engine: Engine) -> EvalOutput:
    """Have *engine* think about the position reached by *moves* (from
    startpos) and collect the bestmove plus score/pv from its 'info' output.
    Blocks until the engine prints 'bestmove'.
    """
    engine.process.stdin.write(f"position startpos moves {' '.join(moves)}\r\n".encode("ascii"))
    engine.process.stdin.write(engine.config.go_command.encode("ascii") + b"\r\n")
    engine.process.stdin.flush()
    eval_out = EvalOutput()
    while True:
        line = engine.process.stdout.readline().decode("ascii").rstrip()
        parts = line.split(" ")  # type: List[str]
        if len(parts) == 0:
            continue
        cmd = parts.pop(0)
        if cmd == "info":
            while len(parts) > 0:
                subcmd = parts.pop(0)
                if subcmd in ["depth", "seldepth", "time", "nodes", "currmove", "hashfull", "nps"]:
                    # These subcommands carry exactly one value; skip it.
                    parts.pop(0)
                    continue
                elif subcmd == "string":
                    # Free text runs to end of line; nothing more to parse.
                    break
                elif subcmd == "pv":
                    # The rest of the line is the principal variation.
                    eval_out.pv = parts
                    break
                elif subcmd == "score":
                    eval_out.score_type = parts.pop(0)
                    score_str = parts.pop(0)
                    if score_str == "+":  # mate for us, move count unknown
                        score_val = 1
                    elif score_str == "-":  # mated, move count unknown
                        score_val = -1
                    else:
                        score_val = int(score_str)
                    eval_out.score = score_val
                    if len(parts) > 0 and parts[0] in ["lowerbound", "upperbound"]:
                        parts.pop(0)
        elif cmd == "bestmove":
            eval_out.bestmove = parts.pop(0)
            break
    return eval_out
class CompareResult:
    """Field layout for one compared position.

    NOTE(review): compare_engine() currently returns a plain dict rather
    than instances of this class — it appears unused; confirm before
    relying on it.
    """
    moves: List[str]        # moves leading to the compared position
    teacher_pv: List[str]   # teacher engine's principal variation
    student_pv: List[str]   # student engine's principal variation
    teacher_bestmove: str
    student_bestmove: str
def compare_engine(moves: List[str], teacher_engine: Engine, student_engine: Engine) -> dict:
    """Evaluate one position with both engines; when their best moves differ,
    also evaluate both successor positions with both engines.

    Returns a dict of EvalOutput values (the four after-move entries are
    None when the engines agreed on the best move).
    """
    root_t_eval = eval_pos(moves, teacher_engine)
    root_s_eval = eval_pos(moves, student_engine)
    result = {
        "moves": moves,
        "root_t_eval": root_t_eval,
        "root_s_eval": root_s_eval,
        "t_after_t_eval": None,
        "t_after_s_eval": None,
        "s_after_t_eval": None,
        "s_after_s_eval": None,
    }
    if root_s_eval.bestmove != root_t_eval.bestmove:
        # Advance the position by each engine's bestmove and re-evaluate
        # with both engines.
        after_teacher = moves + [root_t_eval.bestmove]
        after_student = moves + [root_s_eval.bestmove]
        result["t_after_t_eval"] = eval_pos(after_teacher, teacher_engine)
        result["t_after_s_eval"] = eval_pos(after_teacher, student_engine)
        result["s_after_t_eval"] = eval_pos(after_student, teacher_engine)
        result["s_after_s_eval"] = eval_pos(after_student, student_engine)
    return result
def run_compare(args):
    """Compare the two engines over every usable position in the kifu file
    and pickle the list of comparison dicts to args.dst."""
    teacher_config = EngineConfig(args.teacher)
    student_config = EngineConfig(args.student)
    teacher_engine = Engine(teacher_config)
    student_engine = Engine(student_config)
    compare_results = []
    # Pre-scan the kifu to know the total position count for the progress bar.
    games = []
    position_count = 0
    with open(args.kifu) as kifu_lines:
        for game_idx, kifu_line in enumerate(kifu_lines):
            if args.games >= 0 and game_idx >= args.games:
                break
            moves = kifu_line.rstrip().split(" ")[2:]  # startpos moves 7g7f ...
            position_count += max(len(moves) - 1 - args.skiplast - args.skipfirst, 0)
            games.append(moves)
    pb = ProgressBar(max_value=position_count)
    for moves in games:
        # The position after the final move is mate, so it cannot be used.
        for ply in range(args.skipfirst, len(moves) - 1 - args.skiplast):
            compare_results.append(compare_engine(moves[:ply], teacher_engine, student_engine))
            pb.update(pb.value + 1)
    pb.finish()
    teacher_engine.close()
    student_engine.close()
    with open(args.dst, "wb") as f:
        pickle.dump(compare_results, f, protocol=pickle.HIGHEST_PROTOCOL)
def main():
    """Command-line entry point: parse arguments and run the comparison."""
    parser = argparse.ArgumentParser()
    parser.add_argument("kifu")
    parser.add_argument("dst")
    for flag, default_value in (("--teacher", "teacher.txt"),
                                ("--student", "student.txt")):
        parser.add_argument(flag, default=default_value)
    for flag, default_value in (("--skipfirst", 16),
                                ("--skiplast", 0),
                                ("--games", -1)):
        parser.add_argument(flag, type=int, default=default_value)
    run_compare(parser.parse_args())


if __name__ == "__main__":
    main()
|
<gh_stars>1-10
import numpy as np
from PyQt5 import QtWidgets as widgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.decomposition import PCA
from suss.gui.utils import clear_axes
class ProjectionsPlot(widgets.QFrame):
    """PCA projection views of the currently selected clusters: a 2D scatter
    of the first two components (top) and a stacked histogram of the first
    component (bottom)."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setup_plots()
        self.setup_data()
        self.init_ui()
        # Repaint when the parent window's dataset or selection changes.
        self.parent().UPDATED_CLUSTERS.connect(self.reset)
        self.parent().CLUSTER_HIGHLIGHT.connect(self.on_cluster_highlight)
        self.parent().CLUSTER_SELECT.connect(self.on_cluster_select)

    def reset(self, new_dataset, old_dataset):
        """Clear both axes and reinitialize after a dataset update."""
        self.ax_1d.clear()
        self.ax_2d.clear()
        self.canvas.draw_idle()
        self.setup_data()

    @property
    def dataset(self):
        # The dataset lives on the parent window; this widget never owns it.
        return self.parent().dataset

    @property
    def colors(self):
        # Cluster-label -> color mapping shared across the whole UI.
        return self.parent().colors

    @property
    def selected(self):
        return self.parent().selected

    def setup_plots(self):
        """Create the canvas with a 2D axes (top 75%) and 1D axes (bottom 25%)."""
        fig = Figure(facecolor="#C0C0C0")
        fig.patch.set_alpha(1.0)
        fig.set_rasterized(True)
        self.canvas = FigureCanvas(fig)
        self.canvas.setStyleSheet("background-color:transparent;")
        self.ax_2d = fig.add_axes(
            [0, 0.25, 1, 0.75],
            facecolor="#111111")
        self.ax_2d.patch.set_alpha(0.8)
        self.ax_1d = fig.add_axes(
            [0, 0, 1, 0.25],
            facecolor="#111111")
        self.ax_1d.patch.set_alpha(0.8)
        clear_axes(self.ax_1d, self.ax_2d)

    def setup_data(self):
        # Nothing to precompute; just repaint the empty canvas when the
        # dataset has no nodes.
        if not len(self.dataset.nodes):
            self.canvas.draw_idle()
            return

    def on_cluster_select(self, selected, old_selected):
        """Fit a 2-component PCA on the selected clusters' waveforms and
        redraw the scatter and histogram projections."""
        self.ax_1d.clear()
        self.ax_2d.clear()
        clear_axes(self.ax_1d, self.ax_2d)
        self.ax_1d.patch.set_alpha(0.8)
        self.ax_2d.patch.set_alpha(0.8)
        if not len(selected):
            self.canvas.draw_idle()
            return
        selected_data = self.dataset.select(
            np.isin(self.dataset.labels, list(selected))
        )
        # Subsample large clusters to keep scatter plotting near ~1000 points.
        skip = max(1, int(selected_data.count / 1000))
        if len(selected_data.flatten(1).waveforms) < 2:
            self.canvas.draw_idle()
            return
        self.projector = PCA(n_components=2).fit(selected_data.flatten(1).waveforms)
        projected = [
            self.projector.transform(node.flatten().waveforms[::1 if skip > len(node.flatten().waveforms) else skip])
            for node in selected_data.nodes
        ]
        # Commented-out experiment: draw ISI-violation links between points.
        '''
        times = np.concatenate([
            node.flatten().times
            for node in selected_data.nodes
        ])
        wfs = np.concatenate([
            self.projector.transform(node.flatten().waveforms)
            for node in selected_data.nodes
        ])
        labels = np.concatenate([
            np.ones(node.count).astype(np.int) * label
            for label, node in zip(selected_data.labels, selected_data.nodes)
        ])
        time_argsort = np.argsort(times)
        sorted_times = times[time_argsort]
        sorted_2d = wfs[time_argsort]
        sorted_labels = labels[time_argsort]
        print(np.where(np.diff(sorted_times) < 0.001)[0])
        isi_violations = np.where((np.diff(sorted_times) < 0.001) & (sorted_labels[:-1] != sorted_labels[1:]))[0]
        print(isi_violations)
        lines_x = np.array([sorted_2d[isi_violations, 0], sorted_2d[isi_violations + 1, 0]])[:, ::1 + skip // 100]
        lines_y = np.array([sorted_2d[isi_violations, 1], sorted_2d[isi_violations + 1, 1]])[:, ::1 + skip // 100]
        '''
        for label, data in zip(selected_data.labels, projected):
            self.ax_2d.scatter(
                *data.T[:2],
                color=self.colors[label],
                s=1,
                alpha=1,
                rasterized=True
            )
        '''
        self.ax_2d.plot(lines_x, lines_y, linewidth=0.5, color="White", linestyle="--")
        '''
        self.ax_1d.hist(
            [data[:, 0] for data in projected],
            bins=100,
            color=[self.colors[label] for label in selected_data.labels],
            alpha=0.9,
            stacked=True,
            rasterized=True
        )
        self.canvas.draw_idle()

    def on_cluster_highlight(self, new_highlight, old_highlight, temporary):
        # Highlighting does not change the projection; intentionally a no-op.
        pass

    def init_ui(self):
        layout = widgets.QVBoxLayout()
        layout.addWidget(self.canvas)
        self.setLayout(layout)
|
<reponame>cblades-tc/dendrol
from typing import Iterable, Tuple
import pytest
import yaml
from dendrol import Pattern, PatternTree
from dendrol.debug import PatternTreeLoader
TESTS = yaml.load('''
simple-comparison:
expression: >
[ipv4-addr:value = '1.2.3.4']
pattern:
observation:
objects: {ipv4-addr}
join:
qualifiers:
expressions:
- comparison:
object: ipv4-addr
path: [value]
negated:
operator: '='
value: 1.2.3.4
joined-comparisons:
expression: >
[email-message:subject = 'Yo!' AND email-message:body LIKE '%malicious%']
pattern:
observation:
objects: {email-message}
join: AND
qualifiers:
expressions:
- comparison:
object: email-message
path: [subject]
negated:
operator: '='
value: Yo!
- comparison:
object: email-message
path: [body]
negated:
operator: LIKE
value: '%malicious%'
complex-comparisons:
expression: >
[file:name = 'test.exe' AND (file:size < 4 OR file:size > 4096)]
pattern:
observation:
objects: {file}
join: AND
qualifiers:
expressions:
- comparison:
object: file
path: [name]
negated:
operator: '='
value: test.exe
- expression:
join: OR
expressions:
- comparison:
object: file
path: [size]
negated:
operator: '<'
value: 4
- comparison:
object: file
path: [size]
negated:
operator: '>'
value: 4096
dictionary-object-properties:
expression: >
[email-message:from_ref.value MATCHES '.*']
pattern:
observation:
objects: {email-message}
join:
qualifiers:
expressions:
- comparison:
object: email-message
path:
- from_ref
- value
negated:
operator: MATCHES
value: .*
list-object-properties:
expression: >
[file:extensions.'windows-pebinary-ext'.sections[*].entropy > 7.0]
pattern:
observation:
objects: {file}
join:
qualifiers:
expressions:
- comparison:
object: file
path:
- extensions
- windows-pebinary-ext
- sections
- [*]
- entropy
negated:
operator: '>'
value: 7.0
start-stop-qualifier:
expression: >
[ipv4-addr:value = '1.2.3.4'] START t'2017-06-29T00:00:00Z' STOP t'2017-12-05T00:00:00Z'
pattern:
observation:
objects: {ipv4-addr}
join:
qualifiers:
- start_stop:
start: 2017-06-29 00:00:00
stop: 2017-12-05 00:00:00
expressions:
- comparison:
object: ipv4-addr
path: [value]
negated:
operator: '='
value: '1.2.3.4'
within-qualifier:
expression: >
[ipv4-addr:value = '1.2.3.4'] WITHIN 10 SECONDS
pattern:
observation:
objects: {ipv4-addr}
join:
qualifiers:
- within:
value: 10
unit: SECONDS
expressions:
- comparison:
object: ipv4-addr
path: [value]
negated:
operator: '='
value: '1.2.3.4'
repeated-qualifier:
expression: >
[ipv4-addr:value = '1.2.3.4'] REPEATS 5 TIMES
pattern:
observation:
objects: {ipv4-addr}
join:
qualifiers:
- repeats:
value: 5
expressions:
- comparison:
object: ipv4-addr
path: [value]
negated:
operator: '='
value: '1.2.3.4'
multiple-qualifiers:
expression: >
[ipv4-addr:value = '1.2.3.4'] REPEATS 5 TIMES WITHIN 10 SECONDS
pattern:
observation:
objects: {ipv4-addr}
join:
qualifiers:
- repeats:
value: 5
- within:
value: 10
unit: SECONDS
expressions:
- comparison:
object: ipv4-addr
path: [value]
negated:
operator: '='
value: '1.2.3.4'
joined-observation:
expression: >
[domain-name:value = 'xyz.com'] AND
[file:name = 'test.exe']
pattern:
expression:
join: AND
qualifiers:
expressions:
- observation:
objects: {domain-name}
join:
qualifiers:
expressions:
- comparison:
object: domain-name
path: [value]
negated:
operator: '='
value: xyz.com
- observation:
objects: {file}
join:
qualifiers:
expressions:
- comparison:
object: file
path: [name]
negated:
operator: '='
value: test.exe
stix2-patterning-example:
source:
expression: >
(
[ipv4-addr:value = '198.51.100.1/32' OR
ipv4-addr:value = '203.0.113.33/32' OR
ipv6-addr:value = '2001:0db8:dead:beef:dead:beef:dead:0001/128']
FOLLOWEDBY [
domain-name:value = 'example.com']
) WITHIN 600 SECONDS
pattern:
expression:
join: FOLLOWEDBY
qualifiers:
- within:
value: 600
unit: SECONDS
expressions:
- observation:
objects:
? ipv4-addr
? ipv6-addr
join: OR
qualifiers:
expressions:
- comparison:
object: ipv4-addr
path: [value]
negated:
operator: '='
value: 198.51.100.1/32
- comparison:
object: ipv4-addr
path: [value]
negated:
operator: '='
value: 203.0.113.33/32
- comparison:
object: ipv6-addr
path: [value]
negated:
operator: '='
value: 2001:0db8:dead:beef:dead:beef:dead:0001/128
- observation:
objects: {domain-name}
join:
qualifiers:
expressions:
- comparison:
object: domain-name
path: [value]
negated:
operator: '='
value: example.com
''', Loader=PatternTreeLoader)
def get_tests() -> Iterable[Tuple[str, str, dict]]:
    """Yield (case name, STIX2 expression, expected PatternTree) triples
    built from the TESTS table."""
    for name, case in TESTS.items():
        expected_tree = PatternTree.from_dict({'pattern': case['pattern']})
        yield name, case['expression'], expected_tree
@pytest.mark.parametrize('input,expected', [
    pytest.param(expression, expected, id=name)
    for name, expression, expected in get_tests()
])
def test_dict_tree_visitor(input, expected):
    """Parsing *input* into a dict tree must match the hand-written tree.

    Cleanup: removed the dead self-assignments (`expected = expected`,
    `actual = dict_tree`) and asserts directly.
    """
    pattern = Pattern(input)
    assert expected == pattern.to_dict_tree()
|
import pytest
import asyncio
from system.utils import *
import logging
logger = logging.getLogger(__name__)
@pytest.mark.usefixtures('docker_setup_and_teardown')
class TestAuditSuite:
    """Pool resilience tests: restart/stop/demote nodes (including primaries)
    under write load, then verify the pool stays in sync and functional.
    Nodes are addressed as NodeHost(1..7)."""

    @pytest.mark.asyncio
    async def test_case_restart_one_node(
            self, pool_handler, wallet_handler, get_default_trustee, nodes_num, check_no_failures_fixture
    ):
        """Restart a non-primary and stop/start both old and new primaries,
        writing nyms between each step."""
        trustee_did, _ = get_default_trustee
        test_nodes = [NodeHost(i) for i in range(1, 8)]
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        test_nodes[5].restart_service()
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        await ensure_pool_is_in_sync(nodes_num=nodes_num)
        primary1, alias, target_did = await get_primary(pool_handler, wallet_handler, trustee_did)
        p1 = NodeHost(primary1)
        p1.stop_service()
        primary2 = await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary1)
        p2 = NodeHost(primary2)
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        test_nodes[5].restart_service()
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        p1.start_service()
        p2.stop_service()
        await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary2)
        test_nodes[5].stop_service()
        p2.start_service()
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        test_nodes[5].start_service()
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        await ensure_pool_is_in_sync(nodes_num=nodes_num)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)

    @pytest.mark.parametrize('node_num_shift', [0, 1, 5])
    @pytest.mark.asyncio
    async def test_case_restart_master_backup_non_primary(
            self, pool_handler, wallet_handler, get_default_trustee, node_num_shift, nodes_num,
            check_no_failures_fixture
    ):
        """After a view change, restart the node `node_num_shift` places
        after the new primary (primary / backup primary / non-primary)."""
        trustee_did, _ = get_default_trustee
        primary1, alias, target_did = await get_primary(pool_handler, wallet_handler, trustee_did)
        p1 = NodeHost(primary1)
        p1.stop_service()
        primary2 = await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary1)
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        p1.start_service()
        # NOTE(review): int(primary2) + node_num_shift may exceed the highest
        # node number for large shifts — confirm NodeHost handles that.
        next_node = NodeHost(int(primary2) + node_num_shift)
        next_node.restart_service()
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        await ensure_pool_is_in_sync(nodes_num=nodes_num)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)

    @pytest.mark.asyncio
    async def test_case_restart_all_nodes_at_the_same_time(
            self, pool_handler, wallet_handler, get_default_trustee, nodes_num, check_no_failures_fixture
    ):
        """Force a view change, restart the whole pool at once, and check
        the primary survives the restart unchanged."""
        trustee_did, _ = get_default_trustee
        test_nodes = [NodeHost(i) for i in range(1, 8)]
        logger.info("1: Initiating a view change by stopping master primary")
        primary1, alias, target_did = await get_primary(pool_handler, wallet_handler, trustee_did)
        p1 = NodeHost(primary1)
        p1.stop_service()
        logger.info("2: Ensure that primary has been changed")
        primary2 = await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary1)
        logger.info("3: Ensure pool works")
        await check_pool_is_functional(pool_handler, wallet_handler, trustee_did, nyms_count=15)
        p1.start_service()
        logger.info("4: Restarting the pool")
        restart_pool(test_nodes)
        logger.info("5: Ensure pool is in sync")
        await ensure_pool_is_in_sync(nodes_num=nodes_num)
        logger.info("6: Ensure that primary has not been changed")
        primary_after_restart, _, _ = await get_primary(pool_handler, wallet_handler, trustee_did)
        assert primary_after_restart == primary2
        logger.info("7: Ensure pool works")
        await ensure_pool_is_functional(
            pool_handler, wallet_handler, trustee_did, nyms_count=30
        )

    @pytest.mark.asyncio
    async def test_case_restart_f_nodes(
            self, pool_handler, wallet_handler, get_default_trustee, nodes_num, check_no_failures_fixture
    ):
        """Restart f nodes (the last 2 of 7) after a view change."""
        trustee_did, _ = get_default_trustee
        test_nodes = [NodeHost(i) for i in range(1, 8)]
        primary1, alias, target_did = await get_primary(pool_handler, wallet_handler, trustee_did)
        p1 = NodeHost(primary1)
        p1.stop_service()
        await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary1)
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        p1.start_service()
        for node in test_nodes[5:]:
            node.restart_service()
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        await ensure_pool_is_in_sync(nodes_num=nodes_num)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)

    @pytest.mark.asyncio
    async def test_case_restart_n_minus_f_minus_one_nodes(
            self, pool_handler, wallet_handler, get_default_trustee, nodes_num, check_no_failures_fixture
    ):
        """Restart n-f-1 nodes (the last 4 of 7) after a view change."""
        trustee_did, _ = get_default_trustee
        test_nodes = [NodeHost(i) for i in range(1, 8)]
        primary1, alias, target_did = await get_primary(pool_handler, wallet_handler, trustee_did)
        p1 = NodeHost(primary1)
        p1.stop_service()
        await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary1)
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        p1.start_service()
        for node in test_nodes[3:]:
            node.restart_service()
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        await ensure_pool_is_in_sync(nodes_num=nodes_num)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)

    @pytest.mark.asyncio
    async def test_case_restart_all_nodes_one_by_one(
            self, pool_handler, wallet_handler, get_default_trustee, nodes_num, check_no_failures_fixture
    ):
        """Restart every node sequentially with a fixed pause between them."""
        trustee_did, _ = get_default_trustee
        test_nodes = [NodeHost(i) for i in range(1, 8)]
        primary1, alias, target_did = await get_primary(pool_handler, wallet_handler, trustee_did)
        p1 = NodeHost(primary1)
        p1.stop_service()
        await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary1)
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        p1.start_service()
        for node in test_nodes:
            node.restart_service()
            # do not remove/change with eventually - it is sequential node stopping
            await asyncio.sleep(10)
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        await ensure_pool_is_in_sync(nodes_num=nodes_num)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)

    @pytest.mark.parametrize('node_num_shift', [0, 1, 5])
    @pytest.mark.asyncio
    async def test_case_demote_master_backup_non_primary(
            self, pool_handler, wallet_handler, get_default_trustee, node_num_shift, nodes_num,
            check_no_failures_fixture
    ):
        """Demote then promote the node `node_num_shift` places after the new
        primary, checking the primary changes at each step."""
        trustee_did, _ = get_default_trustee
        primary1, alias1, target_did1 = await get_primary(pool_handler, wallet_handler, trustee_did)
        print('Primary at the beginning is {}'.format(primary1))
        p1 = NodeHost(primary1)
        p1.stop_service()
        primary2 = await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary1)
        print('Primary after service stop is {}'.format(primary2))
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        p1.start_service()
        primary, _, _ = await get_primary(pool_handler, wallet_handler, trustee_did)
        print('Primary after service start is {}'.format(primary))
        # demote master primary / backup primary / non primary here
        alias_for_demotion = 'Node{}'.format(int(primary2)+node_num_shift)
        print(alias_for_demotion)
        target_did_for_demotion = get_pool_info(primary2)[alias_for_demotion]
        print(target_did_for_demotion)
        primary, _, _ = await get_primary(pool_handler, wallet_handler, trustee_did)
        print('Primary before demotion is {}'.format(primary))
        await eventually(
            demote_node, pool_handler, wallet_handler, trustee_did, alias_for_demotion, target_did_for_demotion
        )
        primary3 = await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary2)
        print('Primary after demotion is {}'.format(primary3))
        await ensure_pool_performs_write_read(pool_handler, wallet_handler, trustee_did, nyms_count=5)
        await eventually(
            promote_node, pool_handler, wallet_handler, trustee_did, alias_for_demotion, target_did_for_demotion
        )
        primary4 = await ensure_primary_changed(pool_handler, wallet_handler, trustee_did, primary3)
        print('Primary after promotion is {}'.format(primary4))
        await ensure_pool_is_in_sync(nodes_num=nodes_num)
        await ensure_pool_is_functional(pool_handler, wallet_handler, trustee_did)
|
<filename>tensorflow/python/eager/wrap_function.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining legacy-graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
class VariableHolder(object):
  """Tracks every variable created while running a wrapped python function."""

  def __init__(self, fn):
    self._fn = fn
    self._variables = []

  def variable_creator_scope(self, next_creator, **kwargs):
    """Record the variable produced by *next_creator* before returning it."""
    created = next_creator(**kwargs)
    self._variables.append(created)
    return created

  def __call__(self, *args, **kwargs):
    with variable_scope.variable_creator_scope(self.variable_creator_scope):
      return self._fn(*args, **kwargs)
class WrappedFunction(function.Function):
  """Wraps a tf V1 piece of code in a function."""

  def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
    super(WrappedFunction, self).__init__(
        fn_graph, attrs=attrs, signature=signature)
    # Keeps the variables created during tracing alive with this function.
    self._variable_holder = variable_holder

  def prune(self, feeds, fetches):
    """Return a new WrappedFunction computing only *fetches* from *feeds*.

    Both arguments are (possibly nested) structures of tensors that must
    belong to this function's graph; ValueError is raised otherwise.
    """
    flat_feeds, flat_fetches = nest.flatten(feeds), nest.flatten(fetches)
    for f in flat_feeds + flat_fetches:
      if not isinstance(f, ops.Tensor):
        raise ValueError("Feeds and fetches must be tensors.")
      if f.graph is not self._func_graph:
        raise ValueError(
            "Can only prune function whose feeds and fetches "
            "are from this graph (%s). Tensor %s from graph %s" % (
                self._func_graph, f, f.graph))
    with self._func_graph.as_default():
      pruned_graph = func_graph.FuncGraph("pruned")
      # identity_n creates a single op depending on every fetch, giving
      # lift_to_graph one sink from which to copy the needed subgraph.
      sink_tensor = array_ops.identity_n(flat_fetches)[0]
    lift_map = lift_to_graph.lift_to_graph(
        sink_tensor, pruned_graph, sources=flat_feeds)
    pruned_graph.outputs.extend(lift_map[x] for x in flat_fetches)
    pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
    pruned_fn = WrappedFunction(
        pruned_graph, variable_holder=self._variable_holder)
    pruned_fn._num_positional_args = len(flat_feeds)  # pylint: disable=protected-access
    pruned_fn._arg_keywords = []  # pylint: disable=protected-access
    return pruned_fn
def wrap_function(fn, signature, name=None):
  """Trace the TF 1.x-style python callable `fn` into a graph function.

  `fn` is called once with symbolic arguments described by `signature`.
  Variables it creates are captured and owned by the returned object, and
  the resulting graph function can be called with tensors matching the
  signature. Calling `wrap_function` again produces a new trace with a new
  set of variables.

  ```python
  def f(x, do_add):
    v = tf.Variable(5.0)
    if do_add:
      op = v.assign_add(x)
    else:
      op = v.assign_sub(x)
    with tf.control_dependencies([op]):
      return v.read_value()

  f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True])
  assert float(f_add(1.0)) == 6.0
  assert float(f_add(1.0)) == 7.0
  ```

  Args:
    fn: python function to be wrapped
    signature: the placeholder and python arguments to be passed to the
      wrapped function
    name: Optional. The name of the function.

  Returns:
    the wrapped graph function.
  """
  variable_holder = VariableHolder(fn)
  traced_graph = func_graph.func_graph_from_py_func(
      name,
      variable_holder,
      args=None,
      kwargs=None,
      signature=signature,
      add_control_dependencies=False)
  return WrappedFunction(
      traced_graph, variable_holder=variable_holder, signature=signature)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Ekta Space PS 5 Colourspace
===========================
Defines the *Ekta Space PS 5* colourspace:
- :attr:`EKTA_SPACE_PS_5_COLOURSPACE`.
See Also
--------
`RGB Colourspaces IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/rgb.ipynb>`_ # noqa
References
----------
.. [1] http://www.josephholmes.com/Ekta_Space.zip
(Last accessed 13 April 2014)
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.colorimetry import ILLUMINANTS
from colour.models import RGB_Colourspace, normalised_primary_matrix
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['EKTA_SPACE_PS_5_PRIMARIES',
'EKTA_SPACE_PS_5_WHITEPOINT',
'EKTA_SPACE_PS_5_TO_XYZ_MATRIX',
'XYZ_TO_EKTA_SPACE_PS_5_MATRIX',
'EKTA_SPACE_PS_5_TRANSFER_FUNCTION',
'EKTA_SPACE_PS_5_INVERSE_TRANSFER_FUNCTION',
'EKTA_SPACE_PS_5_COLOURSPACE']
EKTA_SPACE_PS_5_PRIMARIES = np.array(
[[0.6947368421052631, 0.30526315789473685],
[0.26000000000000001, 0.69999999999999996],
[0.10972850678733032, 0.0045248868778280547]])
"""
*Ekta Space PS 5* colourspace primaries.
EKTA_SPACE_PS_5_PRIMARIES : ndarray, (3, 2)
"""
EKTA_SPACE_PS_5_WHITEPOINT = ILLUMINANTS.get(
'CIE 1931 2 Degree Standard Observer').get('D50')
"""
*Ekta Space PS 5* colourspace whitepoint.
EKTA_SPACE_PS_5_WHITEPOINT : tuple
"""
EKTA_SPACE_PS_5_TO_XYZ_MATRIX = normalised_primary_matrix(
EKTA_SPACE_PS_5_PRIMARIES, EKTA_SPACE_PS_5_WHITEPOINT)
"""
*Ekta Space PS 5* colourspace to *CIE XYZ* colourspace matrix.
EKTA_SPACE_PS_5_TO_XYZ_MATRIX : array_like, (3, 3)
"""
XYZ_TO_EKTA_SPACE_PS_5_MATRIX = np.linalg.inv(EKTA_SPACE_PS_5_TO_XYZ_MATRIX)
"""
*CIE XYZ* colourspace to *Ekta Space PS 5* colourspace matrix.
XYZ_TO_EKTA_SPACE_PS_5_MATRIX : array_like, (3, 3)
"""
EKTA_SPACE_PS_5_TRANSFER_FUNCTION = lambda x: x ** (1 / 2.2)
"""
Transfer function from linear to *Ekta Space PS 5* colourspace.
EKTA_SPACE_PS_5_TRANSFER_FUNCTION : object
"""
EKTA_SPACE_PS_5_INVERSE_TRANSFER_FUNCTION = lambda x: x ** 2.2
"""
Inverse transfer function from *Ekta Space PS 5* colourspace to linear.
EKTA_SPACE_PS_5_INVERSE_TRANSFER_FUNCTION : object
"""
EKTA_SPACE_PS_5_COLOURSPACE = RGB_Colourspace(
'Ekta Space PS 5',
EKTA_SPACE_PS_5_PRIMARIES,
EKTA_SPACE_PS_5_WHITEPOINT,
EKTA_SPACE_PS_5_TO_XYZ_MATRIX,
XYZ_TO_EKTA_SPACE_PS_5_MATRIX,
EKTA_SPACE_PS_5_TRANSFER_FUNCTION,
EKTA_SPACE_PS_5_INVERSE_TRANSFER_FUNCTION)
"""
*Ekta Space PS 5* colourspace.
EKTA_SPACE_PS_5_COLOURSPACE : RGB_Colourspace
"""
|
<reponame>Vultaire/charm-ceilometer
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from mock import patch, MagicMock, call
# python-apt is not installed as part of test-requirements but is imported by
# some charmhelpers modules so create a fake import.
mock_apt = MagicMock()
sys.modules['apt'] = mock_apt
mock_apt.apt_pkg = MagicMock()
import ceilometer_utils
# Patch out register_configs while ceilometer_hooks is imported, so the
# import itself does not try to build real charm configs.
_register_configs = ceilometer_utils.register_configs
ceilometer_utils.register_configs = MagicMock()
# Make the @harden decorator a transparent pass-through for the duration of
# the import, so hook functions are not wrapped in hardening behaviour.
with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
    mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
                            lambda *args, **kwargs: f(*args, **kwargs))
    import ceilometer_hooks as hooks
# Re-enable the original function now that the import is complete.
ceilometer_utils.register_configs = _register_configs
from test_utils import CharmTestCase
# Names in the hooks module (and its helpers) that CharmTestCase replaces
# with mocks for every test; each entry is exposed as ``self.<name>``.
TO_PATCH = [
    'relation_get',
    'relation_set',
    'configure_installation_source',
    'openstack_upgrade_available',
    'do_openstack_upgrade',
    'apt_install',
    'apt_update',
    'open_port',
    'close_port',
    'config',
    'log',
    'relation_ids',
    'filter_installed_packages',
    'CONFIGS',
    'get_ceilometer_context',
    'lsb_release',
    'get_packages',
    'service_restart',
    'update_nrpe_config',
    'peer_retrieve',
    'peer_store',
    'configure_https',
    'status_set',
    'update_dns_ha_resource_params',
    'reload_systemd',
    'run_in_apache',
    'mkdir',
    'init_is_systemd',
    'get_relation_ip',
    'is_clustered',
    'get_os_codename_install_source',
]
class CeilometerHooksTest(CharmTestCase):
    """Unit tests for the ceilometer charm's hook handlers.

    Hooks are exercised through ``hooks.hooks.execute([...])`` so the charm's
    own dispatch path is covered; collaborators listed in TO_PATCH are mocked
    by CharmTestCase and available on the test as ``self.<name>``.
    """
    def setUp(self):
        super(CeilometerHooksTest, self).setUp(hooks, TO_PATCH)
        # config() reads from the in-memory test config rather than Juju.
        self.config.side_effect = self.test_config.get
        self.get_packages.return_value = \
            ceilometer_utils.CEILOMETER_BASE_PACKAGES
        self.filter_installed_packages.return_value = \
            ceilometer_utils.CEILOMETER_BASE_PACKAGES
        self.lsb_release.return_value = {'DISTRIB_CODENAME': 'precise'}
        self.get_os_codename_install_source.return_value = 'mitaka'
    @patch('charmhelpers.payload.execd.default_execd_dir',
           return_value=os.path.join(os.getcwd(), 'exec.d'))
    @patch('charmhelpers.core.hookenv.config')
    def test_configure_source(self, mock_config, mock_execd_dir):
        self.test_config.set('openstack-origin', 'cloud:precise-havana')
        hooks.hooks.execute(['hooks/install.real'])
        self.configure_installation_source.\
            assert_called_with('cloud:precise-havana')
    @patch('charmhelpers.payload.execd.default_execd_dir',
           return_value=os.path.join(os.getcwd(), 'exec.d'))
    @patch('charmhelpers.core.hookenv.config')
    def test_install_hook_precise(self, mock_config, mock_execd_dir):
        hooks.hooks.execute(['hooks/install.real'])
        self.configure_installation_source.\
            assert_called_with('cloud:precise-grizzly')
        self.apt_update.assert_called_with(fatal=True)
        self.apt_install.assert_called_with(
            ceilometer_utils.CEILOMETER_BASE_PACKAGES,
            fatal=True
        )
    @patch('charmhelpers.payload.execd.default_execd_dir',
           return_value=os.path.join(os.getcwd(), 'exec.d'))
    @patch('charmhelpers.core.hookenv.config')
    def test_install_hook_distro(self, mock_config, mock_execd_dir):
        self.lsb_release.return_value = {'DISTRIB_CODENAME': 'saucy'}
        hooks.hooks.execute(['hooks/install.real'])
        self.configure_installation_source.\
            assert_called_with('distro')
        self.apt_update.assert_called_with(fatal=True)
        self.apt_install.assert_called_with(
            ceilometer_utils.CEILOMETER_BASE_PACKAGES,
            fatal=True
        )
    @patch('charmhelpers.core.hookenv.config')
    def test_amqp_joined(self, mock_config):
        hooks.hooks.execute(['hooks/amqp-relation-joined'])
        self.relation_set.assert_called_with(
            username=self.test_config.get('rabbit-user'),
            vhost=self.test_config.get('rabbit-vhost'))
    @patch('charmhelpers.core.hookenv.config')
    def test_db_joined(self, mock_config):
        hooks.hooks.execute(['hooks/shared-db-relation-joined'])
        self.relation_set.assert_called_with(
            ceilometer_database='ceilometer')
    @patch.object(hooks, 'ceilometer_upgrade')
    @patch.object(hooks, 'keystone_joined')
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'ceilometer_joined')
    def test_any_changed_with_metrics(self, ceilometer_joined, mock_config,
                                      keystone_joined, ceilometer_upgrade):
        # With all required contexts complete, the upgrade path is triggered.
        self.CONFIGS.complete_contexts.return_value = [
            'metric-service',
            'identity-service',
            'mongodb'
        ]
        self.relation_ids.return_value = ['identity-service:1']
        hooks.hooks.execute(['hooks/shared-db-relation-changed'])
        self.CONFIGS.write_all.assert_called_once()
        ceilometer_joined.assert_called_once()
        keystone_joined.assert_called_with(relid='identity-service:1')
        ceilometer_upgrade.assert_called_once()
        self.configure_https.assert_called_once()
    @patch.object(hooks, 'ceilometer_upgrade')
    @patch.object(hooks, 'keystone_joined')
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'ceilometer_joined')
    def test_any_changed(self, ceilometer_joined, mock_config,
                         keystone_joined, ceilometer_upgrade):
        # Without complete contexts, no ceilometer upgrade is attempted.
        self.relation_ids.return_value = ['identity-service:1']
        hooks.hooks.execute(['hooks/shared-db-relation-changed'])
        self.assertTrue(self.CONFIGS.write_all.called)
        self.assertTrue(ceilometer_joined.called)
        keystone_joined.assert_called_with(relid='identity-service:1')
        ceilometer_upgrade.assert_not_called()
        self.configure_https.assert_called_once()
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'install')
    @patch.object(hooks, 'any_changed')
    def test_upgrade_charm(self, changed, install, mock_config):
        hooks.hooks.execute(['hooks/upgrade-charm'])
        self.assertTrue(changed.called)
        self.assertTrue(install.called)
    @patch.object(hooks, 'any_changed')
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'cluster_joined')
    def test_upgrade_charm_with_cluster(self, cluster_joined, mock_config,
                                        any_changed):
        # cluster_joined must run once per peer relation id.
        self.relation_ids.return_value = ['ceilometer/0',
                                          'ceilometer/1',
                                          'ceilometer/2']
        hooks.hooks.execute(['hooks/upgrade-charm'])
        self.assertEqual(cluster_joined.call_count, 3)
        any_changed.assert_called_once()
    @patch.object(hooks, 'install_event_pipeline_setting')
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'ceilometer_joined')
    def test_config_changed_no_upgrade(self,
                                       joined, mock_config, event_pipe):
        self.openstack_upgrade_available.return_value = False
        hooks.hooks.execute(['hooks/config-changed'])
        self.openstack_upgrade_available.\
            assert_called_with('ceilometer-common')
        self.assertFalse(self.do_openstack_upgrade.called)
        self.assertTrue(event_pipe.called)
        self.assertTrue(self.CONFIGS.write_all.called)
        self.assertTrue(joined.called)
        self.assertTrue(self.reload_systemd.called)
        self.open_port.assert_called_with(hooks.CEILOMETER_PORT)
    @patch.object(hooks, 'install_event_pipeline_setting')
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'ceilometer_joined')
    def test_config_changed_queens(self,
                                   joined, mock_config, event_pipe):
        # On queens the charm closes its API port rather than opening it.
        self.openstack_upgrade_available.return_value = False
        self.get_os_codename_install_source.return_value = 'queens'
        hooks.hooks.execute(['hooks/config-changed'])
        self.openstack_upgrade_available.\
            assert_called_with('ceilometer-common')
        self.assertFalse(self.do_openstack_upgrade.called)
        self.assertTrue(event_pipe.called)
        self.assertTrue(self.CONFIGS.write_all.called)
        self.assertTrue(joined.called)
        self.assertTrue(self.reload_systemd.called)
        self.close_port.assert_called_with(hooks.CEILOMETER_PORT)
        self.open_port.assert_not_called()
    @patch.object(hooks, 'install_event_pipeline_setting')
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'ceilometer_joined')
    def test_config_changed_upgrade(self,
                                    joined, mock_config, event_pipe):
        self.openstack_upgrade_available.return_value = True
        hooks.hooks.execute(['hooks/config-changed'])
        self.openstack_upgrade_available.\
            assert_called_with('ceilometer-common')
        self.assertTrue(self.do_openstack_upgrade.called)
        self.assertTrue(event_pipe.called)
        self.assertTrue(self.CONFIGS.write_all.called)
        self.assertTrue(joined.called)
        self.assertTrue(self.reload_systemd.called)
        self.open_port.assert_called_with(hooks.CEILOMETER_PORT)
    @patch.object(hooks, 'install_event_pipeline_setting')
    def test_config_changed_with_openstack_upgrade_action(self,
                                                          event_pipe):
        # action-managed-upgrade=True defers the upgrade to a Juju action.
        self.openstack_upgrade_available.return_value = True
        self.test_config.set('action-managed-upgrade', True)
        hooks.hooks.execute(['hooks/config-changed'])
        self.assertFalse(self.do_openstack_upgrade.called)
        self.assertTrue(event_pipe.called)
        self.open_port.assert_called_with(hooks.CEILOMETER_PORT)
    def test_keystone_credentials_joined(self):
        hooks.hooks.execute(['hooks/identity-credentials-relation-joined'])
        self.relation_set.assert_called_with(
            username=hooks.CEILOMETER_SERVICE,
            requested_roles=hooks.CEILOMETER_ROLE,
            relation_id=None)
    @patch.object(hooks, 'canonical_url')
    @patch('charmhelpers.core.hookenv.config')
    def test_keystone_joined(self, mock_config, _canonical_url):
        _canonical_url.return_value = "http://thishost"
        self.test_config.set('region', 'myregion')
        hooks.hooks.execute(['hooks/identity-service-relation-joined'])
        url = "http://{}:{}".format('thishost', hooks.CEILOMETER_PORT)
        self.relation_set.assert_called_with(
            service=hooks.CEILOMETER_SERVICE,
            public_url=url, admin_url=url, internal_url=url,
            requested_roles=hooks.CEILOMETER_ROLE,
            region='myregion', relation_id=None)
    @patch('charmhelpers.contrib.openstack.ip.service_name',
           lambda *args: 'ceilometer')
    @patch('charmhelpers.contrib.openstack.ip.unit_get')
    @patch('charmhelpers.contrib.openstack.ip.is_clustered')
    @patch('charmhelpers.core.hookenv.config')
    @patch('charmhelpers.contrib.openstack.ip.config')
    def test_keystone_joined_url_override(self, _config, mock_config,
                                          _is_clustered, _unit_get):
        # os-public-hostname overrides only the public endpoint URL.
        _unit_get.return_value = "thishost"
        _is_clustered.return_value = False
        _config.side_effect = self.test_config.get
        mock_config.side_effect = self.test_config.get
        self.test_config.set('region', 'myregion')
        self.test_config.set('os-public-hostname', 'ceilometer.example.com')
        hooks.keystone_joined(None)
        url = "http://{}:{}".format('thishost', hooks.CEILOMETER_PORT)
        public_url = "http://{}:{}".format('ceilometer.example.com',
                                           hooks.CEILOMETER_PORT)
        self.relation_set.assert_called_with(
            service=hooks.CEILOMETER_SERVICE,
            public_url=public_url, admin_url=url, internal_url=url,
            requested_roles=hooks.CEILOMETER_ROLE,
            region='myregion', relation_id=None)
    def test_keystone_joined_partial_cluster(self):
        # A configured vip without a formed cluster must not advertise URLs.
        self.is_clustered.return_value = False
        self.test_config.set('vip', '10.0.0.10')
        hooks.keystone_joined()
        self.assertFalse(self.relation_set.called)
    @patch('charmhelpers.core.hookenv.config')
    def test_ceilometer_joined(self, mock_config):
        self.relation_ids.return_value = ['ceilometer:0']
        self.get_ceilometer_context.return_value = {'test': 'data'}
        hooks.hooks.execute(['hooks/ceilometer-service-relation-joined'])
        self.relation_set.assert_called_with('ceilometer:0',
                                             {'test': 'data'})
    @patch('charmhelpers.core.hookenv.config')
    def test_identity_notifications_changed(self, mock_config):
        # First run with no endpoint-changed flag, then with it set; only
        # the second run should restart the alarm services, in order.
        self.relation_ids.return_value = ['keystone-notifications:0']
        self.relation_get.return_value = None
        hooks.hooks.execute(['hooks/identity-notifications-relation-changed'])
        self.relation_get.return_value = {('%s-endpoint-changed' %
                                           (hooks.CEILOMETER_SERVICE)): 1}
        hooks.hooks.execute(['hooks/identity-notifications-relation-changed'])
        call1 = call('ceilometer-alarm-evaluator')
        call2 = call('ceilometer-alarm-notifier')
        self.service_restart.assert_has_calls([call1, call2], any_order=False)
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'is_elected_leader')
    def test_cluster_joined_not_leader(self, mock_leader,
                                       mock_config):
        mock_leader.return_value = False
        hooks.hooks.execute(['hooks/cluster-relation-joined'])
        self.assertTrue(self.relation_set.called)
        self.assertTrue(self.CONFIGS.write_all.called)
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'get_shared_secret')
    @patch.object(hooks, 'is_elected_leader')
    def test_cluster_joined_is_leader(self, mock_leader,
                                      shared_secret, mock_config):
        # Only the leader pushes the shared secret to its peers.
        mock_leader.return_value = True
        shared_secret.return_value = 'secret'
        hooks.hooks.execute(['hooks/cluster-relation-joined'])
        self.assertTrue(self.peer_store.called)
        self.peer_store.assert_called_with('shared_secret', 'secret')
        self.assertTrue(self.CONFIGS.write_all.called)
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'is_elected_leader')
    def test_cluster_joined(self, mock_leader, mock_config):
        mock_leader.return_value = False
        self.get_relation_ip.side_effect = [
            '10.0.0.100', '10.0.1.100', '10.0.2.100', '10.0.3.100']
        rel_settings = {'private-address': '10.0.3.100',
                        'public-address': '10.0.2.100',
                        'internal-address': '10.0.1.100',
                        'admin-address': '10.0.0.100'}
        hooks.hooks.execute(['hooks/cluster-relation-joined'])
        self.relation_set.assert_called_with(relation_id=None,
                                             relation_settings=rel_settings)
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'set_shared_secret')
    def test_cluster_changed(self, shared_secret, mock_config):
        self.peer_retrieve.return_value = None
        hooks.hooks.execute(['hooks/cluster-relation-changed'])
        self.assertFalse(shared_secret.called)
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'get_shared_secret')
    @patch.object(hooks, 'set_shared_secret')
    def test_cluster_changed_new_secret(self, mock_set_secret, mock_get_secret,
                                        mock_config):
        # Peer-stored secret differs from the local one: adopt the leader's.
        self.peer_retrieve.return_value = "leader_secret"
        mock_get_secret.return_value = "my_secret"
        hooks.hooks.execute(['hooks/cluster-relation-changed'])
        mock_set_secret.assert_called_with("leader_secret")
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'get_shared_secret')
    @patch.object(hooks, 'set_shared_secret')
    def test_cluster_changed_old_secret(self, mock_set_secret, mock_get_secret,
                                        mock_config):
        # Secrets already match: nothing to write.
        self.peer_retrieve.return_value = "leader_secret"
        mock_get_secret.return_value = "leader_secret"
        hooks.hooks.execute(['hooks/cluster-relation-changed'])
        self.assertEqual(mock_set_secret.call_count, 0)
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'get_hacluster_config')
    @patch.object(hooks, 'get_iface_for_address')
    @patch.object(hooks, 'get_netmask_for_address')
    def test_ha_joined(self, mock_netmask, mock_iface, mock_cluster_config,
                       mock_config):
        mock_cluster_config.return_value = {'vip': '10.0.5.100',
                                            'ha-bindiface': 'bnd0',
                                            'ha-mcastport': 5802}
        mock_iface.return_value = 'eth0'
        mock_netmask.return_value = '255.255.255.10'
        hooks.hooks.execute(['hooks/ha-relation-joined'])
        self.assertEqual(self.relation_set.call_count, 2)
        exp_resources = {
            'res_ceilometer_haproxy': 'lsb:haproxy',
            'res_ceilometer_agent_central': 'lsb:ceilometer-agent-central',
            'res_ceilometer_eth0_vip': 'ocf:heartbeat:IPaddr2'
        }
        exp_resource_params = {
            'res_ceilometer_haproxy': 'op monitor interval="5s"',
            'res_ceilometer_agent_central': 'op monitor interval="30s"',
            'res_ceilometer_eth0_vip': ('params ip="10.0.5.100" '
                                        'cidr_netmask="255.255.255.10" '
                                        'nic="eth0"')
        }
        exp_clones = {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
        call1 = call(relation_id=None,
                     groups={'grp_ceilometer_vips': 'res_ceilometer_eth0_vip'})
        call2 = call(relation_id=None,
                     init_services={'res_ceilometer_haproxy': 'haproxy'},
                     corosync_bindiface='bnd0',
                     corosync_mcastport=5802,
                     resources=exp_resources,
                     resource_params=exp_resource_params,
                     delete_resources=['res_ceilometer_polling'],
                     clones=exp_clones)
        self.relation_set.assert_has_calls([call1, call2], any_order=False)
    @patch.object(hooks, 'get_hacluster_config')
    def test_ha_joined_dns_ha(self, mock_cluster_config):
        # update_dns_ha_resource_params is faked to inject the DNS resource
        # entries the hook is expected to relay on the relation.
        def _fake_update(resources, resource_params, relation_id=None):
            resources.update({'res_ceilometer_public_hostname':
                              'ocf:maas:dns'})
            resource_params.update({'res_ceilometer_public_hostname':
                                    'params fqdn="ceilometer.maas" '
                                    'ip_address="10.0.0.1"'})
        self.test_config.set('dns-ha', True)
        mock_cluster_config.return_value = {
            'vip': None,
            'ha-bindiface': 'em0',
            'ha-mcastport': '8080',
            'os-admin-hostname': None,
            'os-internal-hostname': None,
            'os-public-hostname': 'ceilometer.maas',
        }
        args = {
            'relation_id': None,
            'corosync_bindiface': 'em0',
            'corosync_mcastport': '8080',
            'init_services': {'res_ceilometer_haproxy': 'haproxy'},
            'resources': {'res_ceilometer_public_hostname': 'ocf:maas:dns',
                          'res_ceilometer_haproxy': 'lsb:haproxy',
                          'res_ceilometer_agent_central':
                          'lsb:ceilometer-agent-central'},
            'resource_params': {
                'res_ceilometer_public_hostname':
                'params fqdn="ceilometer.maas" '
                'ip_address="10.0.0.1"',
                'res_ceilometer_haproxy': 'op monitor interval="5s"',
                'res_ceilometer_agent_central': 'op monitor interval="30s"'},
            'delete_resources': ['res_ceilometer_polling'],
            'clones': {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
        }
        self.update_dns_ha_resource_params.side_effect = _fake_update
        hooks.ha_joined()
        self.assertTrue(self.update_dns_ha_resource_params.called)
        self.relation_set.assert_called_with(**args)
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'keystone_joined')
    def test_ha_changed_not_clustered(self, mock_keystone_joined, mock_config):
        self.relation_get.return_value = None
        hooks.hooks.execute(['hooks/ha-relation-changed'])
        self.assertEqual(mock_keystone_joined.call_count, 0)
    @patch('charmhelpers.core.hookenv.config')
    @patch.object(hooks, 'keystone_joined')
    def test_ha_changed_clustered(self, mock_keystone_joined, mock_config):
        self.relation_get.return_value = 'yes'
        self.relation_ids.return_value = ['identity-service/0']
        hooks.hooks.execute(['hooks/ha-relation-changed'])
        self.assertEqual(mock_keystone_joined.call_count, 1)
    def test_metric_service_joined(self):
        self.filter_installed_packages.return_value = ['python-gnocchiclient']
        hooks.hooks.execute(['hooks/metric-service-relation-joined'])
        self.filter_installed_packages.assert_called_with(
            ['python-gnocchiclient']
        )
        self.apt_install.assert_called_with(['python-gnocchiclient'],
                                            fatal=True)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# code for dealing with CQL's syntax, rules, interpretation
# i.e., stuff that's not necessarily cqlsh-specific
import re
import traceback
from . import pylexotron, util, helptopics
from cql import cqltypes
# Shorthand for the hint objects the completion machinery yields.
Hint = pylexotron.Hint
# Names of built-in system keyspaces.
SYSTEM_KEYSPACES = ('system',)
# On-line help topics for CQL 2.
cqldocs = helptopics.CQL2HelpTopics()
class CqlParsingRuleSet(pylexotron.ParsingRuleSet):
    """Parsing, splitting, and tab-completion support for CQL statements.

    Extends pylexotron's ParsingRuleSet with CQL-specific token massaging
    (newline-terminated commands, BEGIN BATCH handling), completion plumbing,
    and quoting/escaping helpers for names and values.
    """
    # Reserved words; each also becomes a <K_WORD> terminal via
    # set_keywords_as_syntax().
    keywords = set((
        'select', 'from', 'where', 'and', 'key', 'insert', 'update', 'with',
        'limit', 'using', 'consistency', 'one', 'quorum', 'all', 'any',
        'local_quorum', 'each_quorum', 'two', 'three', 'use', 'count', 'set',
        'begin', 'apply', 'batch', 'truncate', 'delete', 'in', 'create',
        'keyspace', 'schema', 'columnfamily', 'table', 'index', 'on', 'drop',
        'primary', 'into', 'values', 'timestamp', 'ttl', 'alter', 'add', 'type',
        'first', 'reversed'
    ))
    columnfamily_options = (
        # (CQL option name, Thrift option name (or None if same))
        ('comment', None),
        ('comparator', 'comparator_type'),
        ('read_repair_chance', None),
        ('gc_grace_seconds', None),
        ('default_validation', 'default_validation_class'),
        ('min_compaction_threshold', None),
        ('max_compaction_threshold', None),
        ('replicate_on_write', None),
        ('compaction_strategy_class', 'compaction_strategy'),
        ('populate_io_cache_on_flush', None),
    )
    # Options accepted for compatibility but no longer meaningful.
    obsolete_cf_options = (
        ('key_cache_size', None),
        ('row_cache_size', None),
        ('row_cache_save_period_in_seconds', None),
        ('key_cache_save_period_in_seconds', None),
        ('memtable_throughput_in_mb', None),
        ('memtable_operations_in_millions', None),
        ('memtable_flush_after_mins', None),
        ('row_cache_provider', None),
    )
    all_columnfamily_options = columnfamily_options + obsolete_cf_options
    # (CQL option name, Thrift option name or None, known sub-option names)
    columnfamily_map_options = (
        ('compaction_strategy_options', None,
         ()),
        ('compression_parameters', 'compression_options',
         ('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
    )
    available_compression_classes = (
        'DeflateCompressor',
        'SnappyCompressor',
    )
    available_compaction_classes = (
        'LeveledCompactionStrategy',
        'SizeTieredCompactionStrategy'
    )
    replication_strategies = (
        'SimpleStrategy',
        'OldNetworkTopologyStrategy',
        'NetworkTopologyStrategy'
    )
    # Strategies that take a replication_factor option.
    replication_factor_strategies = (
        'SimpleStrategy',
        'org.apache.cassandra.locator.SimpleStrategy',
        'OldNetworkTopologyStrategy',
        'org.apache.cassandra.locator.OldNetworkTopologyStrategy'
    )
    consistency_levels = (
        'ANY',
        'ONE',
        'TWO',
        'THREE',
        'QUORUM',
        'ALL',
        'LOCAL_QUORUM',
        'EACH_QUORUM'
    )
    # if a term matches this, it shouldn't need to be quoted to be valid cql
    valid_cql_word_re = re.compile(r"^(?:[a-z][a-z0-9_]*|-?[0-9][0-9.]*)$", re.I)
    def __init__(self, *args, **kwargs):
        """Initialise the rule set and register keyword terminals."""
        pylexotron.ParsingRuleSet.__init__(self, *args, **kwargs)
        # note: commands_end_with_newline may be extended by callers.
        self.commands_end_with_newline = set()
        self.set_keywords_as_syntax()
    def completer_for(self, rulename, symname):
        """Decorator factory: register a completion callback for a grammar
        binding. The wrapper only invokes the callback when a live
        'cassandra_conn' binding is present; otherwise it completes nothing.
        """
        def registrator(f):
            def completerwrapper(ctxt):
                cass = ctxt.get_binding('cassandra_conn', None)
                if cass is None:
                    return ()
                return f(ctxt, cass)
            completerwrapper.func_name = 'completerwrapper_on_' + f.func_name
            self.register_completer(completerwrapper, rulename, symname)
            return completerwrapper
        return registrator
    def explain_completion(self, rulename, symname, explanation=None):
        """Register a completer that only shows a hint (default '<symname>')
        instead of concrete candidates.
        """
        if explanation is None:
            explanation = '<%s>' % (symname,)
        @self.completer_for(rulename, symname)
        def explainer(ctxt, cass):
            return [Hint(explanation)]
        return explainer
    def set_keywords_as_syntax(self):
        """Append a '<K_WORD> ::= "word" ;' rule for every keyword."""
        syntax = []
        for k in self.keywords:
            syntax.append('<K_%s> ::= "%s" ;' % (k.upper(), k))
        self.append_rules('\n'.join(syntax))
    def cql_massage_tokens(self, toklist):
        """Rewrite 'endline' tokens: for commands that terminate at a
        newline they become 'endtoken' statement terminators; otherwise
        they are dropped.
        """
        curstmt = []
        output = []
        term_on_nl = False
        for t in toklist:
            if t[0] == 'endline':
                if term_on_nl:
                    t = ('endtoken',) + t[1:]
                else:
                    # don't put any 'endline' tokens in output
                    continue
            curstmt.append(t)
            if t[0] == 'endtoken':
                term_on_nl = False
                output.extend(curstmt)
                curstmt = []
            else:
                if len(curstmt) == 1:
                    # first token in statement; command word
                    cmd = t[1].lower()
                    term_on_nl = bool(cmd in self.commands_end_with_newline)
        output.extend(curstmt)
        return output
    def cql_parse(self, text, startsymbol='Start'):
        """Lex, massage, and parse text, binding '*SRC*' to the source."""
        tokens = self.lex(text)
        tokens = self.cql_massage_tokens(tokens)
        return self.parse(startsymbol, tokens, init_bindings={'*SRC*': text})
    def cql_whole_parse_tokens(self, toklist, srcstr=None, startsymbol='Start'):
        """Parse a pre-lexed token list, requiring it to match completely."""
        return self.whole_match(startsymbol, toklist, srcstr=srcstr)
    def cql_split_statements(self, text):
        """Split text into per-statement token lists.

        BEGIN BATCH ... APPLY BATCH is kept as one statement. Returns
        (statements, in_batch) where in_batch reports an unterminated batch.
        """
        tokens = self.lex(text)
        tokens = self.cql_massage_tokens(tokens)
        stmts = util.split_list(tokens, lambda t: t[0] == 'endtoken')
        output = []
        in_batch = False
        for stmt in stmts:
            if in_batch:
                output[-1].extend(stmt)
            else:
                output.append(stmt)
            if len(stmt) > 2:
                # third-from-last token is APPLY in '... APPLY BATCH ;'
                if stmt[-3][0] == 'K_APPLY':
                    in_batch = False
                elif stmt[0][0] == 'K_BEGIN':
                    in_batch = True
        return output, in_batch
    def cql_complete_single(self, text, partial, init_bindings={}, ignore_case=True,
                            startsymbol='Start'):
        """Compute (candidates, hints) for the word under completion.

        NOTE: the shared mutable default for init_bindings is safe here
        because it is only copied, never mutated.
        """
        tokens = (self.cql_split_statements(text)[0] or [[]])[-1]
        bindings = init_bindings.copy()
        # handle some different completion scenarios- in particular, completing
        # inside a string literal
        prefix = None
        dequoter = util.identity
        lasttype = None
        if tokens:
            lasttype = tokens[-1][0]
            if lasttype == 'unclosedString':
                prefix = self.token_dequote(tokens[-1])
                tokens = tokens[:-1]
                partial = prefix + partial
                dequoter = self.dequote_value
                requoter = self.escape_value
            elif lasttype == 'unclosedName':
                prefix = self.token_dequote(tokens[-1])
                tokens = tokens[:-1]
                partial = prefix + partial
                dequoter = self.dequote_name
                requoter = self.escape_name
            elif lasttype == 'unclosedComment':
                return []
        bindings['partial'] = partial
        bindings['*LASTTYPE*'] = lasttype
        bindings['*SRC*'] = text
        # find completions for the position
        completions = self.complete(startsymbol, tokens, bindings)
        hints, strcompletes = util.list_bifilter(pylexotron.is_hint, completions)
        # it's possible to get a newline token from completion; of course, we
        # don't want to actually have that be a candidate, we just want to hint
        if '\n' in strcompletes:
            strcompletes.remove('\n')
            if partial == '':
                hints.append(Hint('<enter>'))
        # find matches with the partial word under completion
        if ignore_case:
            partial = partial.lower()
            f = lambda s: s and dequoter(s).lower().startswith(partial)
        else:
            f = lambda s: s and dequoter(s).startswith(partial)
        candidates = filter(f, strcompletes)
        if prefix is not None:
            # dequote, re-escape, strip quotes: gets us the right quoted text
            # for completion. the opening quote is already there on the command
            # line and not part of the word under completion, and readline
            # fills in the closing quote for us.
            candidates = [requoter(dequoter(c))[len(prefix)+1:-1] for c in candidates]
            # the above process can result in an empty string; this doesn't help for
            # completions
            candidates = filter(None, candidates)
        # prefix a space when desirable for pleasant cql formatting
        if tokens:
            newcandidates = []
            for c in candidates:
                if self.want_space_between(tokens[-1], c) \
                        and prefix is None \
                        and not text[-1].isspace() \
                        and not c[0].isspace():
                    c = ' ' + c
                newcandidates.append(c)
            candidates = newcandidates
        # append a space for single, complete identifiers
        if len(candidates) == 1 and candidates[0][-1].isalnum() \
                and lasttype != 'unclosedString' \
                and lasttype != 'unclosedName':
            candidates[0] += ' '
        return candidates, hints
    @staticmethod
    def want_space_between(tok, following):
        """Decide whether a space should separate token tok from the
        candidate completion text that follows it.
        """
        if following in (',', ')', ':'):
            return False
        if tok[0] == 'op' and tok[1] in (',', ')', '='):
            return True
        if tok[0] == 'stringLiteral' and following[0] != ';':
            return True
        if tok[0] == 'star' and following[0] != ')':
            return True
        if tok[0] == 'endtoken':
            return True
        if tok[1][-1].isalnum() and following[0] != ',':
            return True
        return False
    def cql_complete(self, text, partial, cassandra_conn=None, ignore_case=True, debug=False,
                     startsymbol='Start'):
        """Top-level completion entry point: returns hint strings followed
        by candidate strings, expanding a single unambiguous candidate as
        far as possible.
        """
        init_bindings = {'cassandra_conn': cassandra_conn}
        if debug:
            init_bindings['*DEBUG*'] = True
            print "cql_complete(%r, partial=%r)" % (text, partial)
        completions, hints = self.cql_complete_single(text, partial, init_bindings,
                                                      startsymbol=startsymbol)
        if hints:
            hints = [h.text for h in hints]
            hints.append('')
        if len(completions) == 1 and len(hints) == 0:
            c = completions[0]
            if debug:
                print "** Got one completion: %r. Checking for further matches...\n" % (c,)
            if not c.isspace():
                new_c = self.cql_complete_multiple(text, c, init_bindings, startsymbol=startsymbol)
                completions = [new_c]
                if debug:
                    print "** New list of completions: %r" % (completions,)
        return hints + completions
    def cql_complete_multiple(self, text, first, init_bindings, startsymbol='Start'):
        """Recursively extend the unique completion 'first' while the grammar
        keeps offering exactly one follow-up (or a common prefix).
        """
        debug = init_bindings.get('*DEBUG*', False)
        try:
            completions, hints = self.cql_complete_single(text + first, '', init_bindings,
                                                          startsymbol=startsymbol)
        except Exception:
            # completion is best-effort: give back what we have so far
            if debug:
                print "** completion expansion had a problem:"
                traceback.print_exc()
            return first
        if hints:
            if not first[-1].isspace():
                first += ' '
            if debug:
                print "** completion expansion found hints: %r" % (hints,)
            return first
        if len(completions) == 1 and completions[0] != '':
            if debug:
                print "** Got another completion: %r." % (completions[0],)
            if completions[0][0] in (',', ')', ':') and first[-1] == ' ':
                first = first[:-1]
            first += completions[0]
        else:
            common_prefix = util.find_common_prefix(completions)
            if common_prefix == '':
                return first
            if common_prefix[0] in (',', ')', ':') and first[-1] == ' ':
                first = first[:-1]
            if debug:
                print "** Got a partial completion: %r." % (common_prefix,)
            first += common_prefix
        if debug:
            print "** New total completion: %r. Checking for further matches...\n" % (first,)
        return self.cql_complete_multiple(text, first, init_bindings, startsymbol=startsymbol)
    @classmethod
    def is_valid_cql_word(cls, s):
        """True if s can appear unquoted in CQL (and is not a keyword)."""
        return cls.valid_cql_word_re.match(s) is not None and s.lower() not in cls.keywords
    @staticmethod
    def cql_extract_orig(toklist, srcstr):
        """Return the original source text covered by a token list."""
        # low end of span for first token, to high end of span for last token
        return srcstr[toklist[0][2][0]:toklist[-1][2][1]]
    @staticmethod
    def token_dequote(tok):
        """Return the semantic text of a token, undoing CQL quoting."""
        if tok[0] == 'stringLiteral':
            # strip quotes
            return tok[1][1:-1].replace("''", "'")
        if tok[0] == 'unclosedString':
            # strip one quote
            return tok[1][1:].replace("''", "'")
        if tok[0] == 'unclosedComment':
            return ''
        return tok[1]
    @staticmethod
    def token_is_word(tok):
        """True if the token is a bare identifier."""
        return tok[0] == 'identifier'
    @classmethod
    def cql2_maybe_escape_name(cls, name):
        """Quote name only if CQL requires it."""
        if cls.is_valid_cql_word(name):
            return name
        return cls.cql2_escape_name(name)
    # XXX: this doesn't really belong here.
    @classmethod
    def is_counter_col(cls, cfdef, colname):
        """True if colname's validation class in cfdef is a counter type."""
        col_info = [cm for cm in cfdef.column_metadata if cm.name == colname]
        return bool(col_info and cqltypes.is_counter_type(col_info[0].validation_class))
    @staticmethod
    def cql2_dequote_value(cqlword):
        """Strip surrounding single quotes and unescape doubled quotes."""
        cqlword = cqlword.strip()
        if cqlword == '':
            return cqlword
        if cqlword[0] == "'" and cqlword[-1] == "'":
            cqlword = cqlword[1:-1].replace("''", "'")
        return cqlword
    @staticmethod
    def cql2_escape_value(value):
        """Render a Python value as a CQL 2 literal."""
        if value is None:
            return 'NULL' # this totally won't work
        # bool must be tested before int: bool is an int subclass.
        if isinstance(value, bool):
            value = str(value).lower()
        elif isinstance(value, float):
            return '%f' % value
        elif isinstance(value, int):
            return str(value)
        return "'%s'" % value.replace("'", "''")
    # use _name for keyspace, cf, and column names, and _value otherwise.
    # also use the cql2_ prefix when dealing with cql2, or leave it off to
    # get whatever behavior is default for this CqlParsingRuleSet.
    cql2_dequote_name = dequote_name = dequote_value = cql2_dequote_value
    cql2_escape_name = escape_name = escape_value = cql2_escape_value
    maybe_escape_name = cql2_maybe_escape_name
    dequote_any = cql2_dequote_value
CqlRuleSet = CqlParsingRuleSet()
# convenience for remainder of module: expose these bound methods of the
# shared CqlRuleSet instance as module-level names, so the rule/completer
# definitions below can call them unqualified.
shorthands = ('completer_for', 'explain_completion',
              'dequote_value', 'dequote_name',
              'escape_value', 'escape_name',
              'maybe_escape_name')
for shorthand in shorthands:
    globals()[shorthand] = getattr(CqlRuleSet, shorthand)
# BEGIN SYNTAX/COMPLETION RULE DEFINITIONS
syntax_rules = r'''
<Start> ::= <CQL_Statement>*
;
<CQL_Statement> ::= [statements]=<statementBody> ";"
;
# the order of these terminal productions is significant:
<endline> ::= /\n/ ;
JUNK ::= /([ \t\r\f\v]+|(--|[/][/])[^\n\r]*([\n\r]|$)|[/][*].*?[*][/])/ ;
<stringLiteral> ::= /'([^']|'')*'/ ;
<float> ::= /-?[0-9]+\.[0-9]+/ ;
<integer> ::= /-?[0-9]+/ ;
<uuid> ::= /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ ;
<identifier> ::= /[a-z][a-z0-9_]*/ ;
<colon> ::= ":" ;
<star> ::= "*" ;
<range> ::= ".." ;
<endtoken> ::= ";" ;
<op> ::= /[-+=,().]/ ;
<cmp> ::= /[<>]=?/ ;
<unclosedString> ::= /'([^']|'')*/ ;
<unclosedComment> ::= /[/][*][^\n]*$/ ;
<symbol> ::= <star>
| <range>
| <op>
| <cmp>
;
<name> ::= <identifier>
| <stringLiteral>
| <integer>
;
<term> ::= <stringLiteral>
| <integer>
| <float>
| <uuid>
;
<colname> ::= <term>
| <identifier>
| nocomplete=<K_KEY>
;
<statementBody> ::= <useStatement>
| <selectStatement>
| <dataChangeStatement>
| <schemaChangeStatement>
;
<dataChangeStatement> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
| <truncateStatement>
| <batchStatement>
;
<schemaChangeStatement> ::= <createKeyspaceStatement>
| <createColumnFamilyStatement>
| <createIndexStatement>
| <dropKeyspaceStatement>
| <dropColumnFamilyStatement>
| <dropIndexStatement>
| <alterTableStatement>
;
<consistencylevel> ::= cl=<identifier> ;
<storageType> ::= typename=( <identifier> | <stringLiteral> ) ;
<keyspaceName> ::= ksname=<name> ;
<columnFamilyName> ::= ( ksname=<name> "." )? cfname=<name> ;
'''
@completer_for('colname', 'nocomplete')
def nocomplete(ctxt, cass):
    """Offer no completions for the KEY-keyword position in <colname>."""
    return tuple()
@completer_for('consistencylevel', 'cl')
def cl_completer(ctxt, cass):
    """Complete a consistency level name from the rule set's known levels."""
    return CqlRuleSet.consistency_levels
@completer_for('storageType', 'typename')
def storagetype_completer(ctxt, cass):
    """Complete a storage type name from the known CQL types."""
    return cqltypes.cql_types
@completer_for('keyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Complete a keyspace name from the live connection, quoted if needed."""
    return [maybe_escape_name(name) for name in cass.get_keyspace_names()]
@completer_for('columnFamilyName', 'ksname')
def cf_ks_name_completer(ctxt, cass):
    """Offer every live keyspace name, dot-suffixed, as a table-name prefix."""
    candidates = []
    for keyspace in cass.get_keyspace_names():
        candidates.append(maybe_escape_name(keyspace) + '.')
    return candidates
@completer_for('columnFamilyName', 'cfname')
def cf_name_completer(ctxt, cass):
    """Complete a table name, scoped to the bound keyspace when one was typed.

    The lookup is attempted even when no keyspace was typed (current
    keyspace); lookup errors are suppressed only in that no-keyspace case,
    otherwise they propagate to the caller.
    """
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        cfnames = cass.get_columnfamily_names(ks)
    except Exception:
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, cfnames)
def get_cfdef(ctxt, cass):
    """Look up the column-family definition for the table bound in ctxt."""
    keyspace = ctxt.get_binding('ksname', None)
    table = ctxt.get_binding('cfname')
    return cass.get_columnfamily(table, ksname=keyspace)
syntax_rules += r'''
<useStatement> ::= "USE" ksname=<keyspaceName>
;
<selectStatement> ::= "SELECT" <whatToSelect>
"FROM" cf=<columnFamilyName>
("USING" "CONSISTENCY" selcl=<consistencylevel>)?
("WHERE" <selectWhereClause>)?
("LIMIT" limit=<integer>)?
;
<selectWhereClause> ::= <relation> ("AND" <relation>)*
| keyname=<colname> "IN" "(" <term> ("," <term>)* ")"
;
<relation> ::= [rel_lhs]=<colname> ("=" | "<" | ">" | "<=" | ">=") <colname>
;
<whatToSelect> ::= colname=<colname> ("," colname=<colname>)*
| ("FIRST" <integer>)? "REVERSED"? (rangestart=<colname> ".." rangeend=<colname>
| "*")
| "COUNT" countparens="(" "*" ")"
;
'''
@completer_for('selectStatement', 'selcl')
def select_statement_consistencylevel(ctxt, cass):
    """Complete SELECT's consistency level; ANY is excluded for reads."""
    return [cl for cl in CqlRuleSet.consistency_levels if cl != 'ANY']
@completer_for('selectWhereClause', 'keyname')
def select_where_keyname_completer(ctxt, cass):
    """Complete the row-key column (the table's key alias, or 'KEY')."""
    cfdef = get_cfdef(ctxt, cass)
    return [cfdef.key_alias if cfdef.key_alias is not None else 'KEY']
@completer_for('relation', 'rel_lhs')
def select_relation_lhs_completer(ctxt, cass):
    """Complete a WHERE-clause left-hand side with filterable columns."""
    cfdef = get_cfdef(ctxt, cass)
    return map(maybe_escape_name, cass.filterable_column_names(cfdef))
@completer_for('whatToSelect', 'countparens')
def select_count_parens_completer(ctxt, cass):
    """After COUNT the only valid continuation is the literal '(*)'."""
    return ['(*)']
# These bindings show a generic placeholder instead of live completions.
explain_completion('whatToSelect', 'colname')
explain_completion('whatToSelect', 'rangestart', '<range_start>')
explain_completion('whatToSelect', 'rangeend', '<range_end>')
syntax_rules += r'''
<insertStatement> ::= "INSERT" "INTO" cf=<columnFamilyName>
"(" keyname=<colname> ","
[colname]=<colname> ( "," [colname]=<colname> )* ")"
"VALUES" "(" <term> "," <term> ( "," <term> )* ")"
( "USING" [insertopt]=<usingOption>
( "AND" [insertopt]=<usingOption> )* )?
;
<usingOption> ::= "CONSISTENCY" <consistencylevel>
| "TIMESTAMP" <integer>
| "TTL" <integer>
;
'''
@completer_for('insertStatement', 'keyname')
def insert_keyname_completer(ctxt, cass):
    """Complete the key column for INSERT (the key alias, or 'KEY')."""
    cfdef = get_cfdef(ctxt, cass)
    return [cfdef.key_alias if cfdef.key_alias is not None else 'KEY']
explain_completion('insertStatement', 'colname')
@completer_for('insertStatement', 'insertopt')
def insert_option_completer(ctxt, cass):
    """Complete USING options for INSERT, omitting any already given."""
    opts = set('CONSISTENCY TIMESTAMP TTL'.split())
    for opt in ctxt.get_binding('insertopt', ()):
        opts.discard(opt.split()[0])
    return opts
syntax_rules += r'''
<updateStatement> ::= "UPDATE" cf=<columnFamilyName>
( "USING" [updateopt]=<usingOption>
( "AND" [updateopt]=<usingOption> )* )?
"SET" <assignment> ( "," <assignment> )*
"WHERE" <updateWhereClause>
;
<assignment> ::= updatecol=<colname> "=" update_rhs=<colname>
( counterop=( "+" | "-"? ) <integer> )?
;
<updateWhereClause> ::= updatefiltercol=<colname> "=" <term>
| updatefilterkey=<colname> filter_in="IN" "(" <term> ( "," <term> )* ")"
;
'''
@completer_for('updateStatement', 'updateopt')
def update_option_completer(ctxt, cass):
    """Complete USING options for UPDATE, omitting any already given.

    Renamed from insert_option_completer: the original definition reused the
    INSERT completer's name and shadowed it at module level. Registration is
    done by the decorator, so only the module-level name changes.
    """
    opts = set('CONSISTENCY TIMESTAMP TTL'.split())
    for opt in ctxt.get_binding('updateopt', ()):
        opts.discard(opt.split()[0])
    return opts
@completer_for('assignment', 'updatecol')
def update_col_completer(ctxt, cass):
    """Complete the column being SET; always also offer a generic hint.

    The escaped names are materialized as a list before concatenation; the
    original concatenated a map() result directly, which raises TypeError on
    Python 3 where map() returns an iterator.
    """
    cfdef = get_cfdef(ctxt, cass)
    colnames = [maybe_escape_name(cm.name) for cm in cfdef.column_metadata]
    return colnames + [Hint('<colname>')]
@completer_for('assignment', 'update_rhs')
def update_countername_completer(ctxt, cass):
    """For a counter column, offer the column itself (col = col + N form)."""
    cfdef = get_cfdef(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    return [maybe_escape_name(curcol)] if CqlRuleSet.is_counter_col(cfdef, curcol) else [Hint('<term>')]
@completer_for('assignment', 'counterop')
def update_counterop_completer(ctxt, cass):
    """Offer +/- only when the column being SET is a counter column."""
    cfdef = get_cfdef(ctxt, cass)
    curcol = dequote_name(ctxt.get_binding('updatecol', ''))
    return ['+', '-'] if CqlRuleSet.is_counter_col(cfdef, curcol) else []
@completer_for('updateWhereClause', 'updatefiltercol')
def update_filtercol_completer(ctxt, cass):
    """Complete the filter column with the table's filterable columns."""
    cfdef = get_cfdef(ctxt, cass)
    return map(maybe_escape_name, cass.filterable_column_names(cfdef))
@completer_for('updateWhereClause', 'updatefilterkey')
def update_filterkey_completer(ctxt, cass):
    """Complete the key column for the IN form (key alias, or 'KEY')."""
    cfdef = get_cfdef(ctxt, cass)
    return [cfdef.key_alias if cfdef.key_alias is not None else 'KEY']
@completer_for('updateWhereClause', 'filter_in')
def update_filter_in_completer(ctxt, cass):
    """Offer IN only when the filtered column is actually the row key."""
    cfdef = get_cfdef(ctxt, cass)
    fk = ctxt.get_binding('updatefilterkey')
    return ['IN'] if fk in ('KEY', cfdef.key_alias) else []
# Grammar and completers for DELETE, BATCH, and TRUNCATE.
syntax_rules += r'''
<deleteStatement> ::= "DELETE" ( [delcol]=<colname> ( "," [delcol]=<colname> )* )?
                        "FROM" cf=<columnFamilyName>
                        ( "USING" [delopt]=<deleteOption> ( "AND" [delopt]=<deleteOption> )* )?
                        "WHERE" <updateWhereClause>
                    ;
<deleteOption> ::= "CONSISTENCY" <consistencylevel>
                 | "TIMESTAMP" <integer>
                 ;
'''
@completer_for('deleteStatement', 'delopt')
def delete_opt_completer(ctxt, cass):
    """Complete USING options for DELETE, omitting any already given."""
    opts = set('CONSISTENCY TIMESTAMP'.split())
    for opt in ctxt.get_binding('delopt', ()):
        opts.discard(opt.split()[0])
    return opts
explain_completion('deleteStatement', 'delcol', '<column_to_delete>')
syntax_rules += r'''
<batchStatement> ::= "BEGIN" "BATCH"
                        ( "USING" [batchopt]=<usingOption>
                                  ( "AND" [batchopt]=<usingOption> )* )?
                        [batchstmt]=<batchStatementMember> ";"
                            ( [batchstmt]=<batchStatementMember> ";" )*
                     "APPLY" "BATCH"
                   ;
<batchStatementMember> ::= <insertStatement>
                         | <updateStatement>
                         | <deleteStatement>
                         ;
'''
@completer_for('batchStatement', 'batchopt')
def batch_opt_completer(ctxt, cass):
    """Complete USING options for BEGIN BATCH, omitting any already given."""
    opts = set('CONSISTENCY TIMESTAMP'.split())
    for opt in ctxt.get_binding('batchopt', ()):
        opts.discard(opt.split()[0])
    return opts
syntax_rules += r'''
<truncateStatement> ::= "TRUNCATE" cf=<columnFamilyName>
                      ;
'''
# Grammar and completers for CREATE KEYSPACE.
syntax_rules += r'''
<createKeyspaceStatement> ::= "CREATE" "KEYSPACE" ksname=<name>
                                "WITH" [optname]=<optionName> "=" [optval]=<optionVal>
                                ( "AND" [optname]=<optionName> "=" [optval]=<optionVal> )*
                            ;
<optionName> ::= <identifier> ( ":" ( <identifier> | <integer> ) )?
             ;
<optionVal> ::= <stringLiteral>
              | <identifier>
              | <integer>
              ;
'''
explain_completion('createKeyspaceStatement', 'ksname', '<new_keyspace_name>')
@completer_for('createKeyspaceStatement', 'optname')
def create_ks_opt_completer(ctxt, cass):
    """Suggest strategy_class first, then strategy-specific option names."""
    exist_opts = ctxt.get_binding('optname', ())
    try:
        stratopt = exist_opts.index('strategy_class')
    except ValueError:
        # strategy_class not given yet: it is the mandatory first option.
        return ['strategy_class =']
    vals = ctxt.get_binding('optval')
    stratclass = dequote_value(vals[stratopt])
    if stratclass in CqlRuleSet.replication_factor_strategies:
        return ['strategy_options:replication_factor =']
    return [Hint('<strategy_option_name>')]
@completer_for('createKeyspaceStatement', 'optval')
def create_ks_optval_completer(ctxt, cass):
    """Offer replication strategy classes as the value of strategy_class."""
    exist_opts = ctxt.get_binding('optname', (None,))
    if exist_opts[-1] == 'strategy_class':
        return map(escape_value, CqlRuleSet.replication_strategies)
    return [Hint('<option_value>')]
# Grammar and completers for CREATE COLUMNFAMILY / CREATE TABLE.
syntax_rules += r'''
<createColumnFamilyStatement> ::= "CREATE" ( "COLUMNFAMILY" | "TABLE" ) cf=<name>
                                    "(" keyalias=<colname> <storageType> "PRIMARY" "KEY"
                                        ( "," colname=<colname> <storageType> )* ")"
                                   ( "WITH" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal>
                                     ( "AND" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal> )* )?
                                ;
<cfOptionName> ::= cfoptname=<identifier> ( cfoptsep=":" cfsubopt=( <identifier> | <integer> ) )?
               ;
<cfOptionVal> ::= <identifier>
                | <stringLiteral>
                | <integer>
                | <float>
                ;
'''
explain_completion('createColumnFamilyStatement', 'keyalias', '<new_key_name>')
explain_completion('createColumnFamilyStatement', 'cf', '<new_table_name>')
explain_completion('createColumnFamilyStatement', 'colname', '<new_column_name>')
@completer_for('cfOptionName', 'cfoptname')
def create_cf_option_completer(ctxt, cass):
    """Offer plain options as-is and map-style options with a ':' suffix."""
    return [c[0] for c in CqlRuleSet.columnfamily_options] + \
           [c[0] + ':' for c in CqlRuleSet.columnfamily_map_options]
@completer_for('cfOptionName', 'cfoptsep')
def create_cf_suboption_separator(ctxt, cass):
    """Offer ':' only after an option that actually takes sub-options."""
    opt = ctxt.get_binding('cfoptname')
    if any(opt == c[0] for c in CqlRuleSet.columnfamily_map_options):
        return [':']
    return ()
@completer_for('cfOptionName', 'cfsubopt')
def create_cf_suboption_completer(ctxt, cass):
    """Complete the sub-option name after 'option:'.

    For compaction_strategy_options the valid sub-options depend on the
    compaction strategy class, taken from an earlier WITH clause if present,
    otherwise from the live table definition.
    """
    opt = ctxt.get_binding('cfoptname')
    if opt == 'compaction_strategy_options':
        # try to determine the strategy class in use
        prevopts = ctxt.get_binding('cfopt', ())
        prevvals = ctxt.get_binding('optval', ())
        for prevopt, prevval in zip(prevopts, prevvals):
            if prevopt == 'compaction_strategy_class':
                csc = dequote_value(prevval)
                break
        else:
            # No explicit strategy in this statement; ask the cluster,
            # falling back to an empty name on any lookup failure.
            cf = ctxt.get_binding('cf')
            try:
                csc = cass.get_columnfamily(cf).compaction_strategy
            except Exception:
                csc = ''
        # Keep only the class name, dropping the java package prefix.
        csc = csc.split('.')[-1]
        if csc == 'SizeTieredCompactionStrategy':
            return ['min_sstable_size']
        elif csc == 'LeveledCompactionStrategy':
            return ['sstable_size_in_mb']
    for optname, _, subopts in CqlRuleSet.columnfamily_map_options:
        if opt == optname:
            return subopts
    return ()
def create_cf_option_val_completer(ctxt, cass):
    """Complete the value of the most recently typed WITH option.

    Shared by CREATE COLUMNFAMILY and ALTER TABLE, so it is registered
    manually (below and again after the ALTER rules) rather than decorated.
    """
    exist_opts = ctxt.get_binding('cfopt')
    this_opt = exist_opts[-1]
    if this_opt == 'compression_parameters:sstable_compression':
        return map(escape_value, CqlRuleSet.available_compression_classes)
    if this_opt == 'compaction_strategy_class':
        return map(escape_value, CqlRuleSet.available_compaction_classes)
    if any(this_opt == opt[0] for opt in CqlRuleSet.obsolete_cf_options):
        return ["'<obsolete_option>'"]
    if this_opt in ('comparator', 'default_validation'):
        return cqltypes.cql_types
    if this_opt == 'read_repair_chance':
        return [Hint('<float_between_0_and_1>')]
    if this_opt in ('replicate_on_write', 'populate_io_cache_on_flush'):
        return [Hint('<yes_or_no>')]
    if this_opt in ('min_compaction_threshold', 'max_compaction_threshold', 'gc_grace_seconds'):
        return [Hint('<integer>')]
    return [Hint('<option_value>')]
# Manual registration (no decorator) so the same function can be reused
# for alterInstructions further below.
completer_for('createColumnFamilyStatement', 'optval') \
    (create_cf_option_val_completer)
# Grammar and completers for CREATE INDEX.
syntax_rules += r'''
<createIndexStatement> ::= "CREATE" "INDEX" indexname=<identifier>? "ON"
                             cf=<name> "(" col=<colname> ")"
                         ;
'''
explain_completion('createIndexStatement', 'indexname', '<new_index_name>')
@completer_for('createIndexStatement', 'cf')
def create_index_cf_completer(ctxt, cass):
    """Complete the table to index from the live table list."""
    return map(maybe_escape_name, cass.get_columnfamily_names())
@completer_for('createIndexStatement', 'col')
def create_index_col_completer(ctxt, cass):
    """Complete the column to index; only not-yet-indexed columns qualify."""
    cfdef = cass.get_columnfamily(dequote_name(ctxt.get_binding('cf')))
    colnames = [md.name for md in cfdef.column_metadata if md.index_name is None]
    return map(maybe_escape_name, colnames)
# Grammar and completers for the DROP statements.
syntax_rules += r'''
<dropKeyspaceStatement> ::= "DROP" "KEYSPACE" ksname=<keyspaceName>
                          ;
'''
@completer_for('dropKeyspaceStatement', 'ksname')
def drop_ks_completer(ctxt, cass):
    """Complete the keyspace to drop from the live keyspace list."""
    return map(maybe_escape_name, cass.get_keyspace_names())
syntax_rules += r'''
<dropColumnFamilyStatement> ::= "DROP" ( "COLUMNFAMILY" | "TABLE" ) cf=<name>
                              ;
'''
@completer_for('dropColumnFamilyStatement', 'cf')
def drop_cf_completer(ctxt, cass):
    """Complete the table to drop from the live table list."""
    return map(maybe_escape_name, cass.get_columnfamily_names())
syntax_rules += r'''
<dropIndexStatement> ::= "DROP" "INDEX" indexname=<name>
                       ;
'''
@completer_for('dropIndexStatement', 'indexname')
def drop_index_completer(ctxt, cass):
    """Complete the index to drop from the live index list."""
    return map(maybe_escape_name, cass.get_index_names())
# Grammar and completers for ALTER COLUMNFAMILY / ALTER TABLE.
syntax_rules += r'''
<alterTableStatement> ::= "ALTER" ( "COLUMNFAMILY" | "TABLE" ) cf=<name> <alterInstructions>
                        ;
<alterInstructions> ::= "ALTER" existcol=<name> "TYPE" <storageType>
                      | "ADD" newcol=<name> <storageType>
                      | "DROP" existcol=<name>
                      | "WITH" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal>
                        ( "AND" [cfopt]=<cfOptionName> "=" [optval]=<cfOptionVal> )*
                      ;
'''
@completer_for('alterTableStatement', 'cf')
def alter_table_cf_completer(ctxt, cass):
    """Complete the table to alter from the live table list."""
    return map(maybe_escape_name, cass.get_columnfamily_names())
@completer_for('alterInstructions', 'existcol')
def alter_table_col_completer(ctxt, cass):
    """Complete an existing column name, including the key alias if set."""
    cfdef = cass.get_columnfamily(dequote_name(ctxt.get_binding('cf')))
    cols = [md.name for md in cfdef.column_metadata]
    if cfdef.key_alias is not None:
        cols.append(cfdef.key_alias)
    return map(maybe_escape_name, cols)
explain_completion('alterInstructions', 'newcol', '<new_column_name>')
# Reuse the CREATE COLUMNFAMILY option-value completer for ALTER ... WITH.
completer_for('alterInstructions', 'optval') \
    (create_cf_option_val_completer)
# END SYNTAX/COMPLETION RULE DEFINITIONS
CqlRuleSet.append_rules(syntax_rules)
|
<gh_stars>1-10
import datetime

import chess
import chess.pgn
import chess.polyglot
# Maximum game depth (in plies) that contributes moves to the book.
MAX_BOOK_PLIES = 60
# Per-position weights are rescaled so no single move exceeds this value.
MAX_BOOK_WEIGHT = 10000
def format_zobrist_key_hex(zobrist_key):
    """Render a 64-bit zobrist key as a zero-padded 16-digit hex string."""
    return '{:016x}'.format(zobrist_key)
def get_zobrist_key_hex(board):
    """Return the board's polyglot zobrist hash as a 16-digit hex string."""
    return format_zobrist_key_hex(chess.polyglot.zobrist_hash(board))
class BookMove():
    """A single book entry: a chess.Move plus its accumulated weight."""

    def __init__(self):
        # No move assigned yet; the weight accumulates as games are merged.
        self.move = None
        self.weight = 0
class BookPosition():
    """All known book moves for one position, keyed by UCI move string."""

    def __init__(self):
        self.fen = ""
        self.moves = {}

    def get_move(self, uci):
        """Return the BookMove for uci, creating an empty one on first use."""
        return self.moves.setdefault(uci, BookMove())
class Book():
    """In-memory polyglot opening book: BookPositions keyed by hex zobrist key."""

    def __init__(self):
        self.positions = {}

    def get_position(self, zobrist_key_hex):
        """Return the BookPosition for the key, creating it on first use."""
        if zobrist_key_hex in self.positions:
            return self.positions[zobrist_key_hex]
        self.positions[zobrist_key_hex] = BookPosition()
        return self.positions[zobrist_key_hex]

    def normalize_weights(self):
        """Rescale move weights per position so none exceeds MAX_BOOK_WEIGHT.

        Scaling divides by the position's total weight, so rescaled weights
        also fit the 16-bit weight field of the polyglot format.
        """
        for zobrist_key_hex in self.positions:
            bp = self.positions[zobrist_key_hex]
            max_weight = 0
            total_weight = 0
            for uci in bp.moves:
                bm = bp.moves[uci]
                if bm.weight > max_weight:
                    max_weight = bm.weight
                total_weight += bm.weight
            if max_weight > MAX_BOOK_WEIGHT:
                for uci in bp.moves:
                    bm = bp.moves[uci]
                    bm.weight = int(bm.weight / total_weight * MAX_BOOK_WEIGHT)

    def save_as_polyglot(self, path):
        """Write all positive-weight moves to path in polyglot .bin format."""
        with open(path, 'wb') as outfile:
            allentries = []
            for zobrist_key_hex in self.positions:
                zbytes = bytes.fromhex(zobrist_key_hex)
                bp = self.positions[zobrist_key_hex]
                for uci in bp.moves:
                    m = bp.moves[uci].move
                    # Polyglot move encoding: bits 0-5 to-square, bits 6-11
                    # from-square, bits 12-14 promotion piece (0 = none).
                    mi = m.to_square + (m.from_square << 6)
                    if m.promotion is not None:
                        mi += ((m.promotion - 1) << 12)
                    mbytes = bytes.fromhex("%0.4x" % mi)
                    weight = bp.moves[uci].weight
                    # NOTE(review): a weight above 0xffff makes the hex string
                    # longer than 4 digits and breaks fromhex; callers are
                    # expected to run normalize_weights() first — confirm.
                    wbytes = bytes.fromhex("%0.4x" % weight)
                    lbytes = bytes.fromhex("%0.8x" % 0)
                    allbytes = zbytes + mbytes + wbytes + lbytes
                    if weight > 0:
                        allentries.append(allbytes)
            # Sort by key, then (stably) by descending weight within a key.
            sorted_weights = sorted(allentries, key=lambda entry: entry[10:12], reverse=True)
            sorted_entries = sorted(sorted_weights, key=lambda entry: entry[0:8])
            print("total of {} moves added to book {}".format(len(allentries), path))
            for entry in sorted_entries:
                outfile.write(entry)

    def merge_file(self, path):
        """Merge an existing polyglot book at path into this book."""
        reader = chess.polyglot.open_reader(path)
        cnt = 0
        for entry in reader:
            cnt += 1
            zobrist_key_hex = format_zobrist_key_hex(entry.key)
            bp = self.get_position(zobrist_key_hex)
            move = entry.move()
            uci = move.uci()
            bm = bp.get_move(uci)
            bm.move = move
            bm.weight += entry.weight
            if cnt % 10000 == 0:
                print("merged {} moves".format(cnt))
class LichessGame():
    """Wrapper around a parsed PGN game exposing lichess-style accessors."""

    def __init__(self, game):
        self.game = game

    def get_id(self):
        """Return the lichess game id (last path segment of the Site URL)."""
        url = self.game.headers["Site"]
        parts = url.split("/")
        game_id = parts[-1]
        return game_id

    def get_time(self):
        """Return the game's UTC start time as a POSIX timestamp.

        Fix: the module never imported datetime, so this method raised
        NameError (the import is now added at the top of the file). strptime
        is also called as the classmethod it is, instead of through a dummy
        1970 instance as before.

        NOTE(review): the parsed datetime is naive, so timestamp() interprets
        it in the local timezone even though the headers are UTC — confirm
        whether callers rely on that.
        """
        dtstr = self.game.headers["UTCDate"] + "T" + self.game.headers["UTCTime"]
        gamedt = datetime.datetime.strptime(dtstr, "%Y.%m.%dT%H:%M:%S")
        return gamedt.timestamp()

    def result(self):
        return self.game.headers.get("Result", "*")

    def white(self):
        return self.game.headers.get("White", "?")

    def black(self):
        return self.game.headers.get("Black", "?")

    def score(self):
        """Score from White's perspective: 2 = win, 1 = draw, 0 = otherwise."""
        res = self.result()
        if res == "1/2-1/2":
            return 1
        if res == "1-0":
            return 2
        return 0
def build_book_file(pgnpath, bookpath):
    """Build a polyglot book at bookpath from the PGN games in pgnpath.

    Each move within the first MAX_BOOK_PLIES plies contributes a weight of
    2/1/0 for a win/draw/loss from the mover's perspective; weights are then
    normalized and the book is written in polyglot .bin format.
    """
    # Fix: the original message had the two paths swapped.
    print("building book {} from {}".format(bookpath, pgnpath))
    book = Book()
    cnt = 0
    # Fix: close the PGN file deterministically (the original leaked it).
    with open(pgnpath) as pgn:
        while True:
            rawgame = chess.pgn.read_game(pgn)
            if rawgame is None:
                break
            ligame = LichessGame(rawgame)
            cnt += 1
            if cnt % 100 == 0:
                print("added {:8d} games".format(cnt))
            board = rawgame.board()
            zobrist_key_hex = get_zobrist_key_hex(board)
            bp = book.get_position(zobrist_key_hex)
            bp.fen = board.fen()
            ply = 0
            for move in rawgame.main_line():
                if ply >= MAX_BOOK_PLIES:
                    break
                uci = move.uci()
                # Polyglot encodes castling as king-captures-rook; rewrite
                # the standard UCI castling moves accordingly.
                fromp = board.piece_at(move.from_square)
                if fromp.piece_type == chess.KING:
                    if uci == "e1g1":
                        uci = "e1h1"
                    elif uci == "e1c1":
                        uci = "e1a1"
                    elif uci == "e8g8":
                        uci = "e8h8"
                    elif uci == "e8c8":
                        uci = "e8a8"
                bm = bp.get_move(uci)
                bm.move = chess.Move.from_uci(uci)
                game_score = ligame.score()
                # score() is from White's perspective; flip when Black moves.
                score_corr = game_score
                if board.turn == chess.BLACK:
                    score_corr = 2 - game_score
                bm.weight += score_corr
                board.push(move)
                zobrist_key_hex = get_zobrist_key_hex(board)
                bp = book.get_position(zobrist_key_hex)
                bp.fen = board.fen()
                ply += 1
    book.normalize_weights()
    book.save_as_polyglot(bookpath)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.