hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfe7a3fe7f7a976fcdcea755f846b479807d8d2
| 30,934
|
py
|
Python
|
src/translate.py
|
darrenyaoyao/VR_motion_predict
|
2039197d017a16460caefff57bfb117c0bd814bc
|
[
"MIT"
] | null | null | null |
src/translate.py
|
darrenyaoyao/VR_motion_predict
|
2039197d017a16460caefff57bfb117c0bd814bc
|
[
"MIT"
] | null | null | null |
src/translate.py
|
darrenyaoyao/VR_motion_predict
|
2039197d017a16460caefff57bfb117c0bd814bc
|
[
"MIT"
] | null | null | null |
"""Simple code for training an RNN for motion prediction."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import h5py
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import data_utils
import seq2seq_model
import torch
import torch.optim as optim
from torch.autograd import Variable
import argparse
class pose_predict():
    """Load a pre-trained seq2seq model and predict future human poses.

    Construction parses command-line flags, builds a Seq2SeqModel (which
    fixes/validates the architecture hyper-parameters), then replaces it
    with a checkpoint loaded from './25_best_model_7100', and computes the
    normalization statistics used to (de)normalize pose sequences.
    """

    def __init__(self, sampling=True):
        """Parse CLI flags, load the checkpointed model and the data stats.

        Args
          sampling: when True (default), force encoder/decoder lengths to
            50/self.seq_out and the "sampling_based" loss when building
            the model skeleton.
        """
        # Number of frames the decoder predicts.
        self.seq_out = 25
        parser = argparse.ArgumentParser(description='Train RNN for human pose estimation')
        # --- Learning hyper-parameters ---
        parser.add_argument('--learning_rate', dest='learning_rate',
                            help='Learning rate',
                            default=0.005, type=float)
        parser.add_argument('--learning_rate_decay_factor', dest='learning_rate_decay_factor',
                            help='Learning rate is multiplied by this much. 1 means no decay.',
                            default=0.95, type=float)
        parser.add_argument('--learning_rate_step', dest='learning_rate_step',
                            help='Every this many steps, do decay.',
                            default=10000, type=int)
        parser.add_argument('--batch_size', dest='batch_size',
                            help='Batch size to use during training.',
                            default=16, type=int)
        parser.add_argument('--max_gradient_norm', dest='max_gradient_norm',
                            help='Clip gradients to this norm.',
                            default=5, type=float)
        parser.add_argument('--iterations', dest='iterations',
                            help='Iterations to train for.',
                            default=1e5, type=int)
        parser.add_argument('--test_every', dest='test_every',
                            help='',
                            default=100, type=int)
        # --- Architecture ---
        parser.add_argument('--architecture', dest='architecture',
                            help='Seq2seq architecture to use: [basic, tied].',
                            default='tied', type=str)
        parser.add_argument('--loss_to_use', dest='loss_to_use',
                            help='The type of loss to use, supervised or sampling_based',
                            default='sampling_based', type=str)
        parser.add_argument('--residual_velocities', dest='residual_velocities',
                            help='Add a residual connection that effectively models velocities', action='store_true',
                            default=False)
        parser.add_argument('--size', dest='size',
                            help='Size of each model layer.',
                            default=1024, type=int)
        parser.add_argument('--num_layers', dest='num_layers',
                            help='Number of layers in the model.',
                            default=1, type=int)
        parser.add_argument('--seq_length_in', dest='seq_length_in',
                            help='Number of frames to feed into the encoder. 25 fp',
                            default=50, type=int)
        parser.add_argument('--seq_length_out', dest='seq_length_out',
                            help='Number of frames that the decoder has to predict. 25fps',
                            default=self.seq_out, type=int)
        parser.add_argument('--omit_one_hot', dest='omit_one_hot',
                            help='', action='store_true',
                            default=False)
        # --- Directories / misc ---
        parser.add_argument('--data_dir', dest='data_dir',
                            help='Data directory',
                            default=os.path.normpath("./data_IK/h3.6m/dataset"), type=str)
        parser.add_argument('--train_dir', dest='train_dir',
                            help='Training directory',
                            default=os.path.normpath("./experiments/"), type=str)
        parser.add_argument('--action', dest='action',
                            help='The action to train on. all means all the actions, all_periodic means walking, eating and smoking',
                            default="posing", type=str)
        parser.add_argument('--use_cpu', dest='use_cpu',
                            help='', action='store_true',
                            default=False)
        parser.add_argument('--sample', dest='sample',
                            help='Set to True for sampling.', action='store_true',
                            default=False)
        self.args = parser.parse_args()
        # Build the model skeleton; its weights are immediately replaced by
        # the checkpoint below, so this mainly fixes the configuration.
        self.model = seq2seq_model.Seq2SeqModel(
            self.args.architecture,
            self.args.seq_length_in if not sampling else 50,
            self.args.seq_length_out if not sampling else self.seq_out,
            self.args.size,  # hidden layer size
            self.args.num_layers,
            self.args.max_gradient_norm,
            self.args.batch_size,
            self.args.learning_rate,
            self.args.learning_rate_decay_factor,
            self.args.loss_to_use if not sampling else "sampling_based",
            1,
            not self.args.omit_one_hot,
            self.args.residual_velocities,
            dtype=torch.float32)
        print("Loading model")
        # NOTE(review): hard-coded checkpoint path; confirm it exists
        # relative to the working directory before running.
        self.model = torch.load('./25_best_model_7100')
        self.model.source_seq_len = 50
        self.model.target_seq_len = self.seq_out
        # Normalization statistics are computed from the training split of
        # the "posing" action, subjects 1 and 6.
        act = ["posing"]
        train_subject_ids = [1, 6]
        train_set, complete_train = data_utils.load_data(self.args.data_dir, train_subject_ids, act, not self.args.omit_one_hot)
        self.data_mean, self.data_std, self.dim_to_ignore, self.dim_to_use = data_utils.normalization_stats(complete_train)

    def sample(self, input_):
        """Predict a future pose sequence from a seed motion sequence.

        Args
          input_: 2D np.ndarray (frames x dims) seed motion sequence.
            # assumes frames >= 2 * model.source_seq_len after the 2x
            # subsampling below -- TODO confirm against callers.
        Returns
          2D np.ndarray: the denormalized predicted sequence for the first
          batch entry (output of data_utils.revert_output_format).
        """
        actions = ["posing"]
        if not self.args.use_cpu:
            self.model = self.model.cuda()
        # Build the test dictionary in the layout data_utils expects:
        # keyed by (subject, action, subaction, 'even'), subsampled 2x,
        # with a one-hot action encoding appended unless --omit_one_hot.
        nactions = len(actions)
        subjects = [5]
        testData = {}
        tmp = input_
        for subj in subjects:
            for action_idx in np.arange(len(actions)):
                action = actions[action_idx]
                for subact in [1]:  # subactions
                    action_sequence = tmp
                    n, d = action_sequence.shape
                    even_list = range(0, n, 2)
                    if not self.args.omit_one_hot:
                        # Add a one-hot encoding at the end of the representation
                        the_sequence = np.zeros((len(even_list), d + nactions), dtype=float)
                        the_sequence[:, 0:d] = action_sequence[even_list, :]
                        the_sequence[:, d + action_idx] = 1
                        testData[(subj, action, subact, 'even')] = the_sequence
                    else:
                        testData[(subj, action, subact, 'even')] = action_sequence[even_list, :]
        test_set = data_utils.normalize_data(testData, self.data_mean, self.data_std, self.dim_to_use, actions, not self.args.omit_one_hot)
        # Predict with srnn seeds for each action (only "posing" here) and
        # return the denormalized prediction of the first sequence.
        for action in actions:
            encoder_inputs, decoder_inputs = self.model.get_batch_srnn(test_set, action, False)
            encoder_inputs = torch.from_numpy(encoder_inputs).float()
            decoder_inputs = torch.from_numpy(decoder_inputs).float()
            if not self.args.use_cpu:
                encoder_inputs = encoder_inputs.cuda()
                decoder_inputs = decoder_inputs.cuda()
            encoder_inputs = Variable(encoder_inputs)
            decoder_inputs = Variable(decoder_inputs)
            srnn_poses = self.model(encoder_inputs, decoder_inputs)
            srnn_poses = srnn_poses.cpu().data.numpy()
            # Reorder axes before denormalizing; presumably (seq, batch, dim)
            # -> (batch, seq, dim) -- TODO confirm against the model output.
            srnn_poses = srnn_poses.transpose([1, 0, 2])
            # revert_output_format also denormalizes the sequences.
            srnn_pred_expmap = data_utils.revert_output_format(srnn_poses, self.data_mean, self.data_std, self.dim_to_ignore, actions, not self.args.omit_one_hot)
            return srnn_pred_expmap[0]

    def get_srnn_gts(self, actions, model, test_set, data_mean, data_std, dim_to_ignore, one_hot, to_euler=True):
        """Get denormalized ground-truth sequences for srnn's seeds.

        Args
          actions: a list of actions to get ground truths for.
          model: model providing the "get_batch_srnn" method.
          test_set: dictionary with normalized test data.
          data_mean: d-long vector with the mean of the training data.
          data_std: d-long vector with the standard deviation of the training data.
          dim_to_ignore: dimensions that we are not using to train/predict.
          one_hot: whether the data comes with one-hot encoding indicating action.
          to_euler: currently has no effect; the expmap -> euler conversion
            is intentionally disabled, so sequences stay in expmap format.
        Returns
          dict keyed by action; values are lists of denormalized
          ground-truth sequences (np.ndarray each).
        """
        srnn_gts_euler = {}
        for action in actions:
            srnn_gt_euler = []
            _, _, srnn_expmap = model.get_batch_srnn(test_set, action)
            for i in np.arange(srnn_expmap.shape[0]):
                denormed = data_utils.unNormalizeData(srnn_expmap[i, :, :], data_mean, data_std, dim_to_ignore, actions, one_hot)
                # NOTE: the expmap -> rotmat -> euler conversion that gave
                # this function its name is disabled here on purpose.
                srnn_gt_euler.append(denormed)
            srnn_gts_euler[action] = srnn_gt_euler
        return srnn_gts_euler

    def define_actions(self, action):
        """Map an action name to the list of actions to use.

        Args
          action: action name, "all" (every H3.6M action), or "all_srnn".
        Returns
          List of action name strings.
        Raises
          ValueError: if the action is not included in H3.6M.
        """
        actions = ["walking", "eating", "smoking", "discussion", "directions",
                   "greeting", "phoning", "posing", "purchases", "sitting",
                   "sittingdown", "takingphoto", "waiting", "walkingdog",
                   "walkingtogether"]
        if action in actions:
            return [action]
        if action == "all":
            return actions
        if action == "all_srnn":
            return ["walking", "eating", "smoking", "discussion"]
        # Fixed: the original `raise (ValueError, "... %d" % action)` raised
        # a tuple (a TypeError in Python 3) and used %d on a string.
        raise ValueError("Unrecognized action: %s" % action)
# train_dir = os.path.normpath(os.path.join( args.train_dir, args.action,
# 'out_{0}'.format(args.seq_length_out),
# 'iterations_{0}'.format(args.iterations),
# args.architecture,
# args.loss_to_use,
# 'omit_one_hot' if args.omit_one_hot else 'one_hot',
# 'depth_{0}'.format(args.num_layers),
# 'size_{0}'.format(args.size),
# 'lr_{0}'.format(args.learning_rate),
# 'residual_vel' if args.residual_velocities else 'not_residual_vel'))
# print(train_dir)
# os.makedirs(train_dir, exist_ok=True)
# def train():
# """Train a seq2seq model on human motion"""
# actions = define_actions( args.action )
# number_of_actions = len( actions )
# train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use = read_all_data(
# actions, args.seq_length_in, args.seq_length_out, args.data_dir, not args.omit_one_hot )
# # Limit TF to take a fraction of the GPU memory
# if True:
# model = create_model(actions, args.sample)
# if not args.use_cpu:
# model = model.cuda()
# # === Read and denormalize the gt with srnn's seeds, as we'll need them
# # many times for evaluation in Euler Angles ===
# srnn_gts_euler = get_srnn_gts( actions, model, test_set, data_mean,
# data_std, dim_to_ignore, not args.omit_one_hot )
# #=== This is the training loop ===
# step_time, loss, val_loss = 0.0, 0.0, 0.0
# current_step = 0 if args.load <= 0 else args.load + 1
# previous_losses = []
# step_time, loss = 0, 0
# optimiser = optim.SGD(model.parameters(), lr=args.learning_rate)
# #optimiser = optim.Adam(model.parameters(), lr=learning_rate, betas = (0.9, 0.999))
# for _ in range( args.iterations ):
# optimiser.zero_grad()
# model.train()
# start_time = time.time()
# # Actual training
# # === Training step ===
# encoder_inputs, decoder_inputs, decoder_outputs = model.get_batch( train_set, not args.omit_one_hot )
# encoder_inputs = torch.from_numpy(encoder_inputs).float()
# decoder_inputs = torch.from_numpy(decoder_inputs).float()
# decoder_outputs = torch.from_numpy(decoder_outputs).float()
# if not args.use_cpu:
# encoder_inputs = encoder_inputs.cuda()
# decoder_inputs = decoder_inputs.cuda()
# decoder_outputs = decoder_outputs.cuda()
# encoder_inputs = Variable(encoder_inputs)
# decoder_inputs = Variable(decoder_inputs)
# decoder_outputs = Variable(decoder_outputs)
# preds = model(encoder_inputs, decoder_inputs)
# step_loss = (preds-decoder_outputs)**2
# step_loss = step_loss.mean()
# # Actual backpropagation
# step_loss.backward()
# optimiser.step()
# step_loss = step_loss.cpu().data.numpy()
# if current_step % 10 == 0:
# print("step {0:04d}; step_loss: {1:.4f}".format(current_step, step_loss ))
# step_time += (time.time() - start_time) / args.test_every
# loss += step_loss / args.test_every
# current_step += 1
# # === step decay ===
# if current_step % args.learning_rate_step == 0:
# args.learning_rate = args.learning_rate*args.learning_rate_decay_factor
# optimiser = optim.Adam(model.parameters(), lr=args.learning_rate, betas = (0.9, 0.999))
# print("Decay learning rate. New value at " + str(args.learning_rate))
# #cuda.empty_cache()
# # Once in a while, we save checkpoint, print statistics, and run evals.
# if current_step % args.test_every == 0:
# model.eval()
# # === Validation with randomly chosen seeds ===
# encoder_inputs, decoder_inputs, decoder_outputs = model.get_batch( test_set, not args.omit_one_hot )
# encoder_inputs = torch.from_numpy(encoder_inputs).float()
# decoder_inputs = torch.from_numpy(decoder_inputs).float()
# decoder_outputs = torch.from_numpy(decoder_outputs).float()
# if not args.use_cpu:
# encoder_inputs = encoder_inputs.cuda()
# decoder_inputs = decoder_inputs.cuda()
# decoder_outputs = decoder_outputs.cuda()
# encoder_inputs = Variable(encoder_inputs)
# decoder_inputs = Variable(decoder_inputs)
# decoder_outputs = Variable(decoder_outputs)
# preds = model(encoder_inputs, decoder_inputs)
# step_loss = (preds-decoder_outputs)**2
# step_loss = step_loss.mean()
# val_loss = step_loss # Loss book-keeping
# print()
# print("{0: <16} |".format("milliseconds"), end="")
# for ms in [80, 160, 320, 400, 560, 1000]:
# print(" {0:5d} |".format(ms), end="")
# print()
# # === Validation with srnn's seeds ===
# for action in actions:
# # Evaluate the model on the test batches
# encoder_inputs, decoder_inputs, decoder_outputs = model.get_batch_srnn( test_set, action )
# #### Evaluate model on action
# encoder_inputs = torch.from_numpy(encoder_inputs).float()
# decoder_inputs = torch.from_numpy(decoder_inputs).float()
# decoder_outputs = torch.from_numpy(decoder_outputs).float()
# if not args.use_cpu:
# encoder_inputs = encoder_inputs.cuda()
# decoder_inputs = decoder_inputs.cuda()
# decoder_outputs = decoder_outputs.cuda()
# encoder_inputs = Variable(encoder_inputs)
# decoder_inputs = Variable(decoder_inputs)
# decoder_outputs = Variable(decoder_outputs)
# srnn_poses = model(encoder_inputs, decoder_inputs)
# srnn_loss = (srnn_poses - decoder_outputs)**2
# srnn_loss.cpu().data.numpy()
# srnn_loss = srnn_loss.mean()
# srnn_poses = srnn_poses.cpu().data.numpy()
# srnn_poses = srnn_poses.transpose([1,0,2])
# srnn_loss = srnn_loss.cpu().data.numpy()
# # Denormalize the output
# srnn_pred_expmap = data_utils.revert_output_format( srnn_poses,
# data_mean, data_std, dim_to_ignore, actions, not args.omit_one_hot )
# # Save the errors here
# mean_errors = np.zeros( (len(srnn_pred_expmap), srnn_pred_expmap[0].shape[0]) )
# # Training is done in exponential map, but the error is reported in
# # Euler angles, as in previous work.
# # See https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-247769197
# N_SEQUENCE_TEST = 8
# for i in np.arange(N_SEQUENCE_TEST):
# eulerchannels_pred = srnn_pred_expmap[i]
# # Convert from exponential map to Euler angles
# for j in np.arange( eulerchannels_pred.shape[0] ):
# for k in np.arange(3,97,3):
# eulerchannels_pred[j,k:k+3] = data_utils.rotmat2euler(
# data_utils.expmap2rotmat( eulerchannels_pred[j,k:k+3] ))
# # The global translation (first 3 entries) and global rotation
# # (next 3 entries) are also not considered in the error, so the_key
# # are set to zero.
# # See https://github.com/asheshjain399/RNNexp/issues/6#issuecomment-249404882
# gt_i=np.copy(srnn_gts_euler[action][i])
# gt_i[:,0:6] = 0
# # Now compute the l2 error. The following is numpy port of the error
# # function provided by Ashesh Jain (in matlab), available at
# # https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/dataParser/Utils/motionGenerationError.m#L40-L54
# idx_to_use = np.where( np.std( gt_i, 0 ) > 1e-4 )[0]
# euc_error = np.power( gt_i[:,idx_to_use] - eulerchannels_pred[:,idx_to_use], 2)
# euc_error = np.sum(euc_error, 1)
# euc_error = np.sqrt( euc_error )
# mean_errors[i,:] = euc_error
# # This is simply the mean error over the N_SEQUENCE_TEST examples
# mean_mean_errors = np.mean( mean_errors, 0 )
# # Pretty print of the results for 80, 160, 320, 400, 560 and 1000 ms
# print("{0: <16} |".format(action), end="")
# for ms in [1,3,7,9,13,24]:
# if args.seq_length_out >= ms+1:
# print(" {0:.3f} |".format( mean_mean_errors[ms] ), end="")
# else:
# print(" n/a |", end="")
# print()
# print()
# print("============================\n"
# "Global step: %d\n"
# "Learning rate: %.4f\n"
# "Step-time (ms): %.4f\n"
# "Train loss avg: %.4f\n"
# "--------------------------\n"
# "Val loss: %.4f\n"
# "srnn loss: %.4f\n"
# "============================" % (current_step,
# args.learning_rate, step_time*1000, loss,
# val_loss, srnn_loss))
# torch.save(model, train_dir + '/model_' + str(current_step))
# print()
# previous_losses.append(loss)
# # Reset global time and loss
# step_time, loss = 0, 0
# sys.stdout.flush()
# def get_srnn_gts( actions, model, test_set, data_mean, data_std, dim_to_ignore, one_hot, to_euler=True ):
# """
# Get the ground truths for srnn's sequences, and convert to Euler angles.
# (the error is always computed in Euler angles).
# Args
# actions: a list of actions to get ground truths for.
# model: training model we are using (we only use the "get_batch" method).
# test_set: dictionary with normalized training data.
# data_mean: d-long vector with the mean of the training data.
# data_std: d-long vector with the standard deviation of the training data.
# dim_to_ignore: dimensions that we are not using to train/predict.
# one_hot: whether the data comes with one-hot encoding indicating action.
# to_euler: whether to convert the angles to Euler format or keep thm in exponential map
# Returns
# srnn_gts_euler: a dictionary where the keys are actions, and the values
# are the ground_truth, denormalized expected outputs of srnns's seeds.
# """
# srnn_gts_euler = {}
# for action in actions:
# srnn_gt_euler = []
# _, _, srnn_expmap = model.get_batch_srnn( test_set, action )
# # expmap -> rotmat -> euler
# for i in np.arange( srnn_expmap.shape[0] ):
# denormed = data_utils.unNormalizeData(srnn_expmap[i,:,:], data_mean, data_std, dim_to_ignore, actions, one_hot )
# if to_euler:
# for j in np.arange( denormed.shape[0] ):
# for k in np.arange(3,97,3):
# denormed[j,k:k+3] = data_utils.rotmat2euler( data_utils.expmap2rotmat( denormed[j,k:k+3] ))
# srnn_gt_euler.append( denormed )
# # Put back in the dictionary
# srnn_gts_euler[action] = srnn_gt_euler
# return srnn_gts_euler
# def sample():
# """Sample predictions for srnn's seeds"""
# actions = define_actions( args.action )
# if True:
# # === Create the model ===
# print("Creating %d layers of %d units." % (args.num_layers, args.size))
# sampling = True
# model = create_model(actions, sampling)
# if not args.use_cpu:
# model = model.cuda()
# print("Model created")
# # Load all the data
# # train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use = read_all_data(
# # actions, args.seq_length_in, args.seq_length_out, args.data_dir, not args.omit_one_hot )
# #"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
# nactions = len( actions )
# subjects = [5]
# testData = {}
# tmp = np.random.randn(102,99)
# start = time.time()
# for subj in subjects:
# for action_idx in np.arange(len(actions)):
# action = actions[ action_idx ]
# for subact in [1, 2]: # subactions
# action_sequence = tmp
# n, d = action_sequence.shape
# even_list = range(0, n, 2)
# if not args.omit_one_hot:
# # Add a one-hot encoding at the end of the representation
# the_sequence = np.zeros( (len(even_list), d + nactions), dtype=float )
# the_sequence[ :, 0:d ] = action_sequence[even_list, :]
# the_sequence[ :, d+action_idx ] = 1
# testData[(subj, action, subact, 'even')] = the_sequence
# else:
# testData[(subj, action, subact, 'even')] = action_sequence[even_list, :]
# test_set = data_utils.normalize_data( testData, data_mean, data_std, dim_to_use, actions, not args.omit_one_hot )
# #"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
# # === Read and denormalize the gt with srnn's seeds, as we'll need them
# # many times for evaluation in Euler Angles ===
# srnn_gts_expmap = get_srnn_gts( actions, model, test_set, data_mean,
# data_std, dim_to_ignore, not args.omit_one_hot, to_euler=False )
# #srnn_gts_exmap是ground truth!!
# # print(data_mean.shape)
# # print(data_std.shape)
# # # srnn_gts_euler = get_srnn_gts( actions, model, test_set, data_mean,
# # data_std, dim_to_ignore, not args.omit_one_hot )
# # Clean and create a new h5 file of samples
# SAMPLES_FNAME = 'samples.h5'
# try:
# os.remove( SAMPLES_FNAME )
# except OSError:
# pass
# # Predict and save for each action
# for action in actions:
# # Make prediction with srnn' seeds
# encoder_inputs, decoder_inputs, decoder_outputs = model.get_batch_srnn( test_set, action )
# encoder_inputs = torch.from_numpy(encoder_inputs).float()
# decoder_inputs = torch.from_numpy(decoder_inputs).float()
# decoder_outputs = torch.from_numpy(decoder_outputs).float()
# if not args.use_cpu:
# encoder_inputs = encoder_inputs.cuda()
# decoder_inputs = decoder_inputs.cuda()
# decoder_outputs = decoder_outputs.cuda()
# encoder_inputs = Variable(encoder_inputs)
# decoder_inputs = Variable(decoder_inputs)
# decoder_outputs = Variable(decoder_outputs)
# srnn_poses = model(encoder_inputs, decoder_inputs)
# print(encoder_inputs.shape)
# print(decoder_inputs.shape)
# srnn_loss = (srnn_poses - decoder_outputs)**2
# srnn_loss.cpu().data.numpy()
# srnn_loss = srnn_loss.mean()
# srnn_poses = srnn_poses.cpu().data.numpy()
# srnn_poses = srnn_poses.transpose([1,0,2])
# srnn_loss = srnn_loss.cpu().data.numpy()
# # denormalizes too
# srnn_pred_expmap = data_utils.revert_output_format(srnn_poses, data_mean, data_std, dim_to_ignore, actions, not args.omit_one_hot )
# end = time.time()
# print("time spend: ",(end - start))
# # Save the samples
# print(srnn_pred_expmap[0].shape)
# with h5py.File( SAMPLES_FNAME, 'a' ) as hf:
# for i in np.arange(1):
# # Save conditioning ground truth
# node_name = 'expmap/gt/{1}_{0}'.format(i, action)
# hf.create_dataset( node_name, data=srnn_gts_expmap[action][i] )
# # Save prediction
# node_name = 'expmap/preds/{1}_{0}'.format(i, action)
# hf.create_dataset( node_name, data=srnn_pred_expmap[i] )
# # # Compute and save the errors here
# # mean_errors = np.zeros( (len(srnn_pred_expmap), srnn_pred_expmap[0].shape[0]) )
# # for i in np.arange(8):
# # eulerchannels_pred = srnn_pred_expmap[i]
# # for j in np.arange( eulerchannels_pred.shape[0] ):
# # for k in np.arange(3,97,3):
# # eulerchannels_pred[j,k:k+3] = data_utils.rotmat2euler(
# # data_utils.expmap2rotmat( eulerchannels_pred[j,k:k+3] ))
# # eulerchannels_pred[:,0:6] = 0
# # # Pick only the dimensions with sufficient standard deviation. Others are ignored.
# # idx_to_use = np.where( np.std( eulerchannels_pred, 0 ) > 1e-4 )[0]
# # euc_error = np.power( srnn_gts_euler[action][i][:,idx_to_use] - eulerchannels_pred[:,idx_to_use], 2)
# # euc_error = np.sum(euc_error, 1)
# # euc_error = np.sqrt( euc_error )
# # mean_errors[i,:] = euc_error
# # mean_mean_errors = np.mean( mean_errors, 0 )
# # print( action )
# # print( ','.join(map(str, mean_mean_errors.tolist() )) )
# # with h5py.File( SAMPLES_FNAME, 'a' ) as hf:
# # node_name = 'mean_{0}_error'.format( action )
# # hf.create_dataset( node_name, data=mean_mean_errors )
# return
# def main():
# sample()
# def read_all_data( actions, seq_length_in, seq_length_out, data_dir, one_hot ):
# """
# Loads data for training/testing and normalizes it.
# Args
# actions: list of strings (actions) to load
# seq_length_in: number of frames to use in the burn-in sequence
# seq_length_out: number of frames to use in the output sequence
# data_dir: directory to load the data from
# one_hot: whether to use one-hot encoding per action
# Returns
# train_set: dictionary with normalized training data
# test_set: dictionary with test data
# data_mean: d-long vector with the mean of the training data
# data_std: d-long vector with the standard dev of the training data
# dim_to_ignore: dimensions that are not used becaused stdev is too small
# dim_to_use: dimensions that we are actually using in the model
# """
# # === Read training data ===
# print ("Reading training data (seq_len_in: {0}, seq_len_out {1}).".format(
# seq_length_in, seq_length_out))
# train_subject_ids = [1,6,7,8,9,11]
# test_subject_ids = [5]
# train_set, complete_train = data_utils.load_data( data_dir, train_subject_ids, actions, one_hot )
# test_set, complete_test = data_utils.load_data( data_dir, test_subject_ids, actions, one_hot )
# # print("here!!!!!", test_set[5, 'greeting', 1, 'even'])
# # Compute normalization stats
# data_mean, data_std, dim_to_ignore, dim_to_use = data_utils.normalization_stats(complete_train)
# # print("mean:",data_mean)
# # print("std:",data_std)
# # print("dim_to_ignore",dim_to_ignore)
# # print("dim_to_use",dim_to_use)
# # Normalize -- subtract mean, divide by stdev
# train_set = data_utils.normalize_data( train_set, data_mean, data_std, dim_to_use, actions, one_hot )
# test_set = data_utils.normalize_data( test_set, data_mean, data_std, dim_to_use, actions, one_hot )
# print("done reading data.")
# return train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use
| 40.226268
| 159
| 0.617282
|
acfe7a46d5efc68fe481212821e88af938b2b85f
| 9,239
|
py
|
Python
|
pystella/model/sn_flx.py
|
baklanovp/pystella
|
47a8b9c3dcd343bf80fba80c8468b803f0f842ce
|
[
"MIT"
] | 1
|
2019-08-08T13:11:57.000Z
|
2019-08-08T13:11:57.000Z
|
pystella/model/sn_flx.py
|
cradesto/pystella
|
f6f44ed12d9648585a52a09e15d494daa4c70c59
|
[
"MIT"
] | 9
|
2015-07-11T16:39:57.000Z
|
2021-11-23T07:31:49.000Z
|
pystella/model/sn_flx.py
|
cradesto/pystella
|
f6f44ed12d9648585a52a09e15d494daa4c70c59
|
[
"MIT"
] | 1
|
2019-08-08T13:08:55.000Z
|
2019-08-08T13:08:55.000Z
|
#!/usr/bin/env python
# -.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.
#
# File Name : sn_flx.py
#
# Purpose :
#
# Creation Date : 30-10-2015
#
# Last Modified : Wed 23 Nov 2016 09:42:32 CET
#
# Created By : UMN
#
# Modified: Petr Baklanov
# _._._._._._._._._._._._._._._._._._._._._.
import struct
import numpy as np
import logging
import matplotlib.pyplot as plt
# Create the module-level logger: INFO threshold on the logger itself,
# with a stream handler (DEBUG-capable) that writes timestamped records.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()  # console handler
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s -%(name)s - %(levelname)s: %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class _flx_record_reader(object):
"""Read a single fortran record of a Stella flx file.
Used by flx_reader.
Parameters
----------
fstream : file
file object associated with the Stella flx file
Attributes
----------
Lsave : int
number of snapshots saved in the record
Mfreq : int
total number of frequency bins
Tcurved : [day] array
time of the snapshots
Nfrus : np.ndarray
number of active frequency bins
Flsave : 2D np.ndarray
emergent Flux, shape: (Mfreq x Lsave)
"""
def __init__(self, fstream):
"""
Nomenclature:
i4 -- 32 bit integer
f4 -- 32 bit floating point number
f8 -- 64 bit floating point number
Format of flx binary file record:
i4 -- Nrecord: record length in byte
i4 -- Lsaved: number of save snapshots
Lsaved repetitions:
f4 -- Tcurved: time of snapshot
i4 -- Nfrus: number of used frequencies
Mfreq repetitions:
f8 -- flsave: stored frequency-dependent flux
i4 -- Nrecord: record length in byte
Notes:
* relevant information was extracted from:
- node _flx in ttfitsimpler4.trf (stellam/strad)
- node _stinfoflux in stradio.trf (stellam/src)
* Mfreq defined in zone.inc (not stored in flx file)
"""
self.fstream = fstream
self._read_record()
def _read_record(self):
"""Parse one Stella flx file record"""
self.Nrec = struct.unpack("i", self.fstream.load(4))[0]
self.Lsave = struct.unpack("i", self.fstream.load(4))[0]
self.Mfreq = (self.Nrec - 4 - 8 * self.Lsave) // (self.Lsave * 8)
logging.info("Calculated Mfreq: %d" % self.Mfreq)
self.Tcurv = np.zeros(self.Lsave)
self.Nfrus = np.zeros(self.Lsave)
self.Flsave = np.zeros((self.Mfreq, self.Lsave))
for i in range(self.Lsave):
self.Tcurv[i] = struct.unpack("f", self.fstream.load(4))[0]
self.Nfrus[i] = struct.unpack("i", self.fstream.load(4))[0]
self.Flsave[:, i] = np.array(
struct.unpack("{:d}d".format(self.Mfreq),
self.fstream.load(8 * self.Mfreq)))
try:
assert (self.Nrec == struct.unpack("i", self.fstream.load(4))[0])
except AssertionError:
logger.exception(
"Mismatch in record length at start and end stamps")
raise IOError
logger.info("Record successfully read")
# Time is stored in days
self.Tcurv = self.Tcurv # * units.d
# Units of Flsave not clear
class flx_reader(object):
    """Reader for Stella flx binary files

    Relies on _flx_record_reader

    Parameters
    ----------
    fname : str
        full filename (including extension) of the flx file
    """
    extension = ".flx"

    def __init__(self, fname):
        # Append the extension if the caller passed a bare stem.
        if fname[-4:] != flx_reader.extension:
            fname = fname + flx_reader.extension
        self.fname = fname
        self.fstream = open(fname, "rb")
        self.records = []

        self._read_flx_file()
        self._prep_easy_access()

    def _read_flx_file(self):
        """Main parser: read records until EOF or the empty sentinel record."""
        while True:
            fpos = self.fstream.tell()
            buffer = self.fstream.read(4)
            # FIX: the stream is opened in binary mode, so read() returns
            # bytes; the old comparison 'buffer == ""' could never be true
            # on Python 3 and EOF crashed in struct.unpack instead.
            if not buffer:
                # EOF
                logger.info("EOF triggered")
                break
            if struct.unpack("i", buffer)[0] == 4:
                # last record with Lsave = 0
                logger.info("Last, empty, record found: stopping")
                break
            # A real record: rewind to its start and parse it fully.
            self.fstream.seek(fpos)
            self.records.append(_flx_record_reader(self.fstream))
            logger.info("Now at stream position {:d}".format(self.fstream.tell()))
        self.fstream.close()

    def _prep_easy_access(self):
        """Facilitates access of important record information

        Generates arrays holding the information of all records concerning the
        quantities:
        - Tcurv
        - Nfrus
        - Flsave
        """
        from pystella.util.arr_dict import first

        ntot = np.sum([rec.Lsave for rec in self.records])
        Mfreq = first(self.records).Mfreq

        self.time = np.zeros(ntot)  # * rec.Tcurv.unit
        self.Nfrus = np.zeros(ntot)
        self.Flsave = np.zeros((Mfreq, ntot))

        # Concatenate the per-record columns into one contiguous time axis.
        n = 0
        for rec in self.records:
            k = rec.Lsave
            self.time[n:n + k] = rec.Tcurv
            self.Nfrus[n:n + k] = rec.Nfrus
            self.Flsave[:, n:n + k] = rec.Flsave
            n = n + k

    def show_emergent_Fl(self, nu=None, logx=True, logy=True, logz=True,
                         cmap="afmhot", floor=1e-20, vmax=1e3, vmin=1e-7):
        """Visualize time-dependent evolution of Flsave

        A pcolormesh plot of Flsave will be created, with time on the y-axis
        and the frequency grid on the x-axis. The frequency grid is not stored
        in the .flx file and thus has be provided (for example from the prf
        file reader). If no nu values are provided, the plot will display
        Flsave against the nu grid indices.

        WARNING: unit of Flsave is still unknown
        WARNING: the nu grid in the prf file typically has the shape (Nfreq),
        while Flsave uses (Mfreq). However, the additional Flsave values are
        not used in any case (IMHO) so they are not displayed. In other words,
        we always display the first n values of Flsave, with n being the length
        of nu. If nu is None, n is Mfreq.

        Parameters
        ----------
        nu : None, Hz array
            the frequency grid; has to be supplied by hand. If None, Flsave
            will be shown versus the index of the frequency bins (default None)
        logx : bool
            use logarithmic scaling on the x-Axis (default True)
        logy : bool
            use logarithmic scaling on the y-Axis (default True)
        logz : bool
            use logarithmic scaling for the Flsave values (default True)
        floor : float
            floor value for Flsave to eliminate zeros and negative values which
            cause problems in the logz=True mode (default 1e-20)
        cmap : str
            name of the colormap
        vmin : float
            minimum value for the color representation of the Flsave
            values (smaller values will be clipped). If logz=True, the
            logarithm of the vmin value will be passed to pcolormesh (default
            1e-7)
        vmax : float
            maximum value for the color representation of the Flsave
            values (larger values will be clipped). If logz=True, the
            logarithm of the vmin value will be passed to pcolormesh (default
            1e3)

        Returns
        -------
        fig : plt.figure
            figure instance containing plot
        aux : dict
            a dictionary containing references to other important plotting
            objects
        """
        if nu is None:
            logger.warning("No frequencies supplied - will plot vs. bin index")
            # FIX: np.float was removed in NumPy 1.24; the builtin float is
            # the documented replacement.
            x = np.arange(self.records[0].Mfreq).astype(float) + 1
            xlabel = r"frequency bin index"
        else:
            x = nu
            # try:
            #     x = nu.to("Hz").value
            # except (units.UnitsError, AttributeError):
            #     logger.error(
            #         "nu must be astropy quantity with a frequency unit")
            #     raise
            xlabel = r"$\nu$ [Hz]"

        lenZ = len(x)
        y = self.time  # .to("d").value

        X, Y = np.meshgrid(x, y)
        # Clamp to the floor so log10 never sees zeros/negatives.
        Z = np.maximum(self.Flsave[:lenZ, :].T, floor)

        if logz:
            Z = np.log10(Z)
            vmax = np.log10(vmax)
            vmin = np.log10(vmin)
            zlabel = r"$\log$ Flsave"
        else:
            zlabel = r"Flsave"

        fig = plt.figure()
        ax = fig.add_subplot(111)
        im = ax.pcolormesh(X, Y, Z, rasterized="True", cmap=cmap, vmin=vmin, vmax=vmax)
        cbar = plt.colorbar(im)
        if logx:
            ax.set_xscale("log")
        if logy:
            ax.set_yscale("log")
        ax.set_xlabel(xlabel)
        ax.set_ylabel(r"$t$ [d]")
        cbar.set_label(zlabel)
        ax.axis("tight")

        return fig, {"ax": ax, "cbar": cbar, "im": im}
if __name__ == "__main__":
    # Smoke test: parse a sample Stella flx file expected in the working
    # directory (extension is appended by flx_reader if missing).
    test = flx_reader("m100101wgrid1304.flx")
| 31.212838
| 87
| 0.564996
|
acfe7b2e9ed51bb86761c2c7d3edf3ee5a071506
| 4,517
|
py
|
Python
|
tensorflow_datasets/summarization/newsroom.py
|
kmh4321/datasets
|
286d7a8a5eb3e073f18f8fee4f774bafc23fb445
|
[
"Apache-2.0"
] | 1
|
2020-03-12T14:43:25.000Z
|
2020-03-12T14:43:25.000Z
|
tensorflow_datasets/summarization/newsroom.py
|
kmh4321/datasets
|
286d7a8a5eb3e073f18f8fee4f774bafc23fb445
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/summarization/newsroom.py
|
kmh4321/datasets
|
286d7a8a5eb3e073f18f8fee4f774bafc23fb445
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NEWSROOM Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@article{Grusky_2018,
title={Newsroom: A Dataset of 1.3 Million Summaries with Diverse Extractive Strategies},
url={http://dx.doi.org/10.18653/v1/n18-1065},
DOI={10.18653/v1/n18-1065},
journal={Proceedings of the 2018 Conference of the North American Chapter of
the Association for Computational Linguistics: Human Language
Technologies, Volume 1 (Long Papers)},
publisher={Association for Computational Linguistics},
author={Grusky, Max and Naaman, Mor and Artzi, Yoav},
year={2018}
}
"""
_DESCRIPTION = """
NEWSROOM is a large dataset for training and evaluating summarization systems.
It contains 1.3 million articles and summaries written by authors and
editors in the newsrooms of 38 major publications.
Dataset features includes:
- text: Input news text.
- summary: Summary for the news.
And additional features:
- title: news title.
- url: url of the news.
- date: date of the article.
- density: extractive density.
- coverage: extractive coverage.
- compression: compression ratio.
- density_bin: low, medium, high.
- coverage_bin: extractive, abstractive.
- compression_bin: low, medium, high.
This dataset can be downloaded upon requests. Unzip all the contents
"train.jsonl, dev.josnl, test.jsonl" to the tfds folder.
"""
_DOCUMENT = "text"
_SUMMARY = "summary"
_ADDITIONAL_TEXT_FEATURES = [
"title", "url", "date", "density_bin", "coverage_bin", "compression_bin"
]
_ADDITIONAL_FLOAT_FEATURES = [
"density",
"coverage",
"compression",
]
class Newsroom(tfds.core.GeneratorBasedBuilder):
  """Builder for the NEWSROOM summarization dataset (manual download)."""

  VERSION = tfds.core.Version("1.0.0")

  MANUAL_DOWNLOAD_INSTRUCTIONS = """\
  You should download the dataset from https://summari.es/download/
  The webpage requires registration.
  After downloading, please put dev.jsonl, test.jsonl and train.jsonl
  files in the manual_dir.
  """

  def _info(self):
    """Dataset metadata: text/float features, supervised keys, citation."""
    text_keys = [_DOCUMENT, _SUMMARY] + _ADDITIONAL_TEXT_FEATURES
    feature_dict = {key: tfds.features.Text() for key in text_keys}
    for key in _ADDITIONAL_FLOAT_FEATURES:
      feature_dict[key] = tfds.features.Tensor(shape=[], dtype=tf.float32)
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict(feature_dict),
        supervised_keys=(_DOCUMENT, _SUMMARY),
        homepage="https://summari.es",
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Returns one SplitGenerator per manually downloaded jsonl file."""
    split_files = [
        (tfds.Split.TRAIN, "train.jsonl"),
        (tfds.Split.VALIDATION, "dev.jsonl"),
        (tfds.Split.TEST, "test.jsonl"),
    ]
    return [
        tfds.core.SplitGenerator(
            name=split,
            gen_kwargs={
                "input_file": os.path.join(dl_manager.manual_dir, fname)
            },
        ) for split, fname in split_files
    ]

  def _generate_examples(self, input_file=None):
    """Yields (line index, example dict) pairs parsed from one jsonl file."""
    # fields in each record are "url", "archive", "title", "date", "text",
    # "compression_bin", "density_bin", "summary", "density",
    # "compression', "coverage", "coverage_bin",
    keep_keys = ([_DOCUMENT, _SUMMARY] + _ADDITIONAL_TEXT_FEATURES +
                 _ADDITIONAL_FLOAT_FEATURES)
    with tf.io.gfile.GFile(input_file) as f:
      for idx, line in enumerate(f):
        record = json.loads(line)
        yield idx, {key: record[key] for key in keep_keys}
| 31.587413
| 91
| 0.675448
|
acfe7c705c7b43d8cb2110c7476df7639b29114a
| 12,322
|
py
|
Python
|
tasks.py
|
colour-science/colour-datasets
|
464c387c17739f08a0cceb5185f6b225872adb6c
|
[
"BSD-3-Clause"
] | 28
|
2019-06-15T03:07:28.000Z
|
2022-03-28T14:11:51.000Z
|
tasks.py
|
colour-science/colour-datasets
|
464c387c17739f08a0cceb5185f6b225872adb6c
|
[
"BSD-3-Clause"
] | 12
|
2020-03-24T17:35:36.000Z
|
2021-11-09T08:49:39.000Z
|
tasks.py
|
colour-science/colour-datasets
|
464c387c17739f08a0cceb5185f6b225872adb6c
|
[
"BSD-3-Clause"
] | 8
|
2019-10-27T15:00:52.000Z
|
2022-01-26T15:29:38.000Z
|
# -*- coding: utf-8 -*-
"""
Invoke - Tasks
==============
"""
import biblib.bib
import fnmatch
import os
import re
import uuid
from invoke import task
import colour_datasets
from colour.utilities import message_box
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2019-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'APPLICATION_NAME', 'APPLICATION_VERSION', 'PYTHON_PACKAGE_NAME',
'PYPI_PACKAGE_NAME', 'BIBLIOGRAPHY_NAME', 'clean', 'formatting', 'tests',
'quality', 'examples', 'preflight', 'docs', 'todo', 'requirements',
'build', 'virtualise', 'tag', 'release', 'sha256'
]
APPLICATION_NAME = colour_datasets.__application_name__
APPLICATION_VERSION = colour_datasets.__version__
PYTHON_PACKAGE_NAME = colour_datasets.__name__
PYPI_PACKAGE_NAME = 'colour-datasets'
BIBLIOGRAPHY_NAME = 'BIBLIOGRAPHY.bib'
@task
def clean(ctx, docs=True, bytecode=False):
    """
    Cleans the project.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.
    docs : bool, optional
        Whether to clean the *docs* directory.
    bytecode : bool, optional
        Whether to clean the bytecode files, e.g. *.pyc* files.

    Returns
    -------
    bool
        Task success.
    """

    message_box('Cleaning project...')

    targets = ['build', '*.egg-info', 'dist']
    if docs:
        targets += ['docs/_build', 'docs/generated']
    if bytecode:
        targets += ['**/__pycache__', '**/*.pyc']

    for target in targets:
        ctx.run("rm -rf {}".format(target))
@task
def formatting(ctx, yapf=True, asciify=True, bibtex=True):
    """
    Formats the codebase with *Yapf*, converts unicode characters to ASCII and
    cleanup the "BibTeX" file.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.
    yapf : bool, optional
        Whether to format the codebase with *Yapf*.
    asciify : bool, optional
        Whether to convert unicode characters to ASCII.
    bibtex : bool, optional
        Whether to cleanup the *BibTeX* file.

    Returns
    -------
    bool
        Task success.
    """

    if yapf:
        message_box('Formatting codebase with "Yapf"...')
        ctx.run('yapf -p -i -r --exclude \'.git\' .')

    if asciify:
        message_box('Converting unicode characters to ASCII...')
        with ctx.cd('utilities'):
            ctx.run('./unicode_to_ascii.py')

    # FIX: this section previously ran unconditionally - the "bibtex"
    # parameter was documented but never checked. It also reused the name
    # "bibtex" for the parsed entries, shadowing the parameter.
    if bibtex:
        message_box('Cleaning up "BibTeX" file...')
        bibtex_path = BIBLIOGRAPHY_NAME
        with open(bibtex_path) as bibtex_file:
            entries = biblib.bib.Parser().parse(
                bibtex_file.read()).get_entries()

        for entry in sorted(entries.values(), key=lambda x: x.key):
            # Drop local "file" fields and escape unescaped ampersands.
            try:
                del entry['file']
            except KeyError:
                pass

            for key, value in entry.items():
                entry[key] = re.sub('(?<!\\\\)\\&', '\\&', value)

        with open(bibtex_path, 'w') as bibtex_file:
            for entry in sorted(entries.values(), key=lambda x: x.key):
                bibtex_file.write(entry.to_bib())
                bibtex_file.write('\n')
@task
def tests(ctx, nose=True):
    """
    Runs the unit tests with *Nose* or *Pytest*.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.
    nose : bool, optional
        Whether to use *Nose* or *Pytest*.

    Returns
    -------
    bool
        Task success.
    """

    if not nose:
        message_box('Running "Pytest"...')
        ctx.run('py.test --disable-warnings --doctest-modules '
                '--ignore={0}/examples {0}'.format(PYTHON_PACKAGE_NAME))
        return

    message_box('Running "Nosetests"...')
    ctx.run('nosetests --with-doctest --with-coverage '
            '--traverse-namespace --cover-package={0} {0}'.format(
                PYTHON_PACKAGE_NAME))
@task
def quality(ctx, flake8=True, rstlint=True):
    """
    Checks the codebase with *Flake8* and lints various *restructuredText*
    files with *rst-lint*.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.
    flake8 : bool, optional
        Whether to check the codebase with *Flake8*.
    rstlint : bool, optional
        Whether to lint various *restructuredText* files with *rst-lint*.

    Returns
    -------
    bool
        Task success.
    """

    checks = (
        (flake8, 'Checking codebase with "Flake8"...',
         'flake8 {0} --exclude=examples'.format(PYTHON_PACKAGE_NAME)),
        (rstlint, 'Linting "README.rst" file...', 'rst-lint README.rst'),
    )

    for enabled, message, command in checks:
        if enabled:
            message_box(message)
            ctx.run(command)
@task
def examples(ctx):
    """
    Runs the examples.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.

    Returns
    -------
    bool
        Task success.
    """

    message_box('Running examples...')

    examples_directory = os.path.join(PYTHON_PACKAGE_NAME, 'examples')
    for root, _dirnames, filenames in os.walk(examples_directory):
        for script in fnmatch.filter(filenames, '*.py'):
            ctx.run('python {0}'.format(os.path.join(root, script)))
# The decorator arguments declare dependency tasks which invoke runs first;
# the body itself only reports completion.
@task(formatting, tests, quality, examples)
def preflight(ctx):
    """
    Performs the preflight tasks, i.e. *formatting*, *tests*, *quality*, and
    *examples*.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.

    Returns
    -------
    bool
        Task success.
    """

    message_box('Finishing "Preflight"...')
@task
def docs(ctx, html=True, pdf=True):
    """
    Builds the documentation.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.
    html : bool, optional
        Whether to build the *HTML* documentation.
    pdf : bool, optional
        Whether to build the *PDF* documentation.

    Returns
    -------
    bool
        Task success.
    """

    builders = []
    if html:
        builders.append(('HTML', 'make html'))
    if pdf:
        builders.append(('PDF', 'make latexpdf'))

    with ctx.prefix('export COLOUR_SCIENCE__DOCUMENTATION_BUILD=True'):
        with ctx.cd('docs'):
            for label, command in builders:
                message_box('Building "{0}" documentation...'.format(label))
                ctx.run(command)
@task
def todo(ctx):
    """
    Export the TODO items.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.

    Returns
    -------
    bool
        Task success.
    """

    message_box('Exporting "TODO" items...')

    # Delegates the actual extraction to the repository helper script.
    with ctx.cd('utilities'):
        ctx.run('./export_todo.py')
@task
def requirements(ctx):
    """
    Exports the *requirements.txt* file.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.

    Returns
    -------
    bool
        Task success.
    """

    message_box('Exporting "requirements.txt" file...')

    # Freeze the poetry environment, filtering out the package itself.
    ctx.run('poetry run pip list --format=freeze | '
            'egrep -v "colour-datasets=" '
            '> requirements.txt')
@task(clean, preflight, docs, todo, requirements)
def build(ctx):
    """
    Builds the project and runs dependency tasks, i.e. *docs*, *todo*, and
    *preflight*.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.

    Returns
    -------
    bool
        Task success.
    """

    message_box('Building...')
    ctx.run('poetry build')

    # Extract the poetry-generated setup.py from the sdist so it can be
    # rewritten below into an explicit keyword-argument form.
    with ctx.cd('dist'):
        ctx.run('tar -xvf {0}-{1}.tar.gz'.format(PYPI_PACKAGE_NAME,
                                                 APPLICATION_VERSION))
        ctx.run('cp {0}-{1}/setup.py ../'.format(PYPI_PACKAGE_NAME,
                                                 APPLICATION_VERSION))
        ctx.run('rm -rf {0}-{1}'.format(PYPI_PACKAGE_NAME,
                                        APPLICATION_VERSION))

    with open('setup.py') as setup_file:
        source = setup_file.read()

    setup_kwargs = []

    # re.sub replacement callback: captures the match (the setup_kwargs dict
    # body) and deletes it from the source.
    def sub_callable(match):
        setup_kwargs.append(match)

        return ''

    template = """
setup({0}
)
"""

    # Inject the codecs import needed by the long_description rewrite.
    source = re.sub('from setuptools import setup',
                    'import codecs\nfrom setuptools import setup', source)
    # Capture and strip the "setup_kwargs = {...}; setup(**setup_kwargs)"
    # block; [:-2] drops the trailing newlines left behind.
    source = re.sub(
        'setup_kwargs = {(.*)}.*setup\\(\\*\\*setup_kwargs\\)',
        sub_callable,
        source,
        flags=re.DOTALL)[:-2]
    setup_kwargs = setup_kwargs[0].group(1).splitlines()
    for i, line in enumerate(setup_kwargs):
        # Turn "'name': value" dict entries into "name=value" keyword args.
        setup_kwargs[i] = re.sub('^\\s*(\'(\\w+)\':\\s?)', ' \\2=', line)
        if setup_kwargs[i].strip().startswith('long_description'):
            # Read the long description from README.rst at install time.
            setup_kwargs[i] = (' long_description='
                               'codecs.open(\'README.rst\', encoding=\'utf8\')'
                               '.read(),')

    source += template.format('\n'.join(setup_kwargs))

    with open('setup.py', 'w') as setup_file:
        setup_file.write(source)

    ctx.run('twine check dist/*')
@task
def virtualise(ctx, tests=True):
    """
    Create a virtual environment for the project build.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.
    tests : bool, optional
        Whether to run tests on the virtual environment.

    Returns
    -------
    bool
        Task success.
    """

    # uuid1 makes the extracted directory name unique per invocation so
    # repeated runs do not collide.
    unique_name = '{0}-{1}'.format(PYPI_PACKAGE_NAME, uuid.uuid1())
    with ctx.cd('dist'):
        ctx.run('tar -xvf {0}-{1}.tar.gz'.format(PYPI_PACKAGE_NAME,
                                                 APPLICATION_VERSION))
        ctx.run('mv {0}-{1} {2}'.format(PYPI_PACKAGE_NAME, APPLICATION_VERSION,
                                        unique_name))
        with ctx.cd(unique_name):
            # Build an isolated poetry environment from the sdist contents.
            ctx.run('poetry env use 3')
            ctx.run('poetry install')
            ctx.run('source $(poetry env info -p)/bin/activate')
            if tests:
                ctx.run('poetry run nosetests')
@task
def tag(ctx):
    """
    Tags the repository according to defined version using *git-flow*.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.

    Returns
    -------
    bool
        Task success.
    """

    message_box('Tagging...')
    result = ctx.run('git rev-parse --abbrev-ref HEAD', hide='both')

    assert result.stdout.strip() == 'develop', (
        'Are you still on a feature or master branch?')

    # Assemble the version from the package's dunder version components.
    with open(os.path.join(PYTHON_PACKAGE_NAME, '__init__.py')) as file_handle:
        file_content = file_handle.read()
        major_version = re.search("__major_version__\\s+=\\s+'(.*)'",
                                  file_content).group(1)
        minor_version = re.search("__minor_version__\\s+=\\s+'(.*)'",
                                  file_content).group(1)
        change_version = re.search("__change_version__\\s+=\\s+'(.*)'",
                                   file_content).group(1)

    version = '.'.join((major_version, minor_version, change_version))

    result = ctx.run('git ls-remote --tags upstream', hide='both')
    remote_tags = result.stdout.strip().split('\n')
    tags = set()
    for remote_tag in remote_tags:
        # "git ls-remote --tags" lists both the tag ref and its peeled
        # "^{}" entry; strip the suffix so both collapse to one tag name.
        # FIX: the previous .replace('refs/tags/', '^{}') was a no-op
        # (split already removed the prefix), leaving "vX.Y.Z^{}"
        # duplicates in the set.
        tags.add(
            remote_tag.split('refs/tags/')[1].replace('^{}', ''))
    tags = sorted(list(tags))
    assert 'v{0}'.format(version) not in tags, (
        'A "{0}" "v{1}" tag already exists in remote repository!'.format(
            PYTHON_PACKAGE_NAME, version))

    ctx.run('git flow release start v{0}'.format(version))
    ctx.run('git flow release finish v{0}'.format(version))
# Depends on the "build" task so the dist/ artifacts exist before upload.
@task(build)
def release(ctx):
    """
    Releases the project to *Pypi* with *Twine*.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.

    Returns
    -------
    bool
        Task success.
    """

    message_box('Releasing...')
    with ctx.cd('dist'):
        ctx.run('twine upload *.tar.gz')
        ctx.run('twine upload *.whl')
@task
def sha256(ctx):
    """
    Computes the project *Pypi* package *sha256* with *OpenSSL*.

    Parameters
    ----------
    ctx : invoke.context.Context
        Context.

    Returns
    -------
    bool
        Task success.
    """

    message_box('Computing "sha256"...')
    # Hash the sdist tarball in dist/; useful for e.g. Homebrew formulae.
    with ctx.cd('dist'):
        ctx.run('openssl sha256 {0}-*.tar.gz'.format(PYPI_PACKAGE_NAME))
| 24.94332
| 79
| 0.566791
|
acfe7cd77721314d49c76e0b9f994350236b8156
| 1,991
|
py
|
Python
|
test/record/parser/test_response_whois_uniregistry_net_tattoo_status_available.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
test/record/parser/test_response_whois_uniregistry_net_tattoo_status_available.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
test/record/parser/test_response_whois_uniregistry_net_tattoo_status_available.py
|
huyphan/pyyawhois
|
77fb2f73a9c67989f1d41d98f37037406a69d136
|
[
"MIT"
] | null | null | null |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.uniregistry.net/tattoo/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisUniregistryNetTattooStatusAvailable(object):
    """Autogenerated parser tests for an available .tattoo domain against a
    whois.uniregistry.net fixture; regenerate with scripts/generate_tests.py
    rather than editing by hand (see the file header).
    """

    def setUp(self):
        # Build a Record from the raw fixture text for all assertions below.
        fixture_path = "spec/fixtures/responses/whois.uniregistry.net/tattoo/status_available.txt"
        host = "whois.uniregistry.net"
        part = yawhois.record.Part(open(fixture_path, "r").read(), host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, 'available')

    def test_available(self):
        eq_(self.record.available, True)

    def test_domain(self):
        eq_(self.record.domain, None)

    def test_nameservers(self):
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(self.record.nameservers, [])

    def test_admin_contacts(self):
        eq_(self.record.admin_contacts.__class__.__name__, 'list')
        eq_(self.record.admin_contacts, [])

    def test_registered(self):
        eq_(self.record.registered, False)

    def test_created_on(self):
        eq_(self.record.created_on, None)

    def test_registrar(self):
        eq_(self.record.registrar, None)

    def test_registrant_contacts(self):
        eq_(self.record.registrant_contacts.__class__.__name__, 'list')
        eq_(self.record.registrant_contacts, [])

    def test_technical_contacts(self):
        eq_(self.record.technical_contacts.__class__.__name__, 'list')
        eq_(self.record.technical_contacts, [])

    def test_updated_on(self):
        eq_(self.record.updated_on, None)

    def test_domain_id(self):
        eq_(self.record.domain_id, None)

    def test_expires_on(self):
        eq_(self.record.expires_on, None)
| 30.166667
| 98
| 0.692617
|
acfe7dc4ddd6362af6d5aa9df0f2375061c5fd08
| 1,451
|
py
|
Python
|
blog/migrations/0007_auto_20210205_1734.py
|
LeulAria/blogtalk
|
69f6ce2f83e61476c5b2f0981a8cb7973b454eae
|
[
"MIT"
] | null | null | null |
blog/migrations/0007_auto_20210205_1734.py
|
LeulAria/blogtalk
|
69f6ce2f83e61476c5b2f0981a8cb7973b454eae
|
[
"MIT"
] | null | null | null |
blog/migrations/0007_auto_20210205_1734.py
|
LeulAria/blogtalk
|
69f6ce2f83e61476c5b2f0981a8cb7973b454eae
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-02-05 17:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: relaxes Post timestamp fields and adds the
    Comment model. Do not hand-edit applied migrations; create a new one
    instead.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('blog', '0006_auto_20210205_1035'),
    ]

    operations = [
        # Allow Post.created_on to be unset.
        migrations.AlterField(
            model_name='post',
            name='created_on',
            field=models.DateTimeField(blank=True, null=True),
        ),
        # Auto-update Post.updated_on on save.
        migrations.AlterField(
            model_name='post',
            name='updated_on',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        # New Comment model linked to both its author and its post.
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('body', models.TextField()),
                ('created_on', models.DateTimeField()),
                ('edited', models.BooleanField(default=False)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='auth_comments', to=settings.AUTH_USER_MODEL)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.post')),
            ],
            options={
                'ordering': ['created_on'],
            },
        ),
    ]
| 35.390244
| 150
| 0.593384
|
acfe7ea695b6e4846a89ec00f133b9cc2964a4cc
| 63
|
py
|
Python
|
psi/pair/__init__.py
|
delta-mpc/python-psi
|
1665de12a713b37abd889268c66de84cddb1bf84
|
[
"Apache-2.0"
] | 35
|
2021-05-28T10:03:09.000Z
|
2022-03-24T12:08:19.000Z
|
psi/pair/__init__.py
|
delta-mpc/python-psi
|
1665de12a713b37abd889268c66de84cddb1bf84
|
[
"Apache-2.0"
] | 9
|
2021-07-15T09:16:34.000Z
|
2022-03-31T03:59:16.000Z
|
psi/pair/__init__.py
|
delta-mpc/python-psi
|
1665de12a713b37abd889268c66de84cddb1bf84
|
[
"Apache-2.0"
] | 16
|
2021-06-18T02:18:56.000Z
|
2022-03-25T02:43:48.000Z
|
from .pair import Address, Pair
from .factory import make_pair
| 21
| 31
| 0.809524
|
acfe7ed06a8b5096199cc3507db02219ea5d141d
| 5,724
|
py
|
Python
|
bindings/python/setup.py
|
aoighost/capstone
|
7ed984fac73e1cb3e4abcd8495af7cb4c6774194
|
[
"BSD-3-Clause"
] | null | null | null |
bindings/python/setup.py
|
aoighost/capstone
|
7ed984fac73e1cb3e4abcd8495af7cb4c6774194
|
[
"BSD-3-Clause"
] | null | null | null |
bindings/python/setup.py
|
aoighost/capstone
|
7ed984fac73e1cb3e4abcd8495af7cb4c6774194
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import glob
import os
import platform
import shutil
import stat
import sys
from distutils import log
from distutils import dir_util
from distutils.command.build_clib import build_clib
from distutils.command.sdist import sdist
from distutils.core import setup
from distutils.sysconfig import get_python_lib
# prebuilt libraries for Windows - for sdist
PATH_LIB64 = "prebuilt/win64/capstone.dll"
PATH_LIB32 = "prebuilt/win32/capstone.dll"

# package name can be 'capstone' or 'capstone-windows'
# (the latter when both prebuilt DLLs are shipped alongside this setup.py)
PKG_NAME = 'capstone'
if os.path.exists(PATH_LIB64) and os.path.exists(PATH_LIB32):
    PKG_NAME = 'capstone-windows'

VERSION = '3.0.2'
SYSTEM = sys.platform

# Install target for the shared library, next to the python bindings.
SITE_PACKAGES = os.path.join(get_python_lib(), "capstone")

# Filled by custom_build_clib with the shared library to install.
SETUP_DATA_FILES = []

# adapted from commit e504b81 of Nguyen Tan Cong
# Reference: https://docs.python.org/2/library/platform.html#cross-platform
is_64bits = sys.maxsize > 2**32
def copy_sources():
    """Copy the C sources into the source directory.

    This rearranges the source files under the python distribution
    directory.
    """
    # Start from a clean src/ tree; ignore a missing one.
    try:
        dir_util.remove_tree("src/")
    except (IOError, OSError):
        pass

    dir_util.copy_tree("../../arch", "src/arch/")
    dir_util.copy_tree("../../include", "src/include/")
    dir_util.copy_tree("../../msvc/headers", "src/msvc/headers")

    # Flat top-level files of the C core, matched in the same order as before.
    patterns = (
        "../../*.[ch]",
        "../../*.mk",
        "../../Makefile",
        "../../LICENSE*",
        "../../README",
        "../../*.TXT",
        "../../RELEASE_NOTES",
        "../../make.sh",
        "../../CMakeLists.txt",
    )
    sources = []
    for pattern in patterns:
        sources.extend(glob.glob(pattern))

    for filename in sources:
        outpath = os.path.join("./src/", os.path.basename(filename))
        log.info("%s -> %s" % (filename, outpath))
        shutil.copy(filename, outpath)
class custom_sdist(sdist):
    """Reshuffle files for distribution."""

    def run(self):
        # When both prebuilt Windows libraries exist, ship them as-is and
        # skip copying the C sources into the tree.
        if not (os.path.exists(PATH_LIB64) and os.path.exists(PATH_LIB32)):
            copy_sources()
        return sdist.run(self)
class custom_build_clib(build_clib):
    """Customized build_clib command that builds the capstone C core."""

    def run(self):
        log.info('running custom_build_clib')
        build_clib.run(self)

    def finalize_options(self):
        # We want build-clib to default to build-lib as defined by the "build"
        # command. This is so the compiled library will be put in the right
        # place along side the python code.
        self.set_undefined_options('build',
                                   ('build_lib', 'build_clib'),
                                   ('build_temp', 'build_temp'),
                                   ('compiler', 'compiler'),
                                   ('debug', 'debug'),
                                   ('force', 'force'))
        build_clib.finalize_options(self)

    def build_libraries(self, libraries):
        """Build the C core from src/, or register a prebuilt Windows DLL.

        Successful builds append the produced shared library path to the
        module-level SETUP_DATA_FILES so it gets installed.
        """
        if SYSTEM == "win32":
            # if Windows prebuilt library is available, then include it
            if is_64bits and os.path.exists(PATH_LIB64):
                SETUP_DATA_FILES.append(PATH_LIB64)
                return
            elif os.path.exists(PATH_LIB32):
                SETUP_DATA_FILES.append(PATH_LIB32)
                return

        # build library from source if src/ is existent
        if not os.path.exists('src'):
            return

        try:
            for (lib_name, build_info) in libraries:
                log.info("building '%s' library", lib_name)

                os.chdir("src")

                # platform description refers at
                # https://docs.python.org/2/library/sys.html#sys.platform
                if SYSTEM != "win32":
                    os.chmod("make.sh", stat.S_IREAD | stat.S_IEXEC)
                    os.system("CAPSTONE_BUILD_CORE_ONLY=yes ./make.sh")
                else:
                    # Windows build: this process requires few things:
                    # - CMake + MSVC installed
                    # - Run this command in an environment setup for MSVC
                    os.mkdir("build")
                    os.chdir("build")
                    # Do not build tests & static library
                    os.system('cmake -DCMAKE_BUILD_TYPE=RELEASE -DCAPSTONE_BUILD_TESTS=0 -DCAPSTONE_BUILD_STATIC=0 -G "NMake Makefiles" ..')
                    os.system("nmake")
                    os.chdir("..")

                if SYSTEM == "darwin":
                    SETUP_DATA_FILES.append("src/libcapstone.dylib")
                elif SYSTEM != "win32":
                    SETUP_DATA_FILES.append("src/libcapstone.so")
                else:    # Windows
                    SETUP_DATA_FILES.append("src/build/capstone.dll")

                os.chdir("..")
        except Exception:
            # FIX: was a bare "except: pass", which also swallowed
            # SystemExit/KeyboardInterrupt and hid every build failure.
            # NOTE(review): a failure after os.chdir("src") leaves the
            # working directory changed - confirm whether callers rely on it.
            log.warn("capstone core build failed; continuing without it")
def dummy_src():
    """Return an empty source list; the C core is built by custom_build_clib
    instead of distutils' own compiler machinery."""
    return list()
# Declarative package metadata; the custom cmdclass entries hook the native
# core build (custom_build_clib) and the source reshuffling (custom_sdist)
# into the standard distutils workflow, and data_files installs whatever
# shared library custom_build_clib recorded in SETUP_DATA_FILES.
setup(
    provides=['capstone'],
    packages=['capstone'],
    name=PKG_NAME,
    version=VERSION,
    author='Nguyen Anh Quynh',
    author_email='aquynh@gmail.com',
    description='Capstone disassembly engine',
    url='http://www.capstone-engine.org',
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
    requires=['ctypes'],
    cmdclass=dict(
        build_clib=custom_build_clib,
        sdist=custom_sdist,
    ),
    libraries=[(
        'capstone', dict(
            package='capstone',
            sources=dummy_src()
        ),
    )],
    data_files=[(SITE_PACKAGES, SETUP_DATA_FILES)],
)
| 31.8
| 140
| 0.584382
|
acfe7faf5a9bd332b66bb44257f4560796472d74
| 817
|
py
|
Python
|
examples/gateway.py
|
anegramotnov/prometheus_udp_gateway
|
08e20bfde6fecdb687d2ed62ffd1d393fa498253
|
[
"WTFPL"
] | null | null | null |
examples/gateway.py
|
anegramotnov/prometheus_udp_gateway
|
08e20bfde6fecdb687d2ed62ffd1d393fa498253
|
[
"WTFPL"
] | null | null | null |
examples/gateway.py
|
anegramotnov/prometheus_udp_gateway
|
08e20bfde6fecdb687d2ed62ffd1d393fa498253
|
[
"WTFPL"
] | null | null | null |
import argparse
import prometheus_udp_gateway
import metrics
# TODO: entrypoint in setup.py
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Prometheus UDP Gateway'
    )
    # Both ports are mandatory; the log level defaults to INFO.
    parser.add_argument('-p', '--prometheus_port', required=True, type=int)
    parser.add_argument('-g', '--udp_gateway_port', required=True, type=int)
    parser.add_argument('-l', '--log_level', required=False, type=str, default='INFO')

    args = parser.parse_args()

    gateway = prometheus_udp_gateway.PrometheusUdpGateway(
        # TODO: remove udp_registry, add metrics ad-hoc
        udp_registry=metrics.udp_registry,
        prometheus_port=args.prometheus_port,
        gateway_port=args.udp_gateway_port,
        log_level=args.log_level,
    )
    # presumably blocks serving until interrupted - see
    # PrometheusUdpGateway.run for the actual behaviour.
    gateway.run()
| 30.259259
| 87
| 0.680539
|
acfe801506789e87d59decba65c5fd80de5e51ae
| 1,981
|
py
|
Python
|
main.py
|
milosgajdos83/creep-dreamz
|
a1d12f19bdeb5493c2a7327f760282180decbfd2
|
[
"Apache-2.0"
] | 1
|
2021-10-15T13:10:25.000Z
|
2021-10-15T13:10:25.000Z
|
main.py
|
milosgajdos/creep-dreamz
|
a1d12f19bdeb5493c2a7327f760282180decbfd2
|
[
"Apache-2.0"
] | 9
|
2020-03-24T15:36:16.000Z
|
2022-02-09T23:29:54.000Z
|
main.py
|
milosgajdos83/creep-dreamz
|
a1d12f19bdeb5493c2a7327f760282180decbfd2
|
[
"Apache-2.0"
] | 1
|
2021-10-15T13:10:09.000Z
|
2021-10-15T13:10:09.000Z
|
import argparse
from creep_dreamz import CreepDream
from image import save, preprocess
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Creep Dreamz with Keras.')
    parser.add_argument('-i', '--input', type=str,
                        help='Path to the input data', required=True)
    parser.add_argument('-o', '--output', type=str,
                        help='Path to the output data', required=True)
    parser.add_argument('-m', '--model', type=str,
                        help='Keras model name', required=True)
    # NOTE(review): the optional arguments below have no defaults, so when
    # omitted they are passed to dream.run() as None - confirm CreepDream.run
    # handles None for each of them.
    parser.add_argument('-iter', '--iterations', type=int,
                        help='Number of gradient ascent steps per scale', required=False)
    parser.add_argument('-s', '--step', type=float,
                        help='Gradient ascent step size', required=False)
    parser.add_argument('-oct', '--octave', type=int,
                        help='Number of scales at which to run gradient ascent', required=False)
    parser.add_argument('-ocs', '--octavescale', type=float,
                        help='Size ratio between scales', required=False)
    parser.add_argument('-mxl', '--maxloss', type=float,
                        help='Maximum gradient ascent loss', required=False)

    # These are the names of the InceptionV3 50 layers
    # for which we try to maximize activation,
    # as well as their weight in the loss # we try to maximize.
    # You can tweak these setting to obtain new visual effects.
    config = {
        'mixed2': 0.2,
        'mixed3': 0.5,
        'mixed4': 2.,
        'mixed5': 1.5,
    }

    args = parser.parse_args()

    dream = CreepDream(args.model, config)
    dream = dream.compile()
    # preprocess image
    img = preprocess(args.input, args.model)
    # start creep dreaming
    img = dream.run(img, args.iterations, args.step, args.octave,
                    args.octavescale, args.maxloss)
    # save resulting image to hard drive
    save(img, fname=args.output)
| 42.148936
| 96
| 0.611307
|
acfe80cb642080b3ae9cc02d75ebdf5eef8aaaa2
| 14,681
|
py
|
Python
|
test/functional/p2p_unrequested_blocks.py
|
PETTOKEN/PETT
|
46dbd0f86b002e87e615d6237535d1a389a678e2
|
[
"MIT"
] | null | null | null |
test/functional/p2p_unrequested_blocks.py
|
PETTOKEN/PETT
|
46dbd0f86b002e87e615d6237535d1a389a678e2
|
[
"MIT"
] | null | null | null |
test/functional/p2p_unrequested_blocks.py
|
PETTOKEN/PETT
|
46dbd0f86b002e87e615d6237535d1a389a678e2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c.Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("pettokenD", "pettokend"),
help="pettokend binary to test")
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
# Node2 will be used for non-whitelisted peers to test the interaction
# with nMinimumChainWork.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
# test_node connects to node0 (not whitelisted)
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
# min_work_node connects to node1 (whitelisted)
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
network_thread_start()
# Test logic begins here
test_node.wait_for_verack()
min_work_node.wait_for_verack()
# 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].nVersion = 0x20000000
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_h1f.nVersion = 0x20000000
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
# 4. Send another two block that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_h2f.nVersion = 0x20000000
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.nVersion = 0x20000000
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
self.nodes[0].getblock(block_h3.hash)
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node (as long as its not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.nVersion = 0x20000000
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
network_thread_join()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.nVersion = 0x20000000
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.nVersion = 0x20000000
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
block_291.nVersion = 0x20000000
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.nVersion = 0x20000000
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.nVersion = 0x20000000
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 1)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
| 43.95509
| 113
| 0.675431
|
acfe82493fd95d619eeab90c3ff2180c973eb722
| 3,477
|
py
|
Python
|
smart_qq_plugins/basic.py
|
LiuSecone/DeeCamp-13
|
83c0d6a233c0164c5d2daedac5ca4dafe54ef0c4
|
[
"MIT"
] | 5
|
2018-08-10T01:45:50.000Z
|
2019-10-10T16:23:01.000Z
|
smart_qq_plugins/basic.py
|
LiuSecone/DeeCamp-13
|
83c0d6a233c0164c5d2daedac5ca4dafe54ef0c4
|
[
"MIT"
] | null | null | null |
smart_qq_plugins/basic.py
|
LiuSecone/DeeCamp-13
|
83c0d6a233c0164c5d2daedac5ca4dafe54ef0c4
|
[
"MIT"
] | 1
|
2018-11-18T09:55:52.000Z
|
2018-11-18T09:55:52.000Z
|
# -*- coding: utf-8 -*-
import random
from smart_qq_bot.logger import logger
from smart_qq_bot.signals import (
on_all_message,
on_group_message,
on_private_message,
on_discuss_message,
)
import urllib
import sys
import time
sys.path.append('../chatbot_cut/')
# =====唤出插件=====
# 机器人连续回复相同消息时可能会出现
# 服务器响应成功,但实际并没有发送成功的现象
# 所以尝试通过随机后缀来尽量避免这一问题
REPLY_SUFFIX = (
'~',
'!',
'?',
'||',
)
dict={}
@on_all_message(name='basic[callout]')
def callout(msg, bot):
if "智障机器人" in msg.content:
reply = bot.reply_msg(msg, return_function=True)
logger.info("RUNTIMELOG " + str(msg.from_uin) + " calling me out, trying to reply....")
reply_content = "干嘛(‘·д·)" + random.choice(REPLY_SUFFIX)
reply(reply_content)
elif len(msg.content)>=2 and "!"==msg.content[0] and "!"!=msg.content[1] and msg.content.find('|')!=-1 and len(msg.content[1:msg.content.find('|')])!=0:
s1=msg.content[1:msg.content.find('|')]
s2=msg.content[msg.content.find('|')+1:]
dict[s1]=s2
reply = bot.reply_msg(msg, return_function=True)
reply("add!")
else:
for term in dict:
if term in msg.content:
reply = bot.reply_msg(msg, return_function=True)
reply(dict[term])
return
if("!"==msg.content[0]):
print("===========-------------=")#test
with open('qq_intput.txt', 'w', encoding='utf-8') as f:
print(msg.content[1:],file=f)
print('1')
time.sleep(3)
flag=1
while flag==1:
with open('qq_intput.txt', 'r', encoding='utf-8') as f:
st=f.read()
if(st[0]=='%'):
flag=0
print('4')
st=st[1:].strip()
time.sleep(0.1)
reply = bot.reply_msg(msg, return_function=True)
reply(st)
return
# =====复读插件=====
class Recorder(object):
def __init__(self):
self.msg_list = list()
self.last_reply = ""
recorder = Recorder()
@on_group_message(name='basic[repeat]')
def repeat(msg, bot):
global recorder
reply = bot.reply_msg(msg, return_function=True)
if len(recorder.msg_list) > 0 and recorder.msg_list[-1].content == msg.content and recorder.last_reply != msg.content:
if str(msg.content).strip() not in ("", " ", "[图片]", "[表情]"):
logger.info("RUNTIMELOG " + str(msg.group_code) + " repeating, trying to reply " + str(msg.content))
reply(msg.content)
recorder.last_reply = msg.content
recorder.msg_list.append(msg)
@on_group_message(name='basic[三个问题]')
def nick_call(msg, bot):
if "我是谁" == msg.content:
bot.reply_msg(msg, "你是{}({})!".format(msg.src_sender_card or msg.src_sender_name, msg.src_sender_id))
elif "我在哪" == msg.content:
bot.reply_msg(msg, "你在{name}({id})!".format(name=msg.src_group_name, id=msg.src_group_id))
elif msg.content in ("我在干什么", "我在做什么"):
bot.reply_msg(msg, "你在调戏我!!")
@on_discuss_message(name='basic[讨论组三个问题]')
def discuss_three_questions(msg, bot):
if "我是谁" == msg.content:
bot.reply_msg(msg, "你是{}!".format(msg.src_sender_name))
elif "我在哪" == msg.content:
bot.reply_msg(msg, "你在{name}!".format(name=msg.src_discuss_name))
elif msg.content in ("我在干什么", "我在做什么"):
bot.reply_msg(msg, "你在调戏我!!")
| 31.609091
| 156
| 0.571757
|
acfe8284bcff0c04cc3709dd27f0bc6b1cf39451
| 2,715
|
py
|
Python
|
mimic3-readmission/mimic3models/common_utils.py
|
yzhouas/MIMIC-III_ICU_Readmission_Analysis
|
b876a2b6f1c431bf556b690cc9fcb9e3ce75c81c
|
[
"MIT"
] | 16
|
2018-06-01T09:07:55.000Z
|
2022-02-24T04:23:46.000Z
|
mimic3-readmission/mimic3models/common_utils.py
|
yzhouas/MIMIC-III_ICU_Readmission_Analysis
|
b876a2b6f1c431bf556b690cc9fcb9e3ce75c81c
|
[
"MIT"
] | 2
|
2021-07-20T02:53:00.000Z
|
2021-07-25T03:09:41.000Z
|
mimic3-readmission/mimic3models/common_utils.py
|
yzhouas/MIMIC-III_ICU_Readmission_Analysis
|
b876a2b6f1c431bf556b690cc9fcb9e3ce75c81c
|
[
"MIT"
] | 8
|
2018-06-01T09:08:00.000Z
|
2022-02-24T04:24:17.000Z
|
import os
def read_chunk(reader, chunk_size):
data = {}
for i in range(chunk_size):
ret = reader.read_next()
for k, v in ret.items():
if k not in data:
data[k] = []
data[k].append(v)
data["header"] = data["header"][0]
return data
def add_common_arguments(parser):
""" Add all the parameters which are common across the tasks
"""
parser.add_argument('--network', type=str, required=True)
parser.add_argument('--dim', type=int, default=256,
help='number of hidden units')
parser.add_argument('--depth', type=int, default=1,
help='number of bi-LSTMs')
parser.add_argument('--epochs', type=int, default=100,
help='number of chunks to train')
parser.add_argument('--load_state', type=str, default="",
help='state file path')
parser.add_argument('--mode', type=str, default="train",
help='mode: train or test')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--l2', type=float, default=0, help='L2 regularization')
parser.add_argument('--l1', type=float, default=0, help='L1 regularization')
parser.add_argument('--save_every', type=int, default=1,
help='save state every x epoch')
parser.add_argument('--prefix', type=str, default="",
help='optional prefix of network name')
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--rec_dropout', type=float, default=0.0,
help="dropout rate for recurrent connections")
parser.add_argument('--batch_norm', type=bool, default=False,
help='batch normalization')
parser.add_argument('--timestep', type=float, default=1.0,
help="fixed timestep used in the dataset")
parser.add_argument('--imputation', type=str, default='previous')
parser.add_argument('--small_part', dest='small_part', action='store_true')
parser.add_argument('--whole_data', dest='small_part', action='store_false')
parser.add_argument('--optimizer', type=str, default='adam')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument('--beta_1', type=float, default=0.9,
help='beta_1 param for Adam optimizer')
parser.add_argument('--verbose', type=int, default=2)
parser.add_argument('--size_coef', type=float, default=4.0)
parser.set_defaults(small_part=False)
def create_directory(directory):
if not os.path.exists(directory):
os.makedirs(directory)
| 45.25
| 80
| 0.618416
|
acfe82c27d8d58eaa6969b7195950465279f5e3d
| 8,795
|
py
|
Python
|
cogs/init.py
|
turbomam/cogs
|
df2c89c0c6adcadfbe565ebcad58d1574205fb18
|
[
"BSD-3-Clause"
] | 5
|
2020-06-30T19:03:34.000Z
|
2022-01-04T21:26:11.000Z
|
cogs/init.py
|
turbomam/cogs
|
df2c89c0c6adcadfbe565ebcad58d1574205fb18
|
[
"BSD-3-Clause"
] | 89
|
2020-06-09T19:46:24.000Z
|
2022-02-11T01:26:11.000Z
|
cogs/init.py
|
turbomam/cogs
|
df2c89c0c6adcadfbe565ebcad58d1574205fb18
|
[
"BSD-3-Clause"
] | 2
|
2020-06-30T17:53:45.000Z
|
2022-02-02T23:49:25.000Z
|
import csv
import gspread
import json
import logging
import os
import warnings
from cogs.exceptions import InitError
from cogs.helpers import is_email, is_valid_role, get_client, get_version, set_logging
default_fields = [
{
"Field": "sheet",
"Label": "Sheet",
"Datatype": "cogs:sql_id",
"Description": "The identifier for this sheet",
},
{
"Field": "label",
"Label": "Label",
"Datatype": "cogs:label",
"Description": "The label for this row",
},
{
"Field": "file_path",
"Label": "File Path",
"Datatype": "cogs:file_path",
"Description": "The relative path of the TSV file for this sheet",
},
{
"Field": "description",
"Label": "Description",
"Datatype": "cogs:text",
"Description": "A description of this row",
},
{
"Field": "field",
"Label": "Field",
"Datatype": "cogs:sql_id",
"Description": "The identifier for this field",
},
{
"Field": "datatype",
"Label": "Datatype",
"Datatype": "cogs:curie",
"Description": "The datatype for this row",
},
]
# 0 = error, 1 = warn, 2 = info
default_formats = {
"0": {
"backgroundColor": {"blue": 0.7019608, "green": 0.7019608, "red": 1},
"backgroundColorStyle": {"rgbColor": {"blue": 0.7019608, "green": 0.7019608, "red": 1}},
"borders": {
"bottom": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
"left": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
"right": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
"top": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
},
},
"1": {
"backgroundColor": {"blue": 0.5921569, "green": 1, "red": 1},
"backgroundColorStyle": {"rgbColor": {"blue": 0.5921569, "green": 1, "red": 1}},
"borders": {
"bottom": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
"left": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
"right": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
"top": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
},
},
"2": {
"backgroundColor": {"blue": 1, "green": 0.87058824, "red": 0.7254902},
"backgroundColorStyle": {"rgbColor": {"blue": 1, "green": 0.87058824, "red": 0.7254902}},
"borders": {
"bottom": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
"left": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
"right": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
"top": {"color": {}, "colorStyle": {"rgbColor": {}}, "style": "SOLID", "width": 1,},
},
},
}
def get_users(user=None, users_file=None, role="writer"):
"""Return a dict of user emails to their roles."""
users = {}
# Single user specified
if user:
# Validate the email
if not is_email(user):
raise InitError(f"{user} is not a valid email")
# Validate the role
if not is_valid_role(role):
raise InitError(f"'{role}' is not a valid role")
users[user] = role
# Multiple users specified
elif users_file:
if not os.path.exists(users_file):
raise InitError(f"users file '{users_file}' does not exist")
with open(users_file, "r") as f:
reader = csv.reader(f, delimiter="\t")
i = 1
for row in reader:
email = row[0]
role = row[1].strip().lower()
if not is_email(email):
if i == 1:
# Skip the first line if it does not have an email in the first column
# Allowing for users to have their own headers, or not
continue
else:
# Any line past the first should always have an email in the first column
raise InitError(
f"{email} is not a valid email address ({users_file}, line {i})"
)
# Validate the role
if not is_valid_role(role):
raise InitError(f"'{role}' is not a valid role ({users_file}, line {i})")
users[email] = role
i += 1
else:
# Show a warning - get_users does nothing
warnings.warn("`get_users()` called with no user or users", RuntimeWarning)
return users
def write_data(sheet, title, credentials=None):
"""Create COGS data files in COGS directory: config.tsv, sheet.tsv, format.tsv, formats.json,
note.tsv, and validation.tsv."""
# Create the "tracked" directory
os.mkdir(".cogs/tracked")
# Store COGS configuration
with open(".cogs/config.tsv", "w") as f:
writer = csv.DictWriter(f, delimiter="\t", lineterminator="\n", fieldnames=["Key", "Value"])
v = get_version()
writer.writerow({"Key": "COGS", "Value": "https://github.com/ontodev/cogs"})
writer.writerow({"Key": "COGS Version", "Value": v})
if credentials:
writer.writerow({"Key": "Credentials", "Value": credentials})
writer.writerow({"Key": "Title", "Value": title})
writer.writerow({"Key": "Spreadsheet ID", "Value": sheet.id})
# sheet.tsv contains sheet (table/tab) details from the spreadsheet
with open(".cogs/sheet.tsv", "w") as f:
writer = csv.DictWriter(
f,
delimiter="\t",
lineterminator="\n",
fieldnames=[
"ID",
"Title",
"Path",
"Description",
"Frozen Rows",
"Frozen Columns",
"Ignore",
],
)
writer.writeheader()
# format.tsv contains all cells with formats -> format IDs
with open(".cogs/format.tsv", "w") as f:
writer = csv.DictWriter(
f, delimiter="\t", lineterminator="\n", fieldnames=["Sheet Title", "Cell", "Format ID"],
)
writer.writeheader()
with open(".cogs/formats.json", "w") as f:
f.write(json.dumps(default_formats, sort_keys=True, indent=4))
# note.tsv contains all cells with notes -> note
with open(".cogs/note.tsv", "w") as f:
writer = csv.DictWriter(
f, delimiter="\t", lineterminator="\n", fieldnames=["Sheet Title", "Cell", "Note"],
)
writer.writeheader()
with open(".cogs/validation.tsv", "w") as f:
writer = csv.DictWriter(
f,
delimiter="\t",
lineterminator="\n",
fieldnames=["Sheet Title", "Range", "Condition", "Value"],
)
writer.writeheader()
def init(title, user=None, role="writer", users_file=None, credentials=None, verbose=False):
"""Init a new .cogs configuration directory in the current working directory. If one already
exists, display an error message. Return True if project was created. Return False if a COGS
project already exists in the directory."""
set_logging(verbose)
cwd = os.getcwd()
if os.path.exists(".cogs"):
# Do not raise CogsError, or else .cogs/ will be deleted
logging.critical(f"COGS project already exists in {cwd}/.cogs/")
return False
logging.info(f"initializing COGS project '{title}' in {cwd}/.cogs/")
os.mkdir(".cogs")
# Process supplied users
users = {}
if user or users_file:
users = get_users(user=user, role=role, users_file=users_file)
# Create a Client to access API
if credentials:
# Use a credentials file
gc = get_client(credentials_path=credentials)
else:
# Use environment vars
gc = get_client()
# Create the new Sheet
try:
spreadsheet = gc.create(title)
except gspread.exceptions.APIError as e:
raise InitError(f"Unable to create new spreadsheet '{title}'\n" f"CAUSE: {e.response.text}")
# Share with each user
for email, role in users.items():
try:
spreadsheet.share(email, perm_type="user", role=role)
except gspread.exceptions.APIError as e:
logging.error(f"Unable to share '{title}' with {email} as {role}\n" + e.response.text)
# Write data to COGS directory
write_data(spreadsheet, title, credentials=credentials)
return True
| 36.645833
| 100
| 0.541558
|
acfe83162f0952c458a2d46212a4b1c865ccced5
| 1,574
|
py
|
Python
|
tests/case2/github/test_repo_with_branch_develop.py
|
cffbots/howfairis
|
008552b7266e229bd38553631d7dfe3554df18b2
|
[
"Apache-2.0"
] | 27
|
2020-09-10T10:04:56.000Z
|
2022-02-07T23:24:13.000Z
|
tests/case2/github/test_repo_with_branch_develop.py
|
cffbots/howfairis
|
008552b7266e229bd38553631d7dfe3554df18b2
|
[
"Apache-2.0"
] | 297
|
2020-09-07T14:10:08.000Z
|
2022-02-18T09:46:30.000Z
|
tests/case2/github/test_repo_with_branch_develop.py
|
cffbots/howfairis
|
008552b7266e229bd38553631d7dfe3554df18b2
|
[
"Apache-2.0"
] | 6
|
2020-09-10T12:58:37.000Z
|
2022-03-11T10:17:21.000Z
|
from howfairis import Platform
from howfairis import Repo
from tests.contracts.repo import Contract
def get_repo():
return Repo("https://github.com/fair-software/repo1", branch="develop")
class TestRepoWithBranchDevelop(Contract):
def test_api(self, mocker):
with mocker:
repo = get_repo()
assert repo.api == "https://api.github.com/repos/fair-software/repo1"
def test_branch(self, mocker):
with mocker:
repo = get_repo()
assert repo.branch == "develop"
def test_default_branch(self, mocker):
with mocker:
repo = get_repo()
assert repo.default_branch is None
def test_owner(self, mocker):
with mocker:
repo = get_repo()
assert repo.owner == "fair-software"
def test_path(self, mocker):
with mocker:
repo = get_repo()
assert repo.path == ""
def test_platform(self, mocker):
with mocker:
repo = get_repo()
assert repo.platform == Platform.GITHUB
def test_raw_url_format_string(self, mocker):
with mocker:
repo = get_repo()
assert repo.raw_url_format_string == "https://raw.githubusercontent.com/fair-software/repo1/develop/{0}"
def test_repo(self, mocker):
with mocker:
repo = get_repo()
assert repo.repo == "repo1"
def test_url(self, mocker):
with mocker:
repo = get_repo()
assert repo.url == "https://github.com/fair-software/repo1"
| 28.107143
| 116
| 0.598475
|
acfe8377dd93daa447754286c081ec430d377778
| 434
|
py
|
Python
|
ST_DM/KDD2021-MSTPAC/code/MST-PAC/utils/model_eval/log_vdl.py
|
zhangyimi/Research
|
866f91d9774a38d205d6e9a3b1ee6293748261b3
|
[
"Apache-2.0"
] | 1,319
|
2020-02-14T10:42:07.000Z
|
2022-03-31T15:42:18.000Z
|
ST_DM/KDD2021-MSTPAC/code/MST-PAC/utils/model_eval/log_vdl.py
|
green9989/Research
|
94519a72e7936c77f62a31709634b72c09aabf74
|
[
"Apache-2.0"
] | 192
|
2020-02-14T02:53:34.000Z
|
2022-03-31T02:25:48.000Z
|
ST_DM/KDD2021-MSTPAC/code/MST-PAC/utils/model_eval/log_vdl.py
|
green9989/Research
|
94519a72e7936c77f62a31709634b72c09aabf74
|
[
"Apache-2.0"
] | 720
|
2020-02-14T02:12:38.000Z
|
2022-03-31T12:21:15.000Z
|
import os
import sys
from visualdl import LogWriter
global_step = int(sys.argv[2])
print(global_step)
print('VDL_PATH', os.getenv("VDL_LOG_PATH"))
with open(sys.argv[1]) as f, \
LogWriter("{}/log_{}".format(os.getenv("VDL_LOG_PATH"), "METRICS"), file_name='vdlrecords.metrics.log') as writer:
for line in f:
line = line.strip().split(':')
writer.add_scalar(tag=line[0], value=float(line[1]), step=global_step)
| 36.166667
| 118
| 0.68894
|
acfe8444628b72f3093503a9f5a1c009e2dd83b5
| 1,555
|
py
|
Python
|
var/spack/repos/builtin/packages/r-rngtools/package.py
|
varioustoxins/spack
|
cab0e4cb240f34891a6d753f3393e512f9a99e9a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-rngtools/package.py
|
varioustoxins/spack
|
cab0e4cb240f34891a6d753f3393e512f9a99e9a
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6
|
2022-01-08T08:41:11.000Z
|
2022-03-14T19:28:07.000Z
|
var/spack/repos/builtin/packages/r-rngtools/package.py
|
foeroyingur/spack
|
5300cbbb2e569190015c72d0970d25425ea38647
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRngtools(RPackage):
"""Utility Functions for Working with Random Number Generators
Provides a set of functions for working with Random Number Generators
(RNGs). In particular, a generic S4 framework is defined for
getting/setting the current RNG, or RNG data that are embedded into objects
for reproducibility. Notably, convenient default methods greatly facilitate
the way current RNG settings can be changed."""
homepage = "https://renozao.github.io/rngtools"
url = "https://cloud.r-project.org/src/contrib/rngtools_1.4.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/rngtools"
version('1.5', sha256='8274873b73f7acbe0ce007e62893bf4d369d2aab8768754a60da46b3f078f575')
version('1.4', sha256='3aa92366e5d0500537964302f5754a750aff6b169a27611725e7d84552913bce')
version('1.3.1.1', sha256='99e1a8fde6b81128d0946746c1ef84ec5b6c2973ad843a080098baf73aa3364c')
version('1.3.1', sha256='763fc493cb821a4d3e514c0dc876d602a692c528e1d67f295dde70c77009e224')
depends_on('r@3.0.0:', type=('build', 'run'))
depends_on('r@3.2.0:', when='@1.4:', type=('build', 'run'))
depends_on('r-digest', type=('build', 'run'))
depends_on('r-pkgmaker@0.20:', when='@:1.4', type=('build', 'run'))
depends_on('r-stringr', when='@:1.4', type=('build', 'run'))
| 48.59375
| 97
| 0.729904
|
acfe84604314a032875134fbd19e29bd36420e98
| 576
|
py
|
Python
|
2015/05/part1.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
2015/05/part1.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
2015/05/part1.py
|
timofurrer/aoc-2020
|
446b688a57601d9891f520e43b7f822c373a6ff4
|
[
"MIT"
] | null | null | null |
from pathlib import Path
with (Path(__file__).parent / "input.txt").open() as puzzle_input_file:
puzzle_input_raw = puzzle_input_file.read()
contains_at_least_three_vowels = lambda w: len([x for x in w if x in {"a", "e", "i", "o", "u"}]) >= 3
two_letters_in_row = lambda w: any(x == y for x, y in zip(w, w[1:]))
does_not_contain = lambda w: all(x not in w for x in {"ab", "cd", "pq", "xy"})
is_nice = lambda w: contains_at_least_three_vowels(w) and two_letters_in_row(w) and does_not_contain(w)
nice = sum(is_nice(w) for w in puzzle_input_raw.splitlines())
print(nice)
| 44.307692
| 103
| 0.699653
|
acfe84a5ade8cbfe3e21878c749784a0eb6af898
| 445
|
py
|
Python
|
tests/conftest.py
|
hengwei-chan/aescore
|
0eba684e757db2994fc6062736eef73ba8365a9b
|
[
"BSD-3-Clause"
] | 4
|
2021-03-04T12:51:49.000Z
|
2022-03-25T13:48:23.000Z
|
tests/conftest.py
|
hengwei-chan/aescore
|
0eba684e757db2994fc6062736eef73ba8365a9b
|
[
"BSD-3-Clause"
] | 14
|
2020-11-19T16:19:38.000Z
|
2022-03-24T14:40:43.000Z
|
tests/conftest.py
|
hengwei-chan/aescore
|
0eba684e757db2994fc6062736eef73ba8365a9b
|
[
"BSD-3-Clause"
] | 3
|
2021-04-13T08:14:26.000Z
|
2021-08-17T22:21:53.000Z
|
import os
import pytest
@pytest.fixture
def testdir():
wdir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(wdir, "testdata")
@pytest.fixture
def testdata():
wdir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(wdir, "testdata/systems.dat")
@pytest.fixture
def testvsdata():
wdir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(wdir, "testdata/systemsvs.dat")
| 17.8
| 55
| 0.703371
|
acfe85f7b904eb81c8d341dc6f598f0c9c46a26c
| 28,099
|
py
|
Python
|
r_statistics/r_dataNormalization.py
|
dmccloskey/r_statistics
|
459a50a376ba82914e6e27fbeb7e430d36029f76
|
[
"MIT"
] | null | null | null |
r_statistics/r_dataNormalization.py
|
dmccloskey/r_statistics
|
459a50a376ba82914e6e27fbeb7e430d36029f76
|
[
"MIT"
] | null | null | null |
r_statistics/r_dataNormalization.py
|
dmccloskey/r_statistics
|
459a50a376ba82914e6e27fbeb7e430d36029f76
|
[
"MIT"
] | null | null | null |
from .r_dependencies import *
from .r_base import r_base
class r_dataNormalization(r_base):
def calculate_glogNormalization_v1(self,data_I):
    '''normalize the data using a glog transformation using LMGene
    https://www.bioconductor.org/packages/release/bioc/html/LMGene.html
    Citation: Rocke D, Lee GC, Tillinghast J, Durbin-Johnson B and Wu S (2013). LMGene: LMGene Software for Data Transformation and Identification of Differentially Expressed Genes in Gene Expression Arrays. R package version 2.26.0, http://dmrocke.ucdavis.edu/software.html.
    INPUT:
    data_I = listDict
    ...
    OUTPUT:
    data_O = listDict of the transformed data
    concentrations = original data matrix
    concentrations_glog = normalized data matrix
    TODO:
    1. break into individual functions and calls to R
    2. add in optional input for calls to tranest()
    NOTE(review): no missing values are tolerated; when any 'NA' remains the
    method only prints a message and implicitly returns None — confirm callers
    handle that case.
    '''
    # Collect the row (component) and column (sample) label sets from the input rows.
    sns = []
    cn = []
    sample_name_abbreviations = [];
    for d in data_I:
        sns.append(d['sample_name_short']);
        sample_name_abbreviations.append(d['sample_name_abbreviation'])
        cn.append(d['component_name']);
    sns_sorted = sorted(set(sns))
    cn_sorted = sorted(set(cn))
    sample_name_abbreviations_sorted = sorted(set(sample_name_abbreviations))
    # Assign each sample a replicate index, numbered within its abbreviation group.
    # NOTE: the loop variable `sns` below shadows the list built above.
    replicates_dict = {};
    for sns in sns_sorted:
        replicates_dict[sns]=None;
    cnt_reps = 0;
    for sna_sorted in sample_name_abbreviations_sorted:
        for sns in sns_sorted:
            for d in data_I:
                if d['sample_name_short'] == sns and d['sample_name_abbreviation'] == sna_sorted:
                    replicates_dict[sns] = cnt_reps;
                    cnt_reps+=1;
                    break;
        cnt_reps = 0;
    # Flatten the data into row-major (component x sample) parallel lists,
    # seeding concentrations with 'NA' so gaps can be detected afterwards.
    concentrations = ['NA' for r in range(len(sns_sorted)*len(cn_sorted))];
    experiment_ids = ['' for r in range(len(sns_sorted)*len(cn_sorted))];
    time_points = ['' for r in range(len(sns_sorted)*len(cn_sorted))];
    component_group_names = ['' for r in range(len(sns_sorted)*len(cn_sorted))];
    analysis_ids = ['' for r in range(len(sns_sorted)*len(cn_sorted))];
    calculated_concentration_units = ['' for r in range(len(sns_sorted)*len(cn_sorted))];
    cnt = 0;
    cnt_bool = True;
    cnt_reps = 0;
    sna = []
    replicates = []
    for c in cn_sorted:
        for s in sns_sorted:
            for d in data_I:
                if d['sample_name_short'] == s and d['component_name'] == c:
                    if d['calculated_concentration']:
                        concentrations[cnt] = d['calculated_concentration'];
                        experiment_ids[cnt] = d['experiment_id'];
                        time_points[cnt] = d['time_point'];
                        component_group_names[cnt] = d['component_group_name'];
                        analysis_ids[cnt] = d['analysis_id'];
                        calculated_concentration_units[cnt] = d['calculated_concentration_units'];
                        # sna/replicates describe the columns, so collect them
                        # only while filling the first component row.
                        if cnt_bool:
                            sna.append(d['sample_name_abbreviation']);
                            replicates.append(replicates_dict[s]);
                    break;
            cnt = cnt+1
        cnt_bool = False;
    # check if there were any missing values in the data set in the first place
    mv = 0;
    for c in concentrations:
        if c=='NA':
            mv += 1;
    if mv==0:
        # Call to R (rpy2): build the matrix/factor list, wrap into an
        # ExpressionSet, estimate the glog parameters, and transform.
        try:
            # convert lists to R matrix
            concentrations_r = '';
            for c in concentrations:
                concentrations_r = (concentrations_r + ',' + str(c));
            concentrations_r = concentrations_r[1:];
            r_statement = ('concentrations = c(%s)' % concentrations_r);
            ans = robjects.r(r_statement);
            r_statement = ('concentrations_m = matrix(concentrations, nrow = %s, ncol = %s, byrow = TRUE)' %(len(cn_sorted),len(sns_sorted)));
            ans = robjects.r(r_statement);
            # convert lists to R list
            sna_r = '';
            for c in sna:
                sna_r = (sna_r + ',' + '"' + c + '"');
            sna_r = sna_r[1:];
            replicates_r = '';
            for c in replicates:
                replicates_r = (replicates_r + ',' + str(c));
            replicates_r = replicates_r[1:];
            r_statement = ('sna = c(%s)' % sna_r);
            ans = robjects.r(r_statement);
            r_statement = ('replicates = c(%s)' % replicates_r);
            ans = robjects.r(r_statement);
            r_statement = ('concentrations_l = list(sna=sna,replicates=replicates)');
            ans = robjects.r(r_statement);
            # convert to Expression Set
            r_statement = ('eS = neweS(concentrations_m,concentrations_l)');
            ans = robjects.r(r_statement);
            # estimate the parameters for the g-log transformation
            r_statement = ('tranpar = tranest(eS, mult=TRUE)'); # Matches metabo-analyst and produces the most uniform distribution
            ans = robjects.r(r_statement);
            r_statement = ('eS_transformed <- transeS(eS, tranpar$lambda, tranpar$alpha)');
            ans = robjects.r(r_statement);
            # extract out data matrices
            r_statement = ('exprs(eS_transformed)');
            ans = robjects.r(r_statement);
            concentrations_glog = np.array(ans);
            # convert array back to dict (row-major walk mirrors the flattening above)
            data_O = [];
            cnt = 0;
            for c in range(len(cn_sorted)):
                for s in range(len(sns_sorted)):
                    if isinstance(concentrations_glog[c,s], (int, float, complex)):
                        data_tmp = {};
                        data_tmp['sample_name_short'] = sns_sorted[s]
                        data_tmp['component_name'] = cn_sorted[c]
                        data_tmp['component_group_name'] = component_group_names[cnt];
                        data_tmp['calculated_concentration'] = concentrations_glog[c,s];
                        data_tmp['experiment_id'] = experiment_ids[cnt];
                        data_tmp['time_point'] = time_points[cnt];
                        data_tmp['analysis_id'] = analysis_ids[cnt];
                        data_tmp['calculated_concentration_units'] = calculated_concentration_units[cnt]+ '_glog_normalized';
                        data_tmp['comment_'] = None;
                        data_tmp['used_'] = True;
                        data_O.append(data_tmp);
                        cnt+=1;
                    else:
                        print('concentration value is not a number.');
        except Exception as e:
            print(e);
            exit(-1);
        # reshape original concentrations to (components x samples)
        concentrations_original = np.array(concentrations);
        concentrations = concentrations_original.reshape(len(cn_sorted),len(sns_sorted));
        return data_O, concentrations, concentrations_glog;
    else:
        print('missing values found in data!');
def calculate_glogNormalization(self,data_I,
                                mult="TRUE",
                                lowessnorm="FALSE"
                                ):
    '''normalize the data using a glog transformation using LMGene
    https://www.bioconductor.org/packages/release/bioc/html/LMGene.html
    Citation: Rocke D, Lee GC, Tillinghast J, Durbin-Johnson B and Wu S (2013). LMGene: LMGene Software for Data Transformation and Identification of Differentially Expressed Genes in Gene Expression Arrays. R package version 2.26.0, http://dmrocke.ucdavis.edu/software.html.
    INPUT:
    data_I = listDict
    mult = string "TRUE"/"FALSE", forwarded to tranest(mult=...)
    lowessnorm = string "TRUE"/"FALSE", forwarded to tranest(lowessnorm=...)
    OUTPUT:
    data_O = listDict of the transformed data
    concentrations = original data matrix
    concentrations_glog = normalized data matrix
    TODO:
    1. break into individual functions and calls to R
    2. add in optional input for calls to tranest()
    '''
    # Pivot the listDict into a flat matrix plus row/column annotation lists.
    listdict = listDict(data_I);
    concentrations,cn_sorted,sns_sorted,row_variables,column_variables = listdict.convert_listDict2dataMatrixList_pd(
        row_label_I='component_name',
        column_label_I='sample_name_short',
        value_label_I='calculated_concentration',
        row_variables_I=['component_group_name','calculated_concentration_units'],
        column_variables_I=['sample_name_abbreviation','experiment_id','time_point','analysis_id'],
        na_str_I="NA");
    cgn = row_variables['component_group_name'];
    calculated_concentration_units = row_variables['calculated_concentration_units'];
    experiment_ids = column_variables['experiment_id'];
    time_points = column_variables['time_point'];
    analysis_ids = column_variables['analysis_id'];
    sna = column_variables['sample_name_abbreviation'];
    # check if there were any missing values in the data set in the first place
    mv = 0;
    mv = listdict.count_missingValues_pivotTable();
    # make replicate numbers for each sample abbreviation
    listdict = listDict()
    listdict.set_dictList({'sample_name_abbreviation':sna})
    listdict.convert_dictList2DataFrame()
    listdict.make_dummyIndexColumn(column_index_I='sna_index',column_label_I='sample_name_abbreviation')
    # NOTE(review): Series.get_values() was removed in pandas >= 1.0 — confirm
    # the pinned pandas version, or migrate to .to_numpy().
    replicates=listdict.dataFrame['sna_index'].get_values();
    if mv==0:
        # Call to R via the helper wrappers on r_base.
        try:
            # clear the R workspace
            self.clear_workspace();
            # convert lists to R matrix
            self.make_matrixFromList(concentrations,len(cn_sorted),len(sns_sorted),'concentrations_m');
            # convert lists to R vectors
            self.make_vectorFromList(sna,'sna');
            self.make_vectorFromList(replicates,'replicates');
            # make the R factor list
            self.make_factorList('sna','replicates','concentrations_l');
            # convert to Expression Set
            self.convert_matrix2ExpressionSet(matrix_I='concentrations_m',vlist_I='concentrations_l',es_O='eS');
            # estimate the glog parameters
            self.call_tranest('eS','tranpar',
                              mult=mult,
                              lowessnorm=lowessnorm
                              );
            # transform the expression set
            self.call_transeS('eS','tranpar','eS_transformed');
            # extract out data matrices
            concentrations_glog = self.extract_expressionSet('eS_transformed');
            # convert array back to dict
            data_O = [];
            cnt = 0;
            for c in range(len(cn_sorted)):
                for s in range(len(sns_sorted)):
                    if isinstance(concentrations_glog[c,s], (int, float, complex)):
                        data_tmp = {};
                        data_tmp['sample_name_short'] = sns_sorted[s]
                        data_tmp['component_group_name'] = cgn[c]
                        data_tmp['component_name'] = cn_sorted[c]
                        data_tmp['calculated_concentration_units'] = calculated_concentration_units[c] + '_glog_normalized';
                        data_tmp['calculated_concentration'] = concentrations_glog[c,s];
                        data_tmp['experiment_id'] = experiment_ids[s];
                        data_tmp['time_point'] = time_points[s];
                        data_tmp['analysis_id'] = analysis_ids[s];
                        data_tmp['imputation_method'] = None;
                        data_tmp['normalization_method'] = 'glog';
                        # NOTE(review): the key below is misspelled ('ooptions')
                        # and records the hard-coded defaults rather than the
                        # actual mult/lowessnorm arguments — confirm downstream
                        # consumers before changing either.
                        data_tmp['normalization_ooptions'] = {'mult':"TRUE",'lowessnorm':"FALSE"};
                        data_tmp['comment_'] = None;
                        data_tmp['used_'] = True;
                        data_O.append(data_tmp);
                        cnt+=1;
                    else:
                        print('concentration value is not a number.');
        except Exception as e:
            print(e);
            exit(-1);
        # reshape original concentrations to (components x samples)
        concentrations_original = np.array(concentrations);
        concentrations = concentrations_original.reshape(len(cn_sorted),len(sns_sorted));
        return data_O, concentrations, concentrations_glog;
    else:
        # NOTE(review): implicitly returns None in this branch.
        print('missing values found in data!');
def convert_matrix2ExpressionSet(self,matrix_I,vlist_I,es_O):
    '''
    Build an R ExpressionSet from a data matrix and a factor list that are
    already present in the R workspace, via LMGene's ``neweS(mat, vlist)``.
    INPUT:
    matrix_I = string, name of the data-matrix variable in the R workspace
    vlist_I = string, name of the factor-list variable in the R workspace
    (each component of the list describes one experimental-design factor,
    with one element per column of the matrix)
    OUTPUT:
    es_O = string, name given to the new ExpressionSet variable in the R workspace
    '''
    try:
        statement = '%s = neweS(%s,%s)' % (es_O, matrix_I, vlist_I)
        result = robjects.r(statement)
    except Exception as e:
        print(e)
        exit(-1)
def make_factorList(self,sna,replicates,list_O):
    '''
    Create the factor list ``list(sna=..., replicates=...)`` expected by
    LMGene in the R workspace.
    INPUT:
    sna = string, name of the sample-name-abbreviation vector in the R workspace
    replicates = string, name of the replicate-index vector in the R workspace
    OUTPUT:
    list_O = string, name given to the factor-list variable in the R workspace
    '''
    try:
        statement = '%s = list(sna=%s,replicates=%s)' % (list_O, sna, replicates)
        result = robjects.r(statement)
    except Exception as e:
        print(e)
        exit(-1)
def extract_expressionSet(self,es_I):
    '''
    Extract the expression data matrix from an R ExpressionSet via ``exprs()``.
    INPUT:
    es_I = string, name of the expression set in the R workspace
    OUTPUT:
    data_O = numpy array of the expression values
    (stays None if the R call fails before assignment)
    '''
    data_O = None;
    try:
        # BUGFIX: the statement previously hard-coded 'eS_transformed' and
        # ignored the es_I argument entirely.
        r_statement = ('exprs(%s)' % es_I);
        ans = robjects.r(r_statement);
        data_O = np.array(ans);
    except Exception as e:
        print(e);
        exit(-1);
    return data_O;
def call_tranest(self,es_I,transpar_O,
                 mult="TRUE",
                 lowessnorm="FALSE"
                 ):
    '''
    Estimate the glog transformation parameters with LMGene's ``tranest``.
    INPUT:
    es_I = string, name of the ExpressionSet variable in the R workspace
    mult = "TRUE"/"FALSE"; when TRUE, tranest fits one alpha per sample
    instead of a single shared alpha (SD and mult may not both be TRUE)
    lowessnorm = "TRUE"/"FALSE"; when TRUE, lowess normalization is used
    when calculating the likelihood
    OUTPUT:
    transpar_O = string, name of the R workspace variable holding the result,
    a list with components $lambda and $alpha
    NOTES:
    ``tranest(eS, mult=TRUE)`` matches metabo-analyst and produces the most
    uniform distribution. See the LMGene reference manual for the full
    argument list (ngenes, starting, gradtol, method, model, SD, rank,
    model.based, rep.arrays) and for the likelihood derivation
    (Durbin and Rocke, 2003; Wu and Rocke, 2009).
    '''
    try:
        r_statement = ('%s = tranest(%s, mult=%s, lowessnorm=%s)'
                       %(transpar_O,es_I,mult,lowessnorm));
        ans = robjects.r(r_statement);
    except Exception as e:
        print(e);
        exit(-1);
def call_transeS(self,es_I,transpar_I,es_O):
    '''
    Apply the glog transform to an expression set via LMGene's ``transeS``.
    INPUT:
    es_I = string, name of the ExpressionSet variable in the R workspace
    transpar_I = string, name of the tranest() result in the R workspace
    (a list with components $lambda and $alpha)
    OUTPUT:
    es_O = string, name of the glog-transformed ExpressionSet in the R workspace
    NOTES:
    ``transeS(eS, lambda, alpha)`` computes
    log(y - alpha + sqrt((y - alpha)^2 + lambda)) for every element;
    alpha may be a scalar or a per-sample vector.
    '''
    try:
        # BUGFIX: the third argument previously passed `%s$lambda` twice,
        # so alpha was silently replaced by lambda. transeS expects
        # (eS, lambda, alpha) — see the transeS docs and the equivalent
        # inline call in calculate_glogNormalization_v1.
        r_statement = ('%s = transeS(%s, %s$lambda, %s$alpha)'
                       %(es_O,es_I,transpar_I,transpar_I));
        ans = robjects.r(r_statement);
    except Exception as e:
        print(e);
        exit(-1);
| 51.369287
| 280
| 0.562476
|
acfe863dd06da0d9ed2daec532916a880d3d4362
| 17,333
|
py
|
Python
|
src/examples/RuleBasedDistributedModel/test/customStrategy.py
|
wei12f8158/YAFS
|
88df88b6f1dc8d88e0c2424939464d9c7b708d28
|
[
"MIT"
] | null | null | null |
src/examples/RuleBasedDistributedModel/test/customStrategy.py
|
wei12f8158/YAFS
|
88df88b6f1dc8d88e0c2424939464d9c7b708d28
|
[
"MIT"
] | null | null | null |
src/examples/RuleBasedDistributedModel/test/customStrategy.py
|
wei12f8158/YAFS
|
88df88b6f1dc8d88e0c2424939464d9c7b708d28
|
[
"MIT"
] | 2
|
2021-02-22T17:18:42.000Z
|
2021-02-24T17:30:33.000Z
|
from collections import defaultdict
import random
import pickle
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
import matplotlib as mpl
import math
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import json
import pandas as pd
class CustomStrategy():
LIMIT_TURNS = 100
def get_app_name(self, service):
return service[:service.index("_")]
def __init__(self,pathExp,pathResults,total_services,draw_grid_topology_dimension,pathCSV):
self.activation = 0
# self.pathExp = pathExp
self.pathResults = pathResults
self.agents = {} #key(service,node)
self.pathCSV = pathCSV
self.__total_services = total_services
self.__draw_controlUser = {}
self.__dimension = draw_grid_topology_dimension
self.__currentOccupation = {}
self.__my_hash_service_map ={}
self.__inc_my_map = 1
self.previous_number_samples = 0
#Load user contraints
self.constraints={}
dataPopulation = json.load(open(pathExp + 'fognodesDefinition.json'))
for element in dataPopulation["sources"]:
node = element["id_resource"]
app = element["app"]
self.constraints[(node, app)] = element["constraint"]
def __str__(self):
print("Number of evolutions %i" % self.activation)
def transform_node_name(self, node):
return self.__dimension * node[0] + node[1]
# def __my_hash_service(self,str):
# hash = 0
# # Take ordinal number of char in str, and just add
# for x in str: hash += (ord(x))
# return (hash % self.totalservices) # Depending on the range, do a modulo operation.
def __my_map_service(self,str):
if str not in self.__my_hash_service_map:
self.__my_hash_service_map[str]=self.__inc_my_map
self.__inc_my_map+=1
return self.__my_hash_service_map[str]
def deploy_module(self,sim,service,idtopo):
    # Deploy *service* (e.g. "2_19") of its application onto topology node *idtopo*
    # through the simulator API.
    app_name = service[0:service.index("_")]
    app = sim.apps[app_name]
    services = app.services
    # NOTE(review): the DES process id returned by the simulator is captured
    # but never returned to the caller — confirm whether it should be.
    idDES = sim.deploy_module(app_name, service, services[service], [idtopo])
### FUNCTION MOVED TO core.py
# def remove_module(self, sim, service_name, idtopo):
#
# sim.print_debug_assignaments()
#
# app_name = service_name[0:service_name.index("_")]
# # Stopping related processes deployed in the module and clearing main structure: alloc_DES
# all_des = []
# for k, v in sim.alloc_DES.items():
# if v == idtopo:
# all_des.append(k)
#
# # Clearing other related structures
# for des in sim.alloc_module[app_name][service_name]:
# if des in all_des:
# print "REMOVE PROCESS ", des
# sim.alloc_module[app_name][service_name].remove(des)
# sim.stop_process(des)
# del sim.alloc_DES[des]
def is_already_deployed(self,sim,service_name,idtopo):
app_name = service_name[0:service_name.index("_")]
all_des = []
for k, v in sim.alloc_DES.items():
if v == idtopo:
all_des.append(k)
# Clearing other related structures
for des in sim.alloc_module[app_name][service_name]:
if des in all_des:
return True
def get_current_services(self,sim):
""" returns a dictionary with name_service and a list of node where they are deployed
example: defaultdict(<type 'list'>, {u'2_19': [15], u'3_22': [5]})
"""
current_services = sim.get_alloc_entities()
nodes_with_services = defaultdict(list)
current_services = dict((k, v) for k, v in current_services.items() if len(v)>0)
deployed_services = defaultdict(list)
for k,v in current_services.items():
for service_name in v:
if not "None" in service_name: #[u'2#2_19']
deployed_services[service_name[service_name.index("#")+1:]].append(k)
else:
nodes_with_services[k].append(service_name[:service_name.index("#")])
return deployed_services,nodes_with_services
def drawNetwork(self,sim,nodes_with_users):
    # Render one PNG snapshot of the grid topology: nodes, edges, user dots
    # coloured by app, and a per-node mini-heatmap of service occupation.
    # Constants: piesize is the side of each per-node occupation inset.
    piesize = .05
    p2 = piesize / 2.
    # Colormap with one colour per service; index 0 (free slot) forced to near-white.
    tab20 = plt.cm.get_cmap('tab20', self.__total_services)
    bounds = range(self.__total_services)
    newcolors = tab20(np.linspace(0, 1, self.__total_services))
    newcolors[0] = np.array([250.0 / 256.0, 250. / 256., 250. / 256., 1])
    newcmp = mpl.colors.ListedColormap(newcolors)
    norm = mpl.colors.BoundaryNorm(bounds, newcmp.N)
    # Grid layout: node id -> (x, y) position.
    pos = {self.transform_node_name((x, y)): (x, y) for x in range(self.__dimension) for y in range(self.__dimension)}
    fig, ax = plt.subplots(figsize=(16.0, 10.0))
    # Nodes + Edges
    plt.text(4., -2., "Step: %i" % self.activation, {'color': 'black', 'fontsize': 16})
    nx.draw(sim.topology.G, pos, with_labels=False, node_size=200, node_color="#1260A0", edge_color="gray", node_shape="o",
            font_size=7, font_color="white", ax=ax)
    # Labels on nodes
    for x in range(self.__dimension-1, -1, -1):
        for y in range(self.__dimension-1, -1, -1):
            ax.text(x + piesize * 2.5, y + piesize * 7.5, "%i-%i" % (x, y), fontsize=8)
    # Plotting user dots, coloured by the app's index in the service map.
    self.__draw_controlUser = {}
    for node in nodes_with_users:
        for app in nodes_with_users[node]:
            for i, v in enumerate(self.__my_hash_service_map.keys()):  # getting the index of the app #TODO improve
                if "%s_"%app in v:
                    break
            self.__draw_showUser(pos[node], i+1, ax, newcolors)
    # LAST STEP ALWAYS (to maintain coordinates): draw the occupation inset
    # for every node, converting data coordinates to figure coordinates.
    trans = ax.transData.transform
    trans2 = fig.transFigure.inverted().transform
    for n in sim.topology.G:
        xx, yy = trans(pos[n])  # figure coordinates
        xa, ya = trans2((xx, yy))  # axes coordinates
        a = plt.axes([xa - p2, ya - p2, piesize, piesize])
        a.set_aspect('equal')
        # NOTE(review): eval() on graph-node attribute strings — fine for
        # trusted experiment files, unsafe for untrusted topology data.
        shape = np.array(eval(sim.topology.G.nodes[n]["capacity"]))
        if n in self.__currentOccupation:
            occ = self.__currentOccupation[n]
        else:
            occ = np.array(eval((sim.topology.G.nodes[n]["occupation"])))
        data = occ.reshape(shape)
        a.imshow(data, cmap=newcmp, interpolation='none', norm=norm)
        a.axes.get_yaxis().set_visible(False)
        a.axes.get_xaxis().set_visible(False)
    fig.savefig(self.pathResults +'net_%03d.png' % self.activation)  # save the figure to file
    plt.close(fig)  # close the figure
def __draw_showUser(self,node, service,ax,newcolors):
    # Draw one user dot next to grid position *node* (an (x, y) tuple),
    # coloured by *service* index. Dots for the same node are laid out in
    # rows of four, tracked via self.__draw_controlUser.
    piesize = .05
    p2 = piesize / 2.
    if node not in self.__draw_controlUser.keys():
        self.__draw_controlUser[node] = 0
    total = self.__draw_controlUser[node]
    line = int(total / 4) + 1
    duy =0.2* line
    dux = 0.15 * (total % 4)
    self.__draw_controlUser[node] += 1
    # Offset direction depends on which border of the grid the node sits on.
    # NOTE(review): dx/dy are only assigned for border nodes — an interior
    # node would raise NameError at the scatter() call below. Presumably
    # users only appear on border nodes; confirm against the topology setup.
    if node[0] == 0:  # east
        dx = -piesize * 10. - (dux * .8)
        dy = piesize * 8.5 - (duy * 1.4)
    elif node[1] == 0:  # south
        dx = -piesize * 9.5 + duy
        dy = -piesize * 10. - dux
    elif node[0] == self.__dimension-1:  # west
        dx = piesize * 10. + dux
        dy = piesize * 9. - duy
    elif node[1] == self.__dimension-1:  # north
        dx = -piesize * 9 + (duy * .8)
        dy = piesize * 10. + (dux * 1.4)
    ax.scatter(node[0] + dx, node[1] + dy, s=100.0, marker='o', color=newcolors[service])
def __call__(self, sim, routing,case, stop_time, it):
    # One evolution step of the strategy: gather simulator state, render a
    # snapshot, update agents with routing paths and QoS samples from the
    # metrics CSV, then let agents take actions in random order.
    # The occupation of a node can be managed by the simulator, but to ease
    # integration with the visualization both structures are kept separate.
    self.__currentOccupation = {}
    self.activation +=1
    routing.invalid_cache_value = True
    print("*" * 30)
    print("*" * 30)
    print("STEP: ", self.activation)
    print("*" * 30)
    print("*" * 30)
    ####
    # Data gathering - AGENT INTERFACE
    ####
    # Current utilization of services: service name -> list of routing paths.
    service_calls = defaultdict(list)
    nodes_with_users= defaultdict(list)
    for k,v in routing.controlServices.items():
        service_calls[k[1]].append(v[0])
    print("Current service calls:")
    print(service_calls)
    print("-" * 30)
    nodes_with_deployed_services = defaultdict(list)
    current_services,nodes_with_users = self.get_current_services(sim)
    for service in current_services:
        for node in current_services[service]:
            nodes_with_deployed_services[node].append(service)
    print("Current services:")
    print(current_services)
    print("-" * 30)
    print("Nodes with users: (nodes_with_users)")
    print(nodes_with_users)
    print("-" * 30)
    print("Nodes with deployed services:")
    print(nodes_with_deployed_services)
    print("-" * 30)
    # Fill the first free (0.) slot of each node's occupation vector with the
    # integer id of every service deployed there.
    # TODO Extraer ocupacion a otros procesos
    for node in nodes_with_deployed_services:
        if node not in self.__currentOccupation:
            # NOTE(review): eval() on a node-attribute string — trusted data only.
            self.__currentOccupation[node] =np.array(eval((sim.topology.G.nodes[node]["occupation"])))
        for service in nodes_with_deployed_services[node]:
            pos = list(self.__currentOccupation[node]).index(0.)  # it could be incremental
            self.__currentOccupation[node][pos] = self.__my_map_service(service)
    print("Current node occupation with service mapped:")
    print(self.__currentOccupation)
    print("-" * 30)
    # Deployed services that no current request path ends at.
    services_not_used = defaultdict(list)
    for k in current_services:
        if k not in service_calls.keys():
            # Unused service (no-op: bare None statement kept from original)
            None
        else:
            for service in current_services[k]:
                found = False
                for path in service_calls[k]:
                    if path[-1] == service:
                        found = True
                        break
                if not found:
                    services_not_used[k].append(service)
    print("Unused deployed services")
    print(services_not_used)
    print("-"*30)
    ####
    # Taking a picture
    ####
    self.drawNetwork(sim,nodes_with_users)
    ####
    # AGENT MIDDLEWARE & QoS from current requests between self.activations
    ####
    # UPDATE AGENTS status (creation*1, feed-channel, partial-network).
    # Creation only triggers on the first activation.
    for service, currentnodes in current_services.items():
        app_name = self.get_app_name(service)
        if self.activation == 1:
            # check first time: initial existence (one time)
            # These services are in the cloud, they are indelible!
            for node in currentnodes:
                if (service, node) not in self.agents:
                    # NOTE(review): Agent is not defined in this file —
                    # presumably imported alongside this module; verify.
                    a = Agent(service, node)
                    a.time_creation = self.activation
                    a.inCloud = True
                    self.agents[(service, node)] = a
        # Feed each agent the user paths that end at it, weighted by how many
        # users of that app sit on the path's source node.
        for user_path in service_calls[service]:
            user_position = user_path[0]
            a = np.array(nodes_with_users[user_position])
            unique, counts = np.unique(a, return_counts=True)
            ocurrences = dict(zip(unique, counts))
            self.agents[(service, user_path[-1])].updateNx(user_path,ocurrences[app_name])
    # We analyse the performance of the multiple requests: QoS.
    # Force the flush of metrics, then load only the samples generated since
    # the previous activation.
    sim.metrics.flush()
    df = pd.read_csv(self.pathCSV + ".csv", skiprows=range(1, self.previous_number_samples))  # include header
    self.previous_number_samples += len(df.index) - 1  # avoid header
    df["response"] = df['time_out'] - df['time_emit']
    # Per-user aggregation first, ...
    df2 = df.groupby(['DES.dst', 'TOPO.dst', 'TOPO.src', 'module']).agg({"response": ['mean', 'std', 'count'],
                                                                         "service": 'mean'}).reset_index()
    df2.columns = ["USER", "SRC", "DST", "module", "mean", "std", "transmissions", "service"]
    # ... then users deployed on the same node are grouped together.
    df2 = df2.groupby(['SRC', 'DST', 'module']).agg({"mean": np.mean, "std": np.mean, "transmissions": np.mean, "service": np.mean}).reset_index()
    for i, row in df2.iterrows():
        service_node= int(row["SRC"])
        service_name = row["module"]
        user_node = int(row["DST"])
        self.agents[(service_name, service_node)].update_response_log(user_node, row[3:])
    ####
    # ACTIONS: taking new actions with the previous information.
    # Agents act in random order; an agent's action may force other agents
    # back into the turn queue, bounded by LIMIT_TURNS.
    ####
    shuffle_agents = list(self.agents.keys())
    attempts = 0
    while len(shuffle_agents)>0 and attempts < CustomStrategy.LIMIT_TURNS:
        k = random.randint(0,len(shuffle_agents)-1)
        key_agent = shuffle_agents[k]
        del shuffle_agents[k]
        action,force_third_agents_actions = self.agents[key_agent].get_action(self.activation,self.constraints)  # stored in the agent's log so it can still change
        # First, force third-agent actions: re-queue those agents.
        # NOTE(review): the loop variable `action` below shadows the agent's
        # own action from get_action() — confirm that is intentional.
        for agent,action in force_third_agents_actions:
            if agent not in shuffle_agents:
                shuffle_agents.append(agent)
        # perform agent action (placeholder: currently only logged)
        print(key_agent,action)
        attempts+=1
    if attempts >= CustomStrategy.LIMIT_TURNS:
        print("REALLY!? A big war among services is declared. External diplomatic actions are required.")
        exit()
    # Debug: closing cycle.
    print("-__"*30)
    print("AGENTS in cicle: ", self.activation)
    for k, agent in self.agents.items():
        print(agent)
    print("-_-" * 30)
    # FOR TESTING & DEBUGGING CONTROL
    # NOTE(review): hard stop after the second activation — development aid,
    # remove before running full experiments.
    if self.activation==2:
        print("Activation :", self.activation)
        sim.print_debug_assignaments()
        exit()
    # LAST STEP: clear temporal variables from every agent.
    for k,agent in self.agents.items():
        agent.clear()
| 36.878723
| 177
| 0.57278
|
acfe875d74d22695363cb0328aabcf18b6704d4e
| 10,582
|
py
|
Python
|
tools/ci/tc/tests/test_valid.py
|
ijjk/web-platform-tests
|
bbbcde8645524e0f81f751db4d1518c4f448185c
|
[
"BSD-3-Clause"
] | 1
|
2020-06-13T22:46:20.000Z
|
2020-06-13T22:46:20.000Z
|
tools/ci/tc/tests/test_valid.py
|
harper-covamez/wpt
|
d699674d7b2ae45feadb40f8a02a6a5256270b16
|
[
"BSD-3-Clause"
] | null | null | null |
tools/ci/tc/tests/test_valid.py
|
harper-covamez/wpt
|
d699674d7b2ae45feadb40f8a02a6a5256270b16
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import os
from io import open
import jsone
import mock
import pytest
import requests
import yaml
from jsonschema import validate
from tools.ci.tc import decision
here = os.path.dirname(__file__)
root = os.path.abspath(os.path.join(here, "..", "..", "..", ".."))
def data_path(filename):
    """Return the absolute-ish path of *filename* in the sibling ``testdata`` dir."""
    return os.path.join(here, os.pardir, "testdata", filename)
def test_verify_taskcluster_yml():
    """Verify that the json-e in the .taskcluster.yml is valid"""
    taskcluster_yml_path = os.path.join(root, ".taskcluster.yml")
    with open(taskcluster_yml_path, encoding="utf8") as fh:
        template = yaml.safe_load(fh)

    # (fixture file, tasks_for value, human-readable description)
    events = [("pr_event.json", "github-pull-request", "Pull Request"),
              ("master_push_event.json", "github-push", "Push to master")]

    for filename, tasks_for, _title in events:
        with open(data_path(filename), encoding="utf8") as fh:
            event = json.load(fh)

        # Rendering raises if the template is not valid json-e.
        jsone.render(template, {"tasks_for": tasks_for,
                                "event": event,
                                "as_slugid": lambda x: x})
def test_verify_payload():
    """Verify that the decision task produces tasks with a valid payload"""
    from tools.ci.tc.decision import decide

    def fetch_schema(url):
        # Download a JSON schema, failing loudly on HTTP errors.
        resp = requests.get(url)
        resp.raise_for_status()
        return resp.json()

    create_task_schema = fetch_schema(
        "https://community-tc.services.mozilla.com/schemas/queue/v1/create-task-request.json")

    # TODO(Hexcles): Change it to https://community-tc.services.mozilla.com/references/schemas/docker-worker/v1/payload.json
    # after the next Community-TC release (see https://bugzilla.mozilla.org/show_bug.cgi?id=1639732)..
    payload_schema = fetch_schema(
        "https://raw.githubusercontent.com/taskcluster/taskcluster/"
        "3ed511ef9119da54fc093e976b7b5955874c9b54/workers/docker-worker/schemas/v1/payload.json")

    jobs = ["lint",
            "manifest_upload",
            "resources_unittest",
            "tools_unittest",
            "wpt_integration",
            "wptrunner_infrastructure",
            "wptrunner_unittest"]

    for filename in ["pr_event.json", "master_push_event.json"]:
        with open(data_path(filename), encoding="utf8") as fh:
            event = json.load(fh)
        with mock.patch("tools.ci.tc.decision.get_fetch_rev", return_value=(None, event["after"], None)):
            with mock.patch("tools.ci.tc.decision.get_run_jobs", return_value=set(jobs)):
                task_id_map = decide(event)
        for name, (task_id, task_data) in task_id_map.items():
            try:
                validate(instance=task_data, schema=create_task_schema)
                validate(instance=task_data["payload"], schema=payload_schema)
            except Exception as e:
                print("Validation failed for task '%s':\n%s" % (name, json.dumps(task_data, indent=2)))
                raise e
@pytest.mark.parametrize("event_path,is_pr,files_changed,expected", [
("master_push_event.json", False, None,
{'download-firefox-nightly',
'wpt-firefox-nightly-testharness-1',
'wpt-firefox-nightly-testharness-2',
'wpt-firefox-nightly-testharness-3',
'wpt-firefox-nightly-testharness-4',
'wpt-firefox-nightly-testharness-5',
'wpt-firefox-nightly-testharness-6',
'wpt-firefox-nightly-testharness-7',
'wpt-firefox-nightly-testharness-8',
'wpt-firefox-nightly-testharness-9',
'wpt-firefox-nightly-testharness-10',
'wpt-firefox-nightly-testharness-11',
'wpt-firefox-nightly-testharness-12',
'wpt-firefox-nightly-testharness-13',
'wpt-firefox-nightly-testharness-14',
'wpt-firefox-nightly-testharness-15',
'wpt-firefox-nightly-testharness-16',
'wpt-chrome-dev-testharness-1',
'wpt-chrome-dev-testharness-2',
'wpt-chrome-dev-testharness-3',
'wpt-chrome-dev-testharness-4',
'wpt-chrome-dev-testharness-5',
'wpt-chrome-dev-testharness-6',
'wpt-chrome-dev-testharness-7',
'wpt-chrome-dev-testharness-8',
'wpt-chrome-dev-testharness-9',
'wpt-chrome-dev-testharness-10',
'wpt-chrome-dev-testharness-11',
'wpt-chrome-dev-testharness-12',
'wpt-chrome-dev-testharness-13',
'wpt-chrome-dev-testharness-14',
'wpt-chrome-dev-testharness-15',
'wpt-chrome-dev-testharness-16',
'wpt-firefox-nightly-reftest-1',
'wpt-firefox-nightly-reftest-2',
'wpt-firefox-nightly-reftest-3',
'wpt-firefox-nightly-reftest-4',
'wpt-firefox-nightly-reftest-5',
'wpt-chrome-dev-reftest-1',
'wpt-chrome-dev-reftest-2',
'wpt-chrome-dev-reftest-3',
'wpt-chrome-dev-reftest-4',
'wpt-chrome-dev-reftest-5',
'wpt-firefox-nightly-wdspec-1',
'wpt-chrome-dev-wdspec-1',
'wpt-firefox-nightly-crashtest-1',
'wpt-chrome-dev-crashtest-1',
'lint'}),
("pr_event.json", True, {".taskcluster.yml",".travis.yml","tools/ci/start.sh"},
{'download-firefox-nightly',
'lint',
'tools/ unittests (Python 2)',
'tools/ unittests (Python 3.6)',
'tools/ unittests (Python 3.8)',
'tools/ integration tests (Python 2)',
'tools/ integration tests (Python 3.6)',
'tools/ integration tests (Python 3.8)',
'resources/ tests',
'infrastructure/ tests',
'infrastructure/ tests (Python 3)'}),
# More tests are affected in the actual PR but it shouldn't affect the scheduled tasks
("pr_event_tests_affected.json", True, {"layout-instability/clip-negative-bottom-margin.html",
"layout-instability/composited-element-movement.html"},
{'download-firefox-nightly',
'wpt-firefox-nightly-stability',
'wpt-firefox-nightly-results',
'wpt-firefox-nightly-results-without-changes',
'wpt-chrome-dev-stability',
'wpt-chrome-dev-results',
'wpt-chrome-dev-results-without-changes',
'lint'}),
("epochs_daily_push_event.json", False, None,
{'download-firefox-stable',
'wpt-chrome-stable-reftest-1',
'wpt-chrome-stable-reftest-2',
'wpt-chrome-stable-reftest-3',
'wpt-chrome-stable-reftest-4',
'wpt-chrome-stable-reftest-5',
'wpt-chrome-stable-testharness-1',
'wpt-chrome-stable-testharness-10',
'wpt-chrome-stable-testharness-11',
'wpt-chrome-stable-testharness-12',
'wpt-chrome-stable-testharness-13',
'wpt-chrome-stable-testharness-14',
'wpt-chrome-stable-testharness-15',
'wpt-chrome-stable-testharness-16',
'wpt-chrome-stable-testharness-2',
'wpt-chrome-stable-testharness-3',
'wpt-chrome-stable-testharness-4',
'wpt-chrome-stable-testharness-5',
'wpt-chrome-stable-testharness-6',
'wpt-chrome-stable-testharness-7',
'wpt-chrome-stable-testharness-8',
'wpt-chrome-stable-testharness-9',
'wpt-chrome-stable-wdspec-1',
'wpt-chrome-stable-crashtest-1',
'wpt-firefox-stable-reftest-1',
'wpt-firefox-stable-reftest-2',
'wpt-firefox-stable-reftest-3',
'wpt-firefox-stable-reftest-4',
'wpt-firefox-stable-reftest-5',
'wpt-firefox-stable-testharness-1',
'wpt-firefox-stable-testharness-10',
'wpt-firefox-stable-testharness-11',
'wpt-firefox-stable-testharness-12',
'wpt-firefox-stable-testharness-13',
'wpt-firefox-stable-testharness-14',
'wpt-firefox-stable-testharness-15',
'wpt-firefox-stable-testharness-16',
'wpt-firefox-stable-testharness-2',
'wpt-firefox-stable-testharness-3',
'wpt-firefox-stable-testharness-4',
'wpt-firefox-stable-testharness-5',
'wpt-firefox-stable-testharness-6',
'wpt-firefox-stable-testharness-7',
'wpt-firefox-stable-testharness-8',
'wpt-firefox-stable-testharness-9',
'wpt-firefox-stable-wdspec-1',
'wpt-firefox-stable-crashtest-1',
'wpt-webkitgtk_minibrowser-nightly-reftest-1',
'wpt-webkitgtk_minibrowser-nightly-reftest-2',
'wpt-webkitgtk_minibrowser-nightly-reftest-3',
'wpt-webkitgtk_minibrowser-nightly-reftest-4',
'wpt-webkitgtk_minibrowser-nightly-reftest-5',
'wpt-webkitgtk_minibrowser-nightly-testharness-1',
'wpt-webkitgtk_minibrowser-nightly-testharness-10',
'wpt-webkitgtk_minibrowser-nightly-testharness-11',
'wpt-webkitgtk_minibrowser-nightly-testharness-12',
'wpt-webkitgtk_minibrowser-nightly-testharness-13',
'wpt-webkitgtk_minibrowser-nightly-testharness-14',
'wpt-webkitgtk_minibrowser-nightly-testharness-15',
'wpt-webkitgtk_minibrowser-nightly-testharness-16',
'wpt-webkitgtk_minibrowser-nightly-testharness-2',
'wpt-webkitgtk_minibrowser-nightly-testharness-3',
'wpt-webkitgtk_minibrowser-nightly-testharness-4',
'wpt-webkitgtk_minibrowser-nightly-testharness-5',
'wpt-webkitgtk_minibrowser-nightly-testharness-6',
'wpt-webkitgtk_minibrowser-nightly-testharness-7',
'wpt-webkitgtk_minibrowser-nightly-testharness-8',
'wpt-webkitgtk_minibrowser-nightly-testharness-9',
'wpt-webkitgtk_minibrowser-nightly-wdspec-1',
'wpt-webkitgtk_minibrowser-nightly-crashtest-1',
'wpt-servo-nightly-reftest-1',
'wpt-servo-nightly-reftest-2',
'wpt-servo-nightly-reftest-3',
'wpt-servo-nightly-reftest-4',
'wpt-servo-nightly-reftest-5',
'wpt-servo-nightly-testharness-1',
'wpt-servo-nightly-testharness-10',
'wpt-servo-nightly-testharness-11',
'wpt-servo-nightly-testharness-12',
'wpt-servo-nightly-testharness-13',
'wpt-servo-nightly-testharness-14',
'wpt-servo-nightly-testharness-15',
'wpt-servo-nightly-testharness-16',
'wpt-servo-nightly-testharness-2',
'wpt-servo-nightly-testharness-3',
'wpt-servo-nightly-testharness-4',
'wpt-servo-nightly-testharness-5',
'wpt-servo-nightly-testharness-6',
'wpt-servo-nightly-testharness-7',
'wpt-servo-nightly-testharness-8',
'wpt-servo-nightly-testharness-9',
'wpt-servo-nightly-wdspec-1',
'wpt-servo-nightly-crashtest-1',})
])
def test_schedule_tasks(event_path, is_pr, files_changed, expected):
    """Check decision.decide schedules exactly the expected task names."""
    patch_fetch_rev = mock.patch("tools.ci.tc.decision.get_fetch_rev",
                                 return_value=(None, None, None))
    patch_files_changed = mock.patch("tools.wpt.testfiles.repo_files_changed",
                                     return_value=files_changed)
    with patch_fetch_rev, patch_files_changed:
        with open(data_path(event_path), encoding="utf8") as event_file:
            event = json.load(event_file)
            scheduled = decision.decide(event)
    assert set(scheduled.keys()) == expected
| 41.175097
| 124
| 0.659422
|
acfe87e4fda6cfc14a660bef3fd4d5262e2414b3
| 22,274
|
py
|
Python
|
asyncpg/connect_utils.py
|
shadchin/asyncpg
|
53bea985bfda80f0af9eb3659f8e4568677e27e8
|
[
"Apache-2.0"
] | 1
|
2021-03-17T13:55:49.000Z
|
2021-03-17T13:55:49.000Z
|
asyncpg/connect_utils.py
|
shadchin/asyncpg
|
53bea985bfda80f0af9eb3659f8e4568677e27e8
|
[
"Apache-2.0"
] | null | null | null |
asyncpg/connect_utils.py
|
shadchin/asyncpg
|
53bea985bfda80f0af9eb3659f8e4568677e27e8
|
[
"Apache-2.0"
] | 1
|
2021-11-17T16:15:36.000Z
|
2021-11-17T16:15:36.000Z
|
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import asyncio
import collections
import functools
import getpass
import os
import pathlib
import platform
import re
import socket
import ssl as ssl_module
import stat
import struct
import time
import typing
import urllib.parse
import warnings
import inspect
from . import compat
from . import exceptions
from . import protocol
# Immutable bundle of everything needed to authenticate and configure a
# single connection attempt (resolved from DSN, env vars, and pgpass).
_ConnectionParameters = collections.namedtuple(
    'ConnectionParameters',
    [
        'user',
        'password',
        'database',
        'ssl',
        'ssl_is_advisory',
        'connect_timeout',
        'server_settings',
    ])
# Client-side tuning knobs that do not affect the wire-level handshake.
_ClientConfiguration = collections.namedtuple(
    'ConnectionConfiguration',
    [
        'command_timeout',
        'statement_cache_size',
        'max_cached_statement_lifetime',
        'max_cacheable_statement_size',
    ])
# Cached OS name; used for the pgpass filename and permission checks below.
_system = platform.uname().system

# libpq uses a different password-file name on Windows.
if _system == 'Windows':
    PGPASSFILE = 'pgpass.conf'
else:
    PGPASSFILE = '.pgpass'
def _read_password_file(passfile: pathlib.Path) \
        -> typing.List[typing.Tuple[str, ...]]:
    """Parse a libpq-style password file into a list of record tuples.

    Each non-comment line is split on unescaped colons into up to five
    fields; ``\\`` escapes both the backslash itself and the separator.
    A missing, non-regular, unreadable or (outside Windows) overly
    permissive file yields an empty list.
    """
    entries = []

    try:
        if not passfile.exists():
            return []

        if not passfile.is_file():
            warnings.warn(
                'password file {!r} is not a plain file'.format(passfile))
            return []

        if _system != 'Windows':
            # Mirror libpq: refuse files readable by group or others.
            if passfile.stat().st_mode & (stat.S_IRWXG | stat.S_IRWXO):
                warnings.warn(
                    'password file {!r} has group or world access; '
                    'permissions should be u=rw (0600) or less'.format(
                        passfile))
                return []

        with passfile.open('rt') as f:
            for raw_line in f:
                raw_line = raw_line.strip()
                if not raw_line or raw_line.startswith('#'):
                    # Skip empty lines and comments.
                    continue
                # Temporarily replace the escaped backslash with a
                # newline (which cannot occur in the stripped line),
                # split on unescaped colons, then restore each field.
                raw_line = raw_line.replace(R'\\', '\n')
                fields = re.split(r'(?<!\\):', raw_line, maxsplit=4)
                entries.append(
                    tuple(field.replace('\n', R'\\') for field in fields))
    except IOError:
        pass

    return entries
def _read_password_from_pgpass(
        *, passfile: typing.Optional[pathlib.Path],
        hosts: typing.List[str],
        ports: typing.List[int],
        database: str,
        user: str):
    """Parse the pgpass file and return the matching password.

    :return:
        Password string, if found, ``None`` otherwise.
    """
    passtab = _read_password_file(passfile)
    if not passtab:
        return None

    for host, port in zip(hosts, ports):
        if host.startswith('/'):
            # Unix sockets get normalized into 'localhost'
            host = 'localhost'

        for phost, pport, pdatabase, puser, ppassword in passtab:
            # '*' in any field is a wildcard, per libpq semantics.
            matched = (
                phost in ('*', host) and
                pport in ('*', str(port)) and
                pdatabase in ('*', database) and
                puser in ('*', user)
            )
            if matched:
                return ppassword

    return None
def _validate_port_spec(hosts, port):
if isinstance(port, list):
# If there is a list of ports, its length must
# match that of the host list.
if len(port) != len(hosts):
raise exceptions.InterfaceError(
'could not match {} port numbers to {} hosts'.format(
len(port), len(hosts)))
else:
port = [port for _ in range(len(hosts))]
return port
def _parse_hostlist(hostlist, port, *, unquote=False):
    """Split a libpq host list (``h1:p1,h2,...``) into (hosts, ports).

    When *port* is falsy, per-host ports come from the host spec itself,
    then from ``PGPORT``, then from the default 5432.
    """
    hostspecs = hostlist.split(',') if ',' in hostlist else [hostlist]

    hosts = []
    hostlist_ports = []

    if not port:
        # Resolve the fallback port(s) from the environment.
        portspec = os.environ.get('PGPORT')
        if not portspec:
            default_port = 5432
        elif ',' in portspec:
            default_port = [int(p) for p in portspec.split(',')]
        else:
            default_port = int(portspec)
        default_port = _validate_port_spec(hostspecs, default_port)
    else:
        port = _validate_port_spec(hostspecs, port)

    for i, hostspec in enumerate(hostspecs):
        if hostspec.startswith('/'):
            # Unix-socket path: the whole spec is the address.
            addr, hostspec_port = hostspec, ''
        else:
            addr, _, hostspec_port = hostspec.partition(':')

        if unquote:
            addr = urllib.parse.unquote(addr)
        hosts.append(addr)

        if not port:
            if hostspec_port:
                if unquote:
                    hostspec_port = urllib.parse.unquote(hostspec_port)
                hostlist_ports.append(int(hostspec_port))
            else:
                hostlist_ports.append(default_port[i])

    if not port:
        port = hostlist_ports

    return hosts, port
def _parse_connect_dsn_and_args(*, dsn, host, port, user,
                                password, passfile, database, ssl,
                                connect_timeout, server_settings):
    """Resolve the final connection targets and parameters.

    Precedence for every setting is: explicit keyword argument, then DSN
    component / DSN query parameter, then ``PG*`` environment variable,
    then the built-in default.  Returns ``(addrs, params)`` where *addrs*
    is a list of TCP ``(host, port)`` tuples and/or UNIX-socket paths and
    *params* is a ``_ConnectionParameters`` namedtuple.
    """
    # `auth_hosts` is the version of host information for the purposes
    # of reading the pgpass file.
    auth_hosts = None

    if dsn:
        parsed = urllib.parse.urlparse(dsn)

        if parsed.scheme not in {'postgresql', 'postgres'}:
            raise ValueError(
                'invalid DSN: scheme is expected to be either '
                '"postgresql" or "postgres", got {!r}'.format(parsed.scheme))

        # Split the netloc into auth ("user:pass") and hostspec parts.
        if parsed.netloc:
            if '@' in parsed.netloc:
                dsn_auth, _, dsn_hostspec = parsed.netloc.partition('@')
            else:
                dsn_hostspec = parsed.netloc
                dsn_auth = ''
        else:
            dsn_auth = dsn_hostspec = ''

        if dsn_auth:
            dsn_user, _, dsn_password = dsn_auth.partition(':')
        else:
            dsn_user = dsn_password = ''

        if not host and dsn_hostspec:
            host, port = _parse_hostlist(dsn_hostspec, port, unquote=True)

        # The URL path (minus the leading slash) is the database name.
        if parsed.path and database is None:
            dsn_database = parsed.path
            if dsn_database.startswith('/'):
                dsn_database = dsn_database[1:]
            database = urllib.parse.unquote(dsn_database)

        if user is None and dsn_user:
            user = urllib.parse.unquote(dsn_user)

        if password is None and dsn_password:
            password = urllib.parse.unquote(dsn_password)

        if parsed.query:
            query = urllib.parse.parse_qs(parsed.query, strict_parsing=True)
            # parse_qs returns lists; keep only the last occurrence.
            for key, val in query.items():
                if isinstance(val, list):
                    query[key] = val[-1]

            # Recognized query parameters are popped; anything left over
            # is treated as a server setting (see below).
            if 'port' in query:
                val = query.pop('port')
                if not port and val:
                    port = [int(p) for p in val.split(',')]

            if 'host' in query:
                val = query.pop('host')
                if not host and val:
                    host, port = _parse_hostlist(val, port)

            if 'dbname' in query:
                val = query.pop('dbname')
                if database is None:
                    database = val

            if 'database' in query:
                val = query.pop('database')
                if database is None:
                    database = val

            if 'user' in query:
                val = query.pop('user')
                if user is None:
                    user = val

            if 'password' in query:
                val = query.pop('password')
                if password is None:
                    password = val

            if 'passfile' in query:
                val = query.pop('passfile')
                if passfile is None:
                    passfile = val

            if 'sslmode' in query:
                val = query.pop('sslmode')
                if ssl is None:
                    ssl = val

            if query:
                if server_settings is None:
                    server_settings = query
                else:
                    # Explicit server_settings win over DSN leftovers.
                    server_settings = {**query, **server_settings}

    if not host:
        hostspec = os.environ.get('PGHOST')
        if hostspec:
            host, port = _parse_hostlist(hostspec, port)

    if not host:
        auth_hosts = ['localhost']

        # No host anywhere: fall back to the conventional local
        # UNIX-socket directories (or TCP localhost on Windows).
        if _system == 'Windows':
            host = ['localhost']
        else:
            host = ['/run/postgresql', '/var/run/postgresql',
                    '/tmp', '/private/tmp', 'localhost']

    if not isinstance(host, list):
        host = [host]

    if auth_hosts is None:
        auth_hosts = host

    if not port:
        portspec = os.environ.get('PGPORT')
        if portspec:
            if ',' in portspec:
                port = [int(p) for p in portspec.split(',')]
            else:
                port = int(portspec)
        else:
            port = 5432

    elif isinstance(port, (list, tuple)):
        port = [int(p) for p in port]

    else:
        port = int(port)

    port = _validate_port_spec(host, port)

    if user is None:
        user = os.getenv('PGUSER')
        if not user:
            user = getpass.getuser()

    if password is None:
        password = os.getenv('PGPASSWORD')

    if database is None:
        database = os.getenv('PGDATABASE')

    if database is None:
        database = user

    if user is None:
        raise exceptions.InterfaceError(
            'could not determine user name to connect with')

    if database is None:
        raise exceptions.InterfaceError(
            'could not determine database name to connect to')

    # Still no password: consult the pgpass file (explicit, env, or the
    # per-user default location).
    if password is None:
        if passfile is None:
            passfile = os.getenv('PGPASSFILE')

        if passfile is None:
            homedir = compat.get_pg_home_directory()
            if homedir:
                passfile = homedir / PGPASSFILE
            else:
                passfile = None
        else:
            passfile = pathlib.Path(passfile)

        if passfile is not None:
            password = _read_password_from_pgpass(
                hosts=auth_hosts, ports=port,
                database=database, user=user,
                passfile=passfile)

    addrs = []
    have_tcp_addrs = False
    for h, p in zip(host, port):
        if h.startswith('/'):
            # UNIX socket name
            if '.s.PGSQL.' not in h:
                h = os.path.join(h, '.s.PGSQL.{}'.format(p))
            addrs.append(h)
        else:
            # TCP host/port
            addrs.append((h, p))
            have_tcp_addrs = True

    if not addrs:
        raise ValueError(
            'could not determine the database address to connect to')

    if ssl is None:
        ssl = os.getenv('PGSSLMODE')

    if ssl is None and have_tcp_addrs:
        ssl = 'prefer'

    # ssl_is_advisory is only allowed to come from the sslmode parameter.
    ssl_is_advisory = None
    if isinstance(ssl, str):
        SSLMODES = {
            'disable': 0,
            'allow': 1,
            'prefer': 2,
            'require': 3,
            'verify-ca': 4,
            'verify-full': 5,
        }
        try:
            sslmode = SSLMODES[ssl]
        except KeyError:
            modes = ', '.join(SSLMODES.keys())
            raise exceptions.InterfaceError(
                '`sslmode` parameter must be one of: {}'.format(modes))

        # sslmode 'allow' is currently handled as 'prefer' because we're
        # missing the "retry with SSL" behavior for 'allow', but do have the
        # "retry without SSL" behavior for 'prefer'.
        # Not changing 'allow' to 'prefer' here would be effectively the same
        # as changing 'allow' to 'disable'.
        if sslmode == SSLMODES['allow']:
            sslmode = SSLMODES['prefer']

        # docs at https://www.postgresql.org/docs/10/static/libpq-connect.html
        # Not implemented: sslcert & sslkey & sslrootcert & sslcrl params.
        if sslmode <= SSLMODES['allow']:
            ssl = False
            ssl_is_advisory = sslmode >= SSLMODES['allow']
        else:
            ssl = ssl_module.create_default_context()
            ssl.check_hostname = sslmode >= SSLMODES['verify-full']
            ssl.verify_mode = ssl_module.CERT_REQUIRED
            if sslmode <= SSLMODES['require']:
                ssl.verify_mode = ssl_module.CERT_NONE
            ssl_is_advisory = sslmode <= SSLMODES['prefer']
    elif ssl is True:
        ssl = ssl_module.create_default_context()

    if server_settings is not None and (
            not isinstance(server_settings, dict) or
            not all(isinstance(k, str) for k in server_settings) or
            not all(isinstance(v, str) for v in server_settings.values())):
        raise ValueError(
            'server_settings is expected to be None or '
            'a Dict[str, str]')

    params = _ConnectionParameters(
        user=user, password=password, database=database, ssl=ssl,
        ssl_is_advisory=ssl_is_advisory, connect_timeout=connect_timeout,
        server_settings=server_settings)

    return addrs, params
def _parse_connect_arguments(*, dsn, host, port, user, password, passfile,
                             database, timeout, command_timeout,
                             statement_cache_size,
                             max_cached_statement_lifetime,
                             max_cacheable_statement_size,
                             ssl, server_settings):
    """Validate tuning arguments and resolve the connection spec.

    Returns the ``(addrs, params, config)`` triple used by the connect
    path.  Raises ``ValueError`` for out-of-range cache/timeout values.
    """
    # Cache-related settings must be non-negative integers (bools are
    # rejected explicitly since they pass isinstance(int) checks).
    cache_settings = {
        'max_cacheable_statement_size': max_cacheable_statement_size,
        'max_cached_statement_lifetime': max_cached_statement_lifetime,
        'statement_cache_size': statement_cache_size,
    }
    for var_name, var_val in cache_settings.items():
        if var_val is None or isinstance(var_val, bool) or var_val < 0:
            raise ValueError(
                '{} is expected to be greater '
                'or equal to 0, got {!r}'.format(var_name, var_val))

    if command_timeout is not None:
        # Accept anything float() can convert, but reject bools and
        # non-positive values with a uniform error message.
        try:
            if isinstance(command_timeout, bool):
                raise ValueError
            command_timeout = float(command_timeout)
            if command_timeout <= 0:
                raise ValueError
        except ValueError:
            raise ValueError(
                'invalid command_timeout value: '
                'expected greater than 0 float (got {!r})'.format(
                    command_timeout)) from None

    addrs, params = _parse_connect_dsn_and_args(
        dsn=dsn, host=host, port=port, user=user,
        password=password, passfile=passfile, ssl=ssl,
        database=database, connect_timeout=timeout,
        server_settings=server_settings)

    config = _ClientConfiguration(
        command_timeout=command_timeout,
        statement_cache_size=statement_cache_size,
        max_cached_statement_lifetime=max_cached_statement_lifetime,
        max_cacheable_statement_size=max_cacheable_statement_size,)

    return addrs, params, config
class TLSUpgradeProto(asyncio.Protocol):
    """Reads the server's single-byte reply to an SSLRequest message.

    The outcome is delivered via the ``on_data`` future: ``True`` for an
    agreed TLS upgrade, ``False`` for an allowed plaintext fallback, or
    an exception otherwise.
    """

    def __init__(self, loop, host, port, ssl_context, ssl_is_advisory):
        self.on_data = _create_future(loop)
        self.host = host
        self.port = port
        self.ssl_context = ssl_context
        self.ssl_is_advisory = ssl_is_advisory

    def data_received(self, data):
        if data == b'S':
            # Server agreed to upgrade to TLS.
            self.on_data.set_result(True)
            return

        plaintext_ok = (
            self.ssl_is_advisory and
            self.ssl_context.verify_mode == ssl_module.CERT_NONE
        )
        if plaintext_ok and data == b'N':
            # ssl_is_advisory will imply that ssl.verify_mode == CERT_NONE,
            # since the only way to get ssl_is_advisory is from
            # sslmode=prefer (or sslmode=allow). But be extra sure to
            # disallow insecure connections when the ssl context asks for
            # real security.
            self.on_data.set_result(False)
            return

        self.on_data.set_exception(
            ConnectionError(
                'PostgreSQL server at "{host}:{port}" '
                'rejected SSL upgrade'.format(
                    host=self.host, port=self.port)))

    def connection_lost(self, exc):
        if self.on_data.done():
            return
        if exc is None:
            exc = ConnectionError('unexpected connection_lost() call')
        self.on_data.set_exception(exc)
async def _create_ssl_connection(protocol_factory, host, port, *,
                                 loop, ssl_context, ssl_is_advisory=False):
    """Open a TCP connection and negotiate the PostgreSQL SSL upgrade.

    Sends an SSLRequest over a plain connection first; depending on the
    server's reply (and *ssl_is_advisory*) either wraps the transport in
    TLS or proceeds in plaintext.  Returns ``(transport, protocol)``.
    """
    tr, pr = await loop.create_connection(
        lambda: TLSUpgradeProto(loop, host, port,
                                ssl_context, ssl_is_advisory),
        host, port)

    tr.write(struct.pack('!ll', 8, 80877103))  # SSLRequest message.

    try:
        # True -> upgrade to TLS; False -> advisory plaintext fallback.
        do_ssl_upgrade = await pr.on_data
    except (Exception, asyncio.CancelledError):
        tr.close()
        raise

    if hasattr(loop, 'start_tls'):
        # Modern path: upgrade the existing transport in place.
        if do_ssl_upgrade:
            try:
                new_tr = await loop.start_tls(
                    tr, pr, ssl_context, server_hostname=host)
            except (Exception, asyncio.CancelledError):
                tr.close()
                raise
        else:
            new_tr = tr

        pg_proto = protocol_factory()
        pg_proto.connection_made(new_tr)
        new_tr.set_protocol(pg_proto)

        return new_tr, pg_proto
    else:
        # Legacy path (no loop.start_tls): duplicate the raw socket and
        # create a brand-new (possibly TLS) connection on top of it.
        conn_factory = functools.partial(
            loop.create_connection, protocol_factory)

        if do_ssl_upgrade:
            conn_factory = functools.partial(
                conn_factory, ssl=ssl_context, server_hostname=host)

        sock = _get_socket(tr)
        sock = sock.dup()
        _set_nodelay(sock)
        tr.close()

        try:
            return await conn_factory(sock=sock)
        except (Exception, asyncio.CancelledError):
            sock.close()
            raise
async def _connect_addr(
    *,
    addr,
    loop,
    timeout,
    params,
    config,
    connection_class,
    record_class
):
    """Connect to a single address (UNIX path or (host, port) tuple).

    Resolves a callable password, establishes the transport (optionally
    via the SSL upgrade dance), waits for the protocol handshake within
    the remaining *timeout* budget, and returns the wired-up connection.
    """
    assert loop is not None

    if timeout <= 0:
        raise asyncio.TimeoutError

    connected = _create_future(loop)

    # Keep the original params for the Connection object; the password
    # may be replaced below if it was supplied as a callable.
    params_input = params
    if callable(params.password):
        if inspect.iscoroutinefunction(params.password):
            password = await params.password()
        else:
            password = params.password()

        params = params._replace(password=password)

    proto_factory = lambda: protocol.Protocol(
        addr, connected, params, record_class, loop)

    if isinstance(addr, str):
        # UNIX socket
        connector = loop.create_unix_connection(proto_factory, addr)
    elif params.ssl:
        connector = _create_ssl_connection(
            proto_factory, *addr, loop=loop, ssl_context=params.ssl,
            ssl_is_advisory=params.ssl_is_advisory)
    else:
        connector = loop.create_connection(proto_factory, *addr)

    connector = asyncio.ensure_future(connector)
    before = time.monotonic()
    tr, pr = await compat.wait_for(connector, timeout=timeout)
    # Charge the time spent establishing the transport against the
    # budget remaining for the protocol handshake.
    timeout -= time.monotonic() - before

    try:
        if timeout <= 0:
            raise asyncio.TimeoutError
        await compat.wait_for(connected, timeout=timeout)
    except (Exception, asyncio.CancelledError):
        tr.close()
        raise

    con = connection_class(pr, tr, loop, addr, config, params_input)
    pr.set_connection(con)
    return con
async def _connect(*, loop, timeout, connection_class, record_class, **kwargs):
    """Try each resolved address in order; return the first connection.

    The *timeout* budget is shared across all attempts; if every address
    fails, the error from the last attempt is re-raised.
    """
    if loop is None:
        loop = asyncio.get_event_loop()

    addrs, params, config = _parse_connect_arguments(timeout=timeout, **kwargs)

    last_error = None
    remaining = timeout
    for addr in addrs:
        attempt_started = time.monotonic()
        try:
            return await _connect_addr(
                addr=addr,
                loop=loop,
                timeout=remaining,
                params=params,
                config=config,
                connection_class=connection_class,
                record_class=record_class,
            )
        except (OSError, asyncio.TimeoutError, ConnectionError) as ex:
            last_error = ex
        finally:
            # Deduct this attempt's duration from the shared budget.
            remaining -= time.monotonic() - attempt_started

    raise last_error
async def _cancel(*, loop, addr, params: _ConnectionParameters,
                  backend_pid, backend_secret):
    """Send a CancelRequest for the given backend over a fresh connection.

    Per the PostgreSQL protocol, cancellation uses a separate short-lived
    connection; the server replies by closing it, which resolves
    ``on_disconnect``.
    """

    class CancelProto(asyncio.Protocol):
        # Minimal protocol: we only care about when the server hangs up.

        def __init__(self):
            self.on_disconnect = _create_future(loop)

        def connection_lost(self, exc):
            if not self.on_disconnect.done():
                self.on_disconnect.set_result(True)

    if isinstance(addr, str):
        tr, pr = await loop.create_unix_connection(CancelProto, addr)
    else:
        if params.ssl:
            tr, pr = await _create_ssl_connection(
                CancelProto,
                *addr,
                loop=loop,
                ssl_context=params.ssl,
                ssl_is_advisory=params.ssl_is_advisory)
        else:
            tr, pr = await loop.create_connection(
                CancelProto, *addr)
            _set_nodelay(_get_socket(tr))

    # Pack a CancelRequest message
    msg = struct.pack('!llll', 16, 80877102, backend_pid, backend_secret)

    try:
        tr.write(msg)
        await pr.on_disconnect
    finally:
        tr.close()
def _get_socket(transport):
sock = transport.get_extra_info('socket')
if sock is None:
# Shouldn't happen with any asyncio-complaint event loop.
raise ConnectionError(
'could not get the socket for transport {!r}'.format(transport))
return sock
def _set_nodelay(sock):
if not hasattr(socket, 'AF_UNIX') or sock.family != socket.AF_UNIX:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
def _create_future(loop):
try:
create_future = loop.create_future
except AttributeError:
return asyncio.Future(loop=loop)
else:
return create_future()
| 30.181572
| 79
| 0.564874
|
acfe8826864653ef660ef04485ce047dc158f6b0
| 1,130
|
py
|
Python
|
uf_examples/courses/manage_columns_filter.py
|
mconlon17/vivo-org-ingest
|
7ea3130c78322c18cc263263ee369588c3e1fc0a
|
[
"BSD-2-Clause"
] | 6
|
2015-04-22T15:17:51.000Z
|
2019-03-01T16:26:35.000Z
|
uf_examples/courses/manage_columns_filter.py
|
mconlon17/vivo-org-ingest
|
7ea3130c78322c18cc263263ee369588c3e1fc0a
|
[
"BSD-2-Clause"
] | 174
|
2015-02-18T13:32:39.000Z
|
2019-06-26T18:56:18.000Z
|
uf_examples/courses/manage_columns_filter.py
|
mconlon17/vivo-org-ingest
|
7ea3130c78322c18cc263263ee369588c3e1fc0a
|
[
"BSD-2-Clause"
] | 11
|
2015-04-22T15:17:59.000Z
|
2019-06-24T10:49:40.000Z
|
#!/usr/bin/env/python
"""
manage_columns_filter.py -- add needed columns, remove unused columns
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2016 (c) Michael Conlon"
__license__ = "New BSD License"
__version__ = "0.01"
import sys
from pump.vivopump import read_csv_fp, write_csv_fp, improve_course_title
# Read the whole CSV from stdin into a dict of rows keyed by row number.
data_in = read_csv_fp(sys.stdin)
# NOTE: Python 2 only -- dict.keys() returns an indexable list here; this
# grabs an arbitrary row's column names for the diagnostic message below.
var_names = data_in[data_in.keys()[1]].keys()  # create a list of var_names from the first row
print >>sys.stderr, "Columns in", var_names
data_out = {}
for row, data in data_in.items():
    # Copy so the input row is never mutated in place.
    new_data =dict(data)
    # Add these columns
    new_data['remove'] = ''
    new_data['uri'] = ''
    new_data['title'] = improve_course_title(new_data['UF_COURSE_TITLE'])
    new_data['ccn'] = new_data['UF_COURSE_CD']
    # Delete all the upper case column names
    # (safe in Python 2 only: keys() returns a list copy, so deleting while
    # iterating does not break the loop -- revisit if ever ported to 3)
    for name in new_data.keys():
        if name == name.upper():
            del new_data[name]
    data_out[row] = new_data
# Report the transformed column set and write the result to stdout.
var_names = data_out[data_out.keys()[1]].keys()  # create a list of var_names from the first row
print >>sys.stderr, "Columns out", var_names
write_csv_fp(sys.stdout, data_out)
| 25.111111
| 96
| 0.69469
|
acfe88b2ff6a426b00ca119936f789f7f3bf616d
| 1,544
|
py
|
Python
|
lineplotter.py
|
mrawls/FDBinary-tools
|
26b78a12df53d9a1a477fcf247cbf2dad8c39836
|
[
"MIT"
] | 2
|
2020-05-25T09:45:03.000Z
|
2020-08-28T14:43:53.000Z
|
lineplotter.py
|
mrawls/FDBinary-tools
|
26b78a12df53d9a1a477fcf247cbf2dad8c39836
|
[
"MIT"
] | null | null | null |
lineplotter.py
|
mrawls/FDBinary-tools
|
26b78a12df53d9a1a477fcf247cbf2dad8c39836
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import sys
sys.path.append('../BF-rvplotter') # hey this works!
from BF_functions import read_specfiles
'''
Plot a few absorption lines from a stellar spectra to compare with a model.
Note read_specfiles requires timestamps in bjdfile, but they do not need to be correct.
'''
infiles = '../../FDBinary/9246715/infiles_lineplot1.txt'
bjdfile = '../../FDBinary/9246715/bjds_baryvels.txt'
isAPOGEE = False
nspec, filenamelist, datetimelist, wavelist, speclist, source = read_specfiles(infiles, bjdfile, isAPOGEE)
# We assume [0] is star1 to 8400 A, [1] is star1 out past 8400 A
# We assume [2] is star2 to 8400 A, [3] is star2 out past 8400 A
# We assume [4] is a comparison model spectrum to plot below each actual spectrum
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3)
ax1.axis([5860,5888, -0.6,1.4])
ax2.axis([6545,6582, -0.6,1.4])
ax3.axis([8484,8560, -0.6,1.4])
ax4.axis([5860,5888, -0.6,1.4])
ax5.axis([6545,6582, -0.6,1.4])
ax6.axis([8484,8560, -0.6,1.4])
ax1.plot(wavelist[0], speclist[0])
ax2.plot(wavelist[0], speclist[0])
ax3.plot(wavelist[1], speclist[1])
ax4.plot(wavelist[2], speclist[2])
ax5.plot(wavelist[2], speclist[2])
ax6.plot(wavelist[3], speclist[3])
ax1.plot(wavelist[4], speclist[4]-0.7)
ax2.plot(wavelist[4], speclist[4]-0.7)
ax3.plot(wavelist[4], speclist[4]-0.7)
ax4.plot(wavelist[4], speclist[4]-0.7)
ax5.plot(wavelist[4], speclist[4]-0.7)
ax6.plot(wavelist[4], speclist[4]-0.7)
plt.tight_layout()
plt.show()
| 35.906977
| 106
| 0.713083
|
acfe8a145abba379b21d247aab01e27375ffdd1d
| 2,650
|
py
|
Python
|
tests/zookeeper/test_party.py
|
denissmirnov/kiel
|
fa80aa1ccd790c0fbbd8cc46a72162195e1aed69
|
[
"Apache-2.0"
] | 47
|
2016-02-23T18:32:55.000Z
|
2021-12-03T00:50:52.000Z
|
tests/zookeeper/test_party.py
|
denissmirnov/kiel
|
fa80aa1ccd790c0fbbd8cc46a72162195e1aed69
|
[
"Apache-2.0"
] | 21
|
2016-02-23T01:41:18.000Z
|
2020-10-27T21:09:32.000Z
|
tests/zookeeper/test_party.py
|
denissmirnov/kiel
|
fa80aa1ccd790c0fbbd8cc46a72162195e1aed69
|
[
"Apache-2.0"
] | 13
|
2016-05-18T06:16:48.000Z
|
2019-10-31T19:35:25.000Z
|
import unittest
from mock import Mock
from kazoo.exceptions import NoNodeError
from kiel.zookeeper import party
class PartyTests(unittest.TestCase):
def test_start_ensures_path_and_watches_changes(self):
client = Mock()
def collect_callback(fn):
client.change_callback = fn
client.ChildrenWatch.return_value.side_effect = collect_callback
on_change = Mock()
p = party.Party(client, "host.local", "/my/party", on_change)
p.start()
client.ensure_path.assert_called_once_with("/my/party")
client.ChildrenWatch.assert_called_once_with("/my/party")
assert on_change.called is False
client.change_callback(["foo", "bar"])
on_change.assert_called_once_with(["foo", "bar"])
def test_join_when_znode_does_not_exist(self):
client = Mock()
client.exists.return_value = None
p = party.Party(client, "host.local", "/my/party", Mock())
p.join()
client.exists.assert_called_once_with("/my/party/host.local")
client.create.assert_called_once_with(
"/my/party/host.local", ephemeral=True, makepath=True
)
def test_join_when_znode_belongs_to_someone_else(self):
client = Mock()
client.exists.return_value = Mock(owner_session_id=1234)
client.client_id = (4321, 0)
p = party.Party(client, "host.local", "/my/party", Mock())
p.join()
client.transaction.assert_called_once_with()
transaction = client.transaction.return_value
transaction.delete.assert_called_once_with("/my/party/host.local")
transaction.create.assert_called_once_with(
"/my/party/host.local", ephemeral=True
)
transaction.commit.assert_called_once_with()
def test_join_when_znode_belongs_to_us(self):
client = Mock()
client.exists.return_value = Mock(owner_session_id=1234)
client.client_id = (1234, 0)
p = party.Party(client, "host.local", "/my/party", Mock())
p.join()
assert client.create.called is False
assert client.transaction.called is False
def test_leave(self):
client = Mock()
p = party.Party(client, "host.local", "/my/party", Mock())
p.leave()
client.delete.assert_called_once_with("/my/party/host.local")
def test_leave_znode_does_not_exist(self):
client = Mock()
client.delete.side_effect = NoNodeError
p = party.Party(client, "host.local", "/my/party", Mock())
p.leave()
client.delete.assert_called_once_with("/my/party/host.local")
| 28.191489
| 74
| 0.652453
|
acfe8a49d05fc2170c6895dbfe0d1e700310678b
| 9,408
|
py
|
Python
|
VAE/vae_bak_uncompleted.py
|
13952522076/diffvg
|
2c5af9ecf470b1c7071e821583e5ba09cb2c4622
|
[
"Apache-2.0"
] | null | null | null |
VAE/vae_bak_uncompleted.py
|
13952522076/diffvg
|
2c5af9ecf470b1c7071e821583e5ba09cb2c4622
|
[
"Apache-2.0"
] | null | null | null |
VAE/vae_bak_uncompleted.py
|
13952522076/diffvg
|
2c5af9ecf470b1c7071e821583e5ba09cb2c4622
|
[
"Apache-2.0"
] | null | null | null |
import torch
from base import BaseVAE
from torch import nn
from typing import List, TypeVar
import torch.nn.functional as F
# from torch import tensor as Tensor
Tensor = TypeVar('torch.tensor')
__all__ = ['VanillaVAE', "VAELoss"]
class SELayer(nn.Module):
def __init__(self, channel, reduction = 16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU(inplace = True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.se = SELayer(planes)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.se(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class VanillaVAE(BaseVAE):
def __init__(self,
in_channels: int,
latent_dim: int,
hidden_dims: List = None,
**kwargs) -> None:
super(VanillaVAE, self).__init__()
self.latent_dim = latent_dim
modules = []
if hidden_dims is None:
hidden_dims = [32, 64, 128, 256, 512]
# Build Encoder
# for h_dim in hidden_dims:
# modules.append(
# nn.Sequential(
# nn.Conv2d(in_channels, out_channels=h_dim,
# kernel_size=3, stride=2, padding=1),
# nn.BatchNorm2d(h_dim),
# nn.LeakyReLU()
# )
# )
# in_channels = h_dim
modules = []
self.inplanes = 64
conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
bn1 = nn.BatchNorm2d(64)
relu = nn.ReLU(inplace=True)
maxpool = nn.Conv2d(64,64, kernel_size=3, stride=2, padding=1)
layer1 = self._make_layer(BasicBlock, 64, 2)
layer2 = self._make_layer(BasicBlock, 128, 2, stride=2)
layer3 = self._make_layer(BasicBlock, 256, 2, stride=2)
layer4 = self._make_layer(BasicBlock, 512, 2, stride=2)
layer5 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=5, stride=3, padding=2, bias=False)
)
modules = [conv1, bn1, relu, maxpool, layer1, layer2, layer3, layer4, layer5]
self.encoder = nn.Sequential(*modules)
self.fc_mu = nn.Linear(512, latent_dim)
self.fc_var = nn.Linear(512, latent_dim)
# Build Decoder
modules = []
self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
hidden_dims.reverse()
for i in range(len(hidden_dims) - 1):
modules.append(
nn.Sequential(
nn.ConvTranspose2d(hidden_dims[i],
hidden_dims[i + 1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[i + 1]),
nn.LeakyReLU()
)
)
self.decoder = nn.Sequential(*modules)
self.final_layer = nn.Sequential(
nn.ConvTranspose2d(hidden_dims[-1],
hidden_dims[-1],
kernel_size=3,
stride=2,
padding=1,
output_padding=1),
nn.BatchNorm2d(hidden_dims[-1]),
nn.LeakyReLU(),
nn.Conv2d(hidden_dims[-1], out_channels=3,
kernel_size=3, padding=1),
nn.Tanh())
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def encode(self, input: Tensor) -> List[Tensor]:
"""
Encodes the input by passing through the encoder network
and returns the latent codes.
:param input: (Tensor) Input tensor to encoder [N x C x H x W]
:return: (Tensor) List of latent codes
"""
result = self.encoder(input)
result = F.adaptive_avg_pool2d(result,1)
result = torch.flatten(result, start_dim=1)
# Split the result into mu and var components
# of the latent Gaussian distribution
mu = self.fc_mu(result)
log_var = self.fc_var(result)
return [mu, log_var]
def decode(self, z: Tensor) -> Tensor:
"""
Maps the given latent codes
onto the image space.
:param z: (Tensor) [B x D]
:return: (Tensor) [B x C x H x W]
"""
result = self.decoder_input(z)
result = result.view(-1, 512, 2, 2)
result = self.decoder(result)
result = self.final_layer(result)
return result
def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
"""
Reparameterization trick to sample from N(mu, var) from
N(0,1).
:param mu: (Tensor) Mean of the latent Gaussian [B x D]
:param logvar: (Tensor) Standard deviation of the latent Gaussian [B x D]
:return: (Tensor) [B x D]
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def forward(self, inputs: Tensor, **kwargs):
mu, log_var = self.encode(inputs)
z = self.reparameterize(mu, log_var)
return {
"reconstruct": self.decode(z),
"input": inputs,
"mu": mu,
"log_var": log_var
}
def sample(self, z, **kwargs) -> Tensor:
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (Tensor)
"""
# z = torch.randn(num_samples,
# self.latent_dim)
#
# z = z.to(current_device)
samples = self.decode(z)
return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)["reconstruct"]
class VAELoss(nn.Module):
def __init__(self, M_N):
super(VAELoss, self).__init__()
self.M_N = M_N
def forward(self, out):
"""
Computes the VAE loss function.
KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
:param args:
:param kwargs:
:return:
"""
recons = out["reconstruct"]
input = out["input"]
mu = out["mu"]
log_var = out["log_var"]
kld_weight = self.M_N # Account for the minibatch samples from the dataset
recons_loss = F.mse_loss(recons, input)
kld_loss = kld_weight * torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)
loss = recons_loss + kld_loss
return {'loss': loss, 'Reconstruction_Loss': recons_loss, 'KLD': kld_loss}
if __name__ == '__main__':
model = VanillaVAE(in_channels=3, latent_dim=256)
x = torch.rand([3,3,224,224])
out = model(x)
reconstruct = out["reconstruct"]
input = out["input"]
mu = out["mu"]
log_var = out["log_var"]
print(reconstruct.shape)
print(input.shape)
print(mu.shape)
print(log_var.shape)
| 31.783784
| 113
| 0.537415
|
acfe8a65de229b9e0a5b72518b7391cf654e7276
| 9,880
|
py
|
Python
|
test/functional/mining_pos_reorg.py
|
CortezDevTeam/encocoin
|
638030888618b8b4572a809706346e3297b5cbb7
|
[
"MIT"
] | 1
|
2020-10-04T15:43:15.000Z
|
2020-10-04T15:43:15.000Z
|
test/functional/mining_pos_reorg.py
|
CortezDevTeam/encocoin
|
638030888618b8b4572a809706346e3297b5cbb7
|
[
"MIT"
] | null | null | null |
test/functional/mining_pos_reorg.py
|
CortezDevTeam/encocoin
|
638030888618b8b4572a809706346e3297b5cbb7
|
[
"MIT"
] | 3
|
2020-06-07T22:05:26.000Z
|
2020-08-31T18:10:54.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019 The EncoCoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import EncoCoinTestFramework
from test_framework.util import (
sync_blocks,
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
connect_nodes_clique,
disconnect_nodes,
set_node_times,
DecimalAmt,
)
class ReorgStakeTest(EncoCoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
# node 0 and 1 stake the blocks, node 2 makes the zerocoin spends
self.extra_args = [['-staking=0']] * self.num_nodes
def setup_chain(self):
# Start with PoS cache: 330 blocks
self._initialize_chain(toPosPhase=True)
self.enable_mocktime()
def setup_network(self):
# connect all nodes between each other
self.setup_nodes()
connect_nodes_clique(self.nodes)
self.sync_all()
def log_title(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
description = "Tests reorganisation for PoS blocks."
self.log.info("\n\n%s\n%s\n%s\n", title, underline, description)
def disconnect_all(self):
self.log.info("Disconnecting nodes...")
for i in range(self.num_nodes):
for j in range(self.num_nodes):
if j != i:
disconnect_nodes(self.nodes[i], j)
self.log.info("Nodes disconnected")
def get_tot_balance(self, nodeid):
wi = self.nodes[nodeid].getwalletinfo()
return wi['balance'] + wi['immature_balance']
def run_test(self):
def findUtxoInList(txid, vout, utxo_list):
for x in utxo_list:
if x["txid"] == txid and x["vout"] == vout:
return True, x
return False, None
# Stake with node 0 and node 1 up to public spend activation (400)
# 70 blocks: 5 blocks each (x7)
self.log.info("Staking 70 blocks to reach public spends activation...")
set_node_times(self.nodes, self.mocktime)
for i in range(7):
for peer in range(2):
for nblock in range(5):
self.mocktime = self.generate_pos(peer, self.mocktime)
sync_blocks(self.nodes)
set_node_times(self.nodes, self.mocktime)
block_time_0 = block_time_1 = self.mocktime
self.log.info("Blocks staked.")
# Check balances
self.log.info("Checking balances...")
initial_balance = [self.get_tot_balance(i) for i in range(self.num_nodes)]
# --nodes 0, 1: 62 pow blocks + 55 pos blocks
assert_equal(initial_balance[0], DecimalAmt(250.0 * (62 + 55)))
assert_equal(initial_balance[1], DecimalAmt(250.0 * (62 + 55)))
# --node 2: 62 pow blocks + 20 pos blocks - zc minted - zcfee
assert_equal(initial_balance[2], DecimalAmt(250.0 * (62 + 20) - 6666 - 0.08))
assert_equal(self.nodes[2].getzerocoinbalance()['Total'], DecimalAmt(6666))
self.log.info("Balances ok.")
# create the raw zerocoin spend txes
addy = self.nodes[2].getnewaddress()
self.log.info("Creating the raw zerocoin public spends...")
mints = self.nodes[2].listmintedzerocoins(True, True)
tx_A0 = self.nodes[2].createrawzerocoinspend(mints[0]["serial hash"], addy)
tx_A1 = self.nodes[2].createrawzerocoinspend(mints[1]["serial hash"], addy)
# Spending same coins to different recipients to get different txids
new_addy = "yAVWM5urwaTyhiuFQHP2aP47rdZsLUG5PH"
tx_B0 = self.nodes[2].createrawzerocoinspend(mints[0]["serial hash"], new_addy)
tx_B1 = self.nodes[2].createrawzerocoinspend(mints[1]["serial hash"], new_addy)
# Disconnect nodes
minted_amount = mints[0]["denomination"] + mints[1]["denomination"]
self.disconnect_all()
# Stake one block with node-0 and save the stake input
self.log.info("Staking 1 block with node 0...")
initial_unspent_0 = self.nodes[0].listunspent()
self.nodes[0].generate(1)
block_time_0 += 60
set_node_times(self.nodes, block_time_0)
last_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
assert(len(last_block["tx"]) > 1) # a PoS block has at least two txes
coinstake_txid = last_block["tx"][1]
coinstake_tx = self.nodes[0].getrawtransaction(coinstake_txid, True)
assert(coinstake_tx["vout"][0]["scriptPubKey"]["hex"] == "") # first output of coinstake is empty
stakeinput = coinstake_tx["vin"][0]
# The stake input was unspent 1 block ago, now it's not
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], initial_unspent_0)
assert (res and utxo["spendable"])
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (not res or not utxo["spendable"])
self.log.info("Coinstake input %s...%s-%d is no longer spendable." % (
stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
# Relay zerocoin spends
self.nodes[0].sendrawtransaction(tx_A0)
self.nodes[0].sendrawtransaction(tx_A1)
# Stake 10 more blocks with node-0 and check balances
self.log.info("Staking 10 more blocks with node 0...")
for i in range(10):
block_time_0 = self.generate_pos(0, block_time_0)
expected_balance_0 = initial_balance[0] + DecimalAmt(11 * 250.0)
assert_equal(self.get_tot_balance(0), expected_balance_0)
self.log.info("Balance for node 0 checks out.")
# Connect with node 2, sync and check zerocoin balance
self.log.info("Reconnecting node 0 and node 2")
connect_nodes_bi(self.nodes, 0, 2)
sync_blocks([self.nodes[i] for i in [0, 2]])
self.log.info("Resetting zerocoin mints on node 2")
self.nodes[2].resetmintzerocoin(True)
assert_equal(self.get_tot_balance(2), initial_balance[2] + DecimalAmt(minted_amount))
assert_equal(self.nodes[2].getzerocoinbalance()['Total'], DecimalAmt(6666-minted_amount))
self.log.info("Balance for node 2 checks out.")
# Double spending txes not possible
assert_raises_rpc_error(-26, "bad-txns-invalid-zpiv",
self.nodes[0].sendrawtransaction, tx_B0)
assert_raises_rpc_error(-26, "bad-txns-invalid-zpiv",
self.nodes[0].sendrawtransaction, tx_B1)
# verify that the stakeinput can't be spent
stakeinput_tx_json = self.nodes[0].getrawtransaction(stakeinput["txid"], True)
stakeinput_amount = float(stakeinput_tx_json["vout"][int(stakeinput["vout"])]["value"])
rawtx_unsigned = self.nodes[0].createrawtransaction(
[{"txid": stakeinput["txid"], "vout": int(stakeinput["vout"])}],
{"xxncEuJK27ygNh7imNfaX8JV6ZQUnoBqzN": (stakeinput_amount-0.01)})
rawtx = self.nodes[0].signrawtransaction(rawtx_unsigned)
assert(rawtx["complete"])
try:
self.nodes[0].sendrawtransaction(rawtx["hex"])
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if e.error["code"] not in [-26, -25]:
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if ([x for x in ["bad-txns-inputs-spent", "Missing inputs"] if x in e.error['message']] == []):
raise e
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
self.log.info("GOOD: v2 spend was not possible.")
# Spend tx_B0 and tx_B1 on the other chain
self.nodes[1].sendrawtransaction(tx_B0)
self.nodes[1].sendrawtransaction(tx_B1)
# Stake 12 blocks with node-1
set_node_times(self.nodes, block_time_1)
self.log.info("Staking 12 blocks with node 1...")
for i in range(12):
block_time_1 = self.generate_pos(1, block_time_1)
expected_balance_1 = initial_balance[1] + DecimalAmt(12 * 250.0)
assert_equal(self.get_tot_balance(1), expected_balance_1)
self.log.info("Balance for node 1 checks out.")
# re-connect and sync nodes and check that node-0 and node-2 get on the other chain
new_best_hash = self.nodes[1].getbestblockhash()
self.log.info("Connecting and syncing nodes...")
set_node_times(self.nodes, block_time_1)
connect_nodes_clique(self.nodes)
sync_blocks(self.nodes)
for i in [0, 2]:
assert_equal(self.nodes[i].getbestblockhash(), new_best_hash)
# check balance of node-0
assert_equal(self.get_tot_balance(0), initial_balance[0])
self.log.info("Balance for node 0 checks out.")
# check that NOW the original stakeinput is present and spendable
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (res and utxo["spendable"])
self.log.info("Coinstake input %s...%s-%d is spendable again." % (
stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
self.nodes[0].sendrawtransaction(rawtx["hex"])
self.nodes[1].generate(1)
sync_blocks(self.nodes)
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (not res or not utxo["spendable"])
if __name__ == '__main__':
ReorgStakeTest().main()
| 46.824645
| 116
| 0.636134
|
acfe8a94c953a827d65adf79e589aaeece6b0fa1
| 42,788
|
py
|
Python
|
udsoncan/__init__.py
|
Mark-RSK/python-udsoncan
|
dc9ff3a74ff50e4a2178738b5e729994fef9d4a1
|
[
"MIT"
] | null | null | null |
udsoncan/__init__.py
|
Mark-RSK/python-udsoncan
|
dc9ff3a74ff50e4a2178738b5e729994fef9d4a1
|
[
"MIT"
] | null | null | null |
udsoncan/__init__.py
|
Mark-RSK/python-udsoncan
|
dc9ff3a74ff50e4a2178738b5e729994fef9d4a1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inspect
import struct
import math
from udsoncan.exceptions import *
from udsoncan.Request import Request
from udsoncan.Response import Response
import logging, logging.config
from os import path
log_file_path = path.join(path.dirname(path.abspath(__file__)), 'logging.conf')
logging.config.fileConfig(log_file_path)
try:
logging.config.fileConfig(log_file_path)
except Exception as e:
logging.warning('Cannot load logging configuration. %s:%s' % (e.__class__.__name__, str(e)))
#Define how to encode/decode a Data Identifier value to/from a binary payload
class DidCodec:
"""
This class defines how to encode/decode a Data Identifier value to/from a binary payload.
One should extend this class and override the ``encode``, ``decode``, ``__len__`` methods as they will be used
to generate or parse binary payloads.
- ``encode`` Must receive any Python object and must return a bytes payload
- ``decode`` Must receive a bytes payload and may return any Python object
- ``__len__`` Must return the length of the bytes payload
If a data can be processed by a pack string, then this class may be used as is, without being extended.
:param packstr: A pack string used with struct.pack / struct.unpack.
:type packstr: string
"""
def __init__(self, packstr=None):
self.packstr = packstr
def encode(self, *did_value):
if self.packstr is None:
raise NotImplementedError('Cannot encode DID to binary payload. Codec has no "encode" implementation')
return struct.pack(self.packstr, *did_value)
def decode(self, did_payload):
if self.packstr is None:
raise NotImplementedError('Cannot decode DID from binary payload. Codec has no "decode" implementation')
return struct.unpack(self.packstr, did_payload)
#Must tell the size of the payload encoded or expected for decoding
def __len__(self):
if self.packstr is None:
raise NotImplementedError('Cannot tell the payload size. Codec has no "__len__" implementation')
return struct.calcsize(self.packstr)
@classmethod
def from_config(cls, didconfig):
if isinstance(didconfig, cls): #the given object already is a DidCodec instance
return didconfig
# The definition of the codec is a class. Returns an instance of this codec.
if inspect.isclass(didconfig) and issubclass(didconfig, cls):
return didconfig()
# It could be that the codec is in a dict. (for io_control)
if isinstance(didconfig, dict) and 'codec' in didconfig:
return cls.from_config(didconfig['codec'])
# The codec can be defined by a struct pack/unpack string
if isinstance(didconfig, str):
return cls(packstr = didconfig)
# Some standards, such as J1939, break down the 3-byte ID into 2-byte ID and 1-byte subtypes.
class Dtc:
"""
Defines a Diagnostic Trouble Code which consist of a 3-byte ID, a status, a severity and some diagnostic data.
:param dtcid: The 3-byte ID of the DTC
:type dtcid: int
"""
class Format:
"""
Provide a list of DTC formats and their indices. These values are used by the :ref:`The ReadDTCInformation<ReadDtcInformation>`
when requesting a number of DTCs.
"""
ISO15031_6 = 0
ISO14229_1 = 1
SAE_J1939_73 = 2
ISO11992_4 = 3
@classmethod
def get_name(cls, given_id):
if given_id is None:
return ""
for member in inspect.getmembers(cls):
if isinstance(member[1], int):
if member[1] == given_id:
return member[0]
return None
# DTC Status byte
# This byte is an 8-bit flag indicating how much we are sure that a DTC is active.
class Status:
"""
Represents a DTC status which consists of 8 boolean flags (a byte). All flags can be set after instantiation without problems.
:param test_failed: DTC is no longer failed at the time of the request
:type test_failed: bool
:param test_failed_this_operation_cycle: DTC never failed on the current operation cycle.
:type test_failed_this_operation_cycle: bool
:param pending: DTC failed on the current or previous operation cycle.
:type pending: bool
:param confirmed: DTC is not confirmed at the time of the request.
:type confirmed: bool
:param test_not_completed_since_last_clear: DTC test has been completed since the last codeclear.
:type test_not_completed_since_last_clear: bool
:param test_failed_since_last_clear: DTC test failed at least once since last code clear.
:type test_failed_since_last_clear: bool
:param test_not_completed_this_operation_cycle: DTC test completed this operation cycle.
:type test_not_completed_this_operation_cycle: bool
:param warning_indicator_requested: Server is not requesting warningIndicator to be active.
:type warning_indicator_requested: bool
"""
def __init__(self, test_failed=False, test_failed_this_operation_cycle=False, pending=False, confirmed=False, test_not_completed_since_last_clear=False, test_failed_since_last_clear=False, test_not_completed_this_operation_cycle=False, warning_indicator_requested=False):
self.test_failed = test_failed
self.test_failed_this_operation_cycle = test_failed_this_operation_cycle
self.pending = pending
self.confirmed = confirmed
self.test_not_completed_since_last_clear = test_not_completed_since_last_clear
self.test_failed_since_last_clear = test_failed_since_last_clear
self.test_not_completed_this_operation_cycle = test_not_completed_this_operation_cycle
self.warning_indicator_requested = warning_indicator_requested
def get_byte_as_int(self): # Returns the status byte as an integer
byte = 0
byte |= 0x1 if self.test_failed else 0
byte |= 0x2 if self.test_failed_this_operation_cycle else 0
byte |= 0x4 if self.pending else 0
byte |= 0x8 if self.confirmed else 0
byte |= 0x10 if self.test_not_completed_since_last_clear else 0
byte |= 0x20 if self.test_failed_since_last_clear else 0
byte |= 0x40 if self.test_not_completed_this_operation_cycle else 0
byte |= 0x80 if self.warning_indicator_requested else 0
return byte
def get_byte(self): # Returns the status byte in "bytes" format for payload creation
return struct.pack('B', self.get_byte_as_int())
def set_byte(self, byte): # Set all the status flags from the status byte
if not isinstance(byte, int) and not isinstance(byte, bytes):
raise ValueError('Given byte must be an integer or bytes object.')
if isinstance(byte, bytes):
byte = struct.unpack('B', byte[0])
self.test_failed = True if byte & 0x01 > 0 else False
self.test_failed_this_operation_cycle = True if byte & 0x02 > 0 else False
self.pending = True if byte & 0x04 > 0 else False
self.confirmed = True if byte & 0x08 > 0 else False
self.test_not_completed_since_last_clear = True if byte & 0x10 > 0 else False
self.test_failed_since_last_clear = True if byte & 0x20 > 0 else False
self.test_not_completed_this_operation_cycle = True if byte & 0x40 > 0 else False
self.warning_indicator_requested = True if byte & 0x80 > 0 else False
@classmethod
def from_byte(cls, byte):
status = cls()
status.set_byte(byte)
return status
# DTC Severity byte, it's a 3-bit indicator telling how serious a trouble code is.
class Severity:
"""
Represents a DTC severity which consists of 3 boolean flags. All flags can be set after instantiation without problems.
:param maintenance_only: This value indicates that the failure requests maintenance only
:type maintenance_only: bool
:param check_at_next_exit: This value indicates that the failure requires a check of the vehicle at the next halt.
:type check_at_next_exit: bool
:param check_immediately: This value indicates that the failure requires an immediate check of the vehicle.
:type check_immediately: bool
"""
def __init__(self, maintenance_only=False, check_at_next_exit=False, check_immediately=False):
self.maintenance_only = maintenance_only
self.check_at_next_exit = check_at_next_exit
self.check_immediately = check_immediately
def get_byte_as_int(self):
byte = 0
byte |= 0x20 if self.maintenance_only else 0
byte |= 0x40 if self.check_at_next_exit else 0
byte |= 0x80 if self.check_immediately else 0
return byte
def get_byte(self):
return struct.pack('B', self.get_byte_as_int())
def set_byte(self, byte):
if not isinstance(byte, int) and not isinstance(byte, bytes):
raise ValueError('Given byte must be an integer or bytes object.')
if isinstance(byte, bytes):
byte = struct.unpack('B', byte[0])
self.maintenance_only = True if byte & 0x20 > 0 else False
self.check_at_next_exit = True if byte & 0x40 > 0 else False
self.check_immediately = True if byte & 0x80 > 0 else False
@property
def available(self):
return True if self.get_byte_as_int() > 0 else False
def __init__(self, dtcid):
self.id = dtcid
self.status = Dtc.Status()
self.snapshots = [] # . DID codec must be configured
self.extended_data = []
self.severity = Dtc.Severity()
self.functional_unit = None # Implementation specific (ISO 14229 D.4)
self.fault_counter = None # Common practice is to detect a specific failure many times before setting the DTC active. This counter should tell the actual count.
def __repr__(self):
return '<DTC ID=0x%06x, Status=0x%02x, Severity=0x%02x at 0x%08x>' % (self.id, self.status.get_byte_as_int(), self.severity.get_byte_as_int(), id(self))
# A snapshot data. Not defined by ISO14229 and implementation specific.
# To read this data, the client must have a DID codec set in its config.
class Snapshot:
record_number = None
did = None
data = None
raw_data = b''
# Extended data. Not defined by ISO14229 and implementation specific
# Only raw data can be given to user.
class ExtendedData:
record_number = None
raw_data = b''
class AddressAndLengthFormatIdentifier:
    """
    This class defines how many bytes of a memorylocation, composed of an address and a memorysize, should be encoded when sent over the underlying protocol.
    Mainly used by :ref:`ReadMemoryByAddress<ReadMemoryByAddress>`, :ref:`WriteMemoryByAddress<WriteMemoryByAddress>`, :ref:`RequestDownload<RequestDownload>` and :ref:`RequestUpload<RequestUpload>` services
    Defined by ISO-14229:2006, Annex G

    :param address_format: The number of bits on which an address should be encoded. Possible values are 8, 16, 24, 32, 40
    :type address_format: int

    :param memorysize_format: The number of bits on which a memory size should be encoded. Possible values are 8, 16, 24, 32
    :type memorysize_format: int

    :raises ValueError: If a parameter is not an integer or not a supported bit size.
    """

    # Number of bits -> number of bytes used to encode an address
    address_map = {
        8: 1,
        16: 2,
        24: 3,
        32: 4,
        40: 5
    }

    # Number of bits -> number of bytes used to encode a memory size
    memsize_map = {
        8: 1,
        16: 2,
        24: 3,
        32: 4
    }

    def __init__(self, address_format, memorysize_format):
        # Validate types first so a non-integer argument gets the type error,
        # not a misleading "not in supported list" message.
        if not isinstance(address_format, int) or not isinstance(memorysize_format, int):
            raise ValueError('memorysize_format and address_format must be integers')

        if address_format not in self.address_map:
            raise ValueError('address_format must be an integer selected from : %s ' % (self.address_map.keys()))

        if memorysize_format not in self.memsize_map:
            raise ValueError('memorysize_format must be an integer selected from : %s' % (self.memsize_map.keys()))

        self.memorysize_format = memorysize_format
        self.address_format = address_format

    def get_byte_as_int(self):
        # High nibble = memory-size byte count, low nibble = address byte count (ISO-14229 Annex G)
        return ((self.memsize_map[self.memorysize_format] << 4) | (self.address_map[self.address_format])) & 0xFF

    # Byte given alongside a memory address and a length so that they are decoded properly.
    def get_byte(self):
        return struct.pack('B', self.get_byte_as_int())
class MemoryLocation:
    """
    This class defines a memory block location including : address, size, AddressAndLengthFormatIdentifier (address format and memory size format)

    :param address: A memory address pointing to the beginning of the memory block
    :type address: int

    :param memorysize: The size of the memory block
    :type memorysize: int or None

    :param address_format: The number of bits on which an address should be encoded. Possible values are 8, 16, 24, 32, 40.
            If ``None`` is specified, the smallest size required to store the given address will be used
    :type address_format: int

    :param memorysize_format: The number of bits on which a memory size should be encoded. Possible values are 8, 16, 24, 32
            If ``None`` is specified, the smallest size required to store the given memorysize will be used
    :type memorysize_format: int or None
    """

    def __init__(self, address, memorysize, address_format=None, memorysize_format=None):
        self.address = address
        self.memorysize = memorysize
        # Keep the user-requested formats (possibly None) so set_format_if_none
        # can tell an explicit choice apart from an autosized one.
        self.address_format = address_format
        self.memorysize_format = memorysize_format

        if address_format is None:
            address_format = self.autosize_address(address)

        if memorysize_format is None:
            memorysize_format = self.autosize_memorysize(memorysize)

        self.alfid = AddressAndLengthFormatIdentifier(memorysize_format=memorysize_format, address_format=address_format)

    # This is used by the client/server to set a format from a config object while letting the user override it
    def set_format_if_none(self, address_format=None, memorysize_format=None):
        # Restore the previous formats on failure so the object stays consistent.
        previous_address_format = self.address_format
        previous_memorysize_format = self.memorysize_format
        try:
            if address_format is not None:
                if self.address_format is None:
                    self.address_format = address_format

            if memorysize_format is not None:
                # Bugfix: previously tested "address_format is None" (copy-paste error),
                # so a configured memorysize format was only applied when no address
                # format was given. The correct condition is on self.memorysize_format.
                if self.memorysize_format is None:
                    self.memorysize_format = memorysize_format

            address_format = self.address_format if self.address_format is not None else self.autosize_address(self.address)
            memorysize_format = self.memorysize_format if self.memorysize_format is not None else self.autosize_memorysize(self.memorysize)

            self.alfid = AddressAndLengthFormatIdentifier(memorysize_format=memorysize_format, address_format=address_format)
        except:
            self.address_format = previous_address_format
            self.memorysize_format = previous_memorysize_format
            raise

    # Finds the smallest size that fits the address
    def autosize_address(self, val):
        # max(8, ...) ensures that address 0 (bit_length() == 0) still maps to a
        # valid 8-bit format instead of an unsupported 0-bit one.
        fmt = max(8, math.ceil(val.bit_length() / 8) * 8)
        if fmt > 40:
            raise ValueError("address size must be smaller or equal than 40 bits")
        return fmt

    # Finds the smallest size that fits the memory size
    def autosize_memorysize(self, val):
        # max(8, ...) ensures that a memorysize of 0 still maps to a valid 8-bit format.
        fmt = max(8, math.ceil(val.bit_length() / 8) * 8)
        if fmt > 32:
            raise ValueError("memory size must be smaller or equal than 32 bits")
        return fmt

    # Gets the address bytes in the requested format
    def get_address_bytes(self):
        n = AddressAndLengthFormatIdentifier.address_map[self.alfid.address_format]
        # Pack as 8-byte big-endian and keep only the last n bytes (40-bit max fits).
        data = struct.pack('>q', self.address)
        return data[-n:]

    # Gets the memory size bytes in the requested format
    def get_memorysize_bytes(self):
        n = AddressAndLengthFormatIdentifier.memsize_map[self.alfid.memorysize_format]
        data = struct.pack('>q', self.memorysize)
        return data[-n:]

    # Generates an instance from the byte stream
    @classmethod
    def from_bytes(cls, address_bytes, memorysize_bytes):
        if not isinstance(address_bytes, bytes):
            raise ValueError('address_bytes must be a valid bytes object')

        if not isinstance(memorysize_bytes, bytes):
            raise ValueError('memorysize_bytes must be a valid bytes object')

        if len(address_bytes) > 5:
            raise ValueError('Address must be at most 40 bits long')

        if len(memorysize_bytes) > 4:
            raise ValueError('Memory size must be at most 32 bits long')

        # Left-pad to 8 bytes so the values can be decoded with a single '>q' unpack.
        address_bytes_padded = b'\x00' * (8 - len(address_bytes)) + address_bytes
        memorysize_bytes_padded = b'\x00' * (8 - len(memorysize_bytes)) + memorysize_bytes

        address = struct.unpack('>q', address_bytes_padded)[0]
        memorysize = struct.unpack('>q', memorysize_bytes_padded)[0]
        address_format = len(address_bytes) * 8
        memorysize_format = len(memorysize_bytes) * 8

        return cls(address=address, memorysize=memorysize, address_format=address_format, memorysize_format=memorysize_format)

    def __str__(self):
        return 'Address=0x%x (%d bits), Size=0x%x (%d bits)' % (self.address, self.alfid.address_format, self.memorysize, self.alfid.memorysize_format)

    def __repr__(self):
        return '<%s: %s at 0x%08x>' % (self.__class__.__name__, str(self), id(self))
class DataFormatIdentifier:
    """
    Defines the compression and encryption method of a specific chunk of data.
    Mainly used by the :ref:`RequestUpload<RequestUpload>` and :ref:`RequestDownload<RequestDownload>` services

    :param compression: Value between 0 and 0xF specifying the compression method. Only the value 0 has a meaning defined by UDS standard and it is `No compression`.
            All other values are ECU manufacturer specific.
    :type compression: int

    :param encryption: Value between 0 and 0xF specifying the encryption method. Only the value 0 has a meaning defined by UDS standard and it is `No encryption`.
            All other values are ECU manufacturer specific.
    :type encryption: int
    """

    def __init__(self, compression=0, encryption=0):
        # Validate both nibbles the same way before storing anything.
        for value in (compression, encryption):
            if not isinstance(value, int):
                raise ValueError('compression and encryption method must be an integer value')
            if not (0 <= value <= 0xF):
                raise ValueError('compression and encryption method must each be an integer between 0 and 0xF')

        self.compression = compression
        self.encryption = encryption

    def get_byte_as_int(self):
        # Compression in the high nibble, encryption in the low nibble.
        return ((self.compression & 0xF) << 4) | (self.encryption & 0xF)

    def get_byte(self):
        return struct.pack('B', self.get_byte_as_int())

    def __str__(self):
        return 'Compression:0x%x, Encryption:0x%x' % (self.compression, self.encryption)

    def __repr__(self):
        return '<%s: %s at 0x%08x>' % (self.__class__.__name__, str(self), id(self))
# Units defined in standard. Nowhere does the ISO-14229 make use of them, but they are defined
class Units:
    """
    Constants for the units and scaling prefixes defined in ISO-14229:2006 Annex C.
    Nowhere does ISO-14229 make use of them, but they are defined here for convenience.
    """
    # As defined in ISO-14229:2006 Annex C
    class Prefixs:
        class Prefix:
            # Simple value object: id (Annex C identifier), name, symbol and optional description.
            def __init__(self, id, name, symbol, description=None):
                self.name = name
                self.id = id
                self.symbol = symbol
                self.description = description

            def __str__(self):
                return self.name

            def __repr__(self):
                desc = "(%s) " % self.description if self.description is not None else ""
                return "<UDS Unit prefix : %s[%s] %swith ID=%d at %08x>" % (self.name, self.symbol, desc, self.id, id(self))

        exa = Prefix(id=0x40, name='exa', symbol='E', description='10e18')
        peta = Prefix(id=0x41, name='peta', symbol='P', description='10e15')
        tera = Prefix(id=0x42, name='tera', symbol='T', description='10e12')
        giga = Prefix(id=0x43, name='giga', symbol='G', description='10e9')
        mega = Prefix(id=0x44, name='mega', symbol='M', description='10e6')
        kilo = Prefix(id=0x45, name='kilo', symbol='k', description='10e3')
        hecto = Prefix(id=0x46, name='hecto', symbol='h', description='10e2')
        deca = Prefix(id=0x47, name='deca', symbol='da', description='10e1')
        deci = Prefix(id=0x48, name='deci', symbol='d', description='10e-1')
        centi = Prefix(id=0x49, name='centi', symbol='c', description='10e-2')
        milli = Prefix(id=0x4A, name='milli', symbol='m', description='10e-3')
        # NOTE(review): symbol 'm' duplicates milli; the SI symbol would be 'µ' — kept as-is for compatibility.
        micro = Prefix(id=0x4B, name='micro', symbol='m', description='10e-6')
        nano = Prefix(id=0x4C, name='nano', symbol='n', description='10e-9')
        pico = Prefix(id=0x4D, name='pico', symbol='p', description='10e-12')
        femto = Prefix(id=0x4E, name='femto', symbol='f', description='10e-15')
        atto = Prefix(id=0x4F, name='atto', symbol='a', description='10e-18')

    class Unit:
        # Simple value object: id (Annex C identifier), name, symbol and optional description.
        def __init__(self, id, name, symbol, description=None):
            self.id = id
            self.name = name
            self.symbol = symbol
            self.description = description

        def __str__(self):
            return self.name

        def __repr__(self):
            desc = "(unit of %s) " % self.description if self.description is not None else ""
            return "<UDS Unit : %s[%s] %swith ID=%d at %08x>" % (self.name, self.symbol, desc, self.id, id(self))

    no_unit = Unit(id=0x00, name='no unit', symbol='-', description='-')
    meter = Unit(id=0x01, name='meter', symbol='m', description='length')
    # Bugfix: was misspelled "foor"; the alias below keeps the old name working.
    foot = Unit(id=0x02, name='foot', symbol='ft', description='length')
    foor = foot  # deprecated alias kept for backward compatibility (historical typo)
    inch = Unit(id=0x03, name='inch', symbol='in', description='length')
    yard = Unit(id=0x04, name='yard', symbol='yd', description='length')
    english_mile = Unit(id=0x05, name='mile (English)', symbol='mi', description='length')
    gram = Unit(id=0x06, name='gram', symbol='g', description='mass')
    metric_ton = Unit(id=0x07, name='ton (metric)', symbol='t', description='mass')
    second = Unit(id=0x08, name='second', symbol='s', description='time')
    minute = Unit(id=0x09, name='minute', symbol='min', description='time')
    hour = Unit(id=0x0A, name='hour', symbol='h', description='time')
    day = Unit(id=0x0B, name='day', symbol='d', description='time')
    year = Unit(id=0x0C, name='year', symbol='y', description='time')
    ampere = Unit(id=0x0D, name='ampere', symbol='A', description='current')
    volt = Unit(id=0x0E, name='volt', symbol='V', description='voltage')
    coulomb = Unit(id=0x0F, name='coulomb', symbol='C', description='electric charge')
    ohm = Unit(id=0x10, name='ohm', symbol='W', description='resistance')
    farad = Unit(id=0x11, name='farad', symbol='F', description='capacitance')
    henry = Unit(id=0x12, name='henry', symbol='H', description='inductance')
    siemens = Unit(id=0x13, name='siemens', symbol='S', description='electric conductance')
    weber = Unit(id=0x14, name='weber', symbol='Wb', description='magnetic flux')
    tesla = Unit(id=0x15, name='tesla', symbol='T', description='magnetic flux density')
    kelvin = Unit(id=0x16, name='kelvin', symbol='K', description='thermodynamic temperature')
    Celsius = Unit(id=0x17, name='Celsius', symbol='°C', description='thermodynamic temperature')
    Fahrenheit = Unit(id=0x18, name='Fahrenheit', symbol='°F', description='thermodynamic temperature')
    candela = Unit(id=0x19, name='candela', symbol='cd', description='luminous intensity')
    radian = Unit(id=0x1A, name='radian', symbol='rad', description='plane angle')
    degree = Unit(id=0x1B, name='degree', symbol='°', description='plane angle')
    hertz = Unit(id=0x1C, name='hertz', symbol='Hz', description='frequency')
    joule = Unit(id=0x1D, name='joule', symbol='J', description='energy')
    Newton = Unit(id=0x1E, name='Newton', symbol='N', description='force')
    kilopond = Unit(id=0x1F, name='kilopond', symbol='kp', description='force')
    pound = Unit(id=0x20, name='pound force', symbol='lbf', description='force')
    watt = Unit(id=0x21, name='watt', symbol='W', description='power')
    # Bugfix: both horse-power units were previously assigned to the same name
    # "horse", so the metric one (0x22) was silently shadowed and unreachable.
    horse_metric = Unit(id=0x22, name='horse power (metric)', symbol='hk', description='power')
    horse = Unit(id=0x23, name='horse power(UK and US)', symbol='hp', description='power')
    Pascal = Unit(id=0x24, name='Pascal', symbol='Pa', description='pressure')
    bar = Unit(id=0x25, name='bar', symbol='bar', description='pressure')
    atmosphere = Unit(id=0x26, name='atmosphere', symbol='atm', description='pressure')
    psi = Unit(id=0x27, name='pound force per square inch', symbol='psi', description='pressure')
    becqerel = Unit(id=0x28, name='becqerel', symbol='Bq', description='radioactivity')
    lumen = Unit(id=0x29, name='lumen', symbol='lm', description='light flux')
    lux = Unit(id=0x2A, name='lux', symbol='lx', description='illuminance')
    liter = Unit(id=0x2B, name='liter', symbol='l', description='volume')
    # Bugfix: both gallon units were previously assigned to the same name
    # "gallon", so the British one (0x2C) was silently shadowed and unreachable.
    gallon_british = Unit(id=0x2C, name='gallon (British)', symbol='-', description='volume')
    gallon = Unit(id=0x2D, name='gallon (US liq)', symbol='-', description='volume')
    cubic = Unit(id=0x2E, name='cubic inch', symbol='cu in', description='volume')
    meter_per_sec = Unit(id=0x2F, name='meter per seconds', symbol='m/s', description='speed')
    kmh = Unit(id=0x30, name='kilometre per hour', symbol='km/h', description='speed')
    mph = Unit(id=0x31, name='mile per hour', symbol='mph', description='speed')
    rps = Unit(id=0x32, name='revolutions per second', symbol='rps', description='angular velocity')
    rpm = Unit(id=0x33, name='revolutions per minute', symbol='rpm', description='angular velocity')
    counts = Unit(id=0x34, name='counts', symbol='-', description='-')
    percent = Unit(id=0x35, name='percent', symbol='%', description='-')
    mg_per_stroke = Unit(id=0x36, name='milligram per stroke', symbol='mg/stroke', description='mass per engine stroke')
    meter_per_sec2 = Unit(id=0x37, name='meter per square seconds', symbol='m/s2', description='acceleration')
    Nm = Unit(id=0x38, name='Newton meter', symbol='Nm', description='moment')
    liter_per_min = Unit(id=0x39, name='liter per minute', symbol='l/min', description='flow')
    watt_per_meter2 = Unit(id=0x3A, name='watt per square meter', symbol='W/m2', description='intensity')
    bar_per_sec = Unit(id=0x3B, name='bar per second', symbol='bar/s', description='pressure change')
    radians_per_sec = Unit(id=0x3C, name='radians per second', symbol='rad/s', description='angular velocity')
    radians = Unit(id=0x3D, name='radians square second', symbol='rad/s2', description='angular acceleration')
    kilogram_per_meter2 = Unit(id=0x3E, name='kilogram per square meter', symbol='kg/m2', description='-')
    date1 = Unit(id=0x50, name='Date1', symbol='-', description='Year-Month-Day')
    date2 = Unit(id=0x51, name='Date2', symbol='-', description='Day/Month/Year')
    date3 = Unit(id=0x52, name='Date3', symbol='-', description='Month/Day/Year')
    week = Unit(id=0x53, name='week', symbol='W', description='calendar week')
    time1 = Unit(id=0x54, name='Time1', symbol='-', description='UTC Hour/Minute/Second')
    time2 = Unit(id=0x55, name='Time2', symbol='-', description='Hour/Minute/Second')
    datetime1 = Unit(id=0x56, name='DateAndTime1', symbol='-', description='Second/Minute/Hour/Day/Month/Year')
    datetime2 = Unit(id=0x57, name='DateAndTime2', symbol='-', description='Second/Minute/Hour/Day/Month/Year/Local minute offset/Localhour offset')
    datetime3 = Unit(id=0x58, name='DateAndTime3', symbol='-', description='Second/Minute/Hour/Month/Day/Year')
    datetime4 = Unit(id=0x59, name='DateAndTime4', symbol='-', description='Second/Minute/Hour/Month/Day/Year/Local minute offset/Localhour offset')
# Routine class that containes few definitions for usage with nice syntax.
# myRoutine = Routine.EraseMemory or print(Routine.name_from_id(myRoutine))
class Routine:
    """
    Defines a list of constants that are routine identifiers defined by the UDS standard.
    This class provides no functionality apart from defining these constants
    """

    DeployLoopRoutineID = 0xE200
    EraseMemory = 0xFF00
    CheckProgrammingDependencies = 0xFF01
    EraseMirrorMemoryDTCs = 0xFF02

    # (low, high, name) spans covering the whole 16-bit routine ID space,
    # as defined by ISO-14229:2006, Annex F. Scanned in order by name_from_id.
    _name_ranges = (
        (0x0000, 0x00FF, 'ISOSAEReserved'),
        (0x0100, 0x01FF, 'TachographTestIds'),
        (0x0200, 0xDFFF, 'VehicleManufacturerSpecific'),
        (0xE000, 0xE1FF, 'OBDTestIds'),
        (0xE200, 0xE200, 'DeployLoopRoutineID'),
        (0xE201, 0xE2FF, 'SafetySystemRoutineIDs'),
        (0xE300, 0xEFFF, 'ISOSAEReserved'),
        (0xF000, 0xFEFF, 'SystemSupplierSpecific'),
        (0xFF00, 0xFF00, 'EraseMemory'),
        (0xFF01, 0xFF01, 'CheckProgrammingDependencies'),
        (0xFF02, 0xFF02, 'EraseMirrorMemoryDTCs'),
        (0xFF03, 0xFFFF, 'ISOSAEReserved'),
    )

    @classmethod
    def name_from_id(cls, routine_id):
        # Helper to print the type of requests (logging purpose) as defined by ISO-14229:2006, Annex F
        if not isinstance(routine_id, int) or routine_id < 0 or routine_id > 0xFFFF:
            raise ValueError('Routine ID must be a valid integer between 0 and 0xFFFF')

        for low, high, name in cls._name_ranges:
            if low <= routine_id <= high:
                return name
class DataIdentifier:
    """
    Defines a list of constants that are data identifiers defined by the UDS standard.
    This class provides no functionality apart from defining these constants
    """
    BootSoftwareIdentification = 0xF180
    ApplicationSoftwareIdentification = 0xF181
    ApplicationDataIdentification = 0xF182
    BootSoftwareFingerprint = 0xF183
    ApplicationSoftwareFingerprint = 0xF184
    ApplicationDataFingerprint = 0xF185
    ActiveDiagnosticSession = 0xF186
    VehicleManufacturerSparePartNumber = 0xF187
    # Bugfix: this constant was previously assigned twice on consecutive lines.
    VehicleManufacturerECUSoftwareNumber = 0xF188
    VehicleManufacturerECUSoftwareVersionNumber = 0xF189
    SystemSupplierIdentifier = 0xF18A
    ECUManufacturingDate = 0xF18B
    ECUSerialNumber = 0xF18C
    SupportedFunctionalUnits = 0xF18D
    VehicleManufacturerKitAssemblyPartNumber = 0xF18E
    ISOSAEReservedStandardized = 0xF18F
    VIN = 0xF190
    VehicleManufacturerECUHardwareNumber = 0xF191
    SystemSupplierECUHardwareNumber = 0xF192
    SystemSupplierECUHardwareVersionNumber = 0xF193
    SystemSupplierECUSoftwareNumber = 0xF194
    SystemSupplierECUSoftwareVersionNumber = 0xF195
    ExhaustRegulationOrTypeApprovalNumber = 0xF196
    SystemNameOrEngineType = 0xF197
    RepairShopCodeOrTesterSerialNumber = 0xF198
    ProgrammingDate = 0xF199
    CalibrationRepairShopCodeOrCalibrationEquipmentSerialNumber = 0xF19A
    CalibrationDate = 0xF19B
    CalibrationEquipmentSoftwareNumber = 0xF19C
    ECUInstallationDate = 0xF19D
    ODXFile = 0xF19E
    Entity = 0xF19F

    # Names for the individually-defined DIDs (0xF180-0xF19F), ISO-14229:2006 Annex F.
    _exact_names = {
        0xF180: 'BootSoftwareIdentificationDataIdentifier',
        0xF181: 'ApplicationSoftwareIdentificationDataIdentifier',
        0xF182: 'ApplicationDataIdentificationDataIdentifier',
        0xF183: 'BootSoftwareFingerprintDataIdentifier',
        0xF184: 'ApplicationSoftwareFingerprintDataIdentifier',
        0xF185: 'ApplicationDataFingerprintDataIdentifier',
        0xF186: 'ActiveDiagnosticSessionDataIdentifier',
        0xF187: 'VehicleManufacturerSparePartNumberDataIdentifier',
        0xF188: 'VehicleManufacturerECUSoftwareNumberDataIdentifier',
        0xF189: 'VehicleManufacturerECUSoftwareVersionNumberDataIdentifier',
        0xF18A: 'SystemSupplierIdentifierDataIdentifier',
        0xF18B: 'ECUManufacturingDateDataIdentifier',
        0xF18C: 'ECUSerialNumberDataIdentifier',
        0xF18D: 'SupportedFunctionalUnitsDataIdentifier',
        0xF18E: 'VehicleManufacturerKitAssemblyPartNumberDataIdentifier',
        0xF18F: 'ISOSAEReservedStandardized',
        0xF190: 'VINDataIdentifier',
        0xF191: 'VehicleManufacturerECUHardwareNumberDataIdentifier',
        0xF192: 'SystemSupplierECUHardwareNumberDataIdentifier',
        0xF193: 'SystemSupplierECUHardwareVersionNumberDataIdentifier',
        0xF194: 'SystemSupplierECUSoftwareNumberDataIdentifier',
        0xF195: 'SystemSupplierECUSoftwareVersionNumberDataIdentifier',
        0xF196: 'ExhaustRegulationOrTypeApprovalNumberDataIdentifier',
        0xF197: 'SystemNameOrEngineTypeDataIdentifier',
        0xF198: 'RepairShopCodeOrTesterSerialNumberDataIdentifier',
        0xF199: 'ProgrammingDateDataIdentifier',
        0xF19A: 'CalibrationRepairShopCodeOrCalibrationEquipmentSerialNumberDataIdentifier',
        0xF19B: 'CalibrationDateDataIdentifier',
        0xF19C: 'CalibrationEquipmentSoftwareNumberDataIdentifier',
        0xF19D: 'ECUInstallationDateDataIdentifier',
        0xF19E: 'ODXFileDataIdentifier',
        0xF19F: 'EntityDataIdentifier',
    }

    # (low, high, name) spans covering the remaining 16-bit DID space, ISO-14229:2006 Annex F.
    _range_names = (
        (0x0000, 0x00FF, 'ISOSAEReserved'),
        (0x0100, 0xEFFF, 'VehicleManufacturerSpecific'),
        (0xF000, 0xF00F, 'NetworkConfigurationDataForTractorTrailerApplicationDataIdentifier'),
        (0xF010, 0xF0FF, 'VehicleManufacturerSpecific'),
        (0xF100, 0xF17F, 'IdentificationOptionVehicleManufacturerSpecificDataIdentifier'),
        (0xF1A0, 0xF1EF, 'IdentificationOptionVehicleManufacturerSpecific'),
        (0xF1F0, 0xF1FF, 'IdentificationOptionSystemSupplierSpecific'),
        (0xF200, 0xF2FF, 'PeriodicDataIdentifier'),
        (0xF300, 0xF3FF, 'DynamicallyDefinedDataIdentifier'),
        (0xF400, 0xF5FF, 'OBDDataIdentifier'),
        (0xF600, 0xF7FF, 'OBDMonitorDataIdentifier'),
        (0xF800, 0xF8FF, 'OBDInfoTypeDataIdentifier'),
        (0xF900, 0xF9FF, 'TachographDataIdentifier'),
        (0xFA00, 0xFA0F, 'AirbagDeploymentDataIdentifier'),
        (0xFA10, 0xFAFF, 'SafetySystemDataIdentifier'),
        (0xFB00, 0xFCFF, 'ReservedForLegislativeUse'),
        (0xFD00, 0xFEFF, 'SystemSupplierSpecific'),
        (0xFF00, 0xFFFF, 'ISOSAEReserved'),
    )

    @classmethod
    def name_from_id(cls, did):
        # Human-readable name of the DID (logging purpose) as defined by ISO-14229:2006, Annex F.
        # Previously a 40-branch if-chain with a duplicate, unreachable 0xF188 branch.
        if not isinstance(did, int) or did < 0 or did > 0xFFFF:
            raise ValueError('Data IDentifier must be a valid integer between 0 and 0xFFFF')

        if did in cls._exact_names:
            return cls._exact_names[did]

        for low, high, name in cls._range_names:
            if low <= did <= high:
                return name
# Communication type is a single byte value including message type and subnet.
# Used by CommunicationControl service and defined by ISO-14229:2006 Annex B, table B.1
class CommunicationType:
    """
    This class represents a pair of subnet and message types. This value is mainly used by the :ref:`CommunicationControl<CommunicationControl>` service

    :param subnet: Represent the subnet number. Value ranges from 0 to 0xF
    :type subnet: int

    :param normal_msg: Bit indicating that the `normal messages` are involved
    :type normal_msg: bool

    :param network_management_msg: Bit indicating that the `network management messages` are involved
    :type network_management_msg: bool
    """

    class Subnet:
        # Special subnet values defined by ISO-14229:2006 Annex B
        node = 0
        network = 0xF

        def __init__(self, subnet):
            if not isinstance(subnet, int):
                raise ValueError('subnet must be an integer value')
            if subnet < 0 or subnet > 0xF:
                raise ValueError('subnet must be an integer between 0 and 0xF')
            self.subnet = subnet

        def value(self):
            return self.subnet

    def __init__(self, subnet, normal_msg=False, network_management_msg=False):
        # Accept either a raw integer or an already-built Subnet object.
        if not isinstance(subnet, self.Subnet):
            subnet = self.Subnet(subnet)

        if not isinstance(normal_msg, bool) or not isinstance(network_management_msg, bool):
            raise ValueError('message type (normal_msg, network_management_msg) must be valid boolean values')

        if normal_msg == False and network_management_msg == False:
            raise ValueError('At least one message type must be controlled')

        self.subnet = subnet
        self.normal_msg = normal_msg
        self.network_management_msg = network_management_msg

    def get_byte_as_int(self):
        # Message type in the 2 low bits, subnet in the high nibble (ISO-14229:2006 Annex B, table B.1)
        message_type = 0
        if self.normal_msg:
            message_type |= 1
        if self.network_management_msg:
            message_type |= 2

        byte = (message_type & 0x3) | ((self.subnet.value() & 0xF) << 4)
        return byte

    def get_byte(self):
        return struct.pack('B', self.get_byte_as_int())

    @classmethod
    def from_byte(cls, byte):
        # Accepts either a single-byte ``bytes`` object or an integer.
        # Bugfix: an integer input previously hit an UnboundLocalError because
        # ``val`` was only assigned inside the isinstance(byte, bytes) branch.
        if isinstance(byte, bytes):
            byte = struct.unpack('B', byte)[0]
        val = int(byte)
        subnet = (val & 0xF0) >> 4
        normal_msg = True if val & 1 > 0 else False
        network_management_msg = True if val & 2 > 0 else False
        return cls(subnet, normal_msg, network_management_msg)

    def __str__(self):
        flags = []
        if self.normal_msg:
            flags.append('NormalMsg')
        if self.network_management_msg:
            flags.append('NetworkManagementMsg')
        return 'subnet=0x%x. Flags : [%s]' % (self.subnet.value(), ','.join(flags))

    def __repr__(self):
        return '<%s: %s at 0x%08x>' % (self.__class__.__name__, str(self), id(self))
class Baudrate:
    """
    Represents a link speed in bit per seconds (or symbol per seconds to be more accurate).
    This class is used by the :ref:`LinkControl<LinkControl>` service that controls the underlying protocol speeds.

    The class can encode the baudrate in 2 different fashions : **Fixed** or **Specific**.

    Some standard baudrate values are defined within ISO-14229:2006 Annex B.3

    :param baudrate: The baudrate to be used.
    :type baudrate: int

    :param baudtype: Tells how the baudrate shall be encoded. 4 values are possible:

            - ``Baudrate.Type.Fixed`` (0) : Will encode the baudrate in a single byte Fixed fashion. `baudrate` should be a supported value such as 9600, 19200, 125000, 250000, etc.
            - ``Baudrate.Type.Specific`` (1) : Will encode the baudrate in a three-byte Specific fashion. `baudrate` can be any value ranging from 0 to 0xFFFFFF
            - ``Baudrate.Type.Identifier`` (2) : Will encode the baudrate in a single byte Fixed fashion. `baudrate` should be the byte value to encode if the user wants to use a custom type.
            - ``Baudrate.Type.Auto`` (3) : Let the class guess the type.

                    - If ``baudrate`` is a known standard value (19200, 38400, etc), then Fixed shall be used
                    - If ``baudrate`` is an integer that fits in a single byte, then Identifier shall be used
                    - If ``baudrate`` is none of the above, then Specific will be used.
    :type baudtype: int
    """

    # Standard baudrate (symbols/sec) -> single-byte identifier, ISO-14229:2006 Annex B.3
    baudrate_map = {
        9600: 0x01,
        19200: 0x02,
        38400: 0x03,
        57600: 0x04,
        115200: 0x05,
        125000: 0x10,
        250000: 0x11,
        500000: 0x12,
        1000000: 0x13,
    }

    class Type:
        Fixed = 0        # When baudrate is a predefined value from standard
        Specific = 1     # When using custom baudrate
        Identifier = 2   # Baudrate implied by baudrate ID
        Auto = 3         # Let the class decide the type

    # User can specify the type of baudrate or let this class guess what he wants (this adds some simplicity for non-experts).
    def __init__(self, baudrate, baudtype=Type.Auto):
        if not isinstance(baudrate, int):
            raise ValueError('baudrate must be an integer')
        if baudrate < 0:
            raise ValueError('baudrate must be an integer greater than 0')

        # Resolve Auto to a concrete type: Fixed for standard values,
        # Identifier for single-byte values, Specific otherwise.
        if baudtype != self.Type.Auto:
            self.baudtype = baudtype
        elif baudrate in self.baudrate_map:
            self.baudtype = self.Type.Fixed
        elif baudrate <= 0xFF:
            self.baudtype = self.Type.Identifier
        else:
            self.baudtype = self.Type.Specific

        # Validate the value against the (possibly user-forced) type.
        if self.baudtype == self.Type.Specific:
            if baudrate > 0xFFFFFF:
                raise ValueError('Baudrate value cannot be bigger than a 24 bits value.')
        elif self.baudtype == self.Type.Identifier:
            if baudrate > 0xFF:
                raise ValueError('Baudrate ID must be an integer between 0 and 0xFF')
        elif self.baudtype == self.Type.Fixed:
            if baudrate not in self.baudrate_map:
                raise ValueError('baudrate must be part of the supported baudrate list defined by UDS standard')
        else:
            raise ValueError('Unknown baudtype : %s' % self.baudtype)

        self.baudrate = baudrate

    # internal helper to change the type of this baudrate
    def make_new_type(self, baudtype):
        if baudtype not in (self.Type.Fixed, self.Type.Specific):
            raise ValueError('Baudrate type can only be change to Fixed or Specific')
        return Baudrate(self.effective_baudrate(), baudtype=baudtype)

    # Returns the baudrate in Symbol Per Seconds if available, otherwise value given by the user.
    def effective_baudrate(self):
        if self.baudtype != self.Type.Identifier:
            return self.baudrate
        # Reverse lookup: find the symbols/sec value matching this identifier.
        for bps, identifier in self.baudrate_map.items():
            if identifier == self.baudrate:
                return bps
        raise RuntimeError('Unknown effective baudrate, this could indicate a bug')

    # Encodes the baudrate value the way they are exchanged.
    def get_bytes(self):
        if self.baudtype == self.Type.Fixed:
            return struct.pack('B', self.baudrate_map[self.baudrate])
        if self.baudtype == self.Type.Specific:
            # Big-endian, 24 bits over 3 bytes
            return struct.pack('BBB', (self.baudrate >> 16) & 0xFF, (self.baudrate >> 8) & 0xFF, self.baudrate & 0xFF)
        if self.baudtype == self.Type.Identifier:
            return struct.pack('B', self.baudrate)
        raise RuntimeError('Unknown baudrate baudtype : %s' % self.baudtype)

    def __str__(self):
        type_names = {
            self.Type.Fixed: 'Fixed',
            self.Type.Specific: 'Specific',
            self.Type.Identifier: 'Defined by identifier',
        }
        baudtype_str = type_names.get(self.baudtype, '')
        return '%sBauds, %s format.' % (str(self.effective_baudrate()), baudtype_str)

    def __repr__(self):
        return '<%s: %s at 0x%08x>' % (self.__class__.__name__, str(self), id(self))
#Used for IO Control service. Allows comprehensive one-liner.
class IOMasks:
    """
    Allow to specify a list of masks for a :ref:`InputOutputControlByIdentifier<InputOutputControlByIdentifier>` composite codec.

    Example : IOMasks(mask1,mask2, mask3=True, mask4=False)

    :param args: Masks to set to True
    :param kwargs: Masks and their values

    :raises ValueError: If a keyword value is not a boolean or a positional mask name is not a string.
    """

    def __init__(self, *args, **kwargs):
        # Validate everything up front. The original code re-validated kwargs a
        # second time with an inconsistent message; that dead check is removed.
        for value in kwargs.values():
            if not isinstance(value, bool):
                raise ValueError('mask value must be a boolean value')

        for name in args:
            if not isinstance(name, str):
                raise ValueError('Mask name must be a valid string')

        self.maskdict = dict()
        # Positional masks are implicitly True; keyword masks keep their given value.
        for name in args:
            self.maskdict[name] = True
        for name, value in kwargs.items():
            self.maskdict[name] = value

    def get_dict(self):
        # Mask name -> bool mapping
        return self.maskdict
#Used for IO Control service. Allows comprehensive one-liner.
class IOValues:
    """
    This class saves a function argument so they can be passed to a callback function.

    :param args: Arguments
    :param kwargs: Named arguments
    """

    def __init__(self, *args, **kwargs):
        # Capture positional and named arguments verbatim for later replay.
        self.args = args
        self.kwargs = dict(kwargs)
| 41.301158
| 273
| 0.721744
|
acfe8af7d133fa5b8c1d31ad21b0f2e52fbfe46a
| 1,742
|
py
|
Python
|
ML-for-S2S/torch_customdataset.py
|
mariajmolina/ML-for-S2S
|
3de32e72042ba7e8b37a433579fa9c5630246d8c
|
[
"MIT"
] | null | null | null |
ML-for-S2S/torch_customdataset.py
|
mariajmolina/ML-for-S2S
|
3de32e72042ba7e8b37a433579fa9c5630246d8c
|
[
"MIT"
] | null | null | null |
ML-for-S2S/torch_customdataset.py
|
mariajmolina/ML-for-S2S
|
3de32e72042ba7e8b37a433579fa9c5630246d8c
|
[
"MIT"
] | 1
|
2021-05-29T23:24:37.000Z
|
2021-05-29T23:24:37.000Z
|
import torch
from torch.utils.data import Dataset
"""
Module contains several pytorch datasets.
Author: Maria J. Molina, NCAR (molina@ucar.edu)
"""
class CustomDataset(Dataset):
    """Minimal map-style dataset pairing training inputs with their labels.

    Returned samples are dicts with keys ``train``, ``test`` and ``minibatch_indx``.
    """

    def __init__(self, traindata, testdata, transform=None, target_transform=None):
        self.img_train = traindata
        self.img_label = testdata
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        # Dataset length is driven by the label container.
        return len(self.img_label)

    def __getitem__(self, idx):
        sample, target = self.img_train[idx], self.img_label[idx]

        if self.transform:
            sample = self.transform(sample)
        if self.target_transform:
            target = self.target_transform(target)

        return {'train': sample, 'test': target, 'minibatch_indx': idx}
class CustomLSTMDataset(Dataset):
    """Minimal map-style dataset for sequence models.

    Samples are sliced along the second axis of 2-D array/tensor inputs
    (``data[:, idx]``), so each item is a full sequence for index ``idx``.
    """

    def __init__(self, traindata, testdata, transform=None, target_transform=None):
        self.img_train = traindata
        self.img_label = testdata
        self.transform = transform
        self.target_transform = target_transform

    def __len__(self):
        # Length of the label container (its first axis).
        return len(self.img_label)

    def __getitem__(self, idx):
        # assumes inputs support 2-D indexing (e.g. torch tensor / numpy array)
        sample = self.img_train[:, idx]
        target = self.img_label[:, idx]

        if self.transform:
            sample = self.transform(sample)
        if self.target_transform:
            target = self.target_transform(target)

        return {'train': sample, 'test': target}
| 24.885714
| 83
| 0.585534
|
acfe8d2211629730e873ba2f387379ac18bd56fb
| 95
|
py
|
Python
|
src/episodetrackerapp/apps.py
|
jonathankamau/EpisodeTracker
|
78b92b5cdcdcf52608a47a7a0edb7c6b0e3d1246
|
[
"MIT"
] | 1
|
2018-09-14T03:45:02.000Z
|
2018-09-14T03:45:02.000Z
|
src/episodetrackerapp/apps.py
|
jonathankamau/EpisodeTracker
|
78b92b5cdcdcf52608a47a7a0edb7c6b0e3d1246
|
[
"MIT"
] | 15
|
2020-06-05T14:55:41.000Z
|
2021-02-02T02:16:30.000Z
|
src/episodetrackerapp/apps.py
|
jonathankamau/EpisodeTracker
|
78b92b5cdcdcf52608a47a7a0edb7c6b0e3d1246
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
# NOTE(review): this subclass shadows the imported django AppConfig name;
# consider renaming it (e.g. EpisodetrackerAppConfig) to avoid confusion.
class AppConfig(AppConfig):
    # Dotted module path of the Django app this config applies to
    name = 'episodetrackerapp'
| 15.833333
| 33
| 0.768421
|
acfe8da8fc3330b45ddcb2ff97f13cac2f3ad4e4
| 379
|
py
|
Python
|
models/facility.py
|
stuartcampbell/nsls2-api
|
8175e524279f51d9b43ad0f8f08b8b3f54eceae0
|
[
"BSD-3-Clause"
] | null | null | null |
models/facility.py
|
stuartcampbell/nsls2-api
|
8175e524279f51d9b43ad0f8f08b8b3f54eceae0
|
[
"BSD-3-Clause"
] | null | null | null |
models/facility.py
|
stuartcampbell/nsls2-api
|
8175e524279f51d9b43ad0f8f08b8b3f54eceae0
|
[
"BSD-3-Clause"
] | null | null | null |
from enum import Enum
from typing import Optional, List
from pydantic.main import BaseModel
class FacilityName(Enum):
    """Closed set of facility identifiers recognized by the API."""

    nsls2 = "nsls2"
    lbms = "lbms"
class Facility(BaseModel):
    """Pydantic schema describing a facility record."""

    name: str
    id: str
    fullname: str
    # Identifier of this facility in the external PASS system (presumably) --
    # TODO confirm against the PASS integration code.
    pass_facility_id: str
class Cycle(BaseModel):
    """Pydantic schema for an operating cycle."""

    name: str

    class Config:
        # Extra data merged into the generated JSON schema (pydantic hook);
        # serves as the example payload in the API docs.
        schema_extra = {
            "name": "2021-2"
        }
| 15.791667
| 35
| 0.627968
|
acfe8db0b5b8391343a87ccf6639633b28413ff8
| 545
|
py
|
Python
|
linkysets/users/managers.py
|
hqrrylyu/linkysets
|
1b8c319820bdf116a5cad7efff69178e739cf26b
|
[
"MIT"
] | null | null | null |
linkysets/users/managers.py
|
hqrrylyu/linkysets
|
1b8c319820bdf116a5cad7efff69178e739cf26b
|
[
"MIT"
] | 5
|
2021-04-08T19:20:07.000Z
|
2021-09-22T19:03:30.000Z
|
linkysets/users/managers.py
|
hqrrylyu/polemicflow
|
1b8c319820bdf116a5cad7efff69178e739cf26b
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import TYPE_CHECKING
from django.db.models import Count, QuerySet
if TYPE_CHECKING:
    from .models import User

    # Static type checkers see a QuerySet parameterized with User ...
    BaseUserQuerySet = QuerySet[User]
else:
    # ... while at runtime the plain class is used (QuerySet may not be
    # subscriptable at runtime in the installed Django version).
    BaseUserQuerySet = QuerySet
class UserQuerySet(BaseUserQuerySet):
    """User queryset with per-user aggregate annotations."""

    def num_entrysets(self) -> UserQuerySet:
        # Annotates each user with the count of distinct related entrysets.
        return self.annotate(num_sets=Count("entryset", distinct=True))  # type: ignore

    def num_replies(self) -> UserQuerySet:
        # Annotates each user with the count of distinct related replies.
        return self.annotate(num_replies=Count("reply", distinct=True))  # type: ignore
| 25.952381
| 87
| 0.743119
|
acfe8e86ac91064a7669bbd3d98fb38c804e305f
| 258
|
py
|
Python
|
foodsharing_bot/users/apps.py
|
apnkv/foodsharing_bot
|
5ae548dec0c86a129dbea7d2329feddc87b9a87d
|
[
"MIT"
] | null | null | null |
foodsharing_bot/users/apps.py
|
apnkv/foodsharing_bot
|
5ae548dec0c86a129dbea7d2329feddc87b9a87d
|
[
"MIT"
] | null | null | null |
foodsharing_bot/users/apps.py
|
apnkv/foodsharing_bot
|
5ae548dec0c86a129dbea7d2329feddc87b9a87d
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class UsersAppConfig(AppConfig):
    """App configuration; imports signal handlers when the app is ready."""

    name = "foodsharing_bot.users"
    verbose_name = "Users"

    def ready(self):
        # Import is for its registration side effects only.
        # NOTE(review): the app is named "foodsharing_bot.users", so the
        # module path "users.signals" may never resolve -- verify; the
        # ImportError is silently swallowed below.
        try:
            import users.signals  # noqa F401
        except ImportError:
            pass
| 18.428571
| 45
| 0.620155
|
acfe8f6f2aab1820e743c922d488b578b7dfbbac
| 10,297
|
py
|
Python
|
prototype/nplinker/scoring/data_linking_functions.py
|
louwenjjr/nplinker
|
22e957d0f3326775ca5c1f850073067c6fb256d6
|
[
"Apache-2.0"
] | 6
|
2018-10-24T20:33:49.000Z
|
2022-03-11T03:44:14.000Z
|
prototype/nplinker/scoring/data_linking_functions.py
|
louwenjjr/nplinker
|
22e957d0f3326775ca5c1f850073067c6fb256d6
|
[
"Apache-2.0"
] | 44
|
2018-10-25T19:46:26.000Z
|
2022-03-31T20:31:06.000Z
|
prototype/nplinker/scoring/data_linking_functions.py
|
louwenjjr/nplinker
|
22e957d0f3326775ca5c1f850073067c6fb256d6
|
[
"Apache-2.0"
] | 4
|
2018-10-29T15:29:41.000Z
|
2022-03-10T14:36:40.000Z
|
# Copyright 2021 The NPLinker Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# functions
import numpy as np
import math
def calc_correlation_matrix(M_type1_cond, M_type2_cond):
    """Derive co-occurrence count matrices from two binary occurrence matrices.

    M_type1_cond[x, y] is 1 when type1_x IS observed under condition y and 0
    otherwise (same convention for M_type2_cond).

    Returns four matrices of shape (len(type1), len(type2)) counting, for each
    pair (x, y), the number of conditions where: both occur, only type1_x
    occurs, only type2_y occurs, and neither occurs.
    """
    absent1 = 1 - M_type1_cond
    absent2 = 1 - M_type2_cond
    # Each matrix product sums over conditions, giving pairwise counts.
    both = np.dot(M_type1_cond, M_type2_cond.T)
    only1 = np.dot(M_type1_cond, absent2.T)
    only2 = np.dot(absent1, M_type2_cond.T)
    neither = np.dot(absent1, absent2.T)
    return both, only1, only2, neither
def calc_likelihood_matrix(M_type1_cond, M_type2_cond,
                           M_type1_type2, M_type1_nottype2, M_nottype1_type2):
    """Convert co-occurrence counts into conditional likelihood matrices.

    Inputs follow calc_correlation_matrix: the binary occurrence matrices plus
    the pairwise co-occurrence count matrices derived from them.

    Returns four matrices of shape (len(type1), len(type2)):
    P_type2_given_type1, P_type2_not_type1, P_type1_given_type2,
    P_type1_not_type2.
    """
    num_conditions = M_type2_cond.shape[1]

    # Occurrence counts per element; zeros are clamped to 1 so the divisions
    # below never hit 0 for elements that are never observed.
    sum_type1 = np.sum(M_type1_cond, axis=1)
    sum_type1[sum_type1 < 1] = 1
    sum_type2 = np.sum(M_type2_cond, axis=1)
    sum_type2[sum_type2 < 1] = 1

    # Broadcasting replaces the original np.tile construction: rows are scaled
    # by sum_type1, columns by sum_type2 (elementwise values are identical).
    P_type2_given_type1 = M_type1_type2 / sum_type1[:, np.newaxis]
    P_type1_given_type2 = M_type1_type2 / sum_type2

    # Complement counts. NOTE: as in the original code, these can be zero when
    # an element occurs under every condition, which then divides by zero.
    sum_not_type1 = num_conditions - sum_type1
    sum_not_type2 = num_conditions - sum_type2
    P_type2_not_type1 = M_nottype1_type2 / sum_not_type1[:, np.newaxis]
    P_type1_not_type2 = M_type1_nottype2 / sum_not_type2

    return P_type2_given_type1, P_type2_not_type1, P_type1_given_type2, P_type1_not_type2
def pair_prob_approx(P_str, XG, Ny, hits):
    """
    Calculate probability of finding 'k' hits between Gx and Sy.

    Approximation of pair_prob: instead of enumerating permutations it uses
    the *mean* hit/non-hit probabilities over the strains in XG, which keeps
    the cost linear in Ny.

    Parameters
    ----------
    P_str: numpy array
        Probabilities for finding a spectrum in the a certain strain.
        Usually this can be set to num-of-spectra-in-strain / num-of-spectra-in-all-strains
    XG: list
        List of ids of strains where the GCF of interest occurs.
    Ny: int
        Number of strains that contain the spectrum of interest.
    hits: int
        number of hits
    """
    Nx = len(XG)
    Nstr = len(P_str)
    # Check Nx, Ny, hits
    # NOTE(review): invalid input only prints a warning; the computation still
    # proceeds (kept as-is, matching the sibling functions).
    if (hits > Nx) or (hits > Ny):
        print("Given number of 'hits' must be <= Nx and <= Ny.")
    # Mean probability of a hit within XG and of a non-hit outside XG.
    p_hit_mean = np.sum(P_str[XG])/Nx
    p_nohit_mean = (1 - np.sum(P_str[XG]))/(Nstr - Nx)
    p_mean = (hits * p_hit_mean + (Ny-hits) * p_nohit_mean)/Ny
    # Calculate product of all hits
    prod0 = 1
    for i in range(hits):
        prod0 = prod0 * p_hit_mean * (Nx -i)
    # Calculate product of all non-hits
    prod1 = 1
    for i in range(Ny - hits):
        prod1 = prod1 * ((Nstr- Nx - i)/Nstr)
    # Calculate product of probability updates
    # (fewer accessible elements lead to increasing probabilities)
    prod2 = 1
    for j in range(Ny):
        prod2 = prod2 * (1/(1 - j*p_mean))
    return np.sum(math.factorial(Ny)/(math.factorial(hits) * math.factorial(Ny - hits)) * prod0 * prod1 * prod2)
def link_prob(P_str, XGS, Nx, Ny, Nstr):
    """Probability of finding a set of *specific* hits between Gx and Sy.

    That is: the probability that hits occur in exactly the strains listed in
    XGS (where the GCF and a spectrum co-occur) and nowhere else.

    Parameters
    ----------
    P_str: numpy array
        Probabilities for finding a spectrum in a certain strain (typically
        num-of-spectra-in-strain / num-of-spectra-in-all-strains).
    XGS: list
        Ids of strains where the GCF and the spectrum of interest co-occur.
    Nx: int
        Number of strains that contain the GCF of interest.
    Ny: int
        Number of strains that contain the spectrum of interest.
    Nstr: int
        Total number of strains.
    """
    p_mean = 1/Nstr
    num_hits = len(XGS)

    # Probability that each of the remaining (Ny - num_hits) draws misses XG.
    nonhit_factor = 1
    for offset in range(Ny - num_hits):
        nonhit_factor = nonhit_factor * ((Nstr - Nx - offset)/Nstr)

    # Each draw removes one slot, so the per-draw probability grows.
    update_factor = 1
    for draw in range(Ny):
        update_factor = update_factor * (1/(1 - draw*p_mean))

    arrangements = math.factorial(Ny)/math.factorial(Ny-num_hits)
    return arrangements * np.prod(P_str[XGS]) * nonhit_factor * update_factor
def pair_prob_hg(k, N, Nx, Ny):
    """Hypergeometric probability of drawing exactly ``k`` type(Ny) elements.

    Probability of getting k successes when drawing Nx times (without
    replacement) from N elements of which Ny are successes.

    Parameters
    ----------
    k : int
        Number of successes; must satisfy k <= Nx and k <= Ny.
    N, Nx, Ny : int
        Population size, number of draws, number of success elements.
    """
    if (k > Nx) or (k > Ny):
        # Kept as a printed warning (not an exception) to preserve the
        # original best-effort behavior; the factorials below will raise on
        # negative arguments anyway.
        print("Given 'k' must be <= Nx and <= Ny.")
    # The redundant function-local ``import math`` was removed: the module
    # already imports math at the top of the file.
    term1 = math.factorial(Ny)/(math.factorial(k) * math.factorial(Ny - k))
    term2 = math.factorial(N - Ny)/(math.factorial(Nx - k) * math.factorial(N - Nx - Ny + k))
    term3 = math.factorial(N)/(math.factorial(Nx) * math.factorial(N - Nx))
    return term1 * term2 / term3
def hit_prob_dist(N, Nx, Ny, nys):
    """Binomial hit-count distributions for each possible overlap ``k``.

    For every k in 1..min(Nx, Ny), take p = pair_prob_hg(k, N, Nx, Ny) and
    build the binomial distribution of observing 0..nys hits at that p.

    Returns a list (one entry per k) of lists of nys + 1 probabilities.

    Fixes over the original: the inner loop no longer reuses (shadows) the
    outer loop variable ``k``, and the redundant function-local
    ``import math`` is dropped (math is imported at module level).
    """
    p_dist_ks = []
    for k in range(1, min(Nx, Ny) + 1):
        p = pair_prob_hg(k, N, Nx, Ny)
        p_dist = []
        # Binomial pmf over 0..nys observed hits at success probability p.
        for j in range(0, nys + 1):
            coeff = math.factorial(nys) / (math.factorial(j) * math.factorial(nys - j))
            term2 = p**j * (1 - p)**(nys - j)
            p_dist.append(coeff * term2)
        p_dist_ks.append(p_dist)
    return p_dist_ks
def pair_prob(P_str, XG, Ny, hits):
    """
    Calculate probability of finding 'k' hits between Gx and Sy.
    CAREFUL: for larger Nx, Ny, Nstr this quickly becomes *VERY* slow (many, many permutations...)
    --> better use pair_prob_approx instead
    Parameters
    ----------
    P_str: numpy array
        Probabilities for finding a spectrum in the a certain strain.
        Usually this can be set to num-of-spectra-in-strain / num-of-spectra-in-all-strains
    XG: list
        List of ids of strains where the GCF of interest occurs.
    Ny: int
        Number of strains that contain the spectrum of interest.
    hits: int
        number of hits
    """
    Nx = len(XG)
    Nstr = len(P_str)
    # Check Nx, Ny, hits
    # NOTE(review): invalid input only prints a warning; computation proceeds.
    if (hits > Nx) or (hits > Ny):
        print("Given number of 'hits' must be <= Nx and <= Ny.")
    # Calculate all unique permutations:
    # Each permutation is a 0/1 mask choosing which strains in XG are hits.
    state0 = [1]*hits + [0]*(Nx-hits)
    states = np.array(list(permutation_unique(state0)))
    # Calculate the product of all probabilties accross all permutations
    # (states*P_str keeps hit probabilities; "+ abs(states-1)" turns the
    # zeroed non-hit entries into 1 so the row product ignores them).
    P_states = states*P_str[XG]
    prods = np.prod(P_states + np.abs(states-1), axis=1)
    # Free the potentially huge intermediate arrays before the final sum.
    del P_states
    del states
    p_mean = 1/Nstr
    # Calculate product of all non-hits
    prod1 = 1
    for i in range(Ny - hits):
        prod1 = prod1 * ((Nstr- Nx - i)/Nstr)
    # Calculate product of probability updates
    # (fewer accessible elements lead to increasing probabilities)
    prod2 = 1
    for j in range(Ny):
        prod2 = prod2 * (1/(1 - j*p_mean))
    return np.sum(math.factorial(Ny)/math.factorial(Ny-hits) * prods * prod1 * prod2)
# method to calculate unique permutations:
class unique_element:
    """Pairs a value with its remaining occurrence count during backtracking."""
    def __init__(self,value,occurrences):
        # ``occurrences`` is mutated by permutation_unique_helper while it
        # explores and backtracks, so instances are deliberately mutable.
        self.value = value
        self.occurrences = occurrences
def permutation_unique(elements):
    """Yield every distinct permutation of ``elements`` (list) as a tuple.

    Duplicates in the input produce each distinct arrangement exactly once.
    """
    # One occurrence counter per distinct value.
    counters = [unique_element(value, elements.count(value)) for value in set(elements)]
    size = len(elements)
    return permutation_unique_helper(counters, [0] * size, size - 1)
def permutation_unique_helper(listunique, result_list, d):
    """
    Helper function to derive unique permutations of elements (list)

    Fills ``result_list`` from position d down to 0. Occurrence counters in
    ``listunique`` are decremented on the way down and restored on backtrack,
    so each distinct value appears at most its original count per permutation.
    """
    if d < 0:
        # All positions assigned -- emit a snapshot (result_list is reused).
        yield tuple(result_list)
    else:
        for i in listunique:
            if i.occurrences > 0:
                result_list[d]=i.value
                i.occurrences-=1
                for g in permutation_unique_helper(listunique, result_list, d-1):
                    yield g
                # Backtrack: restore the counter for the next candidate.
                i.occurrences+=1
| 34.323333
| 112
| 0.653394
|
acfe8f6f311d2ff9da77ed8c0ae3276808823fd0
| 6,052
|
py
|
Python
|
cassle/methods/mocov2plus.py
|
DonkeyShot21/cassle
|
d25f9c7cb5e822660dc1ef03e7fac09a33d0b1a8
|
[
"MIT"
] | 13
|
2022-03-24T10:08:44.000Z
|
2022-03-29T09:33:05.000Z
|
cassle/methods/mocov2plus.py
|
DonkeyShot21/cassle
|
d25f9c7cb5e822660dc1ef03e7fac09a33d0b1a8
|
[
"MIT"
] | null | null | null |
cassle/methods/mocov2plus.py
|
DonkeyShot21/cassle
|
d25f9c7cb5e822660dc1ef03e7fac09a33d0b1a8
|
[
"MIT"
] | null | null | null |
import argparse
from typing import Any, Dict, List, Sequence, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from cassle.losses.moco import moco_loss_func
from cassle.methods.base import BaseMomentumModel
from cassle.utils.gather_layer import gather
from cassle.utils.momentum import initialize_momentum_params
class MoCoV2Plus(BaseMomentumModel):
    # FIFO key queue; shape (2, output_dim, queue_size) -- one bank per view.
    queue: torch.Tensor
    def __init__(
        self, output_dim: int, proj_hidden_dim: int, temperature: float, queue_size: int, **kwargs
    ):
        """Implements MoCo V2+ (https://arxiv.org/abs/2011.10566).
        Args:
            output_dim (int): number of dimensions of projected features.
            proj_hidden_dim (int): number of neurons of the hidden layers of the projector.
            temperature (float): temperature for the softmax in the contrastive loss.
            queue_size (int): number of samples to keep in the queue.
        """
        super().__init__(**kwargs)
        self.temperature = temperature
        self.queue_size = queue_size
        # projector: 2-layer MLP on top of the encoder features
        self.projector = nn.Sequential(
            nn.Linear(self.features_dim, proj_hidden_dim),
            nn.ReLU(),
            nn.Linear(proj_hidden_dim, output_dim),
        )
        # momentum projector: same architecture; initialized from the online
        # projector and kept in sync by the base model's momentum mechanism.
        self.momentum_projector = nn.Sequential(
            nn.Linear(self.features_dim, proj_hidden_dim),
            nn.ReLU(),
            nn.Linear(proj_hidden_dim, output_dim),
        )
        initialize_momentum_params(self.projector, self.momentum_projector)
        # create the queue; entries are L2-normalized like the keys
        self.register_buffer("queue", torch.randn(2, output_dim, queue_size))
        self.queue = nn.functional.normalize(self.queue, dim=1)
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
    @staticmethod
    def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Adds mocov2plus-specific CLI arguments on top of the parent's."""
        parent_parser = super(MoCoV2Plus, MoCoV2Plus).add_model_specific_args(parent_parser)
        parser = parent_parser.add_argument_group("mocov2plus")
        # projector
        parser.add_argument("--output_dim", type=int, default=128)
        parser.add_argument("--proj_hidden_dim", type=int, default=2048)
        # parameters
        parser.add_argument("--temperature", type=float, default=0.1)
        # queue settings
        parser.add_argument("--queue_size", default=65536, type=int)
        return parent_parser
    @property
    def learnable_params(self) -> List[dict]:
        """Adds projector parameters together with parent's learnable parameters.
        Returns:
            List[dict]: list of learnable parameters.
        """
        extra_learnable_params = [{"params": self.projector.parameters()}]
        return super().learnable_params + extra_learnable_params
    @property
    def momentum_pairs(self) -> List[Tuple[Any, Any]]:
        """Adds (projector, momentum_projector) to the parent's momentum pairs.
        Returns:
            List[Tuple[Any, Any]]: list of momentum pairs.
        """
        extra_momentum_pairs = [(self.projector, self.momentum_projector)]
        return super().momentum_pairs + extra_momentum_pairs
    @torch.no_grad()
    def _dequeue_and_enqueue(self, keys: torch.Tensor):
        """Adds new samples and removes old samples from the queue in a fifo manner.
        Args:
            keys (torch.Tensor): output features of the momentum encoder.
        """
        batch_size = keys.shape[1]
        ptr = int(self.queue_ptr)  # type: ignore
        # Pointer arithmetic below assumes whole batches fit exactly.
        assert self.queue_size % batch_size == 0  # for simplicity
        # replace the keys at ptr (dequeue and enqueue)
        keys = keys.permute(0, 2, 1)
        self.queue[:, :, ptr : ptr + batch_size] = keys
        ptr = (ptr + batch_size) % self.queue_size  # move pointer
        self.queue_ptr[0] = ptr  # type: ignore
    def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
        """Performs the forward pass of the online encoder and the online projection.
        Args:
            X (torch.Tensor): a batch of images in the tensor format.
        Returns:
            Dict[str, Any]: a dict containing the outputs of the parent and the projected features.
        """
        out = super().forward(X, *args, **kwargs)
        q = F.normalize(self.projector(out["feats"]), dim=-1)
        return {**out, "q": q}
    def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
        """
        Training step for MoCo reusing BaseMomentumModel training step.
        Args:
            batch (Sequence[Any]): a batch of data in the
                format of [img_indexes, [X], Y], where [X] is a list of size self.num_crops
                containing batches of images.
            batch_idx (int): index of the batch.
        Returns:
            torch.Tensor: total loss composed of MOCO loss and classification loss.
        """
        out = super().training_step(batch, batch_idx)
        feats1, feats2 = out["feats"]
        momentum_feats1, momentum_feats2 = out["momentum_feats"]
        # Online queries for both augmented views.
        q1 = self.projector(feats1)
        q2 = self.projector(feats2)
        q1 = F.normalize(q1, dim=-1)
        q2 = F.normalize(q2, dim=-1)
        # Momentum keys; no gradients flow through the key branch.
        with torch.no_grad():
            k1 = self.momentum_projector(momentum_feats1)
            k2 = self.momentum_projector(momentum_feats2)
            k1 = F.normalize(k1, dim=-1)
            k2 = F.normalize(k2, dim=-1)
        # ------- contrastive loss -------
        # symmetric: each view's query is contrasted against the other view's
        # key and the corresponding queue bank.
        queue = self.queue.clone().detach()
        nce_loss = (
            moco_loss_func(q1, k2, queue[1], self.temperature)
            + moco_loss_func(q2, k1, queue[0], self.temperature)
        ) / 2
        # ------- update queue -------
        # Keys are gathered across devices before being enqueued.
        keys = torch.stack((gather(k1), gather(k2)))
        self._dequeue_and_enqueue(keys)
        self.log("train_nce_loss", nce_loss, on_epoch=True, sync_dist=True)
        out.update({"loss": out["loss"] + nce_loss, "z": [q1, q2]})
        return out
| 35.6
| 99
| 0.627726
|
acfe8fdced05944fd9ac688ac3f984143e679a22
| 1,885
|
py
|
Python
|
beginner_contest/176/D.py
|
FGtatsuro/myatcoder
|
25a3123be6a6311e7d1c25394987de3e35575ff4
|
[
"MIT"
] | null | null | null |
beginner_contest/176/D.py
|
FGtatsuro/myatcoder
|
25a3123be6a6311e7d1c25394987de3e35575ff4
|
[
"MIT"
] | null | null | null |
beginner_contest/176/D.py
|
FGtatsuro/myatcoder
|
25a3123be6a6311e7d1c25394987de3e35575ff4
|
[
"MIT"
] | null | null | null |
import sys
input = sys.stdin.readline  # fast input (note: shadows the builtin)
sys.setrecursionlimit(10 ** 7)
# Grid size, start cell, goal cell (all 1-indexed).
h, w = map(int, input().split())
ch, cw = map(int, input().split())
dh, dw = map(int, input().split())
# 1-based grid and distance table; index 0 is padding, -1 marks "unvisited".
graph = [0] + [0] * h
dist = [0] + [0] * h
for i in range(h):
    graph[i + 1] = [0] + list(input().strip())
    dist[i + 1] = [0] + [-1] * w
from collections import deque
def can_move(graph, dist, next_h, next_w):
    """Return True when (next_h, next_w) is inside the grid and not a wall.

    ``dist`` is accepted (and unused) to keep the original call signature.
    Relies on the module-level grid bounds ``h`` and ``w``.
    """
    inside = (1 <= next_h <= h) and (1 <= next_w <= w)
    return inside and graph[next_h][next_w] != '#'
# FYI: https://betrue12.hateblo.jp/entry/2018/12/08/000020
def bfs(graph, queue, dist):
    """0-1 BFS: walking to an adjacent cell costs 0, a 5x5 "warp" costs 1.

    ``dist`` holds -1 for unvisited cells and is updated in place. ``queue``
    is a deque: cost-0 moves are pushed to the front, cost-1 moves to the
    back, so cells are processed in non-decreasing cost order.
    """
    part = set()  # NOTE(review): unused -- apparently left over; safe to drop.
    while queue:
        current_h, current_w = queue.popleft()
        # cost 0: step to one of the four adjacent cells
        for dh, dw in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            next_h, next_w = current_h + dh, current_w + dw
            if not can_move(graph, dist, next_h, next_w):
                continue
            # https://atcoder.jp/contests/abc176/editorial/65
            # As the editorial explains, even if a cell was already reached
            # via a warp, overwrite its distance whenever the new cost is
            # smaller. This can happen in ordinary graphs too -- the "warp"
            # is just another edge, so think of it as Dijkstra-style
            # relaxation.
            if dist[next_h][next_w] == -1 or dist[next_h][next_w] > dist[current_h][current_w]:
                dist[next_h][next_w] = dist[current_h][current_w]
                queue.appendleft((next_h, next_w))
        # cost 1: warp anywhere within the surrounding 5x5 square
        for dh in range(-2, 3):
            for dw in range(-2, 3):
                next_h, next_w = current_h + dh, current_w + dw
                if not can_move(graph, dist, next_h, next_w):
                    continue
                if dist[next_h][next_w] != -1:
                    continue
                dist[next_h][next_w] = dist[current_h][current_w] + 1
                queue.append((next_h, next_w))
# Run the 0-1 BFS from the start cell and print the cost to reach the goal
# (-1 when the goal is unreachable).
queue = deque([(ch, cw)])
dist[ch][cw] = 0
bfs(graph, queue, dist)
print(dist[dh][dw])
| 33.070175
| 95
| 0.54748
|
acfe901bf002079cb12493e329c64fa9be514bf9
| 3,288
|
py
|
Python
|
Util.py
|
pr0tege/csv_database
|
3e3c44b846ef9229a9336d2aca8c4b82c5260f74
|
[
"MIT"
] | null | null | null |
Util.py
|
pr0tege/csv_database
|
3e3c44b846ef9229a9336d2aca8c4b82c5260f74
|
[
"MIT"
] | null | null | null |
Util.py
|
pr0tege/csv_database
|
3e3c44b846ef9229a9336d2aca8c4b82c5260f74
|
[
"MIT"
] | null | null | null |
# Relative paths to the CSV files backing the datastore.
USERS = "./data_store/users.csv"
ADDRESSES = "./data_store/addresses.csv"
def load_users():
    """Read the users CSV into a dict keyed by the hash of each user tuple.

    The records are immutable tuples, so their hashes are stable for the
    lifetime of the process and double as record identifiers.

    :return: (dict) hash -> user tuple
    """
    users = {}
    with open(USERS, "r") as file:
        for line in file:
            fields = line.strip().split(",")
            # First five fields are strings; the sixth is the address id.
            record = create_user(*fields[:5], int(fields[5]))
            users[hash(record)] = record
    return users
def load_addresses():
    """Read the addresses CSV into a dict keyed by the hash of each tuple.

    :return: (dict) hash -> address tuple
    """
    with open(ADDRESSES, "r") as file:
        records = [create_address(*line.strip().split(",")) for line in file]
    return {hash(record): record for record in records}
def load_datastore():
    """Build the in-memory datastore (a lightweight database substitute).

    The result has two keys, "users" and "addresses"; each maps record
    hashes to the corresponding immutable record tuples.

    :return: (dict)
    """
    datastore = {}
    datastore["users"] = load_users()
    datastore["addresses"] = load_addresses()
    return datastore
def write_users(users):
    """Serialize user tuples back to the users CSV, one comma-joined row each."""
    rows = ["{0},{1},{2},{3},{4},{5}".format(*user) for user in users]
    with open(USERS, "w") as file:
        file.write("\n".join(rows))
def write_addresses(addresses):
    """Serialize address tuples back to the addresses CSV, one row each."""
    rows = ["{0},{1}".format(*address) for address in addresses]
    with open(ADDRESSES, "w") as file:
        file.write("\n".join(rows))
def write_datastore(datastore):
    """Persist both datastore sections back to their CSV files."""
    write_users(datastore["users"].values())
    write_addresses(datastore["addresses"].values())
def create_address(house_number, post_code):
    """Build an immutable address record.

    :param house_number: user's house number (string)
    :param post_code: user's post code (string)
    :return: (tuple) of the two fields, in that order
    """
    return house_number, post_code
def create_user(first_name, last_name, email, phone_number, dob, address_id):
    """Build an immutable user record.

    :param first_name: user's first name (string)
    :param last_name: user's last name (string)
    :param email: user's email (string)
    :param phone_number: user's phone number (string)
    :param dob: user's date of birth (string)
    :param address_id: hash key of the user's address record
    :return: (tuple) of the six fields, in that order
    """
    return first_name, last_name, email, phone_number, dob, address_id
def format_user(datastore, user_hash):
    """Render a user record, with its resolved address, as a display string.

    :param datastore: the dict returned by load_datastore
    :param user_hash: key of the user in datastore["users"]
    :return: (str)
    """
    user = datastore["users"][user_hash]
    address = format_address(datastore, user[5])
    template = ("First Name: {0}, Last Name: {1}, Email: {2}, "
                "Phone Number: {3}, Date of Birth: {4}, Address: {5}")
    return template.format(*user[:5], address)
def format_address(datastore, address_hash):
    """Render an address record as "house_number, post_code".

    :param datastore: the dict returned by load_datastore
    :param address_hash: key of the address in datastore["addresses"]
    :return: (str)
    """
    house_number, post_code = datastore["addresses"][address_hash]
    return "{0}, {1}".format(house_number, post_code)
| 30.444444
| 120
| 0.612835
|
acfe90442cd1d5ff082cd97b3d275b545af6330a
| 9,167
|
py
|
Python
|
neutron/plugins/nec/db/packetfilter.py
|
ksshanam/neutron-dvr
|
c0854ea0d1023ab42e1ef861f9b6ff480e985ac5
|
[
"Apache-2.0"
] | 3
|
2015-02-02T02:51:39.000Z
|
2015-02-23T10:20:23.000Z
|
neutron/plugins/nec/db/packetfilter.py
|
ksshanam/neutron-dvr
|
c0854ea0d1023ab42e1ef861f9b6ff480e985ac5
|
[
"Apache-2.0"
] | 4
|
2015-02-23T10:21:11.000Z
|
2015-03-04T09:28:20.000Z
|
neutron/plugins/nec/db/packetfilter.py
|
ksshanam/neutron-dvr
|
c0854ea0d1023ab42e1ef861f9b6ff480e985ac5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012-2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc as sa_exc
from sqlalchemy import sql
from neutron.api.v2 import attributes
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import uuidutils
from neutron.plugins.nec.db import models as nmodels
from neutron.plugins.nec.extensions import packetfilter as ext_pf
# Operational status values stored in PacketFilter.status.
PF_STATUS_ACTIVE = 'ACTIVE'
PF_STATUS_DOWN = 'DOWN'
PF_STATUS_ERROR = 'ERROR'
# Condition fields stored as integers; unspecified values become 0 for these
# and '' for the string fields (see _replace_unspecified_field).
INT_FIELDS = ('eth_type', 'src_port', 'dst_port')
class PacketFilter(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
    """Represents a packet filter.

    A filter belongs to a network and optionally to a single port (when
    in_port is NULL the filter applies network-wide); rows are removed
    together with their network/port via ON DELETE CASCADE.
    """
    name = sa.Column(sa.String(255))
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           nullable=False)
    priority = sa.Column(sa.Integer, nullable=False)
    action = sa.Column(sa.String(16), nullable=False)
    # condition
    # in_port is nullable: a NULL value means the filter is not bound to a
    # specific port (see get_packet_filters_for_port).
    in_port = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        nullable=True)
    src_mac = sa.Column(sa.String(32), nullable=False)
    dst_mac = sa.Column(sa.String(32), nullable=False)
    eth_type = sa.Column(sa.Integer, nullable=False)
    src_cidr = sa.Column(sa.String(64), nullable=False)
    dst_cidr = sa.Column(sa.String(64), nullable=False)
    protocol = sa.Column(sa.String(16), nullable=False)
    src_port = sa.Column(sa.Integer, nullable=False)
    dst_port = sa.Column(sa.Integer, nullable=False)
    # status
    admin_state_up = sa.Column(sa.Boolean(), nullable=False)
    status = sa.Column(sa.String(16), nullable=False)
    network = orm.relationship(
        models_v2.Network,
        backref=orm.backref('packetfilters', lazy='joined', cascade='delete'),
        uselist=False)
    in_port_ref = orm.relationship(
        models_v2.Port,
        backref=orm.backref('packetfilters', lazy='joined', cascade='delete'),
        primaryjoin="Port.id==PacketFilter.in_port",
        uselist=False)
class PacketFilterDbMixin(object):
    """DB-layer CRUD operations for packet filters."""

    def _make_packet_filter_dict(self, pf_entry, fields=None):
        """Convert a PacketFilter row into an API response dict."""
        res = {'id': pf_entry['id'],
               'name': pf_entry['name'],
               'tenant_id': pf_entry['tenant_id'],
               'network_id': pf_entry['network_id'],
               'action': pf_entry['action'],
               'priority': pf_entry['priority'],
               'in_port': pf_entry['in_port'],
               # "or None" ensure the filed is None if empty
               'src_mac': pf_entry['src_mac'] or None,
               'dst_mac': pf_entry['dst_mac'] or None,
               'eth_type': pf_entry['eth_type'] or None,
               'src_cidr': pf_entry['src_cidr'] or None,
               'dst_cidr': pf_entry['dst_cidr'] or None,
               'protocol': pf_entry['protocol'] or None,
               'src_port': pf_entry['src_port'] or None,
               'dst_port': pf_entry['dst_port'] or None,
               'admin_state_up': pf_entry['admin_state_up'],
               'status': pf_entry['status']}
        return self._fields(res, fields)

    def _get_packet_filter(self, context, id):
        """Fetch the PacketFilter row or raise PacketFilterNotFound."""
        try:
            pf_entry = self._get_by_id(context, PacketFilter, id)
        except sa_exc.NoResultFound:
            raise ext_pf.PacketFilterNotFound(id=id)
        return pf_entry

    def get_packet_filter(self, context, id, fields=None):
        """Return a single packet filter as a dict."""
        pf_entry = self._get_packet_filter(context, id)
        return self._make_packet_filter_dict(pf_entry, fields)

    def get_packet_filters(self, context, filters=None, fields=None):
        """Return all packet filters matching the given filters."""
        return self._get_collection(context,
                                    PacketFilter,
                                    self._make_packet_filter_dict,
                                    filters=filters,
                                    fields=fields)

    def _replace_unspecified_field(self, params, key):
        """Normalize ATTR_NOT_SPECIFIED values to NULL / 0 / '' per field type."""
        if not attributes.is_attr_set(params[key]):
            if key == 'in_port':
                params[key] = None
            elif key in INT_FIELDS:
                # Integer field
                params[key] = 0
            else:
                params[key] = ''

    def _get_eth_type_for_protocol(self, protocol):
        """Map an L4/L3 protocol name to its implied ethertype (or None)."""
        if protocol.upper() in ("ICMP", "TCP", "UDP"):
            return 0x800
        elif protocol.upper() == "ARP":
            return 0x806

    def _set_eth_type_from_protocol(self, filter_dict):
        """Fill in eth_type when the protocol implies one."""
        if filter_dict.get('protocol'):
            eth_type = self._get_eth_type_for_protocol(filter_dict['protocol'])
            if eth_type:
                filter_dict['eth_type'] = eth_type

    def _check_eth_type_and_protocol(self, new_filter, current_filter):
        """Reject an eth_type update that contradicts the stored protocol."""
        if 'protocol' in new_filter or 'eth_type' not in new_filter:
            return
        eth_type = self._get_eth_type_for_protocol(current_filter['protocol'])
        if not eth_type:
            return
        if eth_type != new_filter['eth_type']:
            raise ext_pf.PacketFilterEtherTypeProtocolMismatch(
                eth_type=hex(new_filter['eth_type']),
                protocol=current_filter['protocol'])

    def create_packet_filter(self, context, packet_filter):
        """Create a packet filter row (initial status DOWN) and return it."""
        pf_dict = packet_filter['packet_filter']
        tenant_id = self._get_tenant_id_for_create(context, pf_dict)
        if pf_dict['in_port'] == attributes.ATTR_NOT_SPECIFIED:
            # validate network ownership
            self.get_network(context, pf_dict['network_id'])
        else:
            # validate port ownership
            self.get_port(context, pf_dict['in_port'])
        params = {'tenant_id': tenant_id,
                  'id': pf_dict.get('id') or uuidutils.generate_uuid(),
                  'name': pf_dict['name'],
                  'network_id': pf_dict['network_id'],
                  'priority': pf_dict['priority'],
                  'action': pf_dict['action'],
                  'admin_state_up': pf_dict.get('admin_state_up', True),
                  'status': PF_STATUS_DOWN,
                  'in_port': pf_dict['in_port'],
                  'src_mac': pf_dict['src_mac'],
                  'dst_mac': pf_dict['dst_mac'],
                  'eth_type': pf_dict['eth_type'],
                  'src_cidr': pf_dict['src_cidr'],
                  'dst_cidr': pf_dict['dst_cidr'],
                  'src_port': pf_dict['src_port'],
                  'dst_port': pf_dict['dst_port'],
                  'protocol': pf_dict['protocol']}
        for key in params:
            self._replace_unspecified_field(params, key)
        self._set_eth_type_from_protocol(params)
        with context.session.begin(subtransactions=True):
            pf_entry = PacketFilter(**params)
            context.session.add(pf_entry)
        return self._make_packet_filter_dict(pf_entry)

    def update_packet_filter(self, context, id, packet_filter):
        """Apply a partial update to a packet filter and return the result."""
        params = packet_filter['packet_filter']
        for key in params:
            self._replace_unspecified_field(params, key)
        self._set_eth_type_from_protocol(params)
        with context.session.begin(subtransactions=True):
            pf_entry = self._get_packet_filter(context, id)
            self._check_eth_type_and_protocol(params, pf_entry)
            pf_entry.update(params)
        return self._make_packet_filter_dict(pf_entry)

    def delete_packet_filter(self, context, id):
        """Delete a packet filter row (raises if it does not exist)."""
        with context.session.begin(subtransactions=True):
            pf_entry = self._get_packet_filter(context, id)
            context.session.delete(pf_entry)

    def get_packet_filters_for_port(self, context, port):
        """Retrieve packet filters on OFC on a given port.
        It returns a list of tuple (neutron filter_id, OFC id).

        Both network-wide filters (in_port is NULL) and filters bound to this
        specific port are returned; only admin-enabled filters are considered.
        """
        query = (context.session.query(nmodels.OFCFilterMapping)
                 .join(PacketFilter,
                       nmodels.OFCFilterMapping.neutron_id == PacketFilter.id)
                 .filter(PacketFilter.admin_state_up == sql.true()))
        network_id = port['network_id']
        net_pf_query = (query.filter(PacketFilter.network_id == network_id)
                        .filter(PacketFilter.in_port == sql.null()))
        net_filters = [(pf['neutron_id'], pf['ofc_id']) for pf in net_pf_query]
        port_pf_query = query.filter(PacketFilter.in_port == port['id'])
        port_filters = [(pf['neutron_id'], pf['ofc_id'])
                        for pf in port_pf_query]
        return net_filters + port_filters
| 41.858447
| 79
| 0.617105
|
acfe90be6d26f4e8b9caf47184039f55dccb6c13
| 1,042
|
py
|
Python
|
FuzzyLayer.py
|
CI-SSLab/FRNN
|
f0f9a6a4b2981f0c70c612eb7f020bdc03040032
|
[
"MIT"
] | null | null | null |
FuzzyLayer.py
|
CI-SSLab/FRNN
|
f0f9a6a4b2981f0c70c612eb7f020bdc03040032
|
[
"MIT"
] | null | null | null |
FuzzyLayer.py
|
CI-SSLab/FRNN
|
f0f9a6a4b2981f0c70c612eb7f020bdc03040032
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from tensorflow.keras.layers import Layer, InputSpec
import tensorflow as tf
from tensorflow.keras.initializers import RandomUniform
class FuzzyLayer(Layer):
    """Keras layer taking elementwise maxima between inputs and learned weights.

    For each input feature i and output unit j the layer emits
    max(x[:, :, i], kernel[i, j]); all results are concatenated along the
    last axis, so the output width is input_features * output_dim.
    """

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        self.input_spec = InputSpec(min_ndim=2)
        super(FuzzyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # One weight per (input feature, output unit) pair, uniform in [0, 1].
        self.kernel = self.add_weight(
            name='kernel',
            shape=(input_shape[-1], self.output_dim),
            initializer=RandomUniform(minval=0, maxval=1),
            trainable=True,
        )
        super(FuzzyLayer, self).build(input_shape)

    def call(self, x, *args):
        columns = []
        for feature in range(x.shape[-1]):
            maxima = [
                tf.expand_dims(tf.maximum(x[:, :, feature], self.kernel[feature, unit]), axis=-1)
                for unit in range(self.output_dim)
            ]
            columns.append(tf.concat(maxima, axis=-1))
        return tf.concat(columns, axis=-1)
| 32.5625
| 100
| 0.591171
|
acfe911d07fe53778beb84ba1b773c08f91d505e
| 4,202
|
py
|
Python
|
kitchen/migrations/0001_initial.py
|
DenerRodrigues/cheffapp-api
|
498e9c96102f9bc777158b7aa07a99d89afa6a39
|
[
"MIT"
] | 1
|
2020-03-23T03:21:43.000Z
|
2020-03-23T03:21:43.000Z
|
kitchen/migrations/0001_initial.py
|
DenerRodrigues/cheffapp-api
|
498e9c96102f9bc777158b7aa07a99d89afa6a39
|
[
"MIT"
] | 5
|
2021-03-19T01:03:34.000Z
|
2021-06-10T18:44:34.000Z
|
kitchen/migrations/0001_initial.py
|
DenerRodrigues/chefapp-api
|
498e9c96102f9bc777158b7aa07a99d89afa6a39
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-03-23 02:58
from decimal import Decimal
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import jsonfield.fields
import multiselectfield.db.fields
class Migration(migrations.Migration):
    # Initial schema for the kitchen app: creates the Chef and FoodRecipe
    # tables.  Auto-generated by Django 3.0.4 — change the schema with a
    # new migration, never by editing this one once it has been applied.
    # NOTE(review): the field name 'days_of_weak' is a typo for
    # 'days_of_week'; it is baked into the DB schema and must be renamed
    # via a dedicated migration if fixed.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Chef',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True, help_text='Indicates that the record is active. Instead of deleting the record, uncheck this.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('last_update', models.DateTimeField(blank=True, null=True, verbose_name='last update')),
                ('name', models.CharField(blank=True, max_length=100, null=True, verbose_name='Name')),
                ('description', models.CharField(default='', max_length=250, verbose_name='Description')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='Email address')),
                ('phone', models.CharField(blank=True, max_length=50, null=True, verbose_name='Phone')),
                ('address', jsonfield.fields.JSONField(blank=True, default={}, null=True, verbose_name='Address')),
                ('open_at', models.TimeField(verbose_name='Open at')),
                ('close_at', models.TimeField(verbose_name='Close at')),
                ('days_of_weak', multiselectfield.db.fields.MultiSelectField(choices=[('SUNDAY', 'Sunday'), ('MONDAY', 'Monday'), ('TUESDAY', 'Tuesday'), ('WEDNESDAY', 'Wednesday'), ('THURSDAY', 'Thursday'), ('FRIDAY', 'Friday'), ('SATURDAY', 'Saturday')], max_length=56, verbose_name='Days of weak')),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='chefs', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Chef',
                'verbose_name_plural': 'Chefs',
            },
        ),
        migrations.CreateModel(
            name='FoodRecipe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True, help_text='Indicates that the record is active. Instead of deleting the record, uncheck this.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('last_update', models.DateTimeField(blank=True, null=True, verbose_name='last update')),
                ('name', models.CharField(blank=True, max_length=50, null=True, verbose_name='Name')),
                ('description', models.CharField(default='', max_length=250, verbose_name='Description')),
                ('category', models.CharField(choices=[('OTHERS', 'Others'), ('BRAZILIAN', 'Brazilian'), ('ARABIC', 'Arabic'), ('ASIAN', 'Asian'), ('MEXICAN', 'Mexican'), ('ITALIAN', 'Italian'), ('SCNACK', 'Snack'), ('PACKED_LUNCH', 'Packed lunch'), ('MEAT', 'Meat'), ('PIZZA', 'Pizza'), ('PASTA', 'Pasta'), ('FIT', 'Fit'), ('VEGETARIAN', 'Vegetarian'), ('VEGAN', 'Vegan'), ('DRINK', 'Drink')], default='OTHERS', max_length=50, verbose_name='Category')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10, validators=[django.core.validators.MinValueValidator(Decimal('0.01'))], verbose_name='Price')),
                ('preparation_time', models.TimeField(verbose_name='Preparation time')),
                ('chef', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='food_recipes', to='kitchen.Chef')),
            ],
            options={
                'verbose_name': 'Food Recipe',
                'verbose_name_plural': 'Food Recipes',
            },
        ),
    ]
| 65.65625
| 454
| 0.632556
|
acfe91beb4afebcdfc91b4027f7d02e152f6beb9
| 5,744
|
py
|
Python
|
python/Lib/site-packages/tectle/fakeorders.py
|
ksritharan/tectle
|
ca76424d85e66b041b40997838a3ceb79266efab
|
[
"MIT"
] | 1
|
2021-03-04T14:58:05.000Z
|
2021-03-04T14:58:05.000Z
|
python/Lib/site-packages/tectle/fakeorders.py
|
ksritharan/tectle
|
ca76424d85e66b041b40997838a3ceb79266efab
|
[
"MIT"
] | 8
|
2021-02-26T02:32:59.000Z
|
2021-05-28T02:22:07.000Z
|
python/Lib/site-packages/tectle/fakeorders.py
|
ksritharan/business-automation
|
ca76424d85e66b041b40997838a3ceb79266efab
|
[
"MIT"
] | null | null | null |
from .db import get_connection, get_data_dict
from flask import render_template, url_for, make_response, jsonify
from .buildschema import create_fake_receipts
from datetime import datetime
def add_one_more(cur):
    """Seed one additional synthetic receipt using the next free receipt id.

    The id is derived in SQL as MAX(id) + COUNT + 1 so repeated calls in
    the same session never collide.  Commit is left to the caller.
    """
    query = """
    SELECT MAX(CAST(receipt_id AS INT))+COUNT(1)+1 as new_receipt_id
    FROM FAKE_RECEIPTS
    """
    cur.execute(query)
    next_id = cur.fetchone()[0]
    # One receipt, at most 2 distinct items, at most 2 units of each.
    create_fake_receipts(cur, next_id, 1, 2, 2)
def do_open_receipts(limit, offset):
    """Return a JSON page of open (status = 0) Etsy receipts.

    Seeds one extra fake receipt per call (via add_one_more) so the
    listing never runs dry.

    Args:
        limit: page size; coerced to int before SQL interpolation.
        offset: page start; coerced to int as well.

    Returns:
        Flask JSON response {'count': <total open receipts>, 'results': [...]}.
    """
    # These values typically arrive from an HTTP request; forcing them to
    # int means the %-interpolation below cannot carry SQL injection.
    limit = int(limit)
    offset = int(offset)
    conn = get_connection()
    cur = conn.cursor()
    add_one_more(cur)
    conn.commit()
    query = """
    SELECT *
    FROM fake_receipts
    WHERE status = 0
    AND source = 'Etsy'
    ORDER BY receipt_id DESC
    LIMIT %s
    OFFSET %s
    """ % (limit, offset)
    rows = get_data_dict(cur, query)
    query = """
    SELECT COUNT(1)
    FROM fake_receipts
    WHERE status = 0
    """
    cur.execute(query)
    count = cur.fetchone()[0]
    data = {'count': count,
            'results': rows}
    return jsonify(data)
def do_unfulfilled_receipts(limit, offset):
    """Return unfulfilled Webflow receipts formatted as order payloads.

    Seeds one extra fake receipt per call, then formats each row through
    _format_row (which also attaches purchased-item data).

    Args:
        limit: page size; coerced to int before SQL interpolation.
        offset: page start; coerced to int as well.

    Returns:
        Flask JSON response: a list of Webflow-style order dicts.
    """
    # int() coercion prevents SQL injection through the %-interpolated
    # pagination values below.
    limit = int(limit)
    offset = int(offset)
    conn = get_connection()
    cur = conn.cursor()
    add_one_more(cur)
    conn.commit()
    query = """
    SELECT f.*,
           c.country_code
    FROM fake_receipts f
    JOIN countries c ON f.country_id = c.country_id
    WHERE f.status = 0
    AND f.source = 'Webflow'
    ORDER BY f.receipt_id DESC
    LIMIT %s
    OFFSET %s
    """ % (limit, offset)
    rows = get_data_dict(cur, query)
    data = [_format_row(row) for row in rows]
    return jsonify(data)
def _format_row(row):
    """Map one fake_receipts row (joined with countries) onto a
    Webflow-style order payload.

    Assumes `row` carries the receipt columns plus `country_code`
    — matches the SELECT in do_unfulfilled_receipts; confirm if reused.
    """
    new_row = {
        'orderId': row['receipt_id'],
        # status column: 0 = open/unfulfilled, anything else = fulfilled.
        'status': 'unfulfilled' if row['status'] == 0 else 'fulfilled',
        'customerInfo': {
            'fullName': row['name'],
            'email': row['buyer_email']
        },
        'shippingAddress': {
            'type': 'shipping',
            'addressee': row['name'],
            'line1': row['first_line'],
            'line2': row['second_line'],
            'city': row['city'],
            'state': row['state'],
            'country': row['country_code'],
            'postalCode': row['zip']
        },
        # creation_tsz is an epoch timestamp; rendered as ISO-8601 with a
        # literal 'Z' suffix (treated as UTC — confirm upstream).
        "acceptedOn": datetime.utcfromtimestamp(row['creation_tsz']).isoformat(timespec='milliseconds') + 'Z'
    }
    # Merge in purchasedItems / totals computed from the transactions table.
    purchased_items_data = _get_purchased_items_data(row['receipt_id'])
    new_row.update(purchased_items_data)
    return new_row
def _get_purchased_items_data(receipt_id):
    """Build the purchasedItems/totals section of an order payload from
    the fake_transactions rows of one receipt.

    NOTE: every transaction row contributes a flat 52.00 USD — the
    rowTotal is a constant 5200 cents and total_price += 52 per row, so
    item quantity does NOT scale the money amounts (only the item count).
    This matches the fake-data intent here; revisit before reuse.
    NOTE(review): receipt_id is %-interpolated into the SQL text; confirm
    callers never pass attacker-controlled content.
    """
    conn = get_connection()
    cur = conn.cursor()
    query = """
    SELECT *
    FROM fake_transactions ft
    JOIN fake_products fp
    ON fp.id = ft.listing_id
    AND fp.id = ft.product_id
    WHERE receipt_id = '%s'
    """ % receipt_id
    rows = get_data_dict(cur, query)
    purchased_items = []
    purchased_items_count = 0
    total_price = 0  # accumulated in whole dollars; converted to a string below
    for row in rows:
        purchased_item = {
            'count': row['quantity'],
            'rowTotal': {
                'unit': 'USD',
                'value': 5200,
                'string': '$ 52.00 USD'
            },
            'variantSKU': row['sku'],
            'variantPrice': {
                'unit': 'USD',
                'value': 5200,
                'string': '$ 52.00 USD'
            }
        }
        total_price += 52
        purchased_items_count += row['quantity']
        purchased_items.append(purchased_item)
    data = {
        'purchasedItems': purchased_items,
        'purchasedItemsCount': purchased_items_count,
        'totals': {
            'subtotal': {
                'unit': 'USD',
                'value': total_price,
                # NOTE: 'value' is in dollars here (not cents like rowTotal);
                # the display string divides by 100 anyway — kept as-is.
                'string': '$ %.2lf USD' % (total_price/100)
            },
            "extras": [
                {
                    "type": "shipping",
                    "name": "Free Shipping",
                    "description": "Orders over 35 USD qualify for free shipping.",
                    "price": {
                        "unit": "USD",
                        "value": 0,
                        "string": "$ 0.00 USD"
                    }
                }
            ],
            'total': {
                'unit': 'USD',
                'value': total_price,
                'string': '$ %.2lf USD' % (total_price/100)
            },
        }
    }
    return data
def do_transactions(receipt_id):
    """Return the transactions (with product data) for one receipt as JSON."""
    conn = get_connection()
    cur = conn.cursor()
    # NOTE(review): receipt_id is %-interpolated into the SQL text; confirm
    # upstream that it cannot carry attacker-controlled content.
    query = """
    SELECT *
    FROM fake_transactions ft
    JOIN fake_products fp
    ON fp.id = ft.listing_id
    AND fp.id = ft.product_id
    WHERE receipt_id = '%s'
    """ % receipt_id
    rows = get_data_dict(cur, query)
    new_rows = [
        {'receipt_id': entry['receipt_id'],
         'quantity': entry['quantity'],
         'listing_id': entry['listing_id'],
         'product_data': {'product_id': entry['product_id'],
                          'sku': entry['sku']}}
        for entry in rows
    ]
    return jsonify({'count': len(rows),
                    'results': new_rows})
def do_listing_product(listing_id, product_id):
    """Return the single fake product matching both ids as JSON.

    Raises IndexError when no product matches (same as the original).
    """
    conn = get_connection()
    cur = conn.cursor()
    # NOTE(review): ids are %-interpolated into SQL; verify callers sanitize.
    query = """
    SELECT *
    FROM fake_products
    WHERE id = %s
    AND id = %s
    """ % (listing_id, product_id)
    record = get_data_dict(cur, query)[0]
    return jsonify({'count': 1,
                    'results': record})
| 29.15736
| 109
| 0.501219
|
acfe9215630a3f3c92563aed0f90b1a2afb43201
| 4,056
|
py
|
Python
|
data/p4VQE/R4/benchmark/startQiskit_Class777.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_Class777.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_Class777.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=40
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Zf for boolean function f on n qubits.

    For every basis string b with f(b) == "1", the sign of |b> is flipped
    by conjugating a multi-controlled phase (mcu1 with angle pi) with X
    gates on the positions where b has a '0'.
    """
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for index in range(2 ** n):
        bits = np.binary_repr(index, n)
        if f(bits) != "1":
            continue
        # Positions to X so that |bits> maps onto |11...1>.
        zero_positions = [pos for pos, bit in enumerate(bits) if bit == "0"]
        for pos in zero_positions:
            oracle.x(controls[pos])
        if n >= 2:
            oracle.mcu1(pi, controls[1:], controls[0])
        # Undo the X layer to restore the computational basis.
        for pos in zero_positions:
            oracle.x(controls[pos])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the benchmark circuit: a Hadamard layer, one oracle
    application, then a fixed auto-generated gate sequence.

    The '# number=' tags are generator bookkeeping for individual gates;
    do not reorder the gates — the benchmark identity depends on them.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Uniform superposition over all n qubits.
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)
    # Grover-style iteration count; only 1 iteration is actually applied below.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(1):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[4]) # number=27
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.cx(input_qubit[1],input_qubit[0]) # number=28
        prog.x(input_qubit[0]) # number=29
        prog.cx(input_qubit[1],input_qubit[0]) # number=30
        prog.cx(input_qubit[0],input_qubit[1]) # number=37
        prog.x(input_qubit[1]) # number=38
        prog.cx(input_qubit[0],input_qubit[1]) # number=39
        prog.cx(input_qubit[0],input_qubit[2]) # number=22
        prog.x(input_qubit[2]) # number=23
        prog.cx(input_qubit[0],input_qubit[2]) # number=24
        prog.cx(input_qubit[0],input_qubit[3]) # number=31
        prog.x(input_qubit[3]) # number=32
        prog.cx(input_qubit[0],input_qubit[3]) # number=33
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.x(input_qubit[0]) # number=13
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.y(input_qubit[1]) # number=26
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[2]) # number=34
        prog.cz(input_qubit[4],input_qubit[2]) # number=35
        prog.h(input_qubit[2]) # number=36
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
    # Final Hadamard layer on the first four qubits.
    prog.h(input_qubit[0])
    prog.h(input_qubit[1])
    prog.h(input_qubit[2])
    prog.h(input_qubit[3])
    # circuit end
    return prog
if __name__ == '__main__':
    # Benchmark driver: build the circuit for the all-zeros marked state,
    # simulate exactly, and dump probabilities plus the transpiled circuit.
    key = "00000"
    # f(b) == "1" only for the marked string `key`.
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    backend = BasicAer.get_backend('statevector_simulator')
    sample_shot =7924
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Convert amplitudes to per-basis-state probabilities, rounded to 3 dp.
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    # Transpile against a mock device to record a realistic depth.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    writefile = open("../data/startQiskit_Class777.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 30.727273
| 80
| 0.603057
|
acfe928eeba48beae36f27c90e54dbe8ee39e9bb
| 3,413
|
py
|
Python
|
csg_dicoms_anonymizer/csg_fileutil_libs/pydicom/util/dump.py
|
lrq3000/csg_dicoms_anonymizer
|
bd7d631b8b657e54086585c04593ed8470670f0d
|
[
"MIT"
] | 1
|
2021-04-01T22:26:47.000Z
|
2021-04-01T22:26:47.000Z
|
csg_dicoms_anonymizer/csg_fileutil_libs/pydicom/util/dump.py
|
lrq3000/csg_dicoms_anonymizer
|
bd7d631b8b657e54086585c04593ed8470670f0d
|
[
"MIT"
] | 1
|
2019-04-08T08:59:23.000Z
|
2019-04-09T16:43:29.000Z
|
csg_dicoms_anonymizer/csg_fileutil_libs/pydicom/util/dump.py
|
lrq3000/csg_dicoms_anonymizer
|
bd7d631b8b657e54086585c04593ed8470670f0d
|
[
"MIT"
] | 3
|
2017-08-08T11:41:31.000Z
|
2021-09-30T08:52:08.000Z
|
# dump.py
"""Utility functions used in debugging writing and reading"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, relased under an MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
from __future__ import print_function
from io import BytesIO
def print_character(ordchr):
    """Map a byte value to a display character for hex dumps.

    Printable ASCII (codes 32..125, backslash excluded) is returned as
    itself; everything else renders as '.'.
    """
    is_displayable = 31 < ordchr < 126 and ordchr != 92
    return chr(ordchr) if is_displayable else '.'
def filedump(filename, start_address=0, stop_address=None):
    """Dump the contents of a file as a standard 16-byte-wide hex dump.

    Returns the formatted dump string.  The file is opened in binary mode
    and — fixing the original, which leaked the handle — closed on exit.
    """
    with open(filename, 'rb') as fp:
        return hexdump(fp, start_address, stop_address)
def datadump(data):
    """Print a hex dump of an in-memory byte string."""
    # len(data) + 1 keeps the final (possibly partial) row inside the
    # stop boundary checked by hexdump.
    print(hexdump(BytesIO(data), 0, len(data) + 1))
def hexdump(file_in, start_address=0, stop_address=None, showAddress=True):
    """Return a formatted string of hex bytes and characters in data.

    This is a utility function for debugging file writing.

    file_in -- a binary file-like object; it is seeked to start_address
               before reading.
    start_address -- offset at which to begin the dump.
    stop_address -- if given, stop once the read position passes it.
    showAddress -- prefix each 16-byte row with its hex offset.

    Fixed relative to the original:
    * output is accumulated in a text buffer (the original wrote str into
      a BytesIO, which raises TypeError on Python 3);
    * byte values are used directly (bytes iterate as ints on Python 3,
      so ord() on them would raise);
    * a row's address is only emitted once the row is known to be
      non-empty (the original left a dangling trailing address).
    """
    from io import StringIO  # local import: text accumulator for the dump

    str_out = StringIO()
    byteslen = 16 * 3 - 1  # space taken up if a row has a full 16 bytes
    blanks = ' ' * byteslen
    file_in.seek(start_address)
    while True:
        if stop_address and file_in.tell() > stop_address:
            break
        row_address = file_in.tell()
        data = file_in.read(16)
        if not data:
            break
        if showAddress:
            str_out.write("%04x : " % row_address)
        # Normalize to ints so both py2 str and py3 bytes inputs work.
        row = [b if isinstance(b, int) else ord(b) for b in data]
        byte_string = ' '.join(["%02x" % b for b in row])
        str_out.write(byte_string)
        str_out.write(blanks[:byteslen - len(byte_string)])  # pad short rows
        str_out.write(' ')
        # Character column: printable ASCII (except backslash) or '.'.
        str_out.write(''.join(
            [chr(b) if 31 < b < 126 and b != 92 else '.' for b in row]))
        str_out.write("\n")
    return str_out.getvalue()
def pretty_print(ds, indent=0, indent_chars="   "):
    """Print a dataset directly, with indented levels.

    This is just like Dataset._pretty_str, but more useful for debugging as it
    prints each item immediately rather than composing a string, making it
    easier to immediately see where an error in processing a dataset starts.

    ds -- a pydicom Dataset (any iterable of data elements with .VR, .tag,
          .name and .value attributes).
    indent -- current nesting depth; callers normally leave this at 0.
    indent_chars -- string repeated per nesting level.
    """
    indentStr = indent_chars * indent
    nextIndentStr = indent_chars * (indent + 1)
    for data_element in ds:
        if data_element.VR == "SQ":   # a sequence: recurse into each item
            fmt_str = "{0:s}{1:s} {2:s} {3:d} item(s) ---"
            new_str = fmt_str.format(indentStr, str(data_element.tag),
                                     data_element.name, len(data_element.value))
            print(new_str)
            for dataset in data_element.value:
                pretty_print(dataset, indent + 1)
            print(nextIndentStr + "---------")
        else:
            print(indentStr + repr(data_element))
if __name__ == "__main__":
    # CLI: dump.py FILENAME [START [STOP]]
    import sys
    filename = sys.argv[1]
    start_address = 0
    stop_address = None
    # int(x, 0) accepts decimal, hex ("0x..."), octal and binary literals.
    # The original used eval(), which executes arbitrary expressions from
    # the command line — a code-injection hazard.
    if len(sys.argv) > 2:  # then have start address
        start_address = int(sys.argv[2], 0)
    if len(sys.argv) > 3:
        stop_address = int(sys.argv[3], 0)
    print(filedump(filename, start_address, stop_address))
| 34.474747
| 91
| 0.636683
|
acfe9444e58241095eda0d2f353e7a80a4a6736d
| 27,139
|
py
|
Python
|
two_stream_bert2.py
|
hansheng0512/LateTemporalModeling3DCNN
|
71c1d3fae9781c55059f0518e0b39781a535e153
|
[
"MIT"
] | null | null | null |
two_stream_bert2.py
|
hansheng0512/LateTemporalModeling3DCNN
|
71c1d3fae9781c55059f0518e0b39781a535e153
|
[
"MIT"
] | null | null | null |
two_stream_bert2.py
|
hansheng0512/LateTemporalModeling3DCNN
|
71c1d3fae9781c55059f0518e0b39781a535e153
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 10:42:00 2019
@author: esat
"""
import os
import time
import argparse
import shutil
import numpy as np
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from tensorboardX import SummaryWriter
from torch.optim import lr_scheduler
import video_transforms
import models
import datasets
from opt.AdamW import AdamW
from utils.model_path import rgb_3d_model_path_selection
# Discover available architectures/datasets and declare the CLI.
model_names = sorted(name for name in models.__dict__
    if not name.startswith("__")
    and callable(models.__dict__[name]))
dataset_names = sorted(name for name in datasets.__all__)
parser = argparse.ArgumentParser(description='PyTorch Two-Stream Action Recognition')
#parser.add_argument('--data', metavar='DIR', default='./datasets/ucf101_frames',
#                    help='path to dataset')
parser.add_argument('--settings', metavar='DIR', default='./datasets/settings',
                    help='path to datset setting files')
#parser.add_argument('--modality', '-m', metavar='MODALITY', default='rgb',
#                    choices=["rgb", "flow"],
#                    help='modality: rgb | flow')
parser.add_argument('--dataset', '-d', default='hmdb51',
                    choices=["ucf101", "hmdb51", "smtV2", "window"],
                    help='dataset: ucf101 | hmdb51 | smtV2')
parser.add_argument('--arch', '-a', default='rgb_resneXt3D64f101_bert10_FRMB',
                    choices=model_names,
                    help='model architecture: ' +
                    ' | '.join(model_names) +
                    ' (default: rgb_resneXt3D64f101_bert10_FRMB)')
parser.add_argument('-s', '--split', default=1, type=int, metavar='S',
                    help='which split of data to work on (default: 1)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
                    help='number of data loading workers (default: 2)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('-b', '--batch-size', default=8, type=int,
                    metavar='N', help='mini-batch size (default: 8)')
# Gradient accumulation factor: optimizer steps once per iter-size batches.
parser.add_argument('--iter-size', default=16, type=int,
                    metavar='I', help='iter size to reduce memory usage (default: 16)')
parser.add_argument('--lr', '--learning-rate', default=1e-5, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-3, type=float,
                    metavar='W', help='weight decay (default: 1e-3)')
parser.add_argument('--print-freq', default=400, type=int,
                    metavar='N', help='print frequency (default: 400)')
parser.add_argument('--save-freq', default=1, type=int,
                    metavar='N', help='save frequency (default: 1)')
parser.add_argument('--num-seg', default=1, type=int,
                    metavar='N', help='Number of segments in dataloader (default: 1)')
#parser.add_argument('--resume', default='./dene4', type=str, metavar='PATH',
#                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
# NOTE(review): dest 'contine' is a typo for 'continue' but is read as
# args.contine in main(); the help text below is also a copy-paste of the
# --evaluate one.  Both are user-visible/runtime values — fix deliberately,
# not in a documentation pass.
parser.add_argument('-c', '--continue', dest='contine', action='store_true',
                    help='evaluate model on validation set')
# Training-state globals shared with main()/train()/validate().
best_prec1 = 0
best_loss = 30
warmUpEpoch=5
smt_pretrained = False
HALF = False          # half-precision training switch (BatchNorm kept fp32)
training_continue = False
def main():
    """End-to-end training/evaluation driver.

    Parses CLI args, sizes the input crop from the architecture name,
    builds (or restores) the model and AdamW optimizer, constructs the
    per-modality data pipeline, then runs the epoch loop with periodic
    validation and checkpointing to ./checkpoint/<dataset>_<arch>_split<k>.
    """
    global args, best_prec1,model,writer,best_loss, length, width, height, input_size, scheduler
    args = parser.parse_args()
    training_continue = args.contine
    # Input scale is inferred from the architecture name: 112-px 3D nets
    # and r2plus1d use half-size crops, everything else full size.
    if '3D' in args.arch:
        if 'I3D' in args.arch or 'MFNET3D' in args.arch:
            if '112' in args.arch:
                scale = 0.5
            else:
                scale = 1
        else:
            if '224' in args.arch:
                scale = 1
            else:
                scale = 0.5
    elif 'r2plus1d' in args.arch:
        scale = 0.5
    else:
        scale = 1
    print('scale: %.1f' %(scale))
    input_size = int(224 * scale)
    width = int(340 * scale)
    height = int(256 * scale)
    saveLocation="./checkpoint/"+args.dataset+"_"+args.arch+"_split"+str(args.split)
    if not os.path.exists(saveLocation):
        os.makedirs(saveLocation)
    writer = SummaryWriter(saveLocation)
    # create model: fresh, restored-for-eval, or restored-to-continue.
    if args.evaluate:
        print("Building validation model ... ")
        model = build_model_validate()
        optimizer = AdamW(model.parameters(), lr= args.lr, weight_decay=args.weight_decay)
    elif training_continue:
        model, startEpoch, optimizer, best_prec1 = build_model_continue()
        # Read back the restored learning rate purely for the log line.
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
            #param_group['lr'] = lr
        print("Continuing with best precision: %.3f and start epoch %d and lr: %f" %(best_prec1,startEpoch,lr))
    else:
        print("Building model with ADAMW... ")
        model = build_model()
        optimizer = AdamW(model.parameters(), lr= args.lr, weight_decay=args.weight_decay)
        startEpoch = 0
    if HALF:
        model.half()  # convert to half precision
        for layer in model.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.float()  # BatchNorm stays fp32 for numerical stability
    print("Model %s is loaded. " % (args.arch))
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    criterion2 = nn.MSELoss().cuda()
    # LR is reduced when the validation classification loss plateaus.
    scheduler = lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min', patience=5, verbose=True)
    print("Saving everything to directory %s." % (saveLocation))
    if args.dataset=='ucf101':
        dataset='./datasets/ucf101_frames'
    elif args.dataset=='hmdb51':
        dataset='./datasets/hmdb51_frames'
    elif args.dataset=='smtV2':
        dataset='./datasets/smtV2_frames'
    elif args.dataset=='window':
        dataset='./datasets/window_frames'
    else:
        print("No convenient dataset entered, exiting....")
        return 0
    cudnn.benchmark = True
    # Modality ("rgb"/"flow"/...) is the prefix of the architecture name.
    modality=args.arch.split('_')[0]
    # Clip length: 3D/temporal models consume 16/32/64-frame clips,
    # 2D models consume single frames.
    if "3D" in args.arch or 'tsm' in args.arch or 'slowfast' in args.arch or 'r2plus1d' in args.arch:
        if '64f' in args.arch:
            length=64
        elif '32f' in args.arch:
            length=32
        else:
            length=16
    else:
        length=1
    # Data transforming: per-architecture normalization statistics.
    if modality == "rgb" or modality == "pose":
        is_color = True
        scale_ratios = [1.0, 0.875, 0.75, 0.66]
        if 'I3D' in args.arch:
            if 'resnet' in args.arch:
                clip_mean = [0.45, 0.45, 0.45] * args.num_seg * length
                clip_std = [0.225, 0.225, 0.225] * args.num_seg * length
            else:
                clip_mean = [0.5, 0.5, 0.5] * args.num_seg * length
                clip_std = [0.5, 0.5, 0.5] * args.num_seg * length
                #clip_std = [0.25, 0.25, 0.25] * args.num_seg * length
        elif 'MFNET3D' in args.arch:
            clip_mean = [0.48627451, 0.45882353, 0.40784314] * args.num_seg * length
            clip_std = [0.234, 0.234, 0.234]  * args.num_seg * length
        elif "3D" in args.arch:
            clip_mean = [114.7748, 107.7354, 99.4750] * args.num_seg * length
            clip_std = [1, 1, 1] * args.num_seg * length
        elif "r2plus1d" in args.arch:
            clip_mean = [0.43216, 0.394666, 0.37645] * args.num_seg * length
            clip_std = [0.22803, 0.22145, 0.216989] * args.num_seg * length
        elif "rep_flow" in args.arch:
            clip_mean = [0.5, 0.5, 0.5] * args.num_seg * length
            clip_std = [0.5, 0.5, 0.5] * args.num_seg * length
        elif "slowfast" in args.arch:
            clip_mean = [0.45, 0.45, 0.45] * args.num_seg * length
            clip_std = [0.225, 0.225, 0.225] * args.num_seg * length
        else:
            clip_mean = [0.485, 0.456, 0.406] * args.num_seg * length
            clip_std = [0.229, 0.224, 0.225] * args.num_seg * length
    # NOTE(review): this branch is unreachable — "pose" is already caught
    # by the first condition above.
    elif modality == "pose":
        is_color = True
        scale_ratios = [1.0, 0.875, 0.75, 0.66]
        clip_mean = [0.485, 0.456, 0.406] * args.num_seg
        clip_std = [0.229, 0.224, 0.225] * args.num_seg
    elif modality == "flow":
        is_color = False
        scale_ratios = [1.0, 0.875, 0.75, 0.66]
        if 'I3D' in args.arch:
            clip_mean = [0.5, 0.5] * args.num_seg * length
            clip_std = [0.5, 0.5] * args.num_seg * length
        elif "3D" in args.arch:
            clip_mean = [127.5, 127.5] * args.num_seg * length
            clip_std = [1, 1] * args.num_seg * length
        else:
            clip_mean = [0.5, 0.5] * args.num_seg * length
            clip_std = [0.226, 0.226] * args.num_seg * length
    elif modality == "both":
        is_color = True
        scale_ratios = [1.0, 0.875, 0.75, 0.66]
        clip_mean = [0.485, 0.456, 0.406, 0.5, 0.5] * args.num_seg * length
        clip_std = [0.229, 0.224, 0.225, 0.226, 0.226] * args.num_seg * length
    else:
        print("No such modality. Only rgb and flow supported.")
    normalize = video_transforms.Normalize(mean=clip_mean,
                                           std=clip_std)
    # Non-I3D 3D models use the ToTensor2 variant; everything else ToTensor.
    if "3D" in args.arch and not ('I3D' in args.arch):
        train_transform = video_transforms.Compose([
                video_transforms.MultiScaleCrop((input_size, input_size), scale_ratios),
                video_transforms.RandomHorizontalFlip(),
                video_transforms.ToTensor2(),
                normalize,
            ])
        val_transform = video_transforms.Compose([
                video_transforms.CenterCrop((input_size)),
                video_transforms.ToTensor2(),
                normalize,
            ])
    else:
        train_transform = video_transforms.Compose([
                video_transforms.MultiScaleCrop((input_size, input_size), scale_ratios),
                video_transforms.RandomHorizontalFlip(),
                video_transforms.ToTensor(),
                normalize,
            ])
        val_transform = video_transforms.Compose([
                video_transforms.CenterCrop((input_size)),
                video_transforms.ToTensor(),
                normalize,
            ])
    # data loading
    train_setting_file = "train_%s_split%d.txt" % (modality, args.split)
    train_split_file = os.path.join(args.settings, args.dataset, train_setting_file)
    val_setting_file = "val_%s_split%d.txt" % (modality, args.split)
    val_split_file = os.path.join(args.settings, args.dataset, val_setting_file)
    if not os.path.exists(train_split_file) or not os.path.exists(val_split_file):
        print("No split file exists in %s directory. Preprocess the dataset first" % (args.settings))
    train_dataset = datasets.__dict__[args.dataset](root=dataset,
                                                    source=train_split_file,
                                                    phase="train",
                                                    modality=modality,
                                                    is_color=is_color,
                                                    new_length=length,
                                                    new_width=width,
                                                    new_height=height,
                                                    video_transform=train_transform,
                                                    num_segments=args.num_seg)
    val_dataset = datasets.__dict__[args.dataset](root=dataset,
                                                  source=val_split_file,
                                                  phase="val",
                                                  modality=modality,
                                                  is_color=is_color,
                                                  new_length=length,
                                                  new_width=width,
                                                  new_height=height,
                                                  video_transform=val_transform,
                                                  num_segments=args.num_seg)
    print('{} samples found, {} train samples and {} test samples.'.format(len(val_dataset)+len(train_dataset),
                                                                           len(train_dataset),
                                                                           len(val_dataset)))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        prec1,prec3,lossClassification = validate(val_loader, model, criterion,criterion2,modality)
        return
    for epoch in range(startEpoch, args.epochs):
#        if learning_rate_index > max_learning_rate_decay_count:
#            break
#        adjust_learning_rate(optimizer, epoch)
        train(train_loader, model, criterion,criterion2, optimizer, epoch,modality)
        # evaluate on validation set
        prec1 = 0.0
        lossClassification = 0
        if (epoch + 1) % args.save_freq == 0:
            prec1,prec3,lossClassification = validate(val_loader, model, criterion,criterion2,modality)
            writer.add_scalar('data/top1_validation', prec1, epoch)
            writer.add_scalar('data/top3_validation', prec3, epoch)
            writer.add_scalar('data/classification_loss_validation', lossClassification, epoch)
            scheduler.step(lossClassification)
        # remember best prec@1 and save checkpoint
        is_best = prec1 >= best_prec1
        best_prec1 = max(prec1, best_prec1)
#        best_in_existing_learning_rate = max(prec1, best_in_existing_learning_rate)
#
#        if best_in_existing_learning_rate > prec1 + 1:
#            learning_rate_index = learning_rate_index
#            best_in_existing_learning_rate = 0
        # NOTE(review): on a "best" epoch the checkpoint is written twice
        # (once inside the is_best branch, once unconditionally below).
        if (epoch + 1) % args.save_freq == 0:
            checkpoint_name = "%03d_%s" % (epoch + 1, "checkpoint.pth.tar")
            if is_best:
                # (Turkish: "model saved as the best so far")
                print("Model son iyi olarak kaydedildi")
                save_checkpoint({
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_prec1': best_prec1,
                    'best_loss': best_loss,
                    'optimizer' : optimizer.state_dict(),
                }, is_best, checkpoint_name, saveLocation)
    checkpoint_name = "%03d_%s" % (epoch + 1, "checkpoint.pth.tar")
    save_checkpoint({
        'epoch': epoch + 1,
        'arch': args.arch,
        'state_dict': model.state_dict(),
        'best_prec1': best_prec1,
        'best_loss': best_loss,
        'optimizer' : optimizer.state_dict(),
    }, is_best, checkpoint_name, saveLocation)
    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()
def build_model():
    """Instantiate a fresh network for args.arch/args.dataset.

    Picks a pretrained-weight path from the modality encoded in the
    architecture name, builds the model with the dataset's class count,
    and wraps it in DataParallel when several GPUs are available.
    """
    modality = args.arch.split('_')[0]
    if modality == "rgb":
        model_path = rgb_3d_model_path_selection(args.arch)
    elif modality == "flow":
        model_path = ''
        if "3D" in args.arch:
            if 'I3D' in args.arch:
                model_path = './weights/flow_imagenet.pth'
            elif '3D' in args.arch:
                model_path = './weights/Flow_Kinetics_64f.pth'
    elif modality == "both":
        model_path = ''
    # Class counts for the supported datasets.
    class_counts = {'ucf101': 101, 'hmdb51': 51, 'smtV2': 174, 'window': 3}
    if args.dataset in class_counts:
        print('model path is: %s' %(model_path))
        model = models.__dict__[args.arch](modelPath=model_path,
                                           num_classes=class_counts[args.dataset],
                                           length=args.num_seg)
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model = model.cuda()
    return model
def build_model_validate():
    """Load the best checkpoint of the current run and return the network
    in eval mode (DataParallel-wrapped when several GPUs are present)."""
    modelLocation = "./checkpoint/" + args.dataset + "_" + args.arch + "_split" + str(args.split)
    params = torch.load(os.path.join(modelLocation, 'model_best.pth.tar'))
    print(modelLocation)
    class_counts = {'ucf101': 101, 'hmdb51': 51}
    if args.dataset in class_counts:
        model = models.__dict__[args.arch](modelPath='',
                                           num_classes=class_counts[args.dataset],
                                           length=args.num_seg)
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(params['state_dict'])
    model.cuda()
    model.eval()
    return model
def build_model_continue():
    """Restore model, optimizer state, epoch counter and best precision
    from the current run's best checkpoint so training can resume."""
    modelLocation = "./checkpoint/" + args.dataset + "_" + args.arch + "_split" + str(args.split)
    params = torch.load(os.path.join(modelLocation, 'model_best.pth.tar'))
    print(modelLocation)
    class_counts = {'ucf101': 101, 'hmdb51': 51}
    if args.dataset in class_counts:
        model = models.__dict__[args.arch](modelPath='',
                                           num_classes=class_counts[args.dataset],
                                           length=args.num_seg)
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(params['state_dict'])
    model = model.cuda()
    # Rebuild the optimizer with the CLI hyper-parameters, then overwrite
    # its state with the checkpointed one.
    optimizer = AdamW(model.parameters(), lr= args.lr, weight_decay=args.weight_decay)
    optimizer.load_state_dict(params['optimizer'])
    return model, params['epoch'], optimizer, params['best_prec1']
def train(train_loader, model, criterion, criterion2, optimizer, epoch,modality):
    """Run one training epoch with gradient accumulation.

    Gradients accumulate over args.iter_size mini-batches before each
    optimizer step; metrics are likewise accumulated per effective batch.
    `criterion2` (MSE) is accepted for interface symmetry with validate()
    but unused here — only the classification loss is backpropagated.
    """
    batch_time = AverageMeter()
    lossesClassification = AverageMeter()
    top1 = AverageMeter()
    top3 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    optimizer.zero_grad()
    # Per-effective-batch accumulators, reset after every optimizer step.
    loss_mini_batch_classification = 0.0
    acc_mini_batch = 0.0
    acc_mini_batch_top3 = 0.0
    totalSamplePerIter=0
    for i, (inputs, targets) in enumerate(train_loader):
        # Reshape flat frame stacks into the layout each model family expects.
        if modality == "rgb" or modality == "pose":
            if "3D" in args.arch or "r2plus1d" in args.arch or 'slowfast' in args.arch:
                inputs=inputs.view(-1,length,3,input_size,input_size).transpose(1,2)
        elif modality == "flow":
            if "3D" in args.arch or "r2plus1d" in args.arch:
                inputs=inputs.view(-1,length,2,input_size,input_size).transpose(1,2)
            else:
                inputs=inputs.view(-1,2*length,input_size,input_size)
        elif modality == "both":
            inputs=inputs.view(-1,5*length,input_size,input_size)
        if HALF:
            inputs = inputs.cuda().half()
        else:
            inputs = inputs.cuda()
        targets = targets.cuda()
        # Model also returns BERT-related tensors; only logits are used here.
        output, input_vectors, sequenceOut, maskSample = model(inputs)
#        maskSample=maskSample.cuda()
#        input_vectors=(1-maskSample[:,1:]).unsqueeze(2)*input_vectors
#        sequenceOut=(1-maskSample[:,1:]).unsqueeze(2)*sequenceOut
        # measure accuracy and record loss
#        input_vectors_rank=input_vectors.view(-1,input_vectors.shape[-1])
#        targetRank=torch.tensor(range(args.num_seg)).repeat(input_vectors.shape[0]).cuda()
#        rankingFC = nn.Linear(input_vectors.shape[-1], args.num_seg).cuda()
#        out_rank = rankingFC(input_vectors_rank)
        prec1, prec3 = accuracy(output.data, targets, topk=(1, 3))
        acc_mini_batch += prec1.item()
        acc_mini_batch_top3 += prec3.item()
        lossClassification = criterion(output, targets)
        # Scale so the accumulated gradient matches a full-batch average.
        lossClassification = lossClassification / args.iter_size
        #totalLoss=lossMSE
        totalLoss=lossClassification
        #totalLoss = lossMSE + lossClassification
        loss_mini_batch_classification += lossClassification.data.item()
        totalLoss.backward()
        totalSamplePerIter +=  output.size(0)
        if (i+1) % args.iter_size == 0:
            # compute gradient and do SGD step
            optimizer.step()
            optimizer.zero_grad()
            lossesClassification.update(loss_mini_batch_classification, totalSamplePerIter)
            top1.update(acc_mini_batch/args.iter_size, totalSamplePerIter)
            top3.update(acc_mini_batch_top3/args.iter_size, totalSamplePerIter)
            batch_time.update(time.time() - end)
            end = time.time()
            loss_mini_batch_classification = 0
            acc_mini_batch = 0
            acc_mini_batch_top3 = 0.0
            totalSamplePerIter = 0.0
            #scheduler.step()
        if (i+1) % args.print_freq == 0:
            print('[%d] time: %.3f loss: %.4f' %(i,batch_time.avg,lossesClassification.avg))
    print(' * Epoch: {epoch} Prec@1 {top1.avg:.3f} Prec@3 {top3.avg:.3f} Classification Loss {lossClassification.avg:.4f}\n'
          .format(epoch = epoch, top1=top1, top3=top3, lossClassification=lossesClassification))
    writer.add_scalar('data/classification_loss_training', lossesClassification.avg, epoch)
    writer.add_scalar('data/top1_training', top1.avg, epoch)
    writer.add_scalar('data/top3_training', top3.avg, epoch)
def validate(val_loader, model, criterion,criterion2,modality):
    """Evaluate ``model`` on ``val_loader`` without gradients.

    Args:
        val_loader: iterable yielding ``(inputs, targets)`` batches.
        model: network returning ``(logits, input_vectors, sequenceOut, mask)``.
        criterion: classification loss applied to the logits.
        criterion2: unused in this function; kept for signature parity with train().
        modality: one of "rgb", "pose", "flow", "both"; selects input reshaping.

    Returns:
        Tuple ``(top1_avg, top3_avg, avg_classification_loss)``.
    """
    batch_time = AverageMeter()
    lossesClassification = AverageMeter()
    top1 = AverageMeter()
    top3 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    with torch.no_grad():
        for i, (inputs, targets) in enumerate(val_loader):
            # Same modality-dependent reshaping used during training.
            if modality == "rgb" or modality == "pose":
                if "3D" in args.arch or "r2plus1d" in args.arch or 'slowfast' in args.arch:
                    inputs=inputs.view(-1,length,3,input_size,input_size).transpose(1,2)
            elif modality == "flow":
                if "3D" in args.arch or "r2plus1d" in args.arch:
                    inputs=inputs.view(-1,length,2,input_size,input_size).transpose(1,2)
                else:
                    inputs=inputs.view(-1,2*length,input_size,input_size)
            elif modality == "both":
                inputs=inputs.view(-1,5*length,input_size,input_size)
            if HALF:
                inputs = inputs.cuda().half()
            else:
                inputs = inputs.cuda()
            targets = targets.cuda()
            # compute output
            output, input_vectors, sequenceOut, _ = model(inputs)
            lossClassification = criterion(output, targets)
            # measure accuracy and record loss
            prec1, prec3 = accuracy(output.data, targets, topk=(1, 3))
            lossesClassification.update(lossClassification.data.item(), output.size(0))
            top1.update(prec1.item(), output.size(0))
            top3.update(prec3.item(), output.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
    print(' * * Prec@1 {top1.avg:.3f} Prec@3 {top3.avg:.3f} Classification Loss {lossClassification.avg:.4f}\n'
          .format(top1=top1, top3=top3, lossClassification=lossesClassification))
    return top1.avg, top3.avg, lossesClassification.avg
def save_checkpoint(state, is_best, filename, resume_path):
    """Persist ``state`` to ``resume_path/filename``; when ``is_best`` is true,
    also mirror it to ``resume_path/model_best.pth.tar``."""
    checkpoint_file = os.path.join(resume_path, filename)
    torch.save(state, checkpoint_file)
    if is_best:
        shutil.copyfile(checkpoint_file,
                        os.path.join(resume_path, 'model_best.pth.tar'))
class AverageMeter(object):
    """Tracks the most recent value plus a running (optionally weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 for every
    milestone in ``args.lr_steps`` that ``epoch`` has already passed."""
    milestones_passed = sum(1 for step in args.lr_steps if epoch >= step)
    lr = args.lr * (0.1 ** milestones_passed)
    print("Current learning rate is %4.6f:" % lr)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def adjust_learning_rate2(optimizer, epoch):
    """Linear warm-up for the first ``warmUpEpoch`` epochs, then inverse-time
    decay ``lr / (1 + 0.2 * t)`` where ``t`` counts post-warm-up epochs."""
    decay_rate = 0.2
    if epoch < warmUpEpoch:
        # Ramp linearly from lr/warmUpEpoch up to the full base lr.
        lr = args.lr * (epoch + 1) / warmUpEpoch
    else:
        steps_after_warmup = epoch + 1 - warmUpEpoch
        lr = args.lr * (1 / (1 + steps_after_warmup * decay_rate))
    print("Current learning rate is %4.6f:" % lr)
    for group in optimizer.param_groups:
        group['lr'] = lr
def adjust_learning_rate3(optimizer, epoch):
    """Linear warm-up for the first ``warmUpEpoch`` epochs, then exponential
    decay by a factor of 0.97 per post-warm-up epoch."""
    decay_rate = 0.97
    if epoch < warmUpEpoch:
        lr = args.lr * (epoch + 1) / warmUpEpoch
    else:
        lr = args.lr * decay_rate ** (epoch + 1 - warmUpEpoch)
    print("Current learning rate is %4.6f:" % lr)
    for group in optimizer.param_groups:
        group['lr'] = lr
def adjust_learning_rate4(optimizer, learning_rate_index):
    """Set the learning rate to ``args.lr * 0.1**learning_rate_index`` on every
    parameter group (index-driven step decay)."""
    lr = args.lr * (0.1 ** learning_rate_index)
    print("Current learning rate is %4.8f:" % lr)
    for group in optimizer.param_groups:
        group['lr'] = lr
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: ``(batch, num_classes)`` score tensor.
        target: ``(batch,)`` tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of scalar tensors, one per k, each holding the top-k accuracy
        in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1), not view(-1): the sliced tensor may be non-contiguous,
        # and .view() raises a RuntimeError on recent PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
if __name__ == '__main__':
    # Script entry point; main() is defined earlier in this file.
    main()
| 40.627246
| 124
| 0.589779
|
acfe94f633ccfb1fdfe48cb1bba8161a19ba7bcf
| 13,728
|
py
|
Python
|
tests/test_engine.py
|
longkyle/scrapy
|
81218b62cbaf2a3f772845b65c3e7126371a09b6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_engine.py
|
longkyle/scrapy
|
81218b62cbaf2a3f772845b65c3e7126371a09b6
|
[
"BSD-3-Clause"
] | 1
|
2020-04-03T11:16:19.000Z
|
2020-04-03T11:16:19.000Z
|
tests/test_engine.py
|
grammy-jiang/scrapy
|
519f752d6dcd3065e81e9387062003a39f3f0b81
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Scrapy engine tests
This starts a testing web server (using twisted.server.Site) and then crawls it
with the Scrapy crawler.
To view the testing web server in a browser you can start it by running this
module with the ``runserver`` argument::
python test_engine.py runserver
"""
import os
import re
import sys
from collections import defaultdict
from urllib.parse import urlparse
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.web import server, static, util
from pydispatch import dispatcher
from scrapy import signals
from scrapy.core.engine import ExecutionEngine
from scrapy.http import Request
from scrapy.item import Item, Field
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Spider
from scrapy.utils.signal import disconnect_all
from scrapy.utils.test import get_crawler
from tests import tests_datadir, get_testdata
class TestItem(Item):
    # Item with the three fields TestSpider.parse_item populates.
    name = Field()
    url = Field()
    price = Field()
class TestSpider(Spider):
    """Spider for the local test site: follows itemN.html links from the
    crawled pages and scrapes name/url/price into ``item_cls`` instances."""
    name = "scrapytest.org"
    allowed_domains = ["scrapytest.org", "localhost"]
    itemurl_re = re.compile(r"item\d+.html")            # links worth following
    name_re = re.compile(r"<h1>(.*?)</h1>", re.M)       # item name in page body
    price_re = re.compile(r">Price: \$(.*?)<", re.M)    # item price in page body
    item_cls = TestItem

    def parse(self, response):
        """Yield a parse_item request for every extracted link whose URL
        looks like an item page."""
        xlink = LinkExtractor()
        itemre = re.compile(self.itemurl_re)
        for link in xlink.extract_links(response):
            if itemre.search(link.url):
                yield Request(url=link.url, callback=self.parse_item)

    def parse_item(self, response):
        """Scrape a single item page into an ``item_cls`` instance; name and
        price are set only when their regexes match the body."""
        item = self.item_cls()
        m = self.name_re.search(response.text)
        if m:
            item['name'] = m.group(1)
        item['url'] = response.url
        m = self.price_re.search(response.text)
        if m:
            item['price'] = m.group(1)
        return item
class TestDupeFilterSpider(TestSpider):
    def start_requests(self):
        # Without dont_filter=True, duplicate start URLs (the second
        # /redirect) are subject to the dupefilter.
        return (Request(url) for url in self.start_urls)  # no dont_filter=True
class DictItemsSpider(TestSpider):
    # Same spider, but scraped items are plain dicts instead of TestItem.
    item_cls = dict
class ItemZeroDivisionErrorSpider(TestSpider):
    # Routes scraped items through a pipeline that raises ZeroDivisionError,
    # so tests can assert on item_error signal handling.
    custom_settings = {
        "ITEM_PIPELINES": {
            "tests.pipelines.ProcessWithZeroDivisionErrorPipiline": 300,
        }
    }
def start_test_site(debug=False):
    """Serve the static test site plus /redirect, /redirected and /numbers
    endpoints on a random local port; returns the listening port object."""
    root_dir = os.path.join(tests_datadir, "test_site")
    r = static.File(root_dir)
    r.putChild(b"redirect", util.Redirect(b"/redirected"))
    r.putChild(b"redirected", static.Data(b"Redirected here", "text/plain"))
    # /numbers serves 2**14 concatenated decimal numbers as one large body.
    numbers = [str(x).encode("utf8") for x in range(2**14)]
    r.putChild(b"numbers", static.Data(b"".join(numbers), "text/plain"))
    # Port 0 lets the OS pick a free port; bound to loopback only.
    port = reactor.listenTCP(0, server.Site(r), interface="127.0.0.1")
    if debug:
        print("Test server running at http://localhost:%d/ - hit Ctrl-C to finish."
              % port.getHost().port)
    return port
class CrawlerRun:
    """A class to run the crawler and keep track of events occurred"""

    def __init__(self, spider_class):
        self.spider = None
        self.respplug = []    # (response, spider) from response_downloaded
        self.reqplug = []     # (request, spider) from request_scheduled
        self.reqdropped = []  # (request, spider) from request_dropped
        self.reqreached = []  # (request, spider) from request_reached_downloader
        self.itemerror = []   # (item, response, spider, failure) from item_error
        self.itemresp = []    # (item, response) from item_scraped
        self.bytes = defaultdict(lambda: list())  # request -> list of byte chunks
        self.signals_caught = {}  # signal -> kwargs of its last emission
        self.spider_class = spider_class

    def run(self):
        """Start the test site and crawl it with ``spider_class``; returns a
        Deferred that fires once the engine stops."""
        self.port = start_test_site()
        self.portno = self.port.getHost().port
        start_urls = [
            self.geturl("/"),
            self.geturl("/redirect"),
            self.geturl("/redirect"),  # duplicate
            self.geturl("/numbers"),
        ]
        # Record every public scrapy signal for later inspection.
        for name, signal in vars(signals).items():
            if not name.startswith('_'):
                dispatcher.connect(self.record_signal, signal)
        self.crawler = get_crawler(self.spider_class)
        self.crawler.signals.connect(self.item_scraped, signals.item_scraped)
        self.crawler.signals.connect(self.item_error, signals.item_error)
        self.crawler.signals.connect(self.bytes_received, signals.bytes_received)
        self.crawler.signals.connect(self.request_scheduled, signals.request_scheduled)
        self.crawler.signals.connect(self.request_dropped, signals.request_dropped)
        self.crawler.signals.connect(self.request_reached, signals.request_reached_downloader)
        self.crawler.signals.connect(self.response_downloaded, signals.response_downloaded)
        self.crawler.crawl(start_urls=start_urls)
        self.spider = self.crawler.spider
        self.deferred = defer.Deferred()
        dispatcher.connect(self.stop, signals.engine_stopped)
        return self.deferred

    def stop(self):
        """Tear down the test server and signal connections, then fire the
        Deferred returned by run()."""
        self.port.stopListening()
        for name, signal in vars(signals).items():
            if not name.startswith('_'):
                disconnect_all(signal)
        self.deferred.callback(None)

    def geturl(self, path):
        """Absolute URL on the test server for the given path."""
        return "http://localhost:%s%s" % (self.portno, path)

    def getpath(self, url):
        """Path component of an absolute URL."""
        u = urlparse(url)
        return u.path

    # --- signal handlers: each records the event's arguments ---

    def item_error(self, item, response, spider, failure):
        self.itemerror.append((item, response, spider, failure))

    def item_scraped(self, item, spider, response):
        self.itemresp.append((item, response))

    def bytes_received(self, data, request, spider):
        self.bytes[request].append(data)

    def request_scheduled(self, request, spider):
        self.reqplug.append((request, spider))

    def request_reached(self, request, spider):
        self.reqreached.append((request, spider))

    def request_dropped(self, request, spider):
        self.reqdropped.append((request, spider))

    def response_downloaded(self, response, spider):
        self.respplug.append((response, spider))

    def record_signal(self, *args, **kwargs):
        """Record a signal and its parameters"""
        signalargs = kwargs.copy()
        sig = signalargs.pop('signal')
        signalargs.pop('sender', None)
        self.signals_caught[sig] = signalargs
class EngineTest(unittest.TestCase):
    """End-to-end engine tests: crawl the local test site and assert on the
    requests, responses, items, byte chunks and signals that CrawlerRun
    recorded along the way."""

    @defer.inlineCallbacks
    def test_crawler(self):
        # Exercise both Item-based and plain-dict item spiders.
        for spider in TestSpider, DictItemsSpider:
            self.run = CrawlerRun(spider)
            yield self.run.run()
            self._assert_visited_urls()
            self._assert_scheduled_requests(urls_to_visit=9)
            self._assert_downloaded_responses()
            self._assert_scraped_items()
            self._assert_signals_caught()
            self._assert_bytes_received()

    @defer.inlineCallbacks
    def test_crawler_dupefilter(self):
        self.run = CrawlerRun(TestDupeFilterSpider)
        yield self.run.run()
        # One fewer scheduled request than test_crawler: the duplicate
        # /redirect start URL is filtered.
        self._assert_scheduled_requests(urls_to_visit=8)
        self._assert_dropped_requests()

    @defer.inlineCallbacks
    def test_crawler_itemerror(self):
        self.run = CrawlerRun(ItemZeroDivisionErrorSpider)
        yield self.run.run()
        self._assert_items_error()

    def _assert_visited_urls(self):
        """Every core page of the test site must have produced a response."""
        must_be_visited = ["/", "/redirect", "/redirected",
                           "/item1.html", "/item2.html", "/item999.html"]
        urls_visited = set([rp[0].url for rp in self.run.respplug])
        urls_expected = set([self.run.geturl(p) for p in must_be_visited])
        assert urls_expected <= urls_visited, "URLs not visited: %s" % list(urls_expected - urls_visited)

    def _assert_scheduled_requests(self, urls_to_visit=None):
        """Scheduled requests must balance against drops plus downloads."""
        self.assertEqual(urls_to_visit, len(self.run.reqplug))
        paths_expected = ['/item999.html', '/item2.html', '/item1.html']
        urls_requested = set([rq[0].url for rq in self.run.reqplug])
        urls_expected = set([self.run.geturl(p) for p in paths_expected])
        assert urls_expected <= urls_requested
        scheduled_requests_count = len(self.run.reqplug)
        dropped_requests_count = len(self.run.reqdropped)
        responses_count = len(self.run.respplug)
        self.assertEqual(scheduled_requests_count,
                         dropped_requests_count + responses_count)
        self.assertEqual(len(self.run.reqreached),
                         responses_count)

    def _assert_dropped_requests(self):
        self.assertEqual(len(self.run.reqdropped), 1)

    def _assert_downloaded_responses(self):
        # response tests
        self.assertEqual(9, len(self.run.respplug))
        self.assertEqual(9, len(self.run.reqreached))
        for response, _ in self.run.respplug:
            if self.run.getpath(response.url) == '/item999.html':
                self.assertEqual(404, response.status)
            if self.run.getpath(response.url) == '/redirect':
                self.assertEqual(302, response.status)

    def _assert_items_error(self):
        """Both scraped items must have raised ZeroDivisionError in the pipeline."""
        self.assertEqual(2, len(self.run.itemerror))
        for item, response, spider, failure in self.run.itemerror:
            self.assertEqual(failure.value.__class__, ZeroDivisionError)
            self.assertEqual(spider, self.run.spider)
            self.assertEqual(item['url'], response.url)
            if 'item1.html' in item['url']:
                self.assertEqual('Item 1 name', item['name'])
                self.assertEqual('100', item['price'])
            if 'item2.html' in item['url']:
                self.assertEqual('Item 2 name', item['name'])
                self.assertEqual('200', item['price'])

    def _assert_scraped_items(self):
        self.assertEqual(2, len(self.run.itemresp))
        for item, response in self.run.itemresp:
            self.assertEqual(item['url'], response.url)
            if 'item1.html' in item['url']:
                self.assertEqual('Item 1 name', item['name'])
                self.assertEqual('100', item['price'])
            if 'item2.html' in item['url']:
                self.assertEqual('Item 2 name', item['name'])
                self.assertEqual('200', item['price'])

    def _assert_bytes_received(self):
        """bytes_received chunks must reassemble into each full response body."""
        self.assertEqual(9, len(self.run.bytes))
        for request, data in self.run.bytes.items():
            joined_data = b"".join(data)
            if self.run.getpath(request.url) == "/":
                self.assertEqual(joined_data, get_testdata("test_site", "index.html"))
            elif self.run.getpath(request.url) == "/item1.html":
                self.assertEqual(joined_data, get_testdata("test_site", "item1.html"))
            elif self.run.getpath(request.url) == "/item2.html":
                self.assertEqual(joined_data, get_testdata("test_site", "item2.html"))
            elif self.run.getpath(request.url) == "/redirected":
                self.assertEqual(joined_data, b"Redirected here")
            elif self.run.getpath(request.url) == '/redirect':
                self.assertEqual(
                    joined_data,
                    b"\n<html>\n"
                    b"    <head>\n"
                    b"        <meta http-equiv=\"refresh\" content=\"0;URL=/redirected\">\n"
                    b"    </head>\n"
                    b"    <body bgcolor=\"#FFFFFF\" text=\"#000000\">\n"
                    b"    <a href=\"/redirected\">click here</a>\n"
                    b"    </body>\n"
                    b"</html>\n"
                )
            # BUG FIX: this path was misspelled "/tem999.html", so the 404
            # body was never actually checked (dead branch).
            elif self.run.getpath(request.url) == "/item999.html":
                self.assertEqual(
                    joined_data,
                    b"\n<html>\n"
                    b"  <head><title>404 - No Such Resource</title></head>\n"
                    b"  <body>\n"
                    b"    <h1>No Such Resource</h1>\n"
                    b"    <p>File not found.</p>\n"
                    b"  </body>\n"
                    b"</html>\n"
                )
            elif self.run.getpath(request.url) == "/numbers":
                # signal was fired multiple times
                self.assertTrue(len(data) > 1)
                # bytes were received in order
                numbers = [str(x).encode("utf8") for x in range(2**14)]
                self.assertEqual(joined_data, b"".join(numbers))

    def _assert_signals_caught(self):
        assert signals.engine_started in self.run.signals_caught
        assert signals.engine_stopped in self.run.signals_caught
        assert signals.spider_opened in self.run.signals_caught
        assert signals.spider_idle in self.run.signals_caught
        assert signals.spider_closed in self.run.signals_caught
        self.assertEqual({'spider': self.run.spider},
                         self.run.signals_caught[signals.spider_opened])
        self.assertEqual({'spider': self.run.spider},
                         self.run.signals_caught[signals.spider_idle])
        self.assertEqual({'spider': self.run.spider, 'reason': 'finished'},
                         self.run.signals_caught[signals.spider_closed])

    @defer.inlineCallbacks
    def test_close_downloader(self):
        e = ExecutionEngine(get_crawler(TestSpider), lambda _: None)
        yield e.close()

    @defer.inlineCallbacks
    def test_close_spiders_downloader(self):
        e = ExecutionEngine(get_crawler(TestSpider), lambda _: None)
        yield e.open_spider(TestSpider(), [])
        self.assertEqual(len(e.open_spiders), 1)
        yield e.close()
        self.assertEqual(len(e.open_spiders), 0)

    @defer.inlineCallbacks
    def test_close_engine_spiders_downloader(self):
        e = ExecutionEngine(get_crawler(TestSpider), lambda _: None)
        yield e.open_spider(TestSpider(), [])
        e.start()
        self.assertTrue(e.running)
        yield e.close()
        self.assertFalse(e.running)
        self.assertEqual(len(e.open_spiders), 0)
if __name__ == "__main__":
    # `python test_engine.py runserver` serves the test site for manual browsing.
    if len(sys.argv) > 1 and sys.argv[1] == 'runserver':
        start_test_site(debug=True)
        reactor.run()
| 37.714286
| 105
| 0.621649
|
acfe95ababd05bfd9bf8386a164cb630c7b5f41a
| 3,195
|
py
|
Python
|
tests/test_model_funcs.py
|
linkian209/PyBitWarden
|
bc686da04a362a99a30a66de1d484fd585300f74
|
[
"MIT"
] | null | null | null |
tests/test_model_funcs.py
|
linkian209/PyBitWarden
|
bc686da04a362a99a30a66de1d484fd585300f74
|
[
"MIT"
] | null | null | null |
tests/test_model_funcs.py
|
linkian209/PyBitWarden
|
bc686da04a362a99a30a66de1d484fd585300f74
|
[
"MIT"
] | null | null | null |
"""tests.test_model_funcs
This module tests the helper functions for the models of this app.
Attributes:
UUID_REGEX: A regular expression for testing valid UUIDS
"""
import pytest # noqa
import re
import time
import models.funcs as test_funcs
# Raw string: \A and \Z are regex anchors, not string escapes; in a non-raw
# string they trigger invalid-escape DeprecationWarnings (future SyntaxError).
UUID_REGEX = r'\A[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}\Z'  # noqa
def testGenerateSecureUUIDMakesValidUUID():
    """generateSecureUUID must emit a string matching the UUID pattern."""
    candidate = test_funcs.generateSecureUUID()
    pattern = re.compile(UUID_REGEX)
    assert pattern.match(candidate) is not None
def testGenerateSecureUUIDMakesRandomUUIDs():
    """
    This test verifies that the function will generate unique UUIDs.
    """
    test_uuids = [test_funcs.generateSecureUUID() for x in range(100)]
    # '==' (value equality), not 'is': identity comparison of ints only
    # happens to work via CPython's small-integer cache and is not a
    # correct way to compare lengths.
    assert len(test_uuids) == len(set(test_uuids))
def testConstantTimeCompareWorks():
    """constantTimeCompare must agree with ordinary equality on both an
    equal pair and an unequal pair."""
    base = 'test'
    assert test_funcs.constantTimeCompare(base, 'test')
    assert not test_funcs.constantTimeCompare(base, 't3st')
def testConstantTimeCompareIsConstantTime():
    """Comparison time should not depend on where the mismatching byte is:
    each differing variant must take about as long as the all-equal baseline."""
    correct_val = 'test_val'
    variants = ['Test_val', 'tesT_val', 'test_vAl']
    time_margin = .000008

    # Baseline: compare the correct value against itself once.
    start = time.time()
    test_funcs.constantTimeCompare(correct_val, correct_val)
    base_line = time.time() - start

    # Each variant differs in a different position; timing must stay flat.
    for variant in variants:
        start = time.time()
        test_funcs.constantTimeCompare(correct_val, variant)
        elapsed = time.time() - start
        assert abs(elapsed - base_line) <= time_margin
def testUppercaseFirstHash():
    """uppercaseFirstHash must normalise every key to Xxxx capitalisation
    while leaving the values untouched."""
    source = {'test1': 1, 'TeSt2': 2, 'TEST3': 3, 'tesT4': 4}
    expected = {'Test1': 1, 'Test2': 2, 'Test3': 3, 'Test4': 4}
    assert test_funcs.uppercaseFirstHash(source) == expected
def testUppercaseFirstHashRaisesTypeError():
    """
    This test verifies that uppercaseFirstHash raises the TypeError if a value
    that is not a dict is inputted
    """
    # Each call needs its own pytest.raises block: grouping them under one
    # context (as before) means only the first raising call ever executes
    # and the remaining inputs are never tested.
    bad_inputs = [1, 'a', [1, 2, 3], ('1', 2), True, None]
    for bad in bad_inputs:
        with pytest.raises(TypeError):
            test_funcs.uppercaseFirstHash(bad)
| 29.045455
| 101
| 0.705164
|
acfe9684bf6964bd892e4d55d0570719ec061188
| 7,854
|
py
|
Python
|
platforms/hermes-python/tests/api/ffi/test_dialogue.py
|
elbywan/hermes-protocol
|
da23326f026fbfb8a23413c78383301e523188f3
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 37
|
2018-12-07T16:32:32.000Z
|
2021-07-08T17:43:27.000Z
|
platforms/hermes-python/tests/api/ffi/test_dialogue.py
|
elbywan/hermes-protocol
|
da23326f026fbfb8a23413c78383301e523188f3
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 52
|
2018-12-28T11:10:05.000Z
|
2019-11-13T02:01:41.000Z
|
platforms/hermes-python/tests/api/ffi/test_dialogue.py
|
elbywan/hermes-protocol
|
da23326f026fbfb8a23413c78383301e523188f3
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 34
|
2019-03-21T21:53:35.000Z
|
2020-07-31T16:50:43.000Z
|
from __future__ import unicode_literals
import mock
import pytest
from hermes_python.api.ffi.dialogue import DialogueFFI
from hermes_python.ontology.dialogue import StartSessionMessage, SessionInitAction, SessionInitNotification, \
ContinueSessionMessage, EndSessionMessage, DialogueConfigureMessage, DialogueConfigureIntent
DUMMY_INTENT_NAME = "INTENT"  # placeholder intent name shared by the tests


@pytest.fixture()
def dialogue_ffi():
    # Fresh DialogueFFI (default construction) for each test that requests it.
    return DialogueFFI()
@mock.patch("hermes_python.api.ffi.dialogue.utils")
class TestDialogueMethodsCallsUnderlyingFFIfunctions:
    """With the FFI utils module mocked out, verify that each DialogueFFI
    register_*/publish_* method stores its C callback wrapper and/or calls
    the matching hermes_dialogue_* utility function (the *_json variant by
    default, the plain variant when constructed with use_json_api=False)."""

    def test_subscribe_intent_correctly_registers_callback(self, ffi_utils, dialogue_ffi):
        def user_callback(_, __):
            pass
        hermes_client = mock.Mock()
        dialogue_ffi.register_subscribe_intent_handler(DUMMY_INTENT_NAME, user_callback, hermes_client)
        # One wrapper stored per registered handler.
        assert len(dialogue_ffi._c_callback_subscribe_intent) == 1
        ffi_utils.hermes_dialogue_subscribe_intent_json.assert_called_once()

    def test_subscribe_intent_correctly_registers_two_callbacks_for_same_intent(self, ffi_utils, dialogue_ffi):
        def user_callback_1(hermes, intentMessage):
            pass

        def user_callback_2(hermes, intentMessage):
            pass
        hermes_client = mock.Mock()
        dialogue_ffi.register_subscribe_intent_handler(DUMMY_INTENT_NAME, user_callback_1, hermes_client)
        dialogue_ffi.register_subscribe_intent_handler(DUMMY_INTENT_NAME, user_callback_2, hermes_client)
        # Registering twice for the same intent keeps both wrappers and
        # performs two FFI registrations.
        assert len(dialogue_ffi._c_callback_subscribe_intent) == 2
        assert ffi_utils.hermes_dialogue_subscribe_intent_json.call_count == 2

    def test_successful_registration_c_handler_callback(self, ffi_utils):
        # _register_c_intent_handler dispatches to <name>_json or <name>
        # depending on use_json_api.
        dialogue_ffi = DialogueFFI()
        c_handler = mock.Mock()
        dialogue_ffi._register_c_intent_handler('test_function', DUMMY_INTENT_NAME, c_handler)
        ffi_utils.test_function_json.assert_called_once()
        ffi_without_json_api = DialogueFFI(use_json_api=False)
        ffi_without_json_api._register_c_intent_handler('test_function', DUMMY_INTENT_NAME, c_handler)
        ffi_utils.test_function.assert_called_once()

    def test_subscribe_intents_correctly_registers_callback(self, ffi_utils, dialogue_ffi):
        def user_callback(_, __):
            pass
        hermes_client = mock.Mock()
        dialogue_ffi.register_subscribe_intents_handler(user_callback, hermes_client)
        assert dialogue_ffi._c_callback_subscribe_intents is not None
        ffi_utils.hermes_dialogue_subscribe_intents_json.assert_called_once()

    def test_subscribe_session_started_correctly_registers_callback(self, ffi_utils, dialogue_ffi):
        def user_callback(_, __):
            pass
        hermes_client = mock.Mock()
        dialogue_ffi.register_session_started_handler(user_callback, hermes_client)
        assert dialogue_ffi._c_callback_subscribe_session_started is not None
        ffi_utils.hermes_dialogue_subscribe_session_started_json.assert_called_once()

    def test_subscribe_session_queued_correctly_registers_callback(self, ffi_utils, dialogue_ffi):
        def user_callback(_, __):
            pass
        hermes_client = mock.Mock()
        dialogue_ffi.register_session_queued_handler(user_callback, hermes_client)
        assert dialogue_ffi._c_callback_subscribe_session_queued is not None
        ffi_utils.hermes_dialogue_subscribe_session_queued_json.assert_called_once()

    def test_subscribe_session_ended_correctly_registers_callback(self, ffi_utils, dialogue_ffi):
        def user_callback(hermes, intentMessage):
            pass
        hermes_client = mock.Mock()
        dialogue_ffi.register_session_ended_handler(user_callback, hermes_client)
        assert dialogue_ffi._c_callback_subscribe_session_ended is not None
        ffi_utils.hermes_dialogue_subscribe_session_ended_json.assert_called_once()

    def test_subscribe_intent_not_recognized_correctly_registers_callback(self, ffi_utils, dialogue_ffi):
        def user_callback(hermes, intentMessage):
            pass
        hermes_client = mock.Mock()
        dialogue_ffi.register_intent_not_recognized_handler(user_callback, hermes_client)
        assert dialogue_ffi._c_callback_subscribe_intent_not_recognized is not None
        ffi_utils.hermes_dialogue_subscribe_intent_not_recognized_json.assert_called_once()

    def test_publish_start_session_with_action_success(self, ffi_utils):
        # Non-JSON FFI takes ontology objects and calls the plain variant.
        dialogue_ffi = DialogueFFI(use_json_api=False)
        session_init = SessionInitAction()
        start_session_message_with_action = StartSessionMessage(session_init, custom_data=None, site_id=None)
        dialogue_ffi.publish_start_session(start_session_message_with_action)
        ffi_utils.hermes_dialogue_publish_start_session.assert_called_once()

    def test_publish_start_session_with_action_success_json(self, ffi_utils, dialogue_ffi):
        # JSON FFI takes plain dicts and calls the *_json variant.
        start_session_message_with_action = {"test": "test"}
        dialogue_ffi.publish_start_session(start_session_message_with_action)
        ffi_utils.hermes_dialogue_publish_start_session_json.assert_called_once()

    def test_publish_start_session_with_notification_success(self, ffi_utils):
        ffi = DialogueFFI(use_json_api=False)
        session_init = SessionInitNotification("hello world!")
        start_session_message_with_notification = StartSessionMessage(session_init, custom_data=None, site_id=None)
        ffi.publish_start_session(start_session_message_with_notification)
        ffi_utils.hermes_dialogue_publish_start_session.assert_called_once()

    def test_publish_start_session_with_notification_success_json(self, ffi_utils, dialogue_ffi):
        start_session_message_with_notification = {"test": "test"}
        dialogue_ffi.publish_start_session(start_session_message_with_notification)
        ffi_utils.hermes_dialogue_publish_start_session_json.assert_called_once()

    def test_publish_continue_session_success(self, ffi_utils):
        dialogue_ffi = DialogueFFI(use_json_api=False)
        continue_session_message = ContinueSessionMessage("session_id",
                                                          "text",
                                                          "intent_filter",
                                                          "custom_data",
                                                          False)
        dialogue_ffi.publish_continue_session(continue_session_message)
        ffi_utils.hermes_dialogue_publish_continue_session.assert_called_once()

    def test_publish_continue_session_success_json(self, ffi_utils, dialogue_ffi):
        continue_session_message = {"test": "test"}
        dialogue_ffi.publish_continue_session(continue_session_message)
        ffi_utils.hermes_dialogue_publish_continue_session_json.assert_called_once()

    def test_publish_end_session_success(self, ffi_utils):
        dialogue_ffi = DialogueFFI(use_json_api=False)
        end_session_message = EndSessionMessage("session_id", "I end the session with this text")
        dialogue_ffi.publish_end_session(end_session_message)
        ffi_utils.hermes_dialogue_publish_end_session.assert_called_once()

    def test_publish_end_session_success_json(self, ffi_utils, dialogue_ffi):
        end_session_message = {"session_id": "session_id", "text": "ok"}
        dialogue_ffi.publish_end_session(end_session_message)
        ffi_utils.hermes_dialogue_publish_end_session_json.assert_called_once()

    def test_configure_dialogue(self, ffi_utils):
        dialogue_ffi = DialogueFFI(use_json_api=False)
        intent_config = DialogueConfigureIntent("dummy_intent", False)
        dialogue_configure_message = DialogueConfigureMessage(None, [intent_config])
        dialogue_ffi.publish_configure(dialogue_configure_message)
        ffi_utils.hermes_dialogue_publish_configure.assert_called_once()
| 45.398844
| 115
| 0.761523
|
acfe96b3020f8ce0e289a5cda14e56340f60ca5f
| 4,838
|
py
|
Python
|
get_gaoqingla_1080p_movie.py
|
Damon-wenc/Small-Data
|
d5e01169bff0c418142521a7c1b4e7e6d66ba51b
|
[
"Apache-2.0"
] | null | null | null |
get_gaoqingla_1080p_movie.py
|
Damon-wenc/Small-Data
|
d5e01169bff0c418142521a7c1b4e7e6d66ba51b
|
[
"Apache-2.0"
] | null | null | null |
get_gaoqingla_1080p_movie.py
|
Damon-wenc/Small-Data
|
d5e01169bff0c418142521a7c1b4e7e6d66ba51b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
这个小程序是为了将一个电影网(高清网 http://gaoqing.la/)的1080P资源遍历下保存下来,
'''
import urllib2
import lxml.html as parser
from gevent import monkey; monkey.patch_socket()
from gevent.pool import Pool
pool = Pool(2)  # at most 2 concurrent gevent workers
# The site blocks crawlers, so send a browser-like User-Agent header.
HEADERS = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
# GLOBAL VARIABLES
g_movie_urls = []   # movie detail-page URLs collected by get_movie_urls()
g_movie_infos = []  # [movie_name, [[title, magnet], ...]] entries
''' get and save all specific movie urls in g_movie_urls[] for further analysis '''
def get_movie_urls():
    """Walk http://gaoqing.la/1080p/page/N pages until a fetch fails,
    appending every movie detail URL found to g_movie_urls. Returns 0."""
    global OUT_OF_RANGE_FLAG, g_movie_urls
    page_index = 1
    while True:
        url = "http://gaoqing.la/1080p/page/%d" %page_index
        req = urllib2.Request(url, headers = HEADERS)
        try:
            resp = urllib2.urlopen(req)
            htmlSource = resp.read()
        except:
            # A failed fetch is treated as "past the last page" and ends the
            # loop. NOTE(review): this also swallows transient network errors,
            # which would silently truncate the crawl.
            print "All movie resources have been found."
            print "There are %d pages and %d movies in total." %(page_index -1, len(g_movie_urls))
            print "Program are doing selection, please wait..."
            return 0
        try:
            html = parser.document_fromstring(htmlSource)
            urls = html.xpath("//div/div/h2/a/@href")
        except:
            # NOTE(review): on parse failure `urls` keeps its previous value,
            # so the last page's links would be appended again.
            print "Analysis html failed :("
        for url in urls:
            g_movie_urls.append(url)
        #parse next page
        page_index += 1
    return 0
''' save info of target movies to g_movie_infos[] '''
def Analysis_single_movie(url):
    """Fetch one movie detail page and, if it offers BluRay 1080p links,
    append [movie_name, [[title, magnet], ...]] to g_movie_infos.

    Returns 0 on success, 1 when no BluRay 1080p resource exists,
    -1 on fetch or parse failure.
    """
    global VOTE_ThRESHOLD, g_movie_infos
    movie_info = []
    try:
        #Due to the bandwitdh, we need a timeout method to
        req = urllib2.Request(url, headers = HEADERS)
        resp = urllib2.urlopen(req, timeout = 100000)
        htmlSource = resp.read()
    except:
        print "Analysis url[%s] failed." %url
        return -1
    try:
        html = parser.document_fromstring(htmlSource)
        #a lot of movies don't have a vote number...
        # vote_value = html.xpath("//p[*]/span[10]/text()")
        movie_name = html.xpath("//div[1]/h1/text()")
        #lots of movies don't have a douban link...
        # movie_link = html.xpath("//p[*]/span[11]/text()")
        titles = html.xpath("//*[@id='post_content']/p[*]//a//text()")
        magnets = html.xpath("//*[@id='post_content']/p[*]//a[@rel='external nofollow']/@href")
        movie_info.append("%s" %movie_name[0])
        index = 0
        magnet_info = []
        for magnet in magnets:
            tmp_info = []
            #sometimes titles are split into two parts)
            if len(titles) == len(magnets) * 2:
                tmp_info.append("%s%s" %(titles[index * 2], titles[index * 2 + 1]))
                tmp_info.append("%s" %magnets[index])
            else:
                tmp_info.append("%s" %titles[index])
                tmp_info.append("%s" %magnets[index])
            # Keep only resources whose title advertises BluRay AND 1080p.
            if len(tmp_info) > 0 and "BluRay" in tmp_info[0] and "1080p" in tmp_info[0]:
                magnet_info.append(tmp_info)
            index += 1
        if len(magnet_info):
            movie_info.append(magnet_info)
        else: #sometimes there are only 720P BluRay resources, skip this one
            return 1
        if len(movie_info):
            g_movie_infos.append(movie_info)
        return 0
    except:
        #too many f**king strange format, I give up. Good job.
        #print "Get movie[%s] info failed, maybe unique html format OR resources are failed, please check it manually." %url
        return -1
def Analysis_movies():
    """Analyse every collected movie URL through the gevent pool. Returns 0."""
    global g_movie_urls
    pool.map(Analysis_single_movie, g_movie_urls) # not too fast
    return 0
''' save result to HTML file '''
def Save_to_html():
global g_movie_infos
print "There are %d movies" %len(g_movie_infos)
html_head = "<!DOCTYPE html><html><head><meta http-equiv=\"content-type\" content=\"text/html;charset=utf-8\"><style type=\"text/css\"> body {margin: 60px;} h1 {font-size: 20px;} b, p,div {font-size: 16px;} </style><title>gaoqingla网1080P索引</title></head><body><ol>"
html_end = "</ol></body></html>"
try:
f = open("gaoqingla_info.html", "w")
f.write(html_head)
for movie in g_movie_infos:
urls = movie[1]
f.write("<li><h1>%s</h1><ul>" %(movie[0].encode("utf-8")))
for url in urls:
f.write("<div align=left><a href=\"%s\">%s</a></div>"
%(url[1].encode("utf-8"), url[0].encode("utf-8")))
f.write("</ul></li>")
f.write(html_end)
finally:
f.close()
def run():
    """Full pipeline: collect URLs, analyse each movie, dump the HTML index."""
    get_movie_urls()
    Analysis_movies()
    Save_to_html()
if __name__ == "__main__":
    from timeit import Timer
    # Time a single end-to-end crawl run.
    t = Timer("run()", "from __main__ import run")
    print "runtine time of script is %.1f seconds" %t.timeit(1)
| 31.415584
| 269
| 0.580819
|
acfe971566ffdb6a9495dbd12002a73646a8c3a7
| 651
|
py
|
Python
|
cmsplugin_forms_builder/__init__.py
|
natgeosociety/cmsplugin-forms-builder
|
5420f2be32eec484919cdf4a73aba8f831f14adc
|
[
"BSD-3-Clause"
] | null | null | null |
cmsplugin_forms_builder/__init__.py
|
natgeosociety/cmsplugin-forms-builder
|
5420f2be32eec484919cdf4a73aba8f831f14adc
|
[
"BSD-3-Clause"
] | null | null | null |
cmsplugin_forms_builder/__init__.py
|
natgeosociety/cmsplugin-forms-builder
|
5420f2be32eec484919cdf4a73aba8f831f14adc
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
cmsplugin_forms_builder
"""
# Structured version components; get_version() renders them to a string.
__version_info__ = {
    'major': 2,
    'minor': 0,
    'micro': 0,
    'releaselevel': 'final',  # one of: 'alpha', 'beta', 'final'
    'serial': 1
}
def get_version(short=False):
    """Render ``__version_info__`` as a version string.

    The micro part is appended only when non-zero, and a pre-release
    suffix (e.g. ``a1``, ``b2``) only for non-final releases unless
    *short* is true.
    """
    info = __version_info__
    assert info['releaselevel'] in ('alpha', 'beta', 'final')
    parts = ['%(major)i.%(minor)i' % info]
    if info['micro']:
        parts.append('.%(micro)i' % info)
    if not short and info['releaselevel'] != 'final':
        parts.append('%s%i' % (info['releaselevel'][0], info['serial']))
    return ''.join(parts)
__version__ = get_version()
| 25.038462
| 77
| 0.609831
|
acfe98aafceaa1278a75c280992c851fb39db201
| 23,052
|
py
|
Python
|
tests/integration/netapi/rest_tornado/test_app.py
|
ari/salt
|
05de98e237719b4f0cc68f08871ffaa532eddec5
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/netapi/rest_tornado/test_app.py
|
ari/salt
|
05de98e237719b4f0cc68f08871ffaa532eddec5
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/netapi/rest_tornado/test_app.py
|
ari/salt
|
05de98e237719b4f0cc68f08871ffaa532eddec5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Import Python Libs
from __future__ import absolute_import
from __future__ import print_function
import json
import time
from distutils.version import StrictVersion # pylint: disable=import-error,no-name-in-module
# Import Salt Libs
from salt.netapi.rest_tornado import saltnado
from unit.netapi.rest_tornado.test_handlers import SaltnadoTestCase
# Import Salt Testing Libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../../')
import salt.ext.six as six
# Optional dependency: the whole suite is skipped when PyZMQ is unavailable.
try:
    import zmq
    from zmq.eventloop.ioloop import ZMQIOLoop
    HAS_ZMQ_IOLOOP = True
except ImportError:
    HAS_ZMQ_IOLOOP = False
def json_loads(data):
    """Parse JSON from *data*, decoding bytes to text first on Python 3."""
    return json.loads(data.decode('utf-8') if six.PY3 else data)
@skipIf(HAS_ZMQ_IOLOOP is False, 'PyZMQ version must be >= 14.0.1 to run these tests.')
@skipIf(StrictVersion(zmq.__version__) < StrictVersion('14.0.1'), 'PyZMQ must be >= 14.0.1 to run these tests.')
class TestSaltAPIHandler(SaltnadoTestCase):
    """Integration tests for the root ('/') rest_tornado endpoint.

    Covers auth redirects and the local / local_async / runner /
    runner_async client interfaces.
    """
    def get_app(self):
        # Build a minimal tornado app exposing only the root handler.
        urls = [('/', saltnado.SaltAPIHandler)]
        application = self.build_tornado_app(urls)
        application.event_listener = saltnado.EventListener({}, self.opts)
        return application
    def test_root(self):
        '''
        Test the root path which returns the list of clients we support
        '''
        response = self.fetch('/',
                              connect_timeout=30,
                              request_timeout=30,
                              )
        self.assertEqual(response.code, 200)
        response_obj = json_loads(response.body)
        self.assertEqual(sorted(response_obj['clients']),
                         ['local', 'local_async', 'runner', 'runner_async'])
        self.assertEqual(response_obj['return'], 'Welcome')
    def test_post_no_auth(self):
        '''
        Test post with no auth token, should 401
        '''
        # get a token for this test
        low = [{'client': 'local',
                'tgt': '*',
                'fun': 'test.ping',
                }]
        response = self.fetch('/',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json']},
                              follow_redirects=False,
                              connect_timeout=30,
                              request_timeout=30,
                              )
        # Unauthenticated requests are redirected to /login rather than 401'd.
        self.assertEqual(response.code, 302)
        self.assertEqual(response.headers['Location'], '/login')
    # Local client tests
    @skipIf(True, 'to be reenabled when #23623 is merged')
    def test_simple_local_post(self):
        '''
        Test a basic API of /
        '''
        low = [{'client': 'local',
                'tgt': '*',
                'fun': 'test.ping',
                }]
        response = self.fetch('/',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              connect_timeout=30,
                              request_timeout=30,
                              )
        response_obj = json_loads(response.body)
        self.assertEqual(response_obj['return'], [{'minion': True, 'sub_minion': True}])
    def test_simple_local_post_no_tgt(self):
        '''
        POST job with invalid tgt
        '''
        low = [{'client': 'local',
                'tgt': 'minion_we_dont_have',
                'fun': 'test.ping',
                }]
        response = self.fetch('/',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              connect_timeout=30,
                              request_timeout=30,
                              )
        response_obj = json_loads(response.body)
        self.assertEqual(response_obj['return'], ["No minions matched the target. No command was sent, no jid was assigned."])
    # local client request body test
    def test_simple_local_post_only_dictionary_request(self):
        '''
        Test a basic API of /
        '''
        # A bare dict (not wrapped in a list) must also be accepted.
        low = {'client': 'local',
               'tgt': '*',
               'fun': 'test.ping',
               }
        response = self.fetch('/',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              connect_timeout=30,
                              request_timeout=30,
                              )
        response_obj = json_loads(response.body)
        self.assertEqual(response_obj['return'], [{'minion': True, 'sub_minion': True}])
    def test_simple_local_post_invalid_request(self):
        '''
        Test a basic API of /
        '''
        # A lowstate that is neither dict nor list-of-dicts is a 400.
        low = ["invalid request"]
        response = self.fetch('/',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              connect_timeout=30,
                              request_timeout=30,
                              )
        self.assertEqual(response.code, 400)
    # local_async tests
    def test_simple_local_async_post(self):
        low = [{'client': 'local_async',
                'tgt': '*',
                'fun': 'test.ping',
                }]
        response = self.fetch('/',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              )
        response_obj = json_loads(response.body)
        ret = response_obj['return']
        ret[0]['minions'] = sorted(ret[0]['minions'])
        # TODO: verify pub function? Maybe look at how we test the publisher
        self.assertEqual(len(ret), 1)
        self.assertIn('jid', ret[0])
        self.assertEqual(ret[0]['minions'], sorted(['minion', 'sub_minion']))
    def test_multi_local_async_post(self):
        low = [{'client': 'local_async',
                'tgt': '*',
                'fun': 'test.ping',
                },
                {'client': 'local_async',
                 'tgt': '*',
                 'fun': 'test.ping',
                }]
        response = self.fetch('/',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              )
        response_obj = json_loads(response.body)
        ret = response_obj['return']
        ret[0]['minions'] = sorted(ret[0]['minions'])
        ret[1]['minions'] = sorted(ret[1]['minions'])
        self.assertEqual(len(ret), 2)
        self.assertIn('jid', ret[0])
        self.assertIn('jid', ret[1])
        self.assertEqual(ret[0]['minions'], sorted(['minion', 'sub_minion']))
        self.assertEqual(ret[1]['minions'], sorted(['minion', 'sub_minion']))
    def test_multi_local_async_post_multitoken(self):
        # Per-lowstate tokens may override the header token; a bad one
        # should fail only its own chunk.
        low = [{'client': 'local_async',
                'tgt': '*',
                'fun': 'test.ping',
                },
                {'client': 'local_async',
                 'tgt': '*',
                 'fun': 'test.ping',
                 'token': self.token['token'],  # send a different (but still valid token)
                },
                {'client': 'local_async',
                 'tgt': '*',
                 'fun': 'test.ping',
                 'token': 'BAD_TOKEN',  # send a bad token
                },
                ]
        response = self.fetch('/',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              )
        response_obj = json_loads(response.body)
        ret = response_obj['return']
        ret[0]['minions'] = sorted(ret[0]['minions'])
        ret[1]['minions'] = sorted(ret[1]['minions'])
        self.assertEqual(len(ret), 3)  # make sure we got 3 responses
        self.assertIn('jid', ret[0])  # the first 2 are regular returns
        self.assertIn('jid', ret[1])
        self.assertIn('Failed to authenticate', ret[2])  # bad auth
        self.assertEqual(ret[0]['minions'], sorted(['minion', 'sub_minion']))
        self.assertEqual(ret[1]['minions'], sorted(['minion', 'sub_minion']))
    def test_simple_local_async_post_no_tgt(self):
        low = [{'client': 'local_async',
                'tgt': 'minion_we_dont_have',
                'fun': 'test.ping',
                }]
        response = self.fetch('/',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              )
        response_obj = json_loads(response.body)
        self.assertEqual(response_obj['return'], [{}])
    # runner tests
    def test_simple_local_runner_post(self):
        low = [{'client': 'runner',
                'fun': 'manage.up',
                }]
        response = self.fetch('/',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              connect_timeout=30,
                              request_timeout=30,
                              )
        response_obj = json_loads(response.body)
        self.assertEqual(len(response_obj['return']), 1)
        self.assertEqual(set(response_obj['return'][0]), set(['minion', 'sub_minion']))
    # runner_async tests
    def test_simple_local_runner_async_post(self):
        low = [{'client': 'runner_async',
                'fun': 'manage.up',
                }]
        response = self.fetch('/',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              connect_timeout=10,
                              request_timeout=10,
                              )
        response_obj = json_loads(response.body)
        self.assertIn('return', response_obj)
        self.assertEqual(1, len(response_obj['return']))
        self.assertIn('jid', response_obj['return'][0])
        self.assertIn('tag', response_obj['return'][0])
@skipIf(HAS_ZMQ_IOLOOP is False, 'PyZMQ version must be >= 14.0.1 to run these tests.')
class TestMinionSaltAPIHandler(SaltnadoTestCase):
    """Tests for the /minions endpoint (grain lookup + async-only jobs)."""
    def get_app(self):
        urls = [(r"/minions/(.*)", saltnado.MinionSaltAPIHandler),
                (r"/minions", saltnado.MinionSaltAPIHandler),
                ]
        application = self.build_tornado_app(urls)
        application.event_listener = saltnado.EventListener({}, self.opts)
        return application
    @skipIf(True, 'issue #34753')
    def test_get_no_mid(self):
        # GET with no minion id returns grains for every minion.
        response = self.fetch('/minions',
                              method='GET',
                              headers={saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              follow_redirects=False,
                              )
        response_obj = json_loads(response.body)
        self.assertEqual(len(response_obj['return']), 1)
        # one per minion
        self.assertEqual(len(response_obj['return'][0]), 2)
        # check a single grain
        for minion_id, grains in six.iteritems(response_obj['return'][0]):
            self.assertEqual(minion_id, grains['id'])
    @skipIf(True, 'to be reenabled when #23623 is merged')
    def test_get(self):
        response = self.fetch('/minions/minion',
                              method='GET',
                              headers={saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              follow_redirects=False,
                              )
        response_obj = json_loads(response.body)
        self.assertEqual(len(response_obj['return']), 1)
        self.assertEqual(len(response_obj['return'][0]), 1)
        # check a single grain
        self.assertEqual(response_obj['return'][0]['minion']['id'], 'minion')
    def test_post(self):
        # POST without a client defaults to local_async on this endpoint.
        low = [{'tgt': '*',
                'fun': 'test.ping',
                }]
        response = self.fetch('/minions',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              )
        response_obj = json_loads(response.body)
        ret = response_obj['return']
        ret[0]['minions'] = sorted(ret[0]['minions'])
        # TODO: verify pub function? Maybe look at how we test the publisher
        self.assertEqual(len(ret), 1)
        self.assertIn('jid', ret[0])
        self.assertEqual(ret[0]['minions'], sorted(['minion', 'sub_minion']))
    def test_post_with_client(self):
        # get a token for this test
        low = [{'client': 'local_async',
                'tgt': '*',
                'fun': 'test.ping',
                }]
        response = self.fetch('/minions',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              )
        response_obj = json_loads(response.body)
        ret = response_obj['return']
        ret[0]['minions'] = sorted(ret[0]['minions'])
        # TODO: verify pub function? Maybe look at how we test the publisher
        self.assertEqual(len(ret), 1)
        self.assertIn('jid', ret[0])
        self.assertEqual(ret[0]['minions'], sorted(['minion', 'sub_minion']))
    def test_post_with_incorrect_client(self):
        '''
        The /minions endpoint is async only, so if you try something else
        make sure you get an error
        '''
        # get a token for this test
        low = [{'client': 'local',
                'tgt': '*',
                'fun': 'test.ping',
                }]
        response = self.fetch('/minions',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              )
        self.assertEqual(response.code, 400)
@skipIf(HAS_ZMQ_IOLOOP is False, 'PyZMQ version must be >= 14.0.1 to run these tests.')
class TestJobsSaltAPIHandler(SaltnadoTestCase):
    """Tests for the /jobs endpoint (job listing and single-jid lookup)."""
    def get_app(self):
        urls = [(r"/jobs/(.*)", saltnado.JobsSaltAPIHandler),
                (r"/jobs", saltnado.JobsSaltAPIHandler),
                ]
        application = self.build_tornado_app(urls)
        application.event_listener = saltnado.EventListener({}, self.opts)
        return application
    @skipIf(True, 'to be reenabled when #23623 is merged')
    def test_get(self):
        # test with no JID
        self.http_client.fetch(self.get_url('/jobs'),
                               self.stop,
                               method='GET',
                               headers={saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                               follow_redirects=False,
                               )
        response = self.wait(timeout=30)
        response_obj = json_loads(response.body)['return'][0]
        try:
            for jid, ret in six.iteritems(response_obj):
                self.assertIn('Function', ret)
                self.assertIn('Target', ret)
                self.assertIn('Target-type', ret)
                self.assertIn('User', ret)
                self.assertIn('StartTime', ret)
                self.assertIn('Arguments', ret)
        except AttributeError as attribute_error:
            # Dump the raw body to aid debugging before re-raising.
            print(json_loads(response.body))
            raise
        # test with a specific JID passed in
        jid = next(six.iterkeys(response_obj))
        self.http_client.fetch(self.get_url('/jobs/{0}'.format(jid)),
                               self.stop,
                               method='GET',
                               headers={saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                               follow_redirects=False,
                               )
        response = self.wait(timeout=30)
        response_obj = json_loads(response.body)['return'][0]
        self.assertIn('Function', response_obj)
        self.assertIn('Target', response_obj)
        self.assertIn('Target-type', response_obj)
        self.assertIn('User', response_obj)
        self.assertIn('StartTime', response_obj)
        self.assertIn('Arguments', response_obj)
        self.assertIn('Result', response_obj)
# TODO: run all the same tests from the root handler, but for now since they are
# the same code, we'll just sanity check
@skipIf(HAS_ZMQ_IOLOOP is False, 'PyZMQ version must be >= 14.0.1 to run these tests.')
class TestRunSaltAPIHandler(SaltnadoTestCase):
    """Sanity check for the /run endpoint (shares code with the root handler)."""
    def get_app(self):
        urls = [("/run", saltnado.RunSaltAPIHandler),
                ]
        application = self.build_tornado_app(urls)
        application.event_listener = saltnado.EventListener({}, self.opts)
        return application
    @skipIf(True, 'to be reenabled when #23623 is merged')
    def test_get(self):
        low = [{'client': 'local',
                'tgt': '*',
                'fun': 'test.ping',
                }]
        response = self.fetch('/run',
                              method='POST',
                              body=json.dumps(low),
                              headers={'Content-Type': self.content_type_map['json'],
                                       saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              )
        response_obj = json_loads(response.body)
        self.assertEqual(response_obj['return'], [{'minion': True, 'sub_minion': True}])
@skipIf(HAS_ZMQ_IOLOOP is False, 'PyZMQ version must be >= 14.0.1 to run these tests.')
class TestEventsSaltAPIHandler(SaltnadoTestCase):
    """Tests the /events SSE stream by firing events and checking the chunks."""
    def get_app(self):
        urls = [(r"/events", saltnado.EventsSaltAPIHandler),
                ]
        application = self.build_tornado_app(urls)
        application.event_listener = saltnado.EventListener({}, self.opts)
        # store a reference, for magic later!
        self.application = application
        self.events_to_fire = 0
        return application
    def test_get(self):
        self.events_to_fire = 5
        response = self.fetch('/events',
                              headers={saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              streaming_callback=self.on_event,
                              )
    def _stop(self):
        self.stop()
    def on_event(self, event):
        # Streaming callback: each chunk should be a "tag:/data:" SSE pair.
        if six.PY3:
            event = event.decode('utf-8')
        if self.events_to_fire > 0:
            self.application.event_listener.event.fire_event({
                'foo': 'bar',
                'baz': 'qux',
            }, 'salt/netapi/test')
            self.events_to_fire -= 1
        # once we've fired all the events, lets call it a day
        else:
            # wait so that we can ensure that the next future is ready to go
            # to make sure we don't explode if the next one is ready
            ZMQIOLoop.current().add_timeout(time.time() + 0.5, self._stop)
        event = event.strip()
        # if we got a retry, just continue
        if event != 'retry: 400':
            tag, data = event.splitlines()
            self.assertTrue(tag.startswith('tag: '))
            self.assertTrue(data.startswith('data: '))
@skipIf(HAS_ZMQ_IOLOOP is False, 'PyZMQ version must be >= 14.0.1 to run these tests.')
class TestWebhookSaltAPIHandler(SaltnadoTestCase):
    """Tests that POSTing to /hook fires a matching event on the master bus."""
    def get_app(self):
        urls = [(r"/hook(/.*)?", saltnado.WebhookSaltAPIHandler),
                ]
        application = self.build_tornado_app(urls)
        self.application = application
        application.event_listener = saltnado.EventListener({}, self.opts)
        return application
    def test_post(self):
        def verify_event(future):
            '''
            Verify that the event fired on the master matches what we sent
            '''
            event = future.result()
            self.assertEqual(event['tag'], 'salt/netapi/hook')
            self.assertIn('headers', event['data'])
            self.assertEqual(event['data']['post'], {'foo': 'bar'})
        # get an event future
        self._finished = False  # TODO: remove after some cleanup of the event listener
        event = self.application.event_listener.get_event(self,
                                                          tag='salt/netapi/hook',
                                                          callback=verify_event,
                                                          )
        # fire the event
        response = self.fetch('/hook',
                              method='POST',
                              body='foo=bar',
                              headers={saltnado.AUTH_TOKEN_HEADER: self.token['token']},
                              )
        response_obj = json_loads(response.body)
        self.assertTrue(response_obj['success'])
if __name__ == '__main__':
    # Run the whole suite under the salt integration harness, which needs
    # a live master/minion pair (needs_daemon=True).
    from integration import run_tests  # pylint: disable=import-error
    run_tests(TestEventsSaltAPIHandler,
              TestJobsSaltAPIHandler,
              TestMinionSaltAPIHandler,
              TestRunSaltAPIHandler,
              TestSaltAPIHandler,
              TestWebhookSaltAPIHandler, needs_daemon=True)
| 40.944938
| 126
| 0.508806
|
acfe9970f88be9ec9b3955cc0740ff1cb85b1c41
| 2,502
|
py
|
Python
|
test/functional/mempool_limit.py
|
CryptoDev-Project/FUNC
|
5000a663600befe26370e6831d65115645844ffb
|
[
"MIT"
] | 4
|
2019-12-31T06:33:20.000Z
|
2021-08-02T18:52:15.000Z
|
test/functional/mempool_limit.py
|
CryptoDev-Project/FUNC
|
5000a663600befe26370e6831d65115645844ffb
|
[
"MIT"
] | null | null | null |
test/functional/mempool_limit.py
|
CryptoDev-Project/FUNC
|
5000a663600befe26370e6831d65115645844ffb
|
[
"MIT"
] | 4
|
2019-12-24T12:32:36.000Z
|
2020-07-13T17:05:22.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from test_framework.test_framework import FuncTestFramework
from test_framework.util import *
class MempoolLimitTest(FuncTestFramework):
    """Fill a tiny mempool and verify a low-fee wallet tx gets evicted.

    Fix: the two log messages said 'minrelytxfee' (typo) instead of
    'minrelaytxfee'.
    """
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # 5 MB mempool so it overflows quickly; -spendzeroconfchange=0 keeps
        # the wallet from chaining off the unconfirmed low-fee tx.
        self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]
    def run_test(self):
        txouts = gen_return_txouts()
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        self.log.info('Check that mempoolminfee is minrelaytxfee')
        assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
        assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
        txids = []
        utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
        self.log.info('Create a mempool tx that will be evicted')
        us0 = utxos.pop()
        inputs = [{"txid": us0["txid"], "vout": us0["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.0001}
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        self.nodes[0].settxfee(relayfee)  # specifically fund this tx with low fee
        txF = self.nodes[0].fundrawtransaction(tx)
        self.nodes[0].settxfee(0)  # return to automatic fee selection
        txFS = self.nodes[0].signrawtransaction(txF['hex'])
        txid = self.nodes[0].sendrawtransaction(txFS['hex'])
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        base_fee = relayfee * 100
        # Flood the mempool with three waves of big transactions at
        # increasing feerates so the low-fee tx falls off the bottom.
        for i in range(3):
            txids.append([])
            txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
        self.log.info('The tx should be evicted by now')
        assert(txid not in self.nodes[0].getrawmempool())
        txdata = self.nodes[0].gettransaction(txid)
        assert(txdata['confirmations'] == 0)  # confirmation should still be 0
        self.log.info('Check that mempoolminfee is larger than minrelaytxfee')
        assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
        assert_greater_than(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
if __name__ == '__main__':
    # Standard functional-test entry point.
    MempoolLimitTest().main()
| 45.490909
| 118
| 0.671463
|
acfe9a593a327ddaa18d843c8afa039a3f97f5bf
| 9,832
|
py
|
Python
|
mypyc/emitwrapper.py
|
r3m0t/mypyc
|
118d08c2fa8235f24724880b66b991b79462ff76
|
[
"PSF-2.0"
] | null | null | null |
mypyc/emitwrapper.py
|
r3m0t/mypyc
|
118d08c2fa8235f24724880b66b991b79462ff76
|
[
"PSF-2.0"
] | null | null | null |
mypyc/emitwrapper.py
|
r3m0t/mypyc
|
118d08c2fa8235f24724880b66b991b79462ff76
|
[
"PSF-2.0"
] | null | null | null |
"""Generate CPython API wrapper function for a native function."""
from mypyc.common import PREFIX, NATIVE_PREFIX, DUNDER_PREFIX
from mypyc.emit import Emitter
from mypyc.ops import (
ClassIR, FuncIR, RType, RuntimeArg,
is_object_rprimitive, is_int_rprimitive, is_bool_rprimitive,
bool_rprimitive,
FUNC_STATICMETHOD,
)
from mypyc.namegen import NameGenerator
from typing import List, Optional
def wrapper_function_header(fn: FuncIR, names: NameGenerator) -> str:
    """Return the C declaration line for fn's CPython wrapper entry point."""
    return 'PyObject *{}{}(PyObject *self, PyObject *args, PyObject *kw)'.format(
        PREFIX, fn.cname(names))
def generate_wrapper_function(fn: FuncIR, emitter: Emitter) -> None:
    """Generates a CPython-compatible wrapper function for a native function.
    In particular, this handles unboxing the arguments, calling the native function, and
    then boxing the return value.
    """
    emitter.emit_line('{} {{'.format(wrapper_function_header(fn, emitter.names)))
    # If fn is a method, then the first argument is a self param
    real_args = list(fn.args)
    if fn.class_name and not fn.decl.kind == FUNC_STATICMETHOD:
        arg = real_args.pop(0)
        emitter.emit_line('PyObject *obj_{} = self;'.format(arg.name))
    optional_args = [arg for arg in fn.args if arg.optional]
    # NULL-terminated keyword list for PyArg_ParseTupleAndKeywords.
    arg_names = ''.join('"{}", '.format(arg.name) for arg in real_args)
    emitter.emit_line('static char *kwlist[] = {{{}0}};'.format(arg_names))
    for arg in real_args:
        emitter.emit_line('PyObject *obj_{}{};'.format(
            arg.name, ' = NULL' if arg.optional else ''))
    # One 'O' per required arg, '|' then one 'O' per optional arg,
    # ':name' so CPython's error messages mention the function name.
    arg_format = '{}{}:{}'.format(
        'O' * (len(real_args) - len(optional_args)),
        '|' + 'O' * len(optional_args) if len(optional_args) > 0 else '',
        fn.name,
    )
    arg_ptrs = ''.join(', &obj_{}'.format(arg.name) for arg in real_args)
    emitter.emit_lines(
        'if (!PyArg_ParseTupleAndKeywords(args, kw, "{}", kwlist{})) {{'.format(
            arg_format, arg_ptrs),
        'return NULL;',
        '}')
    # Shared unbox/call/box body.
    generate_wrapper_core(fn, emitter, optional_args)
    emitter.emit_line('}')
def generate_dunder_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
    """Generates a wrapper for native __dunder__ methods to be able to fit into the mapping
    protocol slot. This specifically means that the arguments are taken as *PyObjects and returned
    as *PyObjects.

    Returns the name of the emitted C function.
    """
    input_args = ', '.join('PyObject *obj_{}'.format(arg.name) for arg in fn.args)
    name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names))
    emitter.emit_line('static PyObject *{name}({input_args}) {{'.format(
        name=name,
        input_args=input_args,
    ))
    generate_wrapper_core(fn, emitter)
    emitter.emit_line('}')
    return name
# Maps rich-comparison dunder names to CPython's tp_richcompare op constants.
RICHCOMPARE_OPS = {
    '__lt__': 'Py_LT',
    '__gt__': 'Py_GT',
    '__le__': 'Py_LE',
    '__ge__': 'Py_GE',
    '__eq__': 'Py_EQ',
    '__ne__': 'Py_NE',
}
def generate_richcompare_wrapper(cl: ClassIR, emitter: Emitter) -> Optional[str]:
    """Generates a wrapper for richcompare dunder methods.

    Returns the emitted function's name, or None if the class defines no
    rich-comparison methods at all.
    """
    matches = [name for name in RICHCOMPARE_OPS if cl.has_method(name)]
    if not matches:
        return None
    name = '{}_RichCompare_{}'.format(DUNDER_PREFIX, cl.name_prefix(emitter.names))
    emitter.emit_line(
        'static PyObject *{name}(PyObject *obj_lhs, PyObject *obj_rhs, int op) {{'.format(
            name=name)
    )
    # Dispatch on the op code; each case inlines the method's wrapper body.
    emitter.emit_line('switch (op) {')
    for func in matches:
        emitter.emit_line('case {}: {{'.format(RICHCOMPARE_OPS[func]))
        method = cl.get_method(func)
        assert method is not None
        generate_wrapper_core(method, emitter, arg_names=['lhs', 'rhs'])
        emitter.emit_line('}')
    emitter.emit_line('}')
    # Unhandled op: return NotImplemented per the richcompare protocol.
    emitter.emit_line('Py_INCREF(Py_NotImplemented);')
    emitter.emit_line('return Py_NotImplemented;')
    emitter.emit_line('}')
    return name
def generate_get_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
    """Generates a wrapper for native __get__ methods.

    Returns the name of the emitted C function.
    """
    name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names))
    emitter.emit_line(
        'static PyObject *{name}(PyObject *self, PyObject *instance, PyObject *owner) {{'.
        format(name=name))
    # CPython passes NULL for class access; normalize to None for the
    # native descriptor implementation.
    emitter.emit_line('instance = instance ? instance : Py_None;')
    emitter.emit_line('return {}{}(self, instance, owner);'.format(
        NATIVE_PREFIX,
        fn.cname(emitter.names)))
    emitter.emit_line('}')
    return name
def generate_hash_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
    """Generates a wrapper for native __hash__ methods.

    Converts the native return value to the Py_ssize_t that tp_hash
    requires, and returns the name of the emitted C function.
    """
    name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names))
    emitter.emit_line('static Py_ssize_t {name}(PyObject *self) {{'.format(
        name=name
    ))
    emitter.emit_line('{}retval = {}{}(self);'.format(emitter.ctype_spaced(fn.ret_type),
                                                      NATIVE_PREFIX,
                                                      fn.cname(emitter.names)))
    emitter.emit_error_check('retval', fn.ret_type, 'return -1;')
    if is_int_rprimitive(fn.ret_type):
        emitter.emit_line('Py_ssize_t val = CPyTagged_AsLongLong(retval);')
    else:
        emitter.emit_line('Py_ssize_t val = PyLong_AsLongLong(retval);')
    emitter.emit_dec_ref('retval', fn.ret_type)
    emitter.emit_line('if (PyErr_Occurred()) return -1;')
    # We can't return -1 from a hash function..
    emitter.emit_line('if (val == -1) return -2;')
    emitter.emit_line('return val;')
    emitter.emit_line('}')
    return name
def generate_bool_wrapper(cl: ClassIR, fn: FuncIR, emitter: Emitter) -> str:
    """Generates a wrapper for native __bool__ methods.

    tp_bool slots return an int (0/1, -1 on error). Returns the name of
    the emitted C function.
    """
    name = '{}{}{}'.format(DUNDER_PREFIX, fn.name, cl.name_prefix(emitter.names))
    emitter.emit_line('static int {name}(PyObject *self) {{'.format(
        name=name
    ))
    emitter.emit_line('{}val = {}{}(self);'.format(emitter.ctype_spaced(fn.ret_type),
                                                   NATIVE_PREFIX,
                                                   fn.cname(emitter.names)))
    emitter.emit_error_check('val', fn.ret_type, 'return -1;')
    # This wouldn't be that hard to fix but it seems unimportant and
    # getting error handling and unboxing right would be fiddly. (And
    # way easier to do in IR!)
    assert is_bool_rprimitive(fn.ret_type), "Only bool return supported for __bool__"
    emitter.emit_line('return val;')
    emitter.emit_line('}')
    return name
def generate_wrapper_core(fn: FuncIR, emitter: Emitter,
                          optional_args: Optional[List[RuntimeArg]] = None,
                          arg_names: Optional[List[str]] = None) -> None:
    """Generates the core part of a wrapper function for a native function.
    This expects each argument as a PyObject * named obj_{arg} as a precondition.
    It converts the PyObject *s to the necessary types, checking and unboxing if necessary,
    makes the call, then boxes the result if necessary and returns it.

    Fix: the previous signature used a mutable default (``[]``) for
    optional_args; use None and normalize inside instead.
    """
    optional_args = optional_args or []
    arg_names = arg_names or [arg.name for arg in fn.args]
    for arg_name, arg in zip(arg_names, fn.args):
        generate_arg_check(arg_name, arg.type, emitter, arg in optional_args)
    native_args = ', '.join('arg_{}'.format(arg) for arg in arg_names)
    if fn.ret_type.is_unboxed:
        # TODO: The Py_RETURN macros return the correct PyObject * with reference count handling.
        #       Are they relevant?
        emitter.emit_line('{}retval = {}{}({});'.format(emitter.ctype_spaced(fn.ret_type),
                                                        NATIVE_PREFIX,
                                                        fn.cname(emitter.names),
                                                        native_args))
        emitter.emit_error_check('retval', fn.ret_type, 'return NULL;')
        emitter.emit_box('retval', 'retbox', fn.ret_type, declare_dest=True)
        emitter.emit_line('return retbox;')
    else:
        # Boxed return values can be passed straight through.
        emitter.emit_line('return {}{}({});'.format(NATIVE_PREFIX,
                                                    fn.cname(emitter.names),
                                                    native_args))
    # TODO: Tracebacks?
def generate_arg_check(name: str, typ: RType, emitter: Emitter, optional: bool = False) -> None:
    """Insert a runtime check for argument and unbox if necessary.
    The object is named PyObject *obj_{}. This is expected to generate
    a value of name arg_{} (unboxed if necessary). For each primitive a runtime
    check ensures the correct type.

    Fix: the final emit_line passed a stray extra argument to
    ``str.format`` (one placeholder, two args).
    """
    if typ.is_unboxed:
        # Borrow when unboxing to avoid reference count manipulation.
        emitter.emit_unbox('obj_{}'.format(name), 'arg_{}'.format(name), typ,
                           'return NULL;', declare_dest=True, borrow=True, optional=optional)
    elif is_object_rprimitive(typ):
        # Trivial, since any object is valid.
        if optional:
            # Optional args arrive as NULL when omitted; map to the error value.
            emitter.emit_line('PyObject *arg_{};'.format(name))
            emitter.emit_line('if (obj_{} == NULL) {{'.format(name))
            emitter.emit_line('arg_{} = {};'.format(name, emitter.c_error_value(typ)))
            emitter.emit_lines('} else {', 'arg_{} = obj_{}; '.format(name, name), '}')
        else:
            emitter.emit_line('PyObject *arg_{} = obj_{};'.format(name, name))
    else:
        # Boxed non-object primitive: a cast performs the type check.
        emitter.emit_cast('obj_{}'.format(name), 'arg_{}'.format(name), typ,
                          declare_dest=True, optional=optional)
        if optional:
            emitter.emit_line('if (obj_{} != NULL && arg_{} == NULL) return NULL;'.format(
                name, name))
        else:
            emitter.emit_line('if (arg_{} == NULL) return NULL;'.format(name))
| 43.122807
| 98
| 0.62144
|
acfe9ae4ad7c86e19b11a15d5dfa5fcc936a331b
| 7,009
|
py
|
Python
|
test2.py
|
saurabhtalele/finitediff
|
e27e092a0236cc980dfe58405d72d7f95a17667d
|
[
"MIT"
] | 1
|
2021-04-30T10:14:27.000Z
|
2021-04-30T10:14:27.000Z
|
test2.py
|
saurabhtalele/finitediff
|
e27e092a0236cc980dfe58405d72d7f95a17667d
|
[
"MIT"
] | null | null | null |
test2.py
|
saurabhtalele/finitediff
|
e27e092a0236cc980dfe58405d72d7f95a17667d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 30 03:26:59 2021
@author: s
"""
import numpy.matlib
import numpy as np
import sympy as sp
import re
import sys
import io
from io import StringIO
import string
from pytexit import py2tex
#import matplotlib.pyplot as plt
from flask import Flask
from pywebio.input import input, NUMBER,select
from pywebio.output import put_text, put_html, put_markdown, put_table ,put_image,put_widget
from pywebio.platform.flask import webio_view
def backwrd():
    """Interactive PyWebIO page that derives a backward finite-difference
    formula for the d-th derivative with a chosen error order, then shows
    the result both as a plain Python expression and as MathJax-rendered
    LaTeX.  All inputs are collected interactively from the browser."""
    ###differantiation order
    dd=input("order of differentiation:", type=NUMBER)
    put_text('order of differentiation',(dd))
    put_html('<hr>')
    ###hex of smal step h is
    # exponent of the step size h in the final 1/h**hs factor
    hs=dd
    #### order of error
    err=input("order of error even number or one:", type=NUMBER)
    put_text('order of error',(err))
    put_html('<hr>')
    #total number of points in cosideration
    put_text('please provide only integer number ')
    put_text('for error order result if its greter than this please relod web and try max limit integer number is equal or less than ',-1*(dd+err-1))
    put_text('take limits from 0 to 3 for example ,so points in cosiderations are -3,-2,-1,0 total n is 4')
    n = input("number of points in cosideration:", type=NUMBER)
    put_text('n is ',(n))
    #take limits from -3 to 3 for example ,so points in cosiderations are -3,-2,-1,0,1,2,3 total n is 7
    put_text('stencil of -3 to 0 for example ,so points in cosiderations are -3,-2,-1,0 so min_limit as -3')
    min_limit = input("Input your min_limit:", type=NUMBER)
    put_text('-3 to 3 for example ,so points in cosiderations are -3,-2,-1,0 so max_limit as 0')
    put_text('yor stencils max limit is ',(min_limit))
    put_html('<hr>')
    #max limit
    # backward stencil: the rightmost node is always x itself (offset 0)
    max_limit = 0
    put_html('<hr>')
    ####makiuing array
    # stencil offsets: n equally spaced points from min_limit up to 0
    a0 = np.linspace(min_limit,max_limit,n)
    a0= np. array(a0)
    # making n*n matrix
    a=np.tile(a0, (n,1))
    #print(a)
    a=np.array(a)
    ### making indices
    b=np.linspace(0,n-1)
    b=np.arange(0, n).reshape(n,1)
    # build the Vandermonde-style system: row i holds a0**i
    it = np.nditer([a, b, None], flags=['external_loop'])
    with it:
        for x, y, z in it:
            z[...] = x**y
        result = it.operands[2]
    #result
    bb=result
    ########Inserting whre one is going to come
    # right-hand side: a 1 at index dd selects the dd-th derivative
    az=np.zeros(n-1)
    yy=np.insert(az,dd, 1)
    #output capture from print
    # capture the printed sympy symbols f(x+(k*h)) into a single string
    old_stdout = sys.stdout
    new_stdout = io.StringIO()
    sys.stdout = new_stdout
    for i in np.nditer(a0):
        print(sp.symbols('f(x+({}*h))'.format(i)))
    output = new_stdout.getvalue()
    sys.stdout = old_stdout
    j=output
    ############solving matrix
    hh = np.linalg.solve(bb, yy)
    # stencil weights scaled by dd!, per the Taylor-series derivation
    a = hh*np.math.factorial(dd)
    #print(a)
    ############ symbols manupalation and list to symbols conversion
    ##print(type(j))
    d = [x.replace('\n', ' ') for x in j]
    # Original Array
    # NOTE(review): np.str is deprecated/removed in newer NumPy releases
    array = np.array(j, dtype=np.str)
    #print(array)
    # Split the element of the said array
    # with spaces
    sparr = np.char.split(array) # imp step str to array numpy
    #print(sparr)
    d=np.asarray(sparr,dtype=object)
    d = d.tolist() # convert to list
    #print(d)
    d=sp.symbols(d)
    d=np.array(d)
    #print(d)
    ######multiplyer
    B=np.array(a)
    #B=B.astype(str)
    #c = B.tolist()
    c=B.astype(object)
    #print(c)
    re.sub(r' *\n *', '\t',np.array_str(np.c_[c,d]).replace('[', '(').replace(']', ')').strip())
    # pair each weight with its f(x+k*h) symbol: (w0*f0)+ (w1*f1)+ ...
    res = "\t".join("({}*{})+".format(x, y) for x, y in zip(c, d))
    # drop the trailing '+' produced by the join-with-suffix above
    name = res.rstrip(res[-1])
    #print('(',name,')','*','*1/h**',hs)
    ######captiring print
    old_stdout = sys.stdout
    new_stdout = io.StringIO()
    sys.stdout = new_stdout
    print('(',name,')','*','1/h**',hs)
    kj = new_stdout.getvalue()
    sys.stdout = old_stdout
    def remove(string):
        # strip ALL whitespace so the expression can be parsed by py2tex
        pattern = re.compile(r'\s+')
        return re.sub(pattern, '', string)
    # Driver Program
    string = kj
    #remove will remove all spaces
    yy=remove(string)
    #put_text('%s' % (remove(string)))
    #making variable to latex plote
    y=py2tex(yy,print_latex=False, print_formula=False)
    o=y.replace('+-','-')
    #put_text('%s' % o)
    def org(o):
        # strip the $$ delimiters py2tex adds and wrap in quote markers
        w=o
        p=w
        pp=p.replace('$$',' ')
        tt=" ' "
        #string="r"
        #pg=string+pp
        #tg=" ' "
        pg=tt+pp+tt
        return pg
    t=org(o)
    ###matplotlib
    lat = t
    # #add text
    # ax = plt.axes([1,0,0.1,0.1]) #left,bottom,width,height
    # ax.set_xticks([])
    # ax.set_yticks([])
    # ax.axis('off')
    # plt.text(0.2,0.2,r'$%s$' % lat ,size=500,color="red",fontsize=100)
    # #hide axes
    # fig = plt.gca()
    # fig.axes.get_xaxis().set_visible(False)
    # fig.axes.get_yaxis().set_visible(False)
    # plt.savefig('images/out.jpeg', bbox_inches='tight', pad_inches=0)
    # plt.close()
    #save image
    # img = open('images/out.jpeg', 'rb').read()
    # put_image(img, width='500000000px')
    put_html('<hr>')
    put_text('this is python output %s' % yy)
    put_html('<hr>')
    #visualize equation
    #tpl = '''<!DOCTYPE html><html><head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width"> <title>MathJax example</title> <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script> <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"> </script></head><body><p>{{#contents}}{{& pywebio_output_parse}}{{/contents}}</p></body></html>'''
    # MathJax template so the browser renders the LaTeX expression
    tpl = '''<!DOCTYPE html>
    <html><head> <meta charset="utf-8">
    <meta name="viewport" content="width=device-width">
    <title> </title>
    <script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
    <script id="MathJax-script" async src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"> </script>
    </head>
    <body>
    <p>{{#contents}}{{& pywebio_output_parse}}{{/contents}}
    </p>
    </body>
    </html>'''
    put_widget(tpl, {"contents": [put_text((lat))]})
    #for latex output
    put_html('<hr>')
    put_text('upper is latex out put %s' % o)
# if __name__ == '__main__':
# bmi()
| 28.49187
| 439
| 0.525467
|
acfe9b02239c12729025a00316856bfc38aba472
| 1,002
|
py
|
Python
|
expenshare/sharelists/urls.py
|
atsanda/Expenshare
|
3b6c21395a552200627e8203a7a5d28166dbb813
|
[
"MIT"
] | null | null | null |
expenshare/sharelists/urls.py
|
atsanda/Expenshare
|
3b6c21395a552200627e8203a7a5d28166dbb813
|
[
"MIT"
] | 20
|
2020-06-07T20:05:18.000Z
|
2021-09-19T13:43:11.000Z
|
expenshare/sharelists/urls.py
|
atsanda/Expenshare
|
3b6c21395a552200627e8203a7a5d28166dbb813
|
[
"MIT"
] | null | null | null |
from django.urls import path
from expenshare.sharelists.views import (
CreditCreate,
CreditDelete,
CreditUpdate,
CreditView,
SharelistCreate,
SharelistMain,
SharelistSummary,
)
# URL namespace: reverse routes as e.g. reverse("sharelists:create").
app_name = "sharelists"
urlpatterns = [
    # Sharelist-level pages
    path("create", SharelistCreate.as_view(), name="create"),
    path("<int:sharelist_id>", SharelistMain.as_view(), name="view"),
    path("<int:sharelist_id>/summary", SharelistSummary.as_view(), name="summary"),
    # Credit CRUD, nested under the owning sharelist
    path(
        "<int:sharelist_id>/credits/create",
        CreditCreate.as_view(),
        name="credits-create",
    ),
    path(
        "<int:sharelist_id>/credits/<int:credit_id>",
        CreditView.as_view(),
        name="credits-view",
    ),
    path(
        "<int:sharelist_id>/credits/<int:credit_id>/update",
        CreditUpdate.as_view(),
        name="credits-update",
    ),
    path(
        "<int:sharelist_id>/credits/<int:credit_id>/delete",
        CreditDelete.as_view(),
        name="credits-delete",
    ),
]
| 25.692308
| 83
| 0.623752
|
acfe9bbc55e16fb1206cbbc2626a40b76a342b98
| 97
|
py
|
Python
|
test/test_playeah.py
|
fmcevoy/playeah
|
98c77bc6ba5b6f53cffeab9ed3133e682d26a54d
|
[
"Apache-2.0"
] | null | null | null |
test/test_playeah.py
|
fmcevoy/playeah
|
98c77bc6ba5b6f53cffeab9ed3133e682d26a54d
|
[
"Apache-2.0"
] | null | null | null |
test/test_playeah.py
|
fmcevoy/playeah
|
98c77bc6ba5b6f53cffeab9ed3133e682d26a54d
|
[
"Apache-2.0"
] | null | null | null |
import pytest
def test_nothing():
    """Smoke test: verifies the test harness runs at all."""
    expected = 1
    assert expected == 1
def test_nothing_else():
    """Second trivial sanity check for the test runner."""
    value = 2
    assert value == 2
| 12.125
| 24
| 0.649485
|
acfe9bf7f83a0ed50382fcb536ae4c91045f8684
| 1,329
|
py
|
Python
|
mqtt.py
|
RRMoelker/socketMQTT
|
5efe73106524ba87bd6e168f16fb3a3b18b8636d
|
[
"MIT"
] | 3
|
2016-04-10T15:39:12.000Z
|
2020-08-07T05:18:05.000Z
|
mqtt.py
|
RRMoelker/socketMQTT
|
5efe73106524ba87bd6e168f16fb3a3b18b8636d
|
[
"MIT"
] | 1
|
2016-04-10T08:14:21.000Z
|
2016-04-10T08:18:46.000Z
|
mqtt.py
|
RRMoelker/socketMQTT
|
5efe73106524ba87bd6e168f16fb3a3b18b8636d
|
[
"MIT"
] | 2
|
2019-10-31T11:13:13.000Z
|
2021-09-08T22:28:15.000Z
|
import logging
import paho.mqtt.client as mqtt
import config
logger = logging.getLogger(__name__)
def on_connect(client, userdata, flags, rc):
    """
    The callback for when the client receives a CONNACK response from the server.

    Subscribing in on_connect() means that if we lose the connection and
    reconnect then subscriptions will be renewed.

    NOTE(review): paho-mqtt invokes on_connect with four arguments
    (client, userdata, flags, rc); the previous three-argument signature
    raised a TypeError as soon as the broker acknowledged the connection.
    `flags` carries broker response flags and is unused here.
    """
    logger.info("Connected with result code %s", str(rc))
    # Renew the wildcard system-topic subscription on every (re)connect.
    client.subscribe("$SYS/#")
class MqttConnection:
    """Thin wrapper around paho-mqtt: connects (optionally authenticated,
    per the `config` module) and publishes messages, with the network loop
    running on a background thread."""
    def connect(self, user=None, password=None):
        """Connect to the broker configured in `config` and start the
        network loop in a background thread.

        user/password are applied only when config.MQTT_AUTHENTICATE is set.
        """
        self.mqttc = mqtt.Client()
        self.mqttc.on_connect = on_connect
        if config.MQTT_AUTHENTICATE:
            self.mqttc.username_pw_set(user, password)
        # 60s keepalive interval
        self.mqttc.connect(config.MQTT_HOST, config.MQTT_PORT, 60)
        logger.info('MQTT connect called, waiting for connected')
        # loop_start() runs the MQTT I/O loop in a separate thread, so this
        # method returns before the connection is actually acknowledged.
        self.mqttc.loop_start()
        logger.info('MQTT looping in other thread')
    def send(self, topic, message):
        """Publish `message` to `topic`.

        Returns True on successful publish, False when the client is not
        connected.  NOTE(review): any other paho error code falls through
        and implicitly returns None.
        """
        logger.debug('MQTT sending message %s', message)
        (result, mid) = self.mqttc.publish(topic, message)
        if result == mqtt.MQTT_ERR_SUCCESS:
            logger.info('MQTT message send')
            return True
        elif result == mqtt.MQTT_ERR_NO_CONN:
            logger.critical('ERROR, MQTT message not send, client not connected')
            return False
| 34.076923
| 81
| 0.670429
|
acfe9bfeda2f12d841a5325d6ebd32ff76e71c98
| 25,560
|
py
|
Python
|
glycowork/motif/tokenization.py
|
Old-Shatterhand/glycowork
|
544fde03dd38cf95fb97792e050d7ff68f5637b1
|
[
"MIT"
] | null | null | null |
glycowork/motif/tokenization.py
|
Old-Shatterhand/glycowork
|
544fde03dd38cf95fb97792e050d7ff68f5637b1
|
[
"MIT"
] | null | null | null |
glycowork/motif/tokenization.py
|
Old-Shatterhand/glycowork
|
544fde03dd38cf95fb97792e050d7ff68f5637b1
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import re
import copy
from itertools import combinations_with_replacement
from collections import Counter
from sklearn.cluster import DBSCAN
from glycowork.glycan_data.loader import lib, motif_list, find_nth, unwrap, df_species, Hex, dHex, HexNAc, Sia, linkages
from glycowork.motif.processing import small_motif_find, min_process_glycans
from glycowork.motif.graph import compare_glycans
from glycowork.motif.annotate import annotate_dataset
# Amino-acid vocabulary used for protein tokenization; 'z' is the
# catch-all character constrain_prot substitutes for anything outside it.
chars = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','P','Q','R','S','T',
         'V','W','Y', 'X', 'Z'] + ['z']
def constrain_prot(proteins, libr = None):
  """Ensures that no characters outside of libr are present in proteins\n
  | Arguments:
  | :-
  | proteins (list): list of proteins as strings
  | libr (list): sorted list of amino acids occurring in proteins\n
  | Returns:
  | :-
  | Returns list of proteins with only permitted amino acids
  """
  if libr is None:
    libr = chars
  # collect every character actually observed across all sequences,
  # then map each out-of-vocabulary character to the catch-all 'z'
  # in a single C-level pass instead of chained str.replace calls
  observed = set(''.join(proteins))
  forbidden = observed - set(libr)
  table = str.maketrans({c: 'z' for c in forbidden})
  return [p.translate(table) for p in proteins]
def prot_to_coded(proteins, libr = None):
  """Encodes protein sequences to be used in LectinOracle-flex\n
  | Arguments:
  | :-
  | proteins (list): list of proteins as strings
  | libr (list): sorted list of amino acids occurring in proteins\n
  | Returns:
  | :-
  | Returns list of encoded proteins with only permitted amino acids
  """
  if libr is None:
    libr = chars
  # truncate to at most 1000 residues, sanitize out-of-vocabulary
  # characters, then tokenize and pad each sequence to length 1000
  truncated = [seq[:1000] for seq in proteins]
  truncated = constrain_prot(truncated, libr = libr)
  pad_token = len(libr) - 1
  return [pad_sequence(string_to_labels(str(seq).upper(), libr = libr),
                       max_length = 1000,
                       pad_label = pad_token) for seq in truncated]
def character_to_label(character, libr = None):
  """tokenizes character by indexing passed library\n
  | Arguments:
  | :-
  | character (string): character to index
  | libr (list): list of library items\n
  | Returns:
  | :-
  | Returns index of character in library
  """
  if libr is None:
    libr = lib
  # position in the library is the token id (raises ValueError if absent)
  return libr.index(character)
def string_to_labels(character_string, libr = None):
  """tokenizes word by indexing characters in passed library\n
  | Arguments:
  | :-
  | character_string (string): string of characters to index
  | libr (list): list of library items\n
  | Returns:
  | :-
  | Returns indexes of characters in library
  """
  if libr is None:
    libr = lib
  # each character maps to its library position (same as character_to_label)
  return [libr.index(ch) for ch in character_string]
def pad_sequence(seq, max_length, pad_label = None, libr = None):
"""brings all sequences to same length by adding padding token\n
| Arguments:
| :-
| seq (list): sequence to pad (from string_to_labels)
| max_length (int): sequence length to pad to
| pad_label (int): which padding label to use
| libr (list): list of library items\n\n
| Returns:
| :-
| Returns padded sequence
"""
if libr is None:
libr = lib
if pad_label is None:
pad_label = len(libr)
seq += [pad_label for i in range(max_length - len(seq))]
return seq
def get_core(sugar):
  """retrieves core monosaccharide from modified monosaccharide\n
  | Arguments:
  | :-
  | sugar (string): monosaccharide or linkage\n
  | Returns:
  | :-
  | Returns core monosaccharide as string
  """
  easy_cores = ['GlcNAc', 'GalNAc', 'ManNAc', 'FucNAc', 'QuiNAc', 'RhaNAc', 'GulNAc',
                'IdoNAc', 'MurNAc', 'HexNAc', '6dAltNAc', 'AcoNAc', 'GlcA', 'AltA',
                'GalA', 'ManA', 'Tyv', 'Yer', 'Abe', 'GlcfNAc', 'GalfNAc', 'ManfNAc',
                'FucfNAc', 'IdoA', 'GulA', 'LDManHep', 'DDManHep', 'DDGlcHep', 'LyxHep', 'ManHep',
                'DDAltHep', 'IdoHep', 'DLGlcHep', 'GalHep']
  # BUGFIX: a missing comma previously fused 'IdoN' and 'Glcf' into the
  # single (never-matching) entry 'IdoNGlcf', so both cores fell through
  # to the wrong tier or to 'Sug'.
  next_cores = ['GlcN', 'GalN', 'ManN', 'FucN', 'QuiN', 'RhaN', 'AraN', 'IdoN', 'Glcf', 'Galf', 'Manf',
                'Fucf', 'Araf', 'Lyxf', 'Xylf', '6dAltf', 'Ribf', 'Fruf', 'Apif', 'Kdof', 'Sedf',
                '6dTal', 'AltNAc', '6dAlt']
  hard_cores = ['Glc', 'Gal', 'Man', 'Fuc', 'Qui', 'Rha', 'Ara', 'Oli', 'Kdn', 'Gul', 'Lyx',
                'Xyl', 'Dha', 'Rib', 'Kdo', 'Tal', 'All', 'Pse', 'Leg', 'Asc',
                'Fru', 'Hex', 'Alt', 'Xluf', 'Api', 'Ko', 'Pau', 'Fus', 'Erwiniose',
                'Aco', 'Bac', 'Dig', 'Thre-ol', 'Ery-ol']
  # search the most specific tier first so e.g. 'GlcNAc' wins over 'Glc'
  for cores in (easy_cores, next_cores, hard_cores):
    match = next((ele for ele in cores if ele in sugar), None)
    if match is not None:
      return match
  # sialic acids are handled separately from the tiered lists
  if 'Neu' in sugar:
    if '5Ac' in sugar:
      return 'Neu5Ac'
    if '5Gc' in sugar:
      return 'Neu5Gc'
    return 'Neu'
  # linkages (e.g. 'b1-4') and bare numeric linkages (e.g. '2-8') pass through
  if sugar.startswith('a') or sugar.startswith('b'):
    return sugar
  if re.match('^[0-9]+(-[0-9]+)+$', sugar):
    return sugar
  # unknown monosaccharide: generic placeholder
  return 'Sug'
def get_stem_lib(libr):
  """creates a mapping file to map modified monosaccharides to core monosaccharides\n
  | Arguments:
  | :-
  | libr (list): sorted list of unique glycoletters observed in the glycans of our dataset\n
  | Returns:
  | :-
  | Returns dictionary of form modified_monosaccharide:core_monosaccharide
  """
  stem_lib = {}
  for glycoletter in libr:
    stem_lib[glycoletter] = get_core(glycoletter)
  return stem_lib
def stemify_glycan(glycan, stem_lib = None, libr = None):
  """removes modifications from all monosaccharides in a glycan\n
  | Arguments:
  | :-
  | glycan (string): glycan in IUPAC-condensed format
  | stem_lib (dictionary): dictionary of form modified_monosaccharide:core_monosaccharide; default:created from lib
  | libr (list): sorted list of unique glycoletters observed in the glycans of our dataset; default:lib\n
  | Returns:
  | :-
  | Returns stemmed glycan as string
  """
  if libr is None:
    libr = lib
  if stem_lib is None:
    stem_lib = get_stem_lib(libr)
  clean_list = list(stem_lib.values())
  # walk the library back-to-front (longest/most specific glycoletters
  # presumably last — TODO confirm lib ordering) and rewrite each modified
  # monosaccharide occurrence to its core form
  for k in list(stem_lib.keys())[::-1][:-1]:
    # skip letters that are already core forms, linkages (a*/b*) and
    # purely numeric linkages
    if ((k not in clean_list) and (k in glycan) and not (k.startswith(('a','b'))) and not (re.match('^[0-9]+(-[0-9]+)+$', k))):
      while ((k in glycan) and (sum(1 for s in clean_list if k in s) <= 1)):
        # split at the last '(' : everything before it vs. the final
        # linkage+monosaccharide; each part is stemmed independently
        glycan_start = glycan[:glycan.rindex('(')]
        glycan_part = glycan_start
        if k in glycan_start:
          # isolate the candidate occurrence up to the next '('
          cut = glycan_start[glycan_start.index(k):]
          try:
            cut = cut[:cut.index('(')]
          except:
            pass
          # only replace when the occurrence is NOT already a clean core
          if cut not in clean_list:
            glycan_part = glycan_start[:glycan_start.index(k)]
            glycan_part = glycan_part + stem_lib[k]
          else:
            glycan_part = glycan_start
          try:
            # re-attach whatever followed the replaced occurrence
            glycan_mid = glycan_start[glycan_start.index(k) + len(k):]
            if ((cut not in clean_list) and (len(glycan_mid)>0)):
              glycan_part = glycan_part + glycan_mid
          except:
            pass
        # handle the trailing (reducing-end) portion after the last '('
        glycan_end = glycan[glycan.rindex('('):]
        if k in glycan_end:
          # the monosaccharide starts after the closing ']' or ')'
          if ']' in glycan_end:
            filt = ']'
          else:
            filt = ')'
          cut = glycan_end[glycan_end.index(filt)+1:]
          if cut not in clean_list:
            glycan_end = glycan_end[:glycan_end.index(filt)+1] + stem_lib[k]
          else:
            pass
        glycan = glycan_part + glycan_end
  return glycan
def stemify_dataset(df, stem_lib = None, libr = None,
                    glycan_col_name = 'target', rarity_filter = 1):
  """stemifies all glycans in a dataset by removing monosaccharide modifications\n
  | Arguments:
  | :-
  | df (dataframe): dataframe with glycans in IUPAC-condensed format in column glycan_col_name
  | stem_lib (dictionary): dictionary of form modified_monosaccharide:core_monosaccharide; default:created from lib
  | libr (list): sorted list of unique glycoletters observed in the glycans of our dataset; default:lib
  | glycan_col_name (string): column name under which glycans are stored; default:target
  | rarity_filter (int): how often monosaccharide modification has to occur to not get removed; default:1\n
  | Returns:
  | :-
  | Returns df with glycans stemified
  """
  if libr is None:
    libr = lib
  if stem_lib is None:
    stem_lib = get_stem_lib(libr)
  # count every glycoletter across the dataset; modifications frequent
  # enough (> rarity_filter) are kept as-is by mapping them to themselves
  pool = unwrap(min_process_glycans(df[glycan_col_name].values.tolist()))
  pool_count = Counter(pool)
  for k in list(set(pool)):
    if pool_count[k] > rarity_filter:
      stem_lib[k] = k
  # deep copy so the caller's dataframe is never modified in place
  df_out = copy.deepcopy(df)
  df_out[glycan_col_name] = [stemify_glycan(k, stem_lib = stem_lib,
                                            libr = libr) for k in df_out[glycan_col_name].values.tolist()]
  return df_out
def match_composition(composition, group, level, df = None,
                      mode = "minimal", libr = None, glycans = None,
                      relaxed = False):
  """Given a monosaccharide composition, it returns all corresponding glycans\n
  | Arguments:
  | :-
  | composition (dict): a dictionary indicating the composition to match (for example {"Fuc":1, "Gal":1, "GlcNAc":1})
  | group (string): name of the Species, Genus, Family, Order, Class, Phylum, Kingdom, or Domain used to filter
  | level (string): Species, Genus, Family, Order, Class, Phylum, Kingdom, or Domain
  | df (dataframe): glycan dataframe for searching glycan structures; default:df_species
  | mode (string): can be "minimal" or "exact" to match glycans that contain at least the specified composition or glycans matching exactly the requirements
  | libr (list): sorted list of unique glycoletters observed in the glycans of our dataset; default:lib
  | glycans (list): custom list of glycans to check the composition in; default:None
  | relaxed (bool): specify if "minimal" means exact counts (False) or _at least_ (True); default:False\n
  | Returns:
  | :-
  | Returns list of glycans matching composition in IUPAC-condensed
  """
  if df is None:
    df = df_species
  if libr is None:
    libr = lib
  # restrict the search space to the requested taxonomic group
  filtered_df = df[df[level] == group]
  exact_composition = {}
  if mode == "minimal":
    # keep only composition keys that are known glycoletters
    for element in libr:
      if element in composition:
        exact_composition[element] = composition.get(element)
    if glycans is None:
      glycan_list = filtered_df.target.values.tolist()
    else:
      glycan_list = copy.deepcopy(glycans)
    to_remove = []
    output_list = glycan_list
    for glycan in glycan_list:
      for key in exact_composition:
        # count whole-word occurrences of the monosaccharide in the glycan
        glycan_count = sum(1 for _ in re.finditer(r'\b%s\b' % re.escape(key), glycan))
        if relaxed:
          # relaxed: glycan must contain AT LEAST the requested count
          if exact_composition[key] > glycan_count:
            to_remove.append(glycan)
        else:
          # strict: counts must match exactly
          if exact_composition[key] != glycan_count :
            to_remove.append(glycan)
    for element in to_remove:
      try :
        output_list.remove(element)
      except :
        # glycan may appear in to_remove multiple times; ignore repeats
        a = 1
    output_list = list(set(output_list))
    #print(str(len(output_list)) + " glycan structures match your composition.")
    #for element in output_list:
    # print(element)
  if mode == "exact":
    for element in libr:
      if element in composition:
        exact_composition[element] = composition.get(element)
    if glycans is None:
      glycan_list = filtered_df.target.values.tolist()
    else:
      glycan_list = glycans
    to_remove = []
    output_list = glycan_list
    for glycan in glycan_list:
      count_sum = 0
      for key in exact_composition :
        glycan_count = sum(1 for _ in re.finditer(r'\b%s\b' % re.escape(key), glycan))
        count_sum = count_sum + exact_composition[key]
        if exact_composition[key] != glycan_count:
          to_remove.append(glycan)
      # number of '(' + 1 equals the number of monosaccharides; the glycan
      # must contain exactly the requested total, nothing extra
      monosaccharide_number_in_glycan = glycan.count("(") + 1
      if monosaccharide_number_in_glycan != count_sum:
        to_remove.append(glycan)
    for element in to_remove:
      try :
        output_list.remove(element)
      except :
        a = 1
    output_list = list(set(output_list))
    #print(str(len(output_list)) + " glycan structures match your composition.")
    #for element in output_list:
    # print(element)
  return output_list
def match_composition_relaxed(composition, group, level, df = None,
                              mode = "exact", libr = None, reducing_end = None):
  """Given a coarse-grained monosaccharide composition (Hex, HexNAc, etc.), it returns all corresponding glycans\n
  | Arguments:
  | :-
  | composition (dict): a dictionary indicating the composition to match (for example {"Fuc":1, "Gal":1, "GlcNAc":1})
  | group (string): name of the Species, Genus, Family, Order, Class, Phylum, Kingdom, or Domain used to filter
  | level (string): Species, Genus, Family, Order, Class, Phylum, Kingdom, or Domain
  | df (dataframe): glycan dataframe for searching glycan structures; default:df_species
  | mode (string): can be "minimal" or "exact" to match glycans that contain at least the specified composition or glycans matching exactly the requirements; default:"exact"
  | libr (list): sorted list of unique glycoletters observed in the glycans of our dataset; default:lib
  | reducing_end (string): filters possible glycans by reducing end monosaccharide; default:None\n
  | Returns:
  | :-
  | Returns list of glycans matching composition in IUPAC-condensed
  """
  if df is None:
    df = df_species
  if reducing_end is not None:
    # keep only glycans whose rightmost (reducing-end) unit matches
    df = df[df.target.str.endswith(reducing_end)].reset_index(drop = True)
  if libr is None:
    libr = lib
  # three independent copies: the first two get consumed below, the third
  # is kept pristine for the final 'exact' bookkeeping
  input_composition = copy.deepcopy(composition)
  input_composition2 = copy.deepcopy(composition)
  original_composition = copy.deepcopy(composition)
  output_list = df[df[level] == group].target.values.tolist()
  # split the composition into specific monosaccharides (handled first)
  # and the coarse-grained classes Hex / dHex / HexNAc (expanded below)
  input_composition2.pop('Hex', None)
  input_composition2.pop('dHex', None)
  input_composition2.pop('HexNAc', None)
  if len(input_composition2)>0:
    output_list = match_composition(input_composition2, group, level, df = df,
                                    mode = 'minimal', libr = libr,
                                    glycans = output_list, relaxed = True)
  if 'Hex' in input_composition:
    # if a specific hexose is ALSO requested, counts must be 'at least'
    if any([j in input_composition for j in Hex]):
      relaxed = True
    else:
      relaxed = False
    # expand 'Hex':n into every multiset of n concrete hexoses
    hex_pool = list(combinations_with_replacement(Hex, input_composition['Hex']))
    hex_pool = [Counter(k) for k in hex_pool]
    input_composition.pop('Hex')
    output_list = [match_composition(k, group, level, df = df,
                                     mode = 'minimal', libr = libr,
                                     glycans = output_list, relaxed = relaxed) for k in hex_pool]
    output_list = list(set(unwrap(output_list)))
  if 'dHex' in input_composition:
    if any([j in input_composition for j in dHex]):
      relaxed = True
    else:
      relaxed = False
    dhex_pool = list(combinations_with_replacement(dHex, input_composition['dHex']))
    dhex_pool = [Counter(k) for k in dhex_pool]
    input_composition.pop('dHex')
    temp = [match_composition(k, group, level, df = df,
                              mode = 'minimal', libr = libr,
                              glycans = output_list, relaxed = relaxed) for k in dhex_pool]
    output_list = list(set(unwrap(temp)))
  if 'HexNAc' in input_composition:
    if any([j in input_composition for j in HexNAc]):
      relaxed = True
    else:
      relaxed = False
    hexnac_pool = list(combinations_with_replacement(HexNAc, input_composition['HexNAc']))
    hexnac_pool = [Counter(k) for k in hexnac_pool]
    input_composition.pop('HexNAc')
    temp = [match_composition(k, group, level, df = df,
                              mode = 'minimal', libr = libr,
                              glycans = output_list, relaxed = relaxed) for k in hexnac_pool]
    output_list = list(set(unwrap(temp)))
  if mode == 'exact':
    # enforce that the glycan has exactly the requested total size and
    # contains no monosaccharide types outside the requested classes
    monosaccharide_count = sum(original_composition.values())
    monosaccharide_types = list(original_composition.keys())
    if 'Hex' in original_composition:
      monosaccharide_types = monosaccharide_types + Hex
    if 'HexNAc' in original_composition:
      monosaccharide_types = monosaccharide_types + HexNAc
    if 'dHex' in original_composition:
      monosaccharide_types = monosaccharide_types + dHex
    output_list = [k for k in output_list if k.count('(') == monosaccharide_count-1]
    output_list = [k for k in output_list if not any([j not in monosaccharide_types for j in list(set(min_process_glycans([k])[0])) if j[0].isupper()])]
    # re-check exact counts of the specifically-named monosaccharides
    # from the classes NOT being coarse-grained in this branch
    if 'Hex' in original_composition and len(input_composition2)>0:
      output_list = [k for k in output_list if all([k.count(j) == original_composition[j] for j in HexNAc[:-1] if j in original_composition])]
      output_list = [k for k in output_list if all([k.count(j) == original_composition[j] for j in dHex[:-1] if j in original_composition])]
    elif 'dHex' in original_composition and len(input_composition2)>0:
      output_list = [k for k in output_list if all([k.count(j) == original_composition[j] for j in Hex[:-1] if j in original_composition])]
      output_list = [k for k in output_list if all([k.count(j) == original_composition[j] for j in HexNAc[:-1] if j in original_composition])]
    elif 'HexNAc' in original_composition and len(input_composition2)>0:
      output_list = [k for k in output_list if all([k.count(j) == original_composition[j] for j in Hex[:-1] if j in original_composition])]
      output_list = [k for k in output_list if all([k.count(j) == original_composition[j] for j in dHex[:-1] if j in original_composition])]
  return output_list
def condense_composition_matching(matched_composition, libr = None):
  """Given a list of glycans matching a composition, find the minimum number of glycans characterizing this set\n
  | Arguments:
  | :-
  | matched_composition (list): list of glycans matching to a composition
  | libr (list): sorted list of unique glycoletters observed in the glycans of our dataset; default:lib\n
  | Returns:
  | :-
  | Returns minimal list of glycans that match a composition
  """
  if libr is None:
    libr = lib
  # pairwise similarity matrix; 'bond' wildcards are treated as matching
  match_matrix = [[compare_glycans(k, j,libr = libr, wildcards = True,
                                   wildcard_list = [libr.index('bond')]) for j in matched_composition] for k in matched_composition]
  match_matrix = pd.DataFrame(match_matrix)
  match_matrix.columns = matched_composition
  # group mutually-matching glycans; eps=1/min_samples=1 means every
  # glycan lands in some cluster (singletons allowed)
  clustering = DBSCAN(eps = 1, min_samples = 1).fit(match_matrix)
  cluster_labels = clustering.labels_
  num_clusters = len(list(set(cluster_labels)))
  sum_glycans = []
  for k in range(num_clusters):
    cluster_glycans = [matched_composition[j] for j in range(len(cluster_labels)) if cluster_labels[j] == k]
    #print(cluster_glycans)
    #idx = np.argmin([j.count('bond') for j in cluster_glycans])
    # pick the most specific representative(s): fewest 'bond' wildcards
    county = [j.count('bond') for j in cluster_glycans]
    idx = np.where(county == np.array(county).min())[0]
    if len(idx) == 1:
      sum_glycans.append(cluster_glycans[idx[0]])
    else:
      # ties: keep every equally-specific glycan
      for j in idx:
        sum_glycans.append(cluster_glycans[j])
      #sum_glycans.append(cluster_glycans[idx])
  #print("This matching can be summarized by " + str(num_clusters) + " glycans.")
  return sum_glycans
def compositions_to_structures(composition_list, abundances, group, level,
                               df = None, libr = None, reducing_end = None,
                               verbose = False):
  """wrapper function to map compositions to structures, condense them, and match them with relative intensities\n
  | Arguments:
  | :-
  | composition_list (list): list of composition dictionaries of the form {'Hex': 1, 'HexNAc': 1}
  | abundances (dataframe): every row one glycan (matching composition_list in order), every column one sample; pd.DataFrame([range(len(composition_list))]*2).T if not applicable
  | group (string): name of the Species, Genus, Family, Order, Class, Phylum, Kingdom, or Domain used to filter
  | level (string): Species, Genus, Family, Order, Class, Phylum, Kingdom, or Domain
  | df (dataframe): glycan dataframe for searching glycan structures; default:df_species
  | libr (list): sorted list of unique glycoletters observed in the glycans of our dataset; default:lib
  | reducing_end (string): filters possible glycans by reducing end monosaccharide; default:None
  | verbose (bool): whether to print any non-matching compositions; default:False\n
  | Returns:
  | :-
  | Returns dataframe of (matched structures) x (relative intensities)
  """
  if libr is None:
    libr = lib
  if df is None:
    df = df_species
  out_df = []
  not_matched = []
  for k in range(len(composition_list)):
    # find all structures for this composition, then reduce them to a
    # minimal representative set
    matched = match_composition_relaxed(composition_list[k], group, level,
                                        reducing_end = reducing_end, df = df, libr = libr)
    if len(matched)>0:
      condensed = condense_composition_matching(matched, libr = libr)
      # every representative structure inherits the composition's
      # abundance row (column 0 of `abundances` is skipped)
      matched_data = [abundances.iloc[k,1:].values.tolist()]*len(condensed)
      for ele in range(len(condensed)):
        out_df.append([condensed[ele]] + matched_data[ele])
    else:
      not_matched.append(composition_list[k])
  df_out = pd.DataFrame(out_df)
  print(str(len(not_matched)) + " compositions could not be matched. Run with verbose = True to see which compositions.")
  if verbose:
    print(not_matched)
  return df_out
def structures_to_motifs(df, libr = None, feature_set = ['exhaustive'],
                         form = 'wide'):
  """function to convert relative intensities of glycan structures to those of glycan motifs\n
  | Arguments:
  | :-
  | df (dataframe): function expects glycans in the first column and rel. intensities of each sample in a new column
  | libr (list): sorted list of unique glycoletters observed in the glycans of our dataset; default:lib
  | feature_set (list): which feature set to use for annotations, add more to list to expand; default is 'exhaustive'; options are: 'known' (hand-crafted glycan features), 'graph' (structural graph features of glycans) and 'exhaustive' (all mono- and disaccharide features)
  | form (string): whether to return 'wide' or 'long' dataframe; default:'wide'\n
  | Returns:
  | :-
  | Returns dataframe of motifs, relative intensities, and sample IDs
  """
  # NOTE(review): feature_set has a mutable default, but it is only read
  # here, never mutated, so the shared-default pitfall does not bite.
  if libr is None:
    libr = lib
  # binary motif-presence matrix: rows = glycans, columns = motifs
  annot = annotate_dataset(df.iloc[:,0].values.tolist(), libr = libr,
                           feature_set = feature_set, condense = True)
  annot2 = pd.concat([annot.reset_index(drop = True), df.iloc[:,1:]], axis = 1)
  out_tuples = []
  # emit one (motif, intensities...) row per motif occurrence per glycan
  for k in range(len(annot2)):
    for j in range(annot.shape[1]):
      if annot2.iloc[k,j]>0:
        out_tuples.append([annot2.columns.values.tolist()[j]] + df.iloc[k, 1:].values.tolist())
  motif_df = pd.DataFrame(out_tuples)
  # average intensities across all glycans containing the same motif
  motif_df = motif_df.groupby(motif_df.columns.values.tolist()[0]).mean().reset_index()
  if form == 'wide':
    motif_df.columns = ['glycan'] + ['sample'+str(k) for k in range(1, motif_df.shape[1])]
    return motif_df
  elif form == 'long':
    # stack (glycan, intensity) pairs per sample and tag with sample_id
    motif_df.columns = ['glycan'] + ['rel_intensity' for k in range(1, motif_df.shape[1])]
    sample_dfs = [pd.concat([motif_df.iloc[:,0], motif_df.iloc[:,k]], axis = 1) for k in range(1, motif_df.shape[1])]
    out = pd.concat(sample_dfs, axis = 0, ignore_index = True)
    out['sample_id'] = unwrap([[k]*len(sample_dfs[k]) for k in range(len(sample_dfs))])
    return out
def mask_rare_glycoletters(glycans, thresh_monosaccharides = None, thresh_linkages = None):
  """masks rare monosaccharides and linkages in a list of glycans\n
  | Arguments:
  | :-
  | glycans (list): list of glycans in IUPAC-condensed form
  | thresh_monosaccharides (int): threshold-value for monosaccharides seen as "rare"; default:(0.001*len(glycans))
  | thresh_linkages (int): threshold-value for linkages seen as "rare"; default:(0.03*len(glycans))\n
  | Returns:
  | :-
  | Returns list of glycans in IUPAC-condensed with masked rare monosaccharides and linkages
  """
  if thresh_monosaccharides is None:
    thresh_monosaccharides = int(np.ceil(0.001*len(glycans)))
  if thresh_linkages is None:
    thresh_linkages = int(np.ceil(0.03*len(glycans)))
  rares = unwrap(min_process_glycans(glycans))
  rare_linkages, rare_monosaccharides = [], []
  # partition glycoletters: index 0 if not a linkage, index 1 if it is
  for x in rares:
    (rare_monosaccharides, rare_linkages)[x in linkages].append(x)
  rares = [rare_monosaccharides, rare_linkages]
  thresh = [thresh_monosaccharides, thresh_linkages]
  # keep only the glycoletters occurring at most `thresh` times (the rare ones)
  rares = [list({x: count for x, count in Counter(rares[k]).items() if count <= thresh[k]}.keys()) for k in range(len(rares))]
  try:
    # empty-string artifact from processing, if present, is not a real letter
    rares[0].remove('')
  except:
    pass
  out = []
  for k in glycans:
    for j in rares[0]:
      # '-'+j guards against matching j as the tail of a longer letter
      if (j in k) and ('-'+j not in k):
        k = k.replace(j+'(', 'monosaccharide(')
        if k.endswith(j):
          k = re.sub(j+'$', 'monosaccharide', k)
    for m in rares[1]:
      if m in k:
        # keep only the anomeric position, wildcard the rest of the linkage
        if m[1] == '1':
          k = k.replace(m, '?1-?')
        else:
          k = k.replace(m, '?2-?')
    out.append(k)
  return out
| 45.399645
| 273
| 0.645696
|
acfe9c7a8753dc3128af8acf0be531cfc2500dca
| 247
|
py
|
Python
|
old/buttontest.py
|
gumbald/photobooth
|
930527e3f9232a7bbf637d445f4e5ce77d5cb0ec
|
[
"MIT"
] | null | null | null |
old/buttontest.py
|
gumbald/photobooth
|
930527e3f9232a7bbf637d445f4e5ce77d5cb0ec
|
[
"MIT"
] | null | null | null |
old/buttontest.py
|
gumbald/photobooth
|
930527e3f9232a7bbf637d445f4e5ce77d5cb0ec
|
[
"MIT"
] | null | null | null |
import RPi.GPIO as GPIO
import time
# Poll a push button on BCM pin 5 (internal pull-up, so pressed == LOW)
# and print a greeting while it is held down.
GPIO.setmode(GPIO.BCM)
GPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_UP)

try:
    while True:
        if not GPIO.input(5):
            print("Hello")
        # sleep on EVERY iteration — previously the sleep only ran while
        # the button was pressed, so the idle loop busy-spun at 100% CPU
        time.sleep(0.2)
except KeyboardInterrupt:
    pass
finally:
    # release the pin configuration on exit
    GPIO.cleanup()
| 19
| 48
| 0.623482
|
acfe9ce57a0b07f75ae85db86dd680efbfc36463
| 826
|
py
|
Python
|
src/services/core/service_loader.py
|
Telsanyr/Johnny_VI
|
db9f54457c33b1f70616671611e6bc4fc4c44d1b
|
[
"WTFPL"
] | 6
|
2018-08-07T14:57:58.000Z
|
2020-02-13T18:43:49.000Z
|
src/services/core/service_loader.py
|
Telsanyr/Johnny_VI
|
db9f54457c33b1f70616671611e6bc4fc4c44d1b
|
[
"WTFPL"
] | 2
|
2018-08-08T12:12:28.000Z
|
2018-08-23T12:46:25.000Z
|
src/services/core/service_loader.py
|
Telsanyr/Johnny_VI
|
db9f54457c33b1f70616671611e6bc4fc4c44d1b
|
[
"WTFPL"
] | 1
|
2018-08-07T14:51:15.000Z
|
2018-08-07T14:51:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# coding: utf8
# Module loader for the "core" service module.
#
# All core files are loaded into a single module ("core_module"), so every
# class and global variable defined by any core file is reachable from every
# other core file without an explicit import. Files are re-read each time the
# loader runs, so code changes are picked up on reload.
#
# Caveat: because everything shares one module namespace, a new or modified
# file must not redefine a class/variable that another core file already
# provides, or it will silently override it.
# Libs imports
# NOTE(review): `imp` is deprecated since Python 3.4 and removed in 3.12;
# importlib.machinery.SourceFileLoader is the modern replacement — confirm
# target interpreter version before migrating.
import imp
# "core_module" is built from all files in core folder (apart from the loader)
SERVICE_PATH = "./src/services/core/"
Service = imp.load_source("core_module", SERVICE_PATH + "core.py").Service
| 41.3
| 80
| 0.751816
|
acfe9f4e0aefbb7c974bcb3beaf946d90910c093
| 14,313
|
py
|
Python
|
grama/fit/fit_scikitlearn.py
|
Riya-1/py_grama
|
caafeac418ce0014b477e6feded06ccc1592b94d
|
[
"MIT"
] | 13
|
2020-02-24T16:51:51.000Z
|
2022-03-30T18:56:55.000Z
|
grama/fit/fit_scikitlearn.py
|
zdelrosario/py_grama
|
43f1a76dc93dd33f02e8a7f8de3323894beefed0
|
[
"MIT"
] | 78
|
2019-12-30T19:13:21.000Z
|
2022-02-23T18:17:54.000Z
|
grama/fit/fit_scikitlearn.py
|
Riya-1/py_grama
|
caafeac418ce0014b477e6feded06ccc1592b94d
|
[
"MIT"
] | 7
|
2020-10-19T17:49:25.000Z
|
2021-08-15T20:46:52.000Z
|
__all__ = [
"fit_gp",
"ft_gp",
"fit_lm",
"ft_lm",
"fit_rf",
"ft_rf",
"fit_kmeans",
"ft_kmeans",
]
## Fitting via sklearn package
try:
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Kernel, RBF, ConstantKernel as Con
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
except ModuleNotFoundError:
raise ModuleNotFoundError("module sklearn not found")
import grama as gr
from copy import deepcopy
from grama import add_pipe, pipe
from pandas import concat, DataFrame, Series
from toolz import curry
from warnings import filterwarnings
## Helper functions and classes
# --------------------------------------------------
def standardize_cols(df, ser_min, ser_max, var):
    """Return a copy of *df* with the columns in *var* min-max scaled to [0, 1].

    @pre set(ser_min.index) == set(ser_max.index)
    """
    scaled = df.copy()
    for name in var:
        span = ser_max[name] - ser_min[name]
        # Guard a (near-)constant column against division by ~zero.
        if span < 1e-16:
            span = 1
        scaled[name] = (scaled[name] - ser_min[name]) / span
    return scaled
def restore_cols(df, ser_min, ser_max, var):
    """Invert standardize_cols: map the columns in *var* back to original units.

    @pre set(ser_min.index) == set(ser_max.index)
    """
    restored = df.copy()
    for name in var:
        span = ser_max[name] - ser_min[name]
        # Mirror the standardize_cols guard (constant column -> span of 1).
        if span < 1e-16:
            span = 1
        restored[name] = span * df[name] + ser_min[name]
    return restored
class FunctionGPR(gr.Function):
    """Grama function wrapping a fitted sklearn GaussianProcessRegressor.

    For each "natural" output `y` the function exposes two outputs: `y_mean`
    (posterior mean) and `y_sd` (posterior standard deviation). Inputs are
    min-max standardized with `var_min`/`var_max` before prediction, matching
    the scaling applied at fit time.
    """

    def __init__(self, gpr, var, out, name, runtime, var_min, var_max):
        self.gpr = gpr
        self.var = var
        ## "Natural" outputs; what we're modeling
        self.out_nat = out
        ## Predicted outputs; mean and std
        self.out_mean = [s + "_mean" for s in out]
        self.out_sd = [s + "_sd" for s in out]
        self.out = self.out_mean + self.out_sd
        self.name = name
        self.runtime = runtime
        ## Per-input min/max used to standardize inputs at eval time
        self.var_min = var_min
        self.var_max = var_max

    def eval(self, df):
        """Evaluate the GP at the rows of *df*; returns mean and sd columns."""
        ## Check invariant; model inputs must be subset of df columns
        if not set(self.var).issubset(set(df.columns)):
            raise ValueError(
                "Model function `{}` var not a subset of given columns".format(
                    self.name
                )
            )

        df_sd = standardize_cols(df, self.var_min, self.var_max, self.var)
        y, y_sd = self.gpr.predict(df_sd[self.var], return_std=True)

        return concat(
            (
                DataFrame(data=y, columns=self.out_mean),
                DataFrame(data=y_sd, columns=self.out_sd),
            ),
            axis=1,
        )

    def copy(self):
        ## BUGFIX: the previous implementation referenced self.df_train — an
        ## attribute that is never set (its assignment is commented out) — and
        ## passed arguments that no longer match __init__'s signature, so any
        ## call raised AttributeError. Rebuild with the current signature.
        return FunctionGPR(
            self.gpr,
            self.var,
            self.out_nat,
            self.name,
            self.runtime,
            self.var_min,
            self.var_max,
        )
class FunctionRegressor(gr.Function):
    """Grama function wrapping a generic fitted sklearn regressor.

    Exposes one `<out>_mean` column per modeled output.
    """

    def __init__(self, regressor, var, out, name, runtime):
        """
        Args:
            regressor (scikit Regressor):
        """
        self.regressor = regressor
        self.var = var
        self.out = [s + "_mean" for s in out]
        self.name = name
        self.runtime = runtime

    def eval(self, df):
        """Predict at the rows of *df*; returns one `_mean` column per output."""
        ## Check invariant; model inputs must be subset of df columns
        if not set(self.var) <= set(df.columns):
            raise ValueError(
                "Model function `{}` var not a subset of given columns".format(
                    self.name
                )
            )

        ## Predict
        predictions = self.regressor.predict(df[self.var])
        return DataFrame(data=predictions, columns=self.out)
## Fit GP model with sklearn
# --------------------------------------------------
@curry
def fit_gp(
    df,
    md=None,
    var=None,
    out=None,
    domain=None,
    density=None,
    kernels=None,
    seed=None,
    suppress_warnings=True,
    n_restart=5,
    alpha=1e-10,
):
    r"""Fit a gaussian process

    Fit a gaussian process to given data. Specify var and out, or inherit from
    an existing model.

    Note that the new model will have two outputs `y_mean, y_sd` for each
    original output `y`. The quantity `y_mean` is the best-fit value, while
    `y_sd` is a measure of predictive uncertainty.

    Args:
        df (DataFrame): Data for function fitting
        md (gr.Model): Model from which to inherit metadata
        var (list(str) or None): List of features or None for all except outputs
        out (list(str)): List of outputs to fit
        domain (gr.Domain): Domain for new model
        density (gr.Density): Density for new model
        seed (int or None): Random seed for fitting process
        kernels (sklearn.gaussian_process.kernels.Kernel or dict or None):
            Kernel for GP; a single kernel applies to every output, a dict
            maps output name -> kernel, None uses sklearn's default
        n_restart (int): Restarts for optimization
        alpha (float or iterable): Value added to diagonal of kernel matrix
        suppress_warnings (bool): Suppress warnings when fitting?

    Returns:
        gr.Model: A grama model with fitted function(s)

    Notes:
        - Wrapper for sklearn.gaussian_process.GaussianProcessRegressor

    """
    if suppress_warnings:
        filterwarnings("ignore")

    ## Infer fitting metadata, if available
    if not (md is None):
        domain = md.domain
        density = md.density
        out = md.out

    ## Check invariants
    if not set(out).issubset(set(df.columns)):
        raise ValueError("out must be subset of df.columns")
    ## Default input value
    if var is None:
        var = list(set(df.columns).difference(set(out)))
    ## Check more invariants
    set_inter = set(out).intersection(set(var))
    if len(set_inter) > 0:
        raise ValueError(
            "out and var must be disjoint; intersect = {}".format(set_inter)
        )
    if not set(var).issubset(set(df.columns)):
        raise ValueError("var must be subset of df.columns")

    ## Pre-process kernel selection; normalize to a dict keyed by output
    if kernels is None:
        kernels = {o: None for o in out}
    elif isinstance(kernels, Kernel):
        kernels = {o: kernels for o in out}

    ## Pre-process data; standardize inputs to [0, 1] for GP conditioning
    var_min = df[var].min()
    var_max = df[var].max()
    df_sd = standardize_cols(df, var_min, var_max, var)

    ## Construct gaussian process for each output
    functions = []

    for output in out:
        # Define and fit model; deepcopy keeps a caller-supplied kernel
        # object from being mutated by the fit
        gpr = GaussianProcessRegressor(
            kernel=deepcopy(kernels[output]),
            random_state=seed,
            normalize_y=True,
            copy_X_train=True,
            n_restarts_optimizer=n_restart,
            alpha=alpha,
        )
        gpr.fit(df_sd[var], df_sd[output])
        name = "GP ({})".format(str(gpr.kernel_))
        fun = FunctionGPR(gpr, var, [output], name, 0, var_min, var_max)
        functions.append(fun)

    ## Construct model
    return gr.Model(functions=functions, domain=domain, density=density)


ft_gp = add_pipe(fit_gp)
## Fit random forest model with sklearn
# --------------------------------------------------
@curry
def fit_rf(
    df,
    md=None,
    var=None,
    out=None,
    domain=None,
    density=None,
    seed=None,
    suppress_warnings=True,
    **kwargs
):
    r"""Fit a random forest

    Fit a random forest to given data. Specify inputs and outputs, or inherit
    from an existing model.

    Args:
        df (DataFrame): Data for function fitting
        md (gr.Model): Model from which to inherit metadata
        var (list(str) or None): List of features or None for all except outputs
        out (list(str)): List of outputs to fit
        domain (gr.Domain): Domain for new model
        density (gr.Density): Density for new model
        seed (int or None): Random seed for fitting process
        suppress_warnings (bool): Suppress warnings when fitting?

    Keyword Arguments:
        n_estimators (int):
        criterion (int):
        max_depth (int or None):
        min_samples_split (int, float):
        min_samples_leaf (int, float):
        min_weight_fraction_leaf (float):
        max_features (int, float, string):
        max_leaf_nodes (int or None):
        min_impurity_decrease (float):
        min_impurity_split (float):
        bootstrap (bool):
        oob_score (bool):
        n_jobs (int or None):
        random_state (int):

    Returns:
        gr.Model: A grama model with fitted function(s)

    Notes:
        - Wrapper for sklearn.ensemble.RandomForestRegressor

    """
    if suppress_warnings:
        filterwarnings("ignore")

    ## Infer fitting metadata, if available
    if not (md is None):
        domain = md.domain
        density = md.density
        out = md.out

    ## Check invariants
    if not set(out).issubset(set(df.columns)):
        raise ValueError("out must be subset of df.columns")
    ## Default input value
    if var is None:
        var = list(set(df.columns).difference(set(out)))
    ## Check more invariants
    set_inter = set(out).intersection(set(var))
    if len(set_inter) > 0:
        raise ValueError(
            "outputs and inputs must be disjoint; intersect = {}".format(set_inter)
        )
    if not set(var).issubset(set(df.columns)):
        raise ValueError("var must be subset of df.columns")

    ## Construct a random forest for each output
    functions = []

    for output in out:
        rf = RandomForestRegressor(random_state=seed, **kwargs)
        rf.fit(df[var], df[output])
        name = "RF"
        fun = FunctionRegressor(rf, var, [output], name, 0)
        functions.append(fun)

    ## Construct model
    return gr.Model(functions=functions, domain=domain, density=density)


ft_rf = add_pipe(fit_rf)
## Fit linear model with sklearn
# --------------------------------------------------
@curry
def fit_lm(
    df,
    md=None,
    var=None,
    out=None,
    domain=None,
    density=None,
    seed=None,
    suppress_warnings=True,
    **kwargs
):
    r"""Fit a linear model

    Fit a linear model to given data. Specify inputs and outputs, or inherit
    from an existing model.

    Args:
        df (DataFrame): Data for function fitting
        md (gr.Model): Model from which to inherit metadata
        var (list(str) or None): List of features or None for all except outputs
        out (list(str)): List of outputs to fit
        domain (gr.Domain): Domain for new model
        density (gr.Density): Density for new model
        seed (int or None): Random seed for fitting process
        suppress_warnings (bool): Suppress warnings when fitting?

    Returns:
        gr.Model: A grama model with fitted function(s)

    Notes:
        - Wrapper for sklearn.linear_model.LinearRegression

    """
    if suppress_warnings:
        filterwarnings("ignore")

    ## Infer fitting metadata, if available
    if not (md is None):
        domain = md.domain
        density = md.density
        out = md.out

    ## Check invariants
    if not set(out).issubset(set(df.columns)):
        raise ValueError("out must be subset of df.columns")
    ## Default input value
    if var is None:
        var = list(set(df.columns).difference(set(out)))
    ## Check more invariants
    set_inter = set(out).intersection(set(var))
    if len(set_inter) > 0:
        raise ValueError(
            "outputs and inputs must be disjoint; intersect = {}".format(set_inter)
        )
    if not set(var).issubset(set(df.columns)):
        raise ValueError("var must be subset of df.columns")

    ## Construct a linear model for each output
    functions = []

    for output in out:
        lm = LinearRegression(**kwargs)
        lm.fit(df[var], df[output])
        name = "LM"
        fun = FunctionRegressor(lm, var, [output], name, 0)
        functions.append(fun)

    ## Construct model
    return gr.Model(functions=functions, domain=domain, density=density)


ft_lm = add_pipe(fit_lm)
## Fit kmeans clustering model
# --------------------------------------------------
@curry
def fit_kmeans(df, var=None, colname="cluster_id", seed=None, **kwargs):
    r"""K-means cluster a dataset

    Fit a K-means clustering on the selected columns of *df* and wrap the
    fitted labeler as a grama model with a single output column.

    Args:
        df (DataFrame): Data to cluster
        var (list or None): Variables in df on which to cluster. Use None to
            cluster on all variables.
        colname (string): Name of cluster id; will be output in cluster model.
        seed (int): Random seed for kmeans clustering

    Kwargs:
        n_clusters (int): Number of clusters to fit
        random_state (int or None):

    Returns:
        gr.Model: Model that labels input data

    Notes:
        - A wrapper for sklearn.cluster.KMeans

    References:
        Scikit-learn: Machine Learning in Python, Pedregosa et al. JMLR 12, pp. 2825-2830, 2011.
    """
    ## Resolve the clustering variables; copy so callers' lists stay untouched
    if var is None:
        var = list(df.columns).copy()
    else:
        var = list(var).copy()
        missing = set(var) - set(df.columns)
        if missing:
            raise ValueError(
                "`var` must be subset of `df.columns`\n" "diff = {}".format(missing)
            )

    ## Generate clustering
    kmeans = KMeans(random_state=seed, **kwargs).fit(df[var].values)

    ## Build grama model
    def fun_cluster(df):
        labels = kmeans.predict(df[var].values)
        return DataFrame(data={colname: labels})

    return gr.Model() >> gr.cp_vec_function(fun=fun_cluster, var=var, out=[colname])


ft_kmeans = add_pipe(fit_kmeans)
| 28.915152
| 96
| 0.593866
|
acfe9f5610b93f98ec0936a6d94913b428f06b23
| 425
|
py
|
Python
|
2021/3-1.py
|
lkesteloot/advent-of-code
|
4372e76602e16c69db8c227081b334d58455af1f
|
[
"MIT"
] | null | null | null |
2021/3-1.py
|
lkesteloot/advent-of-code
|
4372e76602e16c69db8c227081b334d58455af1f
|
[
"MIT"
] | null | null | null |
2021/3-1.py
|
lkesteloot/advent-of-code
|
4372e76602e16c69db8c227081b334d58455af1f
|
[
"MIT"
] | null | null | null |
# Advent of Code 2021 day 3, part 1: gamma takes each bit position's
# majority value, epsilon the minority; the answer is their product.
rows = [[int(ch) for ch in line.strip()] for line in open("input-3.txt")]
count = len(rows)
half = count // 2

# Column-wise totals of set bits.
sums = [sum(column) for column in zip(*rows)]
print(sums, count)

gamma = int("".join("1" if s > half else "0" for s in sums), 2)
epsilon = int("".join("1" if s < half else "0" for s in sums), 2)
print(gamma, epsilon, gamma*epsilon)
| 21.25
| 75
| 0.6
|
acfe9fce76273a7e6d4a0611a05499ee73cc3ade
| 2,000
|
py
|
Python
|
net/HyperIMxd.py
|
bcol23/HyperIM
|
c257d1c6296e399e734110d54d0ccd5fc91b5c02
|
[
"MIT"
] | 44
|
2019-05-27T07:00:36.000Z
|
2021-12-06T14:05:56.000Z
|
net/HyperIMxd.py
|
dragon9001/HyperIM
|
c257d1c6296e399e734110d54d0ccd5fc91b5c02
|
[
"MIT"
] | 6
|
2019-06-02T16:43:49.000Z
|
2021-11-17T03:37:19.000Z
|
net/HyperIMxd.py
|
dragon9001/HyperIM
|
c257d1c6296e399e734110d54d0ccd5fc91b5c02
|
[
"MIT"
] | 10
|
2019-06-05T08:20:10.000Z
|
2021-08-09T11:49:25.000Z
|
import sys
sys.path.append('..')
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import geoopt as gt
from .hypernnxd import *
from util.hyperop import poinc_dist
class HyperIM(nn.Module):
    """Hyperbolic interaction model.

    Word and label embeddings are parameters on the Poincare ball; per
    word/label interaction scores are hyperbolic distances, summed over the
    embedding axes and fed through a two-layer scoring head.
    """

    def __init__(self, feature_num, word_embed, label_embed, d_ball=2, hidden_size=5, if_gru=True,
                 default_dtype=th.float64, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.d_ball = d_ball
        # Manifold parameters so optimization stays on the Poincare ball
        self.word_embed = gt.ManifoldParameter(word_embed, manifold=gt.PoincareBall())
        self.label_embed = gt.ManifoldParameter(label_embed, manifold=gt.PoincareBall())
        self.default_dtype = default_dtype

        # Sequence encoder: hyperbolic GRU or plain hyperbolic RNN
        if if_gru:
            self.rnn = hyperGRU(input_size=word_embed.shape[1], hidden_size=self.hidden_size,
                                d_ball=self.d_ball, default_dtype=self.default_dtype)
        else:
            self.rnn = hyperRNN(input_size=word_embed.shape[1], hidden_size=self.hidden_size,
                                d_ball=self.d_ball, default_dtype=self.default_dtype)

        self.dense_1 = nn.Linear(feature_num, int(feature_num/2))
        self.dense_2 = nn.Linear(int(feature_num/2), 1)

    def forward(self, x, label=None):
        # x: word indices into word_embed — assumed (batch, seq); TODO confirm
        word_embed = self.word_embed[x]
        encode = self.rnn(word_embed)
        encode = encode.unsqueeze(dim=2)

        # BUGFIX: compare with `is None` instead of `== None`; `==` on a
        # tensor argument performs an elementwise comparison rather than an
        # identity test and is fragile across torch versions.
        if label is None:
            # Score against every label
            encode = encode.expand(-1, -1, self.label_embed.shape[0], -1, -1)
            interaction = poinc_dist(encode, self.label_embed.expand_as(encode))
        else:
            # Score against the requested label subset only
            encode = encode.expand(-1, -1, len(label), -1, -1)
            interaction = poinc_dist(encode, self.label_embed[label].expand_as(encode))

        interaction = interaction.squeeze(dim=-1).sum(dim=-1).transpose(1, 2)
        out = F.relu(self.dense_1(interaction))
        out = self.dense_2(out).squeeze(dim=-1)

        return out
| 36.363636
| 114
| 0.6165
|
acfea023c24916e487df0187a4e457da80a4a78a
| 4,116
|
py
|
Python
|
build_binary_dist.py
|
Nukem9/IDASkins
|
989690fffe8a7df78dc08f927cf8120f58bbfd58
|
[
"MIT"
] | 37
|
2015-05-25T19:14:01.000Z
|
2021-11-18T14:05:11.000Z
|
build_binary_dist.py
|
nihilus/ida-skins
|
ee881f39798e8cfc2908246ccd150cb5b6702f5f
|
[
"MIT"
] | 5
|
2015-08-27T15:10:25.000Z
|
2017-06-02T01:28:13.000Z
|
build_binary_dist.py
|
nihilus/ida-skins
|
ee881f39798e8cfc2908246ccd150cb5b6702f5f
|
[
"MIT"
] | 7
|
2015-06-02T16:50:01.000Z
|
2020-09-04T20:36:40.000Z
|
#!/usr/bin/env python
"""
This script build the binary distribution for the Windows version of IDA
PRO for multiple IDA versions in one batch.
The MIT License (MIT)
Copyright (c) 2014 athre0z
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import errno
import argparse
from subprocess import Popen, PIPE
from distutils.spawn import find_executable
if __name__ == '__main__':
    #
    # Parse arguments
    #
    parser = argparse.ArgumentParser(
        description='Batch build script creating the plugin for multiple IDA versions')
    parser.add_argument('ida_sdks_path', type=str,
        help='Path containing the IDA SDKs for the desired IDA target versions')
    parser.add_argument('--cmake_args', default='', type=str,
        help='Additional arguments passed to cmake', nargs='?')
    parser.add_argument('target_versions', metavar='target_version', type=str,
        nargs='+')
    args = parser.parse_args()

    def print_usage(error=None):
        """Print CLI usage plus an optional error message, then abort."""
        parser.print_usage()
        if error:
            print(error)
        # BUGFIX: exit with a non-zero status so shells/CI see the failure
        # (the original bare exit() reported success).
        exit(1)

    # Parse "major.minor" strings into (major, minor) int tuples.
    target_versions = []
    for cur_version in args.target_versions:
        cur_version = cur_version.strip().split('.')
        try:
            target_versions.append((int(cur_version[0]), int(cur_version[1])))
        except (ValueError, IndexError):
            print_usage('[-] Invalid version format, expected something like "6.5"')

    #
    # Find tools
    #
    # NOTE(review): distutils is deprecated and removed in Python 3.12;
    # shutil.which is the drop-in replacement for find_executable.
    cmake_bin = find_executable('cmake')
    msbuild_bin = find_executable('MSBuild')
    if not cmake_bin:
        print_usage('[-] Unable to find cmake binary')
    if not msbuild_bin:
        print_usage('[-] Unable to find MSBuild (please use Visual Studio CMD)')

    #
    # Build targets: every (IDA version) x (32/64-bit) combination
    #
    for arch in (32, 64):
        for cur_target in target_versions:
            build_dir = 'build-{}.{}-{}'.format(*(cur_target + (arch,)))

            # Create the build dir; a pre-existing one is fine.
            try:
                os.mkdir(build_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

            # Configure
            proc = Popen([
                cmake_bin,
                '-Dida_sdk=' + os.path.join(args.ida_sdks_path, 'idasdk{}{}'.format(*cur_target)),
                '-G', 'Visual Studio 10',
                '-DCMAKE_INSTALL_PREFIX:PATH=../dist/IDA-{}.{}'.format(*cur_target),
            ] + args.cmake_args.split(' ') + ['..'] + (
                ['-DIDA_ARCH_64=TRUE'] if arch == 64 else []
            ), cwd=build_dir)
            if proc.wait() != 0:
                print('[-] CMake failed, giving up.')
                exit(1)

            # Build
            proc = Popen([msbuild_bin, 'IDASkins.sln', '/p:Configuration=Release'], cwd=build_dir)
            if proc.wait() != 0:
                print('[-] MSBuild failed, giving up.')
                exit(1)

            # Install into the dist directory
            proc = Popen([msbuild_bin, 'INSTALL.vcxproj', '/p:Configuration=Release'],
                         cwd=build_dir)
            if proc.wait() != 0:
                print('[-] MSBuild INSTALL failed, giving up.')
                exit(1)

    print('[+] Done!')
| 38.111111
| 98
| 0.618319
|
acfea07e1c3d6cf7c3aed9d21b6d0260b728e4da
| 684
|
py
|
Python
|
formulario/migrations/0011_auto_20201128_0105.py
|
giuliocc/censo-querido-diario
|
0a83244d6c7a9de21f72ce1b441868bc517d0aec
|
[
"MIT"
] | 40
|
2020-09-09T00:21:03.000Z
|
2021-12-12T03:30:37.000Z
|
formulario/migrations/0011_auto_20201128_0105.py
|
giuliocc/censo-querido-diario
|
0a83244d6c7a9de21f72ce1b441868bc517d0aec
|
[
"MIT"
] | 41
|
2020-10-02T01:56:40.000Z
|
2022-03-23T14:08:22.000Z
|
formulario/migrations/0011_auto_20201128_0105.py
|
giuliocc/censo-querido-diario
|
0a83244d6c7a9de21f72ce1b441868bc517d0aec
|
[
"MIT"
] | 23
|
2020-09-30T19:15:45.000Z
|
2022-03-03T01:03:28.000Z
|
# Generated by Django 3.1.1 on 2020-11-28 01:05
from django.db import migrations
# Auto-generated Django migration; prefer regenerating over hand-editing.
class Migration(migrations.Migration):

    # Applies on top of the merge migration 0010 in the `formulario` app.
    dependencies = [
        ('formulario', '0010_merge_20201009_2009'),
    ]

    operations = [
        # Proxy model: no new table or columns, just an alternate Python-level
        # interface over the existing `formulario.mapeamento` model.
        migrations.CreateModel(
            name='MapeamentoValidado',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
                'constraints': [],
            },
            bases=('formulario.mapeamento',),
        ),
        # Column rename only; existing data is preserved by RenameField.
        migrations.RenameField(
            model_name='municipio',
            old_name='populacao_2010',
            new_name='populacao_2020',
        ),
    ]
| 22.8
| 51
| 0.501462
|
acfea12d733ddcd5d72e71bfee4249d314cc6e8d
| 27,953
|
py
|
Python
|
src/controller/python/chip/ChipDeviceCtrl.py
|
step0035/connectedhomeip
|
02bd2c7112cd4f375dc880cf9d81a1bab58da6c1
|
[
"Apache-2.0"
] | null | null | null |
src/controller/python/chip/ChipDeviceCtrl.py
|
step0035/connectedhomeip
|
02bd2c7112cd4f375dc880cf9d81a1bab58da6c1
|
[
"Apache-2.0"
] | null | null | null |
src/controller/python/chip/ChipDeviceCtrl.py
|
step0035/connectedhomeip
|
02bd2c7112cd4f375dc880cf9d81a1bab58da6c1
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2020-2021 Project CHIP Authors
# Copyright (c) 2019-2020 Google, LLC.
# Copyright (c) 2013-2018 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Python interface for Chip Device Manager
#
"""Chip Device Controller interface
"""
from __future__ import absolute_import
from __future__ import print_function
import asyncio
from ctypes import *
from .ChipStack import *
from .interaction_model import InteractionModelError, delegate as im
from .exceptions import *
from .clusters import Command as ClusterCommand
from .clusters import Attribute as ClusterAttribute
from .clusters import ClusterObjects as ClusterObjects
from .clusters import Objects as GeneratedObjects
from .clusters.CHIPClusters import *
import enum
import threading
import typing
__all__ = ["ChipDeviceController"]
_DevicePairingDelegate_OnPairingCompleteFunct = CFUNCTYPE(None, c_uint32)
_DevicePairingDelegate_OnCommissioningCompleteFunct = CFUNCTYPE(
None, c_uint64, c_uint32)
_DeviceAddressUpdateDelegate_OnUpdateComplete = CFUNCTYPE(
None, c_uint64, c_uint32)
# void (*)(Device *, CHIP_ERROR).
#
# CHIP_ERROR is actually signed, so using c_uint32 is weird, but everything
# else seems to do it.
_DeviceAvailableFunct = CFUNCTYPE(None, c_void_p, c_uint32)
# This is a fix for WEAV-429. Jay Logue recommends revisiting this at a later
# date to allow for truely multiple instances so this is temporary.
def _singleton(cls):
instance = [None]
def wrapper(*args, **kwargs):
if instance[0] is None:
instance[0] = cls(*args, **kwargs)
return instance[0]
return wrapper
class DCState(enum.IntEnum):
    # Lifecycle states of the device controller's pairing/commissioning flow.
    NOT_INITIALIZED = 0
    IDLE = 1
    BLE_READY = 2
    RENDEZVOUS_ONGOING = 3
    RENDEZVOUS_CONNECTED = 4
@_singleton
class ChipDeviceController(object):
    def __init__(self, startNetworkThread=True, controllerNodeId=0, bluetoothAdapter=None):
        """Create the native device controller and wire up pairing callbacks.

        Args:
            startNetworkThread: accepted for API compatibility; not read here.
            controllerNodeId: node id assigned to this controller.
            bluetoothAdapter: BLE adapter index; defaults to adapter 0.
        """
        self.state = DCState.NOT_INITIALIZED
        self.devCtrl = None
        if bluetoothAdapter is None:
            bluetoothAdapter = 0
        self._ChipStack = ChipStack(bluetoothAdapter=bluetoothAdapter)
        self._dmLib = None

        self._InitLib()

        # Ask the native library for a controller handle.
        devCtrl = c_void_p(None)
        res = self._dmLib.pychip_DeviceController_NewDeviceController(
            pointer(devCtrl), controllerNodeId)
        if res != 0:
            raise self._ChipStack.ErrorToException(res)

        self.devCtrl = devCtrl
        self._ChipStack.devCtrl = devCtrl

        self._Cluster = ChipClusters(self._ChipStack)
        self._Cluster.InitLib(self._dmLib)

        # Called when the PASE key exchange finishes; unblocks CallAsync().
        def HandleKeyExchangeComplete(err):
            if err != 0:
                print("Failed to establish secure session to device: {}".format(err))
                self._ChipStack.callbackRes = self._ChipStack.ErrorToException(
                    err)
            else:
                print("Secure Session to Device Established")
                self.state = DCState.IDLE
            self._ChipStack.completeEvent.set()

        # Called when an mDNS address update completes.
        def HandleAddressUpdateComplete(nodeid, err):
            if err != 0:
                print("Failed to update node address: {}".format(err))
                # Failed update address, don't wait for HandleCommissioningComplete
                self.state = DCState.IDLE
                self._ChipStack.callbackRes = err
                self._ChipStack.completeEvent.set()
            else:
                print("Node address has been updated")
                # Wait for HandleCommissioningComplete before setting
                # self._ChipStack.callbackRes; we're not done until that happens.

        # Called when the full commissioning flow finishes (success or not).
        def HandleCommissioningComplete(nodeid, err):
            if err != 0:
                print("Failed to commission: {}".format(err))
            else:
                print("Commissioning complete")
            self.state = DCState.IDLE
            self._ChipStack.callbackRes = err
            self._ChipStack.completeEvent.set()
            self._ChipStack.commissioningCompleteEvent.set()
            self._ChipStack.commissioningEventRes = err

        im.InitIMDelegate()
        ClusterCommand.Init(self)
        ClusterAttribute.Init()

        # Keep references to the CFUNCTYPE wrappers on self so they are not
        # garbage-collected while the native side still holds the pointers.
        self.cbHandleKeyExchangeCompleteFunct = _DevicePairingDelegate_OnPairingCompleteFunct(
            HandleKeyExchangeComplete)
        self._dmLib.pychip_ScriptDevicePairingDelegate_SetKeyExchangeCallback(
            self.devCtrl, self.cbHandleKeyExchangeCompleteFunct)

        self.cbHandleCommissioningCompleteFunct = _DevicePairingDelegate_OnCommissioningCompleteFunct(
            HandleCommissioningComplete)
        self._dmLib.pychip_ScriptDevicePairingDelegate_SetCommissioningCompleteCallback(
            self.devCtrl, self.cbHandleCommissioningCompleteFunct)

        self.cbOnAddressUpdateComplete = _DeviceAddressUpdateDelegate_OnUpdateComplete(
            HandleAddressUpdateComplete)
        self._dmLib.pychip_ScriptDeviceAddressUpdateDelegate_SetOnAddressUpdateComplete(
            self.cbOnAddressUpdateComplete)
        self.state = DCState.IDLE
    def __del__(self):
        # Release the native controller exactly once; clearing devCtrl guards
        # against a second delete if __del__ runs again.
        if self.devCtrl != None:
            self._dmLib.pychip_DeviceController_DeleteDeviceController(
                self.devCtrl)
            self.devCtrl = None
def IsConnected(self):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_IsConnected(
self.devCtrl)
)
def ConnectBle(self, bleConnection):
self._ChipStack.CallAsync(
lambda: self._dmLib.pychip_DeviceController_ValidateBTP(
self.devCtrl,
bleConnection,
self._ChipStack.cbHandleComplete,
self._ChipStack.cbHandleError,
)
)
def ConnectBLE(self, discriminator, setupPinCode, nodeid):
self.state = DCState.RENDEZVOUS_ONGOING
return self._ChipStack.CallAsync(
lambda: self._dmLib.pychip_DeviceController_ConnectBLE(
self.devCtrl, discriminator, setupPinCode, nodeid)
)
def CloseBLEConnection(self):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceCommissioner_CloseBleConnection(
self.devCtrl)
)
def CloseSession(self, nodeid):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_CloseSession(
self.devCtrl, nodeid)
)
    def ConnectIP(self, ipaddr, setupPinCode, nodeid):
        """Commission a device over IP; returns True only on full success."""
        # IP connection will run through full commissioning, so we need to wait
        # for the commissioning complete event, not just any callback.
        self.state = DCState.RENDEZVOUS_ONGOING
        self._ChipStack.CallAsync(
            lambda: self._dmLib.pychip_DeviceController_ConnectIP(
                self.devCtrl, ipaddr, setupPinCode, nodeid)
        )

        # Wait up to 5 additional seconds for the commissioning complete event
        if not self._ChipStack.commissioningCompleteEvent.isSet():
            self._ChipStack.commissioningCompleteEvent.wait(5.0)
        if not self._ChipStack.commissioningCompleteEvent.isSet():
            # Event never fired within the timeout -> commissioning failed.
            # Error 50 is a timeout
            return False
        return self._ChipStack.commissioningEventRes == 0
def ResolveNode(self, nodeid):
return self._ChipStack.CallAsync(
lambda: self._dmLib.pychip_DeviceController_UpdateDevice(
self.devCtrl, nodeid)
)
def GetAddressAndPort(self, nodeid):
address = create_string_buffer(64)
port = c_uint16(0)
error = self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_GetAddressAndPort(
self.devCtrl, nodeid, address, 64, pointer(port))
)
return (address.value.decode(), port.value) if error == 0 else None
def DiscoverCommissionableNodesLongDiscriminator(self, long_discriminator):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesLongDiscriminator(
self.devCtrl, long_discriminator)
)
def DiscoverCommissionableNodesShortDiscriminator(self, short_discriminator):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesShortDiscriminator(
self.devCtrl, short_discriminator)
)
def DiscoverCommissionableNodesVendor(self, vendor):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesVendor(
self.devCtrl, vendor)
)
def DiscoverCommissionableNodesDeviceType(self, device_type):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesDeviceType(
self.devCtrl, device_type)
)
def DiscoverCommissionableNodesCommissioningEnabled(self):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesCommissioningEnabled(
self.devCtrl)
)
def PrintDiscoveredDevices(self):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_PrintDiscoveredDevices(
self.devCtrl)
)
def ParseQRCode(self, qrCode, output):
print(output)
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_ParseQRCode(
qrCode, output)
)
def GetIPForDiscoveredDevice(self, idx, addrStr, length):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_GetIPForDiscoveredDevice(
self.devCtrl, idx, addrStr, length)
)
def DiscoverAllCommissioning(self):
return self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_DiscoverAllCommissionableNodes(
self.devCtrl)
)
def OpenCommissioningWindow(self, nodeid, timeout, iteration, discriminator, option):
res = self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_OpenCommissioningWindow(
self.devCtrl, nodeid, timeout, iteration, discriminator, option)
)
if res != 0:
raise self._ChipStack.ErrorToException(res)
def GetCompressedFabricId(self):
fabricid = c_uint64(0)
res = self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_GetCompressedFabricId(
self.devCtrl, pointer(fabricid))
)
if res == 0:
return fabricid.value
else:
raise self._ChipStack.ErrorToException(res)
def GetFabricId(self):
fabricid = c_uint64(0)
res = self._ChipStack.Call(
lambda: self._dmLib.pychip_DeviceController_GetFabricId(
self.devCtrl, pointer(fabricid))
)
if res == 0:
return fabricid.value
else:
raise self._ChipStack.ErrorToException(res)
def GetClusterHandler(self):
return self._Cluster
    def GetConnectedDeviceSync(self, nodeid):
        """Return a native device handle for *nodeid*, blocking until available.

        Prefers a device still being commissioned (PASE); otherwise resolves an
        operational connection and waits on a condition variable for the
        native callback to deliver the handle.
        """
        returnDevice = c_void_p(None)
        deviceAvailableCV = threading.Condition()

        # Invoked by the native layer once the device connection is ready.
        def DeviceAvailableCallback(device, err):
            nonlocal returnDevice
            nonlocal deviceAvailableCV
            with deviceAvailableCV:
                returnDevice = c_void_p(device)
                deviceAvailableCV.notify_all()
            if err != 0:
                print("Failed in getting the connected device: {}".format(err))
                raise self._ChipStack.ErrorToException(err)

        res = self._ChipStack.Call(lambda: self._dmLib.pychip_GetDeviceBeingCommissioned(
            self.devCtrl, nodeid, byref(returnDevice)))
        if res == 0:
            # TODO: give users more control over whether they want to send this command over a PASE established connection
            print('Using PASE connection')
            return returnDevice

        res = self._ChipStack.Call(lambda: self._dmLib.pychip_GetConnectedDeviceByNodeId(
            self.devCtrl, nodeid, _DeviceAvailableFunct(DeviceAvailableCallback)))
        if res != 0:
            raise self._ChipStack.ErrorToException(res)

        # The callback might have been received synchronously (during self._ChipStack.Call()).
        # Check if the device is already set before waiting for the callback.
        if returnDevice.value == None:
            with deviceAvailableCV:
                deviceAvailableCV.wait()

        if returnDevice.value == None:
            raise self._ChipStack.ErrorToException(CHIP_ERROR_INTERNAL)

        return returnDevice
async def SendCommand(self, nodeid: int, endpoint: int, payload: ClusterObjects.ClusterCommand, responseType=None):
    '''
    Send a cluster-object encapsulated command to a node and get returned a future that can be awaited upon to receive the response.
    If a valid responseType is passed in, that will be used to deserialize the object. If not, the type will be automatically deduced
    from the metadata received over the wire.
    '''
    eventLoop = asyncio.get_running_loop()
    future = eventLoop.create_future()

    # Blocks until a session (PASE or CASE) to the target node exists.
    device = self.GetConnectedDeviceSync(nodeid)
    # The native layer completes `future` on `eventLoop` when the response
    # (or an error) arrives.
    res = self._ChipStack.Call(
        lambda: ClusterCommand.SendCommand(
            future, eventLoop, responseType, device, ClusterCommand.CommandPath(
                EndpointId=endpoint,
                ClusterId=payload.cluster_id,
                CommandId=payload.command_id,
            ), payload)
    )
    if res != 0:
        # Synchronous scheduling failure: surface it through the future so
        # the awaiting caller sees the exception.
        future.set_exception(self._ChipStack.ErrorToException(res))
    return await future
async def WriteAttribute(self, nodeid: int, attributes: typing.List[typing.Tuple[int, ClusterObjects.ClusterAttributeDescriptor]]):
    '''
    Write a list of attributes on a target node.

    nodeId: Target's Node ID
    attributes: A list of tuples of type (endpoint, cluster-object):

    E.g
        (1, Clusters.TestCluster.Attributes.XYZAttribute('hello')) -- Write 'hello' to the XYZ attribute on the test cluster to endpoint 1
    '''
    eventLoop = asyncio.get_running_loop()
    future = eventLoop.create_future()

    # Blocks until a session to the target node exists.
    device = self.GetConnectedDeviceSync(nodeid)
    # Convert each (endpoint, cluster-object) pair into a native write
    # request; the attribute's payload is taken from the object's .value.
    attrs = []
    for v in attributes:
        attrs.append(ClusterAttribute.AttributeWriteRequest(
            v[0], v[1], v[1].value))

    res = self._ChipStack.Call(
        lambda: ClusterAttribute.WriteAttributes(
            future, eventLoop, device, attrs)
    )
    if res != 0:
        raise self._ChipStack.ErrorToException(res)
    return await future
async def ReadAttribute(self, nodeid: int, attributes: typing.List[typing.Union[
    None,  # Empty tuple, all wildcard
    typing.Tuple[int],  # Endpoint
    # Wildcard endpoint, Cluster id present
    typing.Tuple[typing.Type[ClusterObjects.Cluster]],
    # Wildcard endpoint, Cluster + Attribute present
    typing.Tuple[typing.Type[ClusterObjects.ClusterAttributeDescriptor]],
    # Wildcard attribute id
    typing.Tuple[int, typing.Type[ClusterObjects.Cluster]],
    # Concrete path
    typing.Tuple[int, typing.Type[ClusterObjects.ClusterAttributeDescriptor]]
]], reportInterval: typing.Tuple[int, int] = None):
    '''
    Read a list of attributes from a target node

    nodeId: Target's Node ID
    attributes: A list of tuples of varying types depending on the type of read being requested:
        (endpoint, Clusters.ClusterA.AttributeA): Endpoint = specific, Cluster = specific, Attribute = specific
        (endpoint, Clusters.ClusterA):            Endpoint = specific, Cluster = specific, Attribute = *
        (Clusters.ClusterA.AttributeA):           Endpoint = *,        Cluster = specific, Attribute = specific
        endpoint:                                 Endpoint = specific, Cluster = *,        Attribute = *
        Clusters.ClusterA:                        Endpoint = *,        Cluster = specific, Attribute = *
        '*' or ():                                Endpoint = *,        Cluster = *,        Attribute = *

    The cluster and attributes specified above are to be selected from the generated cluster objects.

    e.g.
        ReadAttribute(1, [ 1 ] )                                  -- case 4 above.
        ReadAttribute(1, [ Clusters.Basic ] )                     -- case 5 above.
        ReadAttribute(1, [ (1, Clusters.Basic.Attributes.Location ] ) -- case 1 above.

    reportInterval: A tuple of two int-s for (MinIntervalFloor, MaxIntervalCeiling). Used by establishing subscriptions.
        When not provided, a read request will be sent.
    '''
    eventLoop = asyncio.get_running_loop()
    future = eventLoop.create_future()

    # Blocks until a session to the target node exists.
    device = self.GetConnectedDeviceSync(nodeid)
    attrs = []
    for v in attributes:
        endpoint = None
        cluster = None
        attribute = None
        if v == ('*') or v == ():
            # Full wildcard: all endpoints, clusters and attributes.
            pass
        elif type(v) is not tuple:
            if type(v) is int:
                endpoint = v
            elif issubclass(v, ClusterObjects.Cluster):
                cluster = v
            elif issubclass(v, ClusterObjects.ClusterAttributeDescriptor):
                attribute = v
            else:
                raise ValueError("Unsupported Attribute Path")
        else:
            # endpoint + (cluster) attribute / endpoint + cluster
            endpoint = v[0]
            if issubclass(v[1], ClusterObjects.Cluster):
                cluster = v[1]
            # Bug fix: ClusterAttributeDescriptor is defined in
            # ClusterObjects, not in the ClusterAttribute module; the old
            # check (ClusterAttribute.ClusterAttributeDescriptor) raised
            # AttributeError for every concrete (endpoint, attribute) path.
            elif issubclass(v[1], ClusterObjects.ClusterAttributeDescriptor):
                attribute = v[1]
            else:
                raise ValueError("Unsupported Attribute Path")
        attrs.append(ClusterAttribute.AttributePath(
            EndpointId=endpoint, Cluster=cluster, Attribute=attribute))
    res = self._ChipStack.Call(
        lambda: ClusterAttribute.ReadAttributes(
            future, eventLoop, device, self, attrs,
            ClusterAttribute.SubscriptionParameters(
                reportInterval[0], reportInterval[1]) if reportInterval else None))
    if res != 0:
        raise self._ChipStack.ErrorToException(res)
    return await future
def ZCLSend(self, cluster, command, nodeid, endpoint, groupid, args, blocking=False):
    """Send a cluster command by name and wait for the response.

    Returns:
        (0, response) on success, (IM status, None) on an interaction
        model error.

    Raises:
        UnknownCommand: if the generated objects define no such
        cluster/command pair.
    """
    req = None
    try:
        # NOTE: cluster/command are interpolated into eval(); this is only
        # safe because they come from the interactive REPL, not from the
        # network.
        req = eval(
            f"GeneratedObjects.{cluster}.Commands.{command}")(**args)
    except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
        raise UnknownCommand(cluster, command)
    try:
        res = asyncio.run(self.SendCommand(nodeid, endpoint, req))
        print(f"CommandResponse {res}")
        return (0, res)
    except InteractionModelError as ex:
        return (int(ex.state), None)
def ZCLReadAttribute(self, cluster, attribute, nodeid, endpoint, groupid, blocking=True):
    """Read one attribute by cluster/attribute name and return the result.

    Raises:
        UnknownAttribute: if the generated objects define no such
        cluster/attribute pair.
    """
    req = None
    try:
        req = eval(f"GeneratedObjects.{cluster}.Attributes.{attribute}")
    except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
        raise UnknownAttribute(cluster, attribute)

    result = asyncio.run(self.ReadAttribute(nodeid, [(endpoint, req)]))
    path = ClusterAttribute.AttributePath(
        EndpointId=endpoint, Attribute=req)
    return im.AttributeReadResult(path=im.AttributePath(nodeId=nodeid, endpointId=path.EndpointId, clusterId=path.ClusterId, attributeId=path.AttributeId), status=0, value=result[path].Data.value)
def ZCLWriteAttribute(self, cluster: str, attribute: str, nodeid, endpoint, groupid, value, blocking=True):
    """Write one attribute by cluster/attribute name.

    Raises:
        UnknownAttribute: if the generated objects define no such
        cluster/attribute pair.
    """
    req = None
    try:
        req = eval(
            f"GeneratedObjects.{cluster}.Attributes.{attribute}")(value)
    except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
        raise UnknownAttribute(cluster, attribute)

    return asyncio.run(self.WriteAttribute(nodeid, [(endpoint, req)]))
def ZCLSubscribeAttribute(self, cluster, attribute, nodeid, endpoint, minInterval, maxInterval, blocking=True):
    """Subscribe to one attribute with the given report interval bounds.

    Raises:
        UnknownAttribute: if the generated objects define no such
        cluster/attribute pair.
    """
    req = None
    try:
        req = eval(f"GeneratedObjects.{cluster}.Attributes.{attribute}")
    except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
        raise UnknownAttribute(cluster, attribute)
    return asyncio.run(self.ReadAttribute(nodeid, [(endpoint, req)], reportInterval=(minInterval, maxInterval)))
def ZCLShutdownSubscription(self, subscriptionId: int):
    """Tear down the active subscription identified by subscriptionId."""
    status = self._ChipStack.Call(
        lambda: self._dmLib.pychip_InteractionModel_ShutdownSubscription(
            subscriptionId))
    if status != 0:
        raise self._ChipStack.ErrorToException(status)
def ZCLCommandList(self):
    """Return the mapping of known cluster commands from the cluster helper."""
    return self._Cluster.ListClusterCommands()
def ZCLAttributeList(self):
    """Return the mapping of known cluster attributes from the cluster helper."""
    return self._Cluster.ListClusterAttributes()
def SetLogFilter(self, category):
    """Set the native log filter category.

    Args:
        category: the log category, an unsigned 8-bit integer (0-255).

    Raises:
        ValueError: if category does not fit in an unsigned 8-bit integer.
    """
    # Fix: the previous bound (pow(2, 8) == 256) wrongly accepted 256,
    # which does not fit in a uint8; the valid range is 0..255.
    if category < 0 or category > 255:
        raise ValueError("category must be an unsigned 8-bit integer")
    self._ChipStack.Call(
        lambda: self._dmLib.pychip_DeviceController_SetLogFilter(category)
    )
def GetLogFilter(self):
    """Query the native log filter category.

    NOTE(review): the value returned by the native call is discarded, so
    this method always returns None -- presumably it should return the
    category; verify against the native API before relying on it.
    """
    self._ChipStack.Call(
        lambda: self._dmLib.pychip_DeviceController_GetLogFilter()
    )
def SetBlockingCB(self, blockingCB):
    """Install the callback the chip stack invokes for blocking calls."""
    self._ChipStack.blockingCB = blockingCB
# ----- Private Members -----
def _InitLib(self):
    """Load the native CHIP shared library (once) and declare ctypes
    signatures for every entry point this controller uses.

    Declaring argtypes/restype up front lets ctypes validate arguments and
    convert return values instead of defaulting everything to C int.
    """
    if self._dmLib is None:
        self._dmLib = CDLL(self._ChipStack.LocateChipDLL())

        # --- controller lifecycle ---
        self._dmLib.pychip_DeviceController_NewDeviceController.argtypes = [
            POINTER(c_void_p), c_uint64]
        self._dmLib.pychip_DeviceController_NewDeviceController.restype = c_uint32

        self._dmLib.pychip_DeviceController_DeleteDeviceController.argtypes = [
            c_void_p]
        self._dmLib.pychip_DeviceController_DeleteDeviceController.restype = c_uint32

        # --- commissioning transports ---
        self._dmLib.pychip_DeviceController_ConnectBLE.argtypes = [
            c_void_p, c_uint16, c_uint32, c_uint64]
        self._dmLib.pychip_DeviceController_ConnectBLE.restype = c_uint32

        # NOTE(review): ConnectIP is declared twice in this function; this
        # first declaration sets no restype (the later one does).
        self._dmLib.pychip_DeviceController_ConnectIP.argtypes = [
            c_void_p, c_char_p, c_uint32, c_uint64]

        # --- commissionable node discovery ---
        self._dmLib.pychip_DeviceController_DiscoverAllCommissionableNodes.argtypes = [
            c_void_p]
        self._dmLib.pychip_DeviceController_DiscoverAllCommissionableNodes.restype = c_uint32

        self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesLongDiscriminator.argtypes = [
            c_void_p, c_uint16]
        self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesLongDiscriminator.restype = c_uint32

        self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesShortDiscriminator.argtypes = [
            c_void_p, c_uint16]
        self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesShortDiscriminator.restype = c_uint32

        self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesVendor.argtypes = [
            c_void_p, c_uint16]
        self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesVendor.restype = c_uint32

        self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesDeviceType.argtypes = [
            c_void_p, c_uint16]
        self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesDeviceType.restype = c_uint32

        self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesCommissioningEnabled.argtypes = [
            c_void_p]
        self._dmLib.pychip_DeviceController_DiscoverCommissionableNodesCommissioningEnabled.restype = c_uint32

        self._dmLib.pychip_DeviceController_PrintDiscoveredDevices.argtypes = [
            c_void_p]

        self._dmLib.pychip_DeviceController_GetIPForDiscoveredDevice.argtypes = [
            c_void_p, c_int, c_char_p, c_uint32]
        self._dmLib.pychip_DeviceController_GetIPForDiscoveredDevice.restype = c_bool

        # Second (complete) ConnectIP declaration -- adds the restype.
        self._dmLib.pychip_DeviceController_ConnectIP.argtypes = [
            c_void_p, c_char_p, c_uint32, c_uint64]
        self._dmLib.pychip_DeviceController_ConnectIP.restype = c_uint32

        # --- session management ---
        self._dmLib.pychip_DeviceController_CloseSession.argtypes = [
            c_void_p, c_uint64]
        self._dmLib.pychip_DeviceController_CloseSession.restype = c_uint32

        self._dmLib.pychip_DeviceController_GetAddressAndPort.argtypes = [
            c_void_p, c_uint64, c_char_p, c_uint64, POINTER(c_uint16)]
        self._dmLib.pychip_DeviceController_GetAddressAndPort.restype = c_uint32

        # --- pairing / commissioning delegates (Python callbacks) ---
        self._dmLib.pychip_ScriptDevicePairingDelegate_SetKeyExchangeCallback.argtypes = [
            c_void_p, _DevicePairingDelegate_OnPairingCompleteFunct]
        self._dmLib.pychip_ScriptDevicePairingDelegate_SetKeyExchangeCallback.restype = c_uint32

        self._dmLib.pychip_ScriptDevicePairingDelegate_SetCommissioningCompleteCallback.argtypes = [
            c_void_p, _DevicePairingDelegate_OnCommissioningCompleteFunct]
        self._dmLib.pychip_ScriptDevicePairingDelegate_SetCommissioningCompleteCallback.restype = c_uint32

        self._dmLib.pychip_ScriptDeviceAddressUpdateDelegate_SetOnAddressUpdateComplete.argtypes = [
            _DeviceAddressUpdateDelegate_OnUpdateComplete]
        self._dmLib.pychip_ScriptDeviceAddressUpdateDelegate_SetOnAddressUpdateComplete.restype = None

        # --- device access ---
        self._dmLib.pychip_DeviceController_UpdateDevice.argtypes = [
            c_void_p, c_uint64]
        self._dmLib.pychip_DeviceController_UpdateDevice.restype = c_uint32

        self._dmLib.pychip_GetConnectedDeviceByNodeId.argtypes = [
            c_void_p, c_uint64, _DeviceAvailableFunct]
        self._dmLib.pychip_GetConnectedDeviceByNodeId.restype = c_uint32

        self._dmLib.pychip_GetDeviceBeingCommissioned.argtypes = [
            c_void_p, c_uint64, c_void_p]
        self._dmLib.pychip_GetDeviceBeingCommissioned.restype = c_uint32

        self._dmLib.pychip_DeviceCommissioner_CloseBleConnection.argtypes = [
            c_void_p]
        self._dmLib.pychip_DeviceCommissioner_CloseBleConnection.restype = c_uint32

        self._dmLib.pychip_GetCommandSenderHandle.argtypes = [c_void_p]
        self._dmLib.pychip_GetCommandSenderHandle.restype = c_uint64

        self._dmLib.pychip_DeviceController_GetCompressedFabricId.argtypes = [
            c_void_p, POINTER(c_uint64)]
        self._dmLib.pychip_DeviceController_GetCompressedFabricId.restype = c_uint32

        self._dmLib.pychip_DeviceController_OpenCommissioningWindow.argtypes = [
            c_void_p, c_uint64, c_uint16, c_uint16, c_uint16, c_uint8]
        self._dmLib.pychip_DeviceController_OpenCommissioningWindow.restype = c_uint32

        self._dmLib.pychip_InteractionModel_ShutdownSubscription.argtypes = [
            c_uint64]
        self._dmLib.pychip_InteractionModel_ShutdownSubscription.restype = c_uint32
| 42.034586
| 200
| 0.661933
|
acfea1486fc330bac99c5998491fdfbd341e4889
| 4,777
|
py
|
Python
|
pylayers/location/locarule.py
|
usmanwardag/pylayers
|
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
|
[
"MIT"
] | 143
|
2015-01-09T07:50:20.000Z
|
2022-03-02T11:26:53.000Z
|
pylayers/location/locarule.py
|
usmanwardag/pylayers
|
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
|
[
"MIT"
] | 148
|
2015-01-13T04:19:34.000Z
|
2022-03-11T23:48:25.000Z
|
pylayers/location/locarule.py
|
usmanwardag/pylayers
|
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
|
[
"MIT"
] | 95
|
2015-05-01T13:22:42.000Z
|
2022-03-15T11:22:28.000Z
|
from pylayers.util.project import *
import networkx as nx
import ConfigParser
import scipy.stats as sps
import pdb
##### global info PL_model
#config = ConfigParser.ConfigParser()
#config.read(basename+'/ini/EMSolver.ini')
#plm_opt = dict(config.items('PL_MODEL'))
#sigmaRSS = float(plm_opt['sigmarss'])# dBm !!!!!!
#f = float(plm_opt['f'])
#RSSnp = float(plm_opt['rssnp'])
#d0 = float(plm_opt['d0'])
#PL_method = plm_opt['method'] # mean, median , mode
class Take_all():
    """
    Take TOA and Pr for any RAT.
    """

    def take(self, net, RAT=None, LDP=None):
        """Collect the requested LDP values for the requested RATs.

        Parameters
        ----------
        net : Network
            network whose sub-networks are queried
        RAT : None, str or iterable of str
            if None, all RATs of the network are processed
        LDP : None, str or iterable of str
            if None, all LDPs of the network are processed

        Returns
        -------
        cd : dict
            cd[ldp][rat] -> edge-attribute items for that LDP/RAT pair.
        """
        cd = {}
        # select RAT: None -> all, str -> singleton list; anything else is
        # assumed to already be an iterable of RAT names (the previous
        # version raised NameError when a list was passed)
        if RAT is None:
            Rat = net.RAT
        elif isinstance(RAT, str):
            Rat = [RAT]
        else:
            Rat = RAT
        # select LDP (same convention as RAT)
        if LDP is None:
            Ldp = net.LDP
        elif isinstance(LDP, str):
            Ldp = [LDP]
        else:
            Ldp = LDP

        for ldp in Ldp:
            cd[ldp] = {}
            for rat in Rat:
                try:
                    cd[ldp][rat] = nx.get_edge_attributes(
                        net.SubNet[rat], ldp).items()
                except Exception:  # was a bare except
                    # the specified RAT doesn't exist in the PN
                    pass
        return cd
#class Take_all_Pr():
# """
# Take Pr for any RAT.
#
# Take_all_Pr.take(net=Network.Network,RAT=None)
# if RAT= None : All RAT are processed
# """
# def take(self,net,RAT=None):
# cd = {}
# if RAT == None :
# Rat = net.RAT
# elif isinstance(Rat,str):
# Rat = [RAT]
# """
# Take Pr for any RAT.
#
# Take_all_Pr.take(net=Network.Network,RAT=None)
# if RAT= None : All RAT are processed
# """
# def take(self,net,RAT=None):
# cd = {}
# if RAT == None :
# Rat = net.RAT
# elif isinstance(Rat,str):
# Rat = [RAT]
# cd['Pr']={}
# for rat in Rat:
# cd['Pr'][rat]=nx.get_edge_attributes(net.SubNet[rat],ldp).items()
# cd['Pr']={}
# for rat in Rat:
# cd['Pr'][rat]=nx.get_edge_attributes(net.SubNet[rat],ldp).items()
#class Take_all_TOA():
# """
# Take TOA for any RAT.
#
# Take_all_Pr.take(net=Network.Network,RAT=None)
# if RAT= None : All RAT are processed
# """
# def take(self,net,RAT=None):
# cd = {}
# if RAT == None :
# Rat = net.RAT
# elif isinstance(Rat,str):
# Rat = [RAT]
# cd['TOA']={}
# for rat in Rat:
# cd['TOA'][rat]=nx.get_edge_attributes(net.SubNet[rat],ldp).items()
#class Qconnect():
# """
# take only nodes from a given RAT assuming connectivity probability
#
# p(n1,n2) = Q((10 np log_10(d_(1,2)/R))/sigma_sh)
# """
# Take TOA for any RAT.
#
# Take_all_Pr.take(net=Network.Network,RAT=None)
# if RAT= None : All RAT are processed
# """
# def take(self,net,RAT=None):
# cd = {}
# if RAT == None :
# Rat = net.RAT
# elif isinstance(Rat,str):
# Rat = [RAT]
# cd['TOA']={}
# for rat in Rat:
# cd['TOA'][rat]=nx.get_edge_attributes(net.SubNet[rat],ldp).items()
# with R = 10^((P_0-P_th)/10np)
#
# p_0 = 1/2 with d1,2 =R
#
# """
#class Qconnect():
# """
# take only nodes from a given RAT assuming connectivity
# probability
#
# p(n1,n2) = Q((10 np log_10(d_(1,2)/R))/sigma_sh)
# with R = 10^((P_0-P_th)/10np)
# p_0 = 1/2 with d1,2 =R
# """
#
# def take(self,net,RAT=None,LDP=None):
# if Rat == None :
# Rat = net.RAT
# elif isinstance(Rat,str):
# Rat = [Rat]
#
# for rat in Rat:
# d=nx.get_edge_attributes(self.net.SubNet[rat],'d')
# P=nx.get_edge_attributes(self.net.SubNet[rat],'Pr')
#
# for ldp in Ldp:
# cd[ldp]={}
# for rat in Rat:
# try:
# cd[ldp][rat]=nx.get_edge_attributes(net.SubNet[rat],ldp).items()
# except:
# if the
# specified RAT doesn't
# exist in the PN
# R = pow(10,())
# var = 10*RSSnp*np.log10(d/R)
#
#N=sps.uniform(scale=)
#
def merge_rules(self, RAT=None, LDP=None):
    """Apply every localization rule and merge their outputs into one dict.

    Each rule's take(PN, RAT, LDP) result is folded into a single mapping;
    later rules overwrite keys produced by earlier ones.
    """
    merged = {}
    for current_rule in self.rule:
        merged.update(current_rule.take(self.PN, RAT, LDP))
    return merged
| 23.648515
| 90
| 0.468495
|
acfea24b3b49fdffc938ea58b8f7a675fa188cc8
| 311
|
py
|
Python
|
Basic_Concepts_of_String_Manipulation/Artificial_reviews.py
|
RKiddle/python_reg_expressions
|
9e89c1c59677ffa19a4c64a37e92bbea33fad88e
|
[
"MIT"
] | null | null | null |
Basic_Concepts_of_String_Manipulation/Artificial_reviews.py
|
RKiddle/python_reg_expressions
|
9e89c1c59677ffa19a4c64a37e92bbea33fad88e
|
[
"MIT"
] | null | null | null |
Basic_Concepts_of_String_Manipulation/Artificial_reviews.py
|
RKiddle/python_reg_expressions
|
9e89c1c59677ffa19a4c64a37e92bbea33fad88e
|
[
"MIT"
] | null | null | null |
# NOTE(review): movie1 and movie2 are provided by the surrounding exercise
# environment (two variants of the same movie review) -- they are not
# defined in this snippet.
# Select the first 32 characters of movie1
first_part = movie1[:32]
# Select from 43rd character to the end of movie1
last_part = movie1[42:]
# Select from 33rd to the 42nd character (of movie2, filling the gap)
middle_part = movie2[32:42]
# Print the spliced-together review and movie2 itself for comparison
print(first_part+middle_part+last_part)
print(movie2)
| 23.923077
| 49
| 0.778135
|
acfea3baecd4c6fa01ab5a8a38b2600662084195
| 1,858
|
py
|
Python
|
Pillow-4.3.0/Tests/test_shell_injection.py
|
leorzz/simplemooc
|
8b1c5e939d534b1fd729596df4c59fc69708b896
|
[
"MIT"
] | null | null | null |
Pillow-4.3.0/Tests/test_shell_injection.py
|
leorzz/simplemooc
|
8b1c5e939d534b1fd729596df4c59fc69708b896
|
[
"MIT"
] | null | null | null |
Pillow-4.3.0/Tests/test_shell_injection.py
|
leorzz/simplemooc
|
8b1c5e939d534b1fd729596df4c59fc69708b896
|
[
"MIT"
] | null | null | null |
from helper import unittest, PillowTestCase
from helper import djpeg_available, cjpeg_available, netpbm_available
import sys
import shutil
from PIL import Image, JpegImagePlugin, GifImagePlugin
# Sample images used as sources for the round-trip tests below.
TEST_JPG = "Tests/images/hopper.jpg"
TEST_GIF = "Tests/images/hopper.gif"

# Filenames containing shell metacharacters (quotes, ;, |, ||, &&). If any
# of these leaked unquoted into a shell command line, the command would be
# mangled -- the tests below rely on that to detect shell injection.
test_filenames = (
    "temp_';",
    "temp_\";",
    "temp_'\"|",
    "temp_'\"||",
    "temp_'\"&&",
)
@unittest.skipIf(sys.platform.startswith('win32'), "requires Unix or MacOS")
class TestShellInjection(PillowTestCase):
    """Check that helpers shelling out to external tools quote filenames."""

    def assert_save_filename_check(self, src_img, save_func):
        """Save src_img under every hostile filename, then reload each one."""
        for name in test_filenames:
            target = self.tempfile(name)
            save_func(src_img, 0, target)
            # If the saved file can't be opened, the save command was
            # probably mangled by shell injection.
            Image.open(target).load()

    @unittest.skipUnless(djpeg_available(), "djpeg not available")
    def test_load_djpeg_filename(self):
        for name in test_filenames:
            hostile = self.tempfile(name)
            shutil.copy(TEST_JPG, hostile)
            Image.open(hostile).load_djpeg()

    @unittest.skipUnless(cjpeg_available(), "cjpeg not available")
    def test_save_cjpeg_filename(self):
        source = Image.open(TEST_JPG)
        self.assert_save_filename_check(source, JpegImagePlugin._save_cjpeg)

    @unittest.skipUnless(netpbm_available(), "netpbm not available")
    def test_save_netpbm_filename_bmp_mode(self):
        source = Image.open(TEST_GIF).convert("RGB")
        self.assert_save_filename_check(source, GifImagePlugin._save_netpbm)

    @unittest.skipUnless(netpbm_available(), "netpbm not available")
    def test_save_netpbm_filename_l_mode(self):
        source = Image.open(TEST_GIF).convert("L")
        self.assert_save_filename_check(source, GifImagePlugin._save_netpbm)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 32.034483
| 76
| 0.698062
|
acfea456573b480dd638a2e5578f17abf0843cc5
| 8,001
|
py
|
Python
|
IPython/history.py
|
yaii/ipyconsole
|
f8cd869074ce2778f86dced1cfeb4569adeccb40
|
[
"BSD-3-Clause"
] | null | null | null |
IPython/history.py
|
yaii/ipyconsole
|
f8cd869074ce2778f86dced1cfeb4569adeccb40
|
[
"BSD-3-Clause"
] | null | null | null |
IPython/history.py
|
yaii/ipyconsole
|
f8cd869074ce2778f86dced1cfeb4569adeccb40
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
""" History related magics and functionality """
# Stdlib imports
import fnmatch
import os
# IPython imports
from IPython.genutils import Term, ask_yes_no
def magic_history(self, parameter_s = ''):
    """Print input history (_i<n> variables), with most recent last.

    %history -> print at most 40 inputs (some may be multi-line)\\
    %history n -> print at most n inputs\\
    %history n1 n2 -> print inputs between n1 and n2 (n2 not included)\\

    Each input's number <n> is shown, and is accessible as the
    automatically generated variable _i<n>. Multi-line statements are
    printed starting at a new line for easy copy/paste.

    Options:

      -n: do NOT print line numbers. This is useful if you want to get a
      printout of many lines which can be directly pasted into a text
      editor.

      This feature is only available if numbered prompts are in use.

      -t: (default) print the 'translated' history, as IPython understands it.
      IPython filters your input and converts it all into valid Python source
      before executing it (things like magics or aliases are turned into
      function calls, for example). With this option, you'll see the native
      history instead of the user-entered version: '%cd /' will be seen as
      '_ip.magic("%cd /")' instead of '%cd /'.

      -r: print the 'raw' history, i.e. the actual commands you typed.

      -g: treat the arg as a pattern to grep for in (full) history.
      This includes the "shadow history" (almost all commands ever written).
      Use '%hist -g' to show full shadow history (may be very long).
      In shadow history, every index number starts with 0.

      -f FILENAME: instead of printing the output to the screen, redirect it to
      the given file. The file is always overwritten, though IPython asks for
      confirmation first if it already exists.
    """

    ip = self.api
    shell = self.shell
    if not shell.outputcache.do_full_cache:
        print 'This feature is only available if numbered prompts are in use.'
        return
    opts,args = self.parse_options(parameter_s,'gntsrf:',mode='list')

    # Check if output to specific file was requested.
    try:
        outfname = opts['f']
    except KeyError:
        outfile = Term.cout
        # We don't want to close stdout at the end!
        close_at_end = False
    else:
        if os.path.exists(outfname):
            ans = ask_yes_no("File %r exists. Overwrite?" % outfname)
            if not ans:
                print 'Aborting.'
                return
        else:
            # NOTE(review): with this structure, when the file already
            # exists and the user confirms the overwrite, `outfile` is
            # never assigned and the print below raises NameError --
            # verify against the upstream version of this function.
            outfile = open(outfname,'w')
            close_at_end = True

    # -t (translated) is also the default when neither -t nor -r is given.
    if opts.has_key('t'):
        input_hist = shell.input_hist
    elif opts.has_key('r'):
        input_hist = shell.input_hist_raw
    else:
        input_hist = shell.input_hist

    default_length = 40
    pattern = None
    if opts.has_key('g'):
        init = 1
        final = len(input_hist)
        parts = parameter_s.split(None,1)
        if len(parts) == 1:
            # no pattern given after the option: match everything
            parts += '*'
        head, pattern = parts
        pattern = "*" + pattern + "*"
    elif len(args) == 0:
        final = len(input_hist)
        init = max(1,final-default_length)
    elif len(args) == 1:
        final = len(input_hist)
        init = max(1,final-int(args[0]))
    elif len(args) == 2:
        init,final = map(int,args)
    else:
        warn('%hist takes 0, 1 or 2 arguments separated by spaces.')
        print self.magic_hist.__doc__
        return

    width = len(str(final))
    line_sep = ['','\n']
    print_nums = not opts.has_key('n')

    # When grepping, scan the shadow history first; its indices are
    # printed with a leading 0 to distinguish them from normal history.
    found = False
    if pattern is not None:
        sh = ip.IP.shadowhist.all()
        for idx, s in sh:
            if fnmatch.fnmatch(s, pattern):
                print "0%d: %s" %(idx, s)
                found = True

    if found:
        print "==="
        print "shadow history ends, fetch by %rep <number> (must start with 0)"
        print "=== start of normal history ==="

    for in_num in range(init,final):
        inline = input_hist[in_num]
        if pattern is not None and not fnmatch.fnmatch(inline, pattern):
            continue

        multiline = int(inline.count('\n') > 1)
        if print_nums:
            print >> outfile, \
                  '%s:%s' % (str(in_num).ljust(width),line_sep[multiline]),
        print >> outfile, inline,

    if close_at_end:
        outfile.close()
def magic_hist(self, parameter_s=''):
    """Alternate name for %history."""
    # Pure alias: defer entirely to magic_history.
    return self.magic_history(parameter_s)
def rep_f(self, arg):
    r""" Repeat a command, or get command to input line for editing

    - %rep (no arguments):

    Place a string version of last computation result (stored in the special '_'
    variable) to the next input prompt. Allows you to create elaborate command
    lines without using copy-paste::

        $ l = ["hei", "vaan"]
        $ "".join(l)
        ==> heivaan
        $ %rep
        $ heivaan_ <== cursor blinking

    %rep 45

    Place history line 45 to next input prompt. Use %hist to find out the
    number.

    %rep 1-4 6-7 3

    Repeat the specified lines immediately. Input slice syntax is the same as
    in %macro and %save.

    %rep foo

    Place the most recent line that has the substring "foo" to next input.
    (e.g. 'svn ci -m foobar').
    """

    opts,args = self.parse_options(arg,'',mode='list')
    ip = self.api
    if not args:
        # %rep with no arguments: stage str(_) as the next input line.
        ip.set_next_input(str(ip.user_ns["_"]))
        return

    if len(args) == 1 and not '-' in args[0]:
        arg = args[0]
        if len(arg) > 1 and arg.startswith('0'):
            # get from shadow hist
            num = int(arg[1:])
            line = self.shadowhist.get(num)
            ip.set_next_input(str(line))
            return
        try:
            # Plain integer: stage that raw-history line for editing.
            num = int(args[0])
            ip.set_next_input(str(ip.IP.input_hist_raw[num]).rstrip())
            return
        except ValueError:
            pass

        # Not a number: substring-match the raw history, newest first,
        # skipping lines that themselves contain 'rep'.
        for h in reversed(self.shell.input_hist_raw):
            if 'rep' in h:
                continue
            if fnmatch.fnmatch(h,'*' + arg + '*'):
                ip.set_next_input(str(h).rstrip())
                return

    # Otherwise treat the arguments as history slices and re-run them now.
    try:
        lines = self.extract_input_slices(args, True)
        print "lines",lines
        ip.runlines(lines)
    except ValueError:
        print "Not found in recent history:", args
# Unique marker distinguishing "no stored value" from any real entry.
_sentinel = object()


class ShadowHist:
    """Persistent, de-duplicated command history backed by a hash store.

    The backing db maps command strings to monotonically increasing
    indices under the 'shadowhist' hive, with the next free index kept
    under the 'shadowhist_idx' key.
    """

    def __init__(self, db):
        # cmd => idx mapping
        self.curidx = 0
        self.db = db

    def inc_idx(self):
        """Return the next free index, advancing the persisted counter."""
        nxt = self.db.get('shadowhist_idx', 1)
        self.db['shadowhist_idx'] = nxt + 1
        return nxt

    def add(self, ent):
        """Record ent once; adding an already-known entry is a no-op."""
        if self.db.hget('shadowhist', ent, _sentinel) is not _sentinel:
            return
        self.db.hset('shadowhist', ent, self.inc_idx())

    def all(self):
        """Return every entry as (index, command) pairs sorted by index."""
        stored = self.db.hdict('shadowhist')
        pairs = [(idx, cmd) for (cmd, idx) in stored.items()]
        pairs.sort()
        return pairs

    def get(self, idx):
        """Return the command stored at idx, or None if absent."""
        for k, v in self.all():
            if k == idx:
                return v
def test_shist():
    """Manual smoke test: exercise ShadowHist against a real PickleShareDB.

    Duplicates ('hello') should only be stored once; prints the stored
    entries and the entry at index 2 for visual inspection.
    """
    from IPython.Extensions import pickleshare
    db = pickleshare.PickleShareDB('~/shist')
    s = ShadowHist(db)
    s.add('hello')
    s.add('world')
    s.add('hello')
    s.add('hello')
    s.add('karhu')
    print "all",s.all()
    print s.get(2)
print s.get(2)
def init_ipython(ip):
    """Register the history magics (%rep, %hist, %history) on the shell
    and install a completer for %hist's option flags."""
    ip.expose_magic("rep",rep_f)
    ip.expose_magic("hist",magic_hist)
    ip.expose_magic("history",magic_history)
    import ipy_completers
    ipy_completers.quick_completer('%hist' ,'-g -t -r -n')
| 29.307692
| 80
| 0.568929
|
acfea48c74b0c4aad5c0dac65589bec51b0d99bc
| 7,130
|
py
|
Python
|
qiskit_experiments/library/quantum_volume/qv_experiment.py
|
spencerking/qiskit-experiments
|
11a254b010afe35933aaabac70de12b5b5a244bf
|
[
"Apache-2.0"
] | null | null | null |
qiskit_experiments/library/quantum_volume/qv_experiment.py
|
spencerking/qiskit-experiments
|
11a254b010afe35933aaabac70de12b5b5a244bf
|
[
"Apache-2.0"
] | null | null | null |
qiskit_experiments/library/quantum_volume/qv_experiment.py
|
spencerking/qiskit-experiments
|
11a254b010afe35933aaabac70de12b5b5a244bf
|
[
"Apache-2.0"
] | 1
|
2021-06-20T10:32:16.000Z
|
2021-06-20T10:32:16.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Quantum Volume Experiment class.
"""
from typing import Union, Iterable, Optional, List
from numpy.random import Generator, default_rng
from qiskit.providers.backend import Backend
try:
from qiskit import Aer
HAS_SIMULATION_BACKEND = True
except ImportError:
HAS_SIMULATION_BACKEND = False
from qiskit import QuantumCircuit
from qiskit.circuit.library import QuantumVolume as QuantumVolumeCircuit
from qiskit import transpile
from qiskit_experiments.framework import BaseExperiment, Options
from .qv_analysis import QuantumVolumeAnalysis
class QuantumVolume(BaseExperiment):
"""Quantum Volume Experiment class.
# section: overview
Quantum Volume (QV) is a single-number metric that can be measured using a concrete protocol
on near-term quantum computers of modest size. The QV method quantifies the largest random
circuit of equal width and depth that the computer successfully implements.
Quantum computing systems with high-fidelity operations, high connectivity,
large calibrated gate sets, and circuit rewriting toolchains are expected to
have higher quantum volumes.
The Quantum Volume is determined by the largest circuit depth :math:`d_{max}`,
and equals to :math:`2^{d_{max}}`.
See `Qiskit Textbook
<https://qiskit.org/textbook/ch-quantum-hardware/measuring-quantum-volume.html>`_
for an explanation on the QV protocol.
In the QV experiment we generate `QV circuits
<https://qiskit.org/documentation/stubs/qiskit.circuit.library.QuantumVolume.html>`_
on :math:`d` qubits, which contain :math:`d` layers, where each layer consists of random 2-qubit
unitary gates from :math:`SU(4)`, followed by a random permutation on the :math:`d` qubits.
Then these circuits run on the quantum backend and on an ideal simulator
(either :class:`AerSimulator` or :class:`qiskit.quantum_info.Statevector`).
A depth :math:`d` QV circuit is successful if it has 'mean heavy-output probability' > 2/3 with
confidence level > 0.977 (corresponding to z_value = 2), and at least 100 trials have been ran.
See :class:`QuantumVolumeAnalysis` documentation for additional
information on QV experiment analysis.
# section: reference
.. ref_arxiv:: 1 1811.12926
.. ref_arxiv:: 2 2008.08571
"""
# Analysis class for experiment
__analysis_class__ = QuantumVolumeAnalysis
def __init__(
self,
qubits: Union[int, Iterable[int]],
trials: Optional[int] = 100,
seed: Optional[Union[int, Generator]] = None,
simulation_backend: Optional[Backend] = None,
):
"""Initialize a quantum volume experiment.
Args:
qubits: The number of qubits or list of
physical qubits for the experiment.
trials: The number of trials to run the quantum volume circuit.
seed: Seed or generator object for random number
generation. If None default_rng will be used.
simulation_backend: The simulator backend to use to generate
the expected results. the simulator must have a 'save_probabilities'
method. If None :class:`AerSimulator` simulator will be used
(in case :class:`AerSimulator` is not
installed :class:`qiskit.quantum_info.Statevector` will be used).
"""
super().__init__(qubits)
# Set configurable options
self.set_experiment_options(trials=trials)
if not isinstance(seed, Generator):
self._rng = default_rng(seed=seed)
else:
self._rng = seed
if not simulation_backend and HAS_SIMULATION_BACKEND:
self._simulation_backend = Aer.get_backend("aer_simulator")
else:
self._simulation_backend = simulation_backend
@classmethod
def _default_experiment_options(cls) -> Options:
"""Default experiment options.
Experiment Options:
trials (int): Optional, number of times to generate new Quantum Volume
circuits and calculate their heavy output.
"""
options = super()._default_experiment_options()
options.trials = 100
return options
def _get_ideal_data(self, circuit: QuantumCircuit, **run_options) -> List[float]:
"""Return ideal measurement probabilities.
In case the user does not have Aer installed use Terra to calculate
the ideal state.
Args:
circuit: the circuit to extract the ideal data from
run_options: backend run options.
Returns:
list: list of the probabilities for each state in the circuit (as Numpy array)
"""
ideal_circuit = circuit.remove_final_measurements(inplace=False)
if self._simulation_backend:
ideal_circuit.save_probabilities()
# always transpile with optimization_level 0, even if the non ideal circuits will run
# with different optimization level, because we need to compare the results to the
# exact generated probabilities
ideal_circuit = transpile(ideal_circuit, self._simulation_backend, optimization_level=0)
ideal_result = self._simulation_backend.run(ideal_circuit, **run_options).result()
probabilities = ideal_result.data().get("probabilities")
else:
from qiskit.quantum_info import Statevector
state_vector = Statevector(ideal_circuit)
probabilities = state_vector.probabilities()
return probabilities
def circuits(self, backend: Optional[Backend] = None) -> List[QuantumCircuit]:
    """Return a list of Quantum Volume circuits.

    Args:
        backend (Backend): Optional, a backend object.

    Returns:
        A list of :class:`QuantumCircuit`.
    """
    circuits = []
    # QV model circuits are square: width (number of qubits) == depth.
    depth = self._num_qubits
    # Note: the trials numbering in the metadata is starting from 1 for each new experiment run
    for trial in range(1, self.experiment_options.trials + 1):
        qv_circ = QuantumVolumeCircuit(depth, depth, seed=self._rng)
        qv_circ.measure_active()
        # Embed the exact probabilities in the circuit metadata so the analysis
        # stage can compute heavy outputs without re-simulating.
        qv_circ.metadata = {
            "experiment_type": self._type,
            "depth": depth,
            "trial": trial,
            "qubits": self.physical_qubits,
            "ideal_probabilities": self._get_ideal_data(qv_circ),
        }
        circuits.append(qv_circ)
    return circuits
| 40.05618
| 104
| 0.669705
|
acfea499400ea153aff5d4183c5b838cacd2b732
| 12,178
|
py
|
Python
|
jiant/utils/data_loaders.py
|
sxjscience/jiant
|
95bd488cd1318c33ca758b520b6fe3929bc4836b
|
[
"MIT"
] | 5
|
2021-10-09T07:37:32.000Z
|
2022-03-09T08:46:32.000Z
|
jiant/utils/data_loaders.py
|
sxjscience/jiant
|
95bd488cd1318c33ca758b520b6fe3929bc4836b
|
[
"MIT"
] | null | null | null |
jiant/utils/data_loaders.py
|
sxjscience/jiant
|
95bd488cd1318c33ca758b520b6fe3929bc4836b
|
[
"MIT"
] | 1
|
2019-09-08T22:13:55.000Z
|
2019-09-08T22:13:55.000Z
|
"""
Functions having to do with loading data from output of
files downloaded in scripts/download_data_glue.py
"""
import codecs
import csv
import json
import numpy as np
import pandas as pd
from allennlp.data import vocabulary
from jiant.utils.tokenizers import get_tokenizer
from jiant.utils.retokenize import realign_spans
def load_span_data(tokenizer_name, file_name, label_fn=None, has_labels=True):
    """
    Load a span-related task file in .jsonl format, does re-alignment of spans, and tokenizes
    the text.
    Re-alignment of spans involves transforming the spans so that it matches the text after
    tokenization.
    For example, given the original text: [Mr., Porter, is, nice] and bert-base-cased
    tokenization, we get [Mr, ., Por, ter, is, nice ]. If the original span indices was [0,2],
    under the new tokenization, it becomes [0, 3].
    The task file should of be of the following form:
        text: str,
        label: bool
        target: dict that contains the spans
    Args:
        tokenizer_name: str,
        file_name: str,
        label_fn: function that expects a row and outputs a transformed row with labels
          transformed.
    Returns:
        List of dictionaries of the aligned spans and tokenized text.
    """
    rows = pd.read_json(file_name, lines=True)
    # realign spans (row-wise; realign_spans also tokenizes the text field)
    rows = rows.apply(lambda x: realign_spans(x, tokenizer_name), axis=1)
    if has_labels is False:
        # Unlabeled split (e.g. a test set): give every row a dummy label of 0.
        rows["label"] = 0
    elif label_fn is not None:
        rows["label"] = rows["label"].apply(label_fn)
    # Transpose + to_dict yields one {column: value} dict per original row.
    return list(rows.T.to_dict().values())
def load_pair_nli_jsonl(data_file, tokenizer_name, max_seq_len, targ_map):
    """
    Loads a pair NLI task from a .jsonl file (one JSON object per line).

    Parameters
    -----------------
    data_file: path to data file,
    tokenizer_name: str,
    max_seq_len: int,
    targ_map: a dictionary that maps labels to ints

    Returns
    -----------------
    sent1s: list of strings of tokenized first sentences,
    sent2s: list of strings of tokenized second sentences,
    trgs: list of ints of labels,
    idxs: list of ints,
    pair_ids: list of pair ids (empty when the data has none)
    """
    # Bug fix: use a context manager so the file handle is always closed;
    # the original `open(...)` inside a comprehension leaked the handle.
    with open(data_file, encoding="utf-8") as f:
        data = [json.loads(line) for line in f]
    sent1s, sent2s, trgs, idxs, pair_ids = [], [], [], [], []
    for example in data:
        sent1s.append(tokenize_and_truncate(tokenizer_name, example["premise"], max_seq_len))
        sent2s.append(tokenize_and_truncate(tokenizer_name, example["hypothesis"], max_seq_len))
        # Unlabeled examples (e.g. test split) get a dummy label of 0.
        trg = targ_map[example["label"]] if "label" in example else 0
        trgs.append(trg)
        idxs.append(example["idx"])
        if "pair_id" in example:
            pair_ids.append(example["pair_id"])
    return [sent1s, sent2s, trgs, idxs, pair_ids]
def load_tsv(
    tokenizer_name,
    data_file,
    max_seq_len,
    label_idx=2,
    s1_idx=0,
    s2_idx=1,
    label_fn=None,
    skip_rows=0,
    return_indices=False,
    delimiter="\t",
    quote_level=csv.QUOTE_NONE,
    filter_idx=None,
    has_labels=True,
    filter_value=None,
    tag_vocab=None,
    tag2idx_dict=None,
):
    """
    Load a tsv.
    To load only rows that have a certain value for a certain columnn, set filter_idx and
    filter_value (for example, for mnli-fiction we want rows where the genre column has
    value 'fiction').
    Args:
        tokenizer_name (str): The name of the tokenizer to use (see defaluts.conf for values).
        data_file (str): The path to the file to read.
        max_seq_len (int): The maximum number of tokens to keep after tokenization, per text field.
            Start and end symbols are introduced before tokenization, and are counted, so we will
            keep max_seq_len - 2 tokens *of text*.
        label_idx (int|None): The column index for the label field, if present.
        s1_idx (int): The column index for the first text field.
        s2_idx (int|None): The column index for the second text field, if present.
        label_fn (fn: str -> int|None): A function to map items in column label_idx to int-valued
            labels.
        skip_rows (int|list): Skip this many header rows or skip these specific row indices.
        has_labels (bool): If False, don't look for labels at position label_idx.
        filter_value (str|None): The value in which we want filter_idx to be equal to.
        filter_idx (int|None): The column index in which to look for filter_value.
        tag_vocab (allennlp vocabulary): In some datasets, examples are attached to tags, and we
            need to know the results on examples with certain tags, this is a vocabulary for
            tracking tags in a dataset across splits
        tag2idx_dict (dict<string, int>): The tags form a two-level hierarchy, each fine tag belong
            to a coarse tag. In the tsv, each coarse tag has one column, the content in that column
            indicates what fine tags(seperated by ;) beneath that coarse tag the examples have.
            tag2idx_dict is a dictionary to map coarse tag to the index of corresponding column.
            e.g. if we have two coarse tags: source at column 0, topic at column 1; and four fine
            tags: wiki, reddit beneath source, and economics, politics beneath topic. The tsv will
            be: | wiki  | economics;politics|, with the tag2idx_dict as {"source": 0, "topic": 1}
                | reddit| politics          |
    Returns:
        List of first and second sentences, labels, and if applicable indices
    """
    # TODO(Yada): Instead of index integers, adjust this to pass in column names
    # get the first row as the columns to pass into the pandas reader
    # This reads the data file given the delimiter, skipping over any rows
    # (usually header row)
    # NOTE(review): error_bad_lines is deprecated in newer pandas (use
    # on_bad_lines) -- confirm the pinned pandas version before changing.
    rows = pd.read_csv(
        data_file,
        sep=delimiter,
        error_bad_lines=False,
        header=None,
        skiprows=skip_rows,
        quoting=quote_level,
        keep_default_na=False,
        encoding="utf-8",
    )
    if filter_idx and filter_value:
        rows = rows[rows[filter_idx] == filter_value]
    # Filter for sentence1s that are of length 0
    # Filter if row[targ_idx] is nan
    mask = rows[s1_idx].str.len() > 0
    if s2_idx is not None:
        mask = mask & (rows[s2_idx].str.len() > 0)
    if has_labels:
        mask = mask & rows[label_idx].notnull()
    rows = rows.loc[mask]
    sent1s = rows[s1_idx].apply(lambda x: tokenize_and_truncate(tokenizer_name, x, max_seq_len))
    if s2_idx is None:
        # Single-sentence task: keep an empty Series so callers can .tolist() uniformly.
        sent2s = pd.Series()
    else:
        sent2s = rows[s2_idx].apply(lambda x: tokenize_and_truncate(tokenizer_name, x, max_seq_len))
    # Default to the identity mapping when no label transform is supplied.
    label_fn = label_fn if label_fn is not None else (lambda x: x)
    if has_labels:
        labels = rows[label_idx].apply(lambda x: label_fn(x))
    else:
        # If dataset doesn't have labels, for example for test set, then mock labels
        labels = np.zeros(len(rows), dtype=int)
    if tag2idx_dict is not None:
        # -2 offset to cancel @@unknown@@ and @@padding@@ in vocab
        def tags_to_tids(coarse_tag, fine_tags):
            # Map one cell (";"-separated fine tags) to a list of tag ids:
            # the coarse tag id followed by one "coarse__fine" id per fine tag.
            return (
                []
                if pd.isna(fine_tags)
                else (
                    [tag_vocab.add_token_to_namespace(coarse_tag) - 2]
                    + [
                        tag_vocab.add_token_to_namespace("%s__%s" % (coarse_tag, fine_tag)) - 2
                        for fine_tag in fine_tags.split(";")
                    ]
                )
            )

        # One list of per-row tag-id lists per coarse-tag column...
        tid_temp = [
            rows[idx].apply(lambda x: tags_to_tids(coarse_tag, x)).tolist()
            for coarse_tag, idx in tag2idx_dict.items()
        ]
        # ...then flatten across columns so each row gets a single flat id list.
        tagids = [[tid for column in tid_temp for tid in column[idx]] for idx in range(len(rows))]
    if return_indices:
        idxs = rows.index.tolist()
        # Get indices of the remaining rows after filtering
        return sent1s.tolist(), sent2s.tolist(), labels.tolist(), idxs
    elif tag2idx_dict is not None:
        return sent1s.tolist(), sent2s.tolist(), labels.tolist(), tagids
    else:
        return sent1s.tolist(), sent2s.tolist(), labels.tolist()
def load_diagnostic_tsv(
    tokenizer_name,
    data_file,
    max_seq_len,
    label_col,
    s1_col="",
    s2_col="",
    label_fn=None,
    skip_rows=0,
    delimiter="\t",
):
    """Load a tsv and indexes the columns from the diagnostic tsv.
    This is only used for GLUEDiagnosticTask right now.
    Args:
        data_file: string
        max_seq_len: int
        s1_col: string
        s2_col: string
        label_col: string
        label_fn: function
        skip_rows: list of ints
        delimiter: string
    Returns:
        A dictionary of the necessary indexed fields, the tokenized sent1 and sent2
        and indices
        Note: If a field in a particular row in the dataset is empty, we return []
        for that field for that row, otherwise we return an array of ints (indices)
        Else, we return an array of indices
    """
    # TODO: Abstract indexing layer from this function so that MNLI-diagnostic
    # calls load_tsv
    assert (
        len(s1_col) > 0 and len(label_col) > 0
    ), "Make sure you passed in column names for sentence 1 and labels"
    rows = pd.read_csv(
        data_file, sep=delimiter, error_bad_lines=False, quoting=csv.QUOTE_NONE, encoding="utf-8"
    )
    # Empty cells become "" so the indexing closure below can treat them uniformly.
    rows = rows.fillna("")

    def targs_to_idx(col_name):
        # This function builds the index to vocab (and its inverse) mapping
        # NOTE: it also mutates `rows[col_name]` in place, replacing each raw
        # value with a one-element list of its vocab index ("" becomes []).
        values = set(rows[col_name].values)
        vocab = vocabulary.Vocabulary(counter=None, non_padded_namespaces=[col_name])
        for value in values:
            vocab.add_token_to_namespace(value, col_name)
        idx_to_word = vocab.get_index_to_token_vocabulary(col_name)
        word_to_idx = vocab.get_token_to_index_vocabulary(col_name)
        rows[col_name] = rows[col_name].apply(lambda x: [word_to_idx[x]] if x != "" else [])
        return word_to_idx, idx_to_word, rows[col_name]

    sent1s = rows[s1_col].apply(lambda x: tokenize_and_truncate(tokenizer_name, x, max_seq_len))
    sent2s = rows[s2_col].apply(lambda x: tokenize_and_truncate(tokenizer_name, x, max_seq_len))
    labels = rows[label_col].apply(lambda x: label_fn(x))
    # Build indices for field attributes
    lex_sem_to_ix_dic, ix_to_lex_sem_dic, lex_sem = targs_to_idx("Lexical Semantics")
    pr_ar_str_to_ix_di, ix_to_pr_ar_str_dic, pr_ar_str = targs_to_idx(
        "Predicate-Argument Structure"
    )
    logic_to_ix_dic, ix_to_logic_dic, logic = targs_to_idx("Logic")
    knowledge_to_ix_dic, ix_to_knowledge_dic, knowledge = targs_to_idx("Knowledge")
    idxs = rows.index
    return {
        "sents1": sent1s.tolist(),
        "sents2": sent2s.tolist(),
        "targs": labels.tolist(),
        "idxs": idxs.tolist(),
        "lex_sem": lex_sem.tolist(),
        "pr_ar_str": pr_ar_str.tolist(),
        "logic": logic.tolist(),
        "knowledge": knowledge.tolist(),
        "ix_to_lex_sem_dic": ix_to_lex_sem_dic,
        "ix_to_pr_ar_str_dic": ix_to_pr_ar_str_dic,
        "ix_to_logic_dic": ix_to_logic_dic,
        "ix_to_knowledge_dic": ix_to_knowledge_dic,
    }
def get_tag_list(tag_vocab):
    """
    Retrieve tag strings from the tag vocab object.

    The two special allennlp entries (@@unknown@@ at index 0 and @@padding@@
    at index 1) are skipped; the remaining tags are returned in index order
    with ":", ", ", " " and "+" all normalized to underscores.

    Args:
        tag_vocab: the vocab that contains all tags
    Returns:
        tag_list: a list of "coarse__fine" tag strings
    """
    # Shift vocab indices down by 2 so real tags start at 0.
    shifted = {}
    for index, tag in tag_vocab.get_index_to_token_vocabulary().items():
        if index - 2 >= 0:
            shifted[index - 2] = tag

    normalized = []
    for tid in range(len(shifted)):
        tag = shifted[tid]
        # Normalize characters that would be awkward in downstream names.
        for token in (":", ", ", " ", "+"):
            tag = tag.replace(token, "_")
        normalized.append(tag)
    return normalized
def tokenize_and_truncate(tokenizer_name, sent, max_seq_len):
    """Truncate and tokenize a sentence or paragraph.

    Args:
        tokenizer_name (str): tokenizer name looked up via ``get_tokenizer``
            (only used when ``sent`` is a raw string).
        sent (str | list): raw text to tokenize, or an already-tokenized list
            of string tokens.
        max_seq_len (int): maximum sequence length *including* the two
            boundary tokens added downstream.

    Returns:
        list: at most ``max_seq_len - 2`` tokens.

    Raises:
        TypeError: if ``sent`` is neither a string nor a list of strings.
    """
    max_seq_len -= 2  # For boundary tokens.
    if isinstance(sent, str):
        tokenizer = get_tokenizer(tokenizer_name)
        return tokenizer.tokenize(sent)[:max_seq_len]
    if isinstance(sent, list):
        # Already tokenized: validate and truncate. An empty list is a valid
        # (empty) sentence; the original `sent[0]` check crashed on it.
        if sent and not isinstance(sent[0], str):
            raise TypeError("Invalid sentence found!")
        return sent[:max_seq_len]
    # The original fell through and silently returned None here, hiding bugs.
    raise TypeError("sent must be a str or a list of str, got %r" % type(sent))
| 39.032051
| 100
| 0.651667
|
acfea4a2b24d2c4f9aa885eb87d5415dad911456
| 924
|
py
|
Python
|
ns-allinone-3.27/ns-3.27/.waf-1.8.19-b1fc8f7baef51bd2db4c2971909a568d/waflib/Tools/gdc.py
|
zack-braun/4607_NS
|
43c8fb772e5552fb44bd7cd34173e73e3fb66537
|
[
"MIT"
] | 93
|
2019-04-21T08:22:26.000Z
|
2022-03-30T04:26:29.000Z
|
ns-allinone-3.27/ns-3.27/.waf-1.8.19-b1fc8f7baef51bd2db4c2971909a568d/waflib/Tools/gdc.py
|
zack-braun/4607_NS
|
43c8fb772e5552fb44bd7cd34173e73e3fb66537
|
[
"MIT"
] | 12
|
2019-04-19T16:39:58.000Z
|
2021-06-22T13:18:32.000Z
|
ns-allinone-3.27/ns-3.27/.waf-1.8.19-b1fc8f7baef51bd2db4c2971909a568d/waflib/Tools/gdc.py
|
zack-braun/4607_NS
|
43c8fb772e5552fb44bd7cd34173e73e3fb66537
|
[
"MIT"
] | 21
|
2019-05-27T19:36:12.000Z
|
2021-07-26T02:37:41.000Z
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
from waflib.Tools import ar,d
from waflib.Configure import conf
@conf
def find_gdc(conf):
	# Locate the GDC D compiler on PATH and store it as conf.env.D.
	conf.find_program('gdc',var='D')
	# Sanity check: make sure the program we found really is gdc.
	out=conf.cmd_and_log(conf.env.D+['--version'])
	if out.find("gdc")==-1:
		conf.fatal("detected compiler is not gdc")
@conf
def common_flags_gdc(conf):
	# Populate conf.env with the command-line templates the D tool chain
	# uses when the compiler is gdc.
	v=conf.env
	v['DFLAGS']=[]
	v['D_SRC_F']=['-c']
	v['D_TGT_F']='-o%s'
	# gdc doubles as the link driver.
	v['D_LINKER']=v['D']
	v['DLNK_SRC_F']=''
	v['DLNK_TGT_F']='-o%s'
	v['DINC_ST']='-I%s'
	v['DSHLIB_MARKER']=v['DSTLIB_MARKER']=''
	v['DSTLIB_ST']=v['DSHLIB_ST']='-l%s'
	v['DSTLIBPATH_ST']=v['DLIBPATH_ST']='-L%s'
	v['LINKFLAGS_dshlib']=['-shared']
	# D interface ("header") file generation flags.
	v['DHEADER_ext']='.di'
	v.DFLAGS_d_with_header='-fintfc'
	v['D_HDR_F']='-fintfc-file=%s'
def configure(conf):
	# Waf tool entry point: detect gdc, load the generic ar/d support,
	# then apply the gdc-specific flag templates.
	conf.find_gdc()
	conf.load('ar')
	conf.load('d')
	conf.common_flags_gdc()
	conf.d_platform_flags()
| 25.666667
| 78
| 0.667749
|
acfea4f559b03906de0bb80d206a8437f1b0e6bb
| 815
|
py
|
Python
|
school_django/api/resources.py
|
gentiger55/django_react_school
|
1f55fee562be7ad8b5220f09091ae4af3c2ab8c8
|
[
"BSD-3-Clause"
] | 1
|
2018-12-16T15:46:11.000Z
|
2018-12-16T15:46:11.000Z
|
school_django/api/resources.py
|
gentiger55/django_react_school
|
1f55fee562be7ad8b5220f09091ae4af3c2ab8c8
|
[
"BSD-3-Clause"
] | null | null | null |
school_django/api/resources.py
|
gentiger55/django_react_school
|
1f55fee562be7ad8b5220f09091ae4af3c2ab8c8
|
[
"BSD-3-Clause"
] | null | null | null |
from tastypie.resources import ModelResource
from tastypie import fields
from api.models import School, Statistics, User
from tastypie.authorization import Authorization
class SchoolResource(ModelResource):
    """REST resource exposing ``School`` rows at ``/school/``."""

    class Meta:
        queryset = School.objects.all()
        resource_name = 'school'
        # Base Authorization() places no restrictions -- TODO confirm this
        # open access is intended outside development.
        authorization = Authorization()
        # Echo the created/updated object back in POST/PUT responses.
        always_return_data = True
        # fields = ['school_name']
class StatisticsResource(ModelResource):
    """REST resource exposing ``Statistics`` rows at ``/statistics/``."""

    # Foreign key to the owning school; 'school_id' is the model attribute name.
    school_id = fields.ForeignKey(SchoolResource, 'school_id')

    class Meta:
        queryset = Statistics.objects.all()
        resource_name = 'statistics'
        authorization = Authorization()
class UserResource(ModelResource):
    """REST resource exposing ``User`` rows at ``/users/``."""

    class Meta:
        queryset = User.objects.all()
        resource_name = 'users'
        # Bug fix: this attribute was misspelled "authorizaiton", so tastypie
        # silently ignored it and fell back to its default (read-only)
        # authorization, matching the sibling resources was clearly intended.
        authorization = Authorization()
| 30.185185
| 62
| 0.700613
|
acfea582cb4c52a250ba6598015a1ce5057dc080
| 2,338
|
py
|
Python
|
src/text-mining.py
|
anakinanakin/data-analysis
|
28c0847d547fba43cbf4441764df370b89d6504f
|
[
"MIT"
] | null | null | null |
src/text-mining.py
|
anakinanakin/data-analysis
|
28c0847d547fba43cbf4441764df370b89d6504f
|
[
"MIT"
] | null | null | null |
src/text-mining.py
|
anakinanakin/data-analysis
|
28c0847d547fba43cbf4441764df370b89d6504f
|
[
"MIT"
] | null | null | null |
# Text-mining script: reads author-tagged training rows of the form
# "author#text...", normalizes the text, and trains a logistic-regression
# author classifier on a binary document-term matrix.

# nltk's default stoplist:
from nltk.corpus import stopwords

stoplist = set(stopwords.words('english'))

import csv
import re
import nltk
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
# Bug fix: the script instantiates LogisticRegression below but originally
# imported SGDClassifier instead, raising a NameError at training time.
from sklearn.linear_model import LogisticRegression

train_data = []
# The Porter stemmer is stateless; create it once instead of per row.
port = nltk.PorterStemmer()
with open('./Assignment2Datasets/pg_train.csv', encoding='latin-1') as csvfile:
    # all lowercase
    lower_stream = (line.lower() for line in csvfile)
    pg_train = csv.reader(lower_stream, delimiter=' ', quotechar='"', escapechar='^')
    for row in pg_train:
        # strip punctuation, keeping word chars, whitespace and the '#'
        # author/text separator (raw string fixes the invalid-escape warning)
        row = re.sub(r'[^A-Za-z0-9#\w\s]+', '', str(row))
        # tokenize
        row = nltk.word_tokenize(row)
        # stopword removal first, so stemming cannot mangle stopwords
        # (e.g. "was" -> "wa") before they are matched against the stoplist
        row = [word for word in row if word not in stoplist]
        # stemming -- bug fix: the original loop only re-bound its loop
        # variable and therefore had no effect; build a new list instead
        row = [port.stem(word) for word in row]
        # re-join the tokens into a single space-separated document string
        row = ' '.join(row)
        train_data.append(row)

authorList_train = []
train_data_new = []
blank = 0
# separate author and content; every second parsed row is a blank artifact
# row ("newline 's'") and is skipped via the `blank` toggle
for s in train_data:
    if blank == 1:
        blank = 0
        continue
    blank += 1
    # the author name is everything before the first '#'; the content starts
    # two characters past it (skipping the separator token), matching the
    # original character-counting loop
    author, _, _rest = s.partition("#")
    train_data_new.append(s[len(author) + 2:])
    authorList_train.append(author)

# binary document-term matrix (presence/absence features)
count_vect_binary = CountVectorizer(binary=True)
X = count_vect_binary.fit_transform(train_data_new)

# train the logistic-regression classifier on the binary features
log_binary = Pipeline([('vect', count_vect_binary), ('logistic', LogisticRegression())])
log_binary = log_binary.fit(train_data_new, authorList_train)
| 25.977778
| 88
| 0.612062
|
acfea693f281b89349515b65e4fc2a6475d744e3
| 74
|
py
|
Python
|
scripts/mango/relations/constants.py
|
robertjoosten/maya-orm
|
9c5db622d5bbba63246ff1d3f0a22bd3f7140f6c
|
[
"MIT"
] | 11
|
2020-11-14T14:37:49.000Z
|
2022-03-25T03:28:23.000Z
|
scripts/mango/relations/constants.py
|
robertjoosten/maya-orm
|
9c5db622d5bbba63246ff1d3f0a22bd3f7140f6c
|
[
"MIT"
] | null | null | null |
scripts/mango/relations/constants.py
|
robertjoosten/maya-orm
|
9c5db622d5bbba63246ff1d3f0a22bd3f7140f6c
|
[
"MIT"
] | null | null | null |
# Relation deletion-behavior flags. The names follow Django's ``on_delete``
# conventions (CASCADE / DO_NOTHING); the actual semantics are implemented by
# the relation code that consumes these constants.
__all__ = [
    "CASCADE",
    "DO_NOTHING"
]

# Propagate the deletion to dependent objects.
CASCADE = 0
# Leave dependent objects untouched.
DO_NOTHING = 1
| 9.25
| 16
| 0.581081
|
acfea6ff361247cc61851c261bf2691d2c761dec
| 978
|
py
|
Python
|
ticketus/core/migrations/0007_auto_20141228_0646.py
|
sjkingo/ticketus
|
90f2781af9418e9e2c6470ca50c9a6af9ce098ff
|
[
"BSD-2-Clause"
] | 3
|
2019-02-09T10:52:55.000Z
|
2021-09-19T14:14:36.000Z
|
ticketus/core/migrations/0007_auto_20141228_0646.py
|
sjkingo/ticketus
|
90f2781af9418e9e2c6470ca50c9a6af9ce098ff
|
[
"BSD-2-Clause"
] | null | null | null |
ticketus/core/migrations/0007_auto_20141228_0646.py
|
sjkingo/ticketus
|
90f2781af9418e9e2c6470ca50c9a6af9ce098ff
|
[
"BSD-2-Clause"
] | 3
|
2018-03-04T18:05:02.000Z
|
2021-09-19T14:14:38.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration.

    Redeclares ``created_at`` and ``edited_at`` on both ``comment`` and
    ``ticket`` as plain ``DateTimeField()``; the field options being removed
    are defined in migration 0006 and earlier (not visible here).
    """

    dependencies = [
        ('core', '0006_auto_20141227_1735'),
    ]

    operations = [
        # comment.created_at -> plain DateTimeField
        migrations.AlterField(
            model_name='comment',
            name='created_at',
            field=models.DateTimeField(),
            preserve_default=True,
        ),
        # comment.edited_at -> plain DateTimeField
        migrations.AlterField(
            model_name='comment',
            name='edited_at',
            field=models.DateTimeField(),
            preserve_default=True,
        ),
        # ticket.created_at -> plain DateTimeField
        migrations.AlterField(
            model_name='ticket',
            name='created_at',
            field=models.DateTimeField(),
            preserve_default=True,
        ),
        # ticket.edited_at -> plain DateTimeField
        migrations.AlterField(
            model_name='ticket',
            name='edited_at',
            field=models.DateTimeField(),
            preserve_default=True,
        ),
    ]
| 25.076923
| 44
| 0.550102
|
acfea90a5075d69ec82579a5bacc35bbbe9e85f6
| 1,908
|
py
|
Python
|
src/gtk/toga_gtk/widgets/scrollcontainer.py
|
simonw/toga
|
8b52479c5d9960c5f3af960b5837ecc467c0bc95
|
[
"BSD-3-Clause"
] | 3
|
2020-12-09T02:13:55.000Z
|
2021-02-18T00:41:36.000Z
|
src/gtk/toga_gtk/widgets/scrollcontainer.py
|
simonw/toga
|
8b52479c5d9960c5f3af960b5837ecc467c0bc95
|
[
"BSD-3-Clause"
] | 1
|
2021-05-23T04:04:58.000Z
|
2021-05-25T22:08:14.000Z
|
src/gtk/toga_gtk/widgets/scrollcontainer.py
|
simonw/toga
|
8b52479c5d9960c5f3af960b5837ecc467c0bc95
|
[
"BSD-3-Clause"
] | null | null | null |
from ..libs import Gtk
from ..window import GtkViewport
from .base import Widget
class ScrollContainer(Widget):
    """GTK implementation of the Toga scroll-container widget."""

    def create(self):
        """Create the native ``Gtk.ScrolledWindow`` backing this widget."""
        self.native = Gtk.ScrolledWindow()
        # Set this minimum size of scroll windows because we must reserve space for
        # scrollbars when splitter resized. See, https://gitlab.gnome.org/GNOME/gtk/-/issues/210
        self.native.set_min_content_width(self.interface.MIN_WIDTH)
        self.native.set_min_content_height(self.interface.MIN_HEIGHT)
        self.native.set_overlay_scrolling(True)
        self.native.interface = self.interface

    def set_content(self, widget):
        """Install ``widget`` as the scrollable content, replacing any old child."""
        self.inner_container = widget
        widget.viewport = GtkViewport(self.native)
        # Add all children to the content widget.
        for child in widget.interface.children:
            child._impl.container = widget
        # Remove the old widget before adding the new one.
        if self.native.get_child():
            self.native.get_child().destroy()
        # Add the widget to ScrolledWindow as a scrollable widget.
        self.native.add(self.inner_container.native)
        self.native.show_all()

    def set_app(self, app):
        """Propagate the app reference to the content, if any."""
        if self.interface.content:
            self.interface.content.app = app

    def set_window(self, window):
        """Propagate the window reference to the content, if any."""
        if self.interface.content:
            self.interface.content.window = window

    def _apply_scroll_policy(self):
        # Both axes are always re-read from the interface, so one helper serves
        # both setters; the original duplicated this body verbatim in
        # set_horizontal and set_vertical.
        self.native.set_policy(
            Gtk.PolicyType.AUTOMATIC if self.interface.horizontal else Gtk.PolicyType.NEVER,
            Gtk.PolicyType.AUTOMATIC if self.interface.vertical else Gtk.PolicyType.NEVER,
        )

    def set_horizontal(self, value):
        """Enable/disable horizontal scrolling (state is read from the interface)."""
        self._apply_scroll_policy()

    def set_vertical(self, value):
        """Enable/disable vertical scrolling (state is read from the interface)."""
        self._apply_scroll_policy()
| 35.333333
| 96
| 0.680294
|
acfeaa9e7dc82e2688682aaf7073e736a360e591
| 549
|
py
|
Python
|
day6-part1/main.py
|
KSonny4/advent-of-code-2021
|
ca4cf2e94799174468950019a562ed9063001599
|
[
"MIT"
] | 4
|
2021-12-02T21:17:43.000Z
|
2021-12-05T22:15:28.000Z
|
day6-part1/main.py
|
KSonny4/advent-of-code-2021
|
ca4cf2e94799174468950019a562ed9063001599
|
[
"MIT"
] | null | null | null |
day6-part1/main.py
|
KSonny4/advent-of-code-2021
|
ca4cf2e94799174468950019a562ed9063001599
|
[
"MIT"
] | null | null | null |
def main():
    """Simulate 80 days of lanternfish reproduction and print the population.

    Each fish carries a spawn timer: a fish at 0 resets to 6 and produces a
    new fish at 8; every other fish just counts down by one each day.
    """
    with open("input.txt", encoding="utf-8") as f:
        timers = map(int, f.read().split(","))

    # Track how many fish share each timer value (0..8) instead of keeping one
    # list entry per fish: the population grows exponentially, so the original
    # per-fish list got huge, while this stays O(days * 9) time and O(9) space.
    counts = [0] * 9
    for timer in timers:
        counts[timer] += 1

    for _ in range(80):
        spawning = counts.pop(0)  # fish at timer 0 give birth today
        counts[6] += spawning     # parents reset to timer 6
        counts.append(spawning)   # newborns start at timer 8

    print(sum(counts))
if __name__ == "__main__":
main()
| 24.954545
| 58
| 0.533698
|
acfeab3066d5658ec7b837c1159eaf0e0e32f886
| 37,312
|
py
|
Python
|
python_packages_static/flopy/modflow/mflak.py
|
usgs/neversink_workflow
|
acd61435b8553e38d4a903c8cd7a3afc612446f9
|
[
"CC0-1.0"
] | null | null | null |
python_packages_static/flopy/modflow/mflak.py
|
usgs/neversink_workflow
|
acd61435b8553e38d4a903c8cd7a3afc612446f9
|
[
"CC0-1.0"
] | null | null | null |
python_packages_static/flopy/modflow/mflak.py
|
usgs/neversink_workflow
|
acd61435b8553e38d4a903c8cd7a3afc612446f9
|
[
"CC0-1.0"
] | null | null | null |
"""
mflak module. Contains the ModflowLak class. Note that the user can access
the ModflowLak class as `flopy.modflow.ModflowLak`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/lak.htm>`_.
"""
import sys
import numpy as np
from ..pakbase import Package
from ..utils.util_array import Transient3d
from ..utils import Util3d, read_fixed_var, write_fixed_var
class ModflowLak(Package):
"""
MODFLOW Lake Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
nlakes : int
NLAKES Number of separate lakes.
Sublakes of multiple-lake systems are considered separate lakes for
input purposes. The variable NLAKES is used, with certain internal
assumptions and approximations, to dimension arrays for the simulation.
ipakcb : int
(ILKCB in MODFLOW documentation)
Whether or not to write cell-by-cell flows (yes if ILKCB> 0, no
otherwise). If ILKCB< 0 and "Save Budget" is specified in the Output
Control or ICBCFL is not equal to 0, the cell-by-cell flows will be
printed in the standard output file. ICBCFL is specified in the input
to the Output Control Option of MODFLOW.
lwrt : int or list of ints (one per SP)
lwrt > 0, suppresses printout from the lake package. Default is 0 (to
print budget information)
theta : float
Explicit (THETA = 0.0), semi-implicit (0.0 < THETA < 1.0), or implicit
(THETA = 1.0) solution for lake stages. SURFDEPTH is read only if
THETA is assigned a negative value (the negative value of THETA is
then changed to a positive value internally by the code).
* A new method of solving for lake stage uses only the time-weighting
factor THETA (Merritt and Konikow, 2000, p. 52) for transient
simulations. THETA is automatically set to a value of 1.0 for all
steady-state stress periods. For transient stress periods, Explicit
(THETA = 0.0), semi-implicit (0.0 < THETA < 1.0), or implicit
(THETA = 1.0) solutions can be used to calculate lake stages. The
option to specify negative values for THETA is supported to allow
specification of additional variables (NSSITER, SSCNCR, SURFDEP)
for simulations that only include transient stress periods. If
THETA is specified as a negative value, then it is converted to a
positive value for calculations of lake stage.
* In MODFLOW-2000 and later, ISS is not part of the input. Instead
NSSITR or SSCNCR should be included if one or more stress periods
is a steady state stress period as defined in Ss/tr in the
Discretization file.
* SSCNCR and NSSITR can be read for a transient only simulation by
placing a negative sign immediately in front of THETA. A negative
THETA sets a flag which assumes input values for NSSITR and SSCNCR
will follow THETA in the format as described by Merritt and Konikow
(p. 52). A negative THETA is automatically reset to a positive
value after values of NSSITR and SSCNCR are read.
nssitr : int
Maximum number of iterations for Newton's method of solution for
equilibrium lake stages in each MODFLOW iteration for steady-state
aquifer head solution. Only read if ISS (option flag input to DIS
Package of MODFLOW indicating steady-state solution) is not zero or
if THETA is specified as a negative value.
* NSSITR and SSCNCR may be omitted for transient solutions (ISS = 0).
* In MODFLOW-2000 and later, ISS is not part of the input.
Instead NSSITR or SSCNCR should be included if one or more stress
periods is a steady state stress period as defined in Ss/tr in the
Discretization file.
* SSCNCR and NSSITR can be read for a transient only simulation by
placing a negative sign immediately in front of THETA. A negative
THETA sets a flag which assumes input values for NSSITR and SSCNCR
will follow THETA in the format as described by Merritt and Konikow
(p. 52). A negative THETA is automatically reset to a positive
value after values of NSSITR and SSCNCR are read.
* If NSSITR = 0, a value of 100 will be used instead.
sscncr : float
Convergence criterion for equilibrium lake stage solution by Newton's
method. Only read if ISS is not zero or if THETA is specified as a
negative value. See notes above for nssitr.
surfdepth : float
The height of small topological variations (undulations) in lake-bottom
elevations that can affect groundwater discharge to lakes. SURFDEPTH
decreases the lakebed conductance for vertical flow across a horizontal
lakebed caused both by a groundwater head that is between the lakebed
and the lakebed plus SURFDEPTH and a lake stage that is also between
the lakebed and the lakebed plus SURFDEPTH. This method provides a
smooth transition from a condition of no groundwater discharge to a
lake, when groundwater head is below the lakebed, to a condition of
increasing groundwater discharge to a lake as groundwater head becomes
greater than the elevation of the dry lakebed. The method also allows
for the transition of seepage from a lake to groundwater when the lake
stage decreases to the lakebed elevation. Values of SURFDEPTH ranging
from 0.01 to 0.5 have been used successfully in test simulations.
SURFDEP is read only if THETA is specified as a negative value.
stages : float or list of floats
The initial stage of each lake at the beginning of the run.
stage_range : list of tuples (ssmn, ssmx) of length nlakes
Where ssmn and ssmx are the minimum and maximum stages allowed for each
lake in steady-state solution.
* SSMN and SSMX are not needed for a transient run and must be
omitted when the solution is transient.
* When the first stress period is a steady-state stress period,
SSMN is defined in record 3.
For subsequent steady-state stress periods, SSMN is defined in
record 9a.
lakarr : array of integers (nlay, nrow, ncol)
LKARR A value is read in for every grid cell.
If LKARR(I,J,K) = 0, the grid cell is not a lake volume cell.
If LKARR(I,J,K) > 0, its value is the identification number of the lake
occupying the grid cell. LKARR(I,J,K) must not exceed the value NLAKES.
If it does, or if LKARR(I,J,K) < 0, LKARR(I,J,K) is set to zero.
Lake cells cannot be overlain by non-lake cells in a higher layer.
Lake cells must be inactive cells (IBOUND = 0) and should not be
convertible to active cells (WETDRY = 0).
The Lake package can be used when all or some of the model layers
containing the lake are confined. The authors recommend using the
Layer-Property Flow Package (LPF) for this case, although the
BCF and HUF Packages will work too. However, when using the BCF6
package to define aquifer properties, lake/aquifer conductances in the
lateral direction are based solely on the lakebed leakance (and not on
the lateral transmissivity of the aquifer layer). As before, when the
BCF6 package is used, vertical lake/aquifer conductances are based on
lakebed conductance and on the vertical hydraulic conductivity of the
aquifer layer underlying the lake when the wet/dry option is
implemented, and only on the lakebed leakance when the wet/dry option
is not implemented.
bdlknc : array of floats (nlay, nrow, ncol)
BDLKNC A value is read in for every grid cell. The value is the lakebed
leakance that will be assigned to lake/aquifer interfaces that occur
in the corresponding grid cell. If the wet-dry option flag (IWDFLG) is
not active (cells cannot rewet if they become dry), then the BDLKNC
values are assumed to represent the combined leakances of the lakebed
material and the aquifer material between the lake and the centers of
the underlying grid cells, i. e., the vertical conductance values (CV)
will not be used in the computation of conductances across lake/aquifer
boundary faces in the vertical direction.
IBOUND and WETDRY should be set to zero for every cell for which LKARR
is not equal to zero. IBOUND is defined in the input to the Basic
Package of MODFLOW. WETDRY is defined in the input to the BCF or other
flow package of MODFLOW if the IWDFLG option is active. When used with
the HUF package, the Lake Package has been modified to compute
effective lake-aquifer conductance solely on the basis of the
user-specified value of lakebed leakance; aquifer hydraulic
conductivities are not used in this calculation. An appropriate
informational message is now printed after the lakebed conductances
are written to the main output file.
sill_data : dict
(dataset 8 in documentation)
Dict of lists keyed by stress period. Each list has a tuple of dataset
8a, 8b for every multi-lake system, where dataset 8a is another tuple of
IC : int
The number of sublakes
ISUB : list of ints
The identification numbers of the sublakes in the sublake
system being described in this record. The center lake number
is listed first.
And dataset 8b contains
SILLVT : sequence of floats
A sequence of sill elevations for each sublakes that determines
whether the center lake is connected with a given sublake.
Values are entered for each sublake in the order the sublakes
are listed in the previous record.
flux_data : dict
(dataset 9 in documentation)
Dict of lists keyed by stress period. The list for each stress period
is a list of lists, with each list containing the variables
PRCPLK EVAPLK RNF WTHDRW [SSMN] [SSMX] from the documentation.
PRCPLK : float
The rate of precipitation per unit area at the surface of a
lake (L/T).
EVAPLK : float
The rate of evaporation per unit area from the surface of a
lake (L/T).
RNF : float
Overland runoff from an adjacent watershed entering the lake.
If RNF > 0, it is specified directly as a volumetric rate, or
flux (L3 /T). If RNF < 0, its absolute value is used as a
dimensionless multiplier applied to the product of the lake
precipitation rate per unit area (PRCPLK) and the surface area
of the lake at its full stage (occupying all layer 1 lake
cells). When RNF is entered as a dimensionless multiplier
(RNF < 0), it is considered to be the product of two
proportionality factors. The first is the ratio of the area of
the basin contributing runoff to the surface area of the lake
when it is at full stage. The second is the fraction of the
current rainfall rate that becomes runoff to the lake. This
procedure provides a means for the automated computation of
runoff rate from a watershed to a lake as a function of
varying rainfall rate. For example, if the basin area is 10
times greater than the surface area of the lake, and 20 percent
of the precipitation on the basin becomes overland runoff
directly into the lake, then set RNF = -2.0.
WTHDRW : float
The volumetric rate, or flux (L3 /T), of water removal from a
lake by means other than rainfall, evaporation, surface
outflow, or groundwater seepage. A negative value indicates
augmentation. Normally, this would be used to specify the
rate of artificial withdrawal from a lake for human water use,
or if negative, artificial augmentation of a lake volume for
aesthetic or recreational purposes.
SSMN : float
Minimum stage allowed for each lake in steady-state solution.
See notes on ssmn and ssmx above.
SSMX : float
SSMX Maximum stage allowed for each lake in steady-state
solution.
options : list of strings
Package options. (default is None).
extension : string
Filename extension (default is 'lak')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the cbc output name will be created using
the model name and .cbc extension (for example, modflowtest.cbc),
if ipakcbc is a number greater than zero. If a single string is passed
the package will be set to the string and cbc output names will be
created using the model name and .cbc extension, if ipakcbc is a
number greater than zero. To define the names for all package files
(input and output) the length of the list of strings should be 2.
Default is None.
Methods
-------
See Also
--------
Notes
-----
Parameters are not supported in FloPy.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
    >>> lakes = {}
    >>> lakes[0] = [[2, 3, 4, 15.6, 1050., -4]]  #this lake boundary will be
    >>> #applied to all stress periods
    >>> lak = flopy.modflow.ModflowLak(m, flux_data=lakes)
"""
def __init__(
    self,
    model,
    nlakes=1,
    ipakcb=None,
    theta=-1.0,
    nssitr=0,
    sscncr=0.0,
    surfdep=0.0,
    stages=1.0,
    stage_range=None,
    tab_files=None,
    tab_units=None,
    lakarr=None,
    bdlknc=None,
    sill_data=None,
    flux_data=None,
    extension="lak",
    unitnumber=None,
    filenames=None,
    options=None,
    lwrt=0,
    **kwargs
):
    """
    Package constructor.

    Validates and normalizes the user-supplied LAK input (stages, stage
    ranges, lake arrays, sill and flux data), registers the cbc output
    file and any tabfiles with the model, and adds this package to the
    parent model.  See the class docstring for parameter descriptions.
    """
    # set default unit number if one is not specified
    if unitnumber is None:
        unitnumber = ModflowLak._defaultunit()
    # When the TABLEINPUT option is active each lake needs its own
    # external tabfile entry in addition to the package file and the
    # optional cbc output file, hence nlen = 2 + nlakes.
    tabdata = False
    nlen = 2
    if options is not None:
        for option in options:
            if "TABLEINPUT" in option.upper():
                tabdata = True
                nlen += nlakes
                break
    # normalize filenames into a list of length nlen
    if filenames is None:
        filenames = [None for x in range(nlen)]
    elif isinstance(filenames, str):
        filenames = [filenames] + [None for x in range(nlen - 1)]
    elif isinstance(filenames, list):
        if len(filenames) < nlen:
            filenames = filenames + [None for x in range(2, nlen)]
    # update external file information with cbc output, if necessary
    if ipakcb is not None:
        fname = filenames[1]
        model.add_output_file(
            ipakcb, fname=fname, package=ModflowLak._ftype()
        )
    else:
        ipakcb = 0
    # table input files default to the trailing filename entries
    if tabdata:
        if tab_files is None:
            tab_files = filenames[2:]
        # make sure the number of tabfiles is equal to the number of lakes
        if len(tab_files) < nlakes:
            msg = (
                "a tabfile must be specified for each lake: "
                + "{} tabfiles specified ".format(len(tab_files))
                + "instead of {} tabfiles".format(nlakes)
            )
            # bug fix: this message was previously constructed but never
            # raised, silently deferring the failure to a later error
            raise ValueError(msg)
        # make sure tab_files are not None
        for idx, fname in enumerate(tab_files):
            if fname is None:
                msg = (
                    "a filename must be specified for the "
                    + "tabfile for lake {}".format(idx + 1)
                )
                raise ValueError(msg)
        # set unit for tab files if not passed to __init__
        if tab_units is None:
            tab_units = []
            for idx in range(len(tab_files)):
                tab_units.append(model.next_ext_unit())
        # add tabfiles as external files
        for iu, fname in zip(tab_units, tab_files):
            model.add_external(fname, iu)
    # Fill namefile items
    name = [ModflowLak._ftype()]
    units = [unitnumber]
    extra = [""]
    # set package name
    fname = [filenames[0]]
    # Call ancestor's init to set self.parent, extension, name
    # and unit number
    Package.__init__(
        self,
        model,
        extension=extension,
        name=name,
        unit_number=units,
        extra=extra,
        filenames=fname,
    )
    self.heading = (
        "# {} package for ".format(self.name[0])
        + " {}, ".format(model.version_types[model.version])
        + "generated by Flopy."
    )
    self.url = "lak.htm"
    if options is None:
        options = []
    self.options = options
    self.nlakes = nlakes
    self.ipakcb = ipakcb
    self.theta = theta
    self.nssitr = nssitr
    self.sscncr = sscncr
    self.surfdep = surfdep
    self.lwrt = lwrt
    # expand a scalar stage to one value per lake
    if isinstance(stages, float):
        if self.nlakes == 1:
            stages = np.array([self.nlakes], dtype=float) * stages
        else:
            stages = np.ones(self.nlakes, dtype=float) * stages
    elif isinstance(stages, list):
        stages = np.array(stages)
    if stages.shape[0] != nlakes:
        err = "stages shape should be " + "({}) but is only ({}).".format(
            nlakes, stages.shape[0]
        )
        raise Exception(err)
    self.stages = stages
    # default stage range spans (-10000, 10000) for every lake
    if stage_range is None:
        stage_range = np.ones((nlakes, 2), dtype=float)
        stage_range[:, 0] = -10000.0
        stage_range[:, 1] = 10000.0
    else:
        if isinstance(stage_range, list):
            stage_range = np.array(stage_range)
        elif isinstance(stage_range, float):
            err = (
                "stage_range should be a list or "
                + "array of size ({}, 2)".format(nlakes)
            )
            raise Exception(err)
    # (ssmn, ssmx) pairs are only meaningful when the first stress
    # period is steady state
    if self.parent.dis.steady[0]:
        if stage_range.shape != (nlakes, 2):
            err = (
                "stages shape should be "
                + "({},2) but is only {}.".format(
                    nlakes, stage_range.shape
                )
            )
            raise Exception(err)
    self.stage_range = stage_range
    # tabfile data
    self.tabdata = tabdata
    self.iunit_tab = tab_units
    if lakarr is None and bdlknc is None:
        err = "lakarr and bdlknc must be specified"
        raise Exception(err)
    nrow, ncol, nlay, nper = self.parent.get_nrow_ncol_nlay_nper()
    self.lakarr = Transient3d(
        model, (nlay, nrow, ncol), np.int32, lakarr, name="lakarr_"
    )
    self.bdlknc = Transient3d(
        model, (nlay, nrow, ncol), np.float32, bdlknc, name="bdlknc_"
    )
    # sill data (dataset 8) keyed by stress period
    if sill_data is not None:
        if not isinstance(sill_data, dict):
            try:
                sill_data = {0: sill_data}
            except:
                err = "sill_data must be a dictionary"
                raise Exception(err)
    # flux data (dataset 9) keyed by stress period; scalar/array values
    # are expanded to one 6-entry list per lake
    if flux_data is not None:
        if not isinstance(flux_data, dict):
            # convert array to a dictionary
            try:
                flux_data = {0: flux_data}
            except:
                err = "flux_data must be a dictionary"
                raise Exception(err)
        for key, value in flux_data.items():
            if isinstance(value, np.ndarray):
                td = {}
                for k in range(value.shape[0]):
                    td[k] = value[k, :].tolist()
                flux_data[key] = td
                if len(list(flux_data.keys())) != nlakes:
                    err = (
                        "flux_data dictionary must "
                        + "have {} entries".format(nlakes)
                    )
                    raise Exception(err)
            elif isinstance(value, float) or isinstance(value, int):
                td = {}
                for k in range(self.nlakes):
                    td[k] = (np.ones(6, dtype=float) * value).tolist()
                flux_data[key] = td
            elif isinstance(value, dict):
                try:
                    steady = self.parent.dis.steady[key]
                except:
                    steady = True
                nlen = 4
                if steady and key > 0:
                    nlen = 6
                for k in range(self.nlakes):
                    td = value[k]
                    if len(td) < nlen:
                        # bug fix: the original message had an empty
                        # format placeholder (stress period number was
                        # dropped) and swapped found/expected counts
                        err = (
                            "flux_data entry for stress period {} ".format(
                                key + 1
                            )
                            + "has {} entries but ".format(len(td))
                            + "should have {} entries".format(nlen)
                        )
                        raise Exception(err)
    self.flux_data = flux_data
    self.sill_data = sill_data
    self.parent.add_package(self)
    return
def _ncells(self):
"""Maximum number of cells that can have lakes (developed for
MT3DMS SSM package).
Returns
-------
ncells: int
maximum number of lak cells
"""
nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
return nlay * nrow * ncol
def write_file(self):
    """
    Write the package file.

    Serializes datasets 0 through 9 of the LAK input file to
    ``self.fn_path``, honoring the parent model's free/fixed format
    setting.  The write order mirrors the LAK3 input specification.

    Returns
    -------
    None
    """
    f = open(self.fn_path, "w")
    # dataset 0
    self.heading = "# {} package for ".format(
        self.name[0]
    ) + "{}, generated by Flopy.".format(self.parent.version)
    f.write("{0}\n".format(self.heading))
    # dataset 1a -- package options (e.g. TABLEINPUT)
    if len(self.options) > 0:
        for option in self.options:
            f.write("{} ".format(option))
        f.write("\n")
    # dataset 1b
    f.write(
        write_fixed_var(
            [self.nlakes, self.ipakcb], free=self.parent.free_format_input
        )
    )
    # dataset 2 -- NSSITR/SSCNCR only for steady models or negative
    # THETA; SURFDEP only when THETA is negative
    steady = np.any(self.parent.dis.steady.array)
    t = [self.theta]
    if self.theta < 0.0 or steady:
        t.append(self.nssitr)
        t.append(self.sscncr)
        if self.theta < 0.0:
            t.append(self.surfdep)
    f.write(write_fixed_var(t, free=self.parent.free_format_input))
    # dataset 3 -- stage (plus SSMN/SSMX for a steady first period and
    # the tabfile unit when TABLEINPUT is active)
    steady = self.parent.dis.steady[0]
    for n in range(self.nlakes):
        ipos = [10]
        t = [self.stages[n]]
        if steady:
            ipos.append(10)
            t.append(self.stage_range[n, 0])
            ipos.append(10)
            t.append(self.stage_range[n, 1])
        if self.tabdata:
            ipos.append(5)
            t.append(self.iunit_tab[n])
        f.write(
            write_fixed_var(
                t, ipos=ipos, free=self.parent.free_format_input
            )
        )
    ds8_keys = (
        list(self.sill_data.keys()) if self.sill_data is not None else []
    )
    ds9_keys = list(self.flux_data.keys())
    nper = self.parent.dis.steady.shape[0]
    for kper in range(nper):
        itmp, file_entry_lakarr = self.lakarr.get_kper_entry(kper)
        ibd, file_entry_bdlknc = self.bdlknc.get_kper_entry(kper)
        # ITMP2: 1 = dataset 9 supplied this period, -1 = reuse previous
        itmp2 = 0
        if kper in ds9_keys:
            itmp2 = 1
        elif len(ds9_keys) > 0:
            itmp2 = -1
        if isinstance(self.lwrt, list):
            tmplwrt = self.lwrt[kper]
        else:
            tmplwrt = self.lwrt
        t = [itmp, itmp2, tmplwrt]
        comment = "Stress period {}".format(kper + 1)
        f.write(
            write_fixed_var(
                t, free=self.parent.free_format_input, comment=comment
            )
        )
        # datasets 5-8 are only written when ITMP > 0
        if itmp > 0:
            f.write(file_entry_lakarr)
            f.write(file_entry_bdlknc)
            nslms = 0
            if kper in ds8_keys:
                ds8 = self.sill_data[kper]
                nslms = len(ds8)
            # dataset 7 -- number of multi-lake (sublake) systems
            f.write(
                write_fixed_var(
                    [nslms],
                    length=5,
                    free=self.parent.free_format_input,
                    comment="Data set 7",
                )
            )
            if nslms > 0:
                for n in range(nslms):
                    d1, d2 = ds8[n]
                    s = write_fixed_var(
                        d1,
                        length=5,
                        free=self.parent.free_format_input,
                        comment="Data set 8a",
                    )
                    f.write(s)
                    s = write_fixed_var(
                        d2,
                        free=self.parent.free_format_input,
                        comment="Data set 8b",
                    )
                    f.write(s)
        if itmp2 > 0:
            ds9 = self.flux_data[kper]
            for n in range(self.nlakes):
                try:
                    steady = self.parent.dis.steady[kper]
                except:
                    steady = True
                # SSMN/SSMX (entries 5-6) only for steady periods
                # after the first
                if kper > 0 and steady:
                    t = ds9[n]
                else:
                    t = ds9[n][0:4]
                s = write_fixed_var(
                    t,
                    free=self.parent.free_format_input,
                    comment="Data set 9a",
                )
                f.write(s)
    # close the lak file
    f.close()
@classmethod
def load(cls, f, model, nper=None, ext_unit_dict=None):
    """
    Load an existing package.

    Parameters
    ----------
    f : filename or file handle
        File to load.
    model : model object
        The model object (of type :class:`flopy.modflow.mf.Modflow`) to
        which this package will be added.
    nper : int
        The number of stress periods.  If nper is None, then nper will be
        obtained from the model object. (default is None).
    ext_unit_dict : dictionary, optional
        If the arrays in the file are specified using EXTERNAL,
        or older style array control records, then `f` should be a file
        handle.  In this case ext_unit_dict is required, which can be
        constructed using the function
        :class:`flopy.utils.mfreadnam.parsenamefile`.

    Returns
    -------
    str : ModflowLak object
        ModflowLak object.

    Examples
    --------
    >>> import flopy
    >>> m = flopy.modflow.Modflow()
    >>> lak = flopy.modflow.ModflowLak.load('test.lak', m)
    """
    if model.verbose:
        sys.stdout.write("loading lak package file...\n")
    openfile = not hasattr(f, "read")
    if openfile:
        filename = f
        f = open(filename, "r", errors="replace")
    # dataset 0 -- header; skip comment lines until the first data line
    while True:
        line = f.readline()
        if line[0] != "#":
            break
    options = []
    tabdata = False
    # dataset 1a -- optional TABLEINPUT keyword
    if "TABLEINPUT" in line.upper():
        if model.verbose:
            print(" reading lak dataset 1a")
        options.append("TABLEINPUT")
        tabdata = True
        line = f.readline()
    # read dataset 1b -- NLAKES and optional ILKCB
    if model.verbose:
        print(" reading lak dataset 1b")
    t = line.strip().split()
    nlakes = int(t[0])
    ipakcb = 0
    try:
        ipakcb = int(t[1])
    except:
        pass
    # read dataset 2 -- THETA [NSSITR SSCNCR [SURFDEP]]
    line = f.readline().rstrip()
    if model.array_free_format:
        t = line.split()
    else:
        t = read_fixed_var(line, ncol=4)
    theta = float(t[0])
    nssitr, sscncr = 0, 0.0
    if theta < 0:
        try:
            nssitr = int(t[1])
        except:
            if model.verbose:
                print(" implicit nssitr defined in file")
        try:
            sscncr = float(t[2])
        except:
            if model.verbose:
                print(" implicit sscncr defined in file")
    surfdep = 0.0
    if theta < 0.0:
        # SURFDEP is present only when THETA is negative
        surfdep = float(t[3])
    if nper is None:
        nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
    if model.verbose:
        print(" reading lak dataset 3")
    # dataset 3 -- per-lake stage, optional (SSMN, SSMX), optional
    # tabfile unit
    stages = []
    stage_range = []
    if tabdata:
        tab_units = []
    else:
        tab_units = None
    for lake in range(nlakes):
        line = f.readline().rstrip()
        if model.array_free_format:
            t = line.split()
        else:
            t = read_fixed_var(line, ipos=[10, 10, 10, 5])
        stages.append(t[0])
        ipos = 1
        if model.dis.steady[0]:
            stage_range.append((float(t[ipos]), float(t[ipos + 1])))
            ipos += 2
        if tabdata:
            iu = int(t[ipos])
            tab_units.append(iu)
    lake_loc = {}
    lake_lknc = {}
    sill_data = {}
    flux_data = {}
    lwrt = []
    for iper in range(nper):
        if model.verbose:
            print(
                " reading lak dataset 4 - "
                + "for stress period {}".format(iper + 1)
            )
        # dataset 4 -- ITMP ITMP1 LWRT
        line = f.readline().rstrip()
        if model.array_free_format:
            t = line.split()
        else:
            t = read_fixed_var(line, ncol=3)
        itmp, itmp1, tmplwrt = int(t[0]), int(t[1]), int(t[2])
        lwrt.append(tmplwrt)
        # datasets 5-8 are only present when ITMP > 0
        if itmp > 0:
            if model.verbose:
                print(
                    " reading lak dataset 5 - "
                    + "for stress period {}".format(iper + 1)
                )
            name = "LKARR_StressPeriod_{}".format(iper)
            lakarr = Util3d.load(
                f, model, (nlay, nrow, ncol), np.int32, name, ext_unit_dict
            )
            if model.verbose:
                print(
                    " reading lak dataset 6 - "
                    + "for stress period {}".format(iper + 1)
                )
            name = "BDLKNC_StressPeriod_{}".format(iper)
            bdlknc = Util3d.load(
                f,
                model,
                (nlay, nrow, ncol),
                np.float32,
                name,
                ext_unit_dict,
            )
            lake_loc[iper] = lakarr
            lake_lknc[iper] = bdlknc
            if model.verbose:
                print(
                    " reading lak dataset 7 - "
                    + "for stress period {}".format(iper + 1)
                )
            # dataset 7 -- NSLMS (number of sublake systems)
            line = f.readline().rstrip()
            t = line.split()
            nslms = int(t[0])
            ds8 = []
            if nslms > 0:
                if model.verbose:
                    print(
                        " reading lak dataset 8 - "
                        + "for stress period {}".format(iper + 1)
                    )
                for i in range(nslms):
                    # dataset 8a -- IC followed by IC sublake ids
                    line = f.readline().rstrip()
                    if model.array_free_format:
                        t = line.split()
                    else:
                        ic = int(line[0:5])
                        t = read_fixed_var(line, ncol=ic + 1, length=5)
                    ic = int(t[0])
                    ds8a = [ic]
                    for j in range(1, ic + 1):
                        ds8a.append(int(t[j]))
                    # dataset 8b -- IC-1 sill elevations
                    line = f.readline().rstrip()
                    if model.array_free_format:
                        t = line.split()
                    else:
                        t = read_fixed_var(line, ncol=ic - 1)
                    silvt = []
                    for j in range(ic - 1):
                        silvt.append(float(t[j]))
                    ds8.append((ds8a, silvt))
                sill_data[iper] = ds8
        # dataset 9 is read whenever ITMP1 >= 0
        if itmp1 >= 0:
            if model.verbose:
                print(
                    " reading lak dataset 9 - "
                    + "for stress period {}".format(iper + 1)
                )
            ds9 = {}
            for n in range(nlakes):
                line = f.readline().rstrip()
                if model.array_free_format:
                    t = line.split()
                else:
                    t = read_fixed_var(line, ncol=6)
                tds = []
                tds.append(float(t[0]))
                tds.append(float(t[1]))
                tds.append(float(t[2]))
                tds.append(float(t[3]))
                # SSMN/SSMX: from dataset 3 for the first period,
                # from this record for later steady periods
                if model.dis.steady[iper]:
                    if iper == 0:
                        tds.append(stage_range[n][0])
                        tds.append(stage_range[n][1])
                    else:
                        tds.append(float(t[4]))
                        tds.append(float(t[5]))
                else:
                    tds.append(0.0)
                    tds.append(0.0)
                ds9[n] = tds
            flux_data[iper] = ds9
    if openfile:
        f.close()
    # convert lake data to Transient3d objects
    lake_loc = Transient3d(
        model, (nlay, nrow, ncol), np.int32, lake_loc, name="lakarr_"
    )
    lake_lknc = Transient3d(
        model, (nlay, nrow, ncol), np.float32, lake_lknc, name="bdlknc_"
    )
    # determine specified unit number and recover filenames for the
    # package, the cbc output and any tabfiles
    n = 2
    if tab_units is not None:
        n += nlakes
    unitnumber = None
    filenames = [None for x in range(n)]
    if ext_unit_dict is not None:
        unitnumber, filenames[0] = model.get_ext_dict_attr(
            ext_unit_dict, filetype=ModflowLak._ftype()
        )
        if ipakcb > 0:
            iu, filenames[1] = model.get_ext_dict_attr(
                ext_unit_dict, unit=ipakcb
            )
            model.add_pop_key_list(ipakcb)
        ipos = 2
        if tab_units is not None:
            for i in range(len(tab_units)):
                iu, filenames[ipos] = model.get_ext_dict_attr(
                    ext_unit_dict, unit=tab_units[i]
                )
                ipos += 1
    return cls(
        model,
        options=options,
        nlakes=nlakes,
        ipakcb=ipakcb,
        theta=theta,
        nssitr=nssitr,
        surfdep=surfdep,
        sscncr=sscncr,
        lwrt=lwrt,
        stages=stages,
        stage_range=stage_range,
        tab_units=tab_units,
        lakarr=lake_loc,
        bdlknc=lake_lknc,
        sill_data=sill_data,
        flux_data=flux_data,
        unitnumber=unitnumber,
        filenames=filenames,
    )
@staticmethod
def _ftype():
return "LAK"
@staticmethod
def _defaultunit():
return 119
| 39.44186
| 82
| 0.52254
|
acfeac4dce08e67da07979f139dce0917fc0b4b0
| 7,404
|
py
|
Python
|
examples/parametertree.py
|
sneakers-the-rat/pyqtgraph
|
65ef2a5a60a1a08d0106e819110528b2ca3499a3
|
[
"MIT"
] | 150
|
2018-03-27T16:45:37.000Z
|
2022-03-30T03:47:56.000Z
|
examples/parametertree.py
|
sneakers-the-rat/pyqtgraph
|
65ef2a5a60a1a08d0106e819110528b2ca3499a3
|
[
"MIT"
] | 34
|
2018-09-28T00:01:59.000Z
|
2022-03-21T15:40:02.000Z
|
examples/parametertree.py
|
sneakers-the-rat/pyqtgraph
|
65ef2a5a60a1a08d0106e819110528b2ca3499a3
|
[
"MIT"
] | 40
|
2018-04-06T19:42:21.000Z
|
2022-01-11T00:34:17.000Z
|
# -*- coding: utf-8 -*-
"""
This example demonstrates the use of pyqtgraph's parametertree system. This provides
a simple way to generate user interfaces that control sets of parameters. The example
demonstrates a variety of different parameter types (int, float, list, etc.)
as well as some customized parameter types
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
app = QtGui.QApplication([])
import pyqtgraph.parametertree.parameterTypes as pTypes
from pyqtgraph.parametertree import Parameter, ParameterTree, ParameterItem, registerParameterType
## test subclassing parameters
## This parameter automatically generates two child parameters which are always reciprocals of each other
class ComplexParameter(pTypes.GroupParameter):
    """Checkable group whose two float children A and B are kept as
    reciprocals of each other (A = 1/B)."""
    def __init__(self, **opts):
        # present the group itself as a checkable boolean item
        opts['type'] = 'bool'
        opts['value'] = True
        pTypes.GroupParameter.__init__(self, **opts)
        self.addChild({'name': 'A = 1/B', 'type': 'float', 'value': 7, 'suffix': 'Hz', 'siPrefix': True})
        self.addChild({'name': 'B = 1/A', 'type': 'float', 'value': 1/7., 'suffix': 's', 'siPrefix': True})
        self.a = self.param('A = 1/B')
        self.b = self.param('B = 1/A')
        self.a.sigValueChanged.connect(self.aChanged)
        self.b.sigValueChanged.connect(self.bChanged)
    def aChanged(self):
        # blockSignal prevents infinite mutual-update recursion
        self.b.setValue(1.0 / self.a.value(), blockSignal=self.bChanged)
    def bChanged(self):
        self.a.setValue(1.0 / self.b.value(), blockSignal=self.aChanged)
## test add/remove
## this group includes a menu allowing the user to add new parameters into its child list
class ScalableGroup(pTypes.GroupParameter):
    """Group with an "Add" menu that lets the user append new child
    parameters of type str, float, or int."""
    def __init__(self, **opts):
        opts['type'] = 'group'
        opts['addText'] = "Add"
        opts['addList'] = ['str', 'float', 'int']
        pTypes.GroupParameter.__init__(self, **opts)
    def addNew(self, typ):
        # default value for each supported child type
        defaults = {'str': '', 'float': 0.0, 'int': 0}
        child_name = "ScalableParam %d" % (len(self.childs) + 1)
        self.addChild(dict(
            name=child_name,
            type=typ,
            value=defaults[typ],
            removable=True,
            renamable=True,
        ))
# Declarative description of the demo tree: each dict becomes one
# Parameter of the given 'type'; 'children' nest further parameters.
params = [
    {'name': 'Basic parameter data types', 'type': 'group', 'children': [
        {'name': 'Integer', 'type': 'int', 'value': 10},
        {'name': 'Float', 'type': 'float', 'value': 10.5, 'step': 0.1},
        {'name': 'String', 'type': 'str', 'value': "hi"},
        {'name': 'List', 'type': 'list', 'values': [1,2,3], 'value': 2},
        {'name': 'Named List', 'type': 'list', 'values': {"one": 1, "two": "twosies", "three": [3,3,3]}, 'value': 2},
        {'name': 'Boolean', 'type': 'bool', 'value': True, 'tip': "This is a checkbox"},
        {'name': 'Color', 'type': 'color', 'value': "FF0", 'tip': "This is a color button"},
        {'name': 'Gradient', 'type': 'colormap'},
        {'name': 'Subgroup', 'type': 'group', 'children': [
            {'name': 'Sub-param 1', 'type': 'int', 'value': 10},
            {'name': 'Sub-param 2', 'type': 'float', 'value': 1.2e6},
        ]},
        {'name': 'Text Parameter', 'type': 'text', 'value': 'Some text...'},
        {'name': 'Action Parameter', 'type': 'action'},
    ]},
    {'name': 'Numerical Parameter Options', 'type': 'group', 'children': [
        {'name': 'Units + SI prefix', 'type': 'float', 'value': 1.2e-6, 'step': 1e-6, 'siPrefix': True, 'suffix': 'V'},
        {'name': 'Limits (min=7;max=15)', 'type': 'int', 'value': 11, 'limits': (7, 15), 'default': -6},
        {'name': 'DEC stepping', 'type': 'float', 'value': 1.2e6, 'dec': True, 'step': 1, 'siPrefix': True, 'suffix': 'Hz'},
    ]},
    {'name': 'Save/Restore functionality', 'type': 'group', 'children': [
        {'name': 'Save State', 'type': 'action'},
        {'name': 'Restore State', 'type': 'action', 'children': [
            {'name': 'Add missing items', 'type': 'bool', 'value': True},
            {'name': 'Remove extra items', 'type': 'bool', 'value': True},
        ]},
    ]},
    {'name': 'Extra Parameter Options', 'type': 'group', 'children': [
        {'name': 'Read-only', 'type': 'float', 'value': 1.2e6, 'siPrefix': True, 'suffix': 'Hz', 'readonly': True},
        {'name': 'Renamable', 'type': 'float', 'value': 1.2e6, 'siPrefix': True, 'suffix': 'Hz', 'renamable': True},
        {'name': 'Removable', 'type': 'float', 'value': 1.2e6, 'siPrefix': True, 'suffix': 'Hz', 'removable': True},
    ]},
    {'name': 'Custom context menu', 'type': 'group', 'children': [
        {'name': 'List contextMenu', 'type': 'float', 'value': 0, 'context': [
            'menu1',
            'menu2'
        ]},
        {'name': 'Dict contextMenu', 'type': 'float', 'value': 0, 'context': {
            'changeName': 'Title',
            'internal': 'What the user sees',
        }},
    ]},
    # custom Parameter subclasses defined above
    ComplexParameter(name='Custom parameter group (reciprocal values)'),
    ScalableGroup(name="Expandable Parameter Group", children=[
        {'name': 'ScalableParam 1', 'type': 'str', 'value': "default param 1"},
        {'name': 'ScalableParam 2', 'type': 'str', 'value': "default param 2"},
    ]),
]
## Create tree of Parameter objects
p = Parameter.create(name='params', type='group', children=params)
## If anything changes in the tree, print a message
def change(param, changes):
    """Print a human-readable summary of every reported tree change."""
    print("tree changes:")
    for param, change, data in changes:
        path = p.childPath(param)
        # fall back to the bare parameter name when it has no path
        childName = param.name() if path is None else '.'.join(path)
        print(' parameter: %s' % childName)
        print(' change: %s' % change)
        print(' data: %s' % str(data))
        print(' ----------')
# print a summary whenever anything in the tree changes
p.sigTreeStateChanged.connect(change)
def valueChanging(param, value):
    """Announce a value edit that has not yet been committed."""
    msg = "Value changing (not finalized): %s %s" % (param, value)
    print(msg)
# Too lazy for recursion:
# connect the top two levels of the tree to the live-edit callback
for child in p.children():
    child.sigValueChanging.connect(valueChanging)
    for ch2 in child.children():
        ch2.sigValueChanging.connect(valueChanging)
def save():
    """Snapshot the current parameter-tree state into the global."""
    global state
    state = p.saveState()
def restore():
    """Restore the last saved state, honoring the add/remove flags."""
    global state
    add = p['Save/Restore functionality', 'Restore State', 'Add missing items']
    rem = p['Save/Restore functionality', 'Restore State', 'Remove extra items']
    p.restoreState(state, addChildren=add, removeChildren=rem)
# wire the action parameters to the save/restore helpers
p.param('Save/Restore functionality', 'Save State').sigActivated.connect(save)
p.param('Save/Restore functionality', 'Restore State').sigActivated.connect(restore)
## Create two ParameterTree widgets, both accessing the same data
t = ParameterTree()
t.setParameters(p, showTop=False)
t.setWindowTitle('pyqtgraph example: Parameter Tree')
t2 = ParameterTree()
t2.setParameters(p, showTop=False)
# lay the two trees side by side under an explanatory label
win = QtGui.QWidget()
layout = QtGui.QGridLayout()
win.setLayout(layout)
layout.addWidget(QtGui.QLabel("These are two views of the same data. They should always display the same values."), 0, 0, 1, 2)
layout.addWidget(t, 1, 0, 1, 1)
layout.addWidget(t2, 1, 1, 1, 1)
win.show()
win.resize(800,800)
## test save/restore
s = p.saveState()
p.restoreState(s)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    import sys
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| 39.806452
| 128
| 0.59873
|
acfeadd47a29a99bcf550b8f2f8ef032458c355b
| 297
|
py
|
Python
|
exercises/pt/exc_03_16_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085
|
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/pt/exc_03_16_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79
|
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/pt/exc_03_16_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361
|
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
import spacy
# load the small English pipeline
nlp = spacy.load("en_core_web_sm")
text = (
    "Chick-fil-A is an American fast food restaurant chain headquartered in "
    "the city of College Park, Georgia, specializing in chicken sandwiches."
)
# Only tokenize the text
doc = nlp(text)
print([token.text for token in doc])
| 24.75
| 77
| 0.73064
|
acfeaf004dee0450bf95c1fc1afe6634d4c35921
| 3,809
|
py
|
Python
|
ssm_cache/cache.py
|
benkehoe/ssm-cache-python
|
c21c536b7ba38494bfccafea311a853f50360609
|
[
"MIT"
] | 1
|
2020-05-25T08:26:55.000Z
|
2020-05-25T08:26:55.000Z
|
ssm_cache/cache.py
|
benkehoe/ssm-cache-python
|
c21c536b7ba38494bfccafea311a853f50360609
|
[
"MIT"
] | null | null | null |
ssm_cache/cache.py
|
benkehoe/ssm-cache-python
|
c21c536b7ba38494bfccafea311a853f50360609
|
[
"MIT"
] | null | null | null |
""" Cache module that implements the SSM caching wrapper """
from __future__ import print_function
from datetime import datetime, timedelta
from functools import wraps
from past.builtins import basestring
import boto3
class InvalidParam(Exception):
    """ Raised when a requested parameter name is not configured or does not exist in SSM """
class SSMParameter(object):
    """ The class wraps an SSM Parameter and adds optional caching """
    # NOTE(review): the client is created at import time -- this assumes
    # AWS credentials/region are already configured when the module loads
    ssm_client = boto3.client('ssm')
    def __init__(self, param_names=None, max_age=None, with_decryption=True):
        # syntactic sugar: promote a single name to a one-element list
        if isinstance(param_names, basestring):
            param_names = [param_names]
        if not param_names:
            raise ValueError("At least one parameter should be configured")
        self._names = param_names  # configured SSM parameter names
        self._values = {}  # cache of name -> value
        self._with_decryption = with_decryption
        self._last_refresh_time = None  # datetime of last successful fetch
        self._max_age = max_age  # cache TTL in seconds (None/0 = never expire)
        self._max_age_delta = timedelta(seconds=max_age or 0)
    def _should_refresh(self):
        """ Return True when the cached values must be re-fetched. """
        # never force refresh if no max_age is configured
        if not self._max_age:
            return False
        # always force refresh if values were never fetched
        if not self._last_refresh_time:
            return True
        # force refresh only if max_age seconds have expired
        return datetime.utcnow() > self._last_refresh_time + self._max_age_delta
    def refresh(self):
        """ Force refresh of the configured param names """
        # NOTE(review): unknown names are reported in the response's
        # 'InvalidParameters' and silently omitted here; they surface
        # later as InvalidParam from value()
        response = self.ssm_client.get_parameters(
            Names=self._names,
            WithDecryption=self._with_decryption,
        )
        # create a dict of name:value for each param
        self._values = {
            param['Name']: param['Value']
            for param in response['Parameters']
        }
        # keep track of update date for max_age checks
        self._last_refresh_time = datetime.utcnow()
    def value(self, name=None):
        """
        Retrieve the value of a given param name.
        If only one name is configured, the name can be omitted.

        Raises InvalidParam when the name is not configured or does not
        exist in SSM, and TypeError when the name is required but omitted.
        """
        # transform single string into list (syntactic sugar)
        if name is None:
            # name is required, unless only one parameter is configured
            if len(self._names) == 1:
                name = self._names[0]
            else:
                raise TypeError("Parameter name is required (None was given)")
        if name not in self._names:
            raise InvalidParam("Parameter %s is not configured" % name)
        # fetch on first use or when the cache TTL has expired
        if name not in self._values or self._should_refresh():
            self.refresh()
        try:
            return self._values[name]
        except KeyError:
            raise InvalidParam("Param '%s' does not exist" % name)
    def values(self, names=None):
        """
        Retrieve a list of values.
        If no name is provided, all values are returned.
        """
        if not names:
            names = self._names
        return [self.value(name) for name in names]
    def refresh_on_error(
        self,
        error_class=Exception,
        error_callback=None,
        retry_argument='is_retry'
    ):
        """ Decorator to handle errors and retries """
        def true_decorator(func):
            """ Actual func wrapper """
            @wraps(func)
            def wrapped(*args, **kwargs):
                """ Actual error/retry handling """
                try:
                    return func(*args, **kwargs)
                except error_class:
                    # refresh the cached values and retry exactly once,
                    # flagging the retry via the extra keyword argument
                    self.refresh()
                    if callable(error_callback):
                        error_callback()
                    kwargs[retry_argument] = True
                    return func(*args, **kwargs)
            return wrapped
        return true_decorator
| 36.27619
| 80
| 0.592807
|
acfeb070bb783851d7cdf64520eff54c3f18b77f
| 2,801
|
py
|
Python
|
spacy_ixakat/ixakat.py
|
sarnthil/spaCy-ixaKat
|
841e6d8938eca8e69ca620c85ba4f164dee4071f
|
[
"MIT"
] | null | null | null |
spacy_ixakat/ixakat.py
|
sarnthil/spaCy-ixaKat
|
841e6d8938eca8e69ca620c85ba4f164dee4071f
|
[
"MIT"
] | null | null | null |
spacy_ixakat/ixakat.py
|
sarnthil/spaCy-ixaKat
|
841e6d8938eca8e69ca620c85ba4f164dee4071f
|
[
"MIT"
] | 1
|
2022-02-07T07:29:07.000Z
|
2022-02-07T07:29:07.000Z
|
#! /usr/bin/python3 -i
# coding=utf-8
import os
PACKAGE_DIR=os.path.abspath(os.path.dirname(__file__))
IXAKAT2UD=os.path.join(PACKAGE_DIR,"bin","ixakat2ud")
IXAKAT2CONLL=os.path.join(PACKAGE_DIR,"bin","ixakat2conll")
import numpy
from spacy.language import Language
from spacy.symbols import LANG,NORM,LEMMA,POS,TAG,DEP,HEAD,ENT_IOB,ENT_TYPE
from spacy.tokens import Doc,Span,Token
from spacy.util import get_lang_class
class ixaKatLanguage(Language):
    """Minimal spaCy ``Language`` shell for Basque ("eu") backed by ixaKat.

    Instead of loading a trained spaCy pipeline, it installs an
    ``ixaKatTokenizer`` that delegates tokenization/tagging/parsing to
    the external ixaKat converter scripts.
    """
    lang="eu"
    max_length=10**6  # maximum text length spaCy will accept
    def __init__(self,convUD):
        # convUD: True -> tokenizer returns a spaCy Doc (UD conversion);
        # False -> tokenizer returns raw CoNLL text.
        self.Defaults.lex_attr_getters[LANG]=lambda _text:"eu"
        try:
            # Older spaCy API: vocab factory lives on Defaults.
            self.vocab=self.Defaults.create_vocab()
            self.pipeline=[]
        except:
            # Newer spaCy API (module-level create_vocab helper).
            # NOTE(review): bare except would also hide unrelated errors.
            from spacy.vocab import create_vocab
            self.vocab=create_vocab("eu",self.Defaults)
            self._components=[]
            self._disabled=set()
        self.tokenizer=ixaKatTokenizer(self.vocab,convUD)
        # Metadata normally supplied by a packaged model.
        self._meta={
            "author":"Koichi Yasuoka",
            "description":"derived from ixaKat",
            "lang":"eu_ixaKat",
            "license":"MIT",
            "name":"eu_ixaKat",
            "pipeline":"Tokenizer, POS-Tagger, Parser",
            "spacy_version":">=2.2.2"
        }
        self._path=None
class ixaKatTokenizer(object):
    """Tokenizer that pipes text through the external ixaKat binaries and
    builds a fully tagged/parsed spaCy ``Doc`` from the resulting
    CoNLL-U-style table.
    """
    # Serialization is a no-op: the "model" is an external executable.
    to_disk=lambda self,*args,**kwargs:None
    from_disk=lambda self,*args,**kwargs:None
    to_bytes=lambda self,*args,**kwargs:None
    from_bytes=lambda self,*args,**kwargs:None
    def __init__(self,vocab,convUD):
        import subprocess
        # Pipe the raw text through the appropriate converter script.
        self.model=lambda s:subprocess.check_output([IXAKAT2UD if convUD else IXAKAT2CONLL],input=s.encode("utf-8")).decode("utf-8")
        self.convUD=convUD
        self.vocab=vocab
    def __call__(self,text):
        u=self.model(text) if text else ""
        if not self.convUD:
            # CoNLL mode: hand back the converter output unchanged (a str,
            # not a Doc).
            return u
        vs=self.vocab.strings
        r=vs.add("ROOT")
        words=[]
        lemmas=[]
        pos=[]
        tags=[]
        heads=[]
        deps=[]
        spaces=[]
        for t in u.split("\n"):
            # Skip blank lines and CoNLL-U comment lines.
            if t=="" or t.startswith("#"):
                continue
            s=t.split("\t")
            if len(s)!=10:
                continue  # a valid CoNLL-U token row has exactly 10 columns
            id,form,lemma,upos,xpos,dummy_feats,head,deprel,dummy_deps,misc=s
            words.append(form)
            lemmas.append(vs.add(lemma))
            pos.append(vs.add(upos))
            tags.append(vs.add(xpos))
            if deprel=="root" or deprel=="ROOT":
                heads.append(0)
                deps.append(r)
            elif head=="0":
                # Head 0 with a non-root label: keep the label, self-attach.
                heads.append(0)
                deps.append(vs.add(deprel))
            else:
                # Offset relative to the token itself, as spaCy's HEAD
                # attribute expects (not the absolute CoNLL index).
                heads.append(int(head)-int(id))
                deps.append(vs.add(deprel))
            spaces.append(False if "SpaceAfter=No" in misc else True)
        doc=Doc(self.vocab,words=words,spaces=spaces)
        a=numpy.array(list(zip(lemmas,pos,tags,deps,heads)),dtype="uint64")
        doc.from_array([LEMMA,POS,TAG,DEP,HEAD],a)
        try:
            # Older spaCy versions require these flags set manually;
            # newer versions removed the setters, hence the try/except.
            doc.is_tagged=True
            doc.is_parsed=True
        except:
            pass
        return doc
def load(convUD=True):
    """Build and return an ixaKat-backed spaCy pipeline.

    With ``convUD=True`` the pipeline yields spaCy ``Doc`` objects;
    with ``False`` it yields raw CoNLL text.
    """
    nlp = ixaKatLanguage(convUD)
    return nlp
| 28.876289
| 128
| 0.656194
|
acfeb21c559d090f95c060819bfb25747b8b61ab
| 7,354
|
py
|
Python
|
pvn3d/lib/utils/etw_pytorch_utils/sacred_trainer.py
|
JiazeWang/PVN3D
|
07241f5e0de488c123cd78f516a707bff207c2e0
|
[
"MIT"
] | 369
|
2019-11-12T08:27:08.000Z
|
2022-03-28T08:33:14.000Z
|
pvn3d/lib/utils/etw_pytorch_utils/sacred_trainer.py
|
hansongfang/PVN3D
|
1f0216505faa543ac2367cf485e0703cabcaefa0
|
[
"MIT"
] | 99
|
2019-11-27T12:48:49.000Z
|
2022-03-23T07:15:24.000Z
|
pvn3d/lib/utils/etw_pytorch_utils/sacred_trainer.py
|
hansongfang/PVN3D
|
1f0216505faa543ac2367cf485e0703cabcaefa0
|
[
"MIT"
] | 102
|
2019-11-20T13:14:16.000Z
|
2022-03-31T08:05:29.000Z
|
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import torch
import torch.nn as nn
import numpy as np
import tqdm
import sacred
import os.path as osp
from .pytorch_utils import checkpoint_state
if False:
# Workaround for type hints without depending on the `typing` module
from typing import *
class _DefaultExCallback(object):
def __init__(self):
self.train_vals = {}
self.train_emas = {}
self.ema_beta = 0.25
def __call__(self, ex, mode, k, v):
# type: (_DefaultExCallback, sacred.Experiment, Any, Any, Any) -> None
if mode == "train":
self.train_emas[k] = self.ema_beta * v + (
1.0 - self.ema_beta
) * self.train_emas.get(k, v)
self.train_vals[k] = self.train_vals.get(k, []) + [v]
ex.log_scalar("training.{k}".format({"k": k}), self.train_emas[k])
elif mode == "val":
ex.log_scalar("val.{k}".format({"k": k}), np.mean(np.array(v)))
ex.log_scalar(
"train.{k}".format({"k": k}), np.mean(np.array(self.train_vals[k]))
)
self.train_vals[k] = []
class SacredTrainer(object):
    r"""
    Reasonably generic trainer for pytorch models.

    Parameters
    ----------
    model : pytorch model
        Model to be trained
    model_fn : function (model, inputs, labels) -> preds, loss, accuracy
        Forward/loss function; called with ``eval=True`` for validation
    optimizer : torch.optim
        Optimizer for model
    lr_scheduler : torch.optim.lr_scheduler
        Learning rate scheduler. .step(it) is called before every training step
    bnm_scheduler : BNMomentumScheduler
        Batchnorm momentum scheduler. .step(it) is called before every training step
    eval_frequency : int
        How often (in iterations) to run an eval; <= 0 means once per epoch
    ex : sacred.Experiment or None
        Experiment used for metric logging and artifact upload
    checkpoint_dir : str or None
        Directory where checkpoint.pt / best.pt are written (None disables saving)
    """
    def __init__(
        self,
        model,
        model_fn,
        optimizer,
        lr_scheduler=None,
        bnm_scheduler=None,
        eval_frequency=-1,
        ex=None,
        checkpoint_dir=None,
    ):
        self.model, self.model_fn, self.optimizer, self.lr_scheduler, self.bnm_scheduler = (
            model,
            model_fn,
            optimizer,
            lr_scheduler,
            bnm_scheduler,
        )
        self.checkpoint_dir = checkpoint_dir
        self.eval_frequency = eval_frequency
        self.ex = ex
        # Per-metric callbacks; metrics without an entry fall back to the
        # shared default callback below.
        self.update_callbacks = {}
        self.default_cb = _DefaultExCallback()
    def add_callback(self, name, cb):
        """Register *cb* to handle updates of the metric called *name*."""
        self.update_callbacks[name] = cb
    def add_callbacks(self, cbs={}, **kwargs):
        """Register several callbacks at once (mapping and/or keyword form)."""
        # NOTE(review): the mutable default {} is never mutated here
        # (dict(cbs) copies it first), so it is safe as written.
        cbs = dict(cbs)
        cbs.update(**kwargs)
        for name, cb in cbs.items():
            self.add_callback(name, cb)
    def _update(self, mode, val_dict):
        """Dispatch each metric in *val_dict* to its callback (or default)."""
        for k, v in val_dict.items():
            if k in self.update_callbacks:
                self.update_callbacks[k](self.ex, mode, k, v)
            else:
                self.default_cb(self.ex, mode, k, v)
    def _train_it(self, it, batch):
        """Run one optimization step on *batch*; return the eval dict."""
        self.model.train()
        if self.lr_scheduler is not None:
            self.lr_scheduler.step(it)
        if self.bnm_scheduler is not None:
            self.bnm_scheduler.step(it)
        self.optimizer.zero_grad()
        _, loss, eval_res = self.model_fn(self.model, batch)
        loss.backward()
        self.optimizer.step()
        return eval_res
    def eval_epoch(self, d_loader):
        """Evaluate over *d_loader*; return (mean loss, dict of metric lists)."""
        self.model.eval()
        eval_dict = {}
        total_loss = 0.0
        # NOTE(review): count starts at 1, so the reported mean loss is
        # divided by len(d_loader)+1 -- slightly biased low.
        count = 1.0
        for i, data in tqdm.tqdm(
            enumerate(d_loader, 0), total=len(d_loader), leave=False, desc="val"
        ):
            self.optimizer.zero_grad()
            _, loss, eval_res = self.model_fn(self.model, data, eval=True)
            total_loss += loss.item()
            count += 1
            for k, v in eval_res.items():
                if v is not None:
                    eval_dict[k] = eval_dict.get(k, []) + [v]
        return total_loss / count, eval_dict
    def train(
        self,
        start_it,
        start_epoch,
        n_epochs,
        train_loader,
        test_loader=None,
        best_loss=1e10,
    ):
        # type: (SacredTrainer, Any, int, int, torch.utils.data.DataLoader, torch.utils.data.DataLoader, float) -> float
        r"""
        Call to begin training the model
        Parameters
        ----------
        start_it : int
            Global iteration counter to resume from
        start_epoch : int
            Epoch to start at
        n_epochs : int
            Number of epochs to train for
        test_loader : torch.utils.data.DataLoader
            DataLoader of the test_data
        train_loader : torch.utils.data.DataLoader
            DataLoader of training data
        best_loss : float
            Testing loss of the best model
        Returns
        -------
        float
            Best (lowest) validation loss observed.
        """
        # <=0 means "evaluate once per epoch".
        eval_frequency = (
            self.eval_frequency if self.eval_frequency > 0 else len(train_loader)
        )
        it = start_it
        with tqdm.trange(
            start_epoch, n_epochs, desc="epochs", dynamic_ncols=True
        ) as tbar, tqdm.tqdm(
            total=eval_frequency, leave=False, desc="train", dynamic_ncols=True
        ) as pbar:
            for epoch in tbar:
                for batch in train_loader:
                    res = self._train_it(it, batch)
                    it += 1
                    pbar.update()
                    pbar.set_postfix(dict(total_it=it))
                    tbar.refresh()
                    if self.ex is not None:
                        self._update("train", res)
                    # Periodic evaluation + checkpointing.
                    if (it % eval_frequency) == 0:
                        pbar.close()
                        if test_loader is not None:
                            val_loss, res = self.eval_epoch(test_loader)
                            if self.ex is not None:
                                self._update("val", res)
                            if self.checkpoint_dir is not None:
                                is_best = val_loss < best_loss
                                best_loss = min(val_loss, best_loss)
                                state = checkpoint_state(
                                    self.model, self.optimizer, val_loss, epoch, it
                                )
                                # Always refresh the rolling checkpoint ...
                                name = osp.join(self.checkpoint_dir, "checkpoint.pt")
                                torch.save(state, name)
                                if self.ex is not None:
                                    self.ex.add_artifact(name)
                                # ... and snapshot the best model separately.
                                if is_best:
                                    name = osp.join(self.checkpoint_dir, "best.pt")
                                    torch.save(state, name)
                                    if self.ex is not None:
                                        self.ex.add_artifact(name)
                        # Re-create the inner progress bar for the next window.
                        pbar = tqdm.tqdm(
                            total=eval_frequency,
                            leave=False,
                            desc="train",
                            dynamic_ncols=True,
                        )
                        pbar.set_postfix(dict(total_it=it))
        return best_loss
| 30.89916
| 120
| 0.517677
|
acfeb26ca7caa7037547df8edbf11277d5b807f6
| 439
|
py
|
Python
|
Mi_comida_favorita.py
|
rmanuel34/Programacion-
|
681b3912b88f7c839d1240b2f6f3b6d72adab6cf
|
[
"Apache-2.0"
] | null | null | null |
Mi_comida_favorita.py
|
rmanuel34/Programacion-
|
681b3912b88f7c839d1240b2f6f3b6d72adab6cf
|
[
"Apache-2.0"
] | null | null | null |
Mi_comida_favorita.py
|
rmanuel34/Programacion-
|
681b3912b88f7c839d1240b2f6f3b6d72adab6cf
|
[
"Apache-2.0"
] | null | null | null |
comidas["pizza", "hamburguesa", "costilla agridulce", "hot dog", "pollo asado", "arroz chino", "papas fitas", "ensalada de papas", "pastelitos de maseca", "pastelitos de harina", "carne ala plancha", "carna asada", "bistec", "camarones empanizados", "lasaña", "sopa marinera", "ceviche", "pollo frito", "baleadas", "aros de cebolla"]
def imprimir_menu()
print("escojas su platillo favorito")
for comida in comidas
print(comidas)
| 73.166667
| 335
| 0.70615
|
acfeb27b04f0f486a4c98dcf0eacd20a3afa785c
| 1,264
|
py
|
Python
|
models/simple.py
|
MetroStar/bitnest
|
a8d9cef5a17a5366e088a774ae951a0f06f97ae7
|
[
"MIT"
] | 4
|
2021-09-16T21:33:13.000Z
|
2022-01-18T22:05:57.000Z
|
models/simple.py
|
MetroStar/bitnest
|
a8d9cef5a17a5366e088a774ae951a0f06f97ae7
|
[
"MIT"
] | 1
|
2021-12-02T03:47:45.000Z
|
2021-12-02T03:47:45.000Z
|
models/simple.py
|
MetroStar/bitnest
|
a8d9cef5a17a5366e088a774ae951a0f06f97ae7
|
[
"MIT"
] | null | null | null |
from bitnest.field import (
Struct,
UnsignedInteger,
Bits,
Union,
FieldReference,
Vector,
)
class CommandWord(Struct):
    """8-bit command word: 5-bit remote terminal address followed by a
    3-bit data-word count."""
    name = "CommandWord"
    fields = [
        UnsignedInteger("remote_terminal_address", 5),
        UnsignedInteger("number_of_words", 3),
    ]
class DataWord(Struct):
    """A single opaque 16-bit data word."""
    name = "DataWord"
    fields = [
        Bits("data", 16),
    ]
class RTToController(Struct):
    """Remote-terminal-to-controller message: a command word followed by
    exactly ``number_of_words`` data words.

    Selected when the remote terminal address equals 0x1F -- presumably
    the RT->controller marker in this mock spec; confirm against the
    intended specification.
    """
    name = "Remote Terminal to Controller"
    fields = [
        CommandWord,
        Vector(DataWord, length=FieldReference("CommandWord.number_of_words")),
    ]
    conditions = [(FieldReference("CommandWord.remote_terminal_address") == 0x1F)]
class ControllerToRT(Struct):
    """Controller-to-remote-terminal message: a bare command word,
    selected when its word count is zero."""
    name = "Controller to Remote Terminal"
    fields = [
        CommandWord,
    ]
    conditions = [(FieldReference("CommandWord.number_of_words") == 0x0)]
class MILSTD_1553_Message(Struct):
    """This is a mock specification for a MILSTD 1553 Message to be as
    simple as possible while still representative of the difficulty of
    handling specifications.

    A message is an 8-bit bus id followed by one of the two message
    directions (discriminated by their ``conditions``).
    """
    name = "MIL-STD 1553 Mock Message"
    fields = [
        UnsignedInteger("bus_id", 8),
        Union(
            RTToController,
            ControllerToRT,
        ),
    ]
| 19.446154
| 82
| 0.630538
|
acfeb2a254a25061925adb8675aa8b82a244d39c
| 10,351
|
py
|
Python
|
wandb/run-20210412_003525-5km65f5j/files/code/main.py
|
ccoltong1215/simple-lenet5-torch-mnist
|
3eaa25160525f89dd6b9fe1db5de26a2bfda2fea
|
[
"MIT"
] | null | null | null |
wandb/run-20210412_003525-5km65f5j/files/code/main.py
|
ccoltong1215/simple-lenet5-torch-mnist
|
3eaa25160525f89dd6b9fe1db5de26a2bfda2fea
|
[
"MIT"
] | 5
|
2021-09-08T03:09:50.000Z
|
2022-03-12T00:56:43.000Z
|
wandb/run-20210412_003525-5km65f5j/files/code/main.py
|
ccoltong1215/simple-lenet5-torch-mnist
|
3eaa25160525f89dd6b9fe1db5de26a2bfda2fea
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from dataset import Dataset
from model import LeNet5, CustomMLP,LeNet5_regulized
import numpy as np
import matplotlib.pyplot as plt
import wandb
def train(model, trn_loader, device, criterion, optimizer,epoch,modelname):
    """ Train function
    Args:
        model: network
        trn_loader: torch.utils.data.DataLoader instance for training
        device: device for computing, cpu or gpu
        criterion: cost function
        optimizer: optimization method, refer to torch.optim
        epoch: number of epochs to train for
        modelname: label used in console logs and wandb chart names
    Returns:
        trn_loss: per-epoch average loss values (numpy array)
        acc: per-epoch accuracies (numpy array)
    """
    model.to(device)
    model.train()
    trn_loss, acc = [], []
    for m in range(epoch):
        train_loss = 0
        trainacc = 0
        for i, (images, labels) in enumerate(trn_loader):
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            train_loss += loss
            # Batch accuracy: fraction of argmax predictions matching labels.
            temp_acc = torch.mean(torch.eq(torch.argmax(outputs, dim=1), labels).to(dtype=torch.float64))
            trainacc += temp_acc
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i) % 1000 == 0:
                print("\r {} Step [{}] Loss: {:.4f} acc: {:.4f}\nlabel".format(modelname,i, loss.item(), temp_acc),labels,"\n output", torch.argmax(outputs, dim=1))
        trainacc = trainacc / trn_loader.__len__()
        # 10 is the batch size; originally this was meant to come in as an argument.
        train_loss = train_loss / (trn_loader.__len__())
        print("{} training {} epoch Loss: {:.4f} acc: {:.4f}".format(modelname,m, train_loss, trainacc))
        trn_loss.append(train_loss.item())
        acc.append(trainacc.item())
    # Log per-epoch curves to wandb as line plots.
    epochlist = range(epoch)
    data = [[x, y] for (x, y) in zip( epochlist,acc)]
    data2 = [[x, y] for (x, y) in zip(epochlist, trn_loss)]
    table = wandb.Table(data=data, columns=[ "epoch","{}Acc".format(modelname)])
    table2 = wandb.Table(data=data2, columns=["epoch", "{}loss".format(modelname)])
    wandb.log({"{}Acc".format(modelname): wandb.plot.line(table, "epoch", "{}Acc".format(modelname),title= "{}Acc graph".format(modelname))})
    wandb.log({"{}loss".format(modelname): wandb.plot.line(table2, "epoch", "{}loss".format(modelname),title="{}loss graph".format(modelname))})
    trn_loss = np.array(trn_loss)
    acc=np.array(acc)
    # Best-effort ONNX export; the bare except deliberately swallows any
    # export failure so training results are still returned.
    try:
        dummy_input = torch.randn(10,1,28,28,device=device)
        input_names = ["input_0"]
        output_names = ["output_0"]
        dummy_output = model(dummy_input)
        torch.onnx.export(model, dummy_input, "{}.onnx".format(modelname), verbose=True, input_names=input_names,output_names=output_names)
    except:
        pass
    return trn_loss, acc
def test(model, tst_loader, device, criterion,modelname):
    """ Test function
    Args:
        model: network
        tst_loader: torch.utils.data.DataLoader instance for testing
        device: device for computing, cpu or gpu
        criterion: cost function
        modelname: label used in wandb metric names
    Returns:
        tst_loss: per-batch loss values (numpy array)
        acc: per-batch accuracies (numpy array)
    """
    model.to(device)
    model.eval()
    tst_loss, acc = [],[]
    test_loss=0
    test_acc=0
    with torch.no_grad():  # no gradients needed during evaluation
        for i, (images, labels) in enumerate(tst_loader):
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            test_loss += loss
            temp_acc = torch.mean(torch.eq(torch.argmax(outputs, dim=1), labels).to(dtype=torch.float64))
            test_acc += temp_acc
            if (i) % 100 == 0:
                print(" Step [{}] Loss: {:.4f} acc: {:.4f}".format(i, loss.item(), temp_acc))
                print("label", labels)
                print("output", torch.argmax(outputs, dim=1))
            tst_loss.append(loss.item())
            acc.append(temp_acc.item())
    test_acc = test_acc/tst_loader.__len__()
    test_loss = test_loss / (tst_loader.__len__())
    print("TEST Step [{}] Loss: {:.4f} acc: {:.4f}".format(tst_loader.__len__(), test_loss, test_acc))
    tst_loss=np.array(tst_loss).astype(float)
    acc=np.array(acc).astype(float)
    # Log the aggregate test metrics to wandb.
    wandb.log({"{}Acc_test".format(modelname): test_acc,
               "{}loss_test".format(modelname): test_loss})
    return tst_loss, acc
# import some packages you need here
def main():
    """ Main function
    Here, you should instantiate
    1) Dataset objects for training and test datasets
    2) DataLoaders for training and testing
    3) model
    4) optimizer: SGD with initial learning rate 0.01 and momentum 0.9
    5) cost function: use torch.nn.CrossEntropyLoss
    Trains LeNet5, a weight-decay-regularized LeNet5 and a custom MLP on
    MNIST, evaluates each, and plots the training curves.
    """
    wandb.init(project="simple_MNIST_report", config={
    })
    roottrain='data/train'
    roottest ='data/test'
    epoch = 100
    # declare pipeline
    trainloader = DataLoader(dataset=Dataset(root=roottrain,normalize=True),
                             batch_size=10,
                             shuffle=True)
    # Same training data, unshuffled -- used for the regularized model.
    trainloader_normalize = DataLoader(dataset=Dataset(root=roottrain,normalize=True),
                                       batch_size=10,
                                       shuffle=False)
    testloader = DataLoader(dataset=Dataset(root=roottest,normalize=False),
                            batch_size=10,
                            shuffle=False)
    # NOTE(review): hard-coded to the first GPU; fails on CPU-only hosts.
    device = torch.device("cuda:0")
    # declare model and opt and loss
    LeNet5_model = LeNet5()
    criterionLeNet = torch.nn.CrossEntropyLoss()
    optimizerLeNet = torch.optim.SGD(LeNet5_model.parameters(), lr=0.001, momentum=0.9)
    LeNet5_regulized_model = LeNet5_regulized()
    criterionLeNet_regulized = torch.nn.CrossEntropyLoss()
    optimizerLeNet_regulized = torch.optim.SGD(LeNet5_regulized_model.parameters(), lr=0.001, momentum=0.9,weight_decay=0.001)  ## L2 regularization
    CustomMLP_model = CustomMLP()
    criterionCustomMLP = torch.nn.CrossEntropyLoss()
    optimizerCustomMLP = torch.optim.SGD(CustomMLP_model.parameters(), lr=0.001, momentum=0.9)
    wandb.watch(
        LeNet5_model
    )
    wandb.watch(
        CustomMLP_model
    )
    # start training: each model is trained for `epoch` epochs then tested.
    lenet5_regulizedtrnloss, lenet5_regulizedtrnacc = train(model=LeNet5_regulized_model, trn_loader=trainloader_normalize, device=device, criterion=criterionLeNet_regulized,
                                                            optimizer=optimizerLeNet_regulized,epoch=epoch,modelname="lenet_regulized")
    lenet5_regulizedtstloss, lenet5_regulizedtstacc = test(model=LeNet5_regulized_model, tst_loader=testloader, device=device, criterion=criterionLeNet_regulized,modelname="lenet_regulized")
    lenet5trnloss, lenet5trnacc = train(model=LeNet5_model, trn_loader=trainloader, device=device, criterion=criterionLeNet,
                                        optimizer=optimizerLeNet,epoch=epoch,modelname="lenet")
    lenet5tstloss, lenet5tstacc = test(model=LeNet5_model, tst_loader=testloader, device=device, criterion=criterionLeNet,modelname="lenet")
    CustomMLPtrnloss, CustomMLPtrnacc = train(model=CustomMLP_model, trn_loader=trainloader, device=device,
                                              criterion=criterionCustomMLP, optimizer=optimizerCustomMLP,epoch=epoch,modelname="custom")
    CustomMLPtstloss, CustomMLPtstacc = test(model=CustomMLP_model, tst_loader=testloader, device=device, criterion=criterionCustomMLP,modelname="custom")
    # Side-by-side matplotlib figure: loss (left) and accuracy (right).
    fig= plt.figure()
    lossplt=fig.add_subplot(1, 2, 1)
    plt.plot(range(epoch), lenet5trnloss, color='g', label='LeNet5 train loss')
    plt.plot(range(epoch), lenet5_regulizedtrnloss,color='r' ,label='LeNet5_regulized train loss' )
    plt.plot(range(epoch), CustomMLPtrnloss,color='b',label='Custom MLP train loss')
    plt.legend(loc='upper right',bbox_to_anchor=(1.0, 1.0))
    plt.xlabel('epoch (x100)')
    plt.ylabel('loss')
    plt.title('Loss')
    accplt=fig.add_subplot(1, 2, 2)
    plt.plot(range(epoch), lenet5trnacc,color='g' ,label='LeNet5 train accuracy' )
    plt.plot(range(epoch), lenet5_regulizedtrnacc, color='r', label='LeNet5_regulized train loss')
    plt.plot(range(epoch), CustomMLPtrnacc,color='b',label='Custom MLP train accuracy')
    plt.legend(loc='upper right', bbox_to_anchor=(1.0, 1.0))
    plt.xlabel('epoch (x100)')
    plt.ylabel('acc')
    plt.title('Accuracy')
    #
    # lenetplt=fig.add_subplot(2, 2, 3)
    # plt.plot(range(int((trainloader.__len__())/100)), lenet5trnloss,color='g',label='train loss' )
    # plt.plot(range(int((testloader .__len__())/100)), lenet5tstloss,color='r',label='test loss' )
    # plt.plot(range(int((trainloader.__len__())/100)), lenet5trnacc,color='b' ,label='train accuracy')
    # plt.plot(range(int((testloader .__len__())/100)), lenet5tstacc,color='m' ,label='test accuracy' )
    # plt.legend(loc='upper right', bbox_to_anchor=(1.0, 1.0))
    # plt.xlabel('epoch (x100)')
    # plt.title('Loss and Accuracy of LeNet5')
    # #
    # # customplt=fig.add_subplot(2, 2, 4)
    # # plt.plot(range(int((trainloader.__len__())/100)), CustomMLPtrnloss,color='g',label='train loss' )
    # # plt.plot(range(int((testloader .__len__())/100)), CustomMLPtstloss,color='r',label='test loss' )
    # # plt.plot(range(int((trainloader.__len__())/100)), CustomMLPtrnacc,color='b' ,label='train accuracy')
    # # plt.plot(range(int((testloader .__len__())/100)), CustomMLPtstacc,color='m' ,label='test accuracy' )
    # # plt.legend(loc='upper right', bbox_to_anchor=(1.0, 1.0))
    # # plt.xlabel('epoch (x100)')
    # # plt.title('Loss and Accuracy of Custom MLP')
    # NOTE(review): savefig after show() may write a blank figure, and
    # '/fig.png' targets the filesystem root -- likely unintended.
    plt.show()
    plt.savefig('/fig.png')
# Script entry point.
if __name__ == '__main__':
    main()
### MNIST WEB app with python - Flask http://hanwifi.iptime.org:9000/
### 19512062 young il han
### ccoltong1215@seoultech.ac.kr
### https://github.com/ccoltong1215/simple-lenet5-torch-mnist
### https://wandb.ai/ccoltong1215/simple_MNIST_report/runs/16etprdd?workspace=user-ccoltong1215
| 42.950207
| 190
| 0.632113
|
acfeb2c87eca12ab7566379f135d0e6c647eb2cd
| 2,651
|
py
|
Python
|
neuro_pypes/tests/test__utils.py
|
Neurita/pypes
|
e88d27ebba842e8fa1f36b52ca12a0b9d5777e89
|
[
"Apache-2.0"
] | 14
|
2015-11-30T19:32:08.000Z
|
2021-11-16T05:35:20.000Z
|
neuro_pypes/tests/test__utils.py
|
Neurita/pypes
|
e88d27ebba842e8fa1f36b52ca12a0b9d5777e89
|
[
"Apache-2.0"
] | 42
|
2015-11-28T23:18:42.000Z
|
2021-02-23T01:45:02.000Z
|
neuro_pypes/tests/test__utils.py
|
Neurita/pypes
|
e88d27ebba842e8fa1f36b52ca12a0b9d5777e89
|
[
"Apache-2.0"
] | 9
|
2015-12-09T17:10:59.000Z
|
2022-01-03T17:26:40.000Z
|
# -*- coding: utf-8 -*-
from neuro_pypes._utils import format_pair_list
def test_format_pair_list():
    """format_pair_list must substitute '{anat}' in both members of every
    (regexp, replacement) pair while leaving the rest of each pattern
    intact."""
    anat_fbasename = 'anat_hc'
    regexp_subst = [
        (r"/{anat}_.*corrected_seg8.mat$", "/{anat}_to_mni_affine.mat"),
        (r"/m{anat}.*_corrected.nii$", "/{anat}_biascorrected.nii"),
        (r"/w{anat}.*_biascorrected.nii$", "/{anat}_mni.nii"),
        (r"/y_{anat}.*nii$", "/{anat}_to_mni_field.nii"),
        (r"/iy_{anat}.*nii$", "/{anat}_to_mni_inv_field.nii"),
        (r"/mwc1{anat}.*nii$", "/{anat}_gm_mod_w2tpm.nii"),
        (r"/mwc2{anat}.*nii$", "/{anat}_wm_mod_w2tpm.nii"),
        (r"/mwc3{anat}.*nii$", "/{anat}_csf_mod_w2tpm.nii"),
        (r"/mwc4{anat}.*nii$", "/{anat}_nobrain_mod_w2tpm.nii"),
        (r"/c1{anat}.*nii$", "/{anat}_gm.nii"),
        (r"/c2{anat}.*nii$", "/{anat}_wm.nii"),
        (r"/c3{anat}.*nii$", "/{anat}_csf.nii"),
        (r"/c4{anat}.*nii$", "/{anat}_nobrain.nii"),
        (r"/c5{anat}.*nii$", "/{anat}_nobrain_mask.nii"),
    ]
    result = format_pair_list(regexp_subst, anat=anat_fbasename)
    # Every '{anat}' placeholder is expected to become 'anat_hc'.
    assert(result == [
        (r"/anat_hc_.*corrected_seg8.mat$", "/anat_hc_to_mni_affine.mat"),
        (r"/manat_hc.*_corrected.nii$", "/anat_hc_biascorrected.nii"),
        (r"/wanat_hc.*_biascorrected.nii$", "/anat_hc_mni.nii"),
        (r"/y_anat_hc.*nii$", "/anat_hc_to_mni_field.nii"),
        (r"/iy_anat_hc.*nii$", "/anat_hc_to_mni_inv_field.nii"),
        (r"/mwc1anat_hc.*nii$", "/anat_hc_gm_mod_w2tpm.nii"),
        (r"/mwc2anat_hc.*nii$", "/anat_hc_wm_mod_w2tpm.nii"),
        (r"/mwc3anat_hc.*nii$", "/anat_hc_csf_mod_w2tpm.nii"),
        (r"/mwc4anat_hc.*nii$", "/anat_hc_nobrain_mod_w2tpm.nii"),
        (r"/c1anat_hc.*nii$", "/anat_hc_gm.nii"),
        (r"/c2anat_hc.*nii$", "/anat_hc_wm.nii"),
        (r"/c3anat_hc.*nii$", "/anat_hc_csf.nii"),
        (r"/c4anat_hc.*nii$", "/anat_hc_nobrain.nii"),
        (r"/c5anat_hc.*nii$", "/anat_hc_nobrain_mask.nii"),
    ])
| 61.651163
| 92
| 0.423991
|
acfeb534f571b1bcf6b6184ecfb06bc128c3ad4e
| 1,443
|
py
|
Python
|
actions/tests/test_scale.py
|
jubelcassio/ImageProcessing
|
ebf8f0bfb1613dea1c0d0008c26cbfc970b98d45
|
[
"MIT"
] | null | null | null |
actions/tests/test_scale.py
|
jubelcassio/ImageProcessing
|
ebf8f0bfb1613dea1c0d0008c26cbfc970b98d45
|
[
"MIT"
] | 6
|
2018-08-21T17:34:23.000Z
|
2019-01-26T15:31:21.000Z
|
actions/tests/test_scale.py
|
jubelcassio/ImageProcessing
|
ebf8f0bfb1613dea1c0d0008c26cbfc970b98d45
|
[
"MIT"
] | null | null | null |
import argparse
from actions import scale
from PIL import Image
def test_parse():
    """The 'scale' subparser must apply defaults when only positionals are
    given, and parse every optional flag (including '#bbb' hex colours
    expanding to an RGB triple) when supplied."""
    main_parser = argparse.ArgumentParser()
    subparsers = main_parser.add_subparsers()
    scale.subparser(subparsers)
    # Minimal invocation: everything optional takes its default.
    args = ["scale", "image.jpg", "2"]
    result = {"command": "scale", "path": "image.jpg", "scalar": 2,
              "save_as": None, "save_folder": None, "mode": None,
              "resample": None, "optimize": False, "background": (255,255,255)
              }
    assert vars(main_parser.parse_args(args)) == result
    # Full invocation: every flag set explicitly.
    args = ["scale", "image.jpg", "2", "--save_as=png",
            "--save_folder=home/output", "--mode=RGB", "--background=#bbb",
            "--resample=BOX", "-optimize"]
    result = {"command": "scale", "path": "image.jpg", "scalar": 2,
              "save_as": "png", "save_folder": "home/output", "mode": "RGB",
              "resample": "BOX", "optimize": True, "background": (187,187,187)
              }
    assert vars(main_parser.parse_args(args)) == result
def test_scale():
    """scale.scale must multiply both dimensions by the scalar (int or
    float, any resample filter) and never return an image below 1x1."""
    im = Image.new("RGB", (300, 100))
    scale_im = scale.scale(im, 2, None)
    assert scale_im.size == (600, 200)
    scale_im = scale.scale(im, 2, "BICUBIC")
    assert scale_im.size == (600, 200)
    # Asserts the resulting image is at least 1x1 px
    scale_im = scale.scale(im, 0, None)
    assert scale_im.size == (1, 1)
    # Float scalars behave like their integer equivalents.
    scale_im = scale.scale(im, 2.0, "BICUBIC")
    assert scale_im.size == (600, 200)
| 32.795455
| 78
| 0.587665
|
acfeb5ef19241a5fbf0e2b3752f12509dfc226de
| 426
|
py
|
Python
|
mergelists.py
|
ormiret/badwords
|
eda5159d081e4c2ffa3f63973f739cca6afac624
|
[
"BSD-2-Clause"
] | 1
|
2021-08-02T07:49:18.000Z
|
2021-08-02T07:49:18.000Z
|
mergelists.py
|
ormiret/badwords
|
eda5159d081e4c2ffa3f63973f739cca6afac624
|
[
"BSD-2-Clause"
] | null | null | null |
mergelists.py
|
ormiret/badwords
|
eda5159d081e4c2ffa3f63973f739cca6afac624
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
"""Merge two word-list files into one sorted, de-duplicated list.

usage: mergelists.py firstlistfile secondlistfile
"""
import sys


def merge_lists(first_path, second_path):
    """Union the lines of both files and write the sorted result.

    Returns the name of the merged output file
    (``"<a>-<b>.txt"`` with any ``.txt`` stripped from the inputs).
    """
    with open(first_path, 'r') as f:
        first = f.read().splitlines()
    with open(second_path, 'r') as f:
        second = f.read().splitlines()
    # BUG FIX: the original merged raw readlines() output, so a final line
    # without a trailing newline ("word") and its terminated twin
    # ("word\n") survived as distinct entries and the merged file ran two
    # words together. splitlines() normalizes away the newlines first.
    final_list = sorted(set(first) | set(second))
    new_file_name = (first_path + "-" + second_path).replace('.txt', '') + ".txt"
    with open(new_file_name, 'w') as f:
        for word in final_list:
            f.write(word + '\n')
    return new_file_name


if __name__ == "__main__":
    merge_lists(sys.argv[1], sys.argv[2])
| 21.3
| 77
| 0.666667
|
acfeb624c370c851eee430a885ef4bd3f0ce0a1b
| 53,431
|
py
|
Python
|
home/views.py
|
NachiketaDhal/Ed-Flix
|
b4f806a2492a9894b30f50ea07700ec2d9b8d390
|
[
"MIT"
] | 6
|
2021-02-28T06:12:22.000Z
|
2021-02-28T19:13:00.000Z
|
home/views.py
|
NachiketaDhal/Ed-Flix
|
b4f806a2492a9894b30f50ea07700ec2d9b8d390
|
[
"MIT"
] | 3
|
2021-02-27T21:36:43.000Z
|
2021-03-01T14:53:53.000Z
|
home/views.py
|
NachiketaDhal/Ed-Flix
|
b4f806a2492a9894b30f50ea07700ec2d9b8d390
|
[
"MIT"
] | 9
|
2020-11-20T07:24:02.000Z
|
2021-05-02T06:01:07.000Z
|
from django.http.response import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.core.files.storage import FileSystemStorage
from api.models import FileUpload
from accounts.models import DocViews
# Create your views here.
SubjectMapping = {
"cse" : "Computer Science Engineering",
"mechanical":"Mechanical Engineering",
"civil":"Civil Engineering",
"electrical":"Electrical Engineering",
"etc":"Electronics and Telecommunication Engineering",
"metallurgy":"Metallurgical Engineering",
"chemical":"Chemical Engineering",
"production":"Production Engineering",
"mining" : "Mining engineering",
}
SubjectArchive = {
"cse": [
{"code" : "M-I","name":"Applied Mathematics-I","type":"common"},
{"type":"common","code":"Phy","name":"Applied Physics"},
{"type":"common","code":"BE","name":"Basic Electronics"},
{"type":"common","code":"BCE","name":"Basic Civil Engg"},
{"type":"prog core","code":"C","name":"C Programming"},
{"type":"common","code":"English","name":"English Comm Skills"},
{"type":"common","code":"WS","name":"Engg Workshop"},
{"type":"common","code":"Chem","name":"Applied Chemistry"},
{"type":"common","code":"BEE","name":"Basic Electrical Engg"},
{"type":"common","code":"BME","name":"Basic Mechanical Engg"},
{"type":"prog core","code":"DS","name":"Data Structure"},
{"type":"common","code":"ED","name":"Engg Drawing"},
{"type":"common","code":"STLD","name":"Digital Electronics"},
{"type":"prog core","code":"Java","name":"Programming using JAVA"},
{"type":"core","code":"SP","name":"System Programming"},
{"type":"core","code":"SE","name":"Software Engg"},
{"type":"core","code":"DS","name":"Discrete Mathematics"},
{"type":"common","code":"EE","name":"Engg Economics"},
{"type":"core","code":"COA","name":"Computer Architecture"},
{"type":"core","code":"DAA","name":"Algorithm"},
{"type":"core","code":"DBMS","name":"Database Managemant System"},
{"type":"core","code":"FLAT","name":"Theory Of Computation"},
{"type":"common","code":"OB","name":"Organisational Behaviour"},
{"type":"core","code":"OS","name":"Operating System"},
{"type":"core","code":"CG","name":"Computer Graphics"},
{"type":"core","code":"ACA","name":"Advanced Computer Architecture"},
{"type":"core","code":"CC","name":"Cloud Computing"},
{"type":"core","code":"DMDW","name":"Data Mining Data Warehousing"},
{"type":"core","code":"CN","name":"Computer Networks"},
{"type":"core","code":"CD","name":"Compiler Design"},
{"type":"core","code":"WSN","name":"Wireless Sensor Network"},
{"type":"core","code":"ML","name":"Machine Learning"},
{"type":"common","code":"EVS","name":"Environmental Science"},
{"type":"core","code":"CNS","name":"Cryptography and Network Security"},
{"type":"core","code":"SC","name":"Soft Computing"},
{"type":"core","code":"IoT","name":"Internet of Things"},
{"type":"common","code":"GT","name":"Green Technology"},
{"type":"common","code":"ET","name":"Entrepreneurship Training"}
],
"mechanical":[
{"code":"ADE","name":"Analog And Digital Electronics","type":"core"},
{"code":"AE","name":"Automobile Engineering","type":"core"},
{"code":"BC","name":"Business Communication","type":"core"},
{"code":"BCE","name":"Basics Of Civil Engineering","type":"core"},
{"code":"BE","name":"Basic Electronics","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"core"},
{"code":"BMP","name":"Basic Manufacturing Process","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"CAC","name":"CAD AND CAM","type":"core"},
{"code":"CADM","name":"Computer Aided Design And Manufacturing","type":"core"},
{"code":"CE","name":"Communicative English","type":"core"},
{"code":"CHEM","name":"Applied Chemistry","type":"core"},
{"code":"CIM","name":"Computer Integrated Manufacturing","type":"core"},
{"code":"DME","name":"Design Of Machine Elements","type":"core"},
{"code":"DSP","name":"Digital Signal Processing","type":"core"},
{"code":"EC","name":"Engineering Chemistry","type":"core"},
{"code":"ECS","name":"English Communication Skills","type":"core"},
{"code":"ECT","name":"Energy Conversion Techniques","type":"core"},
{"code":"EDC","name":"Electrical Drives And Controls","type":"core"},
{"code":"EE","name":"Engineering Economics","type":"core"},
{"code":"EE","name":"Environmental Engineering","type":"core"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"core"},
{"code":"EG","name":"ENGINEERING GRAPHICS","type":"core"},
{"code":"EG","name":"Engineering Graphics","type":"core"},
{"code":"EM","name":"Engineering Mechanics","type":"core"},
{"code":"EM-2","name":"Engineering Mathematics 2","type":"core"},
{"code":"EM1","name":"Engineering Mathematics 1","type":"core"},
{"code":"EMM","name":"Engineering Metrology And Measurements","type":"core"},
{"code":"EMPD","name":"Electrical Machines And Power Devices","type":"core"},
{"code":"EP","name":"Engineering Physics","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"core"},
{"code":"ET","name":"Engineering Thermodynamics","type":"core"},
{"code":"ET","name":"Engineering Tribology","type":"core"},
{"code":"EW","name":"Engineering Workshop","type":"core"},
{"code":"FCF","name":"Fatigue Creep ","type":"core"},
{"code":"FDHM","name":"Fluid Dynamics And Hydraulic Machines","type":"core"},
{"code":"FM","name":"Fluid Mechanics","type":"core"},
{"code":"FMHM","name":"Fluid Mechanics And Hydraulic Machines","type":"core"},
{"code":"FMM","name":"Fluid Mechanics And Machinery","type":"core"},
{"code":"HMF","name":"Heat ","type":"core"},
{"code":"HT","name":"Heat Transfer","type":"core"},
{"code":"ICEG","name":"IC Engines And Gas Turbines","type":"core"},
{"code":"IPM","name":"Introduction To Physical Metallurgy And Engineering Materials","type":"core"},
{"code":"IWT","name":"Internet And Web-Technologies","type":"core"},
{"code":"KDM","name":"Kinematics And Dynamics Of Machines","type":"core"},
{"code":"KKM","name":"Kinematic And Kinetics Of Machines","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"core"},
{"code":"M-3","name":"Mathematics-3","type":"core"},
{"code":"M-4","name":"Mathematics-4","type":"core"},
{"code":"MCTD","name":"Metal Cutting And Tool Design","type":"core"},
{"code":"MD","name":"Machine Dynamics","type":"core"},
{"code":"MDHP","name":"Machine Dynamics And Heat Power Lab","type":"core"},
{"code":"MECH","name":"Mechanics","type":"core"},
{"code":"MEMS","name":"Micro-Electro-Mechanical Systems","type":"core"},
{"code":"MIS","name":"Management Information System","type":"core"},
{"code":"MM","name":"Marketing Management","type":"core"},
{"code":"MM","name":"Mechanism And Machines","type":"core"},
{"code":"MM","name":"Metrology And Measurements","type":"core"},
{"code":"MMMR","name":"Mechanical Measurement Metallurgy And Reliability","type":"core"},
{"code":"MOS","name":"Mechanics Of Solid","type":"core"},
{"code":"MQCR","name":"Metrology Quality Control And Reliability","type":"core"},
{"code":"MS","name":"Material Science","type":"core"},
{"code":"MST","name":"Machining Science And Technology","type":"core"},
{"code":"MT-1","name":"Manufacturing Technology- 1","type":"core"},
{"code":"MV","name":"Mechanical Vibration","type":"core"},
{"code":"NCES","name":"Non-Conventional Energy Systems","type":"core"},
{"code":"NM","name":"Numerical Methods","type":"core"},
{"code":"OB","name":"Organizational Behaviour","type":"core"},
{"code":"OE","name":"Optimization In Engineering","type":"core"},
{"code":"OOP","name":"Object Oriented Programming Using Cpp","type":"core"},
{"code":"PDPT","name":"Production Design And Production Tooling","type":"core"},
{"code":"PE","name":"Professional Ethics","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"PM","name":"Project Management","type":"core"},
{"code":"POM","name":"Principles Of Management","type":"core"},
{"code":"POM","name":"Production And Operation Management","type":"core"},
{"code":"PPCE","name":"PROCESS PLANNING AND COST ESTIMATION","type":"core"},
{"code":"PPE","name":"Power Plant Engineering","type":"core"},
{"code":"PSPP","name":"Problem Solving And Python Programming","type":"core"},
{"code":"RAC","name":"Refrigeration And Air Conditioning","type":"core"},
{"code":"RRA","name":"Robotics And Robot Applications","type":"core"},
{"code":"SC","name":"Soft Computing","type":"core"},
{"code":"TD","name":"Thermodynamics","type":"core"},
{"code":"TE","name":"Technical English","type":"core"},
{"code":"TE-II","name":"Thermal Engineering- II","type":"core"},
{"code":"TPDE","name":"Transform And Partial Differential Equations","type":"core"},
{"code":"WSN","name":"Wireless Sensor Network","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
],
"civil":[
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"ECS","name":"English Communication Skills","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"BE","name":"Basic Electronics","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"CHEM","name":"Applied Chemistry","type":"core"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"core"},
{"code":"BCE","name":"Basics Of Civil Engineering","type":"core"},
{"code":"PE","name":"Professional Ethics","type":"core"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"core"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"core"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"core"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"core"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"core"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"core"},
{"code":"EE","name":"Engineering Economics","type":"core"},
{"code":"OB","name":"Organizational Behaviour","type":"core"},
{"code":"FMHM","name":"Fluid Mechanics And Hydraulic Machines","type":"core"},
{"code":"MOS","name":"Mechanics Of Solid","type":"core"},
{"code":"S-1","name":"Surveying-1","type":"core"},
{"code":"GTE-1","name":"Geotechnical Engineering- 1","type":"core"},
{"code":"CT","name":"Construction Technology","type":"core"},
{"code":"M-3","name":"Mathematics-3","type":"core"},
{"code":"TE1","name":"Transportation Engineering 1","type":"core"},
{"code":"SA-1","name":"Structural Analysis-1","type":"core"},
{"code":"AMOS","name":"Advanced Mechanics Of Solids","type":"core"},
{"code":"SA-1","name":"Structural Analysis-1","type":"core"},
{"code":"AMOS","name":"Advanced Mechanics Of Solids","type":"core"},
{"code":"DCS","name":"Design Of Concrete Structures","type":"core"},
{"code":"MT","name":"Material Testing","type":"core"},
{"code":"DSS","name":"Design Of Steel Structure","type":"core"},
{"code":"WSSE","name":"Water Supply And Sanitary Engineering","type":"core"},
{"code":"WRE","name":"Water Resources Engineering","type":"core"},
{"code":"SA-2","name":"Structural Analysis-2","type":"core"},
{"code":"EES","name":"Environmental Engineering And Safety","type":"core"},
{"code":"IE","name":"Irrigation Engineering","type":"core"},
{"code":"TE 2","name":"Transportation Engineering 2","type":"core"},
{"code":"FE","name":"Foundation Engineering","type":"core"},
{"code":"FEM","name":"Finite Element Methods","type":"core"},
{"code":"PCS","name":"Prestressed Concrete Structures","type":"core"},
{"code":"SC","name":"Soft Computing","type":"core"},
{"code":"GIT","name":"Ground Improvement Techniques","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"HTE","name":"Highway And Traffic Engineering","type":"core"},
{"code":"ATE","name":"Advanced Transportation Engineering","type":"core"},
{"code":"MATH","name":"Pure Applied Mathematics For Specific Branch Of Engineering","type":"core"},
{"code":"GWH","name":"Ground Water Hydrology","type":"core"},
{"code":"OCF","name":"Open Channel Flow","type":"core"},
{"code":"PD","name":"Pavement Design","type":"core"},
{"code":"SD","name":"Structural Dynamics","type":"core"},
{"code":"MTS","name":"Mass Transit System","type":"core"},
{"code":"ITA","name":"Internet Technologies And Applications","type":"core"},
{"code":"MM","name":"Marketing Management","type":"core"},
{"code":"POM","name":"Production And Operation Management","type":"core"},
{"code":"WRE","name":"Water Resources Engineering","type":"core"},
{"code":"FEM","name":"Finite Element Methods","type":"core"},
{"code":"CEPM","name":"Construction Equipments Planning And Management","type":"core"},
{"code":"MIS","name":"Management Information System","type":"core"},
{"code":"GIT","name":"Ground Improvement Techniques","type":"core"},
{"code":"AFE","name":"Advanced Foundation Engineering","type":"core"},
{"code":"PCS","name":"Prestressed Concrete Structures","type":"core"},
{"code":"HTE","name":"Highway And Traffic Engineering","type":"core"},
{"code":"SD","name":"Structural Dynamics","type":"core"},
{"code":"BCE","name":"Basics Of Civil Engineering","type":"core"},
{"code":"PE","name":"Professional Ethics","type":"core"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"core"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"core"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"MECH","name":"Mechanics","type":"core"},
{"code":"MECH","name":"Mechanics","type":"core"},
{"code":"BE","name":"Basic Electronics","type":"core"},
{"code":"OOP","name":"Object Oriented Programming Using Cpp","type":"core"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"core"},
{"code":"BC","name":"Business Communication","type":"core"},
{"code":"CHEM","name":"Applied Chemistry","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"BC","name":"Business Communication","type":"core"},
{"code":"CHEM","name":"Applied Chemistry","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"MECH","name":"Mechanics","type":"core"},
{"code":"EES","name":"Environmental Engineering And Safety","type":"core"},
{"code":"M-3","name":"Mathematics-3","type":"core"},
{"code":"MOS","name":"Mechanics Of Solid","type":"core"},
{"code":"BMBC","name":"Building Materials And Building Construction","type":"core"},
{"code":"S-1","name":"Surveying-1","type":"core"},
{"code":"FM-1","name":"Fluid Mechanics-1","type":"core"},
{"code":"NM","name":"Numerical Methods","type":"core"},
{"code":"TE1","name":"Transportation Engineering 1","type":"core"},
{"code":"SA-1","name":"Structural Analysis-1","type":"core"},
{"code":"DCS","name":"Design Of Concrete Structures","type":"core"},
{"code":"ECPP","name":"Estimation Costing And Professional Practice","type":"core"},
{"code":"FM2","name":"Fluid Mechanics-2","type":"core"},
{"code":"AS","name":"Advanced Surveying","type":"core"},
{"code":"BC","name":"Business Communication","type":"core"},
{"code":"GTE-1","name":"Geotechnical Engineering- 1","type":"core"},
{"code":"DSS","name":"Design Of Steel Structure","type":"core"},
{"code":"WRE","name":"Water Resources Engineering","type":"core"},
{"code":"CEPM","name":"Construction Equipments Planning And Management","type":"core"},
{"code":"SA-2","name":"Structural Analysis-2","type":"core"},
{"code":"EES","name":"Environmental Engineering And Safety","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"DCS","name":"Design Of Concrete Structures","type":"core"},
{"code":"WRE","name":"Water Resources Engineering","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"M-3","name":"Mathematics-3","type":"core"},
{"code":"CHEM","name":"Applied Chemistry","type":"core"},
{"code":"ECS","name":"English Communication Skills","type":"core"},
{"code":"EW","name":"Engineering Workshop","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"MECH","name":"Mechanics","type":"core"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"core"},
{"code":"EG","name":"Engineering Graphics","type":"core"},
{"code":"M-4","name":"Mathematics-4","type":"core"},
{"code":"BMBC","name":"Building Materials And Building Construction","type":"core"},
{"code":"BMBC","name":"Building Materials And Building Construction","type":"core"},
{"code":"S-1","name":"Surveying-1","type":"core"},
{"code":"FM","name":"Fluid Mechanics","type":"core"},
{"code":"SOM","name":"Strength Of Materials","type":"core"},
{"code":"EE","name":"Engineering Economics","type":"core"},
{"code":"SA-1","name":"Structural Analysis-1","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"EG","name":"Engineering Geology","type":"core"},
{"code":"CE","name":"Communicative English","type":"core"},
{"code":"EP","name":"Engineering Physics","type":"core"},
{"code":"EG","name":"ENGINEERING GRAPHICS","type":"core"},
{"code":"EC","name":"Engineering Chemistry","type":"core"},
{"code":"EM1","name":"Engineering Mathematics 1","type":"core"},
{"code":"PSPP","name":"Problem Solving And Python Programming","type":"core"}
],
"electrical":[
{"code":"M-1","name":"Applied Mathematics-1","type":"common"},
{"code":"PHY","name":"Applied Physics","type":"common"},
{"code":"ECS","name":"English Communication Skills","type":"common"},
{"code":"C","name":"Programming In C","type":"common"},
{"code":"BE","name":"Basic Electronics","type":"common"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"common"},
{"code":"CHEM","name":"Applied Chemistry","type":"common"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"common"},
{"code":"BCE","name":"Basics Of Civil Engineering","type":"common"},
{"code":"PE","name":"Professional Ethics","type":"common"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"common"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"common"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"common"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"common"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"common"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"common"},
{"code":"AEC","name":"Analog Electronic Circuits","type":"core"},
{"code":"EE","name":"Engineering Economics","type":"core"},
{"code":"NT","name":"Network Theory","type":"core"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"core"},
{"code":"CHEM","name":"Applied Chemistry","type":"core"},
{"code":"AEC","name":"Analog Electronic Circuits","type":"core"},
{"code":"EE","name":"Engineering Economics","type":"core"},
{"code":"NT","name":"Network Theory","type":"core"},
{"code":"OB","name":"Organizational Behaviour","type":"core"},
{"code":"DEC","name":"Digital Electronics Circuit","type":"core"},
{"code":"EM2","name":"Electrical Machines 2","type":"core"},
{"code":"M-3","name":"Mathematics-3","type":"core"},
{"code":"CSE","name":"Control System Engineering","type":"core"},
{"code":"EPTD","name":"Electrical Power Transmission And Distribution","type":"core"},
{"code":"MPMC","name":"Microprocessor And Microcontroller","type":"core"},
{"code":"OE","name":"Optimization In Engineering","type":"core"},
{"code":"PE","name":"Power Electronics","type":"core"},
{"code":"OE","name":"Optimization In Engineering","type":"core"},
{"code":"PE","name":"Power Electronics","type":"core"},
{"code":"ODI","name":"Optoelectronics Devices And Instrumentation","type":"core"},
{"code":"DSP","name":"Digital Signal Processing","type":"core"},
{"code":"RES","name":"Renewable Energy System","type":"core"},
{"code":"EES","name":"Environmental Engineering And Safety","type":"core"},
{"code":"EM2","name":"Electrical Machines 2","type":"core"},
{"code":"VLSI","name":"VLSI Design","type":"core"},
{"code":"ED","name":"Electrical Drives","type":"core"},
{"code":"ACS","name":"Advanced Control Systems","type":"core"},
{"code":"HVDC","name":"High Voltage DC Transmission","type":"core"},
{"code":"PSOC","name":"Power System Operation And Control","type":"core"},
{"code":"BI","name":"Biomedical Instrumentation","type":"core"},
{"code":"CE","name":"Communication Engineering","type":"core"},
{"code":"EPQ","name":"Electrical Power Quality","type":"core"},
{"code":"MC","name":"Mobile Communication","type":"core"},
{"code":"DIP","name":"Digital Image Processing","type":"core"},
{"code":"PSEE","name":"Power Station Engineering And Economics","type":"core"},
{"code":"SC","name":"Soft Computing","type":"core"},
{"code":"HVE","name":"High Voltage Engineering","type":"core"},
{"code":"BCE","name":"Basics Of Civil Engineering","type":"core"},
{"code":"PE","name":"Professional Ethics","type":"core"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"core"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"core"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"core"},
{"code":"EMMI","name":"Electrical Measurement And Measuring Instruments","type":"core"},
{"code":"MATH","name":"Pure Applied Mathematics For Specific Branch Of Engineering","type":"core"},
{"code":"ADSP","name":"Advanced Digital Signal Processing","type":"core"},
{"code":"NSBT","name":"Nano Science ","type":"core"},
{"code":"SGPD","name":"Switch Gear ","type":"core"},
{"code":"NSBT","name":"Nano Science ","type":"core"},
{"code":"SGPD","name":"Switch Gear ","type":"core"},
{"code":"AC","name":"Adaptive Control","type":"core"},
{"code":"II","name":"Industrial Instrumentation","type":"core"},
{"code":"EPQ","name":"Electrical Power Quality","type":"core"},
{"code":"APE","name":"Advanced Power Electronics","type":"core"},
{"code":"DIP","name":"Digital Image Processing","type":"core"},
{"code":"MM","name":"Marketing Management","type":"core"},
{"code":"PSEE","name":"Power Station Engineering And Economics","type":"core"},
{"code":"PSP","name":"Power System Protection","type":"core"},
{"code":"POM","name":"Production And Operation Management","type":"core"},
{"code":"SCS","name":"Satellite Communication System","type":"core"},
{"code":"ES","name":"Embedded System","type":"core"},
{"code":"BCE","name":"Basics Of Civil Engineering","type":"core"},
{"code":"PE","name":"Professional Ethics","type":"core"},
{"code":"PE","name":"Professional Ethics","type":"core"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"core"},
{"code":"CSE","name":"Control System Engineering","type":"core"},
{"code":"MM","name":"Marketing Management","type":"core"},
{"code":"POM","name":"Production And Operation Management","type":"core"},
{"code":"CV","name":"Computer Vision","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"SG","name":"Smart Grid","type":"core"},
{"code":"EM1","name":"Electrical Machines 1","type":"core"},
{"code":"IPCD","name":"Industrial Process Control And Dynamics","type":"core"},
{"code":"DEM","name":"Design Of Electrical Machines","type":"core"},
{"code":"ECD","name":"Energy Conversion Devices","type":"core"},
{"code":"SSD","name":"Solid State Devices","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"MECH","name":"Mechanics","type":"core"},
{"code":"BE","name":"Basic Electronics","type":"core"},
{"code":"OOP","name":"Object Oriented Programming Using Cpp","type":"core"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"core"},
{"code":"BC","name":"Business Communication","type":"core"},
{"code":"CHEM","name":"Applied Chemistry","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"BE","name":"Basic Electronics","type":"core"},
{"code":"OOP","name":"Object Oriented Programming Using Cpp","type":"core"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"core"},
{"code":"BC","name":"Business Communication","type":"core"},
{"code":"CHEM","name":"Applied Chemistry","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"MECH","name":"Mechanics","type":"core"},
{"code":"CE","name":"Communication Engineering","type":"core"},
{"code":"ED","name":"Electrical Drives","type":"core"},
{"code":"PSOC","name":"Power System Operation And Control","type":"core"},
{"code":"AEC","name":"Analog Electronic Circuits","type":"core"},
{"code":"M-3","name":"Mathematics-3","type":"core"},
{"code":"EM1","name":"Electrical Machines 1","type":"core"},
{"code":"DS","name":"Data Structure Using C","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"NA","name":"Network Analysis","type":"core"},
{"code":"DEC","name":"Digital Electronics Circuit","type":"core"},
{"code":"EM2","name":"Electrical Machines 2","type":"core"},
{"code":"M-4","name":"Mathematics-4","type":"core"},
{"code":"EMI","name":"Electrical Measurement And Instrumentation","type":"core"},
{"code":"TE-1","name":"Thermal Engineering 1","type":"core"},
{"code":"MPMC","name":"Microprocessor And Microcontroller","type":"core"},
{"code":"PE","name":"Power Electronics","type":"core"},
{"code":"EPTD","name":"Electrical Power Transmission And Distribution","type":"core"},
{"code":"LCT","name":"Linear Control Theory","type":"core"},
{"code":"IDSP","name":"Introduction To Digital Signal Processing","type":"core"},
{"code":"EMF","name":"Electromagnetic Field","type":"core"},
{"code":"ED","name":"Electrical Drives","type":"core"},
{"code":"RES","name":"Renewable Energy System","type":"core"},
{"code":"SGP","name":"Switch Gear And Protection","type":"core"},
{"code":"IAC","name":"Industrial Automation And Control","type":"core"},
{"code":"EE","name":"Engineering Economics","type":"core"},
{"code":"MCA","name":"Microcontroller And Applications","type":"core"},
{"code":"EPS","name":"Elements Of Power System","type":"core"},
{"code":"PED","name":"Power Electronics And Drives","type":"core"},
{"code":"CS","name":"Control System","type":"core"},
{"code":"DSP","name":"Digital Signal Processing","type":"core"},
{"code":"HVE","name":"High Voltage Engineering","type":"core"},
{"code":"PSA","name":"Power System Analysis","type":"core"},
{"code":"BE","name":"Basic Electronics","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"EP","name":"Engineering Physics","type":"core"},
{"code":"EM1","name":"Engineering Mathematics 1","type":"core"}
],
"etc":[
{"code":"M-1","name":"Applied Mathematics-1","type":"common"},
{"code":"PHY","name":"Applied Physics","type":"common"},
{"code":"ECS","name":"English Communication Skills","type":"common"},
{"code":"C","name":"Programming In C","type":"common"},
{"code":"BE","name":"Basic Electronics","type":"common"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"common"},
{"code":"CHEM","name":"Applied Chemistry","type":"common"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"common"},
{"code":"BCE","name":"Basics Of Civil Engineering","type":"common"},
{"code":"PE","name":"Professional Ethics","type":"common"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"common"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"common"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"common"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"common"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"common"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"common"},
{"code":"AEC","name":"Analog Electronic Circuits","type":"core"},
{"code":"DEC","name":"Digital Electronics Circuit","type":"core"},
{"code":"EE","name":"Engineering Economics","type":"core"},
{"code":"NT","name":"Network Theory","type":"core"},
{"code":"SS","name":"Signals And Systems","type":"core"},
{"code":"PSD","name":"Semiconductor Devices","type":"core"},
{"code":"OB","name":"Organizational Behaviour","type":"core"},
{"code":"M-3","name":"Mathematics-3","type":"core"},
{"code":"MPMC","name":"Microprocessor And Microcontroller","type":"core"},
{"code":"EEM","name":"Electrical And Electronic Measurement","type":"core"},
{"code":"EMT","name":"Electromagnetic Theory","type":"core"},
{"code":"EEM","name":"Electrical And Electronic Measurement","type":"core"},
{"code":"EMT","name":"Electromagnetic Theory","type":"core"},
{"code":"EMPD","name":"Electrical Machines And Power Devices","type":"core"},
{"code":"ACT","name":"Analog Communication Technique","type":"core"},
{"code":"COA","name":"Computer Organisation And Architecture","type":"core"},
{"code":"OE","name":"Optimization In Engineering","type":"core"},
{"code":"ST","name":"Sensors And Transducers","type":"core"},
{"code":"VLSI","name":"VLSI Design","type":"core"},
{"code":"PE","name":"Power Electronics","type":"core"},
{"code":"DSP","name":"Digital Signal Processing","type":"core"},
{"code":"CSE","name":"Control System Engineering","type":"core"},
{"code":"JAVA","name":"Java Programming","type":"core"},
{"code":"FOOD","name":"Fiber Optics And Opto-electronic Devices","type":"core"},
{"code":"EDM","name":"Electronic Devices And Modelling","type":"core"},
{"code":"OOPJ","name":"Object Oriented Programming Using JAVA","type":"core"},
{"code":"ES","name":"Embedded System","type":"core"},
{"code":"DCCN","name":"Data Communication And Computer Network","type":"core"},
{"code":"DSP","name":"Digital Signal Processing","type":"core"},
{"code":"DCT","name":"Digital Communication Techniques","type":"core"},
{"code":"OS","name":"Operating Systems","type":"core"},
{"code":"MC","name":"Mobile Communication","type":"core"},
{"code":"CNS","name":"Cryptography And Network Security","type":"core"},
{"code":"RRA","name":"Robotics And Robot Applications","type":"core"},
{"code":"AVLSI","name":"Analog VLSI Design","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"AWP","name":"Antenna And Wave Propagation","type":"core"},
{"code":"DBMS","name":"Database Management System","type":"core"},
{"code":"BSP","name":"Biomedical Signal Processing","type":"core"},
{"code":"ASP","name":"Adaptive Signal Processing","type":"core"},
{"code":"DIP","name":"Digital Image Processing","type":"core"},
{"code":"SCS","name":"Satellite Communication System","type":"core"},
{"code":"WSN","name":"Wireless Sensor Network","type":"core"},
{"code":"ACS","name":"Advanced Control Systems","type":"core"},
{"code":"MC","name":"Mobile Computing","type":"core"},
{"code":"SC","name":"Soft Computing","type":"core"},
{"code":"ES","name":"Embedded System","type":"core"},
{"code":"II","name":"Industrial Instrumentation","type":"core"},
{"code":"ME","name":"Microwave Engineering","type":"core"},
{"code":"DIP","name":"Digital Image Processing","type":"core"},
{"code":"ITA","name":"Internet Technologies And Applications","type":"core"},
{"code":"SCS","name":"Satellite Communication System","type":"core"},
{"code":"WSN","name":"Wireless Sensor Network","type":"core"},
{"code":"CNS","name":"Cryptography And Network Security","type":"core"},
{"code":"MEMS","name":"Micro-Electro-Mechanical Systems","type":"core"},
{"code":"AVLSI","name":"Analog VLSI Design","type":"core"},
{"code":"BCE","name":"Basics Of Civil Engineering","type":"core"},
{"code":"PE","name":"Professional Ethics","type":"core"},
{"code":"PE","name":"Professional Ethics","type":"core"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"core"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"core"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"MATH","name":"Pure Applied Mathematics For Specific Branch Of Engineering","type":"core"},
{"code":"DSD","name":"Digital System Design","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"MECH","name":"Mechanics","type":"core"},
{"code":"BE","name":"Basic Electronics","type":"core"},
{"code":"OOP","name":"Object Oriented Programming Using Cpp","type":"core"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"core"},
{"code":"OOP","name":"Object Oriented Programming Using Cpp","type":"core"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"core"},
{"code":"BC","name":"Business Communication","type":"core"},
{"code":"CHEM","name":"Applied Chemistry","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"BE","name":"Basic Electronics","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"MECH","name":"Mechanics","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"MECH","name":"Mechanics","type":"core"},
{"code":"AEC","name":"Analog Electronic Circuits","type":"core"},
{"code":"M-3","name":"Mathematics-3","type":"core"},
{"code":"NT","name":"Network Theory","type":"core"},
{"code":"SS","name":"Signals And Systems","type":"core"},
{"code":"BC","name":"Business Communication","type":"core"},
{"code":"PSD","name":"Semiconductor Devices","type":"core"},
{"code":"ACT","name":"Analog Communication Technique","type":"core"},
{"code":"DEC","name":"Digital Electronics Circuit","type":"core"},
{"code":"M-4","name":"Mathematics-4","type":"core"},
{"code":"EMI","name":"Electrical Measurement And Instrumentation","type":"core"},
{"code":"EMI","name":"Electrical Measurement And Instrumentation","type":"core"},
{"code":"MPMC","name":"Microprocessor And Microcontroller","type":"core"},
{"code":"DSP","name":"Digital Signal Processing","type":"core"},
{"code":"DCT","name":"Digital Communication Techniques","type":"core"},
{"code":"CSE","name":"Control System Engineering","type":"core"},
{"code":"EMT","name":"Electromagnetic Theory","type":"core"},
{"code":"VLSI","name":"VLSI Design","type":"core"},
{"code":"AWP","name":"Antenna And Wave Propagation","type":"core"},
{"code":"DCCN","name":"Data Communication And Computer Network","type":"core"},
{"code":"COA","name":"Computer Organisation And Architecture","type":"core"},
{"code":"OS","name":"Operating Systems","type":"core"},
{"code":"ME","name":"Microwave Engineering","type":"core"},
{"code":"WNMC","name":"Wireless Networks And Mobile Computing","type":"core"},
{"code":"M-1","name":"Applied Mathematics-1","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"PHY","name":"Applied Physics","type":"core"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"core"},
{"code":"MECH","name":"Mechanics","type":"core"},
{"code":"CHEM","name":"Applied Chemistry","type":"core"},
{"code":"ECS","name":"English Communication Skills","type":"core"},
{"code":"EW","name":"Engineering Workshop","type":"core"},
{"code":"C","name":"Programming In C","type":"core"},
{"code":"M-3","name":"Mathematics-3","type":"core"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"core"},
{"code":"EG","name":"Engineering Graphics","type":"core"},
{"code":"AEC","name":"Analog Electronic Circuits","type":"core"},
{"code":"DSP","name":"Digital Signal Processing","type":"core"},
{"code":"M-4","name":"Mathematics-4","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"ACT","name":"Analog Communication Technique","type":"core"},
{"code":"ACT","name":"Analog Communication Technique","type":"core"},
{"code":"DEC","name":"Digital Electronics Circuit","type":"core"},
{"code":"EE","name":"Engineering Economics","type":"core"},
{"code":"CSE","name":"Control System Engineering","type":"core"},
{"code":"STLD","name":"Switching Theory And Logic Design","type":"core"},
{"code":"CG","name":"Computer Graphics","type":"core"},
{"code":"CE","name":"Communicative English","type":"core"},
{"code":"EP","name":"Engineering Physics","type":"core"},
{"code":"EC","name":"Engineering Chemistry","type":"core"},
{"code":"EM1","name":"Engineering Mathematics 1","type":"core"},
{"code":"PSPP","name":"Problem Solving And Python Programming","type":"core"},
{"code":"EG","name":"ENGINEERING GRAPHICS","type":"core"},
{"code":"EM-2","name":"Engineering Mathematics 2","type":"core"},
{"code":"TE","name":"Technical English","type":"core"},
{"code":"PEE","name":"Physics Of Electronics Engineering","type":"core"},
{"code":"BEIE","name":"Basic Electrical And Instrumentation Engineering","type":"core"},
{"code":"CA","name":"Circuit Analysis","type":"core"},
{"code":"ED","name":"Electronics Devices","type":"core"},
{"code":"CSE","name":"Control System Engineering","type":"core"},
{"code":"SS","name":"Signal And Systems","type":"core"},
{"code":"LAPDE","name":"Linear Algebra And Partial Differential Equations","type":"core"},
{"code":"EC-1","name":"Electronic Circuits-I","type":"core"},
{"code":"PRP","name":"Probability And Random Processes","type":"core"},
{"code":"ESE","name":"Environment Science And Engineering","type":"core"},
{"code":"EMF","name":"Electromagnetic Field","type":"core"},
{"code":"LIC","name":"Linear Integrated Circuits","type":"core"},
{"code":"EC-2","name":"Electronic Circuits-2","type":"core"},
{"code":"CT","name":"Communication Theory","type":"core"},
{"code":"OOP","name":"Object Oriented Programming Using Cpp","type":"core"},
{"code":"OS","name":"Operating Systems","type":"core"},
{"code":"CAO","name":"Computer Architecture Organisation","type":"core"},
{"code":"TQM","name":"Total Quality Management","type":"core"}
],
"chemical":[
{"code":"M-1","name":"Applied Mathematics-1","type":"common"},
{"code":"PHY","name":"Applied Physics","type":"common"},
{"code":"ECS","name":"English Communication Skills","type":"common"},
{"code":"C","name":"Programming In C","type":"common"},
{"code":"BE","name":"Basic Electronics","type":"common"},
{"code":"BEE","name":"Basic Electrical Engineering","type":"common"},
{"code":"CHEM","name":"Applied Chemistry","type":"common"},
{"code":"M-2","name":"Applied Mathematics - 2","type":"common"},
{"code":"BCE","name":"Basics Of Civil Engineering","type":"common"},
{"code":"PE","name":"Professional Ethics","type":"common"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"common"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"common"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"common"},
{"code":"BME","name":"Basics Of Mechanical Engineering","type":"common"},
{"code":"ESHC","name":"Environmental Studies And Health Care","type":"common"},
{"code":"EEE","name":"Electrical And Electronics Engineering","type":"common"},
{"code":"FFFM","name":"Fluid Flow & Flow Measurement ","type":"core"},
{"code":"CT","name":" Chemical Technology ","type":"core"},
{"code":"MO","name":" Mechanical Operation ","type":"core"},
{"code":"MT-I","name":" Mass Transfer-I ","type":"core"},
{"code":"CP","name":" Chemical Process ","type":"core"},
{"code":"EE","name":" Engineering Economics ","type":"core"},
{"code":"PAM","name":" Purely Applied Mathematics ","type":"core"},
{"code":"MT-II","name":" Mass Transfer-II ","type":"core"},
{"code":"HT","name":" Heat Transfer ","type":"core"},
{"code":"CET","name":" Chemical Engg. Thermodynamics ","type":"core"},
{"code":"FET","name":" Fuel & Energyy Technology ","type":"core"},
{"code":"EEOB","name":" Engineering Economics/Organizational Behavior ","type":"core"},
{"code":"TP","name":" Transport Phenomena ","type":"core"},
{"code":"PED","name":" Process Equipment Design ","type":"core"},
{"code":"CRE","name":" Chemical Reaction Engg. ","type":"core"},
{"code":"PIPSM","name":" Process Instrumentaion/Process Simulation & Modelling ","type":"core"},
{"code":"BFB ","name":" Biotechnology/Food Biotechnology ","type":"core"},
{"code":"AL-I","name":" Advance Lab-I ","type":"core"},
{"code":"NM-MATLAB","name":" Numerical Methods & MATLAB ","type":"core"},
{"code":"PDC","name":" Process Dynamics & Control","type":"core"},
{"code":"FoBEFE","name":" Fundamentals of Biochemical Engg./FluidizationEngg.","type":"core"},
{"code":"PT","name":" Polymer Technogy","type":"core"},
{"code":"sT","name":" separation Technology","type":"core"},
{"code":"ESE","name":" GS Environmental Science & Engineering","type":"core"},
{"code":"IL","name":" Industrial Lecture ","type":"core"},
{"code":"NSBT","name":" Nano Science & Bio Technology","type":"core"},
{"code":"PPT","name":" Pulp & Paper Technology","type":"core"},
{"code":"PRE","name":" Petroleum Refinery Engg.","type":"core"},
{"code":"MP","name":" Mineral Processing","type":"core"},
{"code":"PTGTNT","name":" Pinch Technology/Green Technology/Nano-Technology","type":"core"},
{"code":"SC","name":" Soft Computing ","type":"core"},
{"code":"ALIIP","name":" Advance Lab-II/ Project ","type":"core"},
{"code":"IoT","name":" Internet of Things","type":"core"}
],
"metallurgy":[{"code":"404","name":"Coming Soon","type":"core"}],
"production":[{"code":"404","name":"Coming Soon","type":"core"}]
}
# Maps CSE subject short-codes to full subject names for display.
# NOTE(review): the key "DS" appears twice below ("Data Structure" and
# "Discrete Mathematics"); a Python dict keeps only the last occurrence, so
# the "Data Structure" entry is silently lost — confirm the intended codes.
cse_code_subject_mapping = {
    "M-I":"Applied Mathematics-I","Phy":"Applied Physics","BE":"Basic Electronics","BCE":"Basic Civil Engg","C":"C Programming","English":"English Comm Skills","WS":"Engg Workshop","Chem":"Applied Chemistry","BEE":"Basic Electrical Engg","BME":"Basic Mechanical Engg","DS":"Data Structure","ED":"Engg Drawing","STLD":"Digital Electronics","Java":"Programming using JAVA","SP":"System Programming","SE":"Software Engg","DS":"Discrete Mathematics","EE":"Engg Economics","COA":"Computer Architecture","DAA":"Algorithm","DBMS":"Database Managemant System","FLAT":"Formal Language and Automata Theory","OB":"Organisational Behaviour","OS":"Operating System","CG":"Computer Graphics","ACA":"Advanced Computer Architecture","CC":"Cloud Computing","DMDW":"Data Mining Data Warehousing","CN":"Computer Networks","CD":"Compiler Design","WSN":"Wireless Sensor Network","ML":"Machine Learning","EVS":"Environmental Science","CNS":"Cryptography and Network Security","SC":"Soft Computing","IoT":"Internet of Things","GT":"Green Technology","ET":"Entrepreneurship Training"
}
''' Index Page '''
def index(request):
    """Landing page: dashboard for authenticated users, public page otherwise."""
    if not request.user.is_authenticated:
        return render(request, 'index.html')
    return render(request, 'home.html', {"subjects": SubjectMapping})
''' Explore Page '''
def explore(request):
    """Subject listing page for the subject code given in the ?sub= parameter.

    When no code is supplied, renders the template with an empty subject
    dict and blank name (same as the original behavior).
    """
    code = request.GET.get('sub')
    context = {"subjects": {}, "name": ""}
    if code is not None:
        # .get() may yield None for an unknown code; passed through unchanged.
        context["subjects"] = SubjectArchive.get(code)
        context["name"] = SubjectMapping.get(code)
    return render(request, 'subject.html', context)
''' Details of a Subject '''
def sub_details(request):
    """Show a subject's uploads grouped into notes, PYQs and GATE PYQs.

    Reads the subject code from the ?sub= query parameter.

    BUGFIX: the original called ``subject_code.upper()`` outside the
    ``is not None`` guard, so a request without ``?sub=`` crashed with
    AttributeError; now a missing code simply renders empty lists.
    (Also dropped the ``subject_name`` local the original computed but
    never used.)
    """
    subject_code = request.GET.get('sub')
    notes, pyqs, gate_pyqs = [], [], []
    if subject_code is not None:
        files = FileUpload.objects.filter(subject_code=subject_code.upper())
        # Bucket each upload by its document type.
        for file in files:
            if file.documentType == 'Notes':
                notes.append(file)
            elif file.documentType == 'PYQ':
                pyqs.append(file)
            else:
                gate_pyqs.append(file)
    return render(request, "subjectDetails.html", {"notes": notes, "pyqs": pyqs, "gate_pyqs": gate_pyqs})
def contentView(request):
    """Render a single uploaded document, recording the visit for logged-in users."""
    doc_id = request.GET.get('id')
    document = FileUpload.objects.get(pk=doc_id)
    if request.user.is_authenticated:
        # Persist one DocViews row per page view; afterLogin() aggregates these.
        DocViews(username=request.user, visit_id=doc_id).save()
    return render(request, "subjectContent.html", {"file": document})
''' Upload Page '''
@login_required(login_url="login")
def upload(request):
    # Render the document-upload form; authentication is enforced by the decorator.
    return render(request,"upload.html")
''' After login Page '''
@login_required
def afterLogin(request):
    """Post-login dashboard: most-viewed docs, recommendations, continue-reading.

    Context keys rendered into afterLogin.html:
      most_vieweds      -- all documents, ordered by site-wide view count desc.
      recommendations   -- unread documents from the user's most-read subjects.
      recently_visiteds -- the user's distinct visits, most recent first.
    """
    from collections import Counter  # local import: file header is outside this chunk

    # --- Most viewed documents (site-wide) ---
    # Counter.most_common() sorts by count desc and is stable on ties,
    # matching the original manual dict-sort.
    global_counts = Counter(v.visit_id for v in DocViews.objects.all())
    most_viewed = [
        FileUpload.objects.get(pk=int(visit_id))
        for visit_id, _ in global_counts.most_common()
    ]

    # --- Recommendations: unseen documents from the user's favourite subjects ---
    # Materialize once; the original re-ran this queryset for each loop.
    my_visits = list(DocViews.objects.filter(username=request.user))
    # visit_id originates from request.GET in contentView, so it may be a
    # string; normalise both sides to str for membership tests (TODO confirm
    # the DocViews.visit_id field type against the model).
    seen_ids = {str(v.visit_id) for v in my_visits}
    per_doc_counts = Counter(v.visit_id for v in my_visits)
    # Aggregate per-document counts into per-subject counts, iterating in
    # descending-count order so tie-breaking matches the original.
    subject_counts = Counter()
    for visit_id, count in per_doc_counts.most_common():
        subject = FileUpload.objects.get(pk=int(visit_id)).subject_code
        subject_counts[subject] += count
    recommendations = []
    for subject, _ in subject_counts.most_common():
        for doc in FileUpload.objects.filter(subject_code=subject):
            # BUGFIX: the original compared int doc.id against the (string)
            # visit ids, which never matched, so already-read documents were
            # always recommended again.
            if str(doc.id) not in seen_ids:
                recommendations.append(doc)

    # --- Continue reading: most recent distinct visits first ---
    already_listed = set()
    recently_visited = []
    for visit in reversed(my_visits):
        if visit.visit_id not in already_listed:
            recently_visited.append(FileUpload.objects.get(pk=int(visit.visit_id)))
            already_listed.add(visit.visit_id)

    return render(
        request,
        "afterLogin.html",
        {
            "most_vieweds": most_viewed,
            "recommendations": recommendations,
            "recently_visiteds": recently_visited,
        },
    )
def search(request):
    """Case-insensitive substring search over upload title, subtitle and author.

    Reads the ?query= parameter; a missing query renders an empty result list.
    Improvements over the original: removed the leftover debug ``print``,
    the stray trailing semicolon, and replaced ``.find(...) != -1`` with the
    idiomatic ``in`` operator inside a comprehension.
    """
    query = request.GET.get("query")
    files = []
    if query is not None:
        needle = query.lower()
        files = [
            doc
            for doc in FileUpload.objects.all()
            if needle in doc.title.lower()
            or needle in doc.subtitle.lower()
            or needle in doc.author.lower()
        ]
    return render(request, "advancedSearch.html", {"query": query, "files": files})
@login_required(login_url="login")
def home(request):
    # Authenticated home page; unauthenticated users are redirected to "login".
    return render(request, 'auth.html')
| 61.984919
| 1,059
| 0.581142
|
acfeb671cec0617ecf7e9a324de1bbba433c4f7d
| 75
|
py
|
Python
|
dbsettings/__init__.py
|
johnpaulett/django-dbsettings
|
87465bdfebf8b6546212f987748111f33b80fbd4
|
[
"BSD-3-Clause"
] | null | null | null |
dbsettings/__init__.py
|
johnpaulett/django-dbsettings
|
87465bdfebf8b6546212f987748111f33b80fbd4
|
[
"BSD-3-Clause"
] | null | null | null |
dbsettings/__init__.py
|
johnpaulett/django-dbsettings
|
87465bdfebf8b6546212f987748111f33b80fbd4
|
[
"BSD-3-Clause"
] | null | null | null |
from dbsettings.values import * # NOQA
from dbsettings.group import Group
| 25
| 39
| 0.8
|
acfeb679100096f3e0ee974a1a9a59c516b3e15a
| 10,555
|
py
|
Python
|
analysis/sahadiagonalfault.py
|
aewag/archie
|
65770d9e489112040e8cb4950c3612e508528e12
|
[
"Apache-2.0"
] | null | null | null |
analysis/sahadiagonalfault.py
|
aewag/archie
|
65770d9e489112040e8cb4950c3612e508528e12
|
[
"Apache-2.0"
] | null | null | null |
analysis/sahadiagonalfault.py
|
aewag/archie
|
65770d9e489112040e8cb4950c3612e508528e12
|
[
"Apache-2.0"
] | null | null | null |
# Implementation of the differential fault attack for the M0 fault model
#
# For a description of this attack see:
# Dhiman Saha, Debdeep Mukhopadhyay and Dipanwita Roy Chowdhury
# A Diagonal Fault Attack on the Advanced Encryption Standard.
# IACR Cryptology ePrint Archive, 581, 2009.
#
# input: faulty cipher texts and correct cipher text after the 10th round
# output: key set for round 10
import numpy as np
# AES inverse S-box (InvSubBytes lookup, 256 entries): used below to peel
# the final-round SubBytes off each ciphertext byte for a given key guess.
invSBox = (
    0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
    0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
    0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
    0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
    0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
    0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
    0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
    0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
    0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
    0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
    0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
    0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
    0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
    0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
    0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
    0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
)
# Exponential (antilog) table for GF(2^8): maps a discrete log back to a
# field element. The cycle closes (first and last entries are both 0x01),
# which is why mult() can fold log-sums above 0xFF by subtracting 0xFF.
e = (
    0x01, 0x03, 0x05, 0x0f, 0x11, 0x33, 0x55, 0xff, 0x1a, 0x2e, 0x72, 0x96, 0xa1, 0xf8, 0x13, 0x35,
    0x5f, 0xe1, 0x38, 0x48, 0xd8, 0x73, 0x95, 0xa4, 0xf7, 0x02, 0x06, 0x0a, 0x1e, 0x22, 0x66, 0xaa,
    0xe5, 0x34, 0x5c, 0xe4, 0x37, 0x59, 0xeb, 0x26, 0x6a, 0xbe, 0xd9, 0x70, 0x90, 0xab, 0xe6, 0x31,
    0x53, 0xf5, 0x04, 0x0c, 0x14, 0x3c, 0x44, 0xcc, 0x4f, 0xd1, 0x68, 0xb8, 0xd3, 0x6e, 0xb2, 0xcd,
    0x4c, 0xd4, 0x67, 0xa9, 0xe0, 0x3b, 0x4d, 0xd7, 0x62, 0xa6, 0xf1, 0x08, 0x18, 0x28, 0x78, 0x88,
    0x83, 0x9e, 0xb9, 0xd0, 0x6b, 0xbd, 0xdc, 0x7f, 0x81, 0x98, 0xb3, 0xce, 0x49, 0xdb, 0x76, 0x9a,
    0xb5, 0xc4, 0x57, 0xf9, 0x10, 0x30, 0x50, 0xf0, 0x0b, 0x1d, 0x27, 0x69, 0xbb, 0xd6, 0x61, 0xa3,
    0xfe, 0x19, 0x2b, 0x7d, 0x87, 0x92, 0xad, 0xec, 0x2f, 0x71, 0x93, 0xae, 0xe9, 0x20, 0x60, 0xa0,
    0xfb, 0x16, 0x3a, 0x4e, 0xd2, 0x6d, 0xb7, 0xc2, 0x5d, 0xe7, 0x32, 0x56, 0xfa, 0x15, 0x3f, 0x41,
    0xc3, 0x5e, 0xe2, 0x3d, 0x47, 0xc9, 0x40, 0xc0, 0x5b, 0xed, 0x2c, 0x74, 0x9c, 0xbf, 0xda, 0x75,
    0x9f, 0xba, 0xd5, 0x64, 0xac, 0xef, 0x2a, 0x7e, 0x82, 0x9d, 0xbc, 0xdf, 0x7a, 0x8e, 0x89, 0x80,
    0x9b, 0xb6, 0xc1, 0x58, 0xe8, 0x23, 0x65, 0xaf, 0xea, 0x25, 0x6f, 0xb1, 0xc8, 0x43, 0xc5, 0x54,
    0xfc, 0x1f, 0x21, 0x63, 0xa5, 0xf4, 0x07, 0x09, 0x1b, 0x2d, 0x77, 0x99, 0xb0, 0xcb, 0x46, 0xca,
    0x45, 0xcf, 0x4a, 0xde, 0x79, 0x8b, 0x86, 0x91, 0xa8, 0xe3, 0x3e, 0x42, 0xc6, 0x51, 0xf3, 0x0e,
    0x12, 0x36, 0x5a, 0xee, 0x29, 0x7b, 0x8d, 0x8c, 0x8f, 0x8a, 0x85, 0x94, 0xa7, 0xf2, 0x0d, 0x17,
    0x39, 0x4b, 0xdd, 0x7c, 0x84, 0x97, 0xa2, 0xfd, 0x1c, 0x24, 0x6c, 0xb4, 0xc7, 0x52, 0xf6, 0x01
)
# Logarithmic table for GF(2^8): discrete log of each byte value, used by
# mult() to turn field multiplication into a table-addition.
# l[0] is a -1 placeholder (log of zero is undefined); mult() returns early
# for zero operands, so that entry is never indexed.
l = (
    -1, 0x00, 0x19, 0x01, 0x32, 0x02, 0x1a, 0xc6, 0x4b, 0xc7, 0x1b, 0x68, 0x33, 0xee, 0xdf, 0x03,
    0x64, 0x04, 0xe0, 0x0e, 0x34, 0x8d, 0x81, 0xef, 0x4c, 0x71, 0x08, 0xc8, 0xf8, 0x69, 0x1c, 0xc1,
    0x7d, 0xc2, 0x1d, 0xb5, 0xf9, 0xb9, 0x27, 0x6a, 0x4d, 0xe4, 0xa6, 0x72, 0x9a, 0xc9, 0x09, 0x78,
    0x65, 0x2f, 0x8a, 0x05, 0x21, 0x0f, 0xe1, 0x24, 0x12, 0xf0, 0x82, 0x45, 0x35, 0x93, 0xda, 0x8e,
    0x96, 0x8f, 0xdb, 0xbd, 0x36, 0xd0, 0xce, 0x94, 0x13, 0x5C, 0xd2, 0xf1, 0x40, 0x46, 0x83, 0x38,
    0x66, 0xdd, 0xfd, 0x30, 0xbf, 0x06, 0x8b, 0x62, 0xb3, 0x25, 0xe2, 0x98, 0x22, 0x88, 0x91, 0x10,
    0x7e, 0x6e, 0x48, 0xc3, 0xa3, 0xb6, 0x1e, 0x42, 0x3a, 0x6b, 0x28, 0x54, 0xfa, 0x85, 0x3d, 0xba,
    0x2b, 0x79, 0x0a, 0x15, 0x9b, 0x9f, 0x5e, 0xca, 0x4e, 0xd4, 0xac, 0xe5, 0xf3, 0x73, 0xa7, 0x57,
    0xaf, 0x58, 0xa8, 0x50, 0xf4, 0xea, 0xd6, 0x74, 0x4f, 0xae, 0xe9, 0xd5, 0xe7, 0xe6, 0xad, 0xe8,
    0x2c, 0xd7, 0x75, 0x7a, 0xeb, 0x16, 0x0b, 0xf5, 0x59, 0xcb, 0x5f, 0xb0, 0x9c, 0xa9, 0x51, 0xa0,
    0x7f, 0x0c, 0xf6, 0x6f, 0x17, 0xc4, 0x49, 0xec, 0xd8, 0x43, 0x1f, 0x2d, 0xa4, 0x76, 0x7b, 0xb7,
    0xcc, 0xbb, 0x3e, 0x5a, 0xfb, 0x60, 0xb1, 0x86, 0x3b, 0x52, 0xa1, 0x6c, 0xaa, 0x55, 0x29, 0x9d,
    0x97, 0xb2, 0x87, 0x90, 0x61, 0xbe, 0xdc, 0xfc, 0xbc, 0x95, 0xcf, 0xcd, 0x37, 0x3f, 0x5b, 0xd1,
    0x53, 0x39, 0x84, 0x3c, 0x41, 0xa2, 0x6d, 0x47, 0x14, 0x2a, 0x9e, 0x5d, 0x56, 0xf2, 0xd3, 0xab,
    0x44, 0x11, 0x92, 0xd9, 0x23, 0x20, 0x2e, 0x89, 0xb4, 0x7c, 0xb8, 0x26, 0x77, 0x99, 0xe3, 0xa5,
    0x67, 0x4a, 0xed, 0xde, 0xc5, 0x31, 0xfe, 0x18, 0x0d, 0x63, 0x8c, 0x80, 0xc0, 0xf7, 0x70, 0x07
)
def mult(a, b):
    """Multiply two bytes in GF(2^8) via the log/antilog tables (l and e).

    Zero and one operands are handled up front, so the -1 placeholder at
    l[0] is never indexed.
    """
    if a == 0 or b == 0:
        return 0
    if a == 1:
        return b
    if b == 1:
        return a
    # Multiplication is addition of discrete logs; fold sums above 0xFF
    # back into the table range.
    log_sum = l[a] + l[b]
    return e[log_sum - 0xFF] if log_sum > 0xFF else e[log_sum]
# equations for the M0 fault model (see section 5.2 in the above-mentioned publication)
def differentialFaultAttack(c, keyset, x):
    """Collect 10th-round-key byte candidates from one faulty ciphertext.

    Parameters
    ----------
    c : sequence of 16 ints
        Faulty ciphertext bytes.
    keyset : list of 16 lists
        Output parameter: every consistent candidate value for round-key
        byte i is appended to keyset[i] (duplicates possible; the caller
        deduplicates).
    x : sequence of 16 ints
        Correct (fault-free) ciphertext bytes.

    The original four copy-pasted quadruple-nested loops are factored into
    one helper parameterized by the diagonal's byte positions and its
    MixColumns coefficients; the appended candidates (and their order) are
    unchanged.
    """
    # A fault of (unknown) value f in one diagonal constrains four bytes:
    # D_i(k) == coef_i * f in GF(2^8), where D_i(k) is the InvSubBytes
    # differential computed from the correct/faulty ciphertext pair.
    _recover_diagonal(c, x, keyset, (0, 13, 10, 7), (2, 1, 1, 3))
    _recover_diagonal(c, x, keyset, (12, 6, 9, 3), (3, 1, 2, 1))
    _recover_diagonal(c, x, keyset, (8, 5, 2, 15), (1, 3, 2, 1))
    _recover_diagonal(c, x, keyset, (1, 4, 14, 11), (1, 1, 3, 2))


def _recover_diagonal(c, x, keyset, idx, coef):
    """Exhaust key guesses for the four state bytes of one fault diagonal.

    idx  -- the four byte positions touched by the fault.
    coef -- GF(2^8) coefficients relating each byte's differential to the
            unknown fault value f:  D_idx[j](k) == coef[j] * f.

    Two bytes are mutually consistent iff the cross-multiplied relation
    coef_b * D_a == coef_a * D_b holds (multiplication by a nonzero
    constant is injective in GF(2^8)), which reproduces the original
    per-diagonal equations exactly.
    """
    a, b, d, g = idx
    ca, cb, cd, cg = coef

    def diff(k, i):
        # InvSubBytes output differential at byte i for key guess k.
        return invSBox[k ^ x[i]] ^ invSBox[k ^ c[i]]

    for ka in range(256):
        fa = diff(ka, a)  # hoisted: invariant over the inner loops
        for kb in range(256):
            fb = diff(kb, b)
            if mult(cb, fa) != mult(ca, fb):
                continue
            for kd in range(256):
                fd = diff(kd, d)
                if mult(cb, fd) != mult(cd, fb):
                    continue
                for kg in range(256):
                    fg = diff(kg, g)
                    if mult(cb, fg) == mult(cg, fb):
                        keyset[a].append(ka)
                        keyset[b].append(kb)
                        keyset[d].append(kd)
                        keyset[g].append(kg)
if __name__ == '__main__':
    # Correct ciphertext after the 10th round.
    x = (0x3a, 0xd7, 0x7b, 0xb4, 0x0d, 0x7a, 0x36, 0x60, 0xa8, 0x9e, 0xca, 0xf3, 0x24, 0x66, 0xef, 0x97)
    # Faulty ciphertexts, one per injected fault; each extra ciphertext
    # further shrinks the recovered key set.
    faulty_ciphertexts = [
        (0x54, 0x12, 0xc6, 0x2e, 0xbb, 0x33, 0x9d, 0xc4, 0x4f, 0x47, 0x76, 0x3b, 0xfc, 0x4a, 0xc2, 0xbe),
        (0x7e, 0x1e, 0x7e, 0x29, 0x64, 0x20, 0x2b, 0x66, 0xc1, 0x06, 0x5e, 0x88, 0x8b, 0x35, 0x82, 0xbb),
        (0xb8, 0x37, 0x22, 0x8d, 0x6e, 0xdf, 0x7a, 0x85, 0xf0, 0x6b, 0xf4, 0x7b, 0x45, 0xa9, 0x23, 0x5f),
        (0x2b, 0x1e, 0x1c, 0x92, 0x64, 0xc7, 0x4c, 0x46, 0xf9, 0x18, 0x6e, 0x88, 0xab, 0xdd, 0x82, 0x08),
        (0xcd, 0xf2, 0x07, 0x0e, 0xe6, 0x58, 0xc9, 0x4e, 0x2d, 0x32, 0xb0, 0xd5, 0x43, 0xc4, 0x90, 0x33),
    ]
    # Run the attack once per faulty ciphertext; each run yields one set of
    # candidate lists (16 entries, one per round-key byte).
    keysets = []
    for cipher in faulty_ciphertexts:
        keyset = [[] for _ in range(16)]
        differentialFaultAttack(cipher, keyset, x)
        keysets.append(keyset)
    # Intersect the candidate sets byte by byte.  Sorting makes the printed
    # output deterministic (the original printed an unordered set); the
    # original also sorted each list before intersecting, which had no
    # effect on the result and is dropped, along with an unused key buffer.
    print("reduced key set of tenth round key")
    for i in range(16):
        candidates = set(keysets[0][i])
        for keyset in keysets[1:]:
            candidates &= set(keyset[i])
        print([hex(c) for c in sorted(candidates)])
| 44.914894
| 139
| 0.62937
|
acfeb68ee745d3d7d7b1986e54c5c55737c943b0
| 9,787
|
py
|
Python
|
autoarray/plot/imaging_plots.py
|
Sketos/PyAutoArray
|
72dc7e8d1c38786915f82a7e7284239e5ce87624
|
[
"MIT"
] | null | null | null |
autoarray/plot/imaging_plots.py
|
Sketos/PyAutoArray
|
72dc7e8d1c38786915f82a7e7284239e5ce87624
|
[
"MIT"
] | null | null | null |
autoarray/plot/imaging_plots.py
|
Sketos/PyAutoArray
|
72dc7e8d1c38786915f82a7e7284239e5ce87624
|
[
"MIT"
] | null | null | null |
from autoarray.plot import plotters
@plotters.set_include_and_sub_plotter
@plotters.set_subplot_filename
def subplot_imaging(
    imaging, grid=None, mask=None, positions=None, include=None, sub_plotter=None
):
    """Plot every quantity of an imaging dataset on one subplot figure.

    The six panels, in order: image, noise map, PSF, signal-to-noise map,
    absolute signal-to-noise map and potential chi-squared map.

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset (observed image, noise map, PSF, etc.).
    grid, mask, positions
        Optional overlays forwarded to the individual plotting functions.
    include, sub_plotter
        Injected by the decorators when not supplied explicitly.
    """
    number_subplots = 6

    # Each panel: (plotting function, the extra keyword overlays it receives).
    panels = [
        (image, dict(grid=grid, mask=mask, positions=positions)),
        (noise_map, dict(mask=mask, positions=positions)),
        (psf, dict(positions=positions)),
        (signal_to_noise_map, dict(mask=mask, positions=positions)),
        (absolute_signal_to_noise_map, dict(mask=mask, positions=positions)),
        (potential_chi_squared_map, dict(mask=mask, positions=positions)),
    ]

    sub_plotter.open_subplot_figure(number_subplots=number_subplots)
    for subplot_index, (plot_func, overlays) in enumerate(panels, start=1):
        sub_plotter.setup_subplot(
            number_subplots=number_subplots, subplot_index=subplot_index
        )
        plot_func(imaging=imaging, include=include, plotter=sub_plotter, **overlays)

    sub_plotter.output.subplot_to_figure()
    sub_plotter.figure.close()
def individual(
    imaging,
    grid=None,
    mask=None,
    positions=None,
    plot_image=False,
    plot_noise_map=False,
    plot_psf=False,
    plot_signal_to_noise_map=False,
    plot_absolute_signal_to_noise_map=False,
    plot_potential_chi_squared_map=False,
    include=None,
    plotter=None,
):
    """Plot the requested attributes of an imaging dataset as separate figures.

    Each ``plot_*`` flag enables one figure; all flags default to False.

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset (observed image, noise map, PSF, etc.).
    """
    # (flag, plotting function, extra overlays) — preserves the original
    # per-call keyword arguments and call order exactly.
    requested = [
        (plot_image, image, dict(grid=grid, mask=mask, positions=positions)),
        (plot_noise_map, noise_map, dict(mask=mask)),
        (plot_psf, psf, dict()),
        (plot_signal_to_noise_map, signal_to_noise_map, dict(mask=mask)),
        (plot_absolute_signal_to_noise_map, absolute_signal_to_noise_map, dict(mask=mask)),
        (plot_potential_chi_squared_map, potential_chi_squared_map, dict(mask=mask)),
    ]
    for flag, plot_func, overlays in requested:
        if flag:
            plot_func(imaging=imaging, include=include, plotter=plotter, **overlays)
@plotters.set_include_and_plotter
@plotters.set_labels
def image(imaging, grid=None, mask=None, positions=None, include=None, plotter=None):
    """Plot the observed image of an imaging dataset as a single figure.

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset (observed image, noise map, PSF, etc.).
    grid, mask, positions
        Optional overlays drawn on top of the image.
    include, plotter
        Injected by the decorators when not supplied explicitly.
    """
    plot_kwargs = dict(
        array=imaging.image,
        include_origin=include.origin,
        include_border=include.border,
        grid=grid,
        mask=mask,
        positions=positions,
    )
    plotter.plot_array(**plot_kwargs)
@plotters.set_include_and_plotter
@plotters.set_labels
def noise_map(
    imaging, grid=None, mask=None, positions=None, include=None, plotter=None
):
    """Plot the noise map of an imaging dataset as a single figure.

    Parameters
    ----------
    imaging : data_type.ImagingData
        The imaging dataset (observed image, noise map, PSF, etc.).
    grid, mask, positions
        Optional overlays drawn on top of the noise map.
    include, plotter
        Injected by the decorators when not supplied explicitly.
    """
    plot_kwargs = dict(
        array=imaging.noise_map,
        include_origin=include.origin,
        include_border=include.border,
        grid=grid,
        mask=mask,
        positions=positions,
    )
    plotter.plot_array(**plot_kwargs)
@plotters.set_include_and_plotter
@plotters.set_labels
def psf(imaging, grid=None, positions=None, include=None, plotter=None):
    """Plot the PSF of the imaging data_type.
    Set *autolens.data_type.array.plotters.plotters* for a description of all input parameters not described below.
    Parameters
    -----------
    imaging : data_type.ImagingData
        The imaging data_type, which includes the observed image, noise_map-map, PSF, signal-to-noise_map-map, etc.
    grid : Grid, optional
        Optional grid of (y,x) coordinates plotted over the PSF.
    positions : optional
        Optional (y,x) positions plotted over the PSF.
    """
    plotter.plot_array(
        array=imaging.psf, include_origin=include.origin, grid=grid, positions=positions
    )
@plotters.set_include_and_plotter
@plotters.set_labels
def signal_to_noise_map(
    imaging, grid=None, mask=None, positions=None, include=None, plotter=None
):
    """Plot the signal-to-noise_map-map of the imaging data_type.
    Set *autolens.data_type.array.plotters.plotters* for a description of all input parameters not described below.
    Parameters
    -----------
    imaging : data_type.ImagingData
        The imaging data_type, which includes the observed image, noise_map-map, PSF, signal-to-noise_map-map, etc.
    grid : Grid, optional
        Optional grid of (y,x) coordinates plotted over the array.
    mask : Mask, optional
        Optional mask plotted over the array.
    positions : optional
        Optional (y,x) positions plotted over the array.
    """
    plotter.plot_array(
        array=imaging.signal_to_noise_map,
        include_origin=include.origin,
        include_border=include.border,
        grid=grid,
        mask=mask,
        positions=positions,
    )
@plotters.set_include_and_plotter
@plotters.set_labels
def absolute_signal_to_noise_map(
    imaging, grid=None, mask=None, positions=None, include=None, plotter=None
):
    """Plot the absolute signal-to-noise_map-map of the imaging data_type.
    Set *autolens.data_type.array.plotters.plotters* for a description of all input parameters not described below.
    Parameters
    -----------
    imaging : data_type.ImagingData
        The imaging data_type, which includes the observed image, noise_map-map, PSF, signal-to-noise_map-map, etc.
    grid : Grid, optional
        Optional grid of (y,x) coordinates plotted over the array.
    mask : Mask, optional
        Optional mask plotted over the array.
    positions : optional
        Optional (y,x) positions plotted over the array.
    """
    plotter.plot_array(
        array=imaging.absolute_signal_to_noise_map,
        include_origin=include.origin,
        include_border=include.border,
        grid=grid,
        mask=mask,
        positions=positions,
    )
@plotters.set_include_and_plotter
@plotters.set_labels
def potential_chi_squared_map(
    imaging, grid=None, mask=None, positions=None, include=None, plotter=None
):
    """Plot the potential chi-squared-map of the imaging data_type.
    Set *autolens.data_type.array.plotters.plotters* for a description of all input parameters not described below.
    Parameters
    -----------
    imaging : data_type.ImagingData
        The imaging data_type, which includes the observed image, noise_map-map, PSF, signal-to-noise_map-map, etc.
    grid : Grid, optional
        Optional grid of (y,x) coordinates plotted over the array.
    mask : Mask, optional
        Optional mask plotted over the array.
    positions : optional
        Optional (y,x) positions plotted over the array.
    """
    plotter.plot_array(
        array=imaging.potential_chi_squared_map,
        include_origin=include.origin,
        include_border=include.border,
        grid=grid,
        mask=mask,
        positions=positions,
    )
| 31.775974
| 134
| 0.697047
|
acfeb6e521e29b825a0c66be210109a53d2986f0
| 2,044
|
py
|
Python
|
fedn/fedn/common/security/certificatemanager.py
|
eriks-aidotse/fedn
|
ab784e6ac45fd02be4532c9bbc8d5b8c75b62d51
|
[
"Apache-2.0"
] | 75
|
2020-07-19T10:40:15.000Z
|
2022-03-13T06:56:04.000Z
|
fedn/fedn/common/security/certificatemanager.py
|
eriks-aidotse/fedn
|
ab784e6ac45fd02be4532c9bbc8d5b8c75b62d51
|
[
"Apache-2.0"
] | 124
|
2020-07-27T18:16:21.000Z
|
2022-03-10T12:16:04.000Z
|
fedn/fedn/common/security/certificatemanager.py
|
eriks-aidotse/fedn
|
ab784e6ac45fd02be4532c9bbc8d5b8c75b62d51
|
[
"Apache-2.0"
] | 28
|
2020-08-14T19:39:30.000Z
|
2022-03-16T10:29:09.000Z
|
from .certificate import Certificate
class CertificateManager:
    """Manages TLS certificate/key pairs stored as PEM files in a directory.

    Certificates are discovered as ``<name>-cert.pem`` / ``<name>-key.pem``
    file pairs inside ``directory`` and kept in an in-memory list.
    """

    def __init__(self, directory):
        """
        :param directory: Path of the directory holding the PEM files.
        """
        self.directory = directory
        self.certificates = []
        # NOTE(review): `allowed` is never populated in this class, yet
        # `find` iterates it as if it held Certificate objects -- confirm
        # the intended use with callers.
        self.allowed = dict()
        self.load_all()

    def get_or_create(self, name):
        """Return the certificate named `name`, generating a keypair if absent.

        :param name: Logical name of the certificate.
        :return: The existing or newly created :class:`Certificate`.
        """
        search = self.find(name)
        if search:
            return search
        cert = Certificate(self.directory, name=name, cert_name=name + '-cert.pem', key_name=name + '-key.pem')
        cert.gen_keypair()
        self.certificates.append(cert)
        return cert

    def add(self, certificate):
        """Register `certificate` unless one with the same name already exists.

        :param certificate: :class:`Certificate` to register.
        :return: True if added, False if a certificate with that name exists.
        """
        if not self.find(certificate.name):
            self.certificates.append(certificate)
            return True
        return False

    def load_all(self):
        """Scan the directory and load every ``*-cert.pem`` file found.

        The key file name is derived from the certificate file name; the
        original implementation branched on whether the key file exists but
        constructed an identical Certificate in both branches, so that dead
        check has been removed.
        """
        import os
        for filename in sorted(os.listdir(self.directory)):
            if filename.endswith('cert.pem'):
                name = filename.split('-')[0]
                key_name = name + '-key.pem'
                self.certificates.append(
                    Certificate(self.directory, name=name, cert_name=filename,
                                key_name=key_name))

    def find(self, name):
        """Look up a certificate by name.

        :param name: Name to search for.
        :return: The matching certificate, or None if not found.
        """
        for cert in self.certificates:
            if cert.name == name:
                return cert
        for cert in self.allowed:
            if cert.name == name:
                return cert
        return None
| 27.253333
| 115
| 0.508317
|
acfeb7592bac5462ed6a7b456c76f64d601e1fdd
| 84,190
|
py
|
Python
|
tensorflow_federated/python/core/impl/compiler/building_block_factory.py
|
ixcc/federated
|
3fb48ae6d019ee763c5112d23c3bdbcbaea17948
|
[
"Apache-2.0"
] | 1
|
2020-01-13T02:51:38.000Z
|
2020-01-13T02:51:38.000Z
|
tensorflow_federated/python/core/impl/compiler/building_block_factory.py
|
ixcc/federated
|
3fb48ae6d019ee763c5112d23c3bdbcbaea17948
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_federated/python/core/impl/compiler/building_block_factory.py
|
ixcc/federated
|
3fb48ae6d019ee763c5112d23c3bdbcbaea17948
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library implementing reusable `computation_building_blocks` structures."""
import random
import string
from typing import Sequence
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl import type_utils
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.compiler import intrinsic_defs
from tensorflow_federated.python.core.impl.compiler import placement_literals
from tensorflow_federated.python.core.impl.compiler import transformation_utils
from tensorflow_federated.python.core.impl.compiler import type_serialization
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
def unique_name_generator(comp, prefix='_var'):
  """Yields names guaranteed not to collide with any name in `comp`.

  Args:
    comp: The computation building block whose existing names must be
      avoided, or `None` to avoid nothing.
    prefix: The prefix for generated names. If `prefix` is `None`, or if
      `comp` already contains a name starting with it, a fresh prefix of
      three random lowercase ascii characters is chosen instead.
  """
  existing = transformation_utils.get_unique_names(comp) if comp is not None else set()
  # Re-roll the prefix until no existing name starts with it.
  while prefix is None or any(name.startswith(prefix) for name in existing):
    prefix = '_' + ''.join(random.choice(string.ascii_lowercase) for _ in range(3))
  counter = 1
  while True:
    yield '{}{}'.format(prefix, counter)
    counter += 1
def create_compiled_empty_tuple():
  """Returns a called no-arg graph computation producing the empty tuple.

  Returns:
    An instance of `building_blocks.Call` whose function is a
    `building_blocks.CompiledComputation` taking no argument and returning
    an empty tuple.
  """
  with tf.Graph().as_default() as graph:
    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        [], graph)
  empty_tuple_fn_type = computation_types.FunctionType(None, result_type)
  tensorflow_proto = pb.TensorFlow(
      graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
      parameter=None,
      result=result_binding)
  proto = pb.Computation(
      type=type_serialization.serialize_type(empty_tuple_fn_type),
      tensorflow=tensorflow_proto)
  compiled = building_blocks.CompiledComputation(proto)
  return building_blocks.Call(compiled, None)
def create_compiled_identity(type_signature, name=None):
  """Creates a `CompiledComputation` representing the identity function.

  Args:
    type_signature: Argument convertible to an instance of
      `computation_types.Type` via `computation_types.to_type`.
    name: An optional string name to use as the name of the computation.

  Returns:
    An instance of `building_blocks.CompiledComputation` representing the
    identity function taking an argument of type `type_signature` and
    returning the same value.

  Raises:
    TypeError: If `type_signature` contains any types which cannot appear in
      TensorFlow bindings.
  """
  type_spec = computation_types.to_type(type_signature)
  py_typecheck.check_type(type_spec, computation_types.Type)
  type_utils.check_tensorflow_compatible_type(type_spec)
  with tf.Graph().as_default() as graph:
    # Stamp the parameter into the graph and capture it, unchanged, as
    # the result.
    param_value, param_binding = tensorflow_utils.stamp_parameter_in_graph(
        'x', type_spec, graph)
    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        param_value, graph)
  identity_fn_type = computation_types.FunctionType(type_spec, result_type)
  proto = pb.Computation(
      type=type_serialization.serialize_type(identity_fn_type),
      tensorflow=pb.TensorFlow(
          graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
          parameter=param_binding,
          result=result_binding))
  return building_blocks.CompiledComputation(proto, name)
class SelectionSpec(object):
  """Data class representing map from input tuple to selection of result.

  Attributes:
    tuple_index: The index of the source of the selection sequence in the
      desired result of the generated TensorFlow. If this `SelectionSpec`
      appears at index i of a list of `SelectionSpec`s, index j is the source
      for the result of the generated function at index i.
    selection_sequence: A list or tuple representing the selections to make
      from `tuple_index`, so that the list `[0]` for example would represent
      the output is the 0th element of `tuple_index`, while `[0, 0]` would
      represent that the output is the 0th element of the 0th element of
      `tuple_index`.
  """

  def __init__(self, tuple_index: int, selection_sequence: Sequence[int]):
    self._tuple_index = tuple_index
    self._selection_sequence = selection_sequence

  @property
  def tuple_index(self):
    return self._tuple_index

  @property
  def selection_sequence(self):
    return self._selection_sequence

  def __str__(self):
    # Fixed: the format string previously said 'SelectionSequence' (the wrong
    # class name) and was missing its closing parenthesis.
    return 'SelectionSpec(tuple_index={},selection_sequence={})'.format(
        self._tuple_index, self._selection_sequence)

  def __repr__(self):
    return str(self)
def _extract_selections(parameter_value, output_spec):
results = []
for selection_spec in output_spec:
result_element = parameter_value[selection_spec.tuple_index]
for selection in selection_spec.selection_sequence:
py_typecheck.check_type(selection, int)
result_element = result_element[selection]
results.append(result_element)
return results
def construct_tensorflow_selecting_and_packing_outputs(
    arg_type, output_structure: anonymous_tuple.AnonymousTuple):
  """Constructs TensorFlow selecting and packing elements from its input.
  The result of this function can be called on a deduplicated
  `building_blocks.Tuple` containing called graphs, thus preventing us from
  embedding the same TensorFlow computation in the generated graphs, and
  reducing the amount of work duplicated in the process of generating
  TensorFlow.
  The TensorFlow which results here will be a function which takes an argument
  of type `arg_type`, returning a result specified by `output_structure`. Each
  `SelectionSpec` nested inside of `output_structure` will represent a selection
  from one of the arguments of the tuple `arg_type`, with the empty selection
  being a possibility. The nested structure of `output_structure` will determine
  how these selections are packed back into a result, IE, the result of the
  function will be a nested tuple with the same structure as `output_structure`,
  where the leaves of this structure (the `SelectionSpecs` of
  `output_structure`) will be selections from the argument.
  Args:
    arg_type: `computation_types.Type` of the argument on which the constructed
      function will be called. Should be an instance of
      `computation_types.NamedTupleType`.
    output_structure: `anonymous_tuple.AnonymousTuple` with `SelectionSpec`
      or `anonymous_tuple.AnonymousTuple` elements, mapping from elements of
      the nested argument tuple to the desired result of the generated
      computation. `output_structure` must contain all the names desired on
      the output of the computation.
  Returns:
    A `building_blocks.CompiledComputation` representing the specification
    above.
  Raises:
    TypeError: If `arg_type` is not a `computation_types.NamedTupleType`, or
      represents a type which cannot act as an input or output to a TensorFlow
      computation in TFF, IE does not contain exclusively
      `computation_types.SequenceType`, `computation_types.NamedTupleType` or
      `computation_types.TensorType`.
  """
  py_typecheck.check_type(output_structure, anonymous_tuple.AnonymousTuple)
  def _check_output_structure(elem):
    # Recursively verify that `output_structure` contains only nested
    # AnonymousTuples whose leaves are SelectionSpec instances.
    if isinstance(elem, anonymous_tuple.AnonymousTuple):
      for x in elem:
        _check_output_structure(x)
    elif not isinstance(elem, SelectionSpec):
      raise TypeError('output_structure can only contain nested anonymous '
                      'tuples and `SelectionSpecs`; encountered the value {} '
                      'of type {}.'.format(elem, type(elem)))
  _check_output_structure(output_structure)
  # Flatten to a list of SelectionSpecs; the nesting is restored after the
  # selections have been performed inside the graph.
  output_spec = anonymous_tuple.flatten(output_structure)
  type_spec = computation_types.to_type(arg_type)
  py_typecheck.check_type(type_spec, computation_types.NamedTupleType)
  type_utils.check_tensorflow_compatible_type(type_spec)
  with tf.Graph().as_default() as graph:
    parameter_value, parameter_binding = tensorflow_utils.stamp_parameter_in_graph(
        'x', type_spec, graph)
    # Perform the selections on the stamped parameter, then repack the flat
    # results into the nested shape of `output_structure`.
    results = _extract_selections(parameter_value, output_spec)
    repacked_result = anonymous_tuple.pack_sequence_as(output_structure, results)
    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        repacked_result, graph)
  function_type = computation_types.FunctionType(type_spec, result_type)
  serialized_function_type = type_serialization.serialize_type(function_type)
  proto = pb.Computation(
      type=serialized_function_type,
      tensorflow=pb.TensorFlow(
          graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
          parameter=parameter_binding,
          result=result_binding))
  return building_blocks.CompiledComputation(proto)
def create_tensorflow_constant(type_spec, scalar_value):
  """Creates called graph returning constant `scalar_value` of type `type_spec`.
  `scalar_value` must be a scalar, and cannot be a float if any of the tensor
  leaves of `type_spec` contain an integer data type. `type_spec` must contain
  only named tuples and tensor types, but these can be arbitrarily nested.
  Args:
    type_spec: Value convertible to `computation_types.Type` via
      `computation_types.to_type`, and whose resulting type tree can only
      contain named tuples and tensors.
    scalar_value: Scalar value to place in all the tensor leaves of `type_spec`.
  Returns:
    An instance of `building_blocks.Call`, whose argument is `None`
    and whose function is a noarg
    `building_blocks.CompiledComputation` which returns the
    specified `scalar_value` packed into a TFF structure of type `type_spec`.
  Raises:
    TypeError: If the type assumptions above are violated.
  """
  type_spec = computation_types.to_type(type_spec)
  py_typecheck.check_type(type_spec, computation_types.Type)
  if not type_utils.is_generic_op_compatible_type(type_spec):
    raise TypeError('Type spec {} cannot be constructed as a TensorFlow '
                    'constant in TFF; only nested tuples and tensors are '
                    'permitted.'.format(type_spec))
  inferred_scalar_value_type = type_utils.infer_type(scalar_value)
  # A scalar is a tensor with the empty shape `()`.
  if (not isinstance(inferred_scalar_value_type, computation_types.TensorType)
      or inferred_scalar_value_type.shape != tf.TensorShape(())):
    raise TypeError('Must pass a scalar value to '
                    '`create_tensorflow_constant`; encountered a value '
                    '{}'.format(scalar_value))
  tensor_dtypes_in_type_spec = []
  def _pack_dtypes(type_signature):
    """Appends dtype of `type_signature` to nonlocal variable."""
    if isinstance(type_signature, computation_types.TensorType):
      tensor_dtypes_in_type_spec.append(type_signature.dtype)
    return type_signature, False
  type_utils.transform_type_postorder(type_spec, _pack_dtypes)
  # Placing a float into an integer tensor would silently truncate, so this
  # is rejected up front.
  if (any(x.is_integer for x in tensor_dtypes_in_type_spec) and
      not inferred_scalar_value_type.dtype.is_integer):
    raise TypeError('Only integers can be used as scalar values if our desired '
                    'constant type spec contains any integer tensors; passed '
                    'scalar {} of dtype {} for type spec {}.'.format(
                        scalar_value, inferred_scalar_value_type.dtype,
                        type_spec))
  def _create_result_tensor(type_spec, scalar_value):
    """Packs `scalar_value` into `type_spec` recursively."""
    if isinstance(type_spec, computation_types.TensorType):
      # Shapes must be fully defined so `tf.constant` can materialize them.
      type_spec.shape.assert_is_fully_defined()
      result = tf.constant(
          scalar_value, dtype=type_spec.dtype, shape=type_spec.shape)
    else:
      elements = []
      for _, type_element in anonymous_tuple.iter_elements(type_spec):
        elements.append(_create_result_tensor(type_element, scalar_value))
      result = elements
    return result
  with tf.Graph().as_default() as graph:
    result = _create_result_tensor(type_spec, scalar_value)
    _, result_binding = tensorflow_utils.capture_result_from_graph(result, graph)
  # The computation takes no parameter and returns the constant structure.
  function_type = computation_types.FunctionType(None, type_spec)
  serialized_function_type = type_serialization.serialize_type(function_type)
  proto = pb.Computation(
      type=serialized_function_type,
      tensorflow=pb.TensorFlow(
          graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
          parameter=None,
          result=result_binding))
  noarg_constant_fn = building_blocks.CompiledComputation(proto)
  return building_blocks.Call(noarg_constant_fn, None)
def create_compiled_input_replication(type_signature, n_replicas):
  """Creates a compiled computation which replicates its argument.

  Args:
    type_signature: Value convertible to `computation_types.Type` via
      `computation_types.to_type`. The type of the parameter of the
      constructed computation.
    n_replicas: Integer, the number of times the argument is intended to be
      replicated.

  Returns:
    An instance of `building_blocks.CompiledComputation` encoding a function
    taking a single argument of type `type_signature` and returning
    `n_replicas` identical copies of this argument.

  Raises:
    TypeError: If `type_signature` contains any types which cannot appear in
      TensorFlow bindings, or if `n_replicas` is not an integer.
  """
  type_spec = computation_types.to_type(type_signature)
  py_typecheck.check_type(type_spec, computation_types.Type)
  type_utils.check_tensorflow_compatible_type(type_spec)
  py_typecheck.check_type(n_replicas, int)
  with tf.Graph().as_default() as graph:
    param_value, param_binding = tensorflow_utils.stamp_parameter_in_graph(
        'x', type_spec, graph)
    # Every replica refers to the same stamped parameter value.
    replicated = [param_value for _ in range(n_replicas)]
    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        replicated, graph)
  replication_fn_type = computation_types.FunctionType(type_spec, result_type)
  proto = pb.Computation(
      type=type_serialization.serialize_type(replication_fn_type),
      tensorflow=pb.TensorFlow(
          graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
          parameter=param_binding,
          result=result_binding))
  return building_blocks.CompiledComputation(proto)
def create_tensorflow_to_broadcast_scalar(scalar_type, new_shape):
  """Creates TF function broadcasting scalar to shape `new_shape`.

  Args:
    scalar_type: Instance of `tf.DType`, the type of the scalar we are
      looking to broadcast.
    new_shape: Instance of `tf.TensorShape`, the shape we wish to broadcast
      to. Must be fully defined.

  Returns:
    Instance of `building_blocks.CompiledComputation` representing a function
    declaring a scalar parameter of dtype `scalar_type`, and returning a
    tensor of this same dtype and shape `new_shape`, with the same value in
    each entry as its scalar argument.

  Raises:
    TypeError: If the types of the arguments do not match the declared arg
      types.
    ValueError: If `new_shape` is not fully defined.
  """
  # TODO(b/136119348): There are enough of these little TF helper functions,
  # and they are sufficiently conceptually similar, to potentially warrant
  # factoring out into their own file. At the same time, a possible clearer
  # pattern than immediately dropping into the graph context manager would be
  # to declare parameter and result bindings outside of the context manager
  # (after constructing the graph of course) and only dropping in for the
  # body. If these functions get moved, perhaps that would be a natural time
  # to revisit the pattern.
  py_typecheck.check_type(scalar_type, tf.DType)
  py_typecheck.check_type(new_shape, tf.TensorShape)
  new_shape.assert_is_fully_defined()
  scalar_spec = computation_types.TensorType(scalar_type, shape=())
  with tf.Graph().as_default() as graph:
    scalar_value, param_binding = tensorflow_utils.stamp_parameter_in_graph(
        'x', scalar_spec, graph)
    broadcast_value = tf.broadcast_to(scalar_value, new_shape)
    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        broadcast_value, graph)
  broadcast_fn_type = computation_types.FunctionType(
      computation_types.TensorType(dtype=scalar_type, shape=()), result_type)
  proto = pb.Computation(
      type=type_serialization.serialize_type(broadcast_fn_type),
      tensorflow=pb.TensorFlow(
          graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
          parameter=param_binding,
          result=result_binding))
  return building_blocks.CompiledComputation(proto)
def create_tensorflow_binary_operator(operand_type, operator):
  """Creates a TensorFlow computation for the binary `operator`.
  For `T` the `operand_type`, the type signature of the constructed operator
  will be `(<T,T> -> U)`, where `U` is the result of applying `operator` to
  a tuple of type `<T,T>`.
  Notice that we have quite serious restrictions on `operand_type` here; not
  only must it be compatible with stamping into a TensorFlow graph, but
  additionally cannot contain a `computation_types.SequenceType`, as checked by
  `type_utils.is_generic_op_compatible_type`.
  Notice also that if `operand_type` is a `computation_types.NamedTupleType`,
  `operator` will be applied pointwise. This places the burden on callers of
  this function to construct the correct values to pass into the returned
  function. For example, to divide `[2, 2]` by `2`, first the `int 2` must
  be packed into the data structure `[x, x]`, before the division operator of
  the appropriate type is called.
  Args:
    operand_type: The type of argument to the constructed binary operator. Must
      be convertible to `computation_types.Type`.
    operator: Callable taking two arguments specifying the operation to encode.
      For example, `tf.add`, `tf.multiply`, `tf.divide`, ...
  Returns:
    Instance of `building_blocks.CompiledComputation` encoding
    this binary operator.
  Raises:
    TypeError: If the type tree of `operand_type` contains any type which is
      incompatible with the TFF generic operators, as checked by
      `type_utils.is_generic_op_compatible_type`, or `operator` is not callable.
  """
  operand_type = computation_types.to_type(operand_type)
  py_typecheck.check_type(operand_type, computation_types.Type)
  py_typecheck.check_callable(operator)
  if not type_utils.is_generic_op_compatible_type(operand_type):
    raise TypeError('The type {} contains a type other than '
                    '`computation_types.TensorType` and '
                    '`computation_types.NamedTupleType`; this is disallowed '
                    'in the generic operators.'.format(operand_type))
  with tf.Graph().as_default() as graph:
    # Both operands are stamped into the same graph; their bindings are
    # combined below into a single two-element tuple parameter <x, y>.
    operand_1_value, operand_1_binding = tensorflow_utils.stamp_parameter_in_graph(
        'x', operand_type, graph)
    operand_2_value, operand_2_binding = tensorflow_utils.stamp_parameter_in_graph(
        'y', operand_type, graph)
    if isinstance(operand_type, computation_types.TensorType):
      result_value = operator(operand_1_value, operand_2_value)
    elif isinstance(operand_type, computation_types.NamedTupleType):
      # Tuples are combined pointwise: the operator is applied leaf-by-leaf.
      result_value = anonymous_tuple.map_structure(operator, operand_1_value,
                                                   operand_2_value)
    else:
      raise TypeError('Operand type {} cannot be used in generic operations. '
                      'The whitelist in '
                      '`type_utils.is_generic_op_compatible_type` has allowed '
                      'it to pass, and should be updated.'.format(operand_type))
    result_type, result_binding = tensorflow_utils.capture_result_from_graph(
        result_value, graph)
  function_type = computation_types.FunctionType(
      computation_types.NamedTupleType([operand_type, operand_type]),
      result_type)
  serialized_function_type = type_serialization.serialize_type(function_type)
  parameter_binding = pb.TensorFlow.Binding(
      tuple=pb.TensorFlow.NamedTupleBinding(
          element=[operand_1_binding, operand_2_binding]))
  proto = pb.Computation(
      type=serialized_function_type,
      tensorflow=pb.TensorFlow(
          graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
          parameter=parameter_binding,
          result=result_binding))
  return building_blocks.CompiledComputation(proto)
def create_federated_getitem_call(arg, idx):
  """Creates computation building block passing getitem to federated value.

  Args:
    arg: Instance of `building_blocks.ComputationBuildingBlock` of
      `computation_types.FederatedType` with member of type
      `computation_types.NamedTupleType` from which we wish to pick out item
      `idx`.
    idx: Index, instance of `int` or `slice` used to address the
      `computation_types.NamedTupleType` underlying `arg`.

  Returns:
    A `building_blocks.Call` with type signature
    `computation_types.FederatedType` of same placement as `arg`, the result
    of applying or mapping the appropriate `__getitem__` function, as defined
    by `idx`.
  """
  py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(idx, (int, slice))
  py_typecheck.check_type(arg.type_signature, computation_types.FederatedType)
  py_typecheck.check_type(arg.type_signature.member,
                          computation_types.NamedTupleType)
  # Build the pointwise selection lambda, then lift it over the placement.
  selector = create_federated_getitem_comp(arg, idx)
  return create_federated_map_or_apply(selector, arg)
def create_federated_getattr_call(arg, name):
  """Creates computation building block passing getattr to federated value.

  Args:
    arg: Instance of `building_blocks.ComputationBuildingBlock` of
      `computation_types.FederatedType` with member of type
      `computation_types.NamedTupleType` from which we wish to pick out item
      `name`.
    name: String name to address the `computation_types.NamedTupleType`
      underlying `arg`.

  Returns:
    A `building_blocks.Call` with type signature
    `computation_types.FederatedType` of same placement as `arg`, the result
    of applying or mapping the appropriate `__getattr__` function, as defined
    by `name`.
  """
  py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(name, str)
  py_typecheck.check_type(arg.type_signature, computation_types.FederatedType)
  py_typecheck.check_type(arg.type_signature.member,
                          computation_types.NamedTupleType)
  # Build the attribute-selection lambda, then lift it over the placement.
  selector = create_federated_getattr_comp(arg, name)
  return create_federated_map_or_apply(selector, arg)
def create_federated_setattr_call(federated_comp, name, value_comp):
  """Returns building block for `setattr(name, value_comp)` on `federated_comp`.

  Builds a `building_blocks.Lambda` that rewrites the `name` attribute of
  `federated_comp`'s `member` to `value_comp`, then stitches it together with
  the appropriate communication intrinsic (either `federated_map` or
  `federated_apply`) in a call.

  Notice that `federated_comp`'s `member` must actually define a `name`
  attribute; this is enforced to avoid the need to worry about the placement
  of a previously undefined name.

  Args:
    federated_comp: Instance of `building_blocks.ComputationBuildingBlock` of
      type `computation_types.FederatedType`, with member of type
      `computation_types.NamedTupleType` whose attribute `name` we wish to
      set to `value_comp`.
    name: String name of the attribute we wish to overwrite in
      `federated_comp`.
    value_comp: Instance of `building_blocks.ComputationBuildingBlock`, the
      value to assign to `federated_comp`'s `member`'s `name` attribute.

  Returns:
    Instance of `building_blocks.ComputationBuildingBlock` representing
    `federated_comp` with its `member`'s `name` attribute set to `value`.
  """
  py_typecheck.check_type(federated_comp,
                          building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(name, str)
  py_typecheck.check_type(value_comp, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(federated_comp.type_signature,
                          computation_types.FederatedType)
  py_typecheck.check_type(federated_comp.type_signature.member,
                          computation_types.NamedTupleType)
  member_signature = federated_comp.type_signature.member
  setter = create_named_tuple_setattr_lambda(member_signature, name,
                                             value_comp)
  return create_federated_map_or_apply(setter, federated_comp)
def create_named_tuple_setattr_lambda(named_tuple_signature, name, value_comp):
  """Creates a building block for replacing one attribute in a named tuple.
  Returns an instance of `building_blocks.Lambda` which takes an
  argument of type `computation_types.NamedTupleType` and returns a
  `building_blocks.Tuple` which contains all the same elements as
  the argument, except the attribute `name` now has value `value_comp`. The
  Lambda constructed is the analogue of Python's `setattr` for the concrete
  type `named_tuple_signature`.
  Args:
    named_tuple_signature: Instance of `computation_types.NamedTupleType`, the
      type of the argument to the constructed `building_blocks.Lambda`.
    name: String name of the attribute in the `named_tuple_signature` to replace
      with `value_comp`. Must be present as a name in `named_tuple_signature;
      otherwise we will raise an `AttributeError`.
    value_comp: Instance of `building_blocks.ComputationBuildingBlock`, the
      value to place as attribute `name` in the argument of the returned
      function.
  Returns:
    An instance of `building_blocks.Block` of functional type
    representing setting attribute `name` to value `value_comp` in its argument
    of type `named_tuple_signature`.
  Raises:
    TypeError: If the types of the arguments don't match the assumptions above.
    AttributeError: If `name` is not present as a named element in
      `named_tuple_signature`
  """
  py_typecheck.check_type(named_tuple_signature,
                          computation_types.NamedTupleType)
  py_typecheck.check_type(name, str)
  py_typecheck.check_type(value_comp, building_blocks.ComputationBuildingBlock)
  # `value_comp` is bound once in an enclosing Block (below) so that it is not
  # duplicated into the tuple literal.
  value_comp_placeholder = building_blocks.Reference('value_comp_placeholder',
                                                     value_comp.type_signature)
  lambda_arg = building_blocks.Reference('lambda_arg', named_tuple_signature)
  if name not in dir(named_tuple_signature):
    raise AttributeError(
        'There is no such attribute as \'{}\' in this federated tuple. '
        'TFF does not allow for assigning to a nonexistent attribute. '
        'If you want to assign to \'{}\', you must create a new named tuple '
        'containing this attribute.'.format(name, name))
  # Rebuild the tuple element-by-element: the matching name gets the new
  # value; every other element is selected unchanged from the argument.
  elements = []
  for idx, (key, element_type) in enumerate(
      anonymous_tuple.to_elements(named_tuple_signature)):
    if key == name:
      if not type_utils.is_assignable_from(element_type,
                                           value_comp.type_signature):
        raise TypeError(
            '`setattr` has attempted to set element {} of type {} with incompatible type {}'
            .format(key, element_type, value_comp.type_signature))
      elements.append((key, value_comp_placeholder))
    else:
      elements.append((key, building_blocks.Selection(lambda_arg, index=idx)))
  return_tuple = building_blocks.Tuple(elements)
  lambda_to_return = building_blocks.Lambda(lambda_arg.name,
                                            named_tuple_signature, return_tuple)
  symbols = ((value_comp_placeholder.name, value_comp),)
  return building_blocks.Block(symbols, lambda_to_return)
def create_federated_getattr_comp(comp, name):
  """Builds a lambda selecting attribute `name`, for `federated_apply`.

  The returned `building_blocks.Lambda` accepts an argument of type
  `comp.type_signature.member` (an instance of
  `computation_types.NamedTupleType`) and returns the element named `name`
  selected from it.

  Args:
    comp: Instance of `building_blocks.ComputationBuildingBlock` with type
      signature `computation_types.FederatedType` whose `member` attribute is
      of type `computation_types.NamedTupleType`.
    name: String name of the attribute to select.

  Returns:
    Instance of `building_blocks.Lambda` which selects the attribute `name`
    from its argument.

  Raises:
    ValueError: If `name` is not an element of the federated value's member
      type.
  """
  py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(comp.type_signature, computation_types.FederatedType)
  py_typecheck.check_type(comp.type_signature.member,
                          computation_types.NamedTupleType)
  py_typecheck.check_type(name, str)
  member_type = comp.type_signature.member
  if not any(elem_name == name
             for elem_name, _ in anonymous_tuple.iter_elements(member_type)):
    raise ValueError(
        'The federated value has no element of name `{}`. Value: {}'.format(
            name, comp.formatted_representation()))
  arg_ref = building_blocks.Reference('x', member_type)
  selection = building_blocks.Selection(arg_ref, name=name)
  return building_blocks.Lambda('x', arg_ref.type_signature, selection)
def create_federated_getitem_comp(comp, key):
  """Builds a lambda implementing `__getitem__`, for `federated_apply`.

  The returned `building_blocks.Lambda` accepts an argument of type
  `comp.type_signature.member` (an instance of
  `computation_types.NamedTupleType`) and selects from it according to `key`:
  a single element for an integer key, or a sub-tuple for a slice key.

  Args:
    comp: Instance of `building_blocks.ComputationBuildingBlock` with type
      signature `computation_types.FederatedType` whose `member` attribute is
      of type `computation_types.NamedTupleType`.
    key: Instance of `int` or `slice` identifying the element(s) to select
      from the member of `comp`.

  Returns:
    Instance of `building_blocks.Lambda` which selects according to `key`
    from its argument.
  """
  py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(comp.type_signature, computation_types.FederatedType)
  py_typecheck.check_type(comp.type_signature.member,
                          computation_types.NamedTupleType)
  py_typecheck.check_type(key, (int, slice))
  member_type = comp.type_signature.member
  arg_ref = building_blocks.Reference('x', member_type)
  if isinstance(key, int):
    result = building_blocks.Selection(arg_ref, index=key)
  else:
    # Slice: build a sub-tuple of selections, preserving element names.
    named_elements = anonymous_tuple.to_elements(member_type)
    result = building_blocks.Tuple([
        (named_elements[k][0], building_blocks.Selection(arg_ref, index=k))
        for k in range(*key.indices(len(named_elements)))
    ])
  return building_blocks.Lambda('x', arg_ref.type_signature, result)
def create_computation_appending(comp1, comp2):
  r"""Returns a block appending `comp2` to `comp1`.

                Block
               /     \
  [comps=Tuple]       Tuple
         |            |
  [Comp, Comp]        [Sel(0), ...,  Sel(0),   Sel(1)]
                            \             \         \
                             Sel(0)        Sel(n)    Ref(comps)
                                  \             \
                                   Ref(comps)    Ref(comps)

  Args:
    comp1: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
      of type `computation_type.NamedTupleType`.
    comp2: A `building_blocks.ComputationBuildingBlock` or a named computation
      (a tuple pair of name, computation) representing a single element of an
      `anonymous_tuple.AnonymousTuple`.

  Returns:
    A `building_blocks.Block`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(comp1, building_blocks.ComputationBuildingBlock)
  if isinstance(comp2, building_blocks.ComputationBuildingBlock):
    # A bare building block is appended as an unnamed element.
    name2 = None
  elif py_typecheck.is_name_value_pair(
      comp2,
      name_required=False,
      value_type=building_blocks.ComputationBuildingBlock):
    name2, comp2 = comp2
  else:
    raise TypeError('Unexpected tuple element: {}.'.format(comp2))
  # Bind both computations once under a local `comps` reference so neither is
  # duplicated in the result.
  comps = building_blocks.Tuple((comp1, comp2))
  ref = building_blocks.Reference('comps', comps.type_signature)
  sel_0 = building_blocks.Selection(ref, index=0)
  elements = []
  # Re-select every element of `comp1` (keeping its original name), then add
  # `comp2` as the final element.
  named_type_signatures = anonymous_tuple.to_elements(comp1.type_signature)
  for index, (name, _) in enumerate(named_type_signatures):
    sel = building_blocks.Selection(sel_0, index=index)
    elements.append((name, sel))
  sel_1 = building_blocks.Selection(ref, index=1)
  elements.append((name2, sel_1))
  result = building_blocks.Tuple(elements)
  symbols = ((ref.name, comps),)
  return building_blocks.Block(symbols, result)
def create_federated_aggregate(value, zero, accumulate, merge, report):
  r"""Creates a called federated aggregate.

            Call
           /    \
  Intrinsic      Tuple
                 |
                 [Comp, Comp, Comp, Comp, Comp]

  Args:
    value: A `building_blocks.ComputationBuildingBlock` to use as the value.
    zero: A `building_blocks.ComputationBuildingBlock` to use as the initial
      value.
    accumulate: A `building_blocks.ComputationBuildingBlock` to use as the
      accumulate function.
    merge: A `building_blocks.ComputationBuildingBlock` to use as the merge
      function.
    report: A `building_blocks.ComputationBuildingBlock` to use as the report
      function.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(zero, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(accumulate, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(merge, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(report, building_blocks.ComputationBuildingBlock)
  # It's okay if the first argument of accumulate is assignable from the zero,
  # without being the exact type. This occurs when accumulate has a type like
  # (<int32[?], int32> -> int32[?]) but zero is int32[0].
  zero_arg_type = accumulate.type_signature.parameter[0]
  type_utils.check_assignable_from(zero_arg_type, zero.type_signature)
  # The aggregate's result lives at the server with whatever member type the
  # `report` function produces.
  result_type = computation_types.FederatedType(report.type_signature.result,
                                                placement_literals.SERVER)
  # Relax the all_equal bit of the value's type so the intrinsic's signature
  # accepts both all-equal and non-all-equal client values.
  intrinsic_type = computation_types.FunctionType((
      type_utils.to_non_all_equal(value.type_signature),
      zero_arg_type,
      accumulate.type_signature,
      merge.type_signature,
      report.type_signature,
  ), result_type)
  intrinsic = building_blocks.Intrinsic(intrinsic_defs.FEDERATED_AGGREGATE.uri,
                                        intrinsic_type)
  values = building_blocks.Tuple((value, zero, accumulate, merge, report))
  return building_blocks.Call(intrinsic, values)
def create_federated_apply(fn, arg):
  r"""Creates a called federated apply.

            Call
           /    \
  Intrinsic      Tuple
                 |
                 [Comp, Comp]

  Args:
    fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
    arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
  server_result_type = computation_types.FederatedType(
      fn.type_signature.result, placement_literals.SERVER)
  apply_intrinsic = building_blocks.Intrinsic(
      intrinsic_defs.FEDERATED_APPLY.uri,
      computation_types.FunctionType((fn.type_signature, arg.type_signature),
                                     server_result_type))
  arg_tuple = building_blocks.Tuple((fn, arg))
  return building_blocks.Call(apply_intrinsic, arg_tuple)
def create_federated_broadcast(value):
  r"""Creates a called federated broadcast.

            Call
           /    \
  Intrinsic      Comp

  Args:
    value: A `building_blocks.ComputationBuildingBlock` to use as the value.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  # A broadcast produces an identical (all-equal) value at every client.
  broadcast_result_type = computation_types.FederatedType(
      value.type_signature.member, placement_literals.CLIENTS, all_equal=True)
  broadcast_intrinsic = building_blocks.Intrinsic(
      intrinsic_defs.FEDERATED_BROADCAST.uri,
      computation_types.FunctionType(value.type_signature,
                                     broadcast_result_type))
  return building_blocks.Call(broadcast_intrinsic, value)
def create_federated_collect(value):
  r"""Creates a called federated collect.

            Call
           /    \
  Intrinsic      Comp

  Args:
    value: A `building_blocks.ComputationBuildingBlock` to use as the value.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  # Collecting gathers the client values into a single server-placed sequence.
  sequence_type = computation_types.SequenceType(value.type_signature.member)
  collect_result_type = computation_types.FederatedType(
      sequence_type, placement_literals.SERVER)
  collect_intrinsic = building_blocks.Intrinsic(
      intrinsic_defs.FEDERATED_COLLECT.uri,
      computation_types.FunctionType(
          type_utils.to_non_all_equal(value.type_signature),
          collect_result_type))
  return building_blocks.Call(collect_intrinsic, value)
def create_federated_map(fn, arg):
  r"""Creates a called federated map.

            Call
           /    \
  Intrinsic      Tuple
                 |
                 [Comp, Comp]

  Args:
    fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
    arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
  clients_param_type = computation_types.FederatedType(
      arg.type_signature.member, placement_literals.CLIENTS)
  clients_result_type = computation_types.FederatedType(
      fn.type_signature.result, placement_literals.CLIENTS)
  map_intrinsic = building_blocks.Intrinsic(
      intrinsic_defs.FEDERATED_MAP.uri,
      computation_types.FunctionType((fn.type_signature, clients_param_type),
                                     clients_result_type))
  arg_tuple = building_blocks.Tuple((fn, arg))
  return building_blocks.Call(map_intrinsic, arg_tuple)
def create_federated_map_all_equal(fn, arg):
  r"""Creates a called federated map of equal values.

            Call
           /    \
  Intrinsic      Tuple
                 |
                 [Comp, Comp]

  NOTE: The `fn` is required to be deterministic and therefore should contain
  no `building_blocks.CompiledComputations`.

  Args:
    fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
    arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
  # Both parameter and result stay all-equal: mapping a deterministic function
  # over identical values yields identical values.
  all_equal_param_type = computation_types.FederatedType(
      arg.type_signature.member, placement_literals.CLIENTS, all_equal=True)
  all_equal_result_type = computation_types.FederatedType(
      fn.type_signature.result, placement_literals.CLIENTS, all_equal=True)
  map_intrinsic = building_blocks.Intrinsic(
      intrinsic_defs.FEDERATED_MAP_ALL_EQUAL.uri,
      computation_types.FunctionType(
          (fn.type_signature, all_equal_param_type), all_equal_result_type))
  arg_tuple = building_blocks.Tuple((fn, arg))
  return building_blocks.Call(map_intrinsic, arg_tuple)
def create_federated_map_or_apply(fn, arg):
  r"""Creates a called federated map or apply depending on `arg`s placement.

            Call
           /    \
  Intrinsic      Tuple
                 |
                 [Comp, Comp]

  Args:
    fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
    arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(fn, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
  placement = arg.type_signature.placement
  # Dispatch on placement: clients get a map (all-equal-aware), server gets
  # an apply.
  if placement is placement_literals.SERVER:
    return create_federated_apply(fn, arg)
  if placement is placement_literals.CLIENTS:
    if arg.type_signature.all_equal:
      return create_federated_map_all_equal(fn, arg)
    return create_federated_map(fn, arg)
  raise TypeError('Unsupported placement {}.'.format(placement))
def create_federated_mean(value, weight):
  r"""Creates a called federated mean.

            Call
           /    \
  Intrinsic      Tuple
                 |
                 [Comp, Comp]

  Args:
    value: A `building_blocks.ComputationBuildingBlock` to use as the value.
    weight: A `building_blocks.ComputationBuildingBlock` to use as the weight
      or `None`.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  if weight is not None:
    py_typecheck.check_type(weight, building_blocks.ComputationBuildingBlock)
  mean_result_type = computation_types.FederatedType(
      value.type_signature.member, placement_literals.SERVER)
  if weight is None:
    # Unweighted: use the plain federated mean over the value alone.
    mean_intrinsic = building_blocks.Intrinsic(
        intrinsic_defs.FEDERATED_MEAN.uri,
        computation_types.FunctionType(
            type_utils.to_non_all_equal(value.type_signature),
            mean_result_type))
    return building_blocks.Call(mean_intrinsic, value)
  # Weighted: the intrinsic takes a <value, weight> pair.
  weighted_mean_intrinsic = building_blocks.Intrinsic(
      intrinsic_defs.FEDERATED_WEIGHTED_MEAN.uri,
      computation_types.FunctionType(
          (type_utils.to_non_all_equal(value.type_signature),
           type_utils.to_non_all_equal(weight.type_signature)),
          mean_result_type))
  arg_tuple = building_blocks.Tuple((value, weight))
  return building_blocks.Call(weighted_mean_intrinsic, arg_tuple)
def create_federated_reduce(value, zero, op):
  r"""Creates a called federated reduce.

            Call
           /    \
  Intrinsic      Tuple
                 |
                 [Comp, Comp, Comp]

  Args:
    value: A `building_blocks.ComputationBuildingBlock` to use as the value.
    zero: A `building_blocks.ComputationBuildingBlock` to use as the initial
      value.
    op: A `building_blocks.ComputationBuildingBlock` to use as the op function.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(zero, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(op, building_blocks.ComputationBuildingBlock)
  # The reduction's result lives at the server, with the op's result type as
  # its member.
  server_result_type = computation_types.FederatedType(
      op.type_signature.result, placement_literals.SERVER)
  reduce_intrinsic = building_blocks.Intrinsic(
      intrinsic_defs.FEDERATED_REDUCE.uri,
      computation_types.FunctionType((
          type_utils.to_non_all_equal(value.type_signature),
          zero.type_signature,
          op.type_signature,
      ), server_result_type))
  arg_tuple = building_blocks.Tuple((value, zero, op))
  return building_blocks.Call(reduce_intrinsic, arg_tuple)
def create_federated_sum(value):
  r"""Creates a called federated sum.

            Call
           /    \
  Intrinsic      Comp

  Args:
    value: A `building_blocks.ComputationBuildingBlock` to use as the value.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  sum_result_type = computation_types.FederatedType(
      value.type_signature.member, placement_literals.SERVER)
  sum_intrinsic = building_blocks.Intrinsic(
      intrinsic_defs.FEDERATED_SUM.uri,
      computation_types.FunctionType(
          type_utils.to_non_all_equal(value.type_signature), sum_result_type))
  return building_blocks.Call(sum_intrinsic, value)
def create_federated_unzip(value):
  r"""Creates a tuple of called federated maps or applies.

                Block
               /     \
  [value=Comp]        Tuple
                      |
                      [Call, Call, ...]
                      /    \
             Intrinsic      Tuple
                            |
               [Lambda(arg), Ref(value)]
                          \
                           Sel(i)
                               \
                                Ref(arg)

  This function returns a tuple of federated values given a `value` with a
  federated tuple type signature: each element of the result maps/applies a
  selector lambda over `value` to extract one element.

  Args:
    value: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
      of type `computation_types.NamedTupleType` containing at least one
      element.

  Returns:
    A `building_blocks.Block`.

  Raises:
    TypeError: If any of the types do not match.
    ValueError: If `value` does not contain any elements.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  named_type_signatures = anonymous_tuple.to_elements(
      value.type_signature.member)
  length = len(named_type_signatures)
  if length == 0:
    # Fix: this message previously referred to `federated_zip` (copy-paste);
    # name the operation actually being performed.
    raise ValueError('federated_unzip is only supported on non-empty tuples.')
  # Bind `value` once under a local reference so it isn't duplicated per
  # element.
  value_ref = building_blocks.Reference('value', value.type_signature)
  elements = []
  fn_ref = building_blocks.Reference('arg', named_type_signatures)
  for index, (name, _) in enumerate(named_type_signatures):
    # One map/apply per element, each selecting a single index.
    sel = building_blocks.Selection(fn_ref, index=index)
    fn = building_blocks.Lambda(fn_ref.name, fn_ref.type_signature, sel)
    intrinsic = create_federated_map_or_apply(fn, value_ref)
    elements.append((name, intrinsic))
  result = building_blocks.Tuple(elements)
  symbols = ((value_ref.name, value),)
  return building_blocks.Block(symbols, result)
def create_federated_value(value, placement):
  r"""Creates a called federated value.

            Call
           /    \
  Intrinsic      Comp

  Args:
    value: A `building_blocks.ComputationBuildingBlock` to use as the value.
    placement: A `placement_literals.PlacementLiteral` to use as the placement.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  if placement is placement_literals.CLIENTS:
    value_intrinsic_uri = intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri
  elif placement is placement_literals.SERVER:
    value_intrinsic_uri = intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri
  else:
    raise TypeError('Unsupported placement {}.'.format(placement))
  # Placing a single value yields an all-equal federated value.
  placed_type = computation_types.FederatedType(
      value.type_signature, placement, all_equal=True)
  value_intrinsic = building_blocks.Intrinsic(
      value_intrinsic_uri,
      computation_types.FunctionType(value.type_signature, placed_type))
  return building_blocks.Call(value_intrinsic, value)
def _create_flat_federated_zip(value):
  r"""Private function to create a called federated zip for a non-nested type.

            Call
           /    \
  Intrinsic      Tuple
                 |
                 [Comp, Comp]

  This function returns a federated tuple given a `value` with a tuple of
  federated values type signature.

  Args:
    value: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
      of type `computation_types.NamedTupleType` containing at least one
      element.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
    ValueError: If `value` does not contain any elements.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  named_type_signatures = anonymous_tuple.to_elements(value.type_signature)
  # Element names are dropped by the zipping chain and restored at the end.
  names_to_add = [name for name, _ in named_type_signatures]
  length = len(named_type_signatures)
  if length == 0:
    raise ValueError('federated_zip is only supported on non-empty tuples.')
  first_name, first_type_signature = named_type_signatures[0]
  # Choose map vs. apply based on the (common) placement of the elements.
  if first_type_signature.placement == placement_literals.CLIENTS:
    map_fn = create_federated_map
  elif first_type_signature.placement == placement_literals.SERVER:
    map_fn = create_federated_apply
  else:
    raise TypeError('Unsupported placement {}.'.format(
        first_type_signature.placement))
  if length == 1:
    # Single element: just wrap it in a one-tuple via map/apply; no zipping
    # intrinsic is needed.
    ref = building_blocks.Reference('arg', first_type_signature.member)
    values = building_blocks.Tuple(((first_name, ref),))
    fn = building_blocks.Lambda(ref.name, ref.type_signature, values)
    sel = building_blocks.Selection(value, index=0)
    return map_fn(fn, sel)
  else:
    # Zip pairwise into a nested chain, flatten the chain with a mapped
    # append function, then re-attach the original element names.
    zipped_args = _create_chain_zipped_values(value)
    append_fn = _create_fn_to_append_chain_zipped_values(value)
    unnamed_zip = map_fn(append_fn, zipped_args)
    return create_named_federated_tuple(unnamed_zip, names_to_add)
def create_federated_zip(value):
  r"""Creates a called federated zip.

  This function accepts a value whose type signature is a (potentially) nested
  tuple structure of federated values all with the same placement, and uses
  one of the federated_zip intrinsics (at client or at server) to promote the
  placement to the highest level. E.g., A value of type '<A@S, <<B@S>, C@S>>'
  would be mapped to a value of type '<A, <<B>, C>>@S'.

  Args:
    value: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
      of type `computation_types.NamedTupleType` that may contain other nested
      `computation_types.NamedTupleTypes` bottoming out in at least one element
      of type `computation_types.FederatedType`. These federated types must be
      at the same placement.

  Returns:
    A `building_blocks.Call` whose type signature is now a federated
    `computation_types.NamedTupleType`, placed at the same placement as the
    leaves of `value`.

  Raises:
    TypeError: If any of the types do not match.
    ValueError: If `value` does not contain any elements.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(value.type_signature,
                          computation_types.NamedTupleType)
  # If the type signature is flat, just call _create_flat_federated_zip.
  elements = anonymous_tuple.to_elements(value.type_signature)
  if all([(isinstance(type_sig, computation_types.FederatedType))
          for (_, type_sig) in elements]):
    return _create_flat_federated_zip(value)
  all_placements = set()
  nested_selections = []

  def _make_nested_selections(nested):
    """Generates list of selections from nested representation."""
    # Depth-first walk: collects every federated leaf (as a selection chain)
    # into `nested_selections` and records its placement in `all_placements`.
    if isinstance(nested.type_signature, computation_types.FederatedType):
      all_placements.add(nested.type_signature.placement)
      nested_selections.append(nested)
    elif isinstance(nested.type_signature, computation_types.NamedTupleType):
      for i in range(len(nested.type_signature)):
        inner_selection = building_blocks.Selection(nested, index=i)
        _make_nested_selections(inner_selection)
    else:
      raise TypeError('Only type signatures consisting of structures of '
                      'NamedTupleType bottoming out in FederatedType can be '
                      'used in federated_zip.')

  _make_nested_selections(value)
  if not all_placements:
    raise TypeError('federated_zip is only supported on nested tuples '
                    'containing at least one FederatedType.')
  elif len(all_placements) > 1:
    raise TypeError('federated_zip requires all nested FederatedTypes to '
                    'have the same placement.')
  placement = all_placements.pop()
  # Zip the flattened leaves, then rebuild the original nesting below.
  flat = building_blocks.Tuple(nested_selections)
  flat_zipped = _create_flat_federated_zip(flat)
  # Every building block under the lambda is being constructed below, so it is
  # safe to have a fixed static name for the reference-- we don't need to worry
  # about namespace issues as usual.
  ref = building_blocks.Reference('x', flat_zipped.type_signature.member)

  def _make_flat_selections(type_signature, index):
    """Generates nested struct of selections from flattened representation."""
    # Mirrors `_make_nested_selections`: consumes leaves of the flat zipped
    # tuple in order, returning (rebuilt structure, next unconsumed index).
    if isinstance(type_signature, computation_types.FederatedType):
      return building_blocks.Selection(ref, index=index), index + 1
    elif isinstance(type_signature, computation_types.NamedTupleType):
      elements = anonymous_tuple.to_elements(type_signature)
      return_tuple = []
      for name, element in elements:
        selection, index = _make_flat_selections(element, index)
        return_tuple.append((name, selection))
      return building_blocks.Tuple(return_tuple), index
    else:
      # This shouldn't be possible since the structure was already traversed
      # above.
      raise TypeError('Only type signatures consisting of structures of '
                      'NamedTupleType bottoming out in FederatedType can be '
                      'used in federated_zip.')

  repacked, _ = _make_flat_selections(value.type_signature, 0)
  lam = building_blocks.Lambda('x', ref.type_signature, repacked)
  if placement == placement_literals.CLIENTS:
    return create_federated_map(lam, flat_zipped)
  elif placement == placement_literals.SERVER:
    return create_federated_apply(lam, flat_zipped)
  else:
    raise TypeError('Unsupported placement {}.'.format(placement))
def create_generic_constant(type_spec, scalar_value):
  """Creates constant for a combination of federated, tuple and tensor types.

  Args:
    type_spec: Instance of `computation_types.Type` containing only federated,
      tuple or tensor types for which we wish to construct a generic constant.
      May also be something convertible to a `computation_types.Type` via
      `computation_types.to_type`.
    scalar_value: The scalar value we wish this constant to have.

  Returns:
    Instance of `building_blocks.ComputationBuildingBlock` representing
    `scalar_value` packed into `type_spec`.

  Raises:
    TypeError: If types don't match their specification in the args section.
      Notice validation of consistency of `type_spec` with `scalar_value` is
      not the responsibility of this function.
  """
  type_spec = computation_types.to_type(type_spec)
  py_typecheck.check_type(type_spec, computation_types.Type)
  inferred_scalar_value_type = type_utils.infer_type(scalar_value)
  if (not isinstance(inferred_scalar_value_type, computation_types.TensorType)
      or inferred_scalar_value_type.shape != tf.TensorShape(())):
    raise TypeError(
        'Must pass a scalar value to `create_generic_constant`; encountered a '
        'value {}'.format(scalar_value))
  if not type_utils.type_tree_contains_only(type_spec, (
      computation_types.FederatedType,
      computation_types.NamedTupleType,
      computation_types.TensorType,
  )):
    # Fix: previously raised a bare `TypeError` with no message.
    raise TypeError(
        'Must pass a type composed only of federated, tuple and tensor types '
        'to `create_generic_constant`; encountered the type {}'.format(
            type_spec))
  if type_utils.type_tree_contains_only(type_spec, (
      computation_types.NamedTupleType,
      computation_types.TensorType,
  )):
    # No federated types anywhere: a single TF constant suffices.
    return create_tensorflow_constant(type_spec, scalar_value)
  elif isinstance(type_spec, computation_types.FederatedType):
    # Build the unplaced constant, then place it with the appropriate
    # `federated_value_at_*` intrinsic.
    unplaced_zero = create_tensorflow_constant(type_spec.member, scalar_value)
    if type_spec.placement == placement_literals.CLIENTS:
      placement_uri = intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri
    elif type_spec.placement == placement_literals.SERVER:
      placement_uri = intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri
    else:
      # Fix: previously an unsupported placement fell through to a `NameError`
      # on `placement_function`; raise a clear error instead.
      raise TypeError('Unsupported placement {}.'.format(type_spec.placement))
    placement_federated_type = computation_types.FederatedType(
        type_spec.member, type_spec.placement, all_equal=True)
    placement_fn_type = computation_types.FunctionType(
        type_spec.member, placement_federated_type)
    placement_function = building_blocks.Intrinsic(placement_uri,
                                                   placement_fn_type)
    return building_blocks.Call(placement_function, unplaced_zero)
  elif isinstance(type_spec, computation_types.NamedTupleType):
    # Recurse per element, then restore the tuple's element names.
    elements = []
    for k in range(len(type_spec)):
      elements.append(create_generic_constant(type_spec[k], scalar_value))
    names = [name for name, _ in anonymous_tuple.iter_elements(type_spec)]
    packed_elements = building_blocks.Tuple(elements)
    named_tuple = create_named_tuple(packed_elements, names)
    return named_tuple
  else:
    raise ValueError(
        'The type_spec {} has slipped through all our '
        'generic constant cases, and failed to raise.'.format(type_spec))
def _create_chain_zipped_values(value):
  r"""Creates a chain of called federated zip with two values.

                  Block--------
                 /             \
  [value=Tuple]                 Call
         |                     /    \
  [Comp1,             Intrinsic      Tuple
   Comp2,                            |
   ...]                              [Call,    Sel(n)]
                                     /    \         \
                            Intrinsic      Tuple     Ref(value)
                                           |
                                           [Sel(0),     Sel(1)]
                                                  \           \
                                                   Ref(value)  Ref(value)

  NOTE: This function is intended to be used in conjunction with
  `_create_fn_to_append_chain_zipped_values` and will drop the tuple names.
  The names will be added back to the resulting computation when the zipped
  values are mapped to a function that flattens the chain. This nested zip ->
  flatten structure must be used since length of a named tuple type in the TFF
  type system is an element of the type proper. That is, a named tuple type of
  length 2 is a different type than a named tuple type of length 3, they are
  not simply items with the same type and different values, as would be the
  case if you were thinking of these as Python `list`s. It may be better to
  think of named tuple types in TFF as more like `struct`s.

  Args:
    value: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
      of type `computation_types.NamedTupleType` containing at least two
      elements.

  Returns:
    A `building_blocks.Block`.

  Raises:
    TypeError: If any of the types do not match.
    ValueError: If `value` does not contain at least two elements.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  named_type_signatures = anonymous_tuple.to_elements(value.type_signature)
  length = len(named_type_signatures)
  if length < 2:
    raise ValueError(
        'Expected a value with at least two elements, received {} elements.'
        .format(named_type_signatures))
  # Bind `value` once under a local reference so each selection below refers
  # to the same bound computation.
  ref = building_blocks.Reference('value', value.type_signature)
  symbols = ((ref.name, value),)
  sel_0 = building_blocks.Selection(ref, index=0)
  result = sel_0
  # Left-fold over the elements, pairwise-zipping the accumulated result with
  # the next element: zip(zip(zip(e0, e1), e2), ...).
  for i in range(1, length):
    sel = building_blocks.Selection(ref, index=i)
    values = building_blocks.Tuple((result, sel))
    result = create_zip_two_values(values)
  return building_blocks.Block(symbols, result)
def create_zip_two_values(value):
  r"""Creates a called federated zip with two values.

            Call
           /    \
  Intrinsic      Tuple
                 |
                 [Comp1, Comp2]

  Notice that this function will drop any names associated to the two-tuple it
  is processing. This is necessary due to the type signature of the
  underlying federated zip intrinsic, `<T@P,U@P>-><T,U>@P`. Keeping names
  here would violate this type signature. The names are cached at a higher
  level than this function, and appended to the resulting tuple in a single
  call to `federated_map` or `federated_apply` before the resulting structure
  is sent back to the caller.

  Args:
    value: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
      of type `computation_types.NamedTupleType` containing exactly two
      elements.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
    ValueError: If `value` does not contain exactly two elements.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  named_type_signatures = anonymous_tuple.to_elements(value.type_signature)
  length = len(named_type_signatures)
  if length != 2:
    raise ValueError(
        'Expected a value with exactly two elements, received {} elements.'
        .format(named_type_signatures))
  # Choose the zip intrinsic (and all_equal semantics) by the placement of the
  # first element; both elements are expected to share it.
  placement = value[0].type_signature.placement
  if placement is placement_literals.CLIENTS:
    uri = intrinsic_defs.FEDERATED_ZIP_AT_CLIENTS.uri
    all_equal = False
  elif placement is placement_literals.SERVER:
    uri = intrinsic_defs.FEDERATED_ZIP_AT_SERVER.uri
    all_equal = True
  else:
    raise TypeError('Unsupported placement {}.'.format(placement))
  # Build the intrinsic's parameter type with names stripped (see docstring).
  elements = []
  for _, type_signature in named_type_signatures:
    federated_type = computation_types.FederatedType(type_signature.member,
                                                     placement, all_equal)
    elements.append((None, federated_type))
  parameter_type = computation_types.NamedTupleType(elements)
  result_type = computation_types.FederatedType(
      [(None, e.member) for _, e in named_type_signatures], placement,
      all_equal)
  intrinsic_type = computation_types.FunctionType(parameter_type, result_type)
  intrinsic = building_blocks.Intrinsic(uri, intrinsic_type)
  return building_blocks.Call(intrinsic, value)
def _create_fn_to_append_chain_zipped_values(value):
  r"""Creates a function to append a chain of zipped values.

  Lambda(arg3)
            \
             append([Call,    Sel(1)])
                    /    \          \
       Lambda(arg2)      Sel(0)      Ref(arg3)
                 \             \
                  \             Ref(arg3)
                   \
                    append([Call,    Sel(1)])
                           /    \          \
              Lambda(arg1)      Sel(0)      Ref(arg2)
                          \           \
                           \           Ref(arg2)
                            \
                             Ref(arg1)

  Note that this function will not respect any names it is passed; names
  for tuples will be cached at a higher level than this function and added
  back in a single call to federated map or federated apply.

  Args:
    value: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
      of type `computation_types.NamedTupleType` containing at least two
      elements.

  Returns:
    A `building_blocks.Lambda` which, applied to the chain produced by
    `_create_chain_zipped_values`, flattens it into a single tuple.

  Raises:
    TypeError: If any of the types do not match.
    ValueError: If `value` does not contain at least two elements.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  named_type_signatures = anonymous_tuple.to_elements(value.type_signature)
  length = len(named_type_signatures)
  if length < 2:
    raise ValueError(
        'Expected a value with at least two elements, received {} elements.'
        .format(named_type_signatures))
  _, first_type_signature = named_type_signatures[0]
  _, second_type_signature = named_type_signatures[1]
  # Base case: the innermost lambda is the identity on the first zipped pair.
  ref_type = computation_types.NamedTupleType((
      first_type_signature.member,
      second_type_signature.member,
  ))
  ref = building_blocks.Reference('arg', ref_type)
  fn = building_blocks.Lambda(ref.name, ref.type_signature, ref)
  # Each remaining element wraps the previous lambda: flatten the left side
  # (via the inner lambda) and append the right side as the last element.
  for _, type_signature in named_type_signatures[2:]:
    ref_type = computation_types.NamedTupleType((
        fn.type_signature.parameter,
        type_signature.member,
    ))
    ref = building_blocks.Reference('arg', ref_type)
    sel_0 = building_blocks.Selection(ref, index=0)
    call = building_blocks.Call(fn, sel_0)
    sel_1 = building_blocks.Selection(ref, index=1)
    result = create_computation_appending(call, sel_1)
    fn = building_blocks.Lambda(ref.name, ref.type_signature, result)
  return fn
def create_sequence_map(fn, arg):
  r"""Creates a called sequence map.

            Call
           /    \
  Intrinsic      Tuple
                 |
                 [Comp, Comp]

  Args:
    fn: A `building_blocks.ComputationBuildingBlock` to use as the function.
    arg: A `building_blocks.ComputationBuildingBlock` to use as the argument.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  for comp in (fn, arg):
    py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
  # Mapping fn over a sequence yields a sequence of fn's result type.
  mapped_type = computation_types.SequenceType(fn.type_signature.result)
  map_signature = computation_types.FunctionType(
      (fn.type_signature, arg.type_signature), mapped_type)
  map_intrinsic = building_blocks.Intrinsic(intrinsic_defs.SEQUENCE_MAP.uri,
                                            map_signature)
  packed_args = building_blocks.Tuple((fn, arg))
  return building_blocks.Call(map_intrinsic, packed_args)
def create_sequence_reduce(value, zero, op):
  r"""Creates a called sequence reduce.

            Call
           /    \
  Intrinsic      Tuple
                 |
                 [Comp, Comp, Comp]

  Args:
    value: A `building_blocks.ComputationBuildingBlock` to use as the value.
    zero: A `building_blocks.ComputationBuildingBlock` to use as the initial
      value.
    op: A `building_blocks.ComputationBuildingBlock` to use as the op function.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  for comp in (value, zero, op):
    py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
  # Reducing yields the result type of the reduction operator.
  reduce_signature = computation_types.FunctionType((
      value.type_signature,
      zero.type_signature,
      op.type_signature,
  ), op.type_signature.result)
  reduce_intrinsic = building_blocks.Intrinsic(
      intrinsic_defs.SEQUENCE_REDUCE.uri, reduce_signature)
  packed_args = building_blocks.Tuple((value, zero, op))
  return building_blocks.Call(reduce_intrinsic, packed_args)
def create_sequence_sum(value):
  r"""Creates a called sequence sum.

            Call
           /    \
  Intrinsic      Comp

  Args:
    value: A `building_blocks.ComputationBuildingBlock` to use as the value.

  Returns:
    A `building_blocks.Call`.

  Raises:
    TypeError: If any of the types do not match.
  """
  py_typecheck.check_type(value, building_blocks.ComputationBuildingBlock)
  # Summing a sequence of T collapses it to a single T.
  sum_signature = computation_types.FunctionType(
      value.type_signature, value.type_signature.element)
  sum_intrinsic = building_blocks.Intrinsic(intrinsic_defs.SEQUENCE_SUM.uri,
                                            sum_signature)
  return building_blocks.Call(sum_intrinsic, value)
def _create_naming_function(tuple_type_to_name, names_to_add):
  """Private function to construct lambda naming a given tuple type.

  Args:
    tuple_type_to_name: Instance of `computation_types.NamedTupleType`, the
      type of the argument which we wish to name.
    names_to_add: Python `list` or `tuple`, the names we wish to give to
      `tuple_type_to_name`.

  Returns:
    An instance of `building_blocks.Lambda` representing a function which will
    take an argument of type `tuple_type_to_name` and return a tuple with the
    same elements, but with names in `names_to_add` attached.

  Raises:
    ValueError: If `tuple_type_to_name` and `names_to_add` have different
      lengths.
  """
  py_typecheck.check_type(tuple_type_to_name, computation_types.NamedTupleType)
  if len(names_to_add) != len(tuple_type_to_name):
    raise ValueError(
        'Number of elements in `names_to_add` must match number of element in '
        'the named tuple type `tuple_type_to_name`; here, `names_to_add` has '
        '{} elements and `tuple_type_to_name` has {}.'.format(
            len(names_to_add), len(tuple_type_to_name)))
  arg_ref = building_blocks.Reference('x', tuple_type_to_name)
  # Pair each requested name with a selection of the corresponding element.
  named_elements = [(name, building_blocks.Selection(arg_ref, index=index))
                    for index, name in enumerate(names_to_add)]
  named_result = building_blocks.Tuple(named_elements)
  return building_blocks.Lambda('x', arg_ref.type_signature, named_result)
def create_named_federated_tuple(tuple_to_name, names_to_add):
  """Name tuple elements with names in `names_to_add`.

  Certain intrinsics, e.g. `federated_zip`, accept and produce only unnamed
  tuples, which forces any pre-existing names to be dropped. This function is
  the general remedy: it maps a renaming function over the federated value so
  that a tuple can be renamed after passing through a name-dropping operation.

  Args:
    tuple_to_name: Instance of `building_blocks.ComputationBuildingBlock` of
      type `computation_types.FederatedType` with
      `computation_types.NamedTupleType` member, to populate with names from
      `names_to_add`.
    names_to_add: Python `tuple` or `list` containing instances of type `str`
      or `None`, the names to give to `tuple_to_name`.

  Returns:
    An instance of `building_blocks.ComputationBuildingBlock` representing a
    federated tuple with the same elements as `tuple_to_name` but with the
    names from `names_to_add` attached to the type signature. If these names
    are already present, the applied function is the identity.

  Raises:
    TypeError: If the types do not match the description above.
  """
  py_typecheck.check_type(names_to_add, (list, tuple))
  if any(not (name is None or isinstance(name, str)) for name in names_to_add):
    raise TypeError('`names_to_add` must contain only instances of `str` or '
                    'NoneType; you have passed in {}'.format(names_to_add))
  py_typecheck.check_type(tuple_to_name,
                          building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(tuple_to_name.type_signature,
                          computation_types.FederatedType)
  naming_fn = _create_naming_function(tuple_to_name.type_signature.member,
                                      names_to_add)
  return create_federated_map_or_apply(naming_fn, tuple_to_name)
def create_named_tuple(comp, names):
  """Creates a computation that applies `names` to `comp`.

  Args:
    comp: A `building_blocks.ComputationBuildingBlock` with a `type_signature`
      of type `computation_types.NamedTupleType`.
    names: Python `tuple` or `list` containing instances of type `str` or
      `None`, the names to apply to `comp`.

  Returns:
    A `building_blocks.ComputationBuildingBlock` representing a tuple with the
    elements from `comp` and the names from `names` attached to the
    `type_signature` of those elements.

  Raises:
    TypeError: If the types do not match.
  """
  py_typecheck.check_type(names, (list, tuple))
  if not all(x is None or isinstance(x, str) for x in names):
    raise TypeError('Expected `names` containing only instances of `str` or '
                    '`None`, found {}'.format(names))
  py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(comp.type_signature, computation_types.NamedTupleType)
  naming_fn = _create_naming_function(comp.type_signature, names)
  return building_blocks.Call(naming_fn, comp)
def create_zip(comp):
  r"""Returns a computation which zips `comp`.

  Returns the following computation where `x` is `comp` unless `comp` is a
  Reference, in which case the Reference is inlined and the Tuple is returned.

           Block
          /     \
  [comp=x]       Tuple
                 |
                 [Tuple, Tuple]
                  |      |
          [Sel(0), ...]  [Sel(1), ...]

  The returned computation intentionally drops names from the tuples, otherwise
  it would be possible for the resulting type signature to contain a Tuple where
  two elements have the same name and this is not allowed. It is left up to the
  caller to decide if and where to add the names back.

  Args:
    comp: The computation building block in which to perform the merges. Must
      have a named-tuple type whose elements are all named tuples of equal
      length.

  Raises:
    TypeError: If `comp` is not a tuple of equal-length tuples.
  """
  py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_type(comp.type_signature, computation_types.NamedTupleType)
  named_type_signatures = anonymous_tuple.to_elements(comp.type_signature)
  _, first_type_signature = named_type_signatures[0]
  py_typecheck.check_type(first_type_signature,
                          computation_types.NamedTupleType)
  # All inner tuples must share the same arity for the transpose to be defined.
  length = len(first_type_signature)
  for _, type_signature in named_type_signatures:
    py_typecheck.check_type(type_signature, computation_types.NamedTupleType)
    if len(type_signature) != length:
      raise TypeError(
          'Expected a NamedTupleType containing NamedTupleTypes with the same '
          'length, found: {}'.format(comp.type_signature))
  # Bind `comp` to a fresh reference unless it already is one, so the value is
  # only evaluated once inside the Block below.
  if not isinstance(comp, building_blocks.Reference):
    name_generator = unique_name_generator(comp)
    name = next(name_generator)
    ref = building_blocks.Reference(name, comp.type_signature)
  else:
    ref = comp
  # Transpose: element (row, column) of the input becomes element
  # (column, row) of the output.
  rows = []
  for column in range(len(first_type_signature)):
    columns = []
    for row in range(len(named_type_signatures)):
      sel_row = building_blocks.Selection(ref, index=row)
      sel_column = building_blocks.Selection(sel_row, index=column)
      columns.append(sel_column)
    tup = building_blocks.Tuple(columns)
    rows.append(tup)
  tup = building_blocks.Tuple(rows)
  if not isinstance(comp, building_blocks.Reference):
    return building_blocks.Block(((ref.name, comp),), tup)
  else:
    return tup
def _check_generic_operator_type(type_spec):
  """Checks that `type_spec` can be the signature of args to a generic op."""
  # Generic operators must be reducible to TensorFlow plus placement, so only
  # federated, tuple and tensor types may appear anywhere in the type tree.
  if not type_utils.type_tree_contains_only(type_spec, (
      computation_types.FederatedType,
      computation_types.NamedTupleType,
      computation_types.TensorType,
  )):
    raise TypeError(
        'Generic operators are only implemented for arguments both containing '
        'only federated, tuple and tensor types; you have passed an argument '
        'of type {} '.format(type_spec))
  # Binary operators take exactly two arguments, packed as a 2-tuple.
  if not (isinstance(type_spec, computation_types.NamedTupleType) and
          len(type_spec) == 2):
    raise TypeError(
        'We are trying to construct a generic operator declaring argument that '
        'is not a two-tuple, the type {}.'.format(type_spec))
  # The second element must either match the first exactly or be a scalar
  # upcastable to it (same dtype as the first element's tensor leaves).
  if not type_utils.is_binary_op_with_upcast_compatible_pair(
      type_spec[0], type_spec[1]):
    raise TypeError(
        'The two-tuple you have passed in is incompatible with upcasted '
        'binary operators. You have passed the tuple type {}, which fails the '
        'check that the two members of the tuple are either the same type, or '
        'the second is a scalar with the same dtype as the leaves of the '
        'first. See `type_utils.is_binary_op_with_upcast_compatible_pair` for '
        'more details.'.format(type_spec))
def create_binary_operator_with_upcast(type_signature, operator):
  """Creates lambda upcasting its argument and applying `operator`.

  The concept of upcasting is explained further in the docstring for
  `apply_binary_operator_with_upcast`.

  Notice that since we are constructing a function here, e.g. for the body
  of an intrinsic, the function we are constructing must be reducible to
  TensorFlow. Therefore `type_signature` can only have named tuple or tensor
  type elements; that is, we cannot handle federated types here in a generic
  way.

  Args:
    type_signature: Value convertible to `computation_types.NamedTupleType`,
      with two elements, both of the same type or the second able to be upcast
      to the first, as explained in `apply_binary_operator_with_upcast`, and
      both containing only tuples and tensors in their type tree.
    operator: Callable defining the operator.

  Returns:
    A `building_blocks.Lambda` encapsulating a function which
    upcasts the second element of its argument and applies the binary
    operator.
  """
  py_typecheck.check_callable(operator)
  type_signature = computation_types.to_type(type_signature)
  _check_generic_operator_type(type_signature)
  ref_to_arg = building_blocks.Reference('binary_operator_arg', type_signature)
  def _pack_into_type(to_pack, type_spec):
    """Pack Tensor value `to_pack` into the nested structure `type_spec`."""
    # Recursively broadcast the scalar into every tensor leaf of `type_spec`.
    # Other type kinds are ruled out by `_check_generic_operator_type`, so the
    # implicit `None` fall-through below is unreachable in practice.
    if isinstance(type_spec, computation_types.NamedTupleType):
      elems = anonymous_tuple.to_elements(type_spec)
      packed_elems = [(elem_name, _pack_into_type(to_pack, elem_type))
                      for elem_name, elem_type in elems]
      return building_blocks.Tuple(packed_elems)
    elif isinstance(type_spec, computation_types.TensorType):
      expand_fn = create_tensorflow_to_broadcast_scalar(
          to_pack.type_signature.dtype, type_spec.shape)
      return building_blocks.Call(expand_fn, to_pack)
  y_ref = building_blocks.Selection(ref_to_arg, index=1)
  first_arg = building_blocks.Selection(ref_to_arg, index=0)
  # Upcast the second argument only when the two sides differ; otherwise pass
  # it through untouched.
  if type_utils.are_equivalent_types(first_arg.type_signature,
                                     y_ref.type_signature):
    second_arg = y_ref
  else:
    second_arg = _pack_into_type(y_ref, first_arg.type_signature)
  fn = create_tensorflow_binary_operator(first_arg.type_signature, operator)
  packed = building_blocks.Tuple([first_arg, second_arg])
  operated = building_blocks.Call(fn, packed)
  lambda_encapsulating_op = building_blocks.Lambda(ref_to_arg.name,
                                                   ref_to_arg.type_signature,
                                                   operated)
  return lambda_encapsulating_op
def apply_binary_operator_with_upcast(arg, operator):
  """Constructs result of applying `operator` to `arg` upcasting if appropriate.

  Notice `arg` here must be of federated type, with a named tuple member of
  length 2, or a named tuple type of length 2. If the named tuple type of `arg`
  satisfies certain conditions (that is, there is only a single tensor dtype in
  the first element of `arg`, and the second element represents a scalar of
  this dtype), the second element will be upcast to match the first. Here this
  means it will be pushed into a nested structure matching the structure of the
  first element of `arg`. For example, it makes perfect sense to divide a model
  of type `<a=float32[784],b=float32[10]>` by a scalar of type `float32`, but
  the binary operator constructors we have implemented only take arguments of
  type `<T, T>`. Therefore in this case we would broadcast the `float` argument
  to the `tuple` type, before constructing a binary operator which divides
  pointwise.

  Args:
    arg: `building_blocks.ComputationBuildingBlock` of federated type whose
      `member` attribute is a named tuple type of length 2, or named tuple type
      of length 2.
    operator: Callable representing binary operator to apply to the 2-tuple
      represented by the federated `arg`.

  Returns:
    Instance of `building_blocks.ComputationBuildingBlock`
    encapsulating the result of formally applying `operator` to
    `arg[0], `arg[1]`, upcasting `arg[1]` in the condition described above.

  Raises:
    TypeError: If the types don't match.
  """
  py_typecheck.check_type(arg, building_blocks.ComputationBuildingBlock)
  py_typecheck.check_callable(operator)
  # Accept either a federated tuple (the operator is applied under the
  # placement) or a plain unplaced tuple.
  if isinstance(arg.type_signature, computation_types.FederatedType):
    py_typecheck.check_type(arg.type_signature.member,
                            computation_types.NamedTupleType)
    tuple_type = arg.type_signature.member
  elif isinstance(arg.type_signature, computation_types.NamedTupleType):
    tuple_type = arg.type_signature
  else:
    raise TypeError(
        'Generic binary operators are only implemented for federated tuple and '
        'unplaced tuples; you have passed {}.'.format(arg.type_signature))
  lambda_encapsulating_op = create_binary_operator_with_upcast(
      tuple_type, operator)
  if isinstance(arg.type_signature, computation_types.FederatedType):
    called = create_federated_map_or_apply(lambda_encapsulating_op, arg)
  else:
    called = building_blocks.Call(lambda_encapsulating_op, arg)
  return called
| 42.031952
| 92
| 0.716356
|
acfeb856a7fa7e64493bda2cf5f627e594135cb9
| 1,881
|
py
|
Python
|
julia/static.py
|
arcturus5340/julia
|
6bb7b6e956edc9b16f773926978679dd6031e959
|
[
"MIT"
] | 13
|
2018-10-20T17:31:00.000Z
|
2022-03-06T23:02:48.000Z
|
julia/static.py
|
arcturus5340/julia
|
6bb7b6e956edc9b16f773926978679dd6031e959
|
[
"MIT"
] | 4
|
2019-10-08T18:24:38.000Z
|
2021-06-10T20:06:26.000Z
|
julia/static.py
|
arcturus5340/julia
|
6bb7b6e956edc9b16f773926978679dd6031e959
|
[
"MIT"
] | 6
|
2018-10-29T00:58:55.000Z
|
2020-03-25T14:02:18.000Z
|
import mimetypes
import posixpath
from pathlib import Path
from django.http import (
FileResponse, Http404, HttpResponseNotModified,
)
from django.utils._os import safe_join
from django.utils.http import http_date
from django.views import static
from django.utils import timezone
from django.core.exceptions import PermissionDenied
from contest.models import Task
from auth.models import User
def secure_serve(request, path, document_root=None, show_indexes=False):
    """Serve a submission file, hiding other users' solutions during a contest.

    Works like ``django.views.static.serve`` but, while the relevant task's
    contest is still running, only the submission's owner may download it.

    Args:
        request: The current ``HttpRequest``.
        path: URL path relative to ``document_root``. Assumed to begin with
            ``"<username>_<task_id>_..."`` — TODO confirm this naming scheme
            against the upload code.
        document_root: Filesystem directory the served files live under.
        show_indexes: Whether directory listings are allowed.

    Returns:
        A ``FileResponse`` with the file contents, or
        ``HttpResponseNotModified`` when the client's cache is current.

    Raises:
        Http404: If the path does not exist, or is a directory while
            ``show_indexes`` is disabled.
        PermissionDenied: If the contest is still running and the requester is
            not the file's owner.
    """
    path = posixpath.normpath(path).lstrip('/')
    fullpath = Path(safe_join(document_root, path))
    if fullpath.is_dir():
        if show_indexes:
            return static.directory_index(path, fullpath)
        raise Http404("Directory indexes are not allowed here.")
    if not fullpath.exists():
        raise Http404('“%(path)s” does not exist' % {'path': fullpath})
    # if request.user.is_anonymous:
    #     raise PermissionDenied('Wait until the end of the contest to see other contestants decisions')
    # Recover the owner and task from the "<username>_<task_id>_..." path.
    username, task_id, *_ = path.split('_')
    contest = Task.objects.get(id=int(task_id)).contest
    # BUG FIX: `is` compared object identity against a freshly fetched User
    # instance, which can never be the same object as `request.user`, so
    # owners were locked out of their own files while the contest ran.
    # Django model equality compares primary keys, which is what we want.
    is_owner = request.user == User.objects.get(username=username)
    if (timezone.now() < contest.start_time + contest.duration) and not is_owner:
        raise PermissionDenied('Wait until the end of the contest to see other contestants decisions')
    statobj = fullpath.stat()
    if not static.was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
                                     statobj.st_mtime, statobj.st_size):
        return HttpResponseNotModified()
    content_type, encoding = mimetypes.guess_type(str(fullpath))
    content_type = content_type or 'application/octet-stream'
    response = FileResponse(fullpath.open('rb'), content_type=content_type)
    response["Last-Modified"] = http_date(statobj.st_mtime)
    if encoding:
        # Let the client decompress (e.g. gzip-compressed artifacts).
        response["Content-Encoding"] = encoding
    return response
| 40.021277
| 116
| 0.738969
|
acfeb8f4c2a1c7754050e2ebce47e60170850355
| 2,065
|
py
|
Python
|
base/pylib/tuple.py
|
brownplt/lambda-py
|
c3ee39502c8953d36b886e5a203f2eb51d2f495b
|
[
"Apache-2.0"
] | 25
|
2015-04-16T04:31:49.000Z
|
2022-03-10T15:53:28.000Z
|
base/pylib/tuple.py
|
brownplt/lambda-py
|
c3ee39502c8953d36b886e5a203f2eb51d2f495b
|
[
"Apache-2.0"
] | 1
|
2018-11-21T22:40:02.000Z
|
2018-11-26T17:53:11.000Z
|
base/pylib/tuple.py
|
brownplt/lambda-py
|
c3ee39502c8953d36b886e5a203f2eb51d2f495b
|
[
"Apache-2.0"
] | 1
|
2021-03-26T03:36:19.000Z
|
2021-03-26T03:36:19.000Z
|
class tuple(object):
    """Bootstrap implementation of the builtin `tuple` for the lambda-py runtime.

    Primitive behaviour (length, indexing, concatenation, ...) is delegated to
    the interpreter through the ___delta/___id/___prim2 hooks; this class only
    layers the Python-level protocol methods on top of them.
    """

    def __new__(self, *args):
        # No arguments -> the canonical empty tuple; otherwise convert the
        # single argument through its __tuple__ hook.
        if ___delta("num=", args.__len__(), 0):
            return ()
        else:
            first_arg = ___delta("tuple-getitem", args, 0)
            return first_arg.__tuple__()

    def __init__(self, *args):
        # All construction happens in __new__.
        pass

    def __len__(self):
        int = ___id("%int")
        return ___delta("tuple-len", self, int)

    def __getitem__(self, f):
        return ___delta("tuple-getitem", self, f)

    def __tuple__(self):
        return self

    def __add__(self, other):
        tuple = ___id("%tuple")
        return ___delta("tuple+", self, other, tuple)

    def __mult__(self, other):
        # NOTE(review): the runtime apparently dispatches `*` to __mult__
        # rather than the standard __mul__ — confirm against the interpreter.
        tuple = ___id("%tuple")
        return ___delta("tuple*", self, other, tuple)

    def __in__(self, other):
        # Linear membership scan using element equality.
        c = 0
        while c < self.__len__():
            if self.__getitem__(c).__eq__(other):
                return True
            c = c.__add__(1)
        return False

    def __str__(self):
        str = ___id("%str")
        return ___delta("tuple-str", self, str)

    def __bool__(self):
        return not ___delta("num=", self.__len__(), 0)

    def __iter__(self):
        SeqIter = ___id("%SeqIter")
        return SeqIter(self)

    # NOTE(joe): copied code (list.py)
    def __cmp__(self, other):
        # Lexicographic comparison: walk both tuples until the elements
        # differ or one side is exhausted (None from __getitem__ past the end
        # — assumed behaviour of the runtime's getitem; confirm).
        def lstcmp(self, other, idx):
            li1 = self.__getitem__(idx)
            li2 = other.__getitem__(idx)
            if ___prim2("Is", li1, None):
                if ___prim2("Is", li2, None):
                    return 0
                else:
                    return 1
            else:
                if ___prim2("Is", li2, None):
                    return 1
                else:
                    cmpval = li1.__cmp__(li2)
                    if cmpval.__eq__(0):
                        nidx = idx.__add__(1)
                        return lstcmp(self, other, nidx)
                    else:
                        return cmpval
        return lstcmp(self, other, 0)

    def __eq__(self, other):
        cmpresult = self.__cmp__(other)
        return cmpresult.__eq__(0)

    def __hash__(self):
        # BUG FIX: previously accumulated `self.__hash__()` inside the loop,
        # recursing forever on any non-empty tuple; fold each element's hash
        # instead.
        result = 0
        for elt in self:
            result += elt.__hash__() * 17
        return result

    def __list__(self):
        # BUG FIX: SeqIter was referenced without resolving it from the
        # runtime environment first (NameError at call time), unlike every
        # other method here.
        SeqIter = ___id("%SeqIter")
        return SeqIter(self).__list__()

    def __set__(self):
        set = ___id("%set")
        return ___delta("tuple-set", self, set)


___assign("%tuple", tuple)
| 23.202247
| 52
| 0.588862
|
acfeb9f14c20850d1fb9ea3a7cab45a30c8d3b2c
| 6,890
|
py
|
Python
|
tests/test_anomaly_functionsv2.py
|
Agnarsh/functions
|
64a408ecf55773f38c5ce3b2fe75119e7235e9c9
|
[
"Apache-2.0"
] | null | null | null |
tests/test_anomaly_functionsv2.py
|
Agnarsh/functions
|
64a408ecf55773f38c5ce3b2fe75119e7235e9c9
|
[
"Apache-2.0"
] | null | null | null |
tests/test_anomaly_functionsv2.py
|
Agnarsh/functions
|
64a408ecf55773f38c5ce3b2fe75119e7235e9c9
|
[
"Apache-2.0"
] | null | null | null |
# *****************************************************************************
# © Copyright IBM Corp. 2018. All Rights Reserved.
#
# This program and the accompanying materials
# are made available under the terms of the Apache V2.0 license
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
#
# *****************************************************************************
import logging
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score
from sqlalchemy import Column, Float
from iotfunctions.db import Database
from iotfunctions.dbtables import FileModelStore
from iotfunctions.enginelog import EngineLogging
from iotfunctions.anomaly import (SaliencybasedGeneralizedAnomalyScoreV2, SpectralAnomalyScoreExt,
FFTbasedGeneralizedAnomalyScoreV2, KMeansAnomalyScoreV2)
from nose.tools import assert_true, nottest
# constants
# Column names shared between the anomaly scorers and the reference CSV.
Temperature = 'Temperature'              # input signal column
kmeans = 'TemperatureKmeansScore'        # K-Means score output column
fft = 'TemperatureFFTScore'              # FFT score output column
spectral = 'TemperatureSpectralScore'    # spectral score output column
spectralinv = 'TemperatureSpectralScoreInv'  # inverted spectral score column
sal = 'SaliencyAnomalyScore'             # saliency score output column
gen = 'TemperatureGeneralizedScore'      # generalized score column (unused below)
logger = logging.getLogger('Test Regressor')
@nottest
class DatabaseDummy:
    # Minimal stand-in for the iotfunctions Database object: only the
    # attributes read by the anomaly functions under test are provided.
    tenant_id = '###_IBM_###'
    db_type = 'db2'
    # Pre-generated models are read from ./data.
    model_store = FileModelStore('./data')

    def _init(self):
        # NOTE(review): presumably intended as __init__ — confirm; as written
        # it is never invoked implicitly, which is harmless for this dummy.
        return
def test_anomaly_scores():
    """End-to-end check of the V2 anomaly scorers against reference output.

    Runs the spectral, saliency, FFT and K-Means scorers over the Azure sample
    data twice (once with the pre-generated sklearn-0.21.3 scaling models from
    ./data, once regenerating models under /tmp), compares the scores to the
    stored reference CSV via R², then repeats the run on a frequency-resampled
    frame with an extra index level to exercise multi-index aggregation.
    """
    # Silence numba's very chatty debug output.
    numba_logger = logging.getLogger('numba')
    numba_logger.setLevel(logging.ERROR)
    ####
    print('Create dummy database')
    db_schema=None
    db = DatabaseDummy()
    print (db.model_store)
    #####
    jobsettings = { 'db': db, '_db_schema': 'public'}
    EngineLogging.configure_console_logging(logging.DEBUG)
    # Run on the good pump first
    # Get stuff in
    print('Read Anomaly Sample data in')
    df_i = pd.read_csv('./data/AzureAnomalysample.csv', index_col=False, parse_dates=['timestamp'])
    df_i['entity'] = 'MyRoom'
    # Shift the raw values so the scorers see positive temperatures.
    df_i[Temperature] = df_i['value'] + 20
    df_i = df_i.drop(columns=['value'])
    # and sort it by timestamp
    df_i = df_i.sort_values(by='timestamp')
    df_i = df_i.set_index(['entity', 'timestamp']).dropna()
    for i in range(0, df_i.index.nlevels):
        print(str(df_i.index.get_level_values(i)))
    #####
    # Pass 1: score with the checked-in scaling models (sklearn 0.21.3).
    print('Use scaling model generated with sklearn 0.21.3')
    print('Compute Saliency Anomaly Score')
    sali = SaliencybasedGeneralizedAnomalyScoreV2(Temperature, 12, True, sal)
    et = sali._build_entity_type(columns=[Column(Temperature, Float())], **jobsettings)
    sali._entity_type = et
    df_i = sali.execute(df=df_i)
    print('Compute FFT Anomaly Score')
    ffti = FFTbasedGeneralizedAnomalyScoreV2(Temperature, 12, True, fft)
    et = ffti._build_entity_type(columns=[Column(Temperature, Float())], **jobsettings)
    ffti._entity_type = et
    df_i = ffti.execute(df=df_i)
    print('Compute K-Means Anomaly Score')
    kmi = KMeansAnomalyScoreV2(Temperature, 12, True, kmeans)
    et = kmi._build_entity_type(columns=[Column(Temperature, Float())], **jobsettings)
    kmi._entity_type = et
    df_comp = kmi.execute(df=df_i)
    print("Executed Anomaly functions on sklearn 0.21.3")
    # Pass 2: regenerate scaling models under /tmp with the installed sklearn.
    print("Now generate new scalings with recent sklearn")
    db.model_store = FileModelStore('/tmp')
    print('Compute Spectral Anomaly Score')
    spsi = SpectralAnomalyScoreExt(Temperature, 12, spectral, spectralinv)
    et = spsi._build_entity_type(columns=[Column(Temperature, Float())], **jobsettings)
    spsi._entity_type = et
    df_i = spsi.execute(df=df_i)
    print('Compute Saliency Anomaly Score')
    sali = SaliencybasedGeneralizedAnomalyScoreV2(Temperature, 12, True, sal)
    et = sali._build_entity_type(columns=[Column(Temperature, Float())], **jobsettings)
    sali._entity_type = et
    df_i = sali.execute(df=df_i)
    print('Compute FFT Anomaly Score')
    ffti = FFTbasedGeneralizedAnomalyScoreV2(Temperature, 12, True, fft)
    et = ffti._build_entity_type(columns=[Column(Temperature, Float())], **jobsettings)
    ffti._entity_type = et
    df_i = ffti.execute(df=df_i)
    print('Compute K-Means Anomaly Score')
    kmi = KMeansAnomalyScoreV2(Temperature, 12, True, kmeans)
    et = kmi._build_entity_type(columns=[Column(Temperature, Float())], **jobsettings)
    kmi._entity_type = et
    df_comp = kmi.execute(df=df_i)
    print("Executed Anomaly functions")
    # df_comp.to_csv('./data/AzureAnomalysampleOutputV2.csv')
    df_o = pd.read_csv('./data/AzureAnomalysampleOutputV2.csv')
    # print('Compare Scores - Linf')
    # Compare computed scores against the stored reference via R².
    print('Compare Scores R2-score')
    comp2 = {spectral: r2_score(df_o[spectralinv].values, df_comp[spectralinv].values),
             fft: r2_score(df_o[fft].values, df_comp[fft].values),
             sal: r2_score(df_o[sal].values, df_comp[sal].values),
             kmeans: r2_score(df_o[kmeans].values, df_comp[kmeans].values)}
    print(comp2)
    # assert_true(comp2[spectral] > 0.9)
    assert_true(comp2[fft] > 0.9)
    assert_true(comp2[sal] > 0.9)
    # assert_true(comp2[kmeans] > 0.9)
    # Pass 3: same scorers on a resampled frame with an extra index level,
    # exercising the multi-index / aggregation code path.
    df_agg = df_i.copy()
    # add frequency to time
    df_agg = df_agg.reset_index().set_index(['timestamp']).asfreq(freq='T')
    df_agg['site'] = 'Munich'
    df_agg = df_agg.reset_index().set_index(['entity', 'timestamp', 'site']).dropna()
    print('Compute Spectral Anomaly Score - aggr')
    spsi = SpectralAnomalyScoreExt(Temperature, 12, spectral, spectralinv)
    et = spsi._build_entity_type(columns=[Column(Temperature, Float())], **jobsettings)
    spsi._entity_type = et
    df_agg = spsi.execute(df=df_agg)
    print('Compute K-Means Anomaly Score - aggr')
    kmi = KMeansAnomalyScoreV2(Temperature, 12, True, kmeans)
    et = kmi._build_entity_type(columns=[Column(Temperature, Float())], **jobsettings)
    kmi._entity_type = et
    df_agg = kmi.execute(df=df_agg)
    print('Compute Saliency Anomaly Score - aggr')
    sali = SaliencybasedGeneralizedAnomalyScoreV2(Temperature, 12, True, sal)
    et = sali._build_entity_type(columns=[Column(Temperature, Float())], **jobsettings)
    sali._entity_type = et
    df_agg = sali.execute(df=df_agg)
    print('Compute FFT Anomaly Score - aggr')
    ffti = FFTbasedGeneralizedAnomalyScoreV2(Temperature, 12, True, fft)
    et = ffti._build_entity_type(columns=[Column(Temperature, Float())], **jobsettings)
    ffti._entity_type = et
    df_agg = ffti.execute(df=df_agg)
    print(df_agg.describe())
    # Aggregated-path scores are printed for inspection only (not asserted).
    comp3 = {spectral: r2_score(df_o[spectralinv].values, df_agg[spectralinv].values),
             fft: r2_score(df_o[fft].values, df_agg[fft].values),
             sal: r2_score(df_o[sal].values, df_agg[sal].values),
             kmeans: r2_score(df_o[kmeans].values, df_agg[kmeans].values)}
    print(comp3)
    print("Executed Anomaly functions on aggregation data")
    pass
# uncomment to run from the command line
# test_anomaly_scores()
| 35.515464
| 99
| 0.688534
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.